code (stringlengths 22 to 1.05M) | apis (listlengths 1 to 3.31k) | extract_api (stringlengths 75 to 3.25M) |
---|---|---|
# -*- coding: utf-8 -*-
from django.contrib.contenttypes.models import ContentType
from django.db import models
import django
if django.VERSION >= (1, 8):
from django.contrib.contenttypes.fields import GenericForeignKey
from django.db.transaction import atomic
else:
from django.contrib.contenttypes.generic import GenericForeignKey
from django.db.transaction import commit_manually as atomic
from django.contrib.contenttypes.models import ContentType
from django.db import transaction,IntegrityError
from datetime import timedelta
from django.utils import timezone
now = timezone.now
from django.db.models import signals
from django.conf import settings
class HitManager(models.Manager):
def get_for(self, obj, bucket=None):
if bucket is None:
bucket_kwargs = {'bucket__isnull': True}
else:
bucket_kwargs = {'bucket': bucket}
if isinstance(obj, models.Model):
content_type = ContentType.objects.get_for_model(obj.__class__)
object_pk = getattr(obj, obj._meta.pk.column)
try:
return self.get_or_create(content_type=content_type, object_pk=object_pk, **bucket_kwargs)[0]
except IntegrityError: # catch race condition
return self.get(content_type=content_type, object_pk=object_pk, **bucket_kwargs)
        elif isinstance(obj, str) or (bytes is str and isinstance(obj, unicode)):  # str on py3, str/unicode on py2
try:
return self.get_or_create(content_type__isnull=True, object_pk=obj, **bucket_kwargs)[0]
except IntegrityError: # catch race condition
return self.get(content_type__isnull=True, object_pk=obj, **bucket_kwargs)
else:
raise Exception("Don't know what to do with this obj!?")
def hit(self, obj, user, ip, bucket=None):
hit = self.get_for(obj, bucket=bucket)
hit.hit(user, ip)
return hit
class Hit(models.Model):
content_type = models.ForeignKey(ContentType, null=True)
    object_pk = models.CharField(max_length=50)  # TextField is not possible because unique_together is needed; 50 chars must suffice
    content_object = GenericForeignKey(ct_field="content_type", fk_field="object_pk")
    bucket = models.CharField(max_length=50, blank=True, null=True)  # Each object may have multiple buckets that hits get counted in
views = models.PositiveIntegerField(default=0) # page hits/views
visits = models.PositiveIntegerField(default=0) # unique visits
objects = HitManager()
def hit(self, user, ip):
try:
with transaction.atomic():
if self.has_hit_from(user, ip):
self.update_hit_from(user, ip)
Hit.objects.filter(pk=self.pk).update(views=models.F('views') + 1)
self.views += 1
return True
else:
self.log.create(user=user, ip=ip)
Hit.objects.filter(pk=self.pk).update(views=models.F('views') + 1, visits=models.F('visits') + 1)
self.views += 1
self.visits += 1
return True
except IntegrityError:
# CATCH RACE CONDITION
            # log entry was already created
            # happens when users double-click or reload too fast
# (we ignore this)
return False
def has_hit_from(self, user, ip):
self.clear_log()
if self.log.filter(user=user, ip=ip).count():
return True
else:
return False
def update_hit_from(self, user, ip):
self.log.filter(user=user, ip=ip).update(when=now())
def clear_log(self):
timespan = now() - timedelta(days=30)
for l in self.log.filter(when__lt=timespan).order_by('-when')[25:]:
l.delete()
class Meta:
unique_together = (('content_type', 'object_pk', 'bucket'),)
class HitLog(models.Model):
hit = models.ForeignKey(Hit, related_name='log')
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='hits_log', null=True)
ip = models.GenericIPAddressField(null=True) if hasattr(models,"GenericIPAddressField") else models.IPAddressField(null=True)
when = models.DateTimeField(default=now)
class Meta:
unique_together = (('hit', 'user', 'ip'),)
class HitHistory(models.Model):
hit = models.ForeignKey(Hit, related_name='history')
when = models.DateTimeField(default=now)
views = models.PositiveIntegerField(default=0)
visits = models.PositiveIntegerField(default=0)
views_change = models.PositiveIntegerField(default=0)
visits_change = models.PositiveIntegerField(default=0)
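# Illustrative usage sketch (not part of the original module): recording a
# page view with the manager API defined above. `obj` may be any saved model
# instance or a plain string key; anonymous visitors can be logged with
# user=None, and `bucket` keeps separate counters per object.
def record_page_view(obj, user, ip, bucket=None):
    hit = Hit.objects.hit(obj, user, ip, bucket=bucket)
    return hit.views, hit.visits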
|
[
"django.db.models.IPAddressField",
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.PositiveIntegerField",
"django.contrib.contenttypes.models.ContentType.objects.get_for_model",
"django.db.models.GenericIPAddressField",
"django.db.models.F",
"datetime.timedelta",
"django.contrib.contenttypes.generic.GenericForeignKey",
"django.db.models.DateTimeField",
"django.db.transaction.atomic",
"django.get_version"
] |
[((2020, 2061), 'django.db.models.ForeignKey', 'models.ForeignKey', (['ContentType'], {'null': '(True)'}), '(ContentType, null=True)\n', (2037, 2061), False, 'from django.db import models\n'), ((2078, 2109), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (2094, 2109), False, 'from django.db import models\n'), ((2208, 2272), 'django.contrib.contenttypes.generic.GenericForeignKey', 'GenericForeignKey', ([], {'ct_field': '"""content_type"""', 'fk_field': '"""object_pk"""'}), "(ct_field='content_type', fk_field='object_pk')\n", (2225, 2272), False, 'from django.contrib.contenttypes.generic import GenericForeignKey\n'), ((2287, 2341), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'blank': '(True)', 'null': '(True)'}), '(max_length=50, blank=True, null=True)\n', (2303, 2341), False, 'from django.db import models\n'), ((2416, 2454), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (2443, 2454), False, 'from django.db import models\n'), ((2487, 2525), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (2514, 2525), False, 'from django.db import models\n'), ((4017, 4059), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Hit'], {'related_name': '"""log"""'}), "(Hit, related_name='log')\n", (4034, 4059), False, 'from django.db import models\n'), ((4071, 4150), 'django.db.models.ForeignKey', 'models.ForeignKey', (['settings.AUTH_USER_MODEL'], {'related_name': '"""hits_log"""', 'null': '(True)'}), "(settings.AUTH_USER_MODEL, related_name='hits_log', null=True)\n", (4088, 4150), False, 'from django.db import models\n'), ((4293, 4326), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'now'}), '(default=now)\n', (4313, 4326), False, 'from django.db import models\n'), ((4439, 4485), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Hit'], {'related_name': '"""history"""'}), "(Hit, related_name='history')\n", (4456, 4485), False, 'from django.db import models\n'), ((4497, 4530), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'now'}), '(default=now)\n', (4517, 4530), False, 'from django.db import models\n'), ((4544, 4582), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4571, 4582), False, 'from django.db import models\n'), ((4596, 4634), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4623, 4634), False, 'from django.db import models\n'), ((4655, 4693), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4682, 4693), False, 'from django.db import models\n'), ((4714, 4752), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4741, 4752), False, 'from django.db import models\n'), ((220, 240), 'django.get_version', 'django.get_version', ([], {}), '()\n', (238, 240), False, 'import django\n'), ((4160, 4199), 'django.db.models.GenericIPAddressField', 'models.GenericIPAddressField', ([], {'null': '(True)'}), '(null=True)\n', (4188, 4199), False, 'from django.db import models\n'), ((4248, 4280), 'django.db.models.IPAddressField', 'models.IPAddressField', ([], {'null': '(True)'}), '(null=True)\n', (4269, 4280), False, 'from django.db import models\n'), ((1045, 1093), 
'django.contrib.contenttypes.models.ContentType.objects.get_for_model', 'ContentType.objects.get_for_model', (['obj.__class__'], {}), '(obj.__class__)\n', (1078, 1093), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((3773, 3791), 'datetime.timedelta', 'timedelta', ([], {'days': '(30)'}), '(days=30)\n', (3782, 3791), False, 'from datetime import timedelta\n'), ((2631, 2651), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (2649, 2651), False, 'from django.db import transaction, IntegrityError\n'), ((2816, 2833), 'django.db.models.F', 'models.F', (['"""views"""'], {}), "('views')\n", (2824, 2833), False, 'from django.db import models\n'), ((3047, 3064), 'django.db.models.F', 'models.F', (['"""views"""'], {}), "('views')\n", (3055, 3064), False, 'from django.db import models\n'), ((3077, 3095), 'django.db.models.F', 'models.F', (['"""visits"""'], {}), "('visits')\n", (3085, 3095), False, 'from django.db import models\n')]
|
#!/usr/bin/env python
# stdlib imports
import urllib.request as request
import tempfile
import os.path
import sys
from datetime import datetime
# third party imports
import numpy as np
# local imports
from losspager.utils.expocat import ExpoCat
def commify(value):
if np.isnan(value):
return 'NaN'
return format(int(value), ",d")
def get_max_mmi(tdict, minimum=1000):
indices = ['MMI1', 'MMI2', 'MMI3', 'MMI4',
'MMI5', 'MMI6', 'MMI7', 'MMI8', 'MMI9+']
exparray = np.array([tdict[idx] for idx in indices])
    imax = (exparray > minimum).nonzero()[0].max()
return (imax + 1, exparray[imax])
def test():
homedir = os.path.dirname(os.path.abspath(
__file__)) # where is this script?
expocat = ExpoCat.fromDefault()
clat = 0.37
clon = -79.94
radius = 400
ndeaths = 9
minicat = expocat.selectByRadius(clat, clon, radius)
print('Testing that historical events returned are correct...')
maxmmi = 8
nmaxmmi = 103000
events = minicat.getHistoricalEvents(maxmmi, nmaxmmi, ndeaths, clat, clon)
assert events[0]['EventID'] == '199603282303'
assert events[1]['EventID'] == '197912120759'
assert events[2]['EventID'] == '198703060410'
print('Passed.')
print('Testing that events selected by hazard are correct...')
fire = expocat.selectByHazard('fire')
tsunami = expocat.selectByHazard('tsunami')
liquefaction = expocat.selectByHazard('liquefaction')
landslide = expocat.selectByHazard('landslide')
assert fire._dataframe['Fire'].sum() == len(fire)
assert tsunami._dataframe['Tsunami'].sum() == len(tsunami)
assert liquefaction._dataframe['Liquefaction'].sum() == len(liquefaction)
assert landslide._dataframe['Landslide'].sum() == len(landslide)
# test exclusion method
test_time = datetime(1994, 1, 1)
expocat.excludeFutureEvents(test_time)
assert expocat._dataframe['Time'].max() < test_time
print('Passed.')
if __name__ == '__main__':
test()
|
[
"losspager.utils.expocat.ExpoCat.fromDefault",
"numpy.array",
"numpy.isnan",
"datetime.datetime"
] |
[((277, 292), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (285, 292), True, 'import numpy as np\n'), ((509, 550), 'numpy.array', 'np.array', (['[tdict[idx] for idx in indices]'], {}), '([tdict[idx] for idx in indices])\n', (517, 550), True, 'import numpy as np\n'), ((756, 777), 'losspager.utils.expocat.ExpoCat.fromDefault', 'ExpoCat.fromDefault', ([], {}), '()\n', (775, 777), False, 'from losspager.utils.expocat import ExpoCat\n'), ((1836, 1856), 'datetime.datetime', 'datetime', (['(1994)', '(1)', '(1)'], {}), '(1994, 1, 1)\n', (1844, 1856), False, 'from datetime import datetime\n')]
|
#!/usr/bin/env python
# encoding: utf-8
"""
AutomaticSeeding_t.py
Created by <NAME> on 2010-08-30.
Copyright (c) 2010 Fermilab. All rights reserved.
"""
from __future__ import print_function
import sys
import os
import unittest
from WMCore.JobSplitting.Generators.AutomaticSeeding import AutomaticSeeding
from WMCore.DataStructs.Job import Job
from PSetTweaks.PSetTweak import PSetTweak
class AutomaticSeeding_tTests(unittest.TestCase):
def testA(self):
"""test creating the plugin"""
try:
seeder = AutomaticSeeding()
except Exception as ex:
msg = "Failed to instantiate an AutomaticSeeder: "
msg += str(ex)
self.fail(msg)
def testB(self):
"""test plugin acts on a Job as expected"""
job = Job("TestJob")
seeder = AutomaticSeeding()
seeder(job)
def testC(self):
"""test building a tweak from the seeds"""
job = Job("TestJob")
seeder = AutomaticSeeding()
job.addBaggageParameter("process.RandomNumberGeneratorService.seed1.initialSeed", 123445)
job.addBaggageParameter("process.RandomNumberGeneratorService.seed2.initialSeed", 123445)
job.addBaggageParameter("process.RandomNumberGeneratorService.seed3.initialSeed", 7464738)
job.addBaggageParameter("process.RandomNumberGeneratorService.seed44.initialSeed", 98273762)
seeder(job)
tweak = PSetTweak()
for x in job.baggage.process.RandomNumberGeneratorService:
parameter = "process.RandomNumberGeneratorService.%s.initialSeed" % x._internal_name
tweak.addParameter(parameter, x.initialSeed)
print(tweak)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"WMCore.JobSplitting.Generators.AutomaticSeeding.AutomaticSeeding",
"WMCore.DataStructs.Job.Job",
"PSetTweaks.PSetTweak.PSetTweak"
] |
[((1731, 1746), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1744, 1746), False, 'import unittest\n'), ((797, 811), 'WMCore.DataStructs.Job.Job', 'Job', (['"""TestJob"""'], {}), "('TestJob')\n", (800, 811), False, 'from WMCore.DataStructs.Job import Job\n'), ((829, 847), 'WMCore.JobSplitting.Generators.AutomaticSeeding.AutomaticSeeding', 'AutomaticSeeding', ([], {}), '()\n', (845, 847), False, 'from WMCore.JobSplitting.Generators.AutomaticSeeding import AutomaticSeeding\n'), ((955, 969), 'WMCore.DataStructs.Job.Job', 'Job', (['"""TestJob"""'], {}), "('TestJob')\n", (958, 969), False, 'from WMCore.DataStructs.Job import Job\n'), ((987, 1005), 'WMCore.JobSplitting.Generators.AutomaticSeeding.AutomaticSeeding', 'AutomaticSeeding', ([], {}), '()\n', (1003, 1005), False, 'from WMCore.JobSplitting.Generators.AutomaticSeeding import AutomaticSeeding\n'), ((1442, 1453), 'PSetTweaks.PSetTweak.PSetTweak', 'PSetTweak', ([], {}), '()\n', (1451, 1453), False, 'from PSetTweaks.PSetTweak import PSetTweak\n'), ((538, 556), 'WMCore.JobSplitting.Generators.AutomaticSeeding.AutomaticSeeding', 'AutomaticSeeding', ([], {}), '()\n', (554, 556), False, 'from WMCore.JobSplitting.Generators.AutomaticSeeding import AutomaticSeeding\n')]
|
#!/usr/bin/env python2
"""
This script extracts crackable hashes from krb5's credential cache files (e.g.
/tmp/krb5cc_1000).
NOTE: This attack technique only works against MS Active Directory servers.
This was tested with CentOS 7.4 client running krb5-1.15.1 software against a
Windows 2012 R2 Active Directory server.
Usage: python ccache2john.py ccache_file
Upstream: https://github.com/rvazarkar/KrbCredExport
Authors: <NAME> (main author), <NAME> (splitting support), and <NAME> (misc. glue)
Resources,
https://lapo.it/asn1js/
https://tools.ietf.org/html/rfc1510#section-5.8.1
https://github.com/CoreSecurity/impacket/tree/master/impacket/krb5
https://www.gnu.org/software/shishi/manual/html_node/The-Credential-Cache-Binary-File-Format.html
https://github.com/wireshark/wireshark/blob/master/epan/dissectors/asn1/kerberos/KerberosV5Spec2.asn
"""
import sys
import os.path
import time
import struct
import datetime
from pyasn1.codec.ber import decoder
# LB is a single byte representing the length of the rest of the section
# LT is a 3 byte structure consisting of the byte 82 followed by 2 bytes representing the length of the rest of the file
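# Illustrative aside (not part of the original script): how those two length
# tags would be built with struct; the 0x0234 length is a made-up example.
#   lb_tag = chr(0x2A)                           # LB: a single length byte (here 42)
#   lt_tag = '\x82' + struct.pack(">H", 0x0234)  # LT: 0x82 followed by a 2-byte big-endian length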
# header {
# uint16 tag
# uint16 taglen
# uint8[taglen] tagdata
# }
class Header:
def __init__(self):
self.tag = None
self.taglen = None
self.deltatime = DeltaTime()
def parsefile(self, f):
self.tag, self.taglen = struct.unpack(">HH", f.read(4))
self.deltatime.parsefile(f)
def tostring(self):
r = ''
r += struct.pack(">HH", self.tag, self.taglen)
r += self.deltatime.tostring()
return r
# deltatime {
# uint32 time_offset
# uint32 usec_offset
# }
class DeltaTime:
def __init__(self):
self.usec_offset = None
self.time_offset = None
def parsefile(self, f):
self.time_offset, self.usec_offset = struct.unpack(">LL", f.read(8))
def tostring(self):
r = ''
r += struct.pack(">LL", self.time_offset, self.usec_offset)
return r
# ccacheheader {
# uint16 version
# uint16 header_len
# header[] headers
# principal primary_principal
# }
class CCacheHeader:
def __init__(self):
self.version = None
self.header_length = None
self.header = Header()
def parsefile(self, f):
self.version, = struct.unpack(">H", f.read(2))
self.header_length, = struct.unpack(">H", f.read(2))
# self.header.parsefile(f) # this is perhaps buggy?
f.read(self.header_length)
def tostring(self):
r = ''
r += struct.pack(">HH", self.version, self.header_length)
r += self.header.tostring()
return r
# times {
# uint32 authtime
# uint32 starttime
# uint32 endtime
# uint32 renew_till
# }
class KerbTimes:
def __init__(self):
self.authtime = None
self.starttime = None
self.endtime = None
self.renew_till = None
def parsefile(self, f):
self.authtime, self.starttime, self.endtime, self.renew_till = struct.unpack(">IIII", f.read(16))
def tostring(self):
return struct.pack(">IIII", self.authtime, self.starttime, self.endtime, self.renew_till)
# counted_octet {
# uint32 length
# uint8[char] data
# }
class CountedOctet:
def __init__(self):
self.length = None
self.data = None
def parsefile(self, f):
self.length, = struct.unpack(">L", f.read(4))
self.data, = struct.unpack(">%ds" % self.length, f.read(self.length))
def tostring(self):
r = b''
r += struct.pack(">L", self.length)
r += struct.pack(">%ds" % self.length, self.data)
return r
# keyblock {
# uint16 keytype
# uint16 etype
# uint16 keylen
# uint8[keylen] key
# }
class Keyblock:
def __init__(self):
self.keytype = None
self.etype = None
self.keylen = None
self.key = None
def parsefile(self, f):
self.keytype, self.etype, self.keylen = struct.unpack(">HHH", f.read(6))
self.key, = struct.unpack(">%ds" % self.keylen, f.read(self.keylen))
def tostring(self):
r = ''
r += struct.pack(">HHH", self.keytype, self.etype, self.keylen)
r += struct.pack(">%ds" % self.keylen, self.key)
return r
# principal {
# uint32 name_type
# uint32 num_components
# counted_octet realm
# counted_octet[num_components] components
# }
class Principal:
def __init__(self):
self.name_type = None
self.num_components = None
self.realm = CountedOctet()
self.components = []
def parsefile(self, f):
self.name_type, self.num_components = struct.unpack(">LL", f.read(8))
self.realm.parsefile(f)
for i in range(0, self.num_components):
component = CountedOctet()
component.parsefile(f)
self.components.append(component.data)
def tostring(self):
r = ''
r += struct.pack(">LL", self.name_type, self.num_components)
r += self.realm.tostring()
for i in self.components:
r += struct.pack(">L", len(i))
r += i
return r
# address {
# uint16 address_type
# counted_octet address
# }
class Address:
def __init__(self):
self.address_type = None
self.address = CountedOctet()
def parsefile(self, f):
self.address_type, = struct.unpack(">H", f.read(2))
self.address.parsefile(f)
def tostring(self):
r = ''
r += struct.pack(">H", self.address_type)
r += self.address.tostring()
return r
# authdata {
# uint16 authtype
# counted_octet authdata
# }
class AuthData:
def __init__(self):
self.authtype = None
self.authdata = CountedOctet()
def parsefile(self, f):
self.authtype, = struct.unpack(">H", f.read(2))
self.authdata.parsefile(f)
def tostring(self):
r = ''
r += struct.pack(">H", self.authtype)
r += self.authdata.tostring()
return r
# credential {
# principal client
# principal server
# keyblock key
# times timedata
# uint8 skey
# uint32 tktFlags (Reverse Byte Order!)
# uint32 num_address
# address[num_address] addresses
# uint32 num_authdata
# authdata[num_authdata] auths
# counted_octet ticket_1
# counted_octet ticket_2 (nothing here in what I've seen)
# }
class Credential:
def __init__(self):
self.client = Principal()
self.server = Principal()
self.keyblock = Keyblock()
self.times = KerbTimes()
self.is_skey = None
self.tktFlags = None
self.num_address = None
self.address = []
self.num_authdata = None
self.authdata = []
self.ticket = CountedOctet()
self.secondticket = CountedOctet()
def parsefile(self, f):
self.client.parsefile(f)
self.server.parsefile(f)
self.keyblock.parsefile(f)
self.times.parsefile(f)
self.is_skey, = struct.unpack(">B", f.read(1))
self.tktFlags, = struct.unpack("<I", f.read(4))
self.num_address, = struct.unpack(">I", f.read(4))
for i in range(0, self.num_address):
self.address.append(Address().parsefile(f))
self.num_authdata, = struct.unpack(">I", f.read(4))
for i in range(0, self.num_authdata):
self.authdata.append(AuthData().parsefile(f))
self.ticket.parsefile(f)
self.secondticket.parsefile(f)
def tostring(self):
r = ''
r += self.client.tostring()
r += self.server.tostring()
r += self.keyblock.tostring()
r += self.times.tostring()
r += struct.pack(">B", self.is_skey)
r += struct.pack("<I", self.tktFlags)
r += struct.pack(">I", self.num_address)
for i in self.address:
r += i.tostring()
r += struct.pack(">I", self.num_authdata)
for i in self.authdata:
r += i.tostring()
r += self.ticket.tostring()
r += self.secondticket.tostring()
return r
# Prepend, shortened for convenience
def p(a, b):
return b + a
# Returns the length of s as a single byte
def clen(s):
return chr(len(s))
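# Illustrative aside (not part of the original script): the tostring() methods
# below build ASN.1-style sections back to front with these helpers, e.g.
#   p('abc', clen('abc')) == '\x03abc'  # length byte prepended to the data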
# key {
# 0xA0 LB
# 0x30 LB
# 0xA0 0x03 0x02 0x01
# uint8 key_type
# 0xA1 LB
# 0x03 LB
# keydata
# }
class Key:
def __init__(self):
self.key = None
self.keytype = None
def parsefile(self, f):
f.read(8)
self.keytype, = struct.unpack('>B', f.read(1))
f.read(3)
keylen, = struct.unpack('>B', f.read(1))
self.key, = struct.unpack(">%ds" % keylen, f.read(keylen))
def tostring(self):
r = ''
r += self.key
r = p(r, clen(r))
r = p(r, '\x04')
r = p(r, clen(r))
r = p(r, '\xA1')
r = p(r, chr(self.keytype))
r = p(r, '\xA0\x03\x02\x01')
r = p(r, clen(r))
r = p(r, '\x30')
r = p(r, clen(r))
r = p(r, '\xA0')
return r
# This section represents the primary principal realm. Corresponds to the domain name
# prealm {
# 0xA1 LB
# 0x1B LB
# Primary Principal Realm
# }
class PRealm:
def __init__(self):
self.principal_realm = None
def parsefile(self, f):
f.read(3)
length, = struct.unpack(">b", f.read(1))
self.principal_realm, = struct.unpack(">%ds" % length, f.read(length))
def tostring(self):
r = ''
r += self.principal_realm
r = p(r, clen(r))
r = p(r, '\x1B')
r = p(r, clen(r))
r = p(r, '\xA1')
return r
# This section represents the primary principal realm
# pname {
# 0xA2 LB
# 0x30 LB
# 0xA0 0x03 0x02 0x01
# uint8 name_type
# 0xA1 LB
# 0x30 LB
# 0x1B LB
# Primary Principal Name
# }
class PName:
def __init__(self):
self.principal_components = []
self.principal_name_type = None
def parsefile(self, f):
f.read(8)
self.principal_name_type, = struct.unpack(">B", f.read(1))
f.read(3)
rem_length, = struct.unpack(">B", f.read(1))
while (rem_length > 0):
f.read(1)
l, = struct.unpack(">B", f.read(1))
component, = struct.unpack("%ds" % l, f.read(l))
self.principal_components.append(component)
rem_length -= (2 + l)
def tostring(self):
r = ''
for s in self.principal_components:
r += '\x1B' + chr(len(s)) + s
r = p(r, clen(r))
r = p(r, '\x30')
r = p(r, clen(r))
r = p(r, '\xA1')
r = p(r, chr(self.principal_name_type))
r = p(r, '\xA0\x03\x02\x01')
r = p(r, clen(r))
r = p(r, '\x30')
r = p(r, clen(r))
r = p(r, '\xA2')
return r
# This section details flags for the ticket
# tktflags {
# 0xA3 LB
# 0x03 LB
# 0x00 Always 0, apparently number of unused bytes. tktFlags is always a uint32
# uint32 Ticket Flags
# }
class TicketFlags:
def __init__(self):
self.ticket_flags = None
def parsefile(self, f):
f.read(5)
self.ticket_flags, = struct.unpack("I", f.read(4))
def tostring(self):
r = ''
r += struct.pack("I", self.ticket_flags)
r = p(r, '\x00')
r = p(r, clen(r))
r = p(r, '\x03')
r = p(r, clen(r))
r = p(r, '\xA3')
return r
# These sections contain the ticket timestamps. Note that the timestamps are in a consistent format, so length tags are always the same
# Timestamp format is YYYYmmddHHMMSSZ and must be UTC!
# 0xA5 is starttime, 0xA6 is endtime, 0xA7 is renew_till
# time {
# uint8 Identifier
# LB (Always 0x11)
# 0x18 LB (Always 0x0F)
# start_time
# }
class Time:
def __init__(self, identifier):
self.identifier = identifier
self.time = None
@staticmethod
def convert_to_unix(timestr):
epoch = datetime.datetime(1970, 1, 1)
t = datetime.datetime.strptime(timestr[:-1], '%Y%m%d%H%M%S')
td = t - epoch
return int((td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 1e6)
@staticmethod
def convert_to_kerbtime(unixtime):
t = datetime.datetime.utcfromtimestamp(unixtime)
t = ''.join([t.strftime('%Y'), t.strftime('%m'), t.strftime('%d'),
t.strftime('%H'), t.strftime('%M'), t.strftime('%S'), 'Z'])
return t
def parsefile(self, f):
self.identifier, = struct.unpack(">B", f.read(1))
f.read(3)
strtime, = struct.unpack(">15s", f.read(15))
self.time = Time.convert_to_unix(strtime)
def tostring(self):
r = ''
r += struct.pack(">15s", Time.convert_to_kerbtime(self.time))
r = p(r, '\x11\x18\x0F')
r = p(r, chr(self.identifier))
return r
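# Illustrative check (not part of the original script) of the timestamp
# round trip described above; 757382400 is 1994-01-01 00:00:00 UTC:
#   Time.convert_to_unix('19940101000000Z') -> 757382400
#   Time.convert_to_kerbtime(757382400)     -> '19940101000000Z'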
# This section represents the server realm (domain)
# srealm {
# 0xA8 LB
# 0x1B LB
# server_realm (domain name of server)
# }
class SRealm:
def __init__(self):
self.server_realm = None
def parsefile(self, f):
f.read(3)
length, = struct.unpack(">B", f.read(1))
self.server_realm, = struct.unpack(">%ds" % length, f.read(length))
def tostring(self):
r = ''
r += self.server_realm
r = p(r, clen(r))
r = p(r, '\x1B')
r = p(r, clen(r))
r = p(r, '\xA8')
return r
# This section represents the server name components
# sname {
# 0xA9 LB
# 0x30 LB
# 0xA0 0x03 0x02 0x01
# uint8 server_name_type
# 0xA1 LB
# 0x30 LB
# components[]
# }
#
# components {
# 0x1B
# uint8 Component Length
# Component
# }
class SName:
def __init__(self):
self.server_components = []
self.server_name_type = None
def parsefile(self, f):
f.read(8)
self.server_name_type, = struct.unpack(">B", f.read(1))
f.read(3)
rem_length, = struct.unpack(">B", f.read(1))
while rem_length > 0:
f.read(1)
l, = struct.unpack(">B", f.read(1))
component, = struct.unpack(">%ds" % l, f.read(l))
self.server_components.append(component)
rem_length -= (2 + l)
def tostring(self):
r = ''
for s in self.server_components:
r += '\x1B' + chr(len(s)) + s
r = p(r, clen(r))
r = p(r, '\x30')
r = p(r, clen(r))
r = p(r, '\xA1')
r = p(r, chr(self.server_name_type))
r = p(r, '\xA0\x03\x02\x01')
r = p(r, clen(r))
r = p(r, '\x30')
r = p(r, clen(r))
r = p(r, '\xA9')
return r
# header {
# 0x7D LT
# 0x30 LT
# 0xA0 LT
# 0x30 LT
# 0x30 LT
# }
class KrbCredInfo:
def __init__(self):
self.krbcredinfo = None
self.key = Key()
self.prealm = PRealm()
self.pname = PName()
self.flags = TicketFlags()
self.starttime = Time(165)
self.endtime = Time(166)
self.renew_till = Time(167)
self.srealm = SRealm()
self.sname = SName()
def parsefile(self, f):
f.read(20)
self.key.parsefile(f)
self.prealm.parsefile(f)
self.pname.parsefile(f)
self.flags.parsefile(f)
self.starttime.parsefile(f)
self.endtime.parsefile(f)
self.renew_till.parsefile(f)
self.srealm.parsefile(f)
self.sname.parsefile(f)
self.krbcredinfo = self.key.tostring() + self.prealm.tostring() + self.pname.tostring() + self.flags.tostring() + \
self.starttime.tostring() + self.endtime.tostring() + \
self.renew_till.tostring() + self.srealm.tostring() + \
self.sname.tostring()
def tostring(self):
r = self.krbcredinfo
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\x30\x82')
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\x30\x82')
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\xA0\x82')
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\x30\x82')
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\x7D\x82')
return r
def createkrbcrdinfo(self):
self.krbcredinfo = self.key.tostring() + self.prealm.tostring() + self.pname.tostring() + self.flags.tostring() + \
self.starttime.tostring() + self.endtime.tostring() + \
self.renew_till.tostring() + self.srealm.tostring() + \
self.sname.tostring()
# The encpart serves as a sort of header for the EncKrbCredPart
# encpart {
# 0xA0 0x03 0x02 0x01
# uint8 etype (Seems to always be 0 in my testing)
# 0xA2 LT
# 0x04 LT
# }
class EncPart:
def __init__(self):
self.krbcredinfo = KrbCredInfo()
self.etype = None
def parsefile(self, f):
f.read(4)
self.etype, = struct.unpack(">B", f.read(1))
f.read(8)
self.krbcredinfo.parsefile(f)
def tostring(self):
r = self.krbcredinfo.tostring()
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\x04\x82')
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\xA2\x82')
r = p(r, chr(self.etype))
r = p(r, '\xA0\x03\x02\x01')
return r
# This section represents the tickets section of the overall KrbCred
# tickets {
# 0xA2 0x82
# uint16 ticket_length + 4
# 0x30 0x82
# uint16 ticket_length
# ticket
# 0xA3 LT
# 0x30 LT
# }
class TicketPart:
def __init__(self):
self.ticket = None
self.encpart = EncPart()
def parsefile(self, f):
f.read(6)
ticketlen, = struct.unpack(">H", f.read(2))
self.ticket, = struct.unpack(">%ds" % ticketlen, f.read(ticketlen))
f.read(8)
self.encpart.parsefile(f)
def tostring(self):
r = self.encpart.tostring()
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\x30\x82')
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\xA3\x82')
r = p(r, self.ticket)
r = p(r, struct.pack(">H", len(self.ticket)))
r = p(r, '\x30\x82')
r = p(r, struct.pack(">H", len(self.ticket) + 4))
r = p(r, '\xA2\x82')
return r
# This is the header for the kerberos ticket, and the final section
# header {
# 0x76 LT
# 0x30 LT
# 0xA0 0x03 0x02 0x01
# uint8 pvno (Protocol Version, always 0x05)
# 0xA1 0x03 0x02 0x01
# uint8 msg-type (Always 0x16 for krbcred)
# }
class KrbCredHeader:
def __init__(self):
self.ticketpart = TicketPart()
def parsefile(self, f):
f.read(18)
self.ticketpart.parsefile(f)
def tostring(self):
r = self.ticketpart.tostring()
r = p(r, '\xA1\x03\x02\x01\x16')
r = p(r, '\xA0\x03\x02\x01\x05')
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\x30\x82')
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\x76\x82')
return r
# borrowed from https://stackoverflow.com
def swap32(i):
return struct.unpack("<I", struct.pack(">I", i))[0]
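# For example (illustrative): swap32(0x12345678) == 0x78563412, undoing the
# reversed byte order of tktFlags noted in the credential layout above.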
# src/include/krb5/krb5.h
"""
#define TKT_FLG_FORWARDABLE 0x40000000
#define TKT_FLG_FORWARDED 0x20000000
#define TKT_FLG_PROXIABLE 0x10000000
#define TKT_FLG_PROXY 0x08000000
#define TKT_FLG_MAY_POSTDATE 0x04000000
#define TKT_FLG_POSTDATED 0x02000000
#define TKT_FLG_INVALID 0x01000000
#define TKT_FLG_RENEWABLE 0x00800000
#define TKT_FLG_PRE_AUTH 0x00200000
#define TKT_FLG_HW_AUTH 0x00100000
#define TKT_FLG_TRANSIT_POLICY_CHECKED 0x00080000
#define TKT_FLG_OK_AS_DELEGATE 0x00040000
#define TKT_FLG_ENC_PA_REP 0x00010000
#define TKT_FLG_ANONYMOUS 0x00008000
"""
TKT_FLG_INITIAL = 0x00400000
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage: {0} <input credential cache file>".format(sys.argv[0]))
print("\nExample: {0} /tmp/krb5cc_1000".format(sys.argv[0]))
sys.exit(0)
with open(sys.argv[1], 'rb') as f:
fileid, = struct.unpack(">B", f.read(1))
if fileid == 0x5: # Credential Cache (ccache)
f.seek(0)
header = CCacheHeader()
primary_principal = Principal()
credential = Credential()
header.parsefile(f)
primary_principal.parsefile(f)
i = 0
sys.stderr.write("WARNING: Not all the hashes generated by this program are crackable. Please select the relevant hashes manually!\n")
time.sleep(2)
# Check if you've reached the end of the file. If not get the next credential
while(f.read(1) != ''):
f.seek(-1, 1)
credential.parsefile(f)
out = []
KrbCred = KrbCredHeader()
KrbCred.ticketpart.ticket = credential.ticket.data # extract hash from here!
try:
# this code is terrible!
etype = str(decoder.decode(credential.ticket.data)[0][3][0])
data = str(decoder.decode(credential.ticket.data)[0][3][2])
if etype != "23":
sys.stderr.write("Unsupported etype %s found. Such hashes can't be cracked it seems.\n" % etype)
continue
except:
continue
# print(credential.ticket.data.encode("hex"))
KrbCred.ticketpart.encpart.etype = credential.keyblock.etype
krbcredinfo = KrbCred.ticketpart.encpart.krbcredinfo
krbcredinfo.key.key = credential.keyblock.key
krbcredinfo.key.keytype = credential.keyblock.keytype
# print(credential.keyblock.keytype)
krbcredinfo.prealm.principal_realm = primary_principal.realm.data
# print(primary_principal.realm.data)
krbcredinfo.pname.principal_components = primary_principal.components
# print(primary_principal.components)
krbcredinfo.pname.principal_name_type = primary_principal.name_type
krbcredinfo.flags.ticket_flags = credential.tktFlags
tktFlags = swap32(credential.tktFlags)
if tktFlags & TKT_FLG_INITIAL:
continue
krbcredinfo.starttime.time = credential.times.starttime
krbcredinfo.endtime.time = credential.times.endtime
krbcredinfo.renew_till.time = credential.times.renew_till
krbcredinfo.srealm.server_realm = credential.server.realm.data
# print(credential.server.realm.data)
krbcredinfo.sname.server_components = credential.server.components
for c in credential.server.components: # dirty hack
if c not in ['krbtgt', 'krb5_ccache_conf_data', 'pa_type']:
out.append(c)
name = b"-".join(out[-2:])
krbcredinfo.sname.server_name_type = credential.server.name_type
krbcredinfo.createkrbcrdinfo()
sys.stdout.write("%s:$krb5tgs$%s$%s$%s\n" % (os.path.basename(name), etype, data[:16].encode("hex"), data[16:].encode("hex")))
"""
                # Write separate files for each ticket found. postfix is just a number for now.
with open(sys.argv[2] + "_" + str(i), 'wb') as o:
o.write(KrbCred.tostring())
i = i + 1
"""
sys.exit(0)
elif fileid == 0x76: # untested code, don't use!
f.seek(0)
KrbCred = KrbCredHeader()
KrbCred.parsefile(f)
header = CCacheHeader()
primary_principal = Principal()
credential = Credential()
header.version = 0x504
header.header_length = 0xC
header.header.deltatime.time_offset = 4294967295
header.header.deltatime.usec_offset = 0
header.header.tag = 0x01
header.header.taglen = 0x08
KrbCredInfo_ = KrbCred.ticketpart.encpart.krbcredinfo
primary_principal.name_type = KrbCredInfo_.pname.principal_name_type
primary_principal.components = KrbCredInfo_.pname.principal_components
primary_principal.num_components = len(primary_principal.components)
            primary_principal.realm.data = KrbCredInfo_.prealm.principal_realm
            primary_principal.realm.length = len(primary_principal.realm.data)
            credential.client.name_type = KrbCredInfo_.pname.principal_name_type
            credential.client.components = KrbCredInfo_.pname.principal_components
            credential.client.num_components = len(credential.client.components)
            credential.client.realm.data = KrbCredInfo_.prealm.principal_realm
            credential.client.realm.length = len(credential.client.realm.data)
            credential.server.name_type = KrbCredInfo_.sname.server_name_type
            credential.server.components = KrbCredInfo_.sname.server_components
            credential.server.num_components = len(credential.server.components)
            credential.server.realm.data = KrbCredInfo_.srealm.server_realm
            credential.server.realm.length = len(credential.server.realm.data)
            credential.keyblock.etype = KrbCred.ticketpart.encpart.etype
            credential.keyblock.key = KrbCredInfo_.key.key
            credential.keyblock.keylen = len(credential.keyblock.key)
            credential.keyblock.keytype = KrbCredInfo_.key.keytype
            credential.times.authtime = KrbCredInfo_.starttime.time
            credential.times.starttime = KrbCredInfo_.starttime.time
            credential.times.endtime = KrbCredInfo_.endtime.time
            credential.times.renew_till = KrbCredInfo_.renew_till.time
            credential.is_skey = 0
            credential.tktFlags = KrbCredInfo_.flags.ticket_flags
credential.num_address = 0
credential.address = []
credential.num_authdata = 0
credential.authdata = []
credential.ticket.data = KrbCred.ticketpart.ticket
credential.ticket.length = len(credential.ticket.data)
credential.secondticket.length = 0
credential.secondticket.data = ''
with open(sys.argv[2], 'wb') as o:
o.write(header.tostring())
o.write(primary_principal.tostring())
o.write(credential.tostring())
sys.exit(0)
else:
print('Unknown File Type!')
sys.exit(0)
|
[
"struct.pack",
"datetime.datetime",
"time.sleep",
"datetime.datetime.utcfromtimestamp",
"datetime.datetime.strptime",
"pyasn1.codec.ber.decoder.decode",
"sys.stderr.write",
"sys.exit"
] |
[((1545, 1586), 'struct.pack', 'struct.pack', (['""">HH"""', 'self.tag', 'self.taglen'], {}), "('>HH', self.tag, self.taglen)\n", (1556, 1586), False, 'import struct\n'), ((1973, 2027), 'struct.pack', 'struct.pack', (['""">LL"""', 'self.time_offset', 'self.usec_offset'], {}), "('>LL', self.time_offset, self.usec_offset)\n", (1984, 2027), False, 'import struct\n'), ((2593, 2645), 'struct.pack', 'struct.pack', (['""">HH"""', 'self.version', 'self.header_length'], {}), "('>HH', self.version, self.header_length)\n", (2604, 2645), False, 'import struct\n'), ((3131, 3218), 'struct.pack', 'struct.pack', (['""">IIII"""', 'self.authtime', 'self.starttime', 'self.endtime', 'self.renew_till'], {}), "('>IIII', self.authtime, self.starttime, self.endtime, self.\n renew_till)\n", (3142, 3218), False, 'import struct\n'), ((3588, 3618), 'struct.pack', 'struct.pack', (['""">L"""', 'self.length'], {}), "('>L', self.length)\n", (3599, 3618), False, 'import struct\n'), ((3632, 3676), 'struct.pack', 'struct.pack', (["('>%ds' % self.length)", 'self.data'], {}), "('>%ds' % self.length, self.data)\n", (3643, 3676), False, 'import struct\n'), ((4174, 4232), 'struct.pack', 'struct.pack', (['""">HHH"""', 'self.keytype', 'self.etype', 'self.keylen'], {}), "('>HHH', self.keytype, self.etype, self.keylen)\n", (4185, 4232), False, 'import struct\n'), ((4246, 4289), 'struct.pack', 'struct.pack', (["('>%ds' % self.keylen)", 'self.key'], {}), "('>%ds' % self.keylen, self.key)\n", (4257, 4289), False, 'import struct\n'), ((4979, 5034), 'struct.pack', 'struct.pack', (['""">LL"""', 'self.name_type', 'self.num_components'], {}), "('>LL', self.name_type, self.num_components)\n", (4990, 5034), False, 'import struct\n'), ((5537, 5573), 'struct.pack', 'struct.pack', (['""">H"""', 'self.address_type'], {}), "('>H', self.address_type)\n", (5548, 5573), False, 'import struct\n'), ((5975, 6007), 'struct.pack', 'struct.pack', (['""">H"""', 'self.authtype'], {}), "('>H', self.authtype)\n", (5986, 6007), False, 'import struct\n'), ((7721, 7752), 'struct.pack', 'struct.pack', (['""">B"""', 'self.is_skey'], {}), "('>B', self.is_skey)\n", (7732, 7752), False, 'import struct\n'), ((7766, 7798), 'struct.pack', 'struct.pack', (['"""<I"""', 'self.tktFlags'], {}), "('<I', self.tktFlags)\n", (7777, 7798), False, 'import struct\n'), ((7812, 7847), 'struct.pack', 'struct.pack', (['""">I"""', 'self.num_address'], {}), "('>I', self.num_address)\n", (7823, 7847), False, 'import struct\n'), ((7922, 7958), 'struct.pack', 'struct.pack', (['""">I"""', 'self.num_authdata'], {}), "('>I', self.num_authdata)\n", (7933, 7958), False, 'import struct\n'), ((11278, 11313), 'struct.pack', 'struct.pack', (['"""I"""', 'self.ticket_flags'], {}), "('I', self.ticket_flags)\n", (11289, 11313), False, 'import struct\n'), ((11984, 12013), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (12001, 12013), False, 'import datetime\n'), ((12026, 12082), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['timestr[:-1]', '"""%Y%m%d%H%M%S"""'], {}), "(timestr[:-1], '%Y%m%d%H%M%S')\n", (12052, 12082), False, 'import datetime\n'), ((12267, 12311), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['unixtime'], {}), '(unixtime)\n', (12301, 12311), False, 'import datetime\n'), ((20057, 20068), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (20065, 20068), False, 'import sys\n'), ((19044, 19064), 'struct.pack', 'struct.pack', (['""">I"""', 'i'], {}), "('>I', i)\n", (19055, 19064), False, 'import 
struct\n'), ((20460, 20607), 'sys.stderr.write', 'sys.stderr.write', (['"""WARNING: Not all the hashes generated by this program are crackable. Please select the relevant hashes manually!\n"""'], {}), '(\n """WARNING: Not all the hashes generated by this program are crackable. Please select the relevant hashes manually!\n"""\n )\n', (20476, 20607), False, 'import sys\n'), ((20607, 20620), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (20617, 20620), False, 'import time\n'), ((23625, 23636), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (23633, 23636), False, 'import sys\n'), ((26740, 26751), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (26748, 26751), False, 'import sys\n'), ((26662, 26673), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (26670, 26673), False, 'import sys\n'), ((21269, 21374), 'sys.stderr.write', 'sys.stderr.write', (['("Unsupported etype %s found. Such hashes can\'t be cracked it seems.\\n" % etype\n )'], {}), '(\n "Unsupported etype %s found. Such hashes can\'t be cracked it seems.\\n" %\n etype)\n', (21285, 21374), False, 'import sys\n'), ((21078, 21116), 'pyasn1.codec.ber.decoder.decode', 'decoder.decode', (['credential.ticket.data'], {}), '(credential.ticket.data)\n', (21092, 21116), False, 'from pyasn1.codec.ber import decoder\n'), ((21158, 21196), 'pyasn1.codec.ber.decoder.decode', 'decoder.decode', (['credential.ticket.data'], {}), '(credential.ticket.data)\n', (21172, 21196), False, 'from pyasn1.codec.ber import decoder\n')]
|
import configparser
def get_base_url():
parser = configparser.ConfigParser()
parser.read('token.cfg')
token = parser.get('creds', 'token')
return f"https://api.telegram.org/bot{token}/"
|
[
"configparser.ConfigParser"
] |
[((55, 82), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (80, 82), False, 'import configparser\n')]
|
import os
from flask import Flask
from flask import request
try:
from SimpleHTTPServer import SimpleHTTPRequestHandler as Handler
from SocketServer import TCPServer as Server
except ImportError:
from http.server import SimpleHTTPRequestHandler as Handler
from http.server import HTTPServer as Server
# Read port selected by the cloud for our application
PORT = int(os.getenv('PORT', 8000))
# Change current directory to avoid exposure of control files
#os.chdir('static')
# server http port 8000
#httpd = Server(("", PORT), Handler)
#try:
# print("Start pythonserving at port %i" % PORT)
# httpd.serve_forever()
#except KeyboardInterrupt:
# pass
#httpd.server_close()
# flask http port
app = Flask(__name__)
print ("port num", PORT)
@app.route("/flask")
def hello():
return "Hello World! ftom flask \n"
if __name__ == '__main__':
app.run(debug=True, port=PORT)
#app.run(host="", debug=True)
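# Illustrative local run (not part of the original file): the port comes from
# the PORT environment variable read above (default 8000).
#   $ PORT=5000 python app.py
#   $ curl http://localhost:5000/flask
#   Hello World! from flask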
|
[
"flask.Flask",
"os.getenv"
] |
[((711, 726), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (716, 726), False, 'from flask import Flask\n'), ((375, 398), 'os.getenv', 'os.getenv', (['"""PORT"""', '(8000)'], {}), "('PORT', 8000)\n", (384, 398), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
import networkx as nx
from networkx.readwrite import json_graph
import json
def read_json_file(filename: object) -> object:
# from http://stackoverflow.com/a/34665365
"""
:type filename: object
"""
with open(filename.name) as f:
js_graph = json.load(f)
return json_graph.node_link_graph(js_graph)
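# Illustrative aside (not part of the original module): the node-link JSON
# layout that json_graph.node_link_graph() expects, with made-up node ids:
#   {"directed": true, "multigraph": false, "graph": {},
#    "nodes": [{"id": "Q1", "group": 1}, {"id": "Q2"}],
#    "links": [{"source": "Q1", "target": "Q2"}]}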
def city_build(name_list, qid_list) -> object:
object_list = []
for e in qid_list:
x = [e, name_list[e]]
object_list.append(x)
object_list = sorted(object_list, key=lambda x: x[1])
return object_list
def get(w, d) -> object:
wikipedia = nx.DiGraph(w)
wikidata = nx.DiGraph(d)
root_nodes = nx.get_node_attributes(wikipedia, 'group')
wikidata_root_nodes = nx.get_node_attributes(wikidata, 'group')
assert (len(root_nodes) == len(wikidata_root_nodes)), 'Error: Graph root size should be the same!'
url_wiki = nx.get_node_attributes(wikipedia, 'url')
url_data = nx.get_node_attributes(wikidata, 'url')
revision_id_wikipedia = nx.get_node_attributes(wikipedia, 'revision_id_wikipedia')
revision_id_wikidata = nx.get_node_attributes(wikidata, 'revision_id_wikidata')
city_list = []
for c in root_nodes.keys():
wg_neighbors = wikipedia.successors(c)
wd_neighbors = wikidata.successors(c)
pedia = set(wg_neighbors)
data = set(wd_neighbors)
intersection = set(pedia).intersection(data)
wikipedia_missing = set(data) - set(pedia)
wikidata_missing = set(pedia) - set(data)
city_dict = {'qid': c,
'revision_id_wikipedia': revision_id_wikipedia[c],
'revision_id_wikidata': revision_id_wikidata[c],
'url': url_wiki[c],
'miss_wikipedia': city_build(url_data, wikipedia_missing),
'intersection': city_build(url_wiki, intersection),
'data_cities': city_build(url_wiki, wikidata_missing)
}
city_list.append(city_dict)
city_list = sorted(city_list, key=lambda x: x['url'])
return city_list
|
[
"networkx.readwrite.json_graph.node_link_graph",
"networkx.DiGraph",
"json.load",
"networkx.get_node_attributes"
] |
[((317, 353), 'networkx.readwrite.json_graph.node_link_graph', 'json_graph.node_link_graph', (['js_graph'], {}), '(js_graph)\n', (343, 353), False, 'from networkx.readwrite import json_graph\n'), ((631, 644), 'networkx.DiGraph', 'nx.DiGraph', (['w'], {}), '(w)\n', (641, 644), True, 'import networkx as nx\n'), ((660, 673), 'networkx.DiGraph', 'nx.DiGraph', (['d'], {}), '(d)\n', (670, 673), True, 'import networkx as nx\n'), ((692, 734), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['wikipedia', '"""group"""'], {}), "(wikipedia, 'group')\n", (714, 734), True, 'import networkx as nx\n'), ((761, 802), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['wikidata', '"""group"""'], {}), "(wikidata, 'group')\n", (783, 802), True, 'import networkx as nx\n'), ((922, 962), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['wikipedia', '"""url"""'], {}), "(wikipedia, 'url')\n", (944, 962), True, 'import networkx as nx\n'), ((978, 1017), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['wikidata', '"""url"""'], {}), "(wikidata, 'url')\n", (1000, 1017), True, 'import networkx as nx\n'), ((1046, 1104), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['wikipedia', '"""revision_id_wikipedia"""'], {}), "(wikipedia, 'revision_id_wikipedia')\n", (1068, 1104), True, 'import networkx as nx\n'), ((1132, 1188), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['wikidata', '"""revision_id_wikidata"""'], {}), "(wikidata, 'revision_id_wikidata')\n", (1154, 1188), True, 'import networkx as nx\n'), ((293, 305), 'json.load', 'json.load', (['f'], {}), '(f)\n', (302, 305), False, 'import json\n')]
|
import numpy as np
import pandas as pd
import fasttext
from sklearn.preprocessing import MultiLabelBinarizer
from skmultilearn.model_selection import IterativeStratification, \
iterative_train_test_split
from functools import reduce
CIP_TAGS = list(map(lambda x: x.strip(),
"gratis, mat, musik, kurs, casino, dans, musuem, inlines, "
"båt, barn, film, språk, hockey, bowling, fika, sport, "
"biljard, bingo, bio, opera, kultur, grilla, kubb, "
"festival, cykel, brännboll, picknick, konsert, pub, "
"frisbeegolf, mc, gokart, svamp, bangolf, teater, "
"afterwork, promenad, humor, utmaning, fest, shopping, "
"resa, sällskapsspel, träna, pubquiz, poker, bok, foto, "
"hund, skridskor, karaoke, dart, bada, diskussion, "
"badminton, pyssel, golf, klättring, loppis, boule, mässa, "
"flytthjälp, yoga, innebandy, pingis, handboll, jogga, "
"tennis, högtid, astronomi, fiske, beachvolleyboll, "
"friluftsliv, volleyboll, geocaching, vindsurfing, "
"shuffleboard, SUP, standup, paddel".split(',')))
def load_raw_normalized_dataset(path, drop_missing):
"""Load raw CiP dataset.
Args:
path: Path to raw CSV file
drop_missing: If true, drop events with missing titles or descriptions
Returns:
events_df, tags_df: Event and tag dataframes as tuple
"""
# FIXME: Import 'id' as integer
cip_df = pd.read_csv(path,
header=None,
names=['id', 'weekday', 'time', 'title', 'description',
'tag_status', 'tag'],
na_values=['-01:00:00'])
# Drop any events with missing titles or descriptions
cip_df.dropna(subset=['title', 'description'], inplace=True)
# Convert time strings to actual times
cip_df['time'] = pd.to_datetime(cip_df['time']).dt.time
events_df = cip_df.groupby('id').first().drop(
columns=['tag_status', 'tag']).reset_index()
tags_df = pd.DataFrame({
'id': cip_df['id'],
'tag': cip_df['tag'],
'verified': cip_df['tag_status'] == 1,
'removed': cip_df['tag_status'] == 2
})
# Ignore verified and remove 'removed' tags
tags_df = tags_df[~tags_df['removed']]
tags_df.drop(columns=['verified', 'removed'], inplace=True)
return events_df, tags_df
def calculate_top_tags(tags_df, n_tags, use_cip_tags=True):
"""Calculate top tags from tags dataset
Args:
tags_df: Dataset to extract top tags from
n_tags: Number of topmost tags to get if generating
use_cip_tags: Use pre-defined tags from CiP (ignores `n_tags`)
Returns:
List of topmost tags
"""
tag_counts = tags_df['tag'].value_counts()
if use_cip_tags:
# Not all CiP tags are necessarily present in the dataset
# and not necessarily in sufficient amounts
present_tags = set(tag_counts[tag_counts > 5].index)
return list(filter(lambda t: t in present_tags, CIP_TAGS))
else:
return tag_counts.index[:n_tags]
def tags_to_matrix(events_df, tags_df, top_tags):
"""Converts tags to feature matrix
Args:
events_df: Events dataset
tags_df: Tags dataset
top_tags: Tags to include
Returns:
Feature matrix for tags
"""
# Combine tags into lists
tags = tags_df.groupby('id')['tag'].agg(lambda x: list(x)).reset_index()
# Handle events with no top tags
# TODO: Kludge, write nicer
missing_tags = pd.DataFrame({
'id': events_df[~events_df['id'].isin(tags['id'])]['id'].unique()
})
missing_tags['tag'] = [[] for _ in range(len(missing_tags))]
tags = pd.concat([tags, missing_tags])
# Align tags with events
aligned_tags = events_df.merge(tags, on='id')
# Convert aligned tags to matrix
mlb = MultiLabelBinarizer(classes=top_tags)
return mlb.fit_transform(aligned_tags['tag'])
def matrix_to_tags(tags, top_tags):
top_array = np.array(top_tags)
joined_tags = []
for row in tags:
joined_tags.append(reduce(lambda a, b: a + "," + b, top_array[row > 0]))
return np.array(joined_tags)
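# Illustrative example (not part of the original module): a tiny round trip
# through matrix_to_tags with three real CiP tags.
#   matrix_to_tags(np.array([[1, 0, 1], [0, 1, 0]]), ['mat', 'musik', 'kurs'])
#   -> array(['mat,kurs', 'musik'], dtype='<U8')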
def load_datasets(path, drop_missing=True, n_tags=72,
test_size=0.2, random_state=42):
"""Load and split dataset from raw CiP data.
Args:
path: Path to raw CiP dataset
drop_missing: Drop events with no description or title
n_tags: Number of top tags to use (ignored)
test_size: Fraction of events to include in test set
random_state: Random state for the split
Returns:
(events_train, tags_train, events_test, tags_test, top_tags,
tags_train_stats)
"""
events_df, tags_df = load_raw_normalized_dataset(path,
drop_missing=drop_missing)
top_tags = calculate_top_tags(tags_df, n_tags=n_tags)
# Only keep top tags
tags_df = tags_df[tags_df['tag'].isin(top_tags)]
tag_matrix = tags_to_matrix(events_df, tags_df, top_tags)
# Split data into public training set and private test set
stratifier = IterativeStratification(
n_splits=2, order=2,
sample_distribution_per_fold=[test_size, 1.0 - test_size],
random_state=random_state)
train_indices, test_indices = next(stratifier.split(events_df, tag_matrix))
events_train, tags_train = events_df.iloc[train_indices], \
tag_matrix[train_indices, :]
events_test, tags_test = events_df.iloc[test_indices], \
tag_matrix[test_indices, :]
tags_train_stats = pd.DataFrame({
'tag': top_tags,
'count': tags_train.sum(axis=0)
}).sort_values('count', ascending=False)
return (events_train, tags_train, events_test, tags_test, top_tags,
tags_train_stats)
def extract_corpus(events_df):
"""Extract text corpus from event descriptions.
Args:
events_df: Event dataset
Returns:
List of event descriptions as raw text
"""
from tagger._preprocessing.html import HTMLToText
from tagger._preprocessing.characterset import CharacterSet
from tagger._preprocessing.lowercase import Lowercase
from sklearn.pipeline import Pipeline
cleaning_pipeline = Pipeline([
('html', HTMLToText()),
('cset', CharacterSet(punctuation=False)),
('lcase', Lowercase())
])
return list(cleaning_pipeline.fit_transform(events_df['description']))
def fasttext_wordvectors(corpus_path, model_path):
"""Compute word vectors using FastText.
Args:
corpus_path: Path to corpus
model_path: Path for storing FastText model
Returns:
FastText model
"""
model = fasttext.train_unsupervised(corpus_path)
model.save_model(model_path)
return model
def save_corpus(events_df, path):
"""Extract and store corpus for events.
Args:
events_df: Events dataset
path: Path for storing corpus
"""
corpus = extract_corpus(events_df)
with open(path, 'w') as f:
for doc in corpus:
f.write(doc + '\n')
if __name__ == '__main__':
# Generate static datasets and wordvectors for local dev
import os
print("Current working directory:", os.getcwd())
# Compute word vectors
events_df, tags_df = load_raw_normalized_dataset(
"../../../data/raw/citypolarna_public_events_out.csv",
drop_missing=True)
CORPUS_PATH = "../../../data/corpus.txt"
MODEL_PATH = "../../../data/wordvectors.bin"
save_corpus(events_df, CORPUS_PATH)
model = fasttext_wordvectors(CORPUS_PATH, MODEL_PATH)
# Split datasets
events_train, tags_train, events_test, tags_test, top_tags, tags_train_stats = load_datasets(
"../../../data/raw/citypolarna_public_events_out.csv"
)
print(f"Number of train events: {len(events_train)}")
print(f"Number of test events: {len(events_test)}")
# TODO: Proper path handling
DATA_PATH = "../../../data/"
events_train.to_csv(DATA_PATH + "events_train.csv", index=False)
events_test.to_csv(DATA_PATH + "events_test.csv", index=False)
    # A kludge, but convenient: pandas can load from URLs
pd.DataFrame(tags_train).to_csv(DATA_PATH + "tags_train.csv", index=False)
pd.DataFrame(tags_test).to_csv(DATA_PATH + "tags_test.csv", index=False)
pd.DataFrame({'tag': top_tags}).to_csv(DATA_PATH + "top_tags.csv",
index=False)
tags_train_stats.to_csv(DATA_PATH + "tags_train_stats.csv", index=False)
|
[
"pandas.DataFrame",
"pandas.read_csv",
"fasttext.train_unsupervised",
"os.getcwd",
"sklearn.preprocessing.MultiLabelBinarizer",
"tagger._preprocessing.characterset.CharacterSet",
"tagger._preprocessing.html.HTMLToText",
"skmultilearn.model_selection.IterativeStratification",
"numpy.array",
"pandas.to_datetime",
"functools.reduce",
"tagger._preprocessing.lowercase.Lowercase",
"pandas.concat"
] |
[((1601, 1738), 'pandas.read_csv', 'pd.read_csv', (['path'], {'header': 'None', 'names': "['id', 'weekday', 'time', 'title', 'description', 'tag_status', 'tag']", 'na_values': "['-01:00:00']"}), "(path, header=None, names=['id', 'weekday', 'time', 'title',\n 'description', 'tag_status', 'tag'], na_values=['-01:00:00'])\n", (1612, 1738), True, 'import pandas as pd\n'), ((2189, 2327), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': cip_df['id'], 'tag': cip_df['tag'], 'verified': cip_df['tag_status'] ==\n 1, 'removed': cip_df['tag_status'] == 2}"], {}), "({'id': cip_df['id'], 'tag': cip_df['tag'], 'verified': cip_df[\n 'tag_status'] == 1, 'removed': cip_df['tag_status'] == 2})\n", (2201, 2327), True, 'import pandas as pd\n'), ((3884, 3915), 'pandas.concat', 'pd.concat', (['[tags, missing_tags]'], {}), '([tags, missing_tags])\n', (3893, 3915), True, 'import pandas as pd\n'), ((4044, 4081), 'sklearn.preprocessing.MultiLabelBinarizer', 'MultiLabelBinarizer', ([], {'classes': 'top_tags'}), '(classes=top_tags)\n', (4063, 4081), False, 'from sklearn.preprocessing import MultiLabelBinarizer\n'), ((4186, 4204), 'numpy.array', 'np.array', (['top_tags'], {}), '(top_tags)\n', (4194, 4204), True, 'import numpy as np\n'), ((4339, 4360), 'numpy.array', 'np.array', (['joined_tags'], {}), '(joined_tags)\n', (4347, 4360), True, 'import numpy as np\n'), ((5332, 5467), 'skmultilearn.model_selection.IterativeStratification', 'IterativeStratification', ([], {'n_splits': '(2)', 'order': '(2)', 'sample_distribution_per_fold': '[test_size, 1.0 - test_size]', 'random_state': 'random_state'}), '(n_splits=2, order=2, sample_distribution_per_fold=[\n test_size, 1.0 - test_size], random_state=random_state)\n', (5355, 5467), False, 'from skmultilearn.model_selection import IterativeStratification, iterative_train_test_split\n'), ((6963, 7003), 'fasttext.train_unsupervised', 'fasttext.train_unsupervised', (['corpus_path'], {}), '(corpus_path)\n', (6990, 7003), False, 'import fasttext\n'), ((7499, 7510), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7508, 7510), False, 'import os\n'), ((2031, 2061), 'pandas.to_datetime', 'pd.to_datetime', (["cip_df['time']"], {}), "(cip_df['time'])\n", (2045, 2061), True, 'import pandas as pd\n'), ((4274, 4326), 'functools.reduce', 'reduce', (["(lambda a, b: a + ',' + b)", 'top_array[row > 0]'], {}), "(lambda a, b: a + ',' + b, top_array[row > 0])\n", (4280, 4326), False, 'from functools import reduce\n'), ((8447, 8471), 'pandas.DataFrame', 'pd.DataFrame', (['tags_train'], {}), '(tags_train)\n', (8459, 8471), True, 'import pandas as pd\n'), ((8526, 8549), 'pandas.DataFrame', 'pd.DataFrame', (['tags_test'], {}), '(tags_test)\n', (8538, 8549), True, 'import pandas as pd\n'), ((8604, 8635), 'pandas.DataFrame', 'pd.DataFrame', (["{'tag': top_tags}"], {}), "({'tag': top_tags})\n", (8616, 8635), True, 'import pandas as pd\n'), ((6531, 6543), 'tagger._preprocessing.html.HTMLToText', 'HTMLToText', ([], {}), '()\n', (6541, 6543), False, 'from tagger._preprocessing.html import HTMLToText\n'), ((6563, 6594), 'tagger._preprocessing.characterset.CharacterSet', 'CharacterSet', ([], {'punctuation': '(False)'}), '(punctuation=False)\n', (6575, 6594), False, 'from tagger._preprocessing.characterset import CharacterSet\n'), ((6615, 6626), 'tagger._preprocessing.lowercase.Lowercase', 'Lowercase', ([], {}), '()\n', (6624, 6626), False, 'from tagger._preprocessing.lowercase import Lowercase\n')]
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
class MessageSubscriber:
def __init__(
self,
node_name,
topic_name
):
rospy.init_node(node_name)
self._topic_name = topic_name
self._subscriber = rospy.Subscriber(
self._topic_name,
String,
callback=self._callback,
queue_size=1)
def _callback(self, msg):
rospy.loginfo("I heard: {}".format(msg.data))
if __name__ == "__main__":
message_subscriber = MessageSubscriber("message_subscriber", "example_messaging/messages")
rospy.spin()
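# Editor's example (not in the original file): a minimal publisher counterpart
# one might use to exercise the subscriber above. The topic name and message
# type are taken from the __main__ block; the node name, payload, and publish
# rate are assumptions for illustration only.
import rospy
from std_msgs.msg import String

rospy.init_node("message_publisher")
publisher = rospy.Publisher("example_messaging/messages", String, queue_size=1)
rate = rospy.Rate(1)  # one message per second
while not rospy.is_shutdown():
    publisher.publish(String(data="hello"))
    rate.sleep()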
|
[
"rospy.spin",
"rospy.Subscriber",
"rospy.init_node"
] |
[((631, 643), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (641, 643), False, 'import rospy\n'), ((194, 220), 'rospy.init_node', 'rospy.init_node', (['node_name'], {}), '(node_name)\n', (209, 220), False, 'import rospy\n'), ((287, 372), 'rospy.Subscriber', 'rospy.Subscriber', (['self._topic_name', 'String'], {'callback': 'self._callback', 'queue_size': '(1)'}), '(self._topic_name, String, callback=self._callback,\n queue_size=1)\n', (303, 372), False, 'import rospy\n')]
|
import gym
import rlkit.torch.pytorch_util as ptu
from rlkit.data_management.obs_dict_replay_buffer import ObsDictRelabelingBuffer, WeightedObsDictRelabelingBuffer
from rlkit.launchers.launcher_util import setup_logger
from rlkit.samplers.data_collector import GoalConditionedPathCollector
from rlkit.torch.her.her import HERTrainer
from rlkit.torch.networks import ConcatMlp
from rlkit.torch.sac.policies import MakeDeterministic, TanhGaussianPolicy
from rlkit.torch.sac.sac import SACTrainer
from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm
from robosuite.wrappers import Wrapper, GymWrapper
import robosuite as suite
from robosuite import load_controller_config
import math
import numpy as np
class GoalMountainCar(gym.Wrapper):
def reset(self, **kwargs):
state = self.env.reset(**kwargs)
ag = np.array(self.env.state)
g = np.array([self.env.goal_position, self.env.goal_velocity])
state = {'observation': state, 'achieved_goal': ag, 'desired_goal': g}
return state
def compute_reward(self, achieved_goal, desired_goal, info):
shape = False
dense = 100*((math.sin(3*achieved_goal[0]) * 0.0025 + 0.5 * achieved_goal[1] * achieved_goal[1]) - (math.sin(3*desired_goal[0]) * 0.0025 + 0.5 * desired_goal[1] * desired_goal[1]))
if achieved_goal[0] != desired_goal[0]:
return -1 if not shape else dense
else:
return 0 if achieved_goal[0] >= desired_goal[0] else (-1 if not shape else dense)
def step(self, action):
state, _, done, info = super().step(action)
ag = np.array(self.env.state)
g = np.array([self.env.goal_position, self.env.goal_velocity])
reward = self.compute_reward(ag, g, info)
state = {'observation': state, 'achieved_goal': ag, 'desired_goal': g}
info['is_success'] = reward==0
return state, reward, done, info
class GoalMountainCarContinuous(gym.Wrapper):
def __init__(self, env):
super().__init__(env=env)
env = env.env
print(env)
self.observation_space = gym.spaces.Dict({"observation": env.observation_space, "achieved_goal": env.observation_space, "desired_goal":env.observation_space})
self.action_space = env.action_space
# Default goal_Velocity is 0 - any speed will do (>=)
self.goal = np.array([env.goal_position, 0])
def reset(self, **kwargs):
state = self.env.reset(**kwargs)
ag = np.array(state)
g = self.goal
state = {'observation': state, 'achieved_goal': ag, 'desired_goal': g}
return state
def compute_reward(self, achieved_goal, desired_goal, info):
return 100 if achieved_goal[1] >= desired_goal[1] and achieved_goal[0] >= desired_goal[0] else -1
def step(self, action):
state, _, done, info = super().step(action)
ag = np.array(state)
g = self.goal
reward = self.compute_reward(ag, g, None)
state = {'observation': state, 'achieved_goal': ag, 'desired_goal': g}
info['is_success'] = int(ag[1] >= g[1] and ag[0] >= g[0])
return state, reward, done, info
class DoorWrapper(Wrapper):
"""
Initializes the Gym wrapper. Mimics many of the required functionalities of the Wrapper class
found in the gym.core module
Args:
env (MujocoEnv): The environment to wrap.
keys (None or list of str): If provided, each observation will
consist of concatenated keys from the wrapped environment's
observation dictionary. Defaults to robot-state and object-state.
Raises:
AssertionError: [Object observations must be enabled if no keys]
"""
def __init__(self, env, keys=None):
# Run super method
super().__init__(env=env)
# Create name for gym
robots = "".join([type(robot.robot_model).__name__ for robot in self.env.robots])
self.name = robots + "_" + type(self.env).__name__
# Get reward range
self.reward_range = (0, self.env.reward_scale)
if keys is None:
assert self.env.use_object_obs, "Object observations need to be enabled."
keys = ["object-state"]
# Iterate over all robots to add to state
for idx in range(len(self.env.robots)):
keys += ["robot{}_robot-state".format(idx)]
self.keys = keys
# Gym specific attributes
self.env.spec = None
self.metadata = None
self.goal = np.array([.3])
# set up observation and action spaces
flat_ob = self._flatten_obs(self.env.reset(), verbose=True)
self.obs_dim = flat_ob.size
high = np.inf * np.ones(self.obs_dim)
low = -high
self.observation_space = gym.spaces.Dict({"observation": gym.spaces.Box(low=low, high=high), "achieved_goal": gym.spaces.Box(low=np.zeros(1), high=np.ones(1), shape=(1,)), "desired_goal": gym.spaces.Box(low=np.zeros(1), high=np.ones(1), shape=(1,))})
low, high = self.env.action_spec
self.action_space = gym.spaces.Box(low=low, high=high)
def _flatten_obs(self, obs_dict, verbose=False):
"""
Filters keys of interest out and concatenate the information.
Args:
obs_dict (OrderedDict): ordered dictionary of observations
verbose (bool): Whether to print out to console as observation keys are processed
Returns:
np.array: observations flattened into a 1d array
"""
ob_lst = []
for key in obs_dict:
if key in self.keys:
if verbose:
print("adding key: {}".format(key))
ob_lst.append(obs_dict[key])
return np.concatenate(ob_lst)
def reset(self):
"""
Extends env reset method to return flattened observation instead of normal OrderedDict.
Returns:
np.array: Flattened environment observation space after reset occurs
"""
ob_dict = self.env.reset()
state = self._flatten_obs(ob_dict)
ag = np.array([self.env.sim.data.qpos[self.env.hinge_qpos_addr]])
g = self.goal
return {'observation': state, 'achieved_goal': ag, 'desired_goal': g}
def step(self, action):
"""
Extends vanilla step() function call to return flattened observation instead of normal OrderedDict.
Args:
action (np.array): Action to take in environment
Returns:
4-tuple:
- (np.array) flattened observations from the environment
- (float) reward from the environment
- (bool) whether the current episode is completed or not
- (dict) misc information
"""
ob_dict, reward, done, info = self.env.step(action)
state = self._flatten_obs(ob_dict)
ag = np.array([self.env.sim.data.qpos[self.env.hinge_qpos_addr]])
g = self.goal
ob_dict = {'observation': state, 'achieved_goal': ag, 'desired_goal': g}
info['is_success'] = int(ag[0] > g[0])
return ob_dict, reward, done, info
def seed(self, seed=None):
"""
Utility function to set numpy seed
Args:
seed (None or int): If specified, numpy seed to set
Raises:
TypeError: [Seed must be integer]
"""
# Seed the generator
if seed is not None:
try:
np.random.seed(seed)
            except:
                raise TypeError("Seed must be an integer type!")
def compute_reward(self, achieved_goal, desired_goal, info):
return 1 if achieved_goal[0] > desired_goal[0] else 0
def make_env():
controller = load_controller_config(default_controller="OSC_POSE")
env = GymWrapper(suite.make(
"PickPlaceCan",
        robots="Panda",                # use the Panda robot
use_camera_obs=False, # do not use pixel observations
has_offscreen_renderer=False, # not needed since not using pixel obs
        has_renderer=False,                    # no on-screen rendering needed
reward_shaping=True, # use dense rewards
reward_scale=1.0, # scale max 1 per timestep
control_freq=20, # control should happen fast enough so that simulation looks smooth
horizon=500,
ignore_done=True,
hard_reset=False,
controller_configs=controller
))
return env
# GoalMountainCarContinuous(gym.make("MountainCarContinuous-v0"))
# GoalMountainCar(gym.make("MountainCar-v0"))
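# Editor's sketch (not in the original file): DoorWrapper above is goal-based
# but unused by make_env(), which returns a flat GymWrapper around PickPlaceCan.
# A door-opening variant matching the 'her-sac-door-experiment' logger name
# below might look like the following; the robosuite env name "Door" and the
# reuse of the same kwargs are assumptions, not part of the original code.
def make_door_env():
    controller = load_controller_config(default_controller="OSC_POSE")
    return DoorWrapper(suite.make(
        "Door",
        robots="Panda",
        use_camera_obs=False,
        has_offscreen_renderer=False,
        has_renderer=False,
        reward_shaping=True,
        reward_scale=1.0,
        control_freq=20,
        horizon=500,
        ignore_done=True,
        hard_reset=False,
        controller_configs=controller
    ))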
def experiment(variant):
    # unwrap the TimeLimit wrapper since we manually terminate after 50 steps
# eval_env = gym.make('FetchPickAndPlace-v1').env
# expl_env = gym.make('FetchPickAndPlace-v1').env
eval_env = make_env()
expl_env = make_env()
print(eval_env.observation_space)
observation_key = 'observation'
desired_goal_key = 'desired_goal'
    achieved_goal_key = desired_goal_key.replace("desired", "achieved")
    # The relabeling buffer is used by the algorithm below, so it is re-enabled here.
    replay_buffer = ObsDictRelabelingBuffer(
        env=eval_env,
        observation_key=observation_key,
        desired_goal_key=desired_goal_key,
        achieved_goal_key=achieved_goal_key,
        **variant['replay_buffer_kwargs']
    )
obs_dim = eval_env.observation_space.low.size
action_dim = eval_env.action_space.low.size
    goal_dim = eval_env.observation_space.spaces['desired_goal'].low.size
    print(obs_dim)
    print(action_dim)
    print(goal_dim)
qf1 = ConcatMlp(
input_size=obs_dim + action_dim + goal_dim,
output_size=1,
**variant['qf_kwargs']
)
qf2 = ConcatMlp(
input_size=obs_dim + action_dim + goal_dim,
output_size=1,
**variant['qf_kwargs']
)
target_qf1 = ConcatMlp(
input_size=obs_dim + action_dim + goal_dim,
output_size=1,
**variant['qf_kwargs']
)
target_qf2 = ConcatMlp(
input_size=obs_dim + action_dim + goal_dim,
output_size=1,
**variant['qf_kwargs']
)
policy = TanhGaussianPolicy(
obs_dim=obs_dim + goal_dim,
action_dim=action_dim,
**variant['policy_kwargs']
)
eval_policy = MakeDeterministic(policy)
trainer = SACTrainer(
env=eval_env,
policy=policy,
qf1=qf1,
qf2=qf2,
target_qf1=target_qf1,
target_qf2=target_qf2,
**variant['sac_trainer_kwargs']
)
trainer = HERTrainer(trainer, use_per=False)
eval_path_collector = GoalConditionedPathCollector(
eval_env,
eval_policy,
observation_key=observation_key,
desired_goal_key=desired_goal_key,
)
expl_path_collector = GoalConditionedPathCollector(
expl_env,
policy,
observation_key=observation_key,
desired_goal_key=desired_goal_key,
)
algorithm = TorchBatchRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
replay_buffer=replay_buffer,
**variant['algo_kwargs']
)
algorithm.to(ptu.device)
algorithm.train()
if __name__ == "__main__":
variant = dict(
algorithm='HER-SAC',
version='normal',
algo_kwargs=dict(
batch_size=512,
num_epochs=500,
num_eval_steps_per_epoch=5000,
num_expl_steps_per_train_loop=500,
num_trains_per_train_loop=500,
min_num_steps_before_training=1000,
max_path_length=500,
),
sac_trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
use_automatic_entropy_tuning=True,
),
replay_buffer_kwargs=dict(
max_size=int(50000),
fraction_goals_rollout_goals=0.2, # equal to k = 4 in HER paper
fraction_goals_env_goals=0,
),
qf_kwargs=dict(
hidden_sizes=[256, 256],
),
policy_kwargs=dict(
hidden_sizes=[256, 256],
),
)
setup_logger('her-sac-door-experiment', variant=variant)
experiment(variant)
|
[
"rlkit.torch.sac.sac.SACTrainer",
"rlkit.torch.sac.policies.MakeDeterministic",
"rlkit.torch.torch_rl_algorithm.TorchBatchRLAlgorithm",
"numpy.concatenate",
"rlkit.samplers.data_collector.GoalConditionedPathCollector",
"robosuite.make",
"numpy.random.seed",
"rlkit.torch.her.her.HERTrainer",
"numpy.zeros",
"numpy.ones",
"rlkit.torch.networks.ConcatMlp",
"numpy.array",
"rlkit.launchers.launcher_util.setup_logger",
"rlkit.torch.sac.policies.TanhGaussianPolicy",
"gym.spaces.Box",
"robosuite.load_controller_config",
"gym.spaces.Dict"
] |
[((7709, 7762), 'robosuite.load_controller_config', 'load_controller_config', ([], {'default_controller': '"""OSC_POSE"""'}), "(default_controller='OSC_POSE')\n", (7731, 7762), False, 'from robosuite import load_controller_config\n'), ((9648, 9745), 'rlkit.torch.networks.ConcatMlp', 'ConcatMlp', ([], {'input_size': '(obs_dim + action_dim + goal_dim)', 'output_size': '(1)'}), "(input_size=obs_dim + action_dim + goal_dim, output_size=1, **\n variant['qf_kwargs'])\n", (9657, 9745), False, 'from rlkit.torch.networks import ConcatMlp\n'), ((9781, 9878), 'rlkit.torch.networks.ConcatMlp', 'ConcatMlp', ([], {'input_size': '(obs_dim + action_dim + goal_dim)', 'output_size': '(1)'}), "(input_size=obs_dim + action_dim + goal_dim, output_size=1, **\n variant['qf_kwargs'])\n", (9790, 9878), False, 'from rlkit.torch.networks import ConcatMlp\n'), ((9921, 10018), 'rlkit.torch.networks.ConcatMlp', 'ConcatMlp', ([], {'input_size': '(obs_dim + action_dim + goal_dim)', 'output_size': '(1)'}), "(input_size=obs_dim + action_dim + goal_dim, output_size=1, **\n variant['qf_kwargs'])\n", (9930, 10018), False, 'from rlkit.torch.networks import ConcatMlp\n'), ((10061, 10158), 'rlkit.torch.networks.ConcatMlp', 'ConcatMlp', ([], {'input_size': '(obs_dim + action_dim + goal_dim)', 'output_size': '(1)'}), "(input_size=obs_dim + action_dim + goal_dim, output_size=1, **\n variant['qf_kwargs'])\n", (10070, 10158), False, 'from rlkit.torch.networks import ConcatMlp\n'), ((10197, 10299), 'rlkit.torch.sac.policies.TanhGaussianPolicy', 'TanhGaussianPolicy', ([], {'obs_dim': '(obs_dim + goal_dim)', 'action_dim': 'action_dim'}), "(obs_dim=obs_dim + goal_dim, action_dim=action_dim, **\n variant['policy_kwargs'])\n", (10215, 10299), False, 'from rlkit.torch.sac.policies import MakeDeterministic, TanhGaussianPolicy\n'), ((10343, 10368), 'rlkit.torch.sac.policies.MakeDeterministic', 'MakeDeterministic', (['policy'], {}), '(policy)\n', (10360, 10368), False, 'from rlkit.torch.sac.policies import MakeDeterministic, TanhGaussianPolicy\n'), ((10383, 10524), 'rlkit.torch.sac.sac.SACTrainer', 'SACTrainer', ([], {'env': 'eval_env', 'policy': 'policy', 'qf1': 'qf1', 'qf2': 'qf2', 'target_qf1': 'target_qf1', 'target_qf2': 'target_qf2'}), "(env=eval_env, policy=policy, qf1=qf1, qf2=qf2, target_qf1=\n target_qf1, target_qf2=target_qf2, **variant['sac_trainer_kwargs'])\n", (10393, 10524), False, 'from rlkit.torch.sac.sac import SACTrainer\n'), ((10596, 10630), 'rlkit.torch.her.her.HERTrainer', 'HERTrainer', (['trainer'], {'use_per': '(False)'}), '(trainer, use_per=False)\n', (10606, 10630), False, 'from rlkit.torch.her.her import HERTrainer\n'), ((10657, 10781), 'rlkit.samplers.data_collector.GoalConditionedPathCollector', 'GoalConditionedPathCollector', (['eval_env', 'eval_policy'], {'observation_key': 'observation_key', 'desired_goal_key': 'desired_goal_key'}), '(eval_env, eval_policy, observation_key=\n observation_key, desired_goal_key=desired_goal_key)\n', (10685, 10781), False, 'from rlkit.samplers.data_collector import GoalConditionedPathCollector\n'), ((10842, 10961), 'rlkit.samplers.data_collector.GoalConditionedPathCollector', 'GoalConditionedPathCollector', (['expl_env', 'policy'], {'observation_key': 'observation_key', 'desired_goal_key': 'desired_goal_key'}), '(expl_env, policy, observation_key=\n observation_key, desired_goal_key=desired_goal_key)\n', (10870, 10961), False, 'from rlkit.samplers.data_collector import GoalConditionedPathCollector\n'), ((11012, 11264), 'rlkit.torch.torch_rl_algorithm.TorchBatchRLAlgorithm', 
'TorchBatchRLAlgorithm', ([], {'trainer': 'trainer', 'exploration_env': 'expl_env', 'evaluation_env': 'eval_env', 'exploration_data_collector': 'expl_path_collector', 'evaluation_data_collector': 'eval_path_collector', 'replay_buffer': 'replay_buffer'}), "(trainer=trainer, exploration_env=expl_env,\n evaluation_env=eval_env, exploration_data_collector=expl_path_collector,\n evaluation_data_collector=eval_path_collector, replay_buffer=\n replay_buffer, **variant['algo_kwargs'])\n", (11033, 11264), False, 'from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm\n'), ((12398, 12454), 'rlkit.launchers.launcher_util.setup_logger', 'setup_logger', (['"""her-sac-door-experiment"""'], {'variant': 'variant'}), "('her-sac-door-experiment', variant=variant)\n", (12410, 12454), False, 'from rlkit.launchers.launcher_util import setup_logger\n'), ((825, 849), 'numpy.array', 'np.array', (['self.env.state'], {}), '(self.env.state)\n', (833, 849), True, 'import numpy as np\n'), ((862, 920), 'numpy.array', 'np.array', (['[self.env.goal_position, self.env.goal_velocity]'], {}), '([self.env.goal_position, self.env.goal_velocity])\n', (870, 920), True, 'import numpy as np\n'), ((1595, 1619), 'numpy.array', 'np.array', (['self.env.state'], {}), '(self.env.state)\n', (1603, 1619), True, 'import numpy as np\n'), ((1632, 1690), 'numpy.array', 'np.array', (['[self.env.goal_position, self.env.goal_velocity]'], {}), '([self.env.goal_position, self.env.goal_velocity])\n', (1640, 1690), True, 'import numpy as np\n'), ((2077, 2216), 'gym.spaces.Dict', 'gym.spaces.Dict', (["{'observation': env.observation_space, 'achieved_goal': env.\n observation_space, 'desired_goal': env.observation_space}"], {}), "({'observation': env.observation_space, 'achieved_goal': env\n .observation_space, 'desired_goal': env.observation_space})\n", (2092, 2216), False, 'import gym\n'), ((2332, 2364), 'numpy.array', 'np.array', (['[env.goal_position, 0]'], {}), '([env.goal_position, 0])\n', (2340, 2364), True, 'import numpy as np\n'), ((2452, 2467), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (2460, 2467), True, 'import numpy as np\n'), ((2857, 2872), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (2865, 2872), True, 'import numpy as np\n'), ((4489, 4504), 'numpy.array', 'np.array', (['[0.3]'], {}), '([0.3])\n', (4497, 4504), True, 'import numpy as np\n'), ((5050, 5084), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': 'low', 'high': 'high'}), '(low=low, high=high)\n', (5064, 5084), False, 'import gym\n'), ((5716, 5738), 'numpy.concatenate', 'np.concatenate', (['ob_lst'], {}), '(ob_lst)\n', (5730, 5738), True, 'import numpy as np\n'), ((6070, 6130), 'numpy.array', 'np.array', (['[self.env.sim.data.qpos[self.env.hinge_qpos_addr]]'], {}), '([self.env.sim.data.qpos[self.env.hinge_qpos_addr]])\n', (6078, 6130), True, 'import numpy as np\n'), ((6863, 6923), 'numpy.array', 'np.array', (['[self.env.sim.data.qpos[self.env.hinge_qpos_addr]]'], {}), '([self.env.sim.data.qpos[self.env.hinge_qpos_addr]])\n', (6871, 6923), True, 'import numpy as np\n'), ((7784, 8046), 'robosuite.make', 'suite.make', (['"""PickPlaceCan"""'], {'robots': '"""Panda"""', 'use_camera_obs': '(False)', 'has_offscreen_renderer': '(False)', 'has_renderer': '(False)', 'reward_shaping': '(True)', 'reward_scale': '(1.0)', 'control_freq': '(20)', 'horizon': '(500)', 'ignore_done': '(True)', 'hard_reset': '(False)', 'controller_configs': 'controller'}), "('PickPlaceCan', robots='Panda', use_camera_obs=False,\n has_offscreen_renderer=False, 
has_renderer=False, reward_shaping=True,\n reward_scale=1.0, control_freq=20, horizon=500, ignore_done=True,\n hard_reset=False, controller_configs=controller)\n", (7794, 8046), True, 'import robosuite as suite\n'), ((4680, 4701), 'numpy.ones', 'np.ones', (['self.obs_dim'], {}), '(self.obs_dim)\n', (4687, 4701), True, 'import numpy as np\n'), ((4787, 4821), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': 'low', 'high': 'high'}), '(low=low, high=high)\n', (4801, 4821), False, 'import gym\n'), ((7447, 7467), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (7461, 7467), True, 'import numpy as np\n'), ((4859, 4870), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (4867, 4870), True, 'import numpy as np\n'), ((4877, 4887), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (4884, 4887), True, 'import numpy as np\n'), ((4937, 4948), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (4945, 4948), True, 'import numpy as np\n'), ((4955, 4965), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (4962, 4965), True, 'import numpy as np\n')]
|
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
import tensorflow as tf
from dual_encoder import keras_layers
l2_normalize_fn = lambda x: tf.keras.backend.l2_normalize(x, axis=-1)
class KerasLayersTest(absltest.TestCase):
def test_masked_average_3d(self):
masked_average_layer = keras_layers.MaskedAverage(1)
inputs = tf.constant([
[[0.5, 0.3], [0.4, 0.1], [0.4, 0.1]],
[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]],
[[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]],
[[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]],
])
mask = tf.constant([[True, True, True],
[False, False, True],
[True, False, False],
[False, False, False]])
output_average = masked_average_layer.call(inputs, mask=mask)
output_mask = masked_average_layer.compute_mask(inputs, mask=mask)
expected_average = tf.constant([
[1.3 / 3, 0.5 / 3],
[0.4, 0.1],
[0.9, 0.4],
[0.0, 0.0]
])
expected_mask = None
tf.debugging.assert_near(expected_average, output_average)
self.assertEqual(expected_mask, output_mask)
def test_masked_average_4d(self):
masked_average_layer = keras_layers.MaskedAverage(2)
inputs = tf.constant([
[[[0.5, 0.3], [0.4, 0.1], [0.4, 0.1]],
[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]]],
[[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]],
[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]]],
[[[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]],
[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]]],
[[[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]],
[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]]],
])
mask = tf.constant([[[True, True, True], [True, False, True]],
[[False, False, True], [False, False, False]],
[[True, False, False], [True, True, True]],
[[False, False, False], [True, False, False]]])
output_average = masked_average_layer.call(inputs, mask=mask)
output_mask = masked_average_layer.compute_mask(inputs, mask=mask)
expected_average = tf.constant([
[[1.3 / 3, 0.5 / 3], [0.5, 0.45]],
[[0.4, 0.1], [0.0, 0.0]],
[[0.9, 0.4], [0.5, 1.3 / 3]],
[[0.0, 0.0], [0.6, 0.8]],
])
expected_mask = tf.constant([[True, True],
[True, False],
[True, True],
[False, True]])
tf.debugging.assert_near(expected_average, output_average)
tf.debugging.assert_equal(expected_mask, output_mask)
def test_masked_average_raises_error(self):
masked_average_layer = keras_layers.MaskedAverage(1)
inputs = tf.constant([
[[0.5, 0.3], [0.4, 0.1], [0.4, 0.1]],
[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]],
[[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]],
])
mask = None
with self.assertRaises(ValueError):
masked_average_layer.call(inputs, mask=mask)
with self.assertRaises(ValueError):
masked_average_layer.compute_mask(inputs, mask=mask)
def test_masked_reshape(self):
masked_reshape_layer = keras_layers.MaskedReshape((4, 4, 2, 1), (4, 4, 2))
inputs = tf.constant([
[[1.0], [2.0], [0.5], [0.4], [0.4], [0.1], [0.0], [0.0]],
[[0.4], [0.1], [0.0], [0.0], [0.0], [0.0], [0.6], [0.8]],
[[0.9], [0.4], [0.5], [3.0], [0.9], [0.4], [0.5], [3.0]],
[[0.0], [0.0], [0.6], [0.8], [0.4], [0.1], [0.0], [0.0]],
])
mask = tf.constant(
[[True, False, True, True, True, False, False, False],
[True, False, True, True, True, True, False, True],
[False, True, True, False, True, True, True, True],
[False, True, True, True, True, False, False, True]])
output = masked_reshape_layer.call(inputs, mask=mask)
output_mask = masked_reshape_layer.compute_mask(inputs, mask=mask)
expected_output = tf.constant([
[[[1.0], [2.0]], [[0.5], [0.4]], [[0.4], [0.1]], [[0.0], [0.0]]],
[[[0.4], [0.1]], [[0.0], [0.0]], [[0.0], [0.0]], [[0.6], [0.8]]],
[[[0.9], [0.4]], [[0.5], [3.0]], [[0.9], [0.4]], [[0.5], [3.0]]],
[[[0.0], [0.0]], [[0.6], [0.8]], [[0.4], [0.1]], [[0.0], [0.0]]],
])
expected_mask = tf.constant(
[[[True, False], [True, True], [True, False], [False, False]],
[[True, False], [True, True], [True, True], [False, True]],
[[False, True], [True, False], [True, True], [True, True]],
[[False, True], [True, True], [True, False], [False, True]]])
tf.debugging.assert_near(expected_output, output)
tf.debugging.assert_equal(expected_mask, output_mask)
def test_masked_reshape_unknown_batch_size(self):
masked_reshape_layer = keras_layers.MaskedReshape((-1, 4, 2, 1), (-1, 4, 2))
inputs = tf.constant([
[[1.0], [2.0], [0.5], [0.4], [0.4], [0.1], [0.0], [0.0]],
[[0.4], [0.1], [0.0], [0.0], [0.0], [0.0], [0.6], [0.8]],
[[0.9], [0.4], [0.5], [3.0], [0.9], [0.4], [0.5], [3.0]],
[[0.0], [0.0], [0.6], [0.8], [0.4], [0.1], [0.0], [0.0]],
])
mask = tf.constant(
[[True, False, True, True, True, False, False, False],
[True, False, True, True, True, True, False, True],
[False, True, True, False, True, True, True, True],
[False, True, True, True, True, False, False, True]])
output = masked_reshape_layer.call(inputs, mask=mask)
output_mask = masked_reshape_layer.compute_mask(inputs, mask=mask)
expected_output = tf.constant([
[[[1.0], [2.0]], [[0.5], [0.4]], [[0.4], [0.1]], [[0.0], [0.0]]],
[[[0.4], [0.1]], [[0.0], [0.0]], [[0.0], [0.0]], [[0.6], [0.8]]],
[[[0.9], [0.4]], [[0.5], [3.0]], [[0.9], [0.4]], [[0.5], [3.0]]],
[[[0.0], [0.0]], [[0.6], [0.8]], [[0.4], [0.1]], [[0.0], [0.0]]],
])
expected_mask = tf.constant(
[[[True, False], [True, True], [True, False], [False, False]],
[[True, False], [True, True], [True, True], [False, True]],
[[False, True], [True, False], [True, True], [True, True]],
[[False, True], [True, True], [True, False], [False, True]]])
tf.debugging.assert_near(expected_output, output)
tf.debugging.assert_equal(expected_mask, output_mask)
def test_masked_reshape_raises_error(self):
masked_reshape_layer = keras_layers.MaskedReshape((-1, 4, 2, 1), (-1, 4, 2))
inputs = tf.constant([
[[1.0], [2.0], [0.5], [0.4], [0.4], [0.1], [0.0], [0.0]],
[[0.4], [0.1], [0.0], [0.0], [0.0], [0.0], [0.6], [0.8]],
[[0.9], [0.4], [0.5], [3.0], [0.9], [0.4], [0.5], [3.0]],
[[0.0], [0.0], [0.6], [0.8], [0.4], [0.1], [0.0], [0.0]],
])
mask = None
with self.assertRaises(ValueError):
masked_reshape_layer.call(inputs, mask=mask)
with self.assertRaises(ValueError):
masked_reshape_layer.compute_mask(inputs, mask=mask)
def test_embedding_spreadout_regularizer_dot_product(self):
weights = tf.constant(
[[1.0, 0.0, 0.0],
[2.0, 2.0, 2.0],
[0.1, 0.2, 0.3],
[0.3, 0.2, 0.1],
[0.0, 1.0, 0.0]])
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.1,
normalization_fn=None,
l2_regularization=0.0)
# Similarities without diagonal looks like:
# 0.0 2.0 0.1 0.3 0.0
# 2.0 0.0 1.2 1.2 2.0
# 0.1 1.2 0.0 0.1 0.2
# 0.3 1.2 0.1 0.0 0.2
# 0.0 2.0 0.2 0.2 0.0
loss = regularizer(weights)
# L2 norm of above similarities.
expected_loss = 0.47053161424
tf.debugging.assert_near(expected_loss, loss)
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.1,
normalization_fn=None,
l2_regularization=1.0)
l2_regularizer = tf.keras.regularizers.l2(1.0)
loss = regularizer(weights)
expected_loss = 0.47053161424 + l2_regularizer(weights)
tf.debugging.assert_near(expected_loss, loss)
def test_embedding_spreadout_regularizer_cosine_similarity(self):
weights = tf.constant(
[[1.0, 0.0, 0.0],
[2.0, 2.0, 2.0],
[0.1, 0.2, 0.3],
[0.3, 0.2, 0.1],
[0.0, 1.0, 0.0]])
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.1,
normalization_fn=l2_normalize_fn,
l2_regularization=0.0)
loss = regularizer(weights)
# L2 norm of above similarities.
expected_loss = 0.2890284
tf.debugging.assert_near(expected_loss, loss)
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.1,
normalization_fn=l2_normalize_fn,
l2_regularization=1.0)
l2_regularizer = tf.keras.regularizers.l2(1.0)
loss = regularizer(weights)
expected_loss = 0.2890284 + l2_regularizer(weights)
tf.debugging.assert_near(expected_loss, loss)
def test_embedding_spreadout_regularizer_no_spreadout(self):
weights = tf.constant(
[[1.0, 0.0, 0.0],
[2.0, 2.0, 2.0],
[0.1, 0.2, 0.3],
[0.3, 0.2, 0.1],
[0.0, 1.0, 0.0]])
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.0,
normalization_fn=None,
l2_regularization=0.0)
loss = regularizer(weights)
expected_loss = 0.0
tf.debugging.assert_near(expected_loss, loss)
# Test that L2 normalization behaves normally.
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.0,
normalization_fn=None,
l2_regularization=0.1)
l2_regularizer = tf.keras.regularizers.l2(0.1)
loss = regularizer(weights)
l2_loss = l2_regularizer(weights)
tf.debugging.assert_near(l2_loss, loss)
# Test that normalization_fn has no effect.
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.0,
normalization_fn=l2_normalize_fn,
l2_regularization=0.1)
l2_regularizer = tf.keras.regularizers.l2(0.1)
loss = regularizer(weights)
l2_loss = l2_regularizer(weights)
tf.debugging.assert_near(l2_loss, loss)
def test_embedding_spreadout_regularizer_get_config(self):
weights = tf.constant(
[[1.0, 0.0, 0.0],
[2.0, 2.0, 2.0],
[0.1, 0.2, 0.3],
[0.3, 0.2, 0.1],
[0.0, 1.0, 0.0]])
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.0,
normalization_fn=l2_normalize_fn,
l2_regularization=0.1)
config = regularizer.get_config()
expected_config = {
'spreadout_lambda': 0.0,
'normalization_fn': l2_normalize_fn,
'l2_regularization': 0.1
}
new_regularizer = (
keras_layers.EmbeddingSpreadoutRegularizer.from_config(config))
l2_regularizer = tf.keras.regularizers.l2(0.1)
loss = new_regularizer(weights)
l2_loss = l2_regularizer(weights)
self.assertEqual(config, expected_config)
tf.debugging.assert_near(l2_loss, loss)
if __name__ == '__main__':
absltest.main()
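# Editor's sketch (not in the original file): the tests above imply the
# following spreadout computation -- pairwise similarities of the embedding
# rows (optionally normalized first), with the diagonal removed, penalized by
# their Frobenius norm and scaled by spreadout_lambda. This is a reference
# sketch of that math, not the actual dual_encoder implementation.
def spreadout_loss(weights, spreadout_lambda=0.1, normalization_fn=None):
  if normalization_fn is not None:
    weights = normalization_fn(weights)
  similarities = tf.matmul(weights, weights, transpose_b=True)
  # Zero out the self-similarities on the diagonal.
  similarities -= tf.linalg.diag(tf.linalg.diag_part(similarities))
  return spreadout_lambda * tf.norm(similarities)
# For the first test case above this reproduces expected_loss ~= 0.47053.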
|
[
"absl.testing.absltest.main",
"dual_encoder.keras_layers.EmbeddingSpreadoutRegularizer.from_config",
"tensorflow.debugging.assert_equal",
"dual_encoder.keras_layers.MaskedReshape",
"tensorflow.constant",
"dual_encoder.keras_layers.MaskedAverage",
"tensorflow.keras.backend.l2_normalize",
"tensorflow.debugging.assert_near",
"dual_encoder.keras_layers.EmbeddingSpreadoutRegularizer",
"tensorflow.keras.regularizers.l2"
] |
[((704, 745), 'tensorflow.keras.backend.l2_normalize', 'tf.keras.backend.l2_normalize', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (733, 745), True, 'import tensorflow as tf\n'), ((11535, 11550), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (11548, 11550), False, 'from absl.testing import absltest\n'), ((854, 883), 'dual_encoder.keras_layers.MaskedAverage', 'keras_layers.MaskedAverage', (['(1)'], {}), '(1)\n', (880, 883), False, 'from dual_encoder import keras_layers\n'), ((898, 1072), 'tensorflow.constant', 'tf.constant', (['[[[0.5, 0.3], [0.4, 0.1], [0.4, 0.1]], [[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]],\n [[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]], [[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]]]'], {}), '([[[0.5, 0.3], [0.4, 0.1], [0.4, 0.1]], [[0.6, 0.8], [0.5, 0.4],\n [0.4, 0.1]], [[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]], [[0.9, 0.4], [0.4, \n 0.1], [0.4, 0.1]]])\n', (909, 1072), True, 'import tensorflow as tf\n'), ((1114, 1218), 'tensorflow.constant', 'tf.constant', (['[[True, True, True], [False, False, True], [True, False, False], [False, \n False, False]]'], {}), '([[True, True, True], [False, False, True], [True, False, False],\n [False, False, False]])\n', (1125, 1218), True, 'import tensorflow as tf\n'), ((1448, 1517), 'tensorflow.constant', 'tf.constant', (['[[1.3 / 3, 0.5 / 3], [0.4, 0.1], [0.9, 0.4], [0.0, 0.0]]'], {}), '([[1.3 / 3, 0.5 / 3], [0.4, 0.1], [0.9, 0.4], [0.0, 0.0]])\n', (1459, 1517), True, 'import tensorflow as tf\n'), ((1586, 1644), 'tensorflow.debugging.assert_near', 'tf.debugging.assert_near', (['expected_average', 'output_average'], {}), '(expected_average, output_average)\n', (1610, 1644), True, 'import tensorflow as tf\n'), ((1758, 1787), 'dual_encoder.keras_layers.MaskedAverage', 'keras_layers.MaskedAverage', (['(2)'], {}), '(2)\n', (1784, 1787), False, 'from dual_encoder import keras_layers\n'), ((1802, 2143), 'tensorflow.constant', 'tf.constant', (['[[[[0.5, 0.3], [0.4, 0.1], [0.4, 0.1]], [[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]\n ]], [[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]], [[0.6, 0.8], [0.5, 0.4], [\n 0.4, 0.1]]], [[[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]], [[0.6, 0.8], [0.5, \n 0.4], [0.4, 0.1]]], [[[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]], [[0.6, 0.8],\n [0.5, 0.4], [0.4, 0.1]]]]'], {}), '([[[[0.5, 0.3], [0.4, 0.1], [0.4, 0.1]], [[0.6, 0.8], [0.5, 0.4],\n [0.4, 0.1]]], [[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]], [[0.6, 0.8], [0.5,\n 0.4], [0.4, 0.1]]], [[[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]], [[0.6, 0.8],\n [0.5, 0.4], [0.4, 0.1]]], [[[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]], [[0.6,\n 0.8], [0.5, 0.4], [0.4, 0.1]]]])\n', (1813, 2143), True, 'import tensorflow as tf\n'), ((2214, 2418), 'tensorflow.constant', 'tf.constant', (['[[[True, True, True], [True, False, True]], [[False, False, True], [False, \n False, False]], [[True, False, False], [True, True, True]], [[False, \n False, False], [True, False, False]]]'], {}), '([[[True, True, True], [True, False, True]], [[False, False, \n True], [False, False, False]], [[True, False, False], [True, True, True\n ]], [[False, False, False], [True, False, False]]])\n', (2225, 2418), True, 'import tensorflow as tf\n'), ((2642, 2777), 'tensorflow.constant', 'tf.constant', (['[[[1.3 / 3, 0.5 / 3], [0.5, 0.45]], [[0.4, 0.1], [0.0, 0.0]], [[0.9, 0.4],\n [0.5, 1.3 / 3]], [[0.0, 0.0], [0.6, 0.8]]]'], {}), '([[[1.3 / 3, 0.5 / 3], [0.5, 0.45]], [[0.4, 0.1], [0.0, 0.0]], [\n [0.9, 0.4], [0.5, 1.3 / 3]], [[0.0, 0.0], [0.6, 0.8]]])\n', (2653, 2777), True, 'import tensorflow as tf\n'), ((2832, 2903), 'tensorflow.constant', 'tf.constant', (['[[True, 
True], [True, False], [True, True], [False, True]]'], {}), '([[True, True], [True, False], [True, True], [False, True]])\n', (2843, 2903), True, 'import tensorflow as tf\n'), ((3008, 3066), 'tensorflow.debugging.assert_near', 'tf.debugging.assert_near', (['expected_average', 'output_average'], {}), '(expected_average, output_average)\n', (3032, 3066), True, 'import tensorflow as tf\n'), ((3071, 3124), 'tensorflow.debugging.assert_equal', 'tf.debugging.assert_equal', (['expected_mask', 'output_mask'], {}), '(expected_mask, output_mask)\n', (3096, 3124), True, 'import tensorflow as tf\n'), ((3199, 3228), 'dual_encoder.keras_layers.MaskedAverage', 'keras_layers.MaskedAverage', (['(1)'], {}), '(1)\n', (3225, 3228), False, 'from dual_encoder import keras_layers\n'), ((3243, 3374), 'tensorflow.constant', 'tf.constant', (['[[[0.5, 0.3], [0.4, 0.1], [0.4, 0.1]], [[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]],\n [[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]]]'], {}), '([[[0.5, 0.3], [0.4, 0.1], [0.4, 0.1]], [[0.6, 0.8], [0.5, 0.4],\n [0.4, 0.1]], [[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]]])\n', (3254, 3374), True, 'import tensorflow as tf\n'), ((3671, 3722), 'dual_encoder.keras_layers.MaskedReshape', 'keras_layers.MaskedReshape', (['(4, 4, 2, 1)', '(4, 4, 2)'], {}), '((4, 4, 2, 1), (4, 4, 2))\n', (3697, 3722), False, 'from dual_encoder import keras_layers\n'), ((3737, 3995), 'tensorflow.constant', 'tf.constant', (['[[[1.0], [2.0], [0.5], [0.4], [0.4], [0.1], [0.0], [0.0]], [[0.4], [0.1], [\n 0.0], [0.0], [0.0], [0.0], [0.6], [0.8]], [[0.9], [0.4], [0.5], [3.0],\n [0.9], [0.4], [0.5], [3.0]], [[0.0], [0.0], [0.6], [0.8], [0.4], [0.1],\n [0.0], [0.0]]]'], {}), '([[[1.0], [2.0], [0.5], [0.4], [0.4], [0.1], [0.0], [0.0]], [[\n 0.4], [0.1], [0.0], [0.0], [0.0], [0.0], [0.6], [0.8]], [[0.9], [0.4],\n [0.5], [3.0], [0.9], [0.4], [0.5], [3.0]], [[0.0], [0.0], [0.6], [0.8],\n [0.4], [0.1], [0.0], [0.0]]])\n', (3748, 3995), True, 'import tensorflow as tf\n'), ((4033, 4271), 'tensorflow.constant', 'tf.constant', (['[[True, False, True, True, True, False, False, False], [True, False, True, \n True, True, True, False, True], [False, True, True, False, True, True, \n True, True], [False, True, True, True, True, False, False, True]]'], {}), '([[True, False, True, True, True, False, False, False], [True, \n False, True, True, True, True, False, True], [False, True, True, False,\n True, True, True, True], [False, True, True, True, True, False, False, \n True]])\n', (4044, 4271), True, 'import tensorflow as tf\n'), ((4447, 4739), 'tensorflow.constant', 'tf.constant', (['[[[[1.0], [2.0]], [[0.5], [0.4]], [[0.4], [0.1]], [[0.0], [0.0]]], [[[0.4],\n [0.1]], [[0.0], [0.0]], [[0.0], [0.0]], [[0.6], [0.8]]], [[[0.9], [0.4]\n ], [[0.5], [3.0]], [[0.9], [0.4]], [[0.5], [3.0]]], [[[0.0], [0.0]], [[\n 0.6], [0.8]], [[0.4], [0.1]], [[0.0], [0.0]]]]'], {}), '([[[[1.0], [2.0]], [[0.5], [0.4]], [[0.4], [0.1]], [[0.0], [0.0]\n ]], [[[0.4], [0.1]], [[0.0], [0.0]], [[0.0], [0.0]], [[0.6], [0.8]]], [\n [[0.9], [0.4]], [[0.5], [3.0]], [[0.9], [0.4]], [[0.5], [3.0]]], [[[0.0\n ], [0.0]], [[0.6], [0.8]], [[0.4], [0.1]], [[0.0], [0.0]]]])\n', (4458, 4739), True, 'import tensorflow as tf\n'), ((4784, 5054), 'tensorflow.constant', 'tf.constant', (['[[[True, False], [True, True], [True, False], [False, False]], [[True, \n False], [True, True], [True, True], [False, True]], [[False, True], [\n True, False], [True, True], [True, True]], [[False, True], [True, True],\n [True, False], [False, True]]]'], {}), '([[[True, False], [True, True], [True, False], [False, 
False]],\n [[True, False], [True, True], [True, True], [False, True]], [[False, \n True], [True, False], [True, True], [True, True]], [[False, True], [\n True, True], [True, False], [False, True]]])\n', (4795, 5054), True, 'import tensorflow as tf\n'), ((5082, 5131), 'tensorflow.debugging.assert_near', 'tf.debugging.assert_near', (['expected_output', 'output'], {}), '(expected_output, output)\n', (5106, 5131), True, 'import tensorflow as tf\n'), ((5136, 5189), 'tensorflow.debugging.assert_equal', 'tf.debugging.assert_equal', (['expected_mask', 'output_mask'], {}), '(expected_mask, output_mask)\n', (5161, 5189), True, 'import tensorflow as tf\n'), ((5270, 5323), 'dual_encoder.keras_layers.MaskedReshape', 'keras_layers.MaskedReshape', (['(-1, 4, 2, 1)', '(-1, 4, 2)'], {}), '((-1, 4, 2, 1), (-1, 4, 2))\n', (5296, 5323), False, 'from dual_encoder import keras_layers\n'), ((5338, 5596), 'tensorflow.constant', 'tf.constant', (['[[[1.0], [2.0], [0.5], [0.4], [0.4], [0.1], [0.0], [0.0]], [[0.4], [0.1], [\n 0.0], [0.0], [0.0], [0.0], [0.6], [0.8]], [[0.9], [0.4], [0.5], [3.0],\n [0.9], [0.4], [0.5], [3.0]], [[0.0], [0.0], [0.6], [0.8], [0.4], [0.1],\n [0.0], [0.0]]]'], {}), '([[[1.0], [2.0], [0.5], [0.4], [0.4], [0.1], [0.0], [0.0]], [[\n 0.4], [0.1], [0.0], [0.0], [0.0], [0.0], [0.6], [0.8]], [[0.9], [0.4],\n [0.5], [3.0], [0.9], [0.4], [0.5], [3.0]], [[0.0], [0.0], [0.6], [0.8],\n [0.4], [0.1], [0.0], [0.0]]])\n', (5349, 5596), True, 'import tensorflow as tf\n'), ((5634, 5872), 'tensorflow.constant', 'tf.constant', (['[[True, False, True, True, True, False, False, False], [True, False, True, \n True, True, True, False, True], [False, True, True, False, True, True, \n True, True], [False, True, True, True, True, False, False, True]]'], {}), '([[True, False, True, True, True, False, False, False], [True, \n False, True, True, True, True, False, True], [False, True, True, False,\n True, True, True, True], [False, True, True, True, True, False, False, \n True]])\n', (5645, 5872), True, 'import tensorflow as tf\n'), ((6048, 6340), 'tensorflow.constant', 'tf.constant', (['[[[[1.0], [2.0]], [[0.5], [0.4]], [[0.4], [0.1]], [[0.0], [0.0]]], [[[0.4],\n [0.1]], [[0.0], [0.0]], [[0.0], [0.0]], [[0.6], [0.8]]], [[[0.9], [0.4]\n ], [[0.5], [3.0]], [[0.9], [0.4]], [[0.5], [3.0]]], [[[0.0], [0.0]], [[\n 0.6], [0.8]], [[0.4], [0.1]], [[0.0], [0.0]]]]'], {}), '([[[[1.0], [2.0]], [[0.5], [0.4]], [[0.4], [0.1]], [[0.0], [0.0]\n ]], [[[0.4], [0.1]], [[0.0], [0.0]], [[0.0], [0.0]], [[0.6], [0.8]]], [\n [[0.9], [0.4]], [[0.5], [3.0]], [[0.9], [0.4]], [[0.5], [3.0]]], [[[0.0\n ], [0.0]], [[0.6], [0.8]], [[0.4], [0.1]], [[0.0], [0.0]]]])\n', (6059, 6340), True, 'import tensorflow as tf\n'), ((6385, 6655), 'tensorflow.constant', 'tf.constant', (['[[[True, False], [True, True], [True, False], [False, False]], [[True, \n False], [True, True], [True, True], [False, True]], [[False, True], [\n True, False], [True, True], [True, True]], [[False, True], [True, True],\n [True, False], [False, True]]]'], {}), '([[[True, False], [True, True], [True, False], [False, False]],\n [[True, False], [True, True], [True, True], [False, True]], [[False, \n True], [True, False], [True, True], [True, True]], [[False, True], [\n True, True], [True, False], [False, True]]])\n', (6396, 6655), True, 'import tensorflow as tf\n'), ((6683, 6732), 'tensorflow.debugging.assert_near', 'tf.debugging.assert_near', (['expected_output', 'output'], {}), '(expected_output, output)\n', (6707, 6732), True, 'import tensorflow as tf\n'), ((6737, 6790), 
'tensorflow.debugging.assert_equal', 'tf.debugging.assert_equal', (['expected_mask', 'output_mask'], {}), '(expected_mask, output_mask)\n', (6762, 6790), True, 'import tensorflow as tf\n'), ((6865, 6918), 'dual_encoder.keras_layers.MaskedReshape', 'keras_layers.MaskedReshape', (['(-1, 4, 2, 1)', '(-1, 4, 2)'], {}), '((-1, 4, 2, 1), (-1, 4, 2))\n', (6891, 6918), False, 'from dual_encoder import keras_layers\n'), ((6933, 7191), 'tensorflow.constant', 'tf.constant', (['[[[1.0], [2.0], [0.5], [0.4], [0.4], [0.1], [0.0], [0.0]], [[0.4], [0.1], [\n 0.0], [0.0], [0.0], [0.0], [0.6], [0.8]], [[0.9], [0.4], [0.5], [3.0],\n [0.9], [0.4], [0.5], [3.0]], [[0.0], [0.0], [0.6], [0.8], [0.4], [0.1],\n [0.0], [0.0]]]'], {}), '([[[1.0], [2.0], [0.5], [0.4], [0.4], [0.1], [0.0], [0.0]], [[\n 0.4], [0.1], [0.0], [0.0], [0.0], [0.0], [0.6], [0.8]], [[0.9], [0.4],\n [0.5], [3.0], [0.9], [0.4], [0.5], [3.0]], [[0.0], [0.0], [0.6], [0.8],\n [0.4], [0.1], [0.0], [0.0]]])\n', (6944, 7191), True, 'import tensorflow as tf\n'), ((7503, 7606), 'tensorflow.constant', 'tf.constant', (['[[1.0, 0.0, 0.0], [2.0, 2.0, 2.0], [0.1, 0.2, 0.3], [0.3, 0.2, 0.1], [0.0, \n 1.0, 0.0]]'], {}), '([[1.0, 0.0, 0.0], [2.0, 2.0, 2.0], [0.1, 0.2, 0.3], [0.3, 0.2, \n 0.1], [0.0, 1.0, 0.0]])\n', (7514, 7606), True, 'import tensorflow as tf\n'), ((7666, 7780), 'dual_encoder.keras_layers.EmbeddingSpreadoutRegularizer', 'keras_layers.EmbeddingSpreadoutRegularizer', ([], {'spreadout_lambda': '(0.1)', 'normalization_fn': 'None', 'l2_regularization': '(0.0)'}), '(spreadout_lambda=0.1,\n normalization_fn=None, l2_regularization=0.0)\n', (7708, 7780), False, 'from dual_encoder import keras_layers\n'), ((8090, 8135), 'tensorflow.debugging.assert_near', 'tf.debugging.assert_near', (['expected_loss', 'loss'], {}), '(expected_loss, loss)\n', (8114, 8135), True, 'import tensorflow as tf\n'), ((8155, 8269), 'dual_encoder.keras_layers.EmbeddingSpreadoutRegularizer', 'keras_layers.EmbeddingSpreadoutRegularizer', ([], {'spreadout_lambda': '(0.1)', 'normalization_fn': 'None', 'l2_regularization': '(1.0)'}), '(spreadout_lambda=0.1,\n normalization_fn=None, l2_regularization=1.0)\n', (8197, 8269), False, 'from dual_encoder import keras_layers\n'), ((8312, 8341), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(1.0)'], {}), '(1.0)\n', (8336, 8341), True, 'import tensorflow as tf\n'), ((8440, 8485), 'tensorflow.debugging.assert_near', 'tf.debugging.assert_near', (['expected_loss', 'loss'], {}), '(expected_loss, loss)\n', (8464, 8485), True, 'import tensorflow as tf\n'), ((8569, 8672), 'tensorflow.constant', 'tf.constant', (['[[1.0, 0.0, 0.0], [2.0, 2.0, 2.0], [0.1, 0.2, 0.3], [0.3, 0.2, 0.1], [0.0, \n 1.0, 0.0]]'], {}), '([[1.0, 0.0, 0.0], [2.0, 2.0, 2.0], [0.1, 0.2, 0.3], [0.3, 0.2, \n 0.1], [0.0, 1.0, 0.0]])\n', (8580, 8672), True, 'import tensorflow as tf\n'), ((8732, 8857), 'dual_encoder.keras_layers.EmbeddingSpreadoutRegularizer', 'keras_layers.EmbeddingSpreadoutRegularizer', ([], {'spreadout_lambda': '(0.1)', 'normalization_fn': 'l2_normalize_fn', 'l2_regularization': '(0.0)'}), '(spreadout_lambda=0.1,\n normalization_fn=l2_normalize_fn, l2_regularization=0.0)\n', (8774, 8857), False, 'from dual_encoder import keras_layers\n'), ((8984, 9029), 'tensorflow.debugging.assert_near', 'tf.debugging.assert_near', (['expected_loss', 'loss'], {}), '(expected_loss, loss)\n', (9008, 9029), True, 'import tensorflow as tf\n'), ((9049, 9174), 'dual_encoder.keras_layers.EmbeddingSpreadoutRegularizer', 'keras_layers.EmbeddingSpreadoutRegularizer', ([], 
{'spreadout_lambda': '(0.1)', 'normalization_fn': 'l2_normalize_fn', 'l2_regularization': '(1.0)'}), '(spreadout_lambda=0.1,\n normalization_fn=l2_normalize_fn, l2_regularization=1.0)\n', (9091, 9174), False, 'from dual_encoder import keras_layers\n'), ((9217, 9246), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(1.0)'], {}), '(1.0)\n', (9241, 9246), True, 'import tensorflow as tf\n'), ((9341, 9386), 'tensorflow.debugging.assert_near', 'tf.debugging.assert_near', (['expected_loss', 'loss'], {}), '(expected_loss, loss)\n', (9365, 9386), True, 'import tensorflow as tf\n'), ((9465, 9568), 'tensorflow.constant', 'tf.constant', (['[[1.0, 0.0, 0.0], [2.0, 2.0, 2.0], [0.1, 0.2, 0.3], [0.3, 0.2, 0.1], [0.0, \n 1.0, 0.0]]'], {}), '([[1.0, 0.0, 0.0], [2.0, 2.0, 2.0], [0.1, 0.2, 0.3], [0.3, 0.2, \n 0.1], [0.0, 1.0, 0.0]])\n', (9476, 9568), True, 'import tensorflow as tf\n'), ((9628, 9742), 'dual_encoder.keras_layers.EmbeddingSpreadoutRegularizer', 'keras_layers.EmbeddingSpreadoutRegularizer', ([], {'spreadout_lambda': '(0.0)', 'normalization_fn': 'None', 'l2_regularization': '(0.0)'}), '(spreadout_lambda=0.0,\n normalization_fn=None, l2_regularization=0.0)\n', (9670, 9742), False, 'from dual_encoder import keras_layers\n'), ((9826, 9871), 'tensorflow.debugging.assert_near', 'tf.debugging.assert_near', (['expected_loss', 'loss'], {}), '(expected_loss, loss)\n', (9850, 9871), True, 'import tensorflow as tf\n'), ((9942, 10056), 'dual_encoder.keras_layers.EmbeddingSpreadoutRegularizer', 'keras_layers.EmbeddingSpreadoutRegularizer', ([], {'spreadout_lambda': '(0.0)', 'normalization_fn': 'None', 'l2_regularization': '(0.1)'}), '(spreadout_lambda=0.0,\n normalization_fn=None, l2_regularization=0.1)\n', (9984, 10056), False, 'from dual_encoder import keras_layers\n'), ((10099, 10128), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(0.1)'], {}), '(0.1)\n', (10123, 10128), True, 'import tensorflow as tf\n'), ((10205, 10244), 'tensorflow.debugging.assert_near', 'tf.debugging.assert_near', (['l2_loss', 'loss'], {}), '(l2_loss, loss)\n', (10229, 10244), True, 'import tensorflow as tf\n'), ((10312, 10437), 'dual_encoder.keras_layers.EmbeddingSpreadoutRegularizer', 'keras_layers.EmbeddingSpreadoutRegularizer', ([], {'spreadout_lambda': '(0.0)', 'normalization_fn': 'l2_normalize_fn', 'l2_regularization': '(0.1)'}), '(spreadout_lambda=0.0,\n normalization_fn=l2_normalize_fn, l2_regularization=0.1)\n', (10354, 10437), False, 'from dual_encoder import keras_layers\n'), ((10480, 10509), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(0.1)'], {}), '(0.1)\n', (10504, 10509), True, 'import tensorflow as tf\n'), ((10586, 10625), 'tensorflow.debugging.assert_near', 'tf.debugging.assert_near', (['l2_loss', 'loss'], {}), '(l2_loss, loss)\n', (10610, 10625), True, 'import tensorflow as tf\n'), ((10702, 10805), 'tensorflow.constant', 'tf.constant', (['[[1.0, 0.0, 0.0], [2.0, 2.0, 2.0], [0.1, 0.2, 0.3], [0.3, 0.2, 0.1], [0.0, \n 1.0, 0.0]]'], {}), '([[1.0, 0.0, 0.0], [2.0, 2.0, 2.0], [0.1, 0.2, 0.3], [0.3, 0.2, \n 0.1], [0.0, 1.0, 0.0]])\n', (10713, 10805), True, 'import tensorflow as tf\n'), ((10864, 10989), 'dual_encoder.keras_layers.EmbeddingSpreadoutRegularizer', 'keras_layers.EmbeddingSpreadoutRegularizer', ([], {'spreadout_lambda': '(0.0)', 'normalization_fn': 'l2_normalize_fn', 'l2_regularization': '(0.1)'}), '(spreadout_lambda=0.0,\n normalization_fn=l2_normalize_fn, l2_regularization=0.1)\n', (10906, 10989), False, 'from dual_encoder import keras_layers\n'), 
((11224, 11286), 'dual_encoder.keras_layers.EmbeddingSpreadoutRegularizer.from_config', 'keras_layers.EmbeddingSpreadoutRegularizer.from_config', (['config'], {}), '(config)\n', (11278, 11286), False, 'from dual_encoder import keras_layers\n'), ((11309, 11338), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(0.1)'], {}), '(0.1)\n', (11333, 11338), True, 'import tensorflow as tf\n'), ((11465, 11504), 'tensorflow.debugging.assert_near', 'tf.debugging.assert_near', (['l2_loss', 'loss'], {}), '(l2_loss, loss)\n', (11489, 11504), True, 'import tensorflow as tf\n')]
|
from typing import (
Any,
Callable,
cast,
Generic,
Mapping,
NoReturn,
Optional,
TypeVar,
Union,
)
from abc import ABC, abstractmethod
from functools import wraps
import math
import collections
from util import *
__all__ = ["Maybe", "Some", "Nothing", "maybe"]
class Maybe(Generic[T], ABC):
"""
Generic class and type for the pysome Maybe object. Two classes inherit from
this abstract class: Some and Nothing.
"""
def __init__(self, value, is_none=False):
if not is_none:
self._value = value
super().__init__()
@abstractmethod
def get(self) -> Union[T, NoReturn]:
"""
Get the value that is stored if it is a `Some` object, or throw an error
if it is `Nothing` object. Should only be used if you know that it is a
`Some` object. If you are unsure, you can use `Maybe.or_else()`.
"""
pass
@abstractmethod
def or_else(self, value: T) -> T:
"""
Get the value that is stored if it is a `Some` object, or return `value` if it is a `Nothing` object.
value: T
- A default value to be returned if it is a Nothing class
"""
pass
def some_or_else(self, value: T) -> "Maybe[T]":
return maybe(self.or_else(value))
@abstractmethod
def is_some(self) -> bool:
"""
        Return whether or not the object is `Some`. Equivalent to `isinstance(self, Some)`.
"""
pass
@abstractmethod
def is_none(self) -> bool:
"""
        Return whether or not the object is `Nothing`. Equivalent to `isinstance(self, Nothing)`.
"""
pass
@abstractmethod
def comb(self, *funcs: Callable[["Maybe"], "Maybe"]) -> "Maybe":
"""
Given a list of functions, call each function iteratively on self and
return the result. It should be noted that the following two are
equivalent. The functions are assumed to be "maybe" functions, in that
they take in a Maybe object and return a Maybe object. If you have a
function that is not of that type, you can use the @maybefunction
wrapper to convert it.
> something.comb(f1, f2, f3)
> something.comb(f1).comb(f2).comb(f3)
funcs: Callable[[Maybe], Maybe]
- A "maybe" function that takes in a Maybe object and returns
a Maybe object.
"""
pass
class Some(Maybe[T]):
"""
    A class that contains something. While it is possible to directly instantiate
a `Some` object, you should instead use the `maybe()` function.
"""
def __init__(self, value):
super().__init__(value)
def get(self) -> T:
return self._value
def or_else(self, value: T) -> T:
return self.get()
def is_some(self):
return True
def is_none(self):
return False
def comb(self, *funcs: Callable[[Maybe[T]], Maybe[U]]) -> Maybe[U]:
value = self.get()
for func in funcs:
value = func(value)
if value == Nothing():
return value
return value
def __magic_wrapper(f):
def wrapper(self, *args):
return f(self.get(), *args)
return wrapper
def __eq__(self, other):
if isinstance(other, Some):
return self.get() == other.get()
elif isinstance(other, Nothing):
return False
return self.get() == other
def __getitem__(self, key: K) -> Maybe[V]:
try:
return maybe(self.get()[key])
except KeyError:
return Nothing()
except IndexError:
return Nothing()
except TypeError:
return Nothing()
def __setitem__(self, key, value):
self.get()[key] = value
__int__ = __magic_wrapper(int)
__complex__ = __magic_wrapper(complex)
__float__ = __magic_wrapper(float)
__bool__ = __magic_wrapper(bool)
__round__ = __magic_wrapper(round)
__trunc__ = __magic_wrapper(math.trunc)
__floor__ = __magic_wrapper(math.floor)
__ceil__ = __magic_wrapper(math.ceil)
__len__ = __magic_wrapper(len)
__hash__ = __magic_wrapper(hash)
def __op_wrapper(func):
@wraps(func)
def wrapper(self, other: Any) -> Maybe:
# Normalize
if isinstance(other, Some):
other = other.get()
try:
return maybe(func(self.get(), other)) # type: ignore
except TypeError:
return Nothing()
# Division case (I don't know how much overhead this adds)
except ZeroDivisionError:
return Nothing()
return wrapper
__add__ = __op_wrapper(lambda x, y: x + y)
__radd__ = __op_wrapper(lambda x, y: y + x)
__sub__ = __op_wrapper(lambda x, y: x - y)
__rsub__ = __op_wrapper(lambda x, y: y - x)
__mul__ = __op_wrapper(lambda x, y: x * y)
__rmul__ = __op_wrapper(lambda x, y: y * x)
__truediv__ = __op_wrapper(lambda x, y: x / y)
__rtruediv__ = __op_wrapper(lambda x, y: y / x)
def __getattr__(self, attr):
try:
if hasattr(self.get(), "__getattr__"):
return self.get().__getattr__(attr)
return self.get().__getattribute__(attr)
except AttributeError:
return Nothing()
def __str__(self):
return str(self.get())
def __repr__(self):
return "Some(%s)" % repr(self.get())
class Nothing(Maybe[T]):
def __init__(self):
super().__init__(None, True)
@staticmethod
def __return_nothing(*args, **kwargs):
return Nothing()
def get(self) -> NoReturn:
        raise Exception("Cannot call get() on a Nothing value")
def or_else(self, value: T) -> T:
return value
def is_some(self):
return False
def is_none(self):
return True
def comb(self, *funcs: Callable[[T], Maybe]) -> Maybe:
return self
def __eq__(self, other):
return isinstance(other, Nothing)
# All operators should return Nothing
__add__ = __return_nothing
__radd__ = __return_nothing
__sub__ = __return_nothing
__rsub__ = __return_nothing
__mul__ = __return_nothing
__rmul__ = __return_nothing
__truediv__ = __return_nothing
__rtruediv__ = __return_nothing
__getitem__ = __return_nothing
__getattr__ = __return_nothing
__call__ = __return_nothing
def __str__(self):
return "None"
def __repr__(self):
return "Nothing"
def maybe(value):
if value == Nothing():
return Nothing()
elif isinstance(value, Some):
return value
return Some(value)
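# Editor's usage sketch (not in the original file): illustrates the API
# documented in the docstrings above, assuming the module is in scope.
# The dictionary contents are made-up illustration values.
data = {"user": {"name": "Ada"}}
name = maybe(data)["user"]["name"].or_else("anonymous")  # "Ada"
age = maybe(data)["user"]["age"].or_else(0)              # 0: missing key -> Nothing
total = (maybe(10) + 5).or_else(-1)                      # 15
ratio = (maybe(10) / 0).or_else(-1)                      # -1: ZeroDivisionError -> Nothing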
|
[
"functools.wraps"
] |
[((4287, 4298), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (4292, 4298), False, 'from functools import wraps\n')]
|
# a program that sorts files into folders by their type or extension
import os
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from conf import conf_ext
print( ' PythonSorter by SI ver 0.5' )
print( '-----------------------------------------------------------' )
print( ' Hello, ' + os.getlogin() + '!' )
print( ' Welcome to the file sorting program' )
print( '-----------------------------------------------------------' )
print( ' Let us start by choosing a task: ' )
print( '-----------------------------------------------------------' )
print( ' 1 | sort files into folders' )
print( ' 2 | view the type configuration' )
print( '-----------------------------------------------------------' )
task = int( input( ' ' ) )
if not ((task == 1) or (task == 2)):
print( '-----------------------------------------------------------' )
    print( ' Error: no such task, you probably mistyped, it happens' )
    print( ' Tip: restart the program and try again' )
print( '-----------------------------------------------------------' )
exit()
if task == 1:
print( '-----------------------------------------------------------' )
    print( ' Before we start, let us set everything up the way you need' )
print( '-----------------------------------------------------------' )
    print( ' Choose a sorting mode:' )
print( '-----------------------------------------------------------' )
    print( ' 1 | by type' )
    print( ' 2 | by extension' )
print( '-----------------------------------------------------------' )
regime = int( input( ' ' ) )
if not ((regime == 1) or (regime == 2)):
print( '-----------------------------------------------------------' )
        print( ' Error: no such mode, you probably mistyped, it happens' )
        print( ' Tip: restart the program and try again' )
print( '-----------------------------------------------------------' )
exit()
print( '-----------------------------------------------------------' )
    print( ' Enter the folder path in the following format:' )
print( ' C:/example/path/to/files/' )
print( '-----------------------------------------------------------' )
folder = str( input( ' ' ) )
print( '-----------------------------------------------------------' )
    print( ' Settings saved, sorting is now running' )
    print( ' To exit, type anything or close the console' )
print( '-----------------------------------------------------------' )
    print( ' Progress:' )
class Handler( FileSystemEventHandler ):
def on_modified( self, event ):
global count
count = 0
for filename in os.listdir( folder ):
extension = filename.split( "." )
if len( extension ) > 1:
if regime == 1:
count += 1
for i in range( 0, len( conf_ext ) ):
if extension[ -1 ].lower() in conf_ext[ i ][ 1 ]:
print( ' ' + str( count ) + ' | ' + conf_ext[ i ][ 0 ][ 0 ] + ' | ' + filename )
try:
os.chdir( folder + conf_ext[ i ][ 0 ][ 0 ] + '/' )
except:
try:
os.makedirs( folder + conf_ext[ i ][ 0 ][ 0 ] + '/' )
except:
pass
file = folder + filename
file_new = folder + conf_ext[ i ][ 0 ][ 0 ] + '/' + filename
try:
os.rename( file, file_new )
except:
file_new = folder + conf_ext[ i ][ 0 ][ 0 ] + '/' + str( count ) + filename
try:
os.rename( file, file_new )
except:
pass
if regime == 2:
count += 1
print( ' ' + str( count ) + ' | ' + extension[ -1 ].lower() + ' | ' + filename )
try:
os.chdir( folder + extension[ -1 ].lower() + '/' )
except:
try:
os.makedirs( folder + extension[ -1 ].lower() + '/' )
except:
pass
file = folder + filename
file_new = folder + extension[ -1 ].lower() + '/' + filename
try:
os.rename( file, file_new )
except:
file_new = folder + extension[ -1 ].lower() + '/' + str( count ) + filename
try:
os.rename( file, file_new )
except:
pass
handle = Handler()
observer = Observer()
observer.schedule( handle, folder, recursive = False )
observer.start()
if input():
observer.stop()
observer.join()
if task == 2:
print( '-----------------------------------------------------------' )
    print( ' Printing all type-to-extension mappings...' )
print( '-----------------------------------------------------------' )
conf_ext.sort()
for i in range(0, len(conf_ext)):
        print( f' type {conf_ext[i][0][0]}:' )
conf_ext[i][1].sort()
for j in range(0, len(conf_ext[i][1])):
print( f' - {conf_ext[i][1][j]}' )
print('')
# M:/FilesDump/
|
[
"os.getlogin",
"conf.conf_ext.sort",
"os.makedirs",
"os.rename",
"os.chdir",
"os.listdir",
"watchdog.observers.Observer"
] |
[((4334, 4344), 'watchdog.observers.Observer', 'Observer', ([], {}), '()\n', (4342, 4344), False, 'from watchdog.observers import Observer\n'), ((4716, 4731), 'conf.conf_ext.sort', 'conf_ext.sort', ([], {}), '()\n', (4729, 4731), False, 'from conf import conf_ext\n'), ((359, 372), 'os.getlogin', 'os.getlogin', ([], {}), '()\n', (370, 372), False, 'import os\n'), ((2713, 2731), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (2723, 2731), False, 'import os\n'), ((4087, 4112), 'os.rename', 'os.rename', (['file', 'file_new'], {}), '(file, file_new)\n', (4096, 4112), False, 'import os\n'), ((3085, 3127), 'os.chdir', 'os.chdir', (["(folder + conf_ext[i][0][0] + '/')"], {}), "(folder + conf_ext[i][0][0] + '/')\n", (3093, 3127), False, 'import os\n'), ((3405, 3430), 'os.rename', 'os.rename', (['file', 'file_new'], {}), '(file, file_new)\n', (3414, 3430), False, 'import os\n'), ((4236, 4261), 'os.rename', 'os.rename', (['file', 'file_new'], {}), '(file, file_new)\n', (4245, 4261), False, 'import os\n'), ((3179, 3224), 'os.makedirs', 'os.makedirs', (["(folder + conf_ext[i][0][0] + '/')"], {}), "(folder + conf_ext[i][0][0] + '/')\n", (3190, 3224), False, 'import os\n'), ((3562, 3587), 'os.rename', 'os.rename', (['file', 'file_new'], {}), '(file, file_new)\n', (3571, 3587), False, 'import os\n')]
|
import glob
import logging
import platform
import re
import socket
from typing import Any
logger = logging.getLogger("hepynet")
def get_current_platform_name() -> str:
"""Returns the name of the current platform.
Returns:
str: name of current platform
"""
return platform.platform()
def get_current_hostname() -> str:
"""Returns the hostname of current machine
Returns:
str: current hostname
"""
return socket.gethostname()
def get_default_if_none(input_var: Any, default_value: Any):
if input_var is None:
return default_value
else:
return input_var
def get_newest_file_version(
path_pattern: str,
n_digit: int = 2,
ver_num: int = None,
use_existing: bool = False,
):
"""Check existed file and return last available file path with version.
Version range 00 -> 99 (or 999)
If reach limit, last available version will be used. 99 (or 999)
"""
# Return file path if ver_num is given
if ver_num is not None:
return {
"ver_num": ver_num,
"path": path_pattern.format(str(ver_num).zfill(n_digit)),
}
# Otherwise try to find ver_num
path_list = glob.glob(path_pattern.format("*"))
path_list = sorted(path_list)
if len(path_list) < 1:
if use_existing:
logger.debug(
f"Can't find existing file with path pattern: {path_pattern}, returning empty."
)
return {}
else:
ver_num = 0
path = path_pattern.format(str(0).zfill(n_digit))
else:
path = path_list[-1] # Choose the last match
version_tag_search = re.compile("v(" + "\d" * n_digit + ")")
ver_num = int(version_tag_search.search(path).group(1))
if not use_existing:
ver_num += 1
path = path_pattern.format(str(ver_num).zfill(n_digit))
return {
"ver_num": ver_num,
"path": path,
}
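# Illustrative example (hypothetical file names, not part of this module): with
# path_pattern="model_v{}.h5" and files model_v00.h5 and model_v01.h5 already on disk,
# get_newest_file_version(path_pattern) returns {"ver_num": 2, "path": "model_v02.h5"},
# while get_newest_file_version(path_pattern, use_existing=True) returns
# {"ver_num": 1, "path": "model_v01.h5"}.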
def get_significant_digits(number: float, n_digits: int):
if round(number) == number:
m = len(str(number)) - 1 - n_digits
if number / (10 ** m) == 0.0:
return number
else:
return float(int(number) / (10 ** m) * (10 ** m))
if len(str(number)) > n_digits + 1:
return round(number, n_digits - len(str(int(number))))
else:
return number
|
[
"platform.platform",
"socket.gethostname",
"logging.getLogger",
"re.compile"
] |
[((100, 128), 'logging.getLogger', 'logging.getLogger', (['"""hepynet"""'], {}), "('hepynet')\n", (117, 128), False, 'import logging\n'), ((291, 310), 'platform.platform', 'platform.platform', ([], {}), '()\n', (308, 310), False, 'import platform\n'), ((458, 478), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (476, 478), False, 'import socket\n'), ((1684, 1724), 're.compile', 're.compile', (["('v(' + '\\\\d' * n_digit + ')')"], {}), "('v(' + '\\\\d' * n_digit + ')')\n", (1694, 1724), False, 'import re\n')]
|
# Copyright (C) 2019 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from django.contrib.auth import get_user_model
from aether.sdk.multitenancy.utils import get_current_realm, add_user_to_realm
UserModel = get_user_model()
user_objects = UserModel.objects
def get_or_create_user(request, username):
# gets the existing user or creates a new one
_username = parse_username(request, username)
try:
user = user_objects.get(username=_username)
except UserModel.DoesNotExist:
realm = get_current_realm(request)
user = user_objects.create_user(
username=_username,
first_name=username,
last_name=realm or '',
password=user_objects.make_random_password(length=100),
)
# only add user if it doesn't exist.
add_user_to_realm(request, user)
return user
def parse_username(request, username):
    # the internal username is prefixed with the realm name
realm = get_current_realm(request)
if realm and not username.startswith(f'{realm}__'):
username = f'{realm}__{username}'
return username
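# Illustrative example (hypothetical realm and user names): if the current realm is "acme",
# parse_username(request, "bob") returns "acme__bob", and unparse_username(request, "acme__bob")
# returns "bob" again.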
def unparse_username(request, username):
    # the internal username is prefixed with the realm name
realm = get_current_realm(request)
if realm and username.startswith(f'{realm}__'):
username = username[len(f'{realm}__'):]
return username
def user_to_string(user, request=None):
'''
Returns a readable name of the user.
- ``first_name`` + ``last_name``
- ``username``
'''
if user.first_name and user.last_name:
return f'{user.first_name} {user.last_name}'
if request:
return unparse_username(request, user.username)
return user.username
|
[
"aether.sdk.multitenancy.utils.get_current_realm",
"django.contrib.auth.get_user_model",
"aether.sdk.multitenancy.utils.add_user_to_realm"
] |
[((871, 887), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (885, 887), False, 'from django.contrib.auth import get_user_model\n'), ((1632, 1658), 'aether.sdk.multitenancy.utils.get_current_realm', 'get_current_realm', (['request'], {}), '(request)\n', (1649, 1658), False, 'from aether.sdk.multitenancy.utils import get_current_realm, add_user_to_realm\n'), ((1885, 1911), 'aether.sdk.multitenancy.utils.get_current_realm', 'get_current_realm', (['request'], {}), '(request)\n', (1902, 1911), False, 'from aether.sdk.multitenancy.utils import get_current_realm, add_user_to_realm\n'), ((1178, 1204), 'aether.sdk.multitenancy.utils.get_current_realm', 'get_current_realm', (['request'], {}), '(request)\n', (1195, 1204), False, 'from aether.sdk.multitenancy.utils import get_current_realm, add_user_to_realm\n'), ((1477, 1509), 'aether.sdk.multitenancy.utils.add_user_to_realm', 'add_user_to_realm', (['request', 'user'], {}), '(request, user)\n', (1494, 1509), False, 'from aether.sdk.multitenancy.utils import get_current_realm, add_user_to_realm\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import time
import paddle
from paddle.amp import auto_cast
from .mixup import Mixup
from core.evaluate import accuracy
from utils.comm import comm
def train_one_epoch(config, train_loader, model, criterion, optimizer,
epoch, output_dir, tb_log_dir, writer_dict, scaler=None):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
logging.info('=> switch to train mode')
model.train()
aug = config.AUG
mixup_fn = Mixup(mixup_alpha=aug.MIXUP, cutmix_alpha=aug.MIXCUT,
cutmix_minmax=aug.MIXCUT_MINMAX if aug.MIXCUT_MINMAX else None,
prob=aug.MIXUP_PROB, switch_prob=aug.MIXUP_SWITCH_PROB,
mode=aug.MIXUP_MODE, label_smoothing=config.LOSS.LABEL_SMOOTHING,
num_classes=config.MODEL.NUM_CLASSES) if aug.MIXUP_PROB > 0.0 else None
end = time.time()
for i, (x, y) in enumerate(train_loader):
data_time.update(time.time() - end)
if mixup_fn:
x, y = mixup_fn(x, y)
with auto_cast(enable=config.AMP.ENABLED):
outputs = model(x)
loss = criterion(outputs, y)
optimizer.clear_grad()
is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
scaler.scale(loss).backward(create_graph=is_second_order)
if config.TRAIN.CLIP_GRAD_NORM > 0.0:
scaler.unscale_(optimizer)
paddle.fluid.layers.nn.clip_by_norm(
model.parameters(), config.TRAIN.CLIP_GRAD_NORM
)
scaler.step(optimizer)
scaler.update()
losses.update(loss.item(), x.shape[0])
if mixup_fn:
y = paddle.argmax(y, axis=1)
prec1, prec5 = accuracy(outputs, y, (1, 5))
top1.update(prec1, x.shape[0])
top5.update(prec5, x.shape[0])
batch_time.update(time.time() - end)
end = time.time()
if i % config.PRINT_FREQ == 0:
msg = (
'=> Epoch[{0}][{1}/{2}]: '
'Time {batch_time.val:.3f}s\t '
'({batch_time.avg:.3f}s)\t'
'Speed {speed:.1f} samples/s\t'
'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t'
'Loss {loss.val:.5f} ({loss.avg:.5f})\t'
'Accuracy@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Accuracy@5 {top5.val:.3f} ({top5.avg:.3f})\t'.format(epoch, i, len(train_loader),
batch_time=batch_time,
speed=x.shape[0] / batch_time.val,
data_time=data_time, loss=losses,
top1=top1, top5=top5))
logging.info(msg)
if writer_dict and comm.is_main_process():
writer = writer_dict['writer']
global_steps = writer_dict['train_global_steps']
writer.add_scalar('train_loss', losses.avg, global_steps)
writer.add_scalar('train_top1', top1.avg, global_steps)
writer_dict['train_global_steps'] = global_steps + 1
@paddle.no_grad()
def test(config, val_loader, model, criterion, output_dir, tb_log_dir,
writer_dict=None, distributed=False, real_labels=None,
valid_labels=None):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
logging.info('=> switch to eval mode')
model.eval()
end = time.time()
for i, (x, y) in enumerate(val_loader):
outputs = model(x)
if valid_labels:
outputs = outputs[:, valid_labels]
loss = criterion(outputs, y)
if real_labels and not distributed:
real_labels.add_result(outputs)
losses.update(loss.item(), x.shape[0])
prec1, prec5 = accuracy(outputs, y, (1, 5))
top1.update(prec1, x.shape[0])
top5.update(prec5, x.shape[0])
batch_time.update(time.time() - end)
end = time.time()
logging.info('=> synchronize...')
comm.synchronize()
top1_acc, top5_acc, loss_avg = map(_meter_reduce if distributed else lambda x: x.avg,
[top1, top5, losses])
if real_labels and not distributed:
real_top1 = real_labels.get_accuracy(k=1)
real_top5 = real_labels.get_accuracy(k=5)
msg = ('=> TEST using Reassessed labels:\t'
'Error@1 {error1:.3f}%\t'
'Error@5 {error5:.3f}%\t'
'Accuracy@1 {top1:.3f}%\t'
'Accuracy@5 {top5:.3f}%\t'
.format(top1=real_top1, top5=real_top5,
error1=100 - real_top1,
error5=100 - real_top5))
logging.info(msg)
if comm.is_main_process():
msg = ('=> TEST:\t'
'Loss {loss_avg:.4f}\t'
'Error@1 {error1:.3f}%\t'
'Error@5 {error5:.3f}%\t'
'Accuracy@1 {top1:.3f}%\t'
'Accuracy@5 {top5:.3f}%\t'
.format(loss_avg=loss_avg, top1=top1_acc,
top5=top5_acc, error1=100 - top1_acc,
error5=100 - top5_acc))
logging.info(msg)
if writer_dict and comm.is_main_process():
writer = writer_dict['writer']
global_steps = writer_dict['valid_global_steps']
writer.add_scalar('valid_loss', loss_avg, global_steps)
writer.add_scalar('valid_top1', top1_acc, global_steps)
writer_dict['valid_global_steps'] = global_steps + 1
logging.info('=> switch to train mode')
model.train()
return top1_acc
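# _meter_reduce (below) sums a meter's running total and count across all workers via
# paddle.distributed.reduce to rank 0 and returns the resulting global average; on ranks other
# than the destination the reduced tensors are not guaranteed to hold the global result, so the
# returned value is typically only meaningful on rank 0.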
def _meter_reduce(meter):
rank = comm.local_rank
meter_sum = paddle.to_tensor([meter.sum], dtype=paddle.float32)#.cuda(rank)
meter_count = paddle.to_tensor([meter.count], dtype=paddle.float32)#.cuda(rank)
paddle.distributed.reduce(meter_sum, 0)
paddle.distributed.reduce(meter_count, 0)
meter_avg = meter_sum / meter_count
return meter_avg.item()
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
|
[
"core.evaluate.accuracy",
"paddle.argmax",
"paddle.no_grad",
"paddle.amp.auto_cast",
"time.time",
"logging.info",
"utils.comm.comm.synchronize",
"paddle.distributed.reduce",
"utils.comm.comm.is_main_process",
"paddle.to_tensor"
] |
[((3523, 3539), 'paddle.no_grad', 'paddle.no_grad', ([], {}), '()\n', (3537, 3539), False, 'import paddle\n'), ((598, 637), 'logging.info', 'logging.info', (['"""=> switch to train mode"""'], {}), "('=> switch to train mode')\n", (610, 637), False, 'import logging\n'), ((1108, 1119), 'time.time', 'time.time', ([], {}), '()\n', (1117, 1119), False, 'import time\n'), ((3830, 3868), 'logging.info', 'logging.info', (['"""=> switch to eval mode"""'], {}), "('=> switch to eval mode')\n", (3842, 3868), False, 'import logging\n'), ((3900, 3911), 'time.time', 'time.time', ([], {}), '()\n', (3909, 3911), False, 'import time\n'), ((4460, 4493), 'logging.info', 'logging.info', (['"""=> synchronize..."""'], {}), "('=> synchronize...')\n", (4472, 4493), False, 'import logging\n'), ((4499, 4517), 'utils.comm.comm.synchronize', 'comm.synchronize', ([], {}), '()\n', (4515, 4517), False, 'from utils.comm import comm\n'), ((5229, 5251), 'utils.comm.comm.is_main_process', 'comm.is_main_process', ([], {}), '()\n', (5249, 5251), False, 'from utils.comm import comm\n'), ((6035, 6074), 'logging.info', 'logging.info', (['"""=> switch to train mode"""'], {}), "('=> switch to train mode')\n", (6047, 6074), False, 'import logging\n'), ((6193, 6244), 'paddle.to_tensor', 'paddle.to_tensor', (['[meter.sum]'], {'dtype': 'paddle.float32'}), '([meter.sum], dtype=paddle.float32)\n', (6209, 6244), False, 'import paddle\n'), ((6276, 6329), 'paddle.to_tensor', 'paddle.to_tensor', (['[meter.count]'], {'dtype': 'paddle.float32'}), '([meter.count], dtype=paddle.float32)\n', (6292, 6329), False, 'import paddle\n'), ((6347, 6386), 'paddle.distributed.reduce', 'paddle.distributed.reduce', (['meter_sum', '(0)'], {}), '(meter_sum, 0)\n', (6372, 6386), False, 'import paddle\n'), ((6392, 6433), 'paddle.distributed.reduce', 'paddle.distributed.reduce', (['meter_count', '(0)'], {}), '(meter_count, 0)\n', (6417, 6433), False, 'import paddle\n'), ((2017, 2045), 'core.evaluate.accuracy', 'accuracy', (['outputs', 'y', '(1, 5)'], {}), '(outputs, y, (1, 5))\n', (2025, 2045), False, 'from core.evaluate import accuracy\n'), ((2191, 2202), 'time.time', 'time.time', ([], {}), '()\n', (2200, 2202), False, 'import time\n'), ((3201, 3223), 'utils.comm.comm.is_main_process', 'comm.is_main_process', ([], {}), '()\n', (3221, 3223), False, 'from utils.comm import comm\n'), ((4269, 4297), 'core.evaluate.accuracy', 'accuracy', (['outputs', 'y', '(1, 5)'], {}), '(outputs, y, (1, 5))\n', (4277, 4297), False, 'from core.evaluate import accuracy\n'), ((4441, 4452), 'time.time', 'time.time', ([], {}), '()\n', (4450, 4452), False, 'import time\n'), ((5201, 5218), 'logging.info', 'logging.info', (['msg'], {}), '(msg)\n', (5213, 5218), False, 'import logging\n'), ((5670, 5687), 'logging.info', 'logging.info', (['msg'], {}), '(msg)\n', (5682, 5687), False, 'import logging\n'), ((5714, 5736), 'utils.comm.comm.is_main_process', 'comm.is_main_process', ([], {}), '()\n', (5734, 5736), False, 'from utils.comm import comm\n'), ((1287, 1323), 'paddle.amp.auto_cast', 'auto_cast', ([], {'enable': 'config.AMP.ENABLED'}), '(enable=config.AMP.ENABLED)\n', (1296, 1323), False, 'from paddle.amp import auto_cast\n'), ((1968, 1992), 'paddle.argmax', 'paddle.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (1981, 1992), False, 'import paddle\n'), ((3157, 3174), 'logging.info', 'logging.info', (['msg'], {}), '(msg)\n', (3169, 3174), False, 'import logging\n'), ((1193, 1204), 'time.time', 'time.time', ([], {}), '()\n', (1202, 1204), False, 'import time\n'), ((2157, 2168), 
'time.time', 'time.time', ([], {}), '()\n', (2166, 2168), False, 'import time\n'), ((4407, 4418), 'time.time', 'time.time', ([], {}), '()\n', (4416, 4418), False, 'import time\n')]
|
from localtileserver import examples
def test_get_blue_marble():
client = examples.get_blue_marble()
assert client.metadata()
def test_get_virtual_earth():
client = examples.get_virtual_earth()
assert client.metadata()
def test_get_arcgis():
client = examples.get_arcgis()
assert client.metadata()
def test_get_elevation():
client = examples.get_elevation()
assert client.metadata()
def test_get_bahamas():
client = examples.get_bahamas()
assert client.metadata()
def test_get_pine_gulch():
client = examples.get_pine_gulch()
assert client.metadata()
def test_get_landsat():
client = examples.get_landsat()
assert client.metadata()
def test_get_san_francisco():
client = examples.get_san_francisco()
assert client.metadata()
def test_get_oam2():
client = examples.get_oam2()
assert client.metadata()
def test_get_elevation_us():
client = examples.get_elevation_us()
assert client.metadata()
|
[
"localtileserver.examples.get_bahamas",
"localtileserver.examples.get_elevation_us",
"localtileserver.examples.get_blue_marble",
"localtileserver.examples.get_san_francisco",
"localtileserver.examples.get_oam2",
"localtileserver.examples.get_elevation",
"localtileserver.examples.get_pine_gulch",
"localtileserver.examples.get_landsat",
"localtileserver.examples.get_arcgis",
"localtileserver.examples.get_virtual_earth"
] |
[((80, 106), 'localtileserver.examples.get_blue_marble', 'examples.get_blue_marble', ([], {}), '()\n', (104, 106), False, 'from localtileserver import examples\n'), ((181, 209), 'localtileserver.examples.get_virtual_earth', 'examples.get_virtual_earth', ([], {}), '()\n', (207, 209), False, 'from localtileserver import examples\n'), ((277, 298), 'localtileserver.examples.get_arcgis', 'examples.get_arcgis', ([], {}), '()\n', (296, 298), False, 'from localtileserver import examples\n'), ((369, 393), 'localtileserver.examples.get_elevation', 'examples.get_elevation', ([], {}), '()\n', (391, 393), False, 'from localtileserver import examples\n'), ((462, 484), 'localtileserver.examples.get_bahamas', 'examples.get_bahamas', ([], {}), '()\n', (482, 484), False, 'from localtileserver import examples\n'), ((556, 581), 'localtileserver.examples.get_pine_gulch', 'examples.get_pine_gulch', ([], {}), '()\n', (579, 581), False, 'from localtileserver import examples\n'), ((650, 672), 'localtileserver.examples.get_landsat', 'examples.get_landsat', ([], {}), '()\n', (670, 672), False, 'from localtileserver import examples\n'), ((747, 775), 'localtileserver.examples.get_san_francisco', 'examples.get_san_francisco', ([], {}), '()\n', (773, 775), False, 'from localtileserver import examples\n'), ((841, 860), 'localtileserver.examples.get_oam2', 'examples.get_oam2', ([], {}), '()\n', (858, 860), False, 'from localtileserver import examples\n'), ((934, 961), 'localtileserver.examples.get_elevation_us', 'examples.get_elevation_us', ([], {}), '()\n', (959, 961), False, 'from localtileserver import examples\n')]
|
"""Added location to proposal
Revision ID: 4dbf686f4380
Revises: <PASSWORD>
Create Date: 2013-11-08 23:35:43.433963
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '1<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('proposal', sa.Column('location', sa.Unicode(length=80), server_default=sa.text(u"''"), nullable=False))
op.alter_column('proposal', 'location', server_default=None)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('proposal', 'location')
### end Alembic commands ###
|
[
"alembic.op.drop_column",
"alembic.op.alter_column",
"sqlalchemy.text",
"sqlalchemy.Unicode"
] |
[((469, 529), 'alembic.op.alter_column', 'op.alter_column', (['"""proposal"""', '"""location"""'], {'server_default': 'None'}), "('proposal', 'location', server_default=None)\n", (484, 529), False, 'from alembic import op\n'), ((650, 688), 'alembic.op.drop_column', 'op.drop_column', (['"""proposal"""', '"""location"""'], {}), "('proposal', 'location')\n", (664, 688), False, 'from alembic import op\n'), ((394, 415), 'sqlalchemy.Unicode', 'sa.Unicode', ([], {'length': '(80)'}), '(length=80)\n', (404, 415), True, 'import sqlalchemy as sa\n'), ((432, 446), 'sqlalchemy.text', 'sa.text', (['u"""\'\'"""'], {}), '(u"\'\'")\n', (439, 446), True, 'import sqlalchemy as sa\n')]
|
import functools
from typing import List
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
"""
    Same idea as LC96: we can generate the trees recursively.
    If the root of a tree is i, then
    the left subtree is built from the sequence [start ... i - 1]
    and the right subtree from the sequence [i + 1 ... end].
    A cache can be used to improve performance.
"""
def generateTrees(self, n: int) -> List[TreeNode]:
if not n:
return []
return self.generate_subtrees(1, n)
@functools.lru_cache(None)
def generate_subtrees(self, start, end):
res = []
if end < start:
return [None]
for i in range(start, end + 1):
# More concise than declare left/right list. Have same performance.
for left in self.generate_subtrees(start, i - 1):
for right in self.generate_subtrees(i + 1, end):
node = TreeNode(i)
node.left = left
node.right = right
res.append(node)
return res
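# Quick sanity check (illustrative): for n = 3 the method returns the 5 structurally unique
# BSTs that can be built from values 1..3 (the Catalan number C_3), i.e.
# len(Solution().generateTrees(3)) == 5.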
|
[
"functools.lru_cache"
] |
[((604, 629), 'functools.lru_cache', 'functools.lru_cache', (['None'], {}), '(None)\n', (623, 629), False, 'import functools\n')]
|
import os
from pipeline_tools.shared import http_requests
from pipeline_tools.tests.http_requests_manager import HttpRequestsManager
class TestHttpRequestsManager(object):
def test_enter_creates_directory(self):
with HttpRequestsManager() as temp_dir:
assert os.path.isdir(temp_dir) is True
def test_exit_deletes_directory(self):
with HttpRequestsManager() as temp_dir:
temp_dir_name = temp_dir
assert os.path.isdir(temp_dir_name) is True
assert os.path.isdir(temp_dir) is False
def test_enter_sets_environment_vars(self):
with HttpRequestsManager() as temp_dir:
assert http_requests.HTTP_RECORD_DIR in os.environ
assert os.environ[http_requests.HTTP_RECORD_DIR] == temp_dir
assert http_requests.RECORD_HTTP_REQUESTS in os.environ
assert os.environ[http_requests.RECORD_HTTP_REQUESTS] == 'true'
assert http_requests.RETRY_MAX_TRIES in os.environ
assert os.environ[http_requests.RETRY_MAX_TRIES] == '3'
assert http_requests.RETRY_MAX_INTERVAL in os.environ
assert os.environ[http_requests.RETRY_MAX_INTERVAL] == '10'
assert http_requests.RETRY_TIMEOUT in os.environ
assert os.environ[http_requests.RETRY_TIMEOUT] == '1'
assert http_requests.RETRY_MULTIPLIER in os.environ
assert os.environ[http_requests.RETRY_MULTIPLIER] == '0.01'
assert http_requests.INDIVIDUAL_REQUEST_TIMEOUT in os.environ
assert os.environ[http_requests.INDIVIDUAL_REQUEST_TIMEOUT] == '1'
def test_exit_deletes_environment_var(self):
with HttpRequestsManager() as temp_dir:
pass
assert http_requests.HTTP_RECORD_DIR not in os.environ
assert http_requests.RECORD_HTTP_REQUESTS not in os.environ
assert http_requests.RETRY_MAX_TRIES not in os.environ
assert http_requests.RETRY_MAX_INTERVAL not in os.environ
assert http_requests.RETRY_TIMEOUT not in os.environ
assert http_requests.RETRY_MULTIPLIER not in os.environ
assert http_requests.INDIVIDUAL_REQUEST_TIMEOUT not in os.environ
|
[
"os.path.isdir",
"pipeline_tools.tests.http_requests_manager.HttpRequestsManager"
] |
[((232, 253), 'pipeline_tools.tests.http_requests_manager.HttpRequestsManager', 'HttpRequestsManager', ([], {}), '()\n', (251, 253), False, 'from pipeline_tools.tests.http_requests_manager import HttpRequestsManager\n'), ((375, 396), 'pipeline_tools.tests.http_requests_manager.HttpRequestsManager', 'HttpRequestsManager', ([], {}), '()\n', (394, 396), False, 'from pipeline_tools.tests.http_requests_manager import HttpRequestsManager\n'), ((518, 541), 'os.path.isdir', 'os.path.isdir', (['temp_dir'], {}), '(temp_dir)\n', (531, 541), False, 'import os\n'), ((613, 634), 'pipeline_tools.tests.http_requests_manager.HttpRequestsManager', 'HttpRequestsManager', ([], {}), '()\n', (632, 634), False, 'from pipeline_tools.tests.http_requests_manager import HttpRequestsManager\n'), ((1676, 1697), 'pipeline_tools.tests.http_requests_manager.HttpRequestsManager', 'HttpRequestsManager', ([], {}), '()\n', (1695, 1697), False, 'from pipeline_tools.tests.http_requests_manager import HttpRequestsManager\n'), ((286, 309), 'os.path.isdir', 'os.path.isdir', (['temp_dir'], {}), '(temp_dir)\n', (299, 309), False, 'import os\n'), ((466, 494), 'os.path.isdir', 'os.path.isdir', (['temp_dir_name'], {}), '(temp_dir_name)\n', (479, 494), False, 'import os\n')]
|
"""
Write a program that shows on the screen a countdown to fireworks going off,
counting from 10 down to 0 with a one-second pause between numbers.
"""
# import the library used for the pause
from time import sleep
print("contagem regressiva para os fogos!!!!")
for a in range(10,0,-1):
print(a)
sleep(1)
print("FOGOS")
|
[
"time.sleep"
] |
[((315, 323), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (320, 323), False, 'from time import sleep\n')]
|
from __future__ import unicode_literals
from django import forms
from django.contrib.auth.models import User
from django.db import models
from ckeditor_uploader.fields import RichTextUploadingField
# Create your models here.
from django.utils.safestring import mark_safe
class Main(models.Model):
STATUS = (
        ('True', 'Yes'),
        ('False', 'No'),
)
name = models.CharField(default='', max_length=40)
status = models.CharField(default='', max_length=40, choices=STATUS)
title = models.CharField(max_length=15)
about = RichTextUploadingField()
keyword = models.TextField(max_length=10000)
description = models.TextField(max_length=10000)
company = models.TextField(max_length=10000)
smtpserver = models.CharField(max_length=44)
smtpemail = models.CharField(max_length=44)
smtpPassword = models.CharField(max_length=150)
smtpPort = models.CharField(max_length=150)
pagefa = models.CharField(max_length=150)
pagetw = models.CharField(max_length=150)
pageyt = models.CharField(max_length=105)
pageLink = models.CharField(max_length=150)
pageTe = models.CharField(max_length=20,default=0)
icon = models.ImageField(blank=True,upload_to='images/')
name_set = models.CharField(max_length=20,default='-')
def __str__(self):
return self.name_set + " ||" +str(self.pk)
class UserProfile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
phone = models.CharField(blank=True,max_length=20)
address = models.CharField(blank=True,max_length=202)
city = models.CharField(blank=True,max_length=20)
country= models.CharField(blank=True,max_length=20)
image = models.ImageField(upload_to="images/profile_images/", blank=True)
def __str__(self):
return self.user.username
@property
def use_name(self):
        return self.user.username
@property
def image_tag(self):
if self.image is None:
return ''
self.image.short_description = 'Image'
return mark_safe('<img src="{}" width="50" height="50" />'.format(self.image.url))
class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ('phone', 'address', 'city', 'country','image')
class FAQ(models.Model):
STATUS = (
        ('True', 'Yes'),
        ('False', 'No'),
)
orderNumber = models.IntegerField()
question = models.CharField(default='', max_length=150)
answer = models.TextField(max_length=1000)
status = models.CharField(default='', max_length=40, choices=STATUS)
create_at = models.DateTimeField(auto_now_add=True)
update_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.question
|
[
"django.db.models.TextField",
"django.db.models.OneToOneField",
"django.db.models.CharField",
"ckeditor_uploader.fields.RichTextUploadingField",
"django.db.models.ImageField",
"django.db.models.IntegerField",
"django.db.models.DateTimeField"
] |
[((387, 430), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(40)'}), "(default='', max_length=40)\n", (403, 430), False, 'from django.db import models\n'), ((444, 503), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(40)', 'choices': 'STATUS'}), "(default='', max_length=40, choices=STATUS)\n", (460, 503), False, 'from django.db import models\n'), ((517, 548), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(15)'}), '(max_length=15)\n', (533, 548), False, 'from django.db import models\n'), ((562, 586), 'ckeditor_uploader.fields.RichTextUploadingField', 'RichTextUploadingField', ([], {}), '()\n', (584, 586), False, 'from ckeditor_uploader.fields import RichTextUploadingField\n'), ((601, 635), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(10000)'}), '(max_length=10000)\n', (617, 635), False, 'from django.db import models\n'), ((655, 689), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(10000)'}), '(max_length=10000)\n', (671, 689), False, 'from django.db import models\n'), ((705, 739), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(10000)'}), '(max_length=10000)\n', (721, 739), False, 'from django.db import models\n'), ((758, 789), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(44)'}), '(max_length=44)\n', (774, 789), False, 'from django.db import models\n'), ((807, 838), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(44)'}), '(max_length=44)\n', (823, 838), False, 'from django.db import models\n'), ((858, 890), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (874, 890), False, 'from django.db import models\n'), ((907, 939), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (923, 939), False, 'from django.db import models\n'), ((954, 986), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (970, 986), False, 'from django.db import models\n'), ((1001, 1033), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (1017, 1033), False, 'from django.db import models\n'), ((1048, 1080), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(105)'}), '(max_length=105)\n', (1064, 1080), False, 'from django.db import models\n'), ((1097, 1129), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (1113, 1129), False, 'from django.db import models\n'), ((1143, 1185), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'default': '(0)'}), '(max_length=20, default=0)\n', (1159, 1185), False, 'from django.db import models\n'), ((1197, 1247), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'upload_to': '"""images/"""'}), "(blank=True, upload_to='images/')\n", (1214, 1247), False, 'from django.db import models\n'), ((1264, 1308), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'default': '"""-"""'}), "(max_length=20, default='-')\n", (1280, 1308), False, 'from django.db import models\n'), ((1430, 1482), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (1450, 1482), False, 'from django.db import models\n'), ((1495, 1538), 
'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(20)'}), '(blank=True, max_length=20)\n', (1511, 1538), False, 'from django.db import models\n'), ((1552, 1596), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(202)'}), '(blank=True, max_length=202)\n', (1568, 1596), False, 'from django.db import models\n'), ((1607, 1650), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(20)'}), '(blank=True, max_length=20)\n', (1623, 1650), False, 'from django.db import models\n'), ((1663, 1706), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(20)'}), '(blank=True, max_length=20)\n', (1679, 1706), False, 'from django.db import models\n'), ((1718, 1783), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""images/profile_images/"""', 'blank': '(True)'}), "(upload_to='images/profile_images/', blank=True)\n", (1735, 1783), False, 'from django.db import models\n'), ((2422, 2443), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (2441, 2443), False, 'from django.db import models\n'), ((2459, 2503), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(150)'}), "(default='', max_length=150)\n", (2475, 2503), False, 'from django.db import models\n'), ((2517, 2550), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(1000)'}), '(max_length=1000)\n', (2533, 2550), False, 'from django.db import models\n'), ((2564, 2623), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(40)', 'choices': 'STATUS'}), "(default='', max_length=40, choices=STATUS)\n", (2580, 2623), False, 'from django.db import models\n'), ((2640, 2679), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (2660, 2679), False, 'from django.db import models\n'), ((2696, 2735), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (2716, 2735), False, 'from django.db import models\n')]
|
import numpy as np
from PIL import Image
import time
import cv2
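# NOTE: this script appears to implement PatchMatch-style image completion: the user drags a
# rectangle over the region to be removed, and NNS / propagation / random_search then build a
# nearest-neighbour field from the rest of the image to fill that region in, coarse to fine.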
global img
global point1, point2
global min_x, min_y, width, height, max_x, max_y
def on_mouse(event, x, y, flags, param):
global img, point1, point2, min_x, min_y, width, height, max_x, max_y
img2 = img.copy()
    if event == cv2.EVENT_LBUTTONDOWN:  # left mouse button pressed
point1 = (x, y)
cv2.circle(img2, point1, 10, (0, 255, 0), 2)
cv2.imshow('image', img2)
    elif event == cv2.EVENT_MOUSEMOVE and (flags & cv2.EVENT_FLAG_LBUTTON):  # dragging with the left button held down
cv2.rectangle(img2, point1, (x, y), (255, 0, 0), 2)
cv2.imshow('image', img2)
    elif event == cv2.EVENT_LBUTTONUP:  # left mouse button released
point2 = (x, y)
cv2.rectangle(img2, point1, point2, (0, 0, 255), 2)
cv2.imshow('image', img2)
min_y = min(point1[0], point2[0])
min_x = min(point1[1], point2[1])
width = abs(point1[0] - point2[0])
height = abs(point1[1] - point2[1])
max_x = min_x + height
max_y = min_y + width
def overlap_restricted_area(x, y, patch_size, min_x, max_x, min_y, max_y):
dx0 = dy0 = patch_size // 2
minx1 = x - dx0
miny1 = y - dy0
maxx1 = x + dx0
maxy1 = y + dy0
minx2 = min_x
miny2 = min_y
maxx2 = max_x
maxy2 = max_y
minx = max(minx1, minx2)
miny = max(miny1, miny2)
maxx = min(maxx1, maxx2)
maxy = min(maxy1, maxy2)
if minx > maxx or miny > maxy:
return False
else:
return True
def cal_distance(a, b, A_padding, B, p_size):
p = p_size // 2
patch_a = A_padding[a[0]:a[0] + p_size, a[1]:a[1] + p_size, :]
patch_b = B[b[0] - p:b[0] + p + 1, b[1] - p:b[1] + p + 1, :]
temp = patch_b - patch_a
num = np.sum(1 - np.int32(np.isnan(temp)))
dist = np.sum(np.square(np.nan_to_num(temp))) / num
return dist
def cal_alpha(dis, gamma=2.0):
return gamma ** (-dis)
def reconstruction(f, A, B, p_size, dist, min_x, max_x, min_y, max_y, itter):
A_h = np.size(A, 0)
A_w = np.size(A, 1)
B_h = np.size(B, 0)
B_w = np.size(B, 1)
temp = np.zeros_like(A)
p = p_size // 2
for i in range(A_h):
for j in range(A_w):
cnt = 0
ans = np.zeros(3)
for m in range(-p, p + 1, 1):
for n in range(-p, p + 1, 1):
if not ((0 <= i + m < A_h) and (0 <= j + n < A_w)):
continue
if not ((0 <= f[i + m][j + n][0] - m < B_h) and (0 <= f[i + m][j + n][1] - n < B_w)):
continue
if overlap_restricted_area(f[i + m][j + n][0] - m, f[i + m][j + n][1] - n, p_size, min_x, max_x,
min_y,
max_y):
continue
alpha = cal_alpha(dis=dist[i + m, j + n])
cnt += alpha
ans += alpha * B[f[i + m][j + n][0] - m, f[i + m][j + n][1] - n, :]
temp[i, j, :] = ans / cnt
tmp = np.copy(B)
# temp = cv2.GaussianBlur(temp, (3, 3), 0)
tmp[min_x:min_x + A_h, min_y:min_y + A_w, :] = temp
# Image.fromarray(tmp).show()
return tmp, temp
def initialization(A, B, f, p_size, min_x, max_x, min_y, max_y, create_f=False):
A_h = np.size(A, 0)
A_w = np.size(A, 1)
B_h = np.size(B, 0)
B_w = np.size(B, 1)
p = p_size // 2
# A_padding = np.ones([A_h+p*2, A_w+p*2, 3]) * np.nan
A_padding = B[min_x - p:min_x + A_h + p, min_y - p:min_y + A_w + p, :]
A_padding[p:A_h + p, p:A_w + p, :] = A
random_B_r = np.random.randint(p, B_h - p, [A_h, A_w])
random_B_c = np.random.randint(p, B_w - p, [A_h, A_w])
for i in range(A_h):
for j in range(A_w):
while overlap_restricted_area(random_B_r[i][j], random_B_c[i][j], p_size, min_x, max_x, min_y, max_y):
random_B_r[i][j] = np.random.randint(p, B_h - p)
random_B_c[i][j] = np.random.randint(p, B_w - p)
if create_f:
f = np.zeros([A_h, A_w], dtype=object)
dist = np.zeros([A_h, A_w])
for i in range(A_h):
for j in range(A_w):
a = np.array([i, j])
if create_f:
b = np.array([random_B_r[i, j], random_B_c[i, j]], dtype=np.int32)
f[i, j] = b
else:
b = np.array([random_B_r[i, j], random_B_c[i, j]], dtype=np.int32)
if (i % 2 == 0) or (j % 2 == 0):
f[i, j] = b
else:
b = f[i, j]
dist[i, j] = cal_distance(a, b, A_padding, B, p_size)
return f, dist, A_padding
def propagation(f, a, dist, A_padding, B, p_size, is_odd, min_x, max_x, min_y, max_y):
A_h = np.size(A_padding, 0) - p_size + 1
A_w = np.size(A_padding, 1) - p_size + 1
# print(A_h, A_w)
x = a[0]
y = a[1]
if is_odd:
d_left = dist[max(x - 1, 0), y]
d_up = dist[x, max(y - 1, 0)]
d_current = dist[x, y]
idx = np.argmin(np.array([d_current, d_left, d_up]))
if idx == 1 and (not overlap_restricted_area(f[max(x - 1, 0), y][0] + 1, f[max(x - 1, 0), y][1], p_size,
min_x, max_x, min_y, max_y)):
f[x, y] = f[max(x - 1, 0), y]
dist[x, y] = cal_distance(a, f[x, y], A_padding, B, p_size)
if idx == 2 and (not overlap_restricted_area(f[x, max(y - 1, 0)][0], f[x, max(y - 1, 0)][1] + 1, p_size,
min_x, max_x, min_y, max_y)):
f[x, y] = f[x, max(y - 1, 0)]
dist[x, y] = cal_distance(a, f[x, y], A_padding, B, p_size)
else:
# print(dist.shape)
# print(min(x + 1, A_h - 1), y)
d_right = dist[min(x + 1, A_h - 1), y]
d_down = dist[x, min(y + 1, A_w - 1)]
d_current = dist[x, y]
idx = np.argmin(np.array([d_current, d_right, d_down]))
if idx == 1 and (
not overlap_restricted_area(f[min(x + 1, A_h - 1), y][0] - 1, f[min(x + 1, A_h - 1), y][1], p_size,
min_x, max_x, min_y, max_y)):
f[x, y] = f[min(x + 1, A_h - 1), y]
dist[x, y] = cal_distance(a, f[x, y], A_padding, B, p_size)
if idx == 2 and (
not overlap_restricted_area(f[x, min(y + 1, A_w - 1)][0], f[x, min(y + 1, A_w - 1)][1] - 1, p_size,
min_x, max_x, min_y, max_y)):
f[x, y] = f[x, min(y + 1, A_w - 1)]
dist[x, y] = cal_distance(a, f[x, y], A_padding, B, p_size)
def random_search(f, a, dist, A_padding, B, p_size, min_x, max_x, min_y, max_y, alpha=0.5):
x = a[0]
y = a[1]
B_h = np.size(B, 0)
B_w = np.size(B, 1)
p = p_size // 2
i = 4
search_h = B_h * alpha ** i
search_w = B_w * alpha ** i
b_x = f[x, y][0]
b_y = f[x, y][1]
while search_h > 1 and search_w > 1:
search_min_r = max(b_x - search_h, p)
search_max_r = min(b_x + search_h, B_h - p)
random_b_x = np.random.randint(search_min_r, search_max_r)
search_min_c = max(b_y - search_w, p)
search_max_c = min(b_y + search_w, B_w - p)
random_b_y = np.random.randint(search_min_c, search_max_c)
search_h = B_h * alpha ** i
search_w = B_w * alpha ** i
b = np.array([random_b_x, random_b_y])
d = cal_distance(a, b, A_padding, B, p_size)
if d < dist[x, y] and (not overlap_restricted_area(b[0], b[1], p_size, min_x, max_x, min_y, max_y)):
dist[x, y] = d
f[x, y] = b
i += 1
def NNS(img, ref, p_size, itr, f, dist, img_padding, min_x, max_x, min_y, max_y):
A_h = np.size(img, 0)
A_w = np.size(img, 1)
# print(A_h, A_w)
# print(img_padding.shape)
for itr in range(1, itr + 1):
if itr % 2 == 0:
for i in range(A_h - 1, -1, -1):
for j in range(A_w - 1, -1, -1):
a = np.array([i, j])
propagation(f, a, dist, img_padding, ref, p_size, False, min_x, max_x, min_y, max_y)
random_search(f, a, dist, img_padding, ref, p_size, min_x, max_x, min_y, max_y)
else:
for i in range(A_h):
for j in range(A_w):
a = np.array([i, j])
propagation(f, a, dist, img_padding, ref, p_size, True, min_x, max_x, min_y, max_y)
random_search(f, a, dist, img_padding, ref, p_size, min_x, max_x, min_y, max_y)
print("iteration: %d" % (itr))
return f
def upsample_nnf(nnf):
temp = np.zeros((nnf.shape[0], nnf.shape[1], 3))
for x in range(nnf.shape[0]):
for y in range(nnf.shape[1]):
temp[x][y] = [nnf[x][y][0], nnf[x][y][1], 0]
# img = np.zeros(shape=(size, size, 2), dtype=np.int)
# small_size = nnf.shape[0]
aw_ratio = 2 # ((size) // small_size)
ah_ratio = 2 # ((size) // small_size)
temp = cv2.resize(temp, None, fx=aw_ratio, fy=aw_ratio, interpolation=cv2.INTER_NEAREST)
imge = np.zeros(shape=(temp.shape[0], temp.shape[1], 2), dtype=np.int)
for i in range(temp.shape[0]):
for j in range(temp.shape[1]):
pos = temp[i, j]
imge[i, j] = pos[0] * aw_ratio, pos[1] * ah_ratio
return imge
padding_size = [15, 15, 13, 9, 5, 2]
# padding_size = [9, 7, 5, 3, 3, 2]
iter_arr = [2, 2, 16, 40, 64, 64]
def main(img_path):
# img_path = 'IMAGE/face.jpg'
global img
img = cv2.imread(img_path)
cv2.namedWindow('image')
cv2.setMouseCallback('image', on_mouse)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# print(min_x, min_y, height, width)
global_min_x = min_x
global_min_y = min_y
global_max_x = max_x
global_max_y = max_y
# img = np.array(Image.open("./cup_a.jpg"))
origin_ref = np.array(Image.open(img_path))
# ref = cv2.pyrDown(origin_ref, (np.size(origin_ref, 0)//2, np.size(origin_ref, 1)//2))
# Image.fromarray(ref).show()
itr = 4
start = time.time()
# origin_img = origin_ref[min_x: max_x + 1, min_y:max_y + 1, :]
# img = cv2.resize(origin_img, None, fx=2 ** (-4), fy=2 ** (-4), interpolation=cv2.INTER_NEAREST)
f = 0
depth = 3
for l in range(depth, -1, -1):
p_size = padding_size[l]
gmin_x = global_min_x // (2 ** l)
gmin_y = global_min_y // (2 ** l)
gmax_x = global_max_x // (2 ** l)
gmax_y = global_max_y // (2 ** l)
# print(origin_ref.shape)
# ref = cv2.resize(origin_ref, None, fx=2 ** (-l), fy=2 ** (-l), interpolation=cv2.INTER_LINEAR)
ref = origin_ref
for kk in range(l):
ref = cv2.pyrDown(ref, (np.size(origin_ref, 0) // 2, np.size(origin_ref, 1) // 2))
# print(ref.shape)
# print(gmin_x, gmin_y, gmax_x, gmax_y)
# !!!!!!!!!
img = ref[gmin_x: gmax_x + 1, gmin_y:gmax_y + 1, :]
# !!!!!!!!!
if l == depth:
# img = ref[gmin_x: gmax_x + 1, gmin_y:gmax_y + 1, :]
# img = np.zeros([gmax_x - gmin_x + 1, gmax_y - gmin_y + 1, 3])
# !!!!!!!!!!
# img = np.random.randint(0, 256, size=(gmax_x - gmin_x + 1, gmax_y - gmin_y + 1, 3), dtype=np.uint8)
# !!!!!!!!!!
# print(np.shape(img)[0] // 4)
f, dist, img_padding = initialization(img, ref, f, p_size, gmin_x, gmax_x, gmin_y, gmax_y, create_f=True)
else:
# print(img.shape)
fake, dist, img_padding = initialization(img, ref, f, p_size, gmin_x, gmax_x, gmin_y, gmax_y,
create_f=False)
# Image.fromarray(ref).show()
# Image.fromarray(img).show()
# print(img.shape)
# print(img_padding.shape)
for itter in range(iter_arr[l]):
f = NNS(img, ref, p_size, itr, f, dist, img_padding, gmin_x, gmax_x, gmin_y, gmax_y)
end = time.time()
print(end - start)
print(l, itter + 1, '/', iter_arr[l])
tmp, img = reconstruction(f, img, ref, p_size, dist, gmin_x, gmax_x, gmin_y, gmax_y, itter)
# if itter == iter_arr[l] - 1:
# Image.fromarray(tmp).show()
# img = cv2.resize(img, None, fx=2, fy=2, interpolation=cv2.INTER_LINEAR)
# Image.fromarray(img).show()
img = cv2.pyrUp(img, (np.size(img, 0) * 2, np.size(img, 1) * 2))
f = upsample_nnf(f)
# Image.fromarray(img).show()
tmp = Image.fromarray(tmp)
tmp.save("temp.jpg")
return "temp.jpg"
if __name__ == '__main__':
img_path = 'D://project//Image_Completion//IMAGE//face.jpg'
# img_path = 'D://project//Image_Completion//IMAGE//birds.jpg'
while True:
img_path = main(img_path)
|
[
"numpy.nan_to_num",
"numpy.isnan",
"numpy.random.randint",
"cv2.rectangle",
"cv2.imshow",
"numpy.zeros_like",
"numpy.copy",
"cv2.setMouseCallback",
"cv2.destroyAllWindows",
"cv2.resize",
"numpy.size",
"cv2.circle",
"cv2.waitKey",
"numpy.zeros",
"time.time",
"PIL.Image.open",
"cv2.imread",
"numpy.array",
"PIL.Image.fromarray",
"cv2.namedWindow"
] |
[((1982, 1995), 'numpy.size', 'np.size', (['A', '(0)'], {}), '(A, 0)\n', (1989, 1995), True, 'import numpy as np\n'), ((2006, 2019), 'numpy.size', 'np.size', (['A', '(1)'], {}), '(A, 1)\n', (2013, 2019), True, 'import numpy as np\n'), ((2030, 2043), 'numpy.size', 'np.size', (['B', '(0)'], {}), '(B, 0)\n', (2037, 2043), True, 'import numpy as np\n'), ((2054, 2067), 'numpy.size', 'np.size', (['B', '(1)'], {}), '(B, 1)\n', (2061, 2067), True, 'import numpy as np\n'), ((2079, 2095), 'numpy.zeros_like', 'np.zeros_like', (['A'], {}), '(A)\n', (2092, 2095), True, 'import numpy as np\n'), ((3042, 3052), 'numpy.copy', 'np.copy', (['B'], {}), '(B)\n', (3049, 3052), True, 'import numpy as np\n'), ((3304, 3317), 'numpy.size', 'np.size', (['A', '(0)'], {}), '(A, 0)\n', (3311, 3317), True, 'import numpy as np\n'), ((3328, 3341), 'numpy.size', 'np.size', (['A', '(1)'], {}), '(A, 1)\n', (3335, 3341), True, 'import numpy as np\n'), ((3352, 3365), 'numpy.size', 'np.size', (['B', '(0)'], {}), '(B, 0)\n', (3359, 3365), True, 'import numpy as np\n'), ((3376, 3389), 'numpy.size', 'np.size', (['B', '(1)'], {}), '(B, 1)\n', (3383, 3389), True, 'import numpy as np\n'), ((3603, 3644), 'numpy.random.randint', 'np.random.randint', (['p', '(B_h - p)', '[A_h, A_w]'], {}), '(p, B_h - p, [A_h, A_w])\n', (3620, 3644), True, 'import numpy as np\n'), ((3662, 3703), 'numpy.random.randint', 'np.random.randint', (['p', '(B_w - p)', '[A_h, A_w]'], {}), '(p, B_w - p, [A_h, A_w])\n', (3679, 3703), True, 'import numpy as np\n'), ((4078, 4098), 'numpy.zeros', 'np.zeros', (['[A_h, A_w]'], {}), '([A_h, A_w])\n', (4086, 4098), True, 'import numpy as np\n'), ((6754, 6767), 'numpy.size', 'np.size', (['B', '(0)'], {}), '(B, 0)\n', (6761, 6767), True, 'import numpy as np\n'), ((6778, 6791), 'numpy.size', 'np.size', (['B', '(1)'], {}), '(B, 1)\n', (6785, 6791), True, 'import numpy as np\n'), ((7740, 7755), 'numpy.size', 'np.size', (['img', '(0)'], {}), '(img, 0)\n', (7747, 7755), True, 'import numpy as np\n'), ((7766, 7781), 'numpy.size', 'np.size', (['img', '(1)'], {}), '(img, 1)\n', (7773, 7781), True, 'import numpy as np\n'), ((8651, 8692), 'numpy.zeros', 'np.zeros', (['(nnf.shape[0], nnf.shape[1], 3)'], {}), '((nnf.shape[0], nnf.shape[1], 3))\n', (8659, 8692), True, 'import numpy as np\n'), ((9010, 9096), 'cv2.resize', 'cv2.resize', (['temp', 'None'], {'fx': 'aw_ratio', 'fy': 'aw_ratio', 'interpolation': 'cv2.INTER_NEAREST'}), '(temp, None, fx=aw_ratio, fy=aw_ratio, interpolation=cv2.\n INTER_NEAREST)\n', (9020, 9096), False, 'import cv2\n'), ((9103, 9166), 'numpy.zeros', 'np.zeros', ([], {'shape': '(temp.shape[0], temp.shape[1], 2)', 'dtype': 'np.int'}), '(shape=(temp.shape[0], temp.shape[1], 2), dtype=np.int)\n', (9111, 9166), True, 'import numpy as np\n'), ((9539, 9559), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (9549, 9559), False, 'import cv2\n'), ((9564, 9588), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""'], {}), "('image')\n", (9579, 9588), False, 'import cv2\n'), ((9593, 9632), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""image"""', 'on_mouse'], {}), "('image', on_mouse)\n", (9613, 9632), False, 'import cv2\n'), ((9637, 9661), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (9647, 9661), False, 'import cv2\n'), ((9666, 9680), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (9677, 9680), False, 'import cv2\n'), ((9685, 9708), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (9706, 9708), False, 'import cv2\n'), ((10096, 10107), 
'time.time', 'time.time', ([], {}), '()\n', (10105, 10107), False, 'import time\n'), ((12563, 12583), 'PIL.Image.fromarray', 'Image.fromarray', (['tmp'], {}), '(tmp)\n', (12578, 12583), False, 'from PIL import Image\n'), ((365, 409), 'cv2.circle', 'cv2.circle', (['img2', 'point1', '(10)', '(0, 255, 0)', '(2)'], {}), '(img2, point1, 10, (0, 255, 0), 2)\n', (375, 409), False, 'import cv2\n'), ((418, 443), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img2'], {}), "('image', img2)\n", (428, 443), False, 'import cv2\n'), ((4032, 4066), 'numpy.zeros', 'np.zeros', (['[A_h, A_w]'], {'dtype': 'object'}), '([A_h, A_w], dtype=object)\n', (4040, 4066), True, 'import numpy as np\n'), ((7088, 7133), 'numpy.random.randint', 'np.random.randint', (['search_min_r', 'search_max_r'], {}), '(search_min_r, search_max_r)\n', (7105, 7133), True, 'import numpy as np\n'), ((7253, 7298), 'numpy.random.randint', 'np.random.randint', (['search_min_c', 'search_max_c'], {}), '(search_min_c, search_max_c)\n', (7270, 7298), True, 'import numpy as np\n'), ((7383, 7417), 'numpy.array', 'np.array', (['[random_b_x, random_b_y]'], {}), '([random_b_x, random_b_y])\n', (7391, 7417), True, 'import numpy as np\n'), ((9924, 9944), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (9934, 9944), False, 'from PIL import Image\n'), ((538, 589), 'cv2.rectangle', 'cv2.rectangle', (['img2', 'point1', '(x, y)', '(255, 0, 0)', '(2)'], {}), '(img2, point1, (x, y), (255, 0, 0), 2)\n', (551, 589), False, 'import cv2\n'), ((598, 623), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img2'], {}), "('image', img2)\n", (608, 623), False, 'import cv2\n'), ((2208, 2219), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2216, 2219), True, 'import numpy as np\n'), ((4169, 4185), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (4177, 4185), True, 'import numpy as np\n'), ((4753, 4774), 'numpy.size', 'np.size', (['A_padding', '(0)'], {}), '(A_padding, 0)\n', (4760, 4774), True, 'import numpy as np\n'), ((4798, 4819), 'numpy.size', 'np.size', (['A_padding', '(1)'], {}), '(A_padding, 1)\n', (4805, 4819), True, 'import numpy as np\n'), ((5029, 5064), 'numpy.array', 'np.array', (['[d_current, d_left, d_up]'], {}), '([d_current, d_left, d_up])\n', (5037, 5064), True, 'import numpy as np\n'), ((5912, 5950), 'numpy.array', 'np.array', (['[d_current, d_right, d_down]'], {}), '([d_current, d_right, d_down])\n', (5920, 5950), True, 'import numpy as np\n'), ((12008, 12019), 'time.time', 'time.time', ([], {}), '()\n', (12017, 12019), False, 'import time\n'), ((703, 754), 'cv2.rectangle', 'cv2.rectangle', (['img2', 'point1', 'point2', '(0, 0, 255)', '(2)'], {}), '(img2, point1, point2, (0, 0, 255), 2)\n', (716, 754), False, 'import cv2\n'), ((763, 788), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img2'], {}), "('image', img2)\n", (773, 788), False, 'import cv2\n'), ((1743, 1757), 'numpy.isnan', 'np.isnan', (['temp'], {}), '(temp)\n', (1751, 1757), True, 'import numpy as np\n'), ((1788, 1807), 'numpy.nan_to_num', 'np.nan_to_num', (['temp'], {}), '(temp)\n', (1801, 1807), True, 'import numpy as np\n'), ((3908, 3937), 'numpy.random.randint', 'np.random.randint', (['p', '(B_h - p)'], {}), '(p, B_h - p)\n', (3925, 3937), True, 'import numpy as np\n'), ((3973, 4002), 'numpy.random.randint', 'np.random.randint', (['p', '(B_w - p)'], {}), '(p, B_w - p)\n', (3990, 4002), True, 'import numpy as np\n'), ((4231, 4293), 'numpy.array', 'np.array', (['[random_B_r[i, j], random_B_c[i, j]]'], {'dtype': 'np.int32'}), '([random_B_r[i, j], random_B_c[i, 
j]], dtype=np.int32)\n', (4239, 4293), True, 'import numpy as np\n'), ((4360, 4422), 'numpy.array', 'np.array', (['[random_B_r[i, j], random_B_c[i, j]]'], {'dtype': 'np.int32'}), '([random_B_r[i, j], random_B_c[i, j]], dtype=np.int32)\n', (4368, 4422), True, 'import numpy as np\n'), ((8012, 8028), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (8020, 8028), True, 'import numpy as np\n'), ((8342, 8358), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (8350, 8358), True, 'import numpy as np\n'), ((12444, 12459), 'numpy.size', 'np.size', (['img', '(0)'], {}), '(img, 0)\n', (12451, 12459), True, 'import numpy as np\n'), ((12465, 12480), 'numpy.size', 'np.size', (['img', '(1)'], {}), '(img, 1)\n', (12472, 12480), True, 'import numpy as np\n'), ((10766, 10788), 'numpy.size', 'np.size', (['origin_ref', '(0)'], {}), '(origin_ref, 0)\n', (10773, 10788), True, 'import numpy as np\n'), ((10795, 10817), 'numpy.size', 'np.size', (['origin_ref', '(1)'], {}), '(origin_ref, 1)\n', (10802, 10817), True, 'import numpy as np\n')]
|
# Author: <NAME>
# Created: 2019-01-25
# Copyright (C) 2018, <NAME>
# License: MIT
import math
class Tween():
'''
Tweening class for scalar values
Initial value is set on construction.
wait() maintains the current value for the requested number of frames
pad() similar to wait, but pads until the total length of the tween is the required size.
set() sets a new current values, and adds it for the requested number of frames (which can be zero)
to() moves linearly from the current value to the supplied value. The first frame added will have the current value,
the last frame added will have the new value, with values spaced evenly in between. The final value will be set as
the new current value.
    You can use get(n) to get the nth frame, or alternatively you can use tween[n]. The built-in len() function can be
    used to find the sequence length. Tweens are iterable, so they can be used with for loops etc.
'''
def __init__(self, value=0):
self.check_value(value, None)
self.frames = []
self.previous = value
self.nextFrame = 0
def wait(self, count):
self.check_count(count)
self.frames.extend([self.previous for i in range(count)])
return self
def pad(self, final_length):
self.check_count(final_length)
required = final_length - len(self.frames)
if required > 0:
self.frames.extend([self.previous for i in range(required)])
return self
def set(self, value, count=0):
self.check_value(value, self.previous)
self.check_count(count)
self.frames.extend([value for i in range(count)])
self.previous = value
return self
def to(self, value, count):
self.check_value(value, self.previous)
self.check_count(count)
for i in range(count):
factor = (i + 1) / count
self.frames.append(self.previous + factor * (value - self.previous))
self.previous = value
return self
def ease(self, value, count, ease_function):
self.check_value(value, self.previous)
self.check_count(count)
for i in range(count):
factor = ease_function((i + 1) / count)
self.frames.append(self.previous + factor * (value - self.previous))
self.previous = value
return self
def get(self, frame):
if frame >= len(self.frames):
return self.previous
return self.frames[frame]
def __getitem__(self, key):
return self.get(key)
def __next__(self):
if self.nextFrame >= len(self.frames):
raise StopIteration()
frame = self.get(self.nextFrame)
self.nextFrame += 1
return frame
def __iter__(self):
return self
def check_value(self, value, previous):
if (not isinstance(value, (int, float))) or isinstance(value, bool):
raise ValueError('Numeric value required')
def check_index(self, value):
if not isinstance(value, int):
raise ValueError('Integer value required')
def check_count(self, value):
if not isinstance(value, int) or value < 0:
raise ValueError('Non-negative integer value required')
def __len__(self):
return len(self.frames)
class TweenVector(Tween):
'''
Tweening class for vector quantities.
    Similar to Tween, but the values are vector quantities (i.e. tuples or lists), such as (x, y) positions or
(r, g, b, a) colours.
The vector quantities must have at least 1 element, but normally it will be 2 or more. Every value added must have
the same length as the initial value, for example if you start with an (x, y) value, every new value must also
    have 2 dimensions.
'''
def __init__(self, value=(0, 0)):
Tween.__init__(self, value)
def to(self, value, count):
self.check_value(value, self.previous)
self.check_count(count)
for i in range(count):
nextvalue = []
factor = (i + 1) / count
for a, b in zip(self.previous, value):
nextvalue.append(a + factor * (b - a))
self.frames.append(nextvalue)
self.previous = value
return self
def ease(self, value, count, ease_function):
self.check_value(value, self.previous)
self.check_count(count)
for i in range(count):
nextvalue = []
factor = ease_function((i + 1) / count)
for a, b in zip(self.previous, value):
nextvalue.append(a + factor * (b - a))
self.frames.append(nextvalue)
self.previous = value
return self
def check_value(self, value, previous):
        try:
            if len(value) <= 0:
                raise ValueError('Vectors of rank 0 are not supported')
            if previous and len(value) != len(self.previous):
                raise ValueError('All values must be vectors of equal rank')
        except TypeError:
            raise ValueError('Sequence value required')
def ease_linear():
return lambda x: x
def ease_in_harm():
return lambda x: 1 + math.sin(math.pi * (x / 2 - 0.5))
def ease_out_harm():
return lambda x: math.sin(math.pi * x / 2)
def ease_in_out_harm():
return lambda x: 0.5 + 0.5 * math.sin(math.pi * (x - 0.5))
def ease_in_elastic():
return lambda x: math.sin(2.25 * 2 * math.pi * (x)) * pow(2, 10 * (x - 1))
def ease_out_elastic():
return lambda x: 1 - math.sin(2.25 * 2 * math.pi * (1 - x)) * pow(2, -10 * x)
def ease_in_out_elastic():
def fn(x):
if x < 0.5:
f = 2 * x
return 0.5 * (math.sin(2.25 * 2 * math.pi * f) * pow(2, 10 * (f - 1)))
else:
f = (2 * x - 1)
return 0.5 * (1 - math.sin(2.25 * 2 * math.pi * (1 - f)) * pow(2, -10 * f)) + 0.5
return fn
def ease_in_back():
return lambda x: x * x * x - x * math.sin(x * math.pi)
def ease_out_back():
def fn(x):
f = (1 - x)
return 1 - (f * f * f - f * math.sin(f * math.pi))
return fn
def ease_in_out_back():
def fn(x):
if x < 0.5:
f = 2 * x
return 0.5 * (f * f * f - f * math.sin(f * math.pi))
else:
f = (1 - (2 * x - 1))
return 0.5 * (1 - (f * f * f - f * math.sin(f * math.pi))) + 0.5
return fn
# Basic bounce function used by the bounce easing functions.
# Don't use this function directly, use the ease_*_bounce functions instead.
def _bounce(x):
if x < 4 / 11.0:
return (121 * x * x) / 16.0
elif x < 8 / 11.0:
return (363 / 40.0 * x * x) - (99 / 10.0 * x) + 17 / 5.0
elif x < 9 / 10.0:
return (4356 / 361.0 * x * x) - (35442 / 1805.0 * x) + 16061 / 1805.0
else:
return (54 / 5.0 * x * x) - (513 / 25.0 * x) + 268 / 25.0
def ease_in_bounce():
return lambda x: 1 - _bounce(1 - x)
def ease_out_bounce():
return lambda x: _bounce(x)
def ease_in_out_bounce():
def fn(x):
if x < 0.5:
return 0.5 * (1 - _bounce(1 - x * 2))
else:
return 0.5 * _bounce(x * 2 - 1) + 0.5
return fn
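# A minimal usage sketch of the classes and easing helpers above (illustrative only).
if __name__ == '__main__':
    # Hold 0 for 5 frames, ramp linearly to 10 over 10 frames, then bounce back to 0.
    t = Tween(0).wait(5).to(10, 10).ease(0, 10, ease_out_bounce())
    print(len(t))              # 25 frames in total
    print(t[0], t[7], t[100])  # indexing past the end returns the final value
    # Move a 2D point from (0, 0) to (100, 50) over 4 frames.
    for point in TweenVector((0, 0)).to((100, 50), 4):
        print(point)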
|
[
"math.sin"
] |
[((5275, 5300), 'math.sin', 'math.sin', (['(math.pi * x / 2)'], {}), '(math.pi * x / 2)\n', (5283, 5300), False, 'import math\n'), ((5197, 5230), 'math.sin', 'math.sin', (['(math.pi * (x / 2 - 0.5))'], {}), '(math.pi * (x / 2 - 0.5))\n', (5205, 5230), False, 'import math\n'), ((5436, 5468), 'math.sin', 'math.sin', (['(2.25 * 2 * math.pi * x)'], {}), '(2.25 * 2 * math.pi * x)\n', (5444, 5468), False, 'import math\n'), ((5360, 5389), 'math.sin', 'math.sin', (['(math.pi * (x - 0.5))'], {}), '(math.pi * (x - 0.5))\n', (5368, 5389), False, 'import math\n'), ((5545, 5583), 'math.sin', 'math.sin', (['(2.25 * 2 * math.pi * (1 - x))'], {}), '(2.25 * 2 * math.pi * (1 - x))\n', (5553, 5583), False, 'import math\n'), ((5981, 6002), 'math.sin', 'math.sin', (['(x * math.pi)'], {}), '(x * math.pi)\n', (5989, 6002), False, 'import math\n'), ((5714, 5746), 'math.sin', 'math.sin', (['(2.25 * 2 * math.pi * f)'], {}), '(2.25 * 2 * math.pi * f)\n', (5722, 5746), False, 'import math\n'), ((6097, 6118), 'math.sin', 'math.sin', (['(f * math.pi)'], {}), '(f * math.pi)\n', (6105, 6118), False, 'import math\n'), ((6260, 6281), 'math.sin', 'math.sin', (['(f * math.pi)'], {}), '(f * math.pi)\n', (6268, 6281), False, 'import math\n'), ((5843, 5881), 'math.sin', 'math.sin', (['(2.25 * 2 * math.pi * (1 - f))'], {}), '(2.25 * 2 * math.pi * (1 - f))\n', (5851, 5881), False, 'import math\n'), ((6378, 6399), 'math.sin', 'math.sin', (['(f * math.pi)'], {}), '(f * math.pi)\n', (6386, 6399), False, 'import math\n')]
|
import requests
class MoneyAPI():
def __init__(self):
self.API_URL = "https://economia.awesomeapi.com.br/json/all/CAD"
        self.SUCCESS_STATUS_CODE = 200
def request_money(self):
resp = requests.get(self.API_URL)
        if resp.status_code != self.SUCCESS_STATUS_CODE:
            raise Exception('GET {} returned status {}'.format(self.API_URL, resp.status_code))
dollar_info = resp.json()["CAD"]
        # add an extra item containing just the hours and the minutes
check_time = dollar_info["create_date"].split(" ")[1].split(":")
dollar_info["check_time"] = f"{check_time[0]}:{check_time[1]}"
return dollar_info
if __name__ == "__main__":
money = MoneyAPI()
print(money.request_money()["check_time"])
|
[
"requests.get"
] |
[((216, 242), 'requests.get', 'requests.get', (['self.API_URL'], {}), '(self.API_URL)\n', (228, 242), False, 'import requests\n')]
|
from hydra.experimental import compose, initialize
from random import randint
from random import seed
from soundbay.data import ClassifierDataset
import numpy as np
def test_dataloader() -> None:
seed(1)
with initialize(config_path="../soundbay/conf"):
# config is relative to a module
cfg = compose(config_name="runs/main")
data_loader = ClassifierDataset(cfg.data.train_dataset.data_path, cfg.data.train_dataset.metadata_path,
augmentations=cfg.data.train_dataset.augmentations,
augmentations_p=cfg.data.train_dataset.augmentations_p,
preprocessors=cfg.data.train_dataset.preprocessors)
assert data_loader.metadata.shape[1] == 5 # make sure metadata has 5 columns
assert data_loader.metadata.shape[0] > 0 # make sure metadata is not empty
data_size = data_loader.metadata.shape[0]
value = randint(0, data_size)
sample = data_loader[value]
assert np.issubdtype(sample[1], np.integer)
if 'spectrogram' in cfg.data.train_dataset.preprocessors:
assert len(sample[0].shape) == 3
if 'utils.LibrosaMelSpectrogram' in cfg.data.train_dataset.preprocessors.spectrogram._target_:
assert sample[0].shape[1] == cfg.data.train_dataset.preprocessors.spectrogram.n_mels
else:
assert sample[0].shape[1] == (cfg.data.train_dataset.preprocessors.spectrogram.n_fft // 2 + 1)
else:
assert sample[0].shape[1] == 1
|
[
"hydra.experimental.compose",
"random.randint",
"soundbay.data.ClassifierDataset",
"random.seed",
"hydra.experimental.initialize",
"numpy.issubdtype"
] |
[((202, 209), 'random.seed', 'seed', (['(1)'], {}), '(1)\n', (206, 209), False, 'from random import seed\n'), ((219, 261), 'hydra.experimental.initialize', 'initialize', ([], {'config_path': '"""../soundbay/conf"""'}), "(config_path='../soundbay/conf')\n", (229, 261), False, 'from hydra.experimental import compose, initialize\n'), ((318, 350), 'hydra.experimental.compose', 'compose', ([], {'config_name': '"""runs/main"""'}), "(config_name='runs/main')\n", (325, 350), False, 'from hydra.experimental import compose, initialize\n'), ((373, 636), 'soundbay.data.ClassifierDataset', 'ClassifierDataset', (['cfg.data.train_dataset.data_path', 'cfg.data.train_dataset.metadata_path'], {'augmentations': 'cfg.data.train_dataset.augmentations', 'augmentations_p': 'cfg.data.train_dataset.augmentations_p', 'preprocessors': 'cfg.data.train_dataset.preprocessors'}), '(cfg.data.train_dataset.data_path, cfg.data.train_dataset.\n metadata_path, augmentations=cfg.data.train_dataset.augmentations,\n augmentations_p=cfg.data.train_dataset.augmentations_p, preprocessors=\n cfg.data.train_dataset.preprocessors)\n', (390, 636), False, 'from soundbay.data import ClassifierDataset\n'), ((979, 1000), 'random.randint', 'randint', (['(0)', 'data_size'], {}), '(0, data_size)\n', (986, 1000), False, 'from random import randint\n'), ((1052, 1088), 'numpy.issubdtype', 'np.issubdtype', (['sample[1]', 'np.integer'], {}), '(sample[1], np.integer)\n', (1065, 1088), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import json, os, requests
from dotenv import load_dotenv
from telegram import Bot, InlineKeyboardButton, InlineKeyboardMarkup, Update
from telegram.ext import (CallbackContext, CallbackQueryHandler,
CommandHandler, Filters, MessageHandler, Updater)
load_dotenv()
TELEGRAM_TOKEN = os.getenv('TELEGRAM_TOKEN')
CHAT_ID = os.getenv('TELEGRAM_CHAT_ID')
DELLIN_KEY = os.getenv('DELLIN_KEY')
DELLIN_ID = os.getenv('DELLIN_ID')
URL_DELLIN_CALC = os.getenv('URL_DELLIN_CALC')
URL_DELLIN_KLADR = os.getenv('URL_DELLIN_KLADR')
URL_SBER = os.getenv('URL_SBER')
URL_GLAVDOSTAVKA = os.getenv('URL_GLAVDOSTAVKA')
USERS = {}
bot = Bot(TELEGRAM_TOKEN)
updater = Updater(TELEGRAM_TOKEN)
def start(update, context):
USERS[update.effective_user.id] = {
'progress': 1,
'derival': '',
'arrival': ''
}
bot.send_message(update.effective_message.chat.id,
'Введите город отправления посылки'
)
def progress(update, context):
if USERS[update.effective_user.id]['progress'] == 1:
return city(update, context)
elif USERS[update.effective_user.id]['progress'] == 2:
return result(update, context)
def city(update: Update, context: CallbackContext):
USERS[update.effective_user.id]['derival'] = update['message']['text']
USERS[update.effective_user.id]['progress'] = 2
bot.send_message(update.effective_message.chat.id,
'Введите город получения посылки'
)
def result(update: Update, context: CallbackContext):
USERS[update.effective_user.id]['arrival'] = update['message']['text']
derival = USERS[update.effective_user.id]['derival'].lower()
arrival = USERS[update.effective_user.id]['arrival'].lower()
derival_dellin = requests.post(
URL_DELLIN_KLADR,
json={"appkey": DELLIN_KEY,
"q": derival,
"limit": 1}
)
arrival_dellin = requests.post(
URL_DELLIN_KLADR,
json={"appkey": DELLIN_KEY,
"q": arrival,
"limit": 1}
)
try:
derival_dellin = derival_dellin.json().get('cities')[0]['code']
arrival_dellin = arrival_dellin.json().get('cities')[0]['code']
except IndexError:
del USERS[update.effective_user.id]
keyboard = [[InlineKeyboardButton(
'Новый расчет',
callback_data='new'
)]]
reply_markup = InlineKeyboardMarkup(keyboard)
        bot.send_message(update.effective_message.chat.id,
                         'Ошибка в названии города. Попробуйте еще.',
                         reply_markup=reply_markup
                         )
        return
dellin = requests.post(
URL_DELLIN_CALC,
json={"appkey": DELLIN_KEY,
"sessionID": DELLIN_ID,
"derival": {"city": derival_dellin},
"arrival": {"city": arrival_dellin}
}
)
with open('sber_cities.json', 'r', encoding='utf-8') as g:
sber_cities = json.load(g)
derival_sber = [city['kladr_id'] for city in sber_cities \
if city.get('name').lower() == derival][0]
arrival_sber = [city['kladr_id'] for city in sber_cities \
if city.get('name').lower() == arrival][0]
sber = requests.post(
URL_SBER,
json={"id": "JsonRpcClient.js",
"jsonrpc": "2.0",
"method": "calculateShipping",
"params": {
"stock": True,
"kladr_id_from": derival_sber,
"kladr_id": arrival_sber,
"length": 50,
"width": 35,
"height": 35,
"weight": 5,
"cod": 0,
"declared_cost": 0,
"courier": "sberlogistics"
}
}
)
sber = sber.json()['result']['methods'][0]
with open('glav_cities.json', 'r', encoding='utf-8') as g:
GLAV_CITIES = json.load(g)
derival_glav = [city['id'] for city in GLAV_CITIES \
if city.get('name', '').lower() == derival][0]
arrival_glav = [city['id'] for city in GLAV_CITIES \
if city.get('name', '').lower() == arrival][0]
glavdostavka = requests.post(
URL_GLAVDOSTAVKA + f'&depPoint={derival_glav}&arrPoint={arrival_glav}'
)
price_glavdostavka = glavdostavka.json()['price']
dellin = dellin.json()['data']['terminals_standard']
price_dellin = dellin['price']
period_dellin = dellin['period_to']
price_sber = sber['cost']['total']['sum']
period_sber = sber['max_days']
del USERS[update.effective_user.id]
keyboard = [[InlineKeyboardButton('Новый расчет', callback_data='new')]]
reply_markup = InlineKeyboardMarkup(keyboard)
derival = derival[0].upper() + derival[1:]
arrival = arrival[0].upper() + arrival[1:]
bot.send_message(update.effective_message.chat.id,
f'Стоимость и сроки доставки посылки с габаритами '
f'не превышающими 0.5х0.35х0.35(м) и массой не более 5кг '
f'из города {derival} в город {arrival} '
f'(от терминала до терминала):\n\n'
f'Деловые линии: {price_dellin} руб. '
f'До {period_dellin} дней.\n'
f'СберЛогистика: {price_sber} руб. '
f'До {period_sber} дней.\n'
f'ГлавДоставка: {price_glavdostavka} руб',
reply_markup=reply_markup
)
def button(update: Update, context: CallbackContext):
start(update, context)
def main():
start_handler = CommandHandler('start', start)
updater.dispatcher.add_handler(start_handler)
updater.dispatcher.add_handler(CallbackQueryHandler(button))
updater.dispatcher.add_handler(MessageHandler(Filters.text, progress))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
|
[
"json.load",
"telegram.ext.CallbackQueryHandler",
"telegram.InlineKeyboardButton",
"dotenv.load_dotenv",
"telegram.ext.Updater",
"telegram.Bot",
"telegram.InlineKeyboardMarkup",
"telegram.ext.MessageHandler",
"requests.post",
"telegram.ext.CommandHandler",
"os.getenv"
] |
[((301, 314), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (312, 314), False, 'from dotenv import load_dotenv\n'), ((334, 361), 'os.getenv', 'os.getenv', (['"""TELEGRAM_TOKEN"""'], {}), "('TELEGRAM_TOKEN')\n", (343, 361), False, 'import json, os, requests\n'), ((372, 401), 'os.getenv', 'os.getenv', (['"""TELEGRAM_CHAT_ID"""'], {}), "('TELEGRAM_CHAT_ID')\n", (381, 401), False, 'import json, os, requests\n'), ((416, 439), 'os.getenv', 'os.getenv', (['"""DELLIN_KEY"""'], {}), "('DELLIN_KEY')\n", (425, 439), False, 'import json, os, requests\n'), ((452, 474), 'os.getenv', 'os.getenv', (['"""DELLIN_ID"""'], {}), "('DELLIN_ID')\n", (461, 474), False, 'import json, os, requests\n'), ((493, 521), 'os.getenv', 'os.getenv', (['"""URL_DELLIN_CALC"""'], {}), "('URL_DELLIN_CALC')\n", (502, 521), False, 'import json, os, requests\n'), ((541, 570), 'os.getenv', 'os.getenv', (['"""URL_DELLIN_KLADR"""'], {}), "('URL_DELLIN_KLADR')\n", (550, 570), False, 'import json, os, requests\n'), ((583, 604), 'os.getenv', 'os.getenv', (['"""URL_SBER"""'], {}), "('URL_SBER')\n", (592, 604), False, 'import json, os, requests\n'), ((625, 654), 'os.getenv', 'os.getenv', (['"""URL_GLAVDOSTAVKA"""'], {}), "('URL_GLAVDOSTAVKA')\n", (634, 654), False, 'import json, os, requests\n'), ((674, 693), 'telegram.Bot', 'Bot', (['TELEGRAM_TOKEN'], {}), '(TELEGRAM_TOKEN)\n', (677, 693), False, 'from telegram import Bot, InlineKeyboardButton, InlineKeyboardMarkup, Update\n'), ((704, 727), 'telegram.ext.Updater', 'Updater', (['TELEGRAM_TOKEN'], {}), '(TELEGRAM_TOKEN)\n', (711, 727), False, 'from telegram.ext import CallbackContext, CallbackQueryHandler, CommandHandler, Filters, MessageHandler, Updater\n'), ((1848, 1938), 'requests.post', 'requests.post', (['URL_DELLIN_KLADR'], {'json': "{'appkey': DELLIN_KEY, 'q': derival, 'limit': 1}"}), "(URL_DELLIN_KLADR, json={'appkey': DELLIN_KEY, 'q': derival,\n 'limit': 1})\n", (1861, 1938), False, 'import json, os, requests\n'), ((2010, 2100), 'requests.post', 'requests.post', (['URL_DELLIN_KLADR'], {'json': "{'appkey': DELLIN_KEY, 'q': arrival, 'limit': 1}"}), "(URL_DELLIN_KLADR, json={'appkey': DELLIN_KEY, 'q': arrival,\n 'limit': 1})\n", (2023, 2100), False, 'import json, os, requests\n'), ((2765, 2930), 'requests.post', 'requests.post', (['URL_DELLIN_CALC'], {'json': "{'appkey': DELLIN_KEY, 'sessionID': DELLIN_ID, 'derival': {'city':\n derival_dellin}, 'arrival': {'city': arrival_dellin}}"}), "(URL_DELLIN_CALC, json={'appkey': DELLIN_KEY, 'sessionID':\n DELLIN_ID, 'derival': {'city': derival_dellin}, 'arrival': {'city':\n arrival_dellin}})\n", (2778, 2930), False, 'import json, os, requests\n'), ((3363, 3680), 'requests.post', 'requests.post', (['URL_SBER'], {'json': "{'id': 'JsonRpcClient.js', 'jsonrpc': '2.0', 'method': 'calculateShipping',\n 'params': {'stock': True, 'kladr_id_from': derival_sber, 'kladr_id':\n arrival_sber, 'length': 50, 'width': 35, 'height': 35, 'weight': 5,\n 'cod': 0, 'declared_cost': 0, 'courier': 'sberlogistics'}}"}), "(URL_SBER, json={'id': 'JsonRpcClient.js', 'jsonrpc': '2.0',\n 'method': 'calculateShipping', 'params': {'stock': True,\n 'kladr_id_from': derival_sber, 'kladr_id': arrival_sber, 'length': 50,\n 'width': 35, 'height': 35, 'weight': 5, 'cod': 0, 'declared_cost': 0,\n 'courier': 'sberlogistics'}})\n", (3376, 3680), False, 'import json, os, requests\n'), ((4356, 4445), 'requests.post', 'requests.post', (["(URL_GLAVDOSTAVKA + f'&depPoint={derival_glav}&arrPoint={arrival_glav}')"], {}), "(URL_GLAVDOSTAVKA +\n 
f'&depPoint={derival_glav}&arrPoint={arrival_glav}')\n", (4369, 4445), False, 'import json, os, requests\n'), ((4863, 4893), 'telegram.InlineKeyboardMarkup', 'InlineKeyboardMarkup', (['keyboard'], {}), '(keyboard)\n', (4883, 4893), False, 'from telegram import Bot, InlineKeyboardButton, InlineKeyboardMarkup, Update\n'), ((5786, 5816), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""start"""', 'start'], {}), "('start', start)\n", (5800, 5816), False, 'from telegram.ext import CallbackContext, CallbackQueryHandler, CommandHandler, Filters, MessageHandler, Updater\n'), ((3087, 3099), 'json.load', 'json.load', (['g'], {}), '(g)\n', (3096, 3099), False, 'import json, os, requests\n'), ((4076, 4088), 'json.load', 'json.load', (['g'], {}), '(g)\n', (4085, 4088), False, 'import json, os, requests\n'), ((5902, 5930), 'telegram.ext.CallbackQueryHandler', 'CallbackQueryHandler', (['button'], {}), '(button)\n', (5922, 5930), False, 'from telegram.ext import CallbackContext, CallbackQueryHandler, CommandHandler, Filters, MessageHandler, Updater\n'), ((5967, 6005), 'telegram.ext.MessageHandler', 'MessageHandler', (['Filters.text', 'progress'], {}), '(Filters.text, progress)\n', (5981, 6005), False, 'from telegram.ext import CallbackContext, CallbackQueryHandler, CommandHandler, Filters, MessageHandler, Updater\n'), ((2513, 2543), 'telegram.InlineKeyboardMarkup', 'InlineKeyboardMarkup', (['keyboard'], {}), '(keyboard)\n', (2533, 2543), False, 'from telegram import Bot, InlineKeyboardButton, InlineKeyboardMarkup, Update\n'), ((4784, 4841), 'telegram.InlineKeyboardButton', 'InlineKeyboardButton', (['"""Новый расчет"""'], {'callback_data': '"""new"""'}), "('Новый расчет', callback_data='new')\n", (4804, 4841), False, 'from telegram import Bot, InlineKeyboardButton, InlineKeyboardMarkup, Update\n'), ((2392, 2449), 'telegram.InlineKeyboardButton', 'InlineKeyboardButton', (['"""Новый расчет"""'], {'callback_data': '"""new"""'}), "('Новый расчет', callback_data='new')\n", (2412, 2449), False, 'from telegram import Bot, InlineKeyboardButton, InlineKeyboardMarkup, Update\n')]
|
import sys
import json
import os.path
import requests
def posts_at_url(url):
current_page = 1
    max_page = sys.maxsize
while current_page <= max_page:
url = os.path.expandvars(url)
resp = requests.get(url, params={'page':current_page, 'count': '-1'})
results = json.loads(resp.content)
current_page += 1
max_page = results['pages']
for p in results['posts']:
yield p
def documents(name, url, **kwargs):
for post in posts_at_url(url):
yield process_office(post)
def process_office(item):
item['_id'] = item['slug']
custom_fields = item['custom_fields']
# get intro text & subscribe form data from custom fields
for attr in ['intro_text', 'intro_subscribe_form', 'related_contact']:
if attr in custom_fields:
item[attr] = custom_fields[attr][0]
# build top story dict
top_story = {}
for attr in ['top_story_head', 'top_story_desc']:
if attr in custom_fields:
top_story[attr] = custom_fields[attr][0]
# convert top story links into a proper list
top_story_links = []
    for x in range(0, 5):
key = 'top_story_links_%s' % x
if key in custom_fields:
top_story_links.append(custom_fields[key])
if top_story_links:
top_story['top_story_links'] = top_story_links
if top_story:
item['top_story'] = top_story
# create list of office resource dicts
item['resources'] = []
    for x in range(0, 4):
resource = {}
fields = ['head', 'desc', 'icon', 'link']
for field in fields:
field_name = 'resource_%s_%s' % (str(x), field)
if field_name in custom_fields and custom_fields[field_name][0] != '':
if field == 'link':
resource[field] = custom_fields[field_name]
else:
resource[field] = custom_fields[field_name][0]
if resource:
item['resources'].append(resource)
return item
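# Illustrative usage (the URL below is a placeholder, not a real endpoint): the
# remote API is expected to return JSON containing 'pages' and 'posts' keys for
# each page, as consumed by posts_at_url() above.
#
#     for office in documents('offices', 'https://example.com/api/get_posts/'):
#         print(office['_id'])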
|
[
"json.loads",
"requests.get"
] |
[((221, 284), 'requests.get', 'requests.get', (['url'], {'params': "{'page': current_page, 'count': '-1'}"}), "(url, params={'page': current_page, 'count': '-1'})\n", (233, 284), False, 'import requests\n'), ((302, 326), 'json.loads', 'json.loads', (['resp.content'], {}), '(resp.content)\n', (312, 326), False, 'import json\n')]
|
import os
CMD = 'docker ps'
os.system(CMD)
ls = os.popen(CMD).read().split('\n')[1:-1]
zombies = []
for line in ls:
container, image = line.split()[:2]
if 'bigga' not in image and ':' not in image:
print(container, image)
zombies.append(container)
print("Zombies: ", " ".join(zombies))
# docker kill <zombies>
# docker system prune
|
[
"os.popen",
"os.system"
] |
[((30, 44), 'os.system', 'os.system', (['CMD'], {}), '(CMD)\n', (39, 44), False, 'import os\n'), ((50, 63), 'os.popen', 'os.popen', (['CMD'], {}), '(CMD)\n', (58, 63), False, 'import os\n')]
|
import sqlite3
from sqlite3 import IntegrityError
import logging
from typing import List
from datetime import datetime
from togglcmder.toggl.types.workspace import Workspace
from togglcmder.toggl.builders.workspace_builder import WorkspaceBuilder
from togglcmder.toggl.types.time_entry import TimeEntry
from togglcmder.toggl.builders.time_entry_builder import TimeEntryBuilder
from togglcmder.toggl.types.user import User
from togglcmder.toggl.builders.user_builder import UserBuilder
from togglcmder.toggl.types.tag import Tag
from togglcmder.toggl.builders.tag_builder import TagBuilder
from togglcmder.toggl.types.project import Project
from togglcmder.toggl.builders.project_builder import ProjectBuilder
class Caching(object):
WORKSPACE_TABLE = '''
CREATE TABLE IF NOT EXISTS workspaces (
name TEXT NOT NULL,
identifier INTEGER PRIMARY KEY,
last_updated TIMESTAMP NOT NULL
)
'''
PROJECT_TABLE = '''
CREATE TABLE IF NOT EXISTS projects (
name TEXT NOT NULL,
color INTEGER,
last_updated TIMESTAMP NOT NULL,
created TIMESTAMP NOT NULL,
identifier INTEGER PRIMARY KEY,
workspace_identifier INTEGER NOT NULL,
FOREIGN KEY (workspace_identifier) REFERENCES workspaces (identifier) ON DELETE CASCADE
)
'''
TAG_TABLE = '''
CREATE TABLE IF NOT EXISTS tags (
name TEXT,
identifier INTEGER PRIMARY KEY,
workspace_identifier INTEGER,
FOREIGN KEY (workspace_identifier) REFERENCES workspaces (identifier) ON DELETE CASCADE
)
'''
TIME_ENTRY_TABLE = '''
CREATE TABLE IF NOT EXISTS time_entries (
description TEXT,
start_time TIMESTAMP NOT NULL,
stop_time TIMESTAMP,
duration INTEGER,
identifier INTEGER PRIMARY KEY,
project_identifier INTEGER,
workspace_identifier INTEGER NOT NULL,
last_updated TIMESTAMP,
FOREIGN KEY (project_identifier) REFERENCES projects (identifier) ON DELETE CASCADE,
FOREIGN KEY (workspace_identifier) REFERENCES workspaces (identifier)
)
'''
TIME_ENTRY_TAG_JUNCTION_TABLE = '''
CREATE TABLE IF NOT EXISTS time_entry_tags (
tag_identifier INTEGER NOT NULL,
time_entry_identifier INTEGER NOT NULL,
FOREIGN KEY (tag_identifier) REFERENCES tags (identifier) ON DELETE CASCADE,
FOREIGN KEY (time_entry_identifier) REFERENCES time_entries (identifier) ON DELETE CASCADE
)
'''
USER_TABLE = '''
CREATE TABLE IF NOT EXISTS users (
name TEXT,
api_token TEXT,
identifier INTEGER PRIMARY KEY,
last_updated TIMESTAMP NOT NULL
)
'''
def __init__(self, *, cache_name: str = "cache.db"):
self.__connection = sqlite3.connect(cache_name)
self.__connection.set_trace_callback(logging.getLogger(__name__).debug)
self.__cursor = self.__connection.cursor()
self.__cursor.execute("PRAGMA foreign_keys = 1")
self.__connection.commit()
self.__cursor.execute(Caching.WORKSPACE_TABLE)
self.__cursor.execute(Caching.PROJECT_TABLE)
self.__cursor.execute(Caching.TAG_TABLE)
self.__cursor.execute(Caching.TIME_ENTRY_TABLE)
self.__cursor.execute(Caching.TIME_ENTRY_TAG_JUNCTION_TABLE)
self.__cursor.execute(Caching.USER_TABLE)
self.__connection.commit()
self.__workspaces: List[Workspace] = []
self.__projects: List[Project] = []
self.__tags: List[Tag] = []
self.__time_entries: List[TimeEntry] = []
def __del__(self):
self.__connection.close()
def update_workspace_cache(self, workspaces: List[Workspace]) -> int:
insert_sql = '''
INSERT INTO workspaces
(name, identifier, last_updated) VALUES
(?, ?, ?)
'''
update_sql = '''
UPDATE workspaces SET name=?, last_updated=?
WHERE identifier=?
'''
for workspace in workspaces:
try:
self.__cursor.execute(
insert_sql, (workspace.name,
workspace.identifier,
workspace.last_updated.timestamp()))
except IntegrityError:
self.__cursor.execute(
update_sql, (workspace.name,
workspace.last_updated.timestamp(),
workspace.identifier))
self.__connection.commit()
return self.__cursor.rowcount
def retrieve_workspace_cache(self) -> List[Workspace]:
sql = '''
SELECT name, identifier, last_updated FROM workspaces
'''
self.__cursor.execute(sql)
results = self.__cursor.fetchall()
if results:
return [
WorkspaceBuilder()
.name(result[0])
.identifier(result[1])
.last_updated(epoch=result[2]).build()
for result in results
]
def update_user_cache(self, user: User) -> int:
insert_sql = '''
INSERT INTO users
(name, api_token, identifier, last_updated) VALUES
(?, ?, ?, ?)
'''
update_sql = '''
UPDATE users SET name=?, api_token=?, last_updated=?
WHERE identifier=?
'''
try:
self.__cursor.execute(
insert_sql, (user.name,
user.api_token,
user.identifier,
user.last_updated.timestamp()))
except IntegrityError:
self.__cursor.execute(
update_sql, (user.name,
user.api_token,
user.last_updated.timestamp(),
user.identifier))
self.__connection.commit()
return self.__cursor.rowcount
def retrieve_user_cache(self) -> User:
sql = '''
SELECT name, api_token, identifier, last_updated FROM users
'''
self.__cursor.execute(sql)
results = self.__cursor.fetchone()
if results:
return UserBuilder()\
.name(results[0])\
.api_token(results[1])\
.identifier(results[2])\
.last_updated(epoch=results[3]).build()
def update_project_cache(self, projects: List[Project]) -> int:
insert_sql = '''
INSERT INTO projects
(name, color, last_updated, created, identifier, workspace_identifier) VALUES
(?, ?, ?, ?, ?, ?)
'''
update_sql = '''
UPDATE projects
SET name=?, color=?, last_updated=?, workspace_identifier=?
WHERE identifier=?
'''
for project in projects:
try:
self.__cursor.execute(
insert_sql, (project.name,
project.color.value,
project.last_updated.timestamp(),
project.created.timestamp() if project.created else datetime.now().timestamp(),
project.identifier,
project.workspace_identifier))
except IntegrityError:
self.__cursor.execute(
update_sql, (project.name,
project.color.value,
project.last_updated.timestamp(),
project.workspace_identifier,
project.identifier))
self.__connection.commit()
return self.__cursor.rowcount
def retrieve_project_cache(self) -> List[Project]:
sql = '''
SELECT name, color, last_updated, created, identifier, workspace_identifier FROM projects
'''
self.__cursor.execute(sql)
results = self.__cursor.fetchall()
if results:
return [
ProjectBuilder()
.name(result[0])
.color(result[1])
.last_updated(epoch=result[2])
.created(epoch=result[3])
.identifier(result[4])
.workspace_identifier(result[5]).build()
for result in results
]
def remove_project_from_cache(self, project: Project) -> None:
sql = '''
DELETE FROM projects
WHERE identifier=?
'''
self.__cursor.execute(sql, (project.identifier,))
self.__connection.commit()
def update_tag_cache(self, tags: List[Tag]) -> int:
insert_sql = '''
INSERT INTO tags
(name, identifier, workspace_identifier) VALUES
(?, ?, ?)
'''
update_sql = '''
UPDATE tags
SET name=?, workspace_identifier=?
WHERE identifier=?
'''
rows_affected = 0
for tag in tags:
try:
self.__cursor.execute(
insert_sql, (tag.name,
tag.identifier,
tag.workspace_identifier))
except IntegrityError:
self.__cursor.execute(
update_sql, (tag.name,
tag.workspace_identifier,
tag.identifier))
rows_affected += self.__cursor.rowcount
self.__connection.commit()
return rows_affected
def retrieve_tag_cache(self) -> List[Tag]:
sql = """
SELECT name, identifier, workspace_identifier FROM tags
"""
self.__cursor.execute(sql)
results = self.__cursor.fetchall()
if results:
return [
TagBuilder()
.name(result[0])
.identifier(result[1])
.workspace_identifier(result[2]).build()
for result in results
]
def remove_tag_from_cache(self, tag: Tag) -> None:
tag_removal_sql = '''
DELETE FROM tags
WHERE identifier=?
'''
self.__cursor.execute(tag_removal_sql, (tag.identifier,))
join_table_removal_sql = '''
DELETE FROM time_entry_tags
WHERE tag_identifier=?
'''
self.__cursor.execute(join_table_removal_sql, (tag.identifier,))
self.__connection.commit()
def __retrieve_time_entry_tags_join(self, time_entry_identifier: int) -> List[tuple]:
sql = '''
SELECT name, tag_identifier, time_entry_identifier
FROM time_entry_tags
INNER JOIN tags ON time_entry_tags.tag_identifier = tags.identifier
WHERE time_entry_identifier=?
'''
self.__cursor.execute(sql, (time_entry_identifier,))
return self.__cursor.fetchall()
def __retrieve_time_entry_tags(self, time_entry_identifier: int) -> List[tuple]:
sql = '''
SELECT tag_identifier, time_entry_identifier
FROM time_entry_tags
WHERE time_entry_identifier=?
'''
self.__cursor.execute(sql, (time_entry_identifier,))
return self.__cursor.fetchall()
    def __check_existing(self, tags: List[int], time_entry_identifier: int) -> List[int]:
        # Return the tag identifiers from ``tags`` that are not yet linked to this
        # time entry, i.e. the ones that still need a row in time_entry_tags.
        # __retrieve_time_entry_tags returns tuples of (tag_id, time_entry_id).
        existing_time_entry_tags = self.__retrieve_time_entry_tags(time_entry_identifier)
        if len(tags) == 0 or len(existing_time_entry_tags) == 0:
            return tags
        existing_tag_ids = {entry[0] for entry in existing_time_entry_tags}
        return [tag_id for tag_id in tags if tag_id not in existing_tag_ids]
def update_time_entry_cache(self, time_entries: List[TimeEntry]) -> int:
insert_sql = '''
INSERT INTO time_entries
(description, start_time, stop_time, duration, identifier,
project_identifier, workspace_identifier, last_updated) VALUES
(?, ?, ?, ?, ?, ?, ?, ?)
'''
update_sql = '''
UPDATE time_entries
SET description=?,
start_time=?,
stop_time=?,
duration=?,
project_identifier=?,
workspace_identifier=?,
last_updated=?
WHERE identifier=?
'''
insert_time_entry_tag_sql = '''
INSERT INTO time_entry_tags
(tag_identifier, time_entry_identifier)
VALUES (?, ?)
'''
tag_rows = self.retrieve_tag_cache()
rows_affected = 0
for time_entry in time_entries:
try:
self.__cursor.execute(
insert_sql, (time_entry.description,
time_entry.start_time.timestamp(),
None if not time_entry.stop_time else time_entry.stop_time.timestamp(),
time_entry.duration,
time_entry.identifier,
time_entry.project_identifier,
time_entry.workspace_identifier,
time_entry.last_updated.timestamp()))
except IntegrityError:
self.__cursor.execute(
update_sql, (time_entry.description,
time_entry.start_time.timestamp(),
None if not time_entry.stop_time else time_entry.stop_time.timestamp(),
time_entry.duration,
time_entry.project_identifier,
time_entry.workspace_identifier,
time_entry.last_updated.timestamp(),
time_entry.identifier))
rows_affected += self.__cursor.rowcount
tag_ids = []
if time_entry.tags:
for tag in time_entry.tags:
for tag_row in tag_rows:
if tag == tag_row.name:
tag_ids.append(tag_row.identifier)
break
for tag_id in self.__check_existing(tag_ids, time_entry.identifier):
self.__cursor.execute(
insert_time_entry_tag_sql,
(tag_id,
time_entry.identifier))
self.__connection.commit()
return rows_affected
def retrieve_time_entry_cache(self) -> List[TimeEntry]:
time_entry_sql = """
SELECT description,
start_time,
stop_time,
duration,
identifier,
project_identifier,
workspace_identifier,
last_updated
FROM time_entries
"""
time_entries = []
self.__cursor.execute(time_entry_sql)
results = self.__cursor.fetchall()
for result in results:
tag_results = self.__retrieve_time_entry_tags_join(result[4])
builder = TimeEntryBuilder()\
.description(result[0])\
.start_time(epoch=result[1])\
.stop_time(epoch=result[2])\
.duration(result[3])\
.identifier(result[4])\
.project_identifier(result[5])\
.workspace_identifier(result[6])\
.last_updated(epoch=result[7])\
.tags([tag_result[0] for tag_result in tag_results])
time_entries.append(builder.build())
return time_entries
def remove_time_entry_from_cache(self, time_entry: TimeEntry) -> None:
entry_removal_sql = '''
DELETE FROM time_entries
WHERE identifier=?
'''
self.__cursor.execute(entry_removal_sql, (time_entry.identifier,))
joined_entry_removal_sql = '''
DELETE FROM time_entry_tags
WHERE time_entry_identifier=?
'''
self.__cursor.execute(joined_entry_removal_sql, (time_entry.identifier,))
self.__connection.commit()
def get_workspace_identifier(self, workspace_name: str) -> int:
sql = """
SELECT identifier
FROM workspaces
WHERE name=?
"""
self.__cursor.execute(sql, (workspace_name,))
return self.__cursor.fetchone()[0]
def get_project_identifier(self, project_name: str) -> int:
sql = """
SELECT identifier
FROM projects
WHERE name=?
"""
self.__cursor.execute(sql, (project_name,))
return self.__cursor.fetchone()[0]
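# A minimal usage sketch (illustrative only; it assumes the builders accept the same
# fluent calls already used in the retrieve_* methods above):
if __name__ == '__main__':
    cache = Caching(cache_name=':memory:')
    workspace = (WorkspaceBuilder()
                 .name('Personal')
                 .identifier(1)
                 .last_updated(epoch=0)
                 .build())
    cache.update_workspace_cache([workspace])
    print(cache.retrieve_workspace_cache())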
|
[
"togglcmder.toggl.builders.tag_builder.TagBuilder",
"togglcmder.toggl.builders.workspace_builder.WorkspaceBuilder",
"togglcmder.toggl.builders.project_builder.ProjectBuilder",
"togglcmder.toggl.builders.user_builder.UserBuilder",
"sqlite3.connect",
"togglcmder.toggl.builders.time_entry_builder.TimeEntryBuilder",
"datetime.datetime.now",
"logging.getLogger"
] |
[((2782, 2809), 'sqlite3.connect', 'sqlite3.connect', (['cache_name'], {}), '(cache_name)\n', (2797, 2809), False, 'import sqlite3\n'), ((2855, 2882), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2872, 2882), False, 'import logging\n'), ((7221, 7235), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7233, 7235), False, 'from datetime import datetime\n'), ((4870, 4888), 'togglcmder.toggl.builders.workspace_builder.WorkspaceBuilder', 'WorkspaceBuilder', ([], {}), '()\n', (4886, 4888), False, 'from togglcmder.toggl.builders.workspace_builder import WorkspaceBuilder\n'), ((9954, 9966), 'togglcmder.toggl.builders.tag_builder.TagBuilder', 'TagBuilder', ([], {}), '()\n', (9964, 9966), False, 'from togglcmder.toggl.builders.tag_builder import TagBuilder\n'), ((6260, 6273), 'togglcmder.toggl.builders.user_builder.UserBuilder', 'UserBuilder', ([], {}), '()\n', (6271, 6273), False, 'from togglcmder.toggl.builders.user_builder import UserBuilder\n'), ((8123, 8139), 'togglcmder.toggl.builders.project_builder.ProjectBuilder', 'ProjectBuilder', ([], {}), '()\n', (8137, 8139), False, 'from togglcmder.toggl.builders.project_builder import ProjectBuilder\n'), ((15637, 15655), 'togglcmder.toggl.builders.time_entry_builder.TimeEntryBuilder', 'TimeEntryBuilder', ([], {}), '()\n', (15653, 15655), False, 'from togglcmder.toggl.builders.time_entry_builder import TimeEntryBuilder\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
def init_logger() -> None:
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
|
[
"logging.basicConfig"
] |
[((96, 203), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (115, 203), False, 'import logging\n')]
|
import boto3
exceptions = boto3.client('discovery').exceptions
AuthorizationErrorException = exceptions.AuthorizationErrorException
ConflictErrorException = exceptions.ConflictErrorException
InvalidParameterException = exceptions.InvalidParameterException
InvalidParameterValueException = exceptions.InvalidParameterValueException
OperationNotPermittedException = exceptions.OperationNotPermittedException
ResourceInUseException = exceptions.ResourceInUseException
ResourceNotFoundException = exceptions.ResourceNotFoundException
ServerInternalErrorException = exceptions.ServerInternalErrorException
|
[
"boto3.client"
] |
[((27, 52), 'boto3.client', 'boto3.client', (['"""discovery"""'], {}), "('discovery')\n", (39, 52), False, 'import boto3\n')]
|
#!/usr/bin/env python
import os
import subprocess
import sys
from pathlib import Path
basedir = Path(__file__).parent.parent
os.chdir(basedir)
deps = {
"flake8": [
"darglint",
"flake8-bugbear",
"flake8-builtins",
"flake8-comprehensions",
"flake8-datetimez",
"flake8-debugger",
"flake8-docstrings",
"flake8-eradicate",
"flake8-print",
"flake8-too-many",
"pep8-naming",
"tryceratops",
],
"mypy": [
"arrow",
"httpx",
"hypothesis",
"importlib-metadata",
"pydantic",
"pytest",
"pytest-asyncio",
"starlette",
"types-dataclasses",
],
}
if __name__ == "__main__":
subprocess.call(["pip", "install", "-U", *deps[sys.argv[1]]])
    sys.exit(subprocess.call([sys.argv[1], "."]))
|
[
"pathlib.Path",
"subprocess.call",
"os.chdir"
] |
[((128, 145), 'os.chdir', 'os.chdir', (['basedir'], {}), '(basedir)\n', (136, 145), False, 'import os\n'), ((749, 810), 'subprocess.call', 'subprocess.call', (["['pip', 'install', '-U', *deps[sys.argv[1]]]"], {}), "(['pip', 'install', '-U', *deps[sys.argv[1]]])\n", (764, 810), False, 'import subprocess\n'), ((99, 113), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (103, 113), False, 'from pathlib import Path\n'), ((820, 855), 'subprocess.call', 'subprocess.call', (["[sys.argv[1], '.']"], {}), "([sys.argv[1], '.'])\n", (835, 855), False, 'import subprocess\n')]
|
import yaml
import os
import numpy as np
class DataOrganizer:
def __init__(self,parameter_file_path):
self.base_path = parameter_file_path
self.load_params()
def load_params(self):
params_file = os.path.join(self.base_path,'params.yaml')
with open(params_file) as yamlstream:
self.params = yaml.load(yamlstream,Loader=yaml.SafeLoader)
def get_closest_index_and_value(self,value,array):
index = np.argmin(np.abs(array - value))
value = array[index]
return index, value
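# Illustrative: for value 2.7 and array np.array([1.0, 2.0, 3.0]),
# get_closest_index_and_value returns (2, 3.0) -- the index and value of the
# closest entry.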
|
[
"numpy.abs",
"yaml.load",
"os.path.join"
] |
[((229, 272), 'os.path.join', 'os.path.join', (['self.base_path', '"""params.yaml"""'], {}), "(self.base_path, 'params.yaml')\n", (241, 272), False, 'import os\n'), ((344, 389), 'yaml.load', 'yaml.load', (['yamlstream'], {'Loader': 'yaml.SafeLoader'}), '(yamlstream, Loader=yaml.SafeLoader)\n', (353, 389), False, 'import yaml\n'), ((471, 492), 'numpy.abs', 'np.abs', (['(array - value)'], {}), '(array - value)\n', (477, 492), True, 'import numpy as np\n')]
|
from flask import Blueprint, abort, current_app, render_template, request
from flask.json import jsonify
from jinja2 import TemplateNotFound
from .constants import EXTENSION_NAME
from .extension import FlaskPancake
bp = Blueprint("pancake", __name__, template_folder="templates")
def aggregate_data(ext: FlaskPancake):
if ext.group_funcs:
group_ids = list(ext.group_funcs.keys())
flags = [
{
"name": flag.name,
"default": flag.default,
"is_active": flag.is_active_globally(),
"groups": {
group_id: {
object_id: flag.is_active_group(
group_id=group_id, object_id=object_id
)
for object_id in func.get_candidate_ids()
}
for group_id, func in ext.group_funcs.items()
},
}
for flag in ext.flags.values()
]
else:
group_ids = []
flags = [
{
"name": flag.name,
"default": flag.default,
"is_active": flag.is_active_globally(),
"groups": {},
}
for flag in ext.flags.values()
]
samples = [
{"name": sample.name, "default": sample.default, "value": sample.get()}
for sample in ext.samples.values()
]
switches = [
{
"name": switch.name,
"default": switch.default,
"is_active": switch.is_active(),
}
for switch in ext.switches.values()
]
return {
"name": ext.name,
"group_ids": group_ids,
"flags": flags,
"samples": samples,
"switches": switches,
}
def aggregate_is_active_data(ext: FlaskPancake):
flags = [
{"name": flag.name, "is_active": flag.is_active()}
for flag in ext.flags.values()
]
samples = [
{"name": sample.name, "is_active": sample.is_active()}
for sample in ext.samples.values()
]
switches = [
{"name": switch.name, "is_active": switch.is_active()}
for switch in ext.switches.values()
]
return {
"flags": flags,
"samples": samples,
"switches": switches,
}
@bp.route("/overview", defaults={"pancake": EXTENSION_NAME})
@bp.route("/overview/<pancake>")
def overview(pancake):
ext = current_app.extensions.get(pancake)
if ext is None or not isinstance(ext, FlaskPancake):
return "Unknown", 404
context = aggregate_data(ext)
if request.accept_mimetypes.accept_html:
try:
return render_template("flask_pancake/overview.html", **context)
except TemplateNotFound: # pragma: no cover
abort(404)
else:
return jsonify(context)
@bp.route("/status", defaults={"pancake": EXTENSION_NAME})
@bp.route("/status/<pancake>")
def status(pancake):
ext = current_app.extensions.get(pancake)
if ext is None or not isinstance(ext, FlaskPancake):
return "Unknown", 404
context = aggregate_is_active_data(ext)
return jsonify(context)
|
[
"flask.Blueprint",
"flask.abort",
"flask.json.jsonify",
"flask.current_app.extensions.get",
"flask.render_template"
] |
[((222, 281), 'flask.Blueprint', 'Blueprint', (['"""pancake"""', '__name__'], {'template_folder': '"""templates"""'}), "('pancake', __name__, template_folder='templates')\n", (231, 281), False, 'from flask import Blueprint, abort, current_app, render_template, request\n'), ((2473, 2508), 'flask.current_app.extensions.get', 'current_app.extensions.get', (['pancake'], {}), '(pancake)\n', (2499, 2508), False, 'from flask import Blueprint, abort, current_app, render_template, request\n'), ((3008, 3043), 'flask.current_app.extensions.get', 'current_app.extensions.get', (['pancake'], {}), '(pancake)\n', (3034, 3043), False, 'from flask import Blueprint, abort, current_app, render_template, request\n'), ((3186, 3202), 'flask.json.jsonify', 'jsonify', (['context'], {}), '(context)\n', (3193, 3202), False, 'from flask.json import jsonify\n'), ((2868, 2884), 'flask.json.jsonify', 'jsonify', (['context'], {}), '(context)\n', (2875, 2884), False, 'from flask.json import jsonify\n'), ((2709, 2766), 'flask.render_template', 'render_template', (['"""flask_pancake/overview.html"""'], {}), "('flask_pancake/overview.html', **context)\n", (2724, 2766), False, 'from flask import Blueprint, abort, current_app, render_template, request\n'), ((2832, 2842), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (2837, 2842), False, 'from flask import Blueprint, abort, current_app, render_template, request\n')]
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import os
import os.path as osp
def save_network(model, network_label, epoch, iteration, args):
dataset = args.data_path.split(os.sep)[-1]
save_filename = "{0}_net_{1}_{2}_{3}.pth".format(network_label, args.model, epoch, iteration)
model_save_dir = osp.join(args.save_dir, dataset)
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
save_path = os.path.join(model_save_dir, save_filename)
model_state = {
'state_dict': model.cpu().state_dict(),
'epoch': epoch,
'iteration': iteration,
'model': args.model,
'color_space': args.color_space,
'batch_size': args.batch_size,
'dataset': dataset,
'image_size': args.image_size
}
torch.save(model_state, save_path)
model.cuda()
print("Saved {0} at epoch: {1}, iter: {2}".format(network_label, epoch, iteration))
def load_network(model, network_label, epoch, iteration, args):
dataset = args.data_path.split(os.sep)[-1]
save_filename = "{0}_net_{1}_{2}_{3}.pth".format(network_label, args.model, epoch, iteration)
# model_save_dir = osp.join(args.load_dir, dataset)
save_path = osp.join(args.load_dir, save_filename)
model_state = torch.load(save_path)
if "state_dict" in model_state:
model.load_state_dict(model_state["state_dict"])
else:
model.load_state_dict(model_state)
model_state = {
'state_dict': model.cpu().state_dict(),
'epoch': epoch,
'iteration': iteration,
'model': args.model,
'color_space': args.color_space,
'batch_size': args.batch_size,
'dataset': dataset,
'image_size': args.image_size
}
model.cuda(device_id=args.gpu)
print('Loaded {0} from epoch: {1} itr: {2}'.format(network_label, epoch, args.load))
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm2d') != -1 or classname.find('InstanceNorm2d') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def get_norm_layer(norm_type):
if norm_type == 'batch':
norm_layer = nn.BatchNorm2d
elif norm_type == 'instance':
norm_layer = nn.InstanceNorm2d
else:
print('normalization layer [%s] is not found' % norm_type)
return norm_layer
def define_G(input_nc, output_nc, ngf, norm='batch', use_dropout=False, gpu_ids=[]):
netG = None
use_gpu = len(gpu_ids) > 0
norm_layer = get_norm_layer(norm_type=norm)
if use_gpu:
assert(torch.cuda.is_available())
netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, gpu_ids=gpu_ids)
if len(gpu_ids) > 0:
netG.cuda(device_id=gpu_ids[0])
netG.apply(weights_init)
return netG
def define_D(input_nc, ndf, norm='batch', use_sigmoid=False, gpu_ids=[]):
netD = None
use_gpu = len(gpu_ids) > 0
norm_layer = get_norm_layer(norm_type=norm)
if use_gpu:
assert(torch.cuda.is_available())
netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
if use_gpu:
netD.cuda(device_id=gpu_ids[0])
netD.apply(weights_init)
return netD
def print_network(net):
num_params = 0
for param in net.parameters():
num_params += param.numel()
print(net)
print('Total number of parameters: %d' % num_params)
# Defines the GAN loss which uses either LSGAN or the regular GAN.
class GANLoss(nn.Module):
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
tensor=torch.FloatTensor):
super(GANLoss, self).__init__()
self.real_label = target_real_label
self.fake_label = target_fake_label
self.real_label_var = None
self.fake_label_var = None
self.Tensor = tensor
if use_lsgan:
self.loss = nn.MSELoss()
else:
self.loss = nn.BCELoss()
def get_target_tensor(self, input, target_is_real):
target_tensor = None
if target_is_real:
create_label = ((self.real_label_var is None) or
(self.real_label_var.numel() != input.numel()))
if create_label:
real_tensor = self.Tensor(input.size()).fill_(self.real_label)
self.real_label_var = Variable(real_tensor, requires_grad=False)
target_tensor = self.real_label_var
else:
create_label = ((self.fake_label_var is None) or
(self.fake_label_var.numel() != input.numel()))
if create_label:
fake_tensor = self.Tensor(input.size()).fill_(self.fake_label)
self.fake_label_var = Variable(fake_tensor, requires_grad=False)
target_tensor = self.fake_label_var
return target_tensor
def __call__(self, input, target_is_real):
target_tensor = self.get_target_tensor(input, target_is_real)
return self.loss(input, target_tensor.cuda())
# TODO define forward() for GANLoss?
# Defines the generator that consists of Resnet blocks between a few
# downsampling/upsampling operations.
class ResnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[]):
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
self.input_nc = input_nc
self.output_nc = output_nc
self.ngf = ngf
self.gpu_ids = gpu_ids
model = [nn.Conv2d(input_nc, ngf, kernel_size=7, padding=3),
norm_layer(ngf, affine=True),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling):
mult = 2**i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
stride=2, padding=1),
norm_layer(ngf * mult * 2, affine=True),
nn.ReLU(True)]
mult = 2**n_downsampling
for i in range(n_blocks):
model += [ResnetBlock(ngf * mult, 'zero', norm_layer=norm_layer, use_dropout=use_dropout)]
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1),
norm_layer(int(ngf * mult / 2), affine=True),
nn.ReLU(True)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=3)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, x):
if self.gpu_ids and isinstance(x.data, torch.cuda.FloatTensor):
return nn.parallel.data_parallel(self.model, x, self.gpu_ids)
else:
return self.model(x)
# Define a resnet block
class ResnetBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, use_dropout):
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout):
conv_block = []
p = 0
assert(padding_type == 'zero')
p = 1
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim, affine=True),
nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim, affine=True)]
return nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
# Defines the PatchGAN discriminator.
class NLayerDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):
super(NLayerDiscriminator, self).__init__()
self.gpu_ids = gpu_ids
kw = 4
padw = int(np.ceil((kw-1)/2))
sequence = [
nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, True)
]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers):
nf_mult_prev = nf_mult
nf_mult = min(2**n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2,
padding=padw), norm_layer(ndf * nf_mult,
affine=True), nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2**n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1,
padding=padw), norm_layer(ndf * nf_mult,
affine=True), nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
if use_sigmoid:
sequence += [nn.Sigmoid()]
self.model = nn.Sequential(*sequence)
def forward(self, x):
if len(self.gpu_ids) and isinstance(x.data, torch.cuda.FloatTensor):
return nn.parallel.data_parallel(self.model, x, self.gpu_ids)
else:
return self.model(x)
class GramMatrix(nn.Module):
def forward(self, input):
a, b, c, d = input.size() # a=batch size(=1)
# b=number of feature maps
# (c,d)=dimensions of a f. map (N=c*d)
features = input.view(a, b, c * d) # resize F_XL into \hat F_XL
G = torch.bmm(features, features.transpose(1, 2)) # compute the gram product
# normalize the values of the gram matrix
# by dividing by the number of element in each feature maps.
return G.div(b * c * d)
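# Shape sanity check (illustrative): for an input of shape (1, 4, 8, 8) the view has
# shape (1, 4, 64) and the batched product yields G of shape (1, 4, 4) -- one b x b
# Gram matrix per batch element, scaled by 1 / (b * c * d):
#     GramMatrix()(torch.randn(1, 4, 8, 8)).shape == torch.Size([1, 4, 4])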
class FeatureExtractor(nn.Module):
# Extract features from intermediate layers of a network
def __init__(self, submodule, extracted_layers):
super(FeatureExtractor, self).__init__()
self.submodule = submodule
self.extracted_layers = extracted_layers
def forward(self, x):
outputs = []
for name, module in self.submodule._modules.items():
x = module(x)
if name in self.extracted_layers:
outputs += [x]
return outputs + [x]
|
[
"torch.nn.Dropout",
"torch.nn.MSELoss",
"torch.nn.ReLU",
"os.makedirs",
"torch.nn.BCELoss",
"torch.nn.Sequential",
"torch.nn.Tanh",
"numpy.ceil",
"torch.load",
"torch.nn.Conv2d",
"os.path.exists",
"torch.autograd.Variable",
"torch.nn.Sigmoid",
"torch.save",
"torch.cuda.is_available",
"torch.nn.LeakyReLU",
"os.path.join",
"torch.nn.parallel.data_parallel"
] |
[((355, 387), 'os.path.join', 'osp.join', (['args.save_dir', 'dataset'], {}), '(args.save_dir, dataset)\n', (363, 387), True, 'import os.path as osp\n'), ((484, 527), 'os.path.join', 'os.path.join', (['model_save_dir', 'save_filename'], {}), '(model_save_dir, save_filename)\n', (496, 527), False, 'import os\n'), ((839, 873), 'torch.save', 'torch.save', (['model_state', 'save_path'], {}), '(model_state, save_path)\n', (849, 873), False, 'import torch\n'), ((1263, 1301), 'os.path.join', 'osp.join', (['args.load_dir', 'save_filename'], {}), '(args.load_dir, save_filename)\n', (1271, 1301), True, 'import os.path as osp\n'), ((1325, 1346), 'torch.load', 'torch.load', (['save_path'], {}), '(save_path)\n', (1335, 1346), False, 'import torch\n'), ((399, 429), 'os.path.exists', 'os.path.exists', (['model_save_dir'], {}), '(model_save_dir)\n', (413, 429), False, 'import os\n'), ((439, 466), 'os.makedirs', 'os.makedirs', (['model_save_dir'], {}), '(model_save_dir)\n', (450, 466), False, 'import os\n'), ((2754, 2779), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2777, 2779), False, 'import torch\n'), ((3226, 3251), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3249, 3251), False, 'import torch\n'), ((6976, 6997), 'torch.nn.Sequential', 'nn.Sequential', (['*model'], {}), '(*model)\n', (6989, 6997), True, 'import torch.nn as nn\n'), ((8015, 8041), 'torch.nn.Sequential', 'nn.Sequential', (['*conv_block'], {}), '(*conv_block)\n', (8028, 8041), True, 'import torch.nn as nn\n'), ((9545, 9569), 'torch.nn.Sequential', 'nn.Sequential', (['*sequence'], {}), '(*sequence)\n', (9558, 9569), True, 'import torch.nn as nn\n'), ((4164, 4176), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (4174, 4176), True, 'import torch.nn as nn\n'), ((4215, 4227), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (4225, 4227), True, 'import torch.nn as nn\n'), ((5830, 5880), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_nc', 'ngf'], {'kernel_size': '(7)', 'padding': '(3)'}), '(input_nc, ngf, kernel_size=7, padding=3)\n', (5839, 5880), True, 'import torch.nn as nn\n'), ((5946, 5959), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (5953, 5959), True, 'import torch.nn as nn\n'), ((6872, 6923), 'torch.nn.Conv2d', 'nn.Conv2d', (['ngf', 'output_nc'], {'kernel_size': '(7)', 'padding': '(3)'}), '(ngf, output_nc, kernel_size=7, padding=3)\n', (6881, 6923), True, 'import torch.nn as nn\n'), ((6943, 6952), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (6950, 6952), True, 'import torch.nn as nn\n'), ((7116, 7170), 'torch.nn.parallel.data_parallel', 'nn.parallel.data_parallel', (['self.model', 'x', 'self.gpu_ids'], {}), '(self.model, x, self.gpu_ids)\n', (7141, 7170), True, 'import torch.nn as nn\n'), ((7670, 7715), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim', 'dim'], {'kernel_size': '(3)', 'padding': 'p'}), '(dim, dim, kernel_size=3, padding=p)\n', (7679, 7715), True, 'import torch.nn as nn\n'), ((7793, 7806), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (7800, 7806), True, 'import torch.nn as nn\n'), ((7899, 7944), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim', 'dim'], {'kernel_size': '(3)', 'padding': 'p'}), '(dim, dim, kernel_size=3, padding=p)\n', (7908, 7944), True, 'import torch.nn as nn\n'), ((8433, 8454), 'numpy.ceil', 'np.ceil', (['((kw - 1) / 2)'], {}), '((kw - 1) / 2)\n', (8440, 8454), True, 'import numpy as np\n'), ((8485, 8549), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_nc', 'ndf'], {'kernel_size': 'kw', 'stride': '(2)', 'padding': 'padw'}), '(input_nc, ndf, 
kernel_size=kw, stride=2, padding=padw)\n', (8494, 8549), True, 'import torch.nn as nn\n'), ((8563, 8586), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (8575, 8586), True, 'import torch.nn as nn\n'), ((9138, 9226), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * nf_mult_prev)', '(ndf * nf_mult)'], {'kernel_size': 'kw', 'stride': '(1)', 'padding': 'padw'}), '(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1,\n padding=padw)\n', (9147, 9226), True, 'import torch.nn as nn\n'), ((9334, 9357), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (9346, 9357), True, 'import torch.nn as nn\n'), ((9390, 9457), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * nf_mult)', '(1)'], {'kernel_size': 'kw', 'stride': '(1)', 'padding': 'padw'}), '(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)\n', (9399, 9457), True, 'import torch.nn as nn\n'), ((9694, 9748), 'torch.nn.parallel.data_parallel', 'nn.parallel.data_parallel', (['self.model', 'x', 'self.gpu_ids'], {}), '(self.model, x, self.gpu_ids)\n', (9719, 9748), True, 'import torch.nn as nn\n'), ((4624, 4666), 'torch.autograd.Variable', 'Variable', (['real_tensor'], {'requires_grad': '(False)'}), '(real_tensor, requires_grad=False)\n', (4632, 4666), False, 'from torch.autograd import Variable\n'), ((5012, 5054), 'torch.autograd.Variable', 'Variable', (['fake_tensor'], {'requires_grad': '(False)'}), '(fake_tensor, requires_grad=False)\n', (5020, 5054), False, 'from torch.autograd import Variable\n'), ((6075, 6148), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ngf * mult)', '(ngf * mult * 2)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)'}), '(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1)\n', (6084, 6148), True, 'import torch.nn as nn\n'), ((6267, 6280), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (6274, 6280), True, 'import torch.nn as nn\n'), ((6838, 6851), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (6845, 6851), True, 'import torch.nn as nn\n'), ((7859, 7874), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (7869, 7874), True, 'import torch.nn as nn\n'), ((8792, 8880), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * nf_mult_prev)', '(ndf * nf_mult)'], {'kernel_size': 'kw', 'stride': '(2)', 'padding': 'padw'}), '(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2,\n padding=padw)\n', (8801, 8880), True, 'import torch.nn as nn\n'), ((8996, 9019), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (9008, 9019), True, 'import torch.nn as nn\n'), ((9509, 9521), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (9519, 9521), True, 'import torch.nn as nn\n')]
|
from .loaders.loader import DatasetLoader
from omegaconf import OmegaConf
class DatasetSuite:
def __init__(self, name, cache_dir, datasets):
self.name = name
self.cache_dir = cache_dir
self.datasets = datasets
def fetch_and_cache_dataset(self, dataset_index):
loader_type = self.datasets[int(dataset_index)].pop('loader')
loader_cls = DatasetLoader.by_name(loader_type)
loader = loader_cls(**self.datasets[int(dataset_index)])
loader.fetch_and_cache(self.cache_dir)
@classmethod
def fetch_and_cache_from_cfg(cls, cfg, dataset_index):
resolved_cfg = OmegaConf.to_container(cfg, resolve=True)
suite = cls(resolved_cfg['name'], resolved_cfg['cache_dir'], resolved_cfg['datasets'])
suite.fetch_and_cache_dataset(dataset_index)
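# Usage sketch (added for illustration; the config keys below mirror the fields
# consumed above, but the concrete loader name, cache path, and URL are
# hypothetical and depend on what DatasetLoader actually registers):
#
#   cfg = OmegaConf.create({
#       "name": "example_suite",
#       "cache_dir": "/tmp/dataset_cache",
#       "datasets": [{"loader": "some_registered_loader", "url": "https://example.com/data.csv"}],
#   })
#   DatasetSuite.fetch_and_cache_from_cfg(cfg, dataset_index=0)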
|
[
"omegaconf.OmegaConf.to_container"
] |
[((637, 678), 'omegaconf.OmegaConf.to_container', 'OmegaConf.to_container', (['cfg'], {'resolve': '(True)'}), '(cfg, resolve=True)\n', (659, 678), False, 'from omegaconf import OmegaConf\n')]
|
from collections import namedtuple
ImageBuilderResult = namedtuple(
"ImageBuilderResult",
("consumed_files", "file_errors_map", "new_images", "new_image_files"),
)
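# Illustrative usage (added; not part of the original module): ImageBuilderResult
# is a lightweight value container, so a builder can return all four collections
# at once and callers can unpack them by name.
if __name__ == "__main__":
    _example = ImageBuilderResult(
        consumed_files=[], file_errors_map={}, new_images=[], new_image_files=[]
    )
    print(_example._fields)  # ('consumed_files', 'file_errors_map', 'new_images', 'new_image_files')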
|
[
"collections.namedtuple"
] |
[((57, 165), 'collections.namedtuple', 'namedtuple', (['"""ImageBuilderResult"""', "('consumed_files', 'file_errors_map', 'new_images', 'new_image_files')"], {}), "('ImageBuilderResult', ('consumed_files', 'file_errors_map',\n 'new_images', 'new_image_files'))\n", (67, 165), False, 'from collections import namedtuple\n')]
|
"""ESCALATE Capture
Main point of entry for EscalateCAPTURE
"""
import os
import sys
import ast
import xlrd
import logging
import argparse as ap
from log import init
from capture import specify
from capture import devconfig
from utils import globals, data_handling
def escalatecapture(rxndict, vardict):
"""Point of entry into the data pipeline
Manages processing calls to specify, generate, and prepare --> leads to execute
:param rxndict: dictionary of Excel-specified params
:param vardict: dictionary of dev-specified params
:return None:
"""
modlog = logging.getLogger('capture.escalatecapture')
modlog.info("Initializing specify")
specify.datapipeline(rxndict, vardict)
def linkprocess(linkfile):
"""TODO: what was this going to be for?"""
return
def build_rxndict(rxnvarfile):
"""Read Template file and return a dict representation
The rxndict is a mapping of Variables => Values (column B => column d) in the
uncommented rows of the reaction excel file
:param rxnvarfile: path to excel file containing reaction specification
:return rxndict: dictionary representation of reaction specification
"""
rxndict = {}
varfile = rxnvarfile
wb = xlrd.open_workbook(varfile)
sheet = wb.sheet_by_name('WF1')
for i in range(sheet.nrows):
commentval = sheet.cell(i, 0).value
if commentval == '#':
continue
else:
cell_dict_value = sheet.cell(i, 3).value
cell_dict_id = sheet.cell(i, 1).value
cell_dict_type = sheet.cell(i, 4).value
if cell_dict_id == "":
pass
if cell_dict_type == 'list':
rxndict[cell_dict_id] = ast.literal_eval(cell_dict_value)
else:
rxndict[cell_dict_id.strip()] = cell_dict_value
# cannot use globals.get_lab() here since it has not been set
# if rxndict['lab'] == 'MIT_PVLab':
# data_handling.get_user_actions(rxndict, sheet)
return rxndict
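# Illustrative layout of the 'WF1' sheet rows read above (an assumption inferred
# from the cell indices used, not taken from project documentation):
#   column A: '#' marks a comment row that is skipped
#   column B: variable name (cell_dict_id)
#   column D: value (cell_dict_value)
#   column E: type hint; 'list' triggers ast.literal_eval of the value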
if __name__ == "__main__":
parser = ap.ArgumentParser(description='Generate experimental run data')
parser.add_argument('Variables', type=str,
help='Target xls file containing run information specified by the user\
format should be "filename.xlsx"')
parser.add_argument('-s', '--ss', default=0, type=int, choices=[0, 1, 2],
help='0 - quasi-random experiments generate, 1 - Generates stateset\
for exp_1 user specified reagents, 2 - generate prototype run for\
exp_1 user specified reagents')
parser.add_argument('-d', '--debug', default=0, type=int, choices=[0,1,2],
help='0 - complete run generation and upload to google drive,\
1 - retain all tables from gdrive & keep runtime content,\
2 - full offline debugging (no uploading)')
args = parser.parse_args()
challengeproblem = args.ss
rxndict = build_rxndict(args.Variables)
rxndict['challengeproblem'] = challengeproblem
# vardict will hold variables configured by developers
vardict = {
'exefilename': args.Variables,
'challengeproblem': challengeproblem,
'debug': args.debug,
'lab': rxndict['lab']
}
if not os.path.exists('./localfiles'):
os.mkdir('./localfiles')
globals.set_lab(rxndict['lab'])
init.runuidgen(rxndict, vardict)
loggerfile = init.buildlogger(rxndict)
rxndict['logfile'] = loggerfile
# log the initial state of the run
init.initialize(rxndict, vardict)
# TODO: >>>> insert variable tests here <<<<
escalatecapture(rxndict, vardict)
    if vardict['debug'] == 0: # if not debugging
if os.path.exists("./mycred.txt"):
os.remove("./mycred.txt")
if os.path.exists("./capture/user_cli_variables.py"):
os.remove("./capture/user_cli_variables.py")
|
[
"os.mkdir",
"os.remove",
"argparse.ArgumentParser",
"xlrd.open_workbook",
"utils.globals.set_lab",
"os.path.exists",
"log.init.initialize",
"log.init.buildlogger",
"log.init.runuidgen",
"ast.literal_eval",
"logging.getLogger",
"capture.specify.datapipeline"
] |
[((595, 639), 'logging.getLogger', 'logging.getLogger', (['"""capture.escalatecapture"""'], {}), "('capture.escalatecapture')\n", (612, 639), False, 'import logging\n'), ((684, 722), 'capture.specify.datapipeline', 'specify.datapipeline', (['rxndict', 'vardict'], {}), '(rxndict, vardict)\n', (704, 722), False, 'from capture import specify\n'), ((1242, 1269), 'xlrd.open_workbook', 'xlrd.open_workbook', (['varfile'], {}), '(varfile)\n', (1260, 1269), False, 'import xlrd\n'), ((2080, 2143), 'argparse.ArgumentParser', 'ap.ArgumentParser', ([], {'description': '"""Generate experimental run data"""'}), "(description='Generate experimental run data')\n", (2097, 2143), True, 'import argparse as ap\n'), ((3347, 3378), 'utils.globals.set_lab', 'globals.set_lab', (["rxndict['lab']"], {}), "(rxndict['lab'])\n", (3362, 3378), False, 'from utils import globals, data_handling\n'), ((3384, 3416), 'log.init.runuidgen', 'init.runuidgen', (['rxndict', 'vardict'], {}), '(rxndict, vardict)\n', (3398, 3416), False, 'from log import init\n'), ((3435, 3460), 'log.init.buildlogger', 'init.buildlogger', (['rxndict'], {}), '(rxndict)\n', (3451, 3460), False, 'from log import init\n'), ((3541, 3574), 'log.init.initialize', 'init.initialize', (['rxndict', 'vardict'], {}), '(rxndict, vardict)\n', (3556, 3574), False, 'from log import init\n'), ((3277, 3307), 'os.path.exists', 'os.path.exists', (['"""./localfiles"""'], {}), "('./localfiles')\n", (3291, 3307), False, 'import os\n'), ((3317, 3341), 'os.mkdir', 'os.mkdir', (['"""./localfiles"""'], {}), "('./localfiles')\n", (3325, 3341), False, 'import os\n'), ((3722, 3752), 'os.path.exists', 'os.path.exists', (['"""./mycred.txt"""'], {}), "('./mycred.txt')\n", (3736, 3752), False, 'import os\n'), ((3803, 3852), 'os.path.exists', 'os.path.exists', (['"""./capture/user_cli_variables.py"""'], {}), "('./capture/user_cli_variables.py')\n", (3817, 3852), False, 'import os\n'), ((3766, 3791), 'os.remove', 'os.remove', (['"""./mycred.txt"""'], {}), "('./mycred.txt')\n", (3775, 3791), False, 'import os\n'), ((3866, 3910), 'os.remove', 'os.remove', (['"""./capture/user_cli_variables.py"""'], {}), "('./capture/user_cli_variables.py')\n", (3875, 3910), False, 'import os\n'), ((1740, 1773), 'ast.literal_eval', 'ast.literal_eval', (['cell_dict_value'], {}), '(cell_dict_value)\n', (1756, 1773), False, 'import ast\n')]
|
""" CSE Partial Factorization and Post-Processing
The following script will perform partial factorization on SymPy expressions,
which should occur before common subexpression elimination (CSE) to prevent the
identification of undesirable patterns, and perform post-processing on the
the resulting replaced/reduced expressions after the CSE procedure was applied.
"""
# Author: <NAME>
# Email: <EMAIL>
from SIMDExprTree import ExprTree
import sympy as sp
# Input: expr_list = single SymPy expression or list of SymPy expressions
# prefix = string prefix for variable names (i.e. replacement symbols)
# declare = declare negative one symbol (i.e. _NegativeOne_)
# factor = perform partial factorization (excluding negative one)
# Output: modified SymPy expression(s) where all integers and rationals were replaced
# with temporary placeholder variables that allow for partial factorization
def cse_preprocess(expr_list, prefix='', declare=False, factor=True, debug=False):
if not isinstance(expr_list, list):
expr_list = [expr_list]
def expand(a, n):
if n == 2: return sp.Mul(a, a, evaluate=False)
elif n > 2: return sp.Mul(expand(a, n - 1), a, evaluate=False)
return sp.Pow(expand(a, -n), -1, evaluate=False)
_NegativeOne_ = sp.Symbol(prefix + '_NegativeOne_')
map_sym_to_rat, map_rat_to_sym = {}, {}
for i, expr in enumerate(expr_list):
tree = ExprTree(expr)
# Expand power function, preventing replacement of exponent argument
for subtree in tree.preorder(tree.root):
subexpr = subtree.expr
if subexpr.func == sp.Pow:
exponent = subtree.children[1].expr
if exponent.func == sp.Integer and abs(exponent) > 1:
subtree.expr = expand(*subexpr.args)
tree.build(subtree, clear=True)
# Search through expression tree for integers/rationals
for subtree in tree.preorder():
subexpr = subtree.expr
if isinstance(subexpr, sp.Rational) and subexpr != sp.S.NegativeOne:
# If rational < 0, factor out negative and declare positive rational
sign = 1 if subexpr >= 0 else -1
subexpr *= sign
# Check whether rational was already declared, otherwise declare rational
try: repl = map_rat_to_sym[subexpr]
except KeyError:
p, q = subexpr.p, subexpr.q
var_name = prefix + '_Rational_' + str(p) + '_' + str(q) \
if q != 1 else prefix + '_Integer_' + str(p)
repl = sp.Symbol(var_name)
map_sym_to_rat[repl], map_rat_to_sym[subexpr] = subexpr, repl
subtree.expr = repl * sign
if sign < 0: tree.build(subtree, clear=True)
# If declare == True, then declare symbol for -1 or extracted negative
elif declare and subexpr == sp.S.NegativeOne:
try: subtree.expr = map_rat_to_sym[sp.S.NegativeOne]
except KeyError:
repl = _NegativeOne_
map_sym_to_rat[repl], map_rat_to_sym[subexpr] = subexpr, repl
subtree.expr = repl
# If exponent was replaced with symbol (usually -1), then back-substitute
for subtree in tree.preorder(tree.root):
subexpr = subtree.expr
if subexpr.func == sp.Pow:
exponent = subtree.children[1].expr
if exponent.func == sp.Symbol:
subtree.children[1].expr = map_sym_to_rat[exponent]
expr = tree.reconstruct()
# If factor == True, then perform partial factoring
if factor:
# Handle the separate case of function arguments
for subtree in tree.preorder():
if isinstance(subtree.expr, sp.Function):
for var in map_sym_to_rat:
if var != _NegativeOne_:
child = subtree.children[0]
child.expr = sp.collect(child.expr, var)
child.children.clear()
expr = tree.reconstruct()
# Perform partial factoring on the expression(s)
for var in map_sym_to_rat:
if var != _NegativeOne_:
expr = sp.collect(expr, var)
# If debug == True, then back-substitute everything and check difference
if debug:
def lookup_rational(arg):
if arg.func == sp.Symbol:
try: arg = map_sym_to_rat[arg]
except KeyError: pass
return arg
debug_tree = ExprTree(expr)
for subtree in debug_tree.preorder():
subexpr = subtree.expr
if subexpr.func == sp.Symbol:
subtree.expr = lookup_rational(subexpr)
            debug_expr = debug_tree.reconstruct()
expr_diff = expr - debug_expr
if sp.simplify(expr_diff) != 0:
raise Warning('Expression Difference: ' + str(expr_diff))
expr_list[i] = expr
if len(expr_list) == 1:
expr_list = expr_list[0]
return expr_list, map_sym_to_rat
# Input: cse_output = output from SymPy CSE with tuple format: (list of ordered pairs that
# contain substituted symbols and their replaced expressions, reduced SymPy expression)
# Output: output from SymPy CSE where postprocessing, such as back-substitution of addition/product
# of symbols, has been applied to the replaced/reduced expression(s)
def cse_postprocess(cse_output):
replaced, reduced = cse_output
i = 0
while i < len(replaced):
sym, expr = replaced[i]
# Search through replaced expressions for negative symbols
if (expr.func == sp.Mul and len(expr.args) == 2 and \
any((arg.func == sp.Symbol) for arg in expr.args) and \
any((arg == sp.S.NegativeOne or '_NegativeOne_' in str(arg)) for arg in expr.args)):
for k in range(i + 1, len(replaced)):
if sym in replaced[k][1].free_symbols:
replaced[k] = (replaced[k][0], replaced[k][1].subs(sym, expr))
for k in range(len(reduced)):
if sym in reduced[k].free_symbols:
reduced[k] = reduced[k].subs(sym, expr)
# Remove the replaced expression from the list
replaced.pop(i)
if i != 0: i -= 1
# Search through replaced expressions for addition/product of 2 or less symbols
if ((expr.func == sp.Add or expr.func == sp.Mul) and 0 < len(expr.args) < 3 and \
all((arg.func == sp.Symbol or arg.is_integer or arg.is_rational) for arg in expr.args)) or \
(expr.func == sp.Pow and expr.args[0].func == sp.Symbol and expr.args[1] == 2):
sym_count = 0 # Count the number of occurrences of the substituted symbol
for k in range(len(replaced) - i):
# Check if the substituted symbol appears in the replaced expressions
if sym in replaced[i + k][1].free_symbols:
for arg in sp.preorder_traversal(replaced[i + k][1]):
if arg.func == sp.Symbol and str(arg) == str(sym):
sym_count += 1
for k in range(len(reduced)):
# Check if the substituted symbol appears in the reduced expression
if sym in reduced[k].free_symbols:
for arg in sp.preorder_traversal(reduced[k]):
if arg.func == sp.Symbol and str(arg) == str(sym):
sym_count += 1
# If the number of occurrences of the substituted symbol is 2 or less, back-substitute
if 0 < sym_count < 3:
for k in range(i + 1, len(replaced)):
if sym in replaced[k][1].free_symbols:
replaced[k] = (replaced[k][0], replaced[k][1].subs(sym, expr))
for k in range(len(reduced)):
if sym in reduced[k].free_symbols:
reduced[k] = reduced[k].subs(sym, expr)
# Remove the replaced expression from the list
replaced.pop(i); i -= 1
i += 1
return replaced, reduced
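# --- Usage sketch (illustrative addition, not part of the original module) ---
# A typical pipeline runs cse_preprocess before SymPy's CSE and cse_postprocess
# after it; the toy expression below is only an example.
#   x, y = sp.symbols('x y')
#   expr = sp.Rational(1, 2)*x**2 + sp.Rational(3, 4)*x*y + sp.Rational(1, 2)*y**2
#   pre_expr, sym_map = cse_preprocess(expr, prefix='FD')
#   replaced, reduced = cse_postprocess(sp.cse(pre_expr, order='none'))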
|
[
"sympy.Symbol",
"sympy.collect",
"SIMDExprTree.ExprTree",
"sympy.simplify",
"sympy.Mul",
"sympy.preorder_traversal"
] |
[((1318, 1353), 'sympy.Symbol', 'sp.Symbol', (["(prefix + '_NegativeOne_')"], {}), "(prefix + '_NegativeOne_')\n", (1327, 1353), True, 'import sympy as sp\n'), ((1454, 1468), 'SIMDExprTree.ExprTree', 'ExprTree', (['expr'], {}), '(expr)\n', (1462, 1468), False, 'from SIMDExprTree import ExprTree\n'), ((1140, 1168), 'sympy.Mul', 'sp.Mul', (['a', 'a'], {'evaluate': '(False)'}), '(a, a, evaluate=False)\n', (1146, 1168), True, 'import sympy as sp\n'), ((4772, 4786), 'SIMDExprTree.ExprTree', 'ExprTree', (['expr'], {}), '(expr)\n', (4780, 4786), False, 'from SIMDExprTree import ExprTree\n'), ((5084, 5106), 'sympy.simplify', 'sp.simplify', (['expr_diff'], {}), '(expr_diff)\n', (5095, 5106), True, 'import sympy as sp\n'), ((4426, 4447), 'sympy.collect', 'sp.collect', (['expr', 'var'], {}), '(expr, var)\n', (4436, 4447), True, 'import sympy as sp\n'), ((7277, 7318), 'sympy.preorder_traversal', 'sp.preorder_traversal', (['replaced[i + k][1]'], {}), '(replaced[i + k][1])\n', (7298, 7318), True, 'import sympy as sp\n'), ((7646, 7679), 'sympy.preorder_traversal', 'sp.preorder_traversal', (['reduced[k]'], {}), '(reduced[k])\n', (7667, 7679), True, 'import sympy as sp\n'), ((2684, 2703), 'sympy.Symbol', 'sp.Symbol', (['var_name'], {}), '(var_name)\n', (2693, 2703), True, 'import sympy as sp\n'), ((4141, 4168), 'sympy.collect', 'sp.collect', (['child.expr', 'var'], {}), '(child.expr, var)\n', (4151, 4168), True, 'import sympy as sp\n')]
|
# coding=utf-8
from aip import AipOcr
import re
opt_aux_word = ['《', '》']
def get_file_content(file):
with open(file, 'rb') as fp:
return fp.read()
def image_to_str(name, client):
image = get_file_content(name)
text_result = client.basicGeneral(image)
print(text_result)
result = get_question_and_options(text_result)
return result
def init_baidu_ocr(baidu_ocr_config):
app_id, api_key, secret_key = baidu_ocr_config
client = AipOcr(app_id, api_key, secret_key)
return client
# {'words_result': [{'words': '11.代表作之一是《蒙娜丽莎的眼'},
# {'words': '泪》的歌手是?'}, {'words': '林志颖'},
# {'words': '林志炫'}, {'words': '林志玲'}],
# 'log_id': 916087026228727188, 'words_result_num': 5}
def get_question_and_options(text):
if 'error_code' in text:
print('请确保百度OCR配置正确')
exit(-1)
if text['words_result_num'] == 0:
return '', []
result_arr = text['words_result']
option_arr = []
question_str = ''
question_obj, options_obj = get_question(result_arr)
for question in question_obj:
word = question['words']
word = re.sub('^\d+\.*', '', word)
question_str += word
for option in options_obj:
word = option['words']
if word.startswith('《'):
word = word[1:]
if word.endswith('》'):
word = word[:-1]
print(word)
option_arr.append(word)
print(question_str)
print(option_arr)
return question_str, option_arr
# First split the question and options at '?'; if there is no question mark, split by index
def get_question(result_arr):
result_num = len(result_arr)
index = -1
question_obj, options_obj = [], []
for i, result in enumerate(result_arr):
if '?' in result['words']:
index = i
break
if index > -1:
question_obj = result_arr[:index + 1]
options_obj = result_arr[index + 1:]
return question_obj, options_obj
else:
        # Empirically: 4 results mean a one-line question, 5-6 mean a two-line question, 8 or more mean the published answer
if result_num <= 4:
question_obj = result_arr[:1]
options_obj = result_arr[1:]
elif result_num == 5:
question_obj = result_arr[:2]
options_obj = result_arr[2:]
elif result_num == 6: # 暂时
question_obj = result_arr[:2]
options_obj = result_arr[2:]
elif result_num == 7 or result_num == 8:
question_obj = result_arr[:3]
options_obj = result_arr[3:]
return question_obj, options_obj
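# Usage sketch (illustrative; the credentials tuple and image path below are
# placeholders, not real values):
#   client = init_baidu_ocr(('app_id', 'api_key', 'secret_key'))
#   question, options = image_to_str('screenshot.png', client)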
|
[
"aip.AipOcr",
"re.sub"
] |
[((473, 508), 'aip.AipOcr', 'AipOcr', (['app_id', 'api_key', 'secret_key'], {}), '(app_id, api_key, secret_key)\n', (479, 508), False, 'from aip import AipOcr\n'), ((1145, 1174), 're.sub', 're.sub', (['"""^\\\\d+\\\\.*"""', '""""""', 'word'], {}), "('^\\\\d+\\\\.*', '', word)\n", (1151, 1174), False, 'import re\n')]
|
import torch, os
from os import path as osp
from math import ceil
import sys
from yaml import load
from basicsr.data import build_dataloader, build_dataset
from basicsr.utils.options import parse_options
import torch.nn as nn
import torch.nn.functional as F
import torch
from torch.autograd import Variable
import cv2
from copy import deepcopy
import os.path as osp
from torch.nn.parallel import DataParallel, DistributedDataParallel
from basicsr.archs.edvr_arch import EDVR
def chop_forward(model, inp, shave=8, min_size=120000):
# This part will divide your input in 4 small images
b, n, c, h, w = inp.size()
h_half, w_half = h // 2, w // 2
h_size, w_size = h_half + shave, w_half + shave
mod_size = 4
if h_size % mod_size:
        h_size = ceil(h_size/mod_size)*mod_size # ceil() rounds up, so h_size becomes the next multiple of mod_size
if w_size % mod_size:
w_size = ceil(w_size/mod_size)*mod_size
inputlist = [
inp[:, :, :, 0:h_size, 0:w_size],
inp[:, :, :, 0:h_size, (w - w_size):w],
inp[:, :, :, (h - h_size):h, 0:w_size],
inp[:, :, :, (h - h_size):h, (w - w_size):w]]
if w_size * h_size < min_size:
outputlist = []
for i in range(4):
with torch.no_grad():
input_batch = inputlist[i]
output_batch = model(input_batch)
outputlist.append(output_batch)
else:
outputlist = [
chop_forward(model, patch) \
for patch in inputlist]
scale=4
h, w = scale * h, scale * w
h_half, w_half = scale * h_half, scale * w_half
h_size, w_size = scale * h_size, scale * w_size
shave *= scale
with torch.no_grad():
output_ht = Variable(inp.data.new(b, c, h, w))
output_ht[:, :, 0:h_half, 0:w_half] = outputlist[0][:, :, 0:h_half, 0:w_half]
output_ht[:, :, 0:h_half, w_half:w] = outputlist[1][:, :, 0:h_half, (w_size - w + w_half):w_size]
output_ht[:, :, h_half:h, 0:w_half] = outputlist[2][:, :, (h_size - h + h_half):h_size, 0:w_half]
output_ht[:, :, h_half:h, w_half:w] = outputlist[3][:, :, (h_size - h + h_half):h_size, (w_size - w + w_half):w_size]
return output_ht
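# Worked example of the tiling above (illustrative numbers): for a 540x540 frame
# with shave=8, h_half = w_half = 270 and h_size = w_size = 278, padded up to 280
# (the next multiple of mod_size=4). Each of the four overlapping 280x280 tiles
# has 78400 pixels, below the default min_size of 120000, so it is processed
# directly; the x4 outputs are then stitched back into a 2160x2160 frame with
# the overlapping shave region discarded.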
def demo_pipeline(root_path):
    # parse options, set distributed setting, set random seed
opt, args = parse_options(root_path, is_train=False)
print("video path: ",args.video_path)
video_name = osp.basename(args.video_path).split(".")[0]
torch.backends.cudnn.benchmark = True
# create test dataset and dataloader
test_loaders = []
for _, dataset_opt in sorted(opt['datasets'].items()):
test_set = build_dataset(dataset_opt)
test_loader = build_dataloader(
test_set, dataset_opt, num_gpu=opt['num_gpu'], dist=opt['dist'], sampler=None, seed=opt['manual_seed'])
test_loaders.append(test_loader)
# create model
model_config = opt['network_g']
_ = model_config.pop("type", "Unkown")
model = EDVR(**model_config)
device = torch.device('cuda' if opt['num_gpu'] != 0 else 'cpu')
model = model.to(device=device)
param_key='params'
    load_net = torch.load(opt["path"].get("pretrain_network_g", "Unknown"), map_location=lambda storage, loc: storage)
find_unused_parameters = opt.get('find_unused_parameters', False)
model = DistributedDataParallel(
model, device_ids=[torch.cuda.current_device()], find_unused_parameters=find_unused_parameters)
# load weights
if param_key is not None:
if param_key not in load_net and 'params' in load_net:
param_key = 'params'
load_net = load_net[param_key]
for k, v in deepcopy(load_net).items():
load_net['module.' + k] = v
load_net.pop(k)
model.load_state_dict(load_net, strict=True)
model.eval()
# set min size
min_size = 921599
# test clips
for test_loader in test_loaders:
for idx, data in enumerate(test_loader):
frame_name = "{:08d}.png".format(idx)
frame_name = osp.join("sr_video", video_name, frame_name)
if osp.exists(frame_name): continue
height, width = data.size()[-2:]
if height * width < min_size:
output = model(data)
else:
output = chop_forward(model, data)
print("imwrite {:08d}.png. | totol: {}".format(idx, len(test_loader)))
output = torch.squeeze(output.data.cpu(), dim=0).clamp(0,1).permute(1,2,0).numpy()
cv2.imwrite(frame_name, cv2.cvtColor(output*255, cv2.COLOR_BGR2RGB), [cv2.IMWRITE_PNG_COMPRESSION, 0])
if __name__ == '__main__':
root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir))
demo_pipeline(root_path)
|
[
"copy.deepcopy",
"math.ceil",
"os.path.basename",
"cv2.cvtColor",
"basicsr.data.build_dataset",
"basicsr.archs.edvr_arch.EDVR",
"os.path.exists",
"torch.device",
"basicsr.utils.options.parse_options",
"torch.no_grad",
"os.path.join",
"basicsr.data.build_dataloader",
"torch.cuda.current_device"
] |
[((2310, 2350), 'basicsr.utils.options.parse_options', 'parse_options', (['root_path'], {'is_train': '(False)'}), '(root_path, is_train=False)\n', (2323, 2350), False, 'from basicsr.utils.options import parse_options\n'), ((2976, 2996), 'basicsr.archs.edvr_arch.EDVR', 'EDVR', ([], {}), '(**model_config)\n', (2980, 2996), False, 'from basicsr.archs.edvr_arch import EDVR\n'), ((3011, 3065), 'torch.device', 'torch.device', (["('cuda' if opt['num_gpu'] != 0 else 'cpu')"], {}), "('cuda' if opt['num_gpu'] != 0 else 'cpu')\n", (3023, 3065), False, 'import torch\n'), ((1698, 1713), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1711, 1713), False, 'import torch\n'), ((2641, 2667), 'basicsr.data.build_dataset', 'build_dataset', (['dataset_opt'], {}), '(dataset_opt)\n', (2654, 2667), False, 'from basicsr.data import build_dataloader, build_dataset\n'), ((2690, 2815), 'basicsr.data.build_dataloader', 'build_dataloader', (['test_set', 'dataset_opt'], {'num_gpu': "opt['num_gpu']", 'dist': "opt['dist']", 'sampler': 'None', 'seed': "opt['manual_seed']"}), "(test_set, dataset_opt, num_gpu=opt['num_gpu'], dist=opt[\n 'dist'], sampler=None, seed=opt['manual_seed'])\n", (2706, 2815), False, 'from basicsr.data import build_dataloader, build_dataset\n'), ((4674, 4716), 'os.path.join', 'osp.join', (['__file__', 'osp.pardir', 'osp.pardir'], {}), '(__file__, osp.pardir, osp.pardir)\n', (4682, 4716), True, 'import os.path as osp\n'), ((771, 794), 'math.ceil', 'ceil', (['(h_size / mod_size)'], {}), '(h_size / mod_size)\n', (775, 794), False, 'from math import ceil\n'), ((909, 932), 'math.ceil', 'ceil', (['(w_size / mod_size)'], {}), '(w_size / mod_size)\n', (913, 932), False, 'from math import ceil\n'), ((3659, 3677), 'copy.deepcopy', 'deepcopy', (['load_net'], {}), '(load_net)\n', (3667, 3677), False, 'from copy import deepcopy\n'), ((4035, 4079), 'os.path.join', 'osp.join', (['"""sr_video"""', 'video_name', 'frame_name'], {}), "('sr_video', video_name, frame_name)\n", (4043, 4079), True, 'import os.path as osp\n'), ((4096, 4118), 'os.path.exists', 'osp.exists', (['frame_name'], {}), '(frame_name)\n', (4106, 4118), True, 'import os.path as osp\n'), ((1256, 1271), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1269, 1271), False, 'import torch\n'), ((2412, 2441), 'os.path.basename', 'osp.basename', (['args.video_path'], {}), '(args.video_path)\n', (2424, 2441), True, 'import os.path as osp\n'), ((3380, 3407), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (3405, 3407), False, 'import torch\n'), ((4538, 4583), 'cv2.cvtColor', 'cv2.cvtColor', (['(output * 255)', 'cv2.COLOR_BGR2RGB'], {}), '(output * 255, cv2.COLOR_BGR2RGB)\n', (4550, 4583), False, 'import cv2\n')]
|
from pydeck_carto import load_carto_credentials
def test_load_carto_credentials(requests_mock):
requests_mock.post(
"https://auth.carto.com/oauth/token", text='{"access_token":"asdf1234"}'
)
creds = load_carto_credentials("tests/fixtures/mock_credentials.json")
assert creds == {
"apiVersion": "v3",
"apiBaseUrl": "https://api.carto.com",
"accessToken": "asdf1234",
}
|
[
"pydeck_carto.load_carto_credentials"
] |
[((221, 283), 'pydeck_carto.load_carto_credentials', 'load_carto_credentials', (['"""tests/fixtures/mock_credentials.json"""'], {}), "('tests/fixtures/mock_credentials.json')\n", (243, 283), False, 'from pydeck_carto import load_carto_credentials\n')]
|
"""Regular few-shot episode sampler.
Author: <NAME> (<EMAIL>)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from fewshot.data.registry import RegisterSampler
from fewshot.data.samplers.incremental_sampler import IncrementalSampler
@RegisterSampler('fewshot')
class FewshotSampler(IncrementalSampler):
"""Standard few-shot learning sampler."""
def __init__(self, seed):
super(FewshotSampler, self).__init__(seed)
def sample_episode_classes(self, n, nshot=1, **kwargs):
"""See EpisodeSampler for documentation."""
return super(FewshotSampler, self).sample_episode_classes(
n, nshot_min=nshot, nshot_max=nshot)
|
[
"fewshot.data.registry.RegisterSampler"
] |
[((304, 330), 'fewshot.data.registry.RegisterSampler', 'RegisterSampler', (['"""fewshot"""'], {}), "('fewshot')\n", (319, 330), False, 'from fewshot.data.registry import RegisterSampler\n')]
|
# -*- coding: utf-8 -*-
"""Wrapper to run RCSCON from the command line.
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdp, pkdc, pkdlog
from sirepo import simulation_db
from sirepo.template import sdds_util
from sirepo.template import template_common
import numpy as np
import py.path
import sirepo.template.rcscon as template
def run(cfg_dir):
template_common.exec_parameters()
template.extract_report_data(
py.path.local(cfg_dir),
simulation_db.read_json(template_common.INPUT_BASE_NAME),
)
def run_background(cfg_dir):
data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
if data.report == 'elegantAnimation':
return _run_elegant_simulation(cfg_dir)
template_common.exec_parameters()
def _build_arrays():
sigma = sdds_util.read_sdds_pages(
'run_setup.sigma.sdds',
['s', 's1', 's12', 's2', 's3', 's34', 's4', 's5', 's56', 's6'],
)
errors = _error_values()
inputs = []
outputs = []
k = 0
for i in range(len(errors)):
for j in range(int(len(sigma.s) / len(errors))):
initial_index = k - j
inputs.append([
errors[i, 1], errors[i, 2], sigma.s[k],
sigma.s1[initial_index], sigma.s12[initial_index], sigma.s2[initial_index],
sigma.s3[initial_index], sigma.s34[initial_index], sigma.s4[initial_index],
sigma.s5[initial_index], sigma.s56[initial_index], sigma.s6[initial_index],
])
outputs.append([
sigma.s1[k], sigma.s12[k], sigma.s2[k],
sigma.s3[k], sigma.s34[k], sigma.s4[k],
sigma.s5[k], sigma.s56[k], sigma.s6[k],
])
k+=1
return np.asarray(inputs), np.asarray(outputs)
def _error_values():
pages = sdds_util.read_sdds_pages(
'error_control.error_log.sdds',
['ElementParameter', 'ParameterValue'],
True)
res = []
for page in range(len(pages.ElementParameter)):
values = PKDict()
for idx in range(len(pages.ElementParameter[page])):
p = pages.ElementParameter[page][idx]
v = pages.ParameterValue[page][idx]
if p not in values:
values[p] = []
values[p].append(v)
res.append(
[page, np.mean(np.asarray(values.PHASE)), np.sum(np.asarray(values.VOLT))],
)
return np.asarray(res)
def _run_elegant_simulation(cfg_dir):
import sirepo.pkcli.elegant
sirepo.pkcli.elegant.run_elegant()
inputs, outputs = _build_arrays()
common = [
's1', 's12', 's2',
's3', 's34', 's4',
's5', 's56', 's6',
]
in_cols = ['average phase', 'total volts', 'position']
in_header = ','.join(in_cols + ['initial ' + x for x in common])
out_header = ','.join(common)
np.savetxt('inputs.csv', inputs, delimiter=',', comments='', header=in_header)
np.savetxt('outputs.csv', outputs, delimiter=',', comments='', header=out_header)
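# Note on the files written above (added for clarity): inputs.csv has columns
# 'average phase', 'total volts', 'position' followed by the initial values of
# s1, s12, s2, s3, s34, s4, s5, s56, s6; outputs.csv has the corresponding
# downstream s1 ... s6 columns, one row per sampled position and error page.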
|
[
"sirepo.template.template_common.exec_parameters",
"numpy.asarray",
"numpy.savetxt",
"sirepo.simulation_db.read_json",
"sirepo.template.sdds_util.read_sdds_pages",
"pykern.pkcollections.PKDict"
] |
[((569, 602), 'sirepo.template.template_common.exec_parameters', 'template_common.exec_parameters', ([], {}), '()\n', (600, 602), False, 'from sirepo.template import template_common\n'), ((783, 839), 'sirepo.simulation_db.read_json', 'simulation_db.read_json', (['template_common.INPUT_BASE_NAME'], {}), '(template_common.INPUT_BASE_NAME)\n', (806, 839), False, 'from sirepo import simulation_db\n'), ((934, 967), 'sirepo.template.template_common.exec_parameters', 'template_common.exec_parameters', ([], {}), '()\n', (965, 967), False, 'from sirepo.template import template_common\n'), ((1003, 1120), 'sirepo.template.sdds_util.read_sdds_pages', 'sdds_util.read_sdds_pages', (['"""run_setup.sigma.sdds"""', "['s', 's1', 's12', 's2', 's3', 's34', 's4', 's5', 's56', 's6']"], {}), "('run_setup.sigma.sdds', ['s', 's1', 's12', 's2',\n 's3', 's34', 's4', 's5', 's56', 's6'])\n", (1028, 1120), False, 'from sirepo.template import sdds_util\n'), ((2026, 2134), 'sirepo.template.sdds_util.read_sdds_pages', 'sdds_util.read_sdds_pages', (['"""error_control.error_log.sdds"""', "['ElementParameter', 'ParameterValue']", '(True)'], {}), "('error_control.error_log.sdds', [\n 'ElementParameter', 'ParameterValue'], True)\n", (2051, 2134), False, 'from sirepo.template import sdds_util\n'), ((2629, 2644), 'numpy.asarray', 'np.asarray', (['res'], {}), '(res)\n', (2639, 2644), True, 'import numpy as np\n'), ((3062, 3140), 'numpy.savetxt', 'np.savetxt', (['"""inputs.csv"""', 'inputs'], {'delimiter': '""","""', 'comments': '""""""', 'header': 'in_header'}), "('inputs.csv', inputs, delimiter=',', comments='', header=in_header)\n", (3072, 3140), True, 'import numpy as np\n'), ((3145, 3231), 'numpy.savetxt', 'np.savetxt', (['"""outputs.csv"""', 'outputs'], {'delimiter': '""","""', 'comments': '""""""', 'header': 'out_header'}), "('outputs.csv', outputs, delimiter=',', comments='', header=\n out_header)\n", (3155, 3231), True, 'import numpy as np\n'), ((677, 733), 'sirepo.simulation_db.read_json', 'simulation_db.read_json', (['template_common.INPUT_BASE_NAME'], {}), '(template_common.INPUT_BASE_NAME)\n', (700, 733), False, 'from sirepo import simulation_db\n'), ((1951, 1969), 'numpy.asarray', 'np.asarray', (['inputs'], {}), '(inputs)\n', (1961, 1969), True, 'import numpy as np\n'), ((1971, 1990), 'numpy.asarray', 'np.asarray', (['outputs'], {}), '(outputs)\n', (1981, 1990), True, 'import numpy as np\n'), ((2237, 2245), 'pykern.pkcollections.PKDict', 'PKDict', ([], {}), '()\n', (2243, 2245), False, 'from pykern.pkcollections import PKDict\n'), ((2547, 2571), 'numpy.asarray', 'np.asarray', (['values.PHASE'], {}), '(values.PHASE)\n', (2557, 2571), True, 'import numpy as np\n'), ((2581, 2604), 'numpy.asarray', 'np.asarray', (['values.VOLT'], {}), '(values.VOLT)\n', (2591, 2604), True, 'import numpy as np\n')]
|
# Generated by Django 2.2.10 on 2020-02-05 01:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dcim', '0092_fix_rack_outer_unit'),
]
operations = [
migrations.AddField(
model_name='device',
name='cpus',
field=models.PositiveSmallIntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='device',
name='disk',
field=models.PositiveIntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='device',
name='memory',
field=models.PositiveIntegerField(blank=True, null=True),
),
]
|
[
"django.db.models.PositiveIntegerField",
"django.db.models.PositiveSmallIntegerField"
] |
[((331, 386), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (363, 386), False, 'from django.db import migrations, models\n'), ((504, 554), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (531, 554), False, 'from django.db import migrations, models\n'), ((674, 724), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (701, 724), False, 'from django.db import migrations, models\n')]
|
from django import forms
from django.core.exceptions import ValidationError
from django_select2.forms import Select2MultipleWidget
from tracker.models import Track, Tracker
class TrackerForm(forms.ModelForm):
class Meta:
model = Tracker
fields = ('nom', 'icone', 'color')
widgets = {
'nom': forms.TextInput(attrs={'class': 'form-control'})
}
class TrackForm(forms.ModelForm):
datetime = forms.DateTimeField(
required=True,
input_formats=['%Y-%m-%dT%H:%M']
)
class Meta:
model = Track
fields = ('commentaire', 'datetime')
widgets = {
'tracker': forms.HiddenInput(),
'commentaire': forms.TextInput(attrs={'placeholder': 'Commentaire facultatif', 'class': 'form-control'})
}
labels = {'commentaire': 'Ajouter un nouveau track'}
class SelectTrackersForm(forms.Form):
trackers = forms.ModelMultipleChoiceField(
label='Sélectionner des trackers à comparer',
queryset=Tracker.objects.all(),
widget=Select2MultipleWidget(attrs={'class': 'form-control'})
)
def __init__(self, user, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['trackers'].queryset = user.profil.trackers.all()
def clean_trackers(self):
trackers = self.cleaned_data.get('trackers')
if len(trackers) < 2:
raise ValidationError('Veuillez sélectionner au minimum 2 trackers.')
return trackers
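# Usage sketch (illustrative; the surrounding view code is not part of this module):
#   form = SelectTrackersForm(request.user, request.GET)
#   if form.is_valid():
#       trackers = form.cleaned_data['trackers']  # at least two of the user's trackers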
|
[
"django.core.exceptions.ValidationError",
"django.forms.DateTimeField",
"django.forms.TextInput",
"django_select2.forms.Select2MultipleWidget",
"django.forms.HiddenInput",
"tracker.models.Tracker.objects.all"
] |
[((444, 512), 'django.forms.DateTimeField', 'forms.DateTimeField', ([], {'required': '(True)', 'input_formats': "['%Y-%m-%dT%H:%M']"}), "(required=True, input_formats=['%Y-%m-%dT%H:%M'])\n", (463, 512), False, 'from django import forms\n'), ((334, 382), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (349, 382), False, 'from django import forms\n'), ((662, 681), 'django.forms.HiddenInput', 'forms.HiddenInput', ([], {}), '()\n', (679, 681), False, 'from django import forms\n'), ((710, 803), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'placeholder': 'Commentaire facultatif', 'class': 'form-control'}"}), "(attrs={'placeholder': 'Commentaire facultatif', 'class':\n 'form-control'})\n", (725, 803), False, 'from django import forms\n'), ((1029, 1050), 'tracker.models.Tracker.objects.all', 'Tracker.objects.all', ([], {}), '()\n', (1048, 1050), False, 'from tracker.models import Track, Tracker\n'), ((1067, 1121), 'django_select2.forms.Select2MultipleWidget', 'Select2MultipleWidget', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (1088, 1121), False, 'from django_select2.forms import Select2MultipleWidget\n'), ((1420, 1483), 'django.core.exceptions.ValidationError', 'ValidationError', (['"""Veuillez sélectionner au minimum 2 trackers."""'], {}), "('Veuillez sélectionner au minimum 2 trackers.')\n", (1435, 1483), False, 'from django.core.exceptions import ValidationError\n')]
|
import datetime as dt
from datetime import datetime
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from sqlalchemy import inspect
from dateutil.relativedelta import relativedelta
from flask import Flask, jsonify
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# We can view all of the classes that automap found
Base.classes.keys()
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(bind=engine)
# Flask Setup
app = Flask(__name__)
# Flask Routes
# @app.route("/") - List all routes that are available.
@app.route("/")
def home_page():
"""List all routes."""
return (
f"All Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/start<br/>"
f"/api/v1.0/start-end<br/>"
)
# /api/v1.0/precipitation
# Convert the query results to a dictionary using date as the key and prcp as the value.
# Return the JSON representation of your dictionary.
@app.route("/api/v1.0/precipitation/")
def precipitation():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Return a list of all precipitation amounts"""
# Retrieve precipitation data
    precip = (session.query(Measurement.date, Measurement.prcp).order_by(Measurement.date))
session.close()
# Convert the query results to a dictionary using date as the key and prcp as the value.
prcp_list = []
for result in precip:
prcp_dict = {}
prcp_dict["date"] = result[0]
prcp_dict["prcp"] = result[1]
prcp_list.append(prcp_dict)
return jsonify(prcp_list)
# /api/v1.0/stations Return a JSON list of stations from the dataset.
@app.route("/api/v1.0/stations/")
def stations():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Return a list of all active stations"""
# Query all active stations
results_stations = session.query(Station).all()
#session.close()
list_stations = []
for station in results_stations:
station_dict = {}
station_dict["id"] = station.id
station_dict["station"] = station.station
station_dict["name"] = station.name
station_dict["latitude"] = station.latitude
station_dict["longitude"] = station.longitude
station_dict["elevation"] = station.elevation
list_stations.append(station_dict)
return jsonify(list_stations)
# /api/v1.0/tobs
# Query the dates and temperature observations of the most active station for the last year of data.
# Return a JSON list of temperature observations (TOBS) for the previous year.
@app.route("/api/v1.0/tobs")
def tobs():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Return a list of all tobs"""
# Determine the last date and year ago
latest_date = (session.query(Measurement.date).order_by(Measurement.date.desc()).first())
latest = latest_date[0]
# Calculate the date 1 year ago from the last data point in the database
latest = dt.datetime.strptime(latest, '%Y-%m-%d')
latest = latest.date()
year_ago = latest - relativedelta(days=365)
# Determine active stations and order by most active
active_stations = session.query(Measurement.station, func.count(Measurement.station)).group_by(Measurement.station).order_by(func.count(Measurement.station).desc()).all()
# Identify most active station
most_active = active_stations[0][0]
# Query the dates and temperature observations of the most active station for the last year of data.
temp_data = session.query(Measurement.date, Measurement.tobs). filter(Measurement.date >= year_ago).filter(Measurement.station==most_active).all()
session.close()
# Return a list of all tobs
all_tobs = []
for tob in temp_data:
tobs_dict = {}
tobs_dict["date"] = tob.date
tobs_dict["tobs"] = tob.tobs
all_tobs.append(tobs_dict)
return jsonify(all_tobs)
# /api/v1.0/<start>
# Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range.
# When given the start only, calculate TMIN, TAVG, and TMAX for all dates greater than and equal to the start date.
@app.route("/api/v1.0/start")
def start():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Start date"""
# Design a query to retrieve the last 12 months of precipitation data and plot the results
latest_date = (session.query(Measurement.date).order_by(Measurement.date.desc()).first())
latest = latest_date[0]
# Calculate the date 1 year ago from the last data point in the database
latest = dt.datetime.strptime(latest, '%Y-%m-%d')
latest = latest.date()
year_ago = latest - relativedelta(days=365)
active_stations = session.query(Measurement.station, func.count(Measurement.station)).group_by(Measurement.station).order_by(func.count(Measurement.station).desc()).all()
most_active = active_stations[0][0]
results = session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).filter(Measurement.station==most_active).filter(Measurement.date >= year_ago).all()
session.close()
return jsonify(results)
# /api/v1.0/<api/v1.0/start-end
# Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range.
# When given the start and the end date, calculate the TMIN, TAVG, and TMAX for dates between the start and end date inclusive.
@app.route("/api/v1.0/start-end")
def start_end():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Start - End date"""
# Design a query to retrieve the last 12 months of precipitation data and plot the results
latest_date = (session.query(Measurement.date).order_by(Measurement.date.desc()).first())
latest = latest_date[0]
# Calculate the date 1 year ago from the last data point in the database
latest = dt.datetime.strptime(latest, '%Y-%m-%d')
latest = latest.date()
year_ago = latest - relativedelta(days=365)
active_stations = session.query(Measurement.station, func.count(Measurement.station)).group_by(Measurement.station).order_by(func.count(Measurement.station).desc()).all()
most_active = active_stations[0][0]
results = session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).filter(Measurement.station==most_active).filter(Measurement.date >= year_ago).filter(Measurement.date <= latest).all()
session.close()
return jsonify(results)
if __name__ == '__main__':
app.run(debug=True)
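# Example requests (illustrative):
#   GET /api/v1.0/precipitation/  -> [{"date": ..., "prcp": ...}, ...]
#   GET /api/v1.0/stations/       -> list of station metadata dicts
#   GET /api/v1.0/tobs            -> last year of observations for the most active station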
|
[
"sqlalchemy.func.avg",
"flask.Flask",
"dateutil.relativedelta.relativedelta",
"sqlalchemy.orm.Session",
"flask.jsonify",
"datetime.datetime.strptime",
"sqlalchemy.func.min",
"sqlalchemy.func.count",
"sqlalchemy.create_engine",
"sqlalchemy.ext.automap.automap_base",
"sqlalchemy.func.max"
] |
[((360, 410), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///Resources/hawaii.sqlite"""'], {}), "('sqlite:///Resources/hawaii.sqlite')\n", (373, 410), False, 'from sqlalchemy import create_engine, func\n'), ((467, 481), 'sqlalchemy.ext.automap.automap_base', 'automap_base', ([], {}), '()\n', (479, 481), False, 'from sqlalchemy.ext.automap import automap_base\n'), ((777, 797), 'sqlalchemy.orm.Session', 'Session', ([], {'bind': 'engine'}), '(bind=engine)\n', (784, 797), False, 'from sqlalchemy.orm import Session\n'), ((819, 834), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (824, 834), False, 'from flask import Flask, jsonify\n'), ((1494, 1509), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (1501, 1509), False, 'from sqlalchemy.orm import Session\n'), ((2011, 2029), 'flask.jsonify', 'jsonify', (['prcp_list'], {}), '(prcp_list)\n', (2018, 2029), False, 'from flask import Flask, jsonify\n'), ((2223, 2238), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (2230, 2238), False, 'from sqlalchemy.orm import Session\n'), ((2865, 2887), 'flask.jsonify', 'jsonify', (['list_stations'], {}), '(list_stations)\n', (2872, 2887), False, 'from flask import Flask, jsonify\n'), ((3204, 3219), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (3211, 3219), False, 'from sqlalchemy.orm import Session\n'), ((3526, 3566), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['latest', '"""%Y-%m-%d"""'], {}), "(latest, '%Y-%m-%d')\n", (3546, 3566), True, 'import datetime as dt\n'), ((4500, 4517), 'flask.jsonify', 'jsonify', (['all_tobs'], {}), '(all_tobs)\n', (4507, 4517), False, 'from flask import Flask, jsonify\n'), ((4911, 4926), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (4918, 4926), False, 'from sqlalchemy.orm import Session\n'), ((5272, 5312), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['latest', '"""%Y-%m-%d"""'], {}), "(latest, '%Y-%m-%d')\n", (5292, 5312), True, 'import datetime as dt\n'), ((5853, 5869), 'flask.jsonify', 'jsonify', (['results'], {}), '(results)\n', (5860, 5869), False, 'from flask import Flask, jsonify\n'), ((6301, 6316), 'sqlalchemy.orm.Session', 'Session', (['engine'], {}), '(engine)\n', (6308, 6316), False, 'from sqlalchemy.orm import Session\n'), ((6674, 6714), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['latest', '"""%Y-%m-%d"""'], {}), "(latest, '%Y-%m-%d')\n", (6694, 6714), True, 'import datetime as dt\n'), ((7315, 7331), 'flask.jsonify', 'jsonify', (['results'], {}), '(results)\n', (7322, 7331), False, 'from flask import Flask, jsonify\n'), ((3618, 3641), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(365)'}), '(days=365)\n', (3631, 3641), False, 'from dateutil.relativedelta import relativedelta\n'), ((5364, 5387), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(365)'}), '(days=365)\n', (5377, 5387), False, 'from dateutil.relativedelta import relativedelta\n'), ((6766, 6789), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(365)'}), '(days=365)\n', (6779, 6789), False, 'from dateutil.relativedelta import relativedelta\n'), ((3838, 3869), 'sqlalchemy.func.count', 'func.count', (['Measurement.station'], {}), '(Measurement.station)\n', (3848, 3869), False, 'from sqlalchemy import create_engine, func\n'), ((5522, 5553), 'sqlalchemy.func.count', 'func.count', (['Measurement.station'], {}), '(Measurement.station)\n', (5532, 5553), False, 'from sqlalchemy import create_engine, func\n'), ((6925, 6956), 'sqlalchemy.func.count', 'func.count', (['Measurement.station'], {}), '(Measurement.station)\n', (6935, 6956), False, 'from sqlalchemy import create_engine, func\n'), ((3766, 3797), 'sqlalchemy.func.count', 'func.count', (['Measurement.station'], {}), '(Measurement.station)\n', (3776, 3797), False, 'from sqlalchemy import create_engine, func\n'), ((5450, 5481), 'sqlalchemy.func.count', 'func.count', (['Measurement.station'], {}), '(Measurement.station)\n', (5460, 5481), False, 'from sqlalchemy import create_engine, func\n'), ((5648, 5674), 'sqlalchemy.func.min', 'func.min', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (5656, 5674), False, 'from sqlalchemy import create_engine, func\n'), ((5676, 5702), 'sqlalchemy.func.max', 'func.max', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (5684, 5702), False, 'from sqlalchemy import create_engine, func\n'), ((5704, 5730), 'sqlalchemy.func.avg', 'func.avg', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (5712, 5730), False, 'from sqlalchemy import create_engine, func\n'), ((6853, 6884), 'sqlalchemy.func.count', 'func.count', (['Measurement.station'], {}), '(Measurement.station)\n', (6863, 6884), False, 'from sqlalchemy import create_engine, func\n'), ((7058, 7084), 'sqlalchemy.func.min', 'func.min', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (7066, 7084), False, 'from sqlalchemy import create_engine, func\n'), ((7086, 7112), 'sqlalchemy.func.max', 'func.max', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (7094, 7112), False, 'from sqlalchemy import create_engine, func\n'), ((7114, 7140), 'sqlalchemy.func.avg', 'func.avg', (['Measurement.tobs'], {}), '(Measurement.tobs)\n', (7122, 7140), False, 'from sqlalchemy import create_engine, func\n')]
|
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import pandas as pd
import os
dam_cols_ap2 = ['co2_dam', 'so2_dam_ap2', 'nox_dam_ap2', 'pm25_dam_ap2']
dam_cols_eas = ['co2_dam', 'so2_dam_eas', 'nox_dam_eas', 'pm25_dam_eas']
# Plotting total damage stacked plot
def plot_total_damages(dam_type, intervention_effects_df, example_case, title=False):
fontsize=18
plt.rcParams['hatch.linewidth'] = 0.5
sns.set(style="whitegrid", color_codes=True)
dam_cols = dam_cols_eas if dam_type == 'EASIUR' else dam_cols_ap2
se_col = 'dam_{}-se'.format('eas' if dam_type == 'EASIUR' else 'ap2')
df = get_onelabel_formatted(dam_cols, intervention_effects_df, example_case)
if example_case == 'building_lighting':
df = (df.set_index(['spat', 'kind', 'time'])).reset_index()
elif example_case == 'demand_response':
df = (df.set_index(['spat', 'kind', 'time'])/1e6).reset_index() # millions of dollars
elif example_case == 'summer_load':
df = (df.set_index(['spat', 'kind', 'time'])/1e9).reset_index() # billions of dollars
df_cum = df.set_index(['spat', 'kind', 'time']).cumsum(axis=1).reset_index()
# Stacked bar plot
    g = sns.FacetGrid(data=df_cum, col='spat', height=3, aspect=1)
hatches = ['||', '///', '', '\\\\\\']
hue_orders = dict([('building_lighting', ['Annual', 'Monthly', 'Monthly TOD']),
('demand_response', ['Annual', 'Monthly']),
('summer_load', ['Annual', 'Monthly', 'Monthly TOD', 'Hourly'])])
colors = dict([('building_lighting', [0,2,3]), ('demand_response', [0,2]), ('summer_load', [0,2,3,4])])
g.map(sns.barplot, 'kind', dam_cols[-1], 'time',
hue_order=hue_orders[example_case], order=['Marginal', 'Average'],
palette=[sns.color_palette('muted')[x] for x in colors[example_case]], edgecolor='black', hatch=hatches[0])
g.map(sns.barplot, 'kind', dam_cols[-2], 'time',
hue_order=hue_orders[example_case], order=['Marginal', 'Average'],
palette=[sns.color_palette('muted')[x] for x in colors[example_case]], edgecolor='black', hatch=hatches[1])
g.map(sns.barplot, 'kind', dam_cols[-3], 'time',
hue_order=hue_orders[example_case], order=['Marginal', 'Average'],
palette=[sns.color_palette('muted')[x] for x in colors[example_case]], edgecolor='black', hatch=hatches[2])
g.map(sns.barplot, 'kind', dam_cols[-4], 'time',
hue_order=hue_orders[example_case], order=['Marginal', 'Average'],
palette=[sns.color_palette('muted')[x] for x in colors[example_case]], edgecolor='black', hatch=hatches[3]).set_titles('{col_name}')
    g.despine(left=True)
# Legend, fontsize, and other formatting
xoffset=0.035
for i, ax in enumerate(g.axes.flatten()):
ax.set_xlabel('') # No x-label
if i == 0:
# y label on left plot
dollar_units = dict([('building_lighting', ''), ('demand_response', ' millions'), ('summer_load', ' billions')])
ax.set_ylabel('Total damages\n{}(\${})'.format(
'' if example_case == 'summer_load' else 'avoided ',
dollar_units[example_case]))
# pollutants legend
leg_dict = dict(zip(dam_cols, ['CO$_2$', 'SO$_2$', 'NO$_x$', 'PM$_{{2.5}}$']))
dam_patches = []
for dam, hatch in zip(dam_cols, hatches[::-1]):
patch = mpatches.Patch(facecolor='white', label=leg_dict[dam], edgecolor='black', hatch=hatch)
dam_patches.append(patch)
offsets = dict([('building_lighting', (0.16, -0.175)), ('demand_response', (0.18, -0.17)), ('summer_load', (0.16, -0.175))])
lgd = ax.legend(handles=dam_patches, loc='center left',
bbox_to_anchor=(0.3+xoffset, -0.15), ncol=4, frameon=True, fontsize=fontsize,
bbox_transform=plt.gcf().transFigure)
plt.text(offsets[example_case][0]+xoffset, offsets[example_case][1], 'Pollutants:', transform=plt.gcf().transFigure,
fontsize=fontsize, fontweight='bold')
lgd.get_frame().set_edgecolor('white')
if i == 1:
# temporal scope legend (same length as pollutants legend for alignment)
if example_case == 'building_lighting':
blank_patch = mpatches.Patch(color='white', label='')
ann_patch = mpatches.Patch(color=sns.color_palette('muted')[0], label='Annual', edgecolor='black')
month_patch = mpatches.Patch(color=sns.color_palette('muted')[2], label='Monthly', edgecolor='black')
tod_patch = mpatches.Patch(color=sns.color_palette('muted')[3], label='Monthly TOD', edgecolor='black')
time_patches = [blank_patch, ann_patch, month_patch, tod_patch, blank_patch]
lgd2 = ax.legend(handles=time_patches, loc='center left',
bbox_to_anchor=(0.19+xoffset, -0.025), ncol=5, frameon=True, fontsize=fontsize,
bbox_transform=plt.gcf().transFigure)
plt.text(0.09+xoffset, -0.045, 'Temporal scopes:', transform=plt.gcf().transFigure,
fontsize=fontsize, fontweight='bold')
elif example_case == 'demand_response':
blank_patch = mpatches.Patch(color='white', label='')
ann_patch = mpatches.Patch(color=sns.color_palette('muted')[0], label='Annual', edgecolor='black')
tod_patch = mpatches.Patch(color=sns.color_palette('muted')[2], label='Monthly', edgecolor='black')
time_patches = [blank_patch, ann_patch, tod_patch, blank_patch]
lgd2 = ax.legend(handles=time_patches, loc='center left',
bbox_to_anchor=(0.3+xoffset, -0.0115), ncol=4, frameon=True, fontsize=fontsize,
bbox_transform=plt.gcf().transFigure)
plt.text(0.11+xoffset, -0.0425, 'Temporal scopes:', transform=plt.gcf().transFigure,
fontsize=fontsize, fontweight='bold')
elif example_case == 'summer_load':
blank_patch = mpatches.Patch(color='white', label='')
ann_patch = mpatches.Patch(color=sns.color_palette('muted')[0], label='Annual', edgecolor='black')
month_patch = mpatches.Patch(color=sns.color_palette('muted')[2], label='Monthly', edgecolor='black')
tod_patch = mpatches.Patch(color=sns.color_palette('muted')[3], label='Monthly TOD', edgecolor='black')
hr_patch = mpatches.Patch(color=sns.color_palette('muted')[4], label='Hourly', edgecolor='black')
time_patches = [ann_patch, month_patch, tod_patch, hr_patch]
lgd2 = ax.legend(handles=time_patches, loc='center left',
bbox_to_anchor=(0.27+xoffset, -0.025), ncol=4, frameon=True, fontsize=fontsize-1,
bbox_transform=plt.gcf().transFigure)
plt.text(0.09+xoffset, -0.045, 'Temporal scopes:', transform=plt.gcf().transFigure,
fontsize=fontsize, fontweight='bold')
lgd2.get_frame().set_edgecolor('white')
# Annotate baseline
# For building lighting: PJM fossil-plus marginal monthly TOD
# For demand response: PJM fossil-plus marginal monthly
# For summer load: PJM fossil-plus average monthly TOD
baseline_locs = dict([('building_lighting', (2, 0.27)),
('demand_response', (2, 0.2)),
('summer_load', (2, 1.3))])
if i == baseline_locs[example_case][0]:
baseline_x = baseline_locs[example_case][1]
patch_width = [p.get_width() for p in ax.patches][0]
baseline_y = max([p.get_height() \
for p in ax.patches if abs(p.get_xy()[0]+patch_width/2-baseline_x)<=patch_width/4])
ax.text(s='*', x=baseline_x, y=1.05 * baseline_y,
horizontalalignment='center', verticalalignment='center',
fontsize=fontsize*2, fontweight='bold')
# Set font size
for item in ([
# ax.title,
ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(fontsize)
ax.title.set_fontsize(fontsize-2)
# Hacky errorbars
df_tot = get_onelabel_formatted(se_col[:-3], intervention_effects_df, example_case)
df_tot_se = get_onelabel_formatted(se_col, intervention_effects_df, example_case)
errbar_locs = dict([('building_lighting', [-0.27, 0, 0.27]), ('demand_response', [-0.2, 0.2]), ('summer_load', [-0.3,-0.1, 0.1])])
for i, ax in enumerate(g.axes[0]):
spat = df_tot['spat'].dtype.categories[i]
df_tot_slice = df_tot.query('spat == @spat').query('kind == "Marginal"')
df_tot_se_slice = df_tot_se.query('spat == @spat').query('kind == "Marginal"')
if example_case == 'building_lighting':
val, errval = df_tot_slice[se_col[:-3]].values, df_tot_se_slice[se_col].values
elif example_case == 'demand_response':
val, errval = df_tot_slice[se_col[:-3]].values/1e6, df_tot_se_slice[se_col].values/1e6
elif example_case == 'summer_load':
val, errval = df_tot_slice[se_col[:-3]].values/1e9, df_tot_se_slice[se_col].values/1e9
ax.errorbar(errbar_locs[example_case], val, yerr=errval, ms=20, color='black',
linewidth=0, elinewidth=2, capsize=2, capthick=2)
# Line around legend
fig = plt.gcf()
if example_case == 'building_lighting':
leg_line = \
mpatches.Rectangle(
(0.073+xoffset, -0.2), 0.8, 0.24, facecolor='none', edgecolor='lightgray',
transform=fig.transFigure, figure=fig)
elif example_case == 'demand_response':
leg_line = \
mpatches.Rectangle(
(0.1+xoffset, -0.195), 0.77, 0.24, facecolor='none', edgecolor='lightgray',
transform=fig.transFigure, figure=fig)
elif example_case == 'summer_load':
leg_line = \
mpatches.Rectangle(
(0.073+xoffset, -0.2), 0.875, 0.24, facecolor='none', edgecolor='lightgray',
transform=fig.transFigure, figure=fig)
fig.patches.extend([leg_line])
extra_artists = (lgd, lgd2)
if title:
sup=fig.text(0.5, 0.9, 'Total damages ({})\n'.format(dam_type),
fontsize=fontsize, fontweight='bold', fontstyle='italic',
transform=fig.transFigure, ha='center')
extra_artists = extra_artists + (sup,)
plt.tight_layout()
dirname = os.path.join('plots', example_case)
if not os.path.exists(dirname): os.makedirs(dirname)
g.fig.savefig(os.path.join(dirname,
'{}-stacked-with-error{}.pdf'.format(dam_type, '-titled' if title else '')),
bbox_extra_artists=extra_artists, bbox_inches='tight')
# Plotting one graph per damage factor
def get_stacked_plot(label, intervention_effects_df, example_case):
df = get_onelabel_formatted(label, intervention_effects_df, example_case)
df_se = get_onelabel_formatted('{}-se'.format(label), intervention_effects_df, example_case)
# Get bar plot
sns.set(style="whitegrid")
colors = dict([('building_lighting', [0,2,3]), ('demand_response', [0,2]), ('summer_load', [0,2,3,4])])
g = sns.catplot(x='kind', y=label, hue='time', col='spat', data=df,
kind='bar', palette=[sns.color_palette('muted')[x] for x in colors[example_case]], legend=False, ci=None,
height=3, aspect=1).set_titles('{col_name}')
g.despine(left=True);
# Adjust font size and add legend
fontsize=18
for i, ax in enumerate(g.axes.flatten()):
ax.set_xlabel('')
for item in ([#ax.title,
ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(fontsize)
ax.title.set_fontsize(fontsize-2)
if i == 0:
ncols = dict([('building_lighting', 3), ('demand_response', 2), ('summer_load', 4)])
lgd = ax.legend(loc='center left', bbox_to_anchor=(0.75, -0.3), ncol=ncols[example_case], frameon=True, fontsize=fontsize)
ax.set_ylabel(format_axis(label, example_case))
# Annotate baseline
# For building lighting: PJM fossil-plus marginal monthly TOD
# For demand response: PJM fossil-plus marginal monthly
# For summer load: PJM fossil-plus average monthly TOD
baseline_locs = dict([('building_lighting', (2, 0.27)), ('demand_response', (2, 0.2)), ('summer_load', (2, 1.3))])
if i == baseline_locs[example_case][0]:
baseline_x = baseline_locs[example_case][1]
patch_width = [p.get_width() for p in ax.patches][0]
baseline_y = max([p.get_height() \
for p in ax.patches if abs(p.get_xy()[0]+patch_width/2-baseline_x)<=patch_width/4])
ax.text(s='*', x=baseline_x, y=1.05 * baseline_y,
horizontalalignment='center', verticalalignment='center',
fontsize=fontsize*2, fontweight='bold')
# Hacky errorbars
errbar_locs = dict([('building_lighting', [-0.27, 0, 0.27]), ('demand_response', [-0.2, 0.2]), ('summer_load', [-0.3,-0.1, 0.1])])
for i, ax in enumerate(g.axes[0]):
spat = df['spat'].dtype.categories[i]
df_slice = df.query('spat == @spat').query('kind == "Marginal"')
df_se_slice = df_se.query('spat == @spat').query('kind == "Marginal"')
ax.errorbar(errbar_locs[example_case], df_slice[label].values,
yerr=df_se_slice['{}-se'.format(label)].values, ms=20, color='black',
linewidth=0, elinewidth=2, capsize=2, capthick=2)
# Plot title
fig = plt.gcf()
# sup=fig.text(0.5, 1, format_title(label),
# fontsize=fontsize, fontweight='bold', fontstyle='italic',
# transform=fig.transFigure, ha='center')
plt.tight_layout()
dirname = os.path.join('plots', example_case)
if not os.path.exists(dirname): os.makedirs(dirname)
g.fig.savefig(os.path.join(dirname, 'si-{}.pdf'.format(label)),
bbox_extra_artists=(lgd,), #(lgd,sup),
bbox_inches='tight')
## Formatting helpers
def get_dam_name(dam_abbr):
return 'AP2' if dam_abbr == 'ap2' else 'EASIUR'
FULL_DAMS = ['dam_ap2', 'dam_eas']
def format_title(label):
l = label.split('_')
if label in FULL_DAMS:
t = 'Total damages ({})'.format('AP2' if l[1] == 'ap2' else 'EASIUR')
else:
t = '{0}$_{{{1}}}$ {2}'.format(l[0][:2].upper(), l[0][2:], 'emissions' if l[1] == 'kg' else 'damages')
if len(l) > 2: t += ' ({})'.format('AP2' if l[2] == 'ap2' else 'EASIUR')
return t
def format_axis(label, example_case):
l = label.split('_')
if example_case == 'summer_load':
if label in FULL_DAMS:
t = 'Total damages ($)'
elif len(l) > 2 or l[1] == 'dam':
t = 'Damages ($)'
else:
t = 'Emissions (kg)'
else:
if label in FULL_DAMS:
t = 'Total damages\navoided ($)'
elif len(l) > 2 or l[1] == 'dam':
t = 'Damages\navoided ($)'
else:
t = 'Emissions\navoided (kg)'
return t
# Get formatted df with intervention effects for given label
def get_onelabel_formatted(label, intervention_effects_df, example_case):
kind_map = dict([('MEF', 'Marginal'), ('AEF', 'Average')])
time_map = dict([('YearOnly', 'Annual'), ('MonthTOD', 'Monthly TOD'), ('Month', 'Monthly'), ('Hour', 'Hourly')])
df = intervention_effects_df[label].reset_index()
df['spat'] = df.apply(
lambda x: '{} ({}-{}{})'.format(
x['region'], x['fuel_type'][:-4].lower(), x['fuel_type'][-4:].lower(),
' 2016' if x['year'] == 2016 else ''), axis=1)
df['spat'] = df['spat'].str.replace('fossil-plus', 'fossil+non-emit')
df = df.drop(['region', 'fuel_type', 'year'], axis=1)
df['kind'] = df['kind'].map(lambda x: kind_map[x]).astype(
pd.CategoricalDtype(categories=['Marginal', 'Average'], ordered=True))
times = dict([('building_lighting', ['Annual', 'Monthly', 'Monthly TOD']),
('demand_response', ['Annual', 'Monthly']),
('summer_load', ['Annual', 'Monthly', 'Monthly TOD', 'Hourly'])])
df['time'] = df['time'].map(lambda x: time_map[x]).astype(
pd.CategoricalDtype(categories=times[example_case], ordered=True))
df['spat'] = df['spat'].astype(pd.CategoricalDtype(
categories=['PJM (fossil-only)', 'PJM (fossil+non-emit 2016)', 'PJM (fossil+non-emit)', 'RFC (fossil-only)'],
ordered=True))
df = df.sort_values(['spat', 'kind', 'time'])
return df
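# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original analysis script).  The label
# names below are illustrative; they follow the '<pollutant>_kg' /
# '<pollutant>_dam_<model>' convention that format_title() and format_axis()
# parse, and 'dam_ap2' is one of the FULL_DAMS totals.  The real
# intervention_effects_df is built elsewhere and is indexed by
# (region, fuel_type, year, kind, time) with one column per label.
def _example_plots(intervention_effects_df, example_case='building_lighting'):
    for label in ['dam_ap2', 'so2_kg']:
        get_stacked_plot(label, intervention_effects_df, example_case)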
|
[
"matplotlib.pyplot.tight_layout",
"seaborn.set",
"os.makedirs",
"matplotlib.patches.Rectangle",
"os.path.exists",
"pandas.CategoricalDtype",
"seaborn.color_palette",
"matplotlib.patches.Patch",
"matplotlib.pyplot.gcf",
"os.path.join",
"seaborn.FacetGrid"
] |
[((465, 509), 'seaborn.set', 'sns.set', ([], {'style': '"""whitegrid"""', 'color_codes': '(True)'}), "(style='whitegrid', color_codes=True)\n", (472, 509), True, 'import seaborn as sns\n'), ((1239, 1295), 'seaborn.FacetGrid', 'sns.FacetGrid', ([], {'data': 'df_cum', 'col': '"""spat"""', 'size': '(3)', 'aspect': '(1)'}), "(data=df_cum, col='spat', size=3, aspect=1)\n", (1252, 1295), True, 'import seaborn as sns\n'), ((9693, 9702), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (9700, 9702), True, 'import matplotlib.pyplot as plt\n'), ((10788, 10806), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10804, 10806), True, 'import matplotlib.pyplot as plt\n'), ((10821, 10856), 'os.path.join', 'os.path.join', (['"""plots"""', 'example_case'], {}), "('plots', example_case)\n", (10833, 10856), False, 'import os\n'), ((11447, 11473), 'seaborn.set', 'sns.set', ([], {'style': '"""whitegrid"""'}), "(style='whitegrid')\n", (11454, 11473), True, 'import seaborn as sns\n'), ((14129, 14138), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (14136, 14138), True, 'import matplotlib.pyplot as plt\n'), ((14324, 14342), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14340, 14342), True, 'import matplotlib.pyplot as plt\n'), ((14362, 14397), 'os.path.join', 'os.path.join', (['"""plots"""', 'example_case'], {}), "('plots', example_case)\n", (14374, 14397), False, 'import os\n'), ((9780, 9918), 'matplotlib.patches.Rectangle', 'mpatches.Rectangle', (['(0.073 + xoffset, -0.2)', '(0.8)', '(0.24)'], {'facecolor': '"""none"""', 'edgecolor': '"""lightgray"""', 'transform': 'fig.transFigure', 'figure': 'fig'}), "((0.073 + xoffset, -0.2), 0.8, 0.24, facecolor='none',\n edgecolor='lightgray', transform=fig.transFigure, figure=fig)\n", (9798, 9918), True, 'import matplotlib.patches as mpatches\n'), ((10868, 10891), 'os.path.exists', 'os.path.exists', (['dirname'], {}), '(dirname)\n', (10882, 10891), False, 'import os\n'), ((10893, 10913), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (10904, 10913), False, 'import os\n'), ((14409, 14432), 'os.path.exists', 'os.path.exists', (['dirname'], {}), '(dirname)\n', (14423, 14432), False, 'import os\n'), ((14434, 14454), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (14445, 14454), False, 'import os\n'), ((16451, 16520), 'pandas.CategoricalDtype', 'pd.CategoricalDtype', ([], {'categories': "['Marginal', 'Average']", 'ordered': '(True)'}), "(categories=['Marginal', 'Average'], ordered=True)\n", (16470, 16520), True, 'import pandas as pd\n'), ((16799, 16864), 'pandas.CategoricalDtype', 'pd.CategoricalDtype', ([], {'categories': 'times[example_case]', 'ordered': '(True)'}), '(categories=times[example_case], ordered=True)\n', (16818, 16864), True, 'import pandas as pd\n'), ((16902, 17053), 'pandas.CategoricalDtype', 'pd.CategoricalDtype', ([], {'categories': "['PJM (fossil-only)', 'PJM (fossil+non-emit 2016)', 'PJM (fossil+non-emit)',\n 'RFC (fossil-only)']", 'ordered': '(True)'}), "(categories=['PJM (fossil-only)',\n 'PJM (fossil+non-emit 2016)', 'PJM (fossil+non-emit)',\n 'RFC (fossil-only)'], ordered=True)\n", (16921, 17053), True, 'import pandas as pd\n'), ((10024, 10163), 'matplotlib.patches.Rectangle', 'mpatches.Rectangle', (['(0.1 + xoffset, -0.195)', '(0.77)', '(0.24)'], {'facecolor': '"""none"""', 'edgecolor': '"""lightgray"""', 'transform': 'fig.transFigure', 'figure': 'fig'}), "((0.1 + xoffset, -0.195), 0.77, 0.24, facecolor='none',\n edgecolor='lightgray', 
transform=fig.transFigure, figure=fig)\n", (10042, 10163), True, 'import matplotlib.patches as mpatches\n'), ((3451, 3541), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'facecolor': '"""white"""', 'label': 'leg_dict[dam]', 'edgecolor': '"""black"""', 'hatch': 'hatch'}), "(facecolor='white', label=leg_dict[dam], edgecolor='black',\n hatch=hatch)\n", (3465, 3541), True, 'import matplotlib.patches as mpatches\n'), ((4387, 4426), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""white"""', 'label': '""""""'}), "(color='white', label='')\n", (4401, 4426), True, 'import matplotlib.patches as mpatches\n'), ((10265, 10405), 'matplotlib.patches.Rectangle', 'mpatches.Rectangle', (['(0.073 + xoffset, -0.2)', '(0.875)', '(0.24)'], {'facecolor': '"""none"""', 'edgecolor': '"""lightgray"""', 'transform': 'fig.transFigure', 'figure': 'fig'}), "((0.073 + xoffset, -0.2), 0.875, 0.24, facecolor='none',\n edgecolor='lightgray', transform=fig.transFigure, figure=fig)\n", (10283, 10405), True, 'import matplotlib.patches as mpatches\n'), ((1809, 1835), 'seaborn.color_palette', 'sns.color_palette', (['"""muted"""'], {}), "('muted')\n", (1826, 1835), True, 'import seaborn as sns\n'), ((2060, 2086), 'seaborn.color_palette', 'sns.color_palette', (['"""muted"""'], {}), "('muted')\n", (2077, 2086), True, 'import seaborn as sns\n'), ((2309, 2335), 'seaborn.color_palette', 'sns.color_palette', (['"""muted"""'], {}), "('muted')\n", (2326, 2335), True, 'import seaborn as sns\n'), ((5376, 5415), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""white"""', 'label': '""""""'}), "(color='white', label='')\n", (5390, 5415), True, 'import matplotlib.patches as mpatches\n'), ((3936, 3945), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3943, 3945), True, 'import matplotlib.pyplot as plt\n'), ((4065, 4074), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4072, 4074), True, 'import matplotlib.pyplot as plt\n'), ((6227, 6266), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""white"""', 'label': '""""""'}), "(color='white', label='')\n", (6241, 6266), True, 'import matplotlib.patches as mpatches\n'), ((2558, 2584), 'seaborn.color_palette', 'sns.color_palette', (['"""muted"""'], {}), "('muted')\n", (2575, 2584), True, 'import seaborn as sns\n'), ((4476, 4502), 'seaborn.color_palette', 'sns.color_palette', (['"""muted"""'], {}), "('muted')\n", (4493, 4502), True, 'import seaborn as sns\n'), ((4593, 4619), 'seaborn.color_palette', 'sns.color_palette', (['"""muted"""'], {}), "('muted')\n", (4610, 4619), True, 'import seaborn as sns\n'), ((4709, 4735), 'seaborn.color_palette', 'sns.color_palette', (['"""muted"""'], {}), "('muted')\n", (4726, 4735), True, 'import seaborn as sns\n'), ((5107, 5116), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5114, 5116), True, 'import matplotlib.pyplot as plt\n'), ((5207, 5216), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5214, 5216), True, 'import matplotlib.pyplot as plt\n'), ((11698, 11724), 'seaborn.color_palette', 'sns.color_palette', (['"""muted"""'], {}), "('muted')\n", (11715, 11724), True, 'import seaborn as sns\n'), ((5465, 5491), 'seaborn.color_palette', 'sns.color_palette', (['"""muted"""'], {}), "('muted')\n", (5482, 5491), True, 'import seaborn as sns\n'), ((5580, 5606), 'seaborn.color_palette', 'sns.color_palette', (['"""muted"""'], {}), "('muted')\n", (5597, 5606), True, 'import seaborn as sns\n'), ((5961, 5970), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5968, 5970), True, 'import 
matplotlib.pyplot as plt\n'), ((6062, 6071), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (6069, 6071), True, 'import matplotlib.pyplot as plt\n'), ((6316, 6342), 'seaborn.color_palette', 'sns.color_palette', (['"""muted"""'], {}), "('muted')\n", (6333, 6342), True, 'import seaborn as sns\n'), ((6433, 6459), 'seaborn.color_palette', 'sns.color_palette', (['"""muted"""'], {}), "('muted')\n", (6450, 6459), True, 'import seaborn as sns\n'), ((6549, 6575), 'seaborn.color_palette', 'sns.color_palette', (['"""muted"""'], {}), "('muted')\n", (6566, 6575), True, 'import seaborn as sns\n'), ((6668, 6694), 'seaborn.color_palette', 'sns.color_palette', (['"""muted"""'], {}), "('muted')\n", (6685, 6694), True, 'import seaborn as sns\n'), ((7047, 7056), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (7054, 7056), True, 'import matplotlib.pyplot as plt\n'), ((7147, 7156), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (7154, 7156), True, 'import matplotlib.pyplot as plt\n')]
|
# Main.py - Pixels Fighting #
# Author: <NAME> #
# ---------------------#
# Imports #
import pygame
from pygame.locals import *
from helpers import *
import random
import numpy as np
import time
# ---------------------#
# Initialize number of rows/columns
INT = 100
INT_SQ = INT*INT
# Initialize size of arrays
SIZE = 5
# Initialize Pygame
pygame.init()
# Initialize screen, status and clock
screen = pygame.display.set_mode((80+INT*SIZE,160+INT*SIZE))
running = True
clock = pygame.time.Clock()
# Defining Colors
COLOR_ALIVE = (random.randint(0,255),random.randint(0,255),random.randint(0,255))
COLOR_DEAD = (random.randint(0,255),random.randint(0,255),random.randint(0,255))
# Initialize Status Array - Making an array with half dead and half alive
zero = np.zeros((INT,INT//2), dtype=int)
one = np.ones((INT,INT//2), dtype=int)
current_status_array = np.concatenate((zero,one), axis=1)
# ---------------------#
# For Title Text to be displayed
# Defining font style and size
font = pygame.font.Font('freesansbold.ttf', 32)
text_title = font.render('Pixels Fighting', True, (255,255,255), (0,0,0))
textRectTitle = text_title.get_rect()
textRectTitle.center = (40+INT*SIZE/2, 40)
# ---------------------#
# Defining Box Class
class Box():
# Status can be dead (0) or alive(1);
def __init__(self, x, y, alive):
self.x = x
self.y = y
self.alive = alive
self.surf = pygame.Surface((SIZE,SIZE))
self.rect = (40 + SIZE*self.y, 100 + SIZE*self.x)
# Function to fill surface with color
def assign_color(self):
if self.alive == 0:
self.surf.fill(COLOR_DEAD)
else:
self.surf.fill(COLOR_ALIVE)
screen.blit(self.surf,self.rect)
# Function to update surface; as per current_status_array
def update(self):
self.alive = current_status_array[self.x][self.y]
self.assign_color()
# ---------------------#
# Creating 'INT_SQ' instances of box class, and appending them to a list for accessibility
boxes = []
for i in range(INT_SQ):
# x,y will be filled sequentially
x = i//INT
y = i%INT
    # Alive status depending on the current array
boxes.append(Box(x,y,current_status_array[x][y]))
# ---------------------#
# For Ratio Text to be displayed and updated continuously
# Defining font style and size
font = pygame.font.Font('freesansbold.ttf', 25)
def UpdateRatioText():
# For the alive ones
text_alive = font.render('Alive: {:.4f}'.format(IsAliveWinning(current_status_array, INT_SQ)), True, COLOR_ALIVE, (0,0,0))
textRectAlive = text_alive.get_rect()
textRectAlive.x = 80 + INT*SIZE - 210
textRectAlive.y = 115 + INT*SIZE
# For the dead ones
text_dead = font.render('Dead: {:.4f}'.format(1-IsAliveWinning(current_status_array, INT_SQ)), True, COLOR_DEAD, (0,0,0))
textRectDead = text_dead.get_rect()
textRectDead.x = 60
textRectDead.y = 115 + INT*SIZE
# Updating the font on the rect
screen.blit(text_alive, textRectAlive)
screen.blit(text_dead, textRectDead)
# ---------------------#
# Main python loop
while running:
# Main python quit function
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
# For updating array and boxes status
current_status_array = UpdateArray(current_status_array, INT)
for box in boxes:
box.update()
# Update Ratio text
UpdateRatioText()
# Display Title
screen.blit(text_title, textRectTitle)
# Refresh screen
pygame.display.update()
    # Throttle the loop with time.sleep instead of clock.tick(); roughly 10 updates per second
time.sleep(0.1)
# ---------------------#
|
[
"random.randint",
"pygame.Surface",
"pygame.event.get",
"pygame.display.set_mode",
"numpy.zeros",
"numpy.ones",
"pygame.init",
"time.sleep",
"pygame.display.update",
"pygame.font.Font",
"pygame.time.Clock",
"numpy.concatenate"
] |
[((346, 359), 'pygame.init', 'pygame.init', ([], {}), '()\n', (357, 359), False, 'import pygame\n'), ((408, 468), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(80 + INT * SIZE, 160 + INT * SIZE)'], {}), '((80 + INT * SIZE, 160 + INT * SIZE))\n', (431, 468), False, 'import pygame\n'), ((483, 502), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (500, 502), False, 'import pygame\n'), ((768, 804), 'numpy.zeros', 'np.zeros', (['(INT, INT // 2)'], {'dtype': 'int'}), '((INT, INT // 2), dtype=int)\n', (776, 804), True, 'import numpy as np\n'), ((808, 843), 'numpy.ones', 'np.ones', (['(INT, INT // 2)'], {'dtype': 'int'}), '((INT, INT // 2), dtype=int)\n', (815, 843), True, 'import numpy as np\n'), ((864, 899), 'numpy.concatenate', 'np.concatenate', (['(zero, one)'], {'axis': '(1)'}), '((zero, one), axis=1)\n', (878, 899), True, 'import numpy as np\n'), ((998, 1038), 'pygame.font.Font', 'pygame.font.Font', (['"""freesansbold.ttf"""', '(32)'], {}), "('freesansbold.ttf', 32)\n", (1014, 1038), False, 'import pygame\n'), ((2375, 2415), 'pygame.font.Font', 'pygame.font.Font', (['"""freesansbold.ttf"""', '(25)'], {}), "('freesansbold.ttf', 25)\n", (2391, 2415), False, 'import pygame\n'), ((538, 560), 'random.randint', 'random.randint', (['(1)', '(256)'], {}), '(1, 256)\n', (552, 560), False, 'import random\n'), ((560, 582), 'random.randint', 'random.randint', (['(0)', '(256)'], {}), '(0, 256)\n', (574, 582), False, 'import random\n'), ((582, 604), 'random.randint', 'random.randint', (['(0)', '(256)'], {}), '(0, 256)\n', (596, 604), False, 'import random\n'), ((619, 641), 'random.randint', 'random.randint', (['(1)', '(256)'], {}), '(1, 256)\n', (633, 641), False, 'import random\n'), ((641, 663), 'random.randint', 'random.randint', (['(0)', '(256)'], {}), '(0, 256)\n', (655, 663), False, 'import random\n'), ((663, 685), 'random.randint', 'random.randint', (['(0)', '(256)'], {}), '(0, 256)\n', (677, 685), False, 'import random\n'), ((3204, 3222), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (3220, 3222), False, 'import pygame\n'), ((3579, 3602), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (3600, 3602), False, 'import pygame\n'), ((3703, 3718), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (3713, 3718), False, 'import time\n'), ((1427, 1455), 'pygame.Surface', 'pygame.Surface', (['(SIZE, SIZE)'], {}), '((SIZE, SIZE))\n', (1441, 1455), False, 'import pygame\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2019-2021, Dr.-Ing. <NAME>
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import sys
import sympy as sp
# coronary model with a 3-element Windkessel (ZCR, proximal part) in series with a 2-element Windkessel (CR, distal part)
# according to Vieira et al. (2018) "Patient-specific modeling of right coronary circulation vulnerability post-liver transplant in Alagille’s syndrome", PLoS ONE 13(11), e0205829
# here their R_e is Z_corp_sys, C_e is C_corp_sys, R_p is R_corp_sys, C_i is C_cord_sys, and R_d is R_cord_sys
# the distal compliance is fed by the left ventricular pressure in order to have a phase-dependent tone of the coronary
# (coronaries almost entirely fill in diastole, not during systole)
#\begin{align}
#&C_{\mathrm{cor,p}}^{\mathrm{sys}} \left(\frac{\mathrm{d}p_{\mathrm{ar}}^{\mathrm{sys}}}{\mathrm{d}t}-Z_{\mathrm{cor,p}}^{\mathrm{sys}}\frac{\mathrm{d}q_{\mathrm{cor,p,in}}^{\mathrm{sys}}}{\mathrm{d}t}\right) = q_{\mathrm{cor,p,in}}^{\mathrm{sys}} - q_{\mathrm{cor,p}}^{\mathrm{sys}}\\
#&R_{\mathrm{cor,p}}^{\mathrm{sys}}\,q_{\mathrm{cor,p}}^{\mathrm{sys}}=p_{\mathrm{ar}}^{\mathrm{sys}}-p_{\mathrm{cor,d}}^{\mathrm{sys}} - Z_{\mathrm{cor,p}}^{\mathrm{sys}}\,q_{\mathrm{cor,p,in}}^{\mathrm{sys}}\\
#&C_{\mathrm{cor,d}}^{\mathrm{sys}} \frac{\mathrm{d}(p_{\mathrm{cor,d}}^{\mathrm{sys}}-p_{\mathrm{v}}^{\ell})}{\mathrm{d}t} = q_{\mathrm{cor,p}}^{\mathrm{sys}} - q_{\mathrm{cor,d}}^{\mathrm{sys}}\\
#&R_{\mathrm{cor,d}}^{\mathrm{sys}}\,q_{\mathrm{cor,d}}^{\mathrm{sys}}=p_{\mathrm{cor,d}}^{\mathrm{sys}}-p_{\mathrm{at}}^{r}
#\end{align}
class coronary_circ_ZCRp_CRd():
def __init__(self, params, varmap, auxmap, vs):
self.Z_corp_sys = params['Z_corp_sys']
self.C_corp_sys = params['C_corp_sys']
self.R_corp_sys = params['R_corp_sys']
self.C_cord_sys = params['C_cord_sys']
self.R_cord_sys = params['R_cord_sys']
try: self.V_corp_sys_u = params['V_corp_sys_u']
except: self.V_corp_sys_u = 0
try: self.V_cord_sys_u = params['V_cord_sys_u']
except: self.V_cord_sys_u = 0
self.ndcor = 4
self.varmap = varmap
self.auxmap = auxmap
self.vs = vs
def equation_map(self, vindex, aindex, x_, a_, df_, f_, p_ar_, p_v_, p_at_):
self.varmap['q_corp_sys_in'] = vindex
self.varmap['q_corp_sys'] = vindex+1
self.varmap['p_cord_sys'] = vindex+2
self.varmap['q_ven'+str(self.vs+1)+'_sys'] = vindex+3
q_corp_sys_in_ = sp.Symbol('q_corp_sys_in_')
q_corp_sys_ = sp.Symbol('q_corp_sys_')
p_cord_sys_ = sp.Symbol('p_cord_sys_')
q_cord_sys_ = sp.Symbol('q_ven'+str(self.vs+1)+'_sys_')
x_[self.varmap['q_corp_sys_in']] = q_corp_sys_in_
x_[self.varmap['q_corp_sys']] = q_corp_sys_
x_[self.varmap['p_cord_sys']] = p_cord_sys_
x_[self.varmap['q_ven'+str(self.vs+1)+'_sys']] = q_cord_sys_
# populate df_ and f_ arrays
df_[vindex] = self.C_corp_sys * (p_ar_[0] - self.Z_corp_sys * q_corp_sys_in_) # coronary proximal volume rate
df_[vindex+1] = 0.
df_[vindex+2] = self.C_cord_sys * (p_cord_sys_ - p_v_) # coronary distal volume rate
df_[vindex+3] = 0.
f_[vindex] = q_corp_sys_ - q_corp_sys_in_ # coronary proximal flow balance
f_[vindex+1] = (p_cord_sys_ - p_ar_[0] + self.Z_corp_sys * q_corp_sys_in_)/self.R_corp_sys + q_corp_sys_ # coronary proximal momentum
f_[vindex+2] = q_cord_sys_ - q_corp_sys_ # coronary distal flow balance
f_[vindex+3] = (p_at_ - p_cord_sys_)/self.R_cord_sys + q_cord_sys_ # coronary distal momentum
# auxiliary map and variables
self.auxmap['V_corp_sys'] = aindex
self.auxmap['V_cord_sys'] = aindex+1
a_[self.auxmap['V_corp_sys']] = self.C_corp_sys * (p_ar_[0] - self.Z_corp_sys * q_corp_sys_in_) + self.V_corp_sys_u
a_[self.auxmap['V_cord_sys']] = self.C_cord_sys * (p_cord_sys_ - p_v_) + self.V_cord_sys_u
# safety check that we don't hand in a zero symbol for p_v
if p_v_ is sp.S.Zero: raise ValueError("Zero symbol for left ventricular pressure!")
return [q_corp_sys_in_], q_cord_sys_
def initialize(self, var, iniparam):
try: var[self.varmap['q_corp_sys_in']] = iniparam['q_corp_sys_in_0']
except: var[self.varmap['q_corp_sys_in']] = iniparam['q_corp_sys_0']
var[self.varmap['q_corp_sys']] = iniparam['q_corp_sys_0']
var[self.varmap['p_cord_sys']] = iniparam['p_cord_sys_0']
try: var[self.varmap['q_ven'+str(self.vs+1)+'_sys']] = iniparam['q_ven'+str(self.vs+1)+'_sys_0']
except: var[self.varmap['q_ven'+str(self.vs+1)+'_sys']] = iniparam['q_ven_sys_0']
def print_to_screen(self, var_sq, aux):
print("Output of 0D coronary model (ZCRp_CRd):")
print('{:<10s}{:<3s}{:<7.3f}'.format('p_cord_sys',' = ',var_sq[self.varmap['p_cord_sys']]))
sys.stdout.flush()
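# Hedged usage sketch (not part of the original package; the parameter values
# are illustrative only): wiring the ZCRp_CRd block into a small symbolic
# system and inspecting the variable/auxiliary maps it populates.
def _example_coronary_setup():
    params = {'Z_corp_sys': 3.0e-3, 'C_corp_sys': 0.01, 'R_corp_sys': 0.03,
              'C_cord_sys': 0.03, 'R_cord_sys': 0.2}
    cor = coronary_circ_ZCRp_CRd(params, varmap={}, auxmap={}, vs=0)
    n = cor.ndcor
    x_, df_, f_, a_ = [0]*n, [0]*n, [0]*n, [0]*2
    p_ar_ = [sp.Symbol('p_ar_sys_')]
    p_v_, p_at_ = sp.Symbol('p_v_l_'), sp.Symbol('p_at_r_')
    q_in, q_out = cor.equation_map(0, 0, x_, a_, df_, f_, p_ar_, p_v_, p_at_)
    return cor.varmap, cor.auxmap, df_, f_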
# equivalent model to ZCRp_CRd, but individually for left and right coronary arteries
#\begin{align}
#&C_{\mathrm{cor,p}}^{\mathrm{sys},\ell} \left(\frac{\mathrm{d}p_{\mathrm{ar}}^{\mathrm{sys},\ell}}{\mathrm{d}t}-Z_{\mathrm{cor,p}}^{\mathrm{sys},\ell}\frac{\mathrm{d}q_{\mathrm{cor,p,in}}^{\mathrm{sys},\ell}}{\mathrm{d}t}\right) = q_{\mathrm{cor,p,in}}^{\mathrm{sys},\ell} - q_{\mathrm{cor,p}}^{\mathrm{sys},\ell}\\
#&R_{\mathrm{cor,p}}^{\mathrm{sys},\ell}\,q_{\mathrm{cor,p}}^{\mathrm{sys},\ell}=p_{\mathrm{ar}}^{\mathrm{sys}}-p_{\mathrm{cor,d}}^{\mathrm{sys},\ell} - Z_{\mathrm{cor,p}}^{\mathrm{sys},\ell}\,q_{\mathrm{cor,p,in}}^{\mathrm{sys},\ell}\\
#&C_{\mathrm{cor,d}}^{\mathrm{sys},\ell} \frac{\mathrm{d}(p_{\mathrm{cor,d}}^{\mathrm{sys},\ell}-p_{\mathrm{v}}^{\ell})}{\mathrm{d}t} = q_{\mathrm{cor,p}}^{\mathrm{sys},\ell} - q_{\mathrm{cor,d}}^{\mathrm{sys},\ell}\\
#&R_{\mathrm{cor,d}}^{\mathrm{sys},\ell}\,q_{\mathrm{cor,d}}^{\mathrm{sys},\ell}=p_{\mathrm{cor,d}}^{\mathrm{sys},\ell}-p_{\mathrm{at}}^{r}\\
#&C_{\mathrm{cor,p}}^{\mathrm{sys},r} \left(\frac{\mathrm{d}p_{\mathrm{ar}}^{\mathrm{sys},r}}{\mathrm{d}t}-Z_{\mathrm{cor,p}}^{\mathrm{sys},r}\frac{\mathrm{d}q_{\mathrm{cor,p,in}}^{\mathrm{sys},r}}{\mathrm{d}t}\right) = q_{\mathrm{cor,p,in}}^{\mathrm{sys},r} - q_{\mathrm{cor,p}}^{\mathrm{sys},r}\\
#&R_{\mathrm{cor,p}}^{\mathrm{sys},r}\,q_{\mathrm{cor,p}}^{\mathrm{sys},r}=p_{\mathrm{ar}}^{\mathrm{sys}}-p_{\mathrm{cor,d}}^{\mathrm{sys},r} - Z_{\mathrm{cor,p}}^{\mathrm{sys},r}\,q_{\mathrm{cor,p,in}}^{\mathrm{sys},r}\\
#&C_{\mathrm{cor,d}}^{\mathrm{sys},r} \frac{\mathrm{d}(p_{\mathrm{cor,d}}^{\mathrm{sys},r}-p_{\mathrm{v}}^{\ell})}{\mathrm{d}t} = q_{\mathrm{cor,p}}^{\mathrm{sys},r} - q_{\mathrm{cor,d}}^{\mathrm{sys},r}\\
#&R_{\mathrm{cor,d}}^{\mathrm{sys},r}\,q_{\mathrm{cor,d}}^{\mathrm{sys},r}=p_{\mathrm{cor,d}}^{\mathrm{sys},r}-p_{\mathrm{at}}^{r}\\
#&0=q_{\mathrm{cor,d}}^{\mathrm{sys},\ell}+q_{\mathrm{cor,d}}^{\mathrm{sys},r}-q_{\mathrm{cor,d,out}}^{\mathrm{sys}}
#\end{align}
class coronary_circ_ZCRp_CRd_lr():
def __init__(self, params, varmap, auxmap, vs):
self.Z_corp_sys_l = params['Z_corp_sys_l']
self.C_corp_sys_l = params['C_corp_sys_l']
self.R_corp_sys_l = params['R_corp_sys_l']
self.C_cord_sys_l = params['C_cord_sys_l']
self.R_cord_sys_l = params['R_cord_sys_l']
self.Z_corp_sys_r = params['Z_corp_sys_r']
self.C_corp_sys_r = params['C_corp_sys_r']
self.R_corp_sys_r = params['R_corp_sys_r']
self.C_cord_sys_r = params['C_cord_sys_r']
self.R_cord_sys_r = params['R_cord_sys_r']
try: self.V_corp_sys_l_u = params['V_corp_sys_l_u']
except: self.V_corp_sys_l_u = 0
try: self.V_cord_sys_l_u = params['V_cord_sys_l_u']
except: self.V_cord_sys_l_u = 0
try: self.V_corp_sys_r_u = params['V_corp_sys_r_u']
except: self.V_corp_sys_r_u = 0
try: self.V_cord_sys_r_u = params['V_cord_sys_r_u']
except: self.V_cord_sys_r_u = 0
self.ndcor = 9
self.varmap = varmap
self.auxmap = auxmap
self.vs = vs
def equation_map(self, vindex, aindex, x_, a_, df_, f_, p_ar_, p_v_, p_at_):
self.varmap['q_corp_sys_l_in'] = vindex
self.varmap['q_corp_sys_l'] = vindex+1
self.varmap['p_cord_sys_l'] = vindex+2
self.varmap['q_cord_sys_l'] = vindex+3
self.varmap['q_corp_sys_r_in'] = vindex+4
self.varmap['q_corp_sys_r'] = vindex+5
self.varmap['p_cord_sys_r'] = vindex+6
self.varmap['q_cord_sys_r'] = vindex+7
self.varmap['q_ven'+str(self.vs+1)+'_sys'] = vindex+8
q_corp_sys_l_in_ = sp.Symbol('q_corp_sys_l_in_')
q_corp_sys_l_ = sp.Symbol('q_corp_sys_l_')
p_cord_sys_l_ = sp.Symbol('p_cord_sys_l_')
q_cord_sys_l_ = sp.Symbol('q_cord_sys_l_')
q_corp_sys_r_in_ = sp.Symbol('q_corp_sys_r_in_')
q_corp_sys_r_ = sp.Symbol('q_corp_sys_r_')
p_cord_sys_r_ = sp.Symbol('p_cord_sys_r_')
q_cord_sys_r_ = sp.Symbol('q_cord_sys_r_')
q_cord_sys_out_ = sp.Symbol('q_ven'+str(self.vs+1)+'_sys_')
x_[self.varmap['q_corp_sys_l_in']] = q_corp_sys_l_in_
x_[self.varmap['q_corp_sys_l']] = q_corp_sys_l_
x_[self.varmap['p_cord_sys_l']] = p_cord_sys_l_
x_[self.varmap['q_cord_sys_l']] = q_cord_sys_l_
x_[self.varmap['q_corp_sys_r_in']] = q_corp_sys_r_in_
x_[self.varmap['q_corp_sys_r']] = q_corp_sys_r_
x_[self.varmap['p_cord_sys_r']] = p_cord_sys_r_
x_[self.varmap['q_cord_sys_r']] = q_cord_sys_r_
x_[self.varmap['q_ven'+str(self.vs+1)+'_sys']] = q_cord_sys_out_
# populate df_ and f_ arrays
df_[vindex] = self.C_corp_sys_l * (p_ar_[0] - self.Z_corp_sys_l * q_corp_sys_l_in_) # left coronary proximal volume rate
df_[vindex+1] = 0.
df_[vindex+2] = self.C_cord_sys_l * (p_cord_sys_l_ - p_v_) # left coronary distal volume rate
df_[vindex+3] = 0.
df_[vindex+4] = self.C_corp_sys_r * (p_ar_[1] - self.Z_corp_sys_r * q_corp_sys_r_in_) # right coronary proximal volume rate
df_[vindex+5] = 0.
df_[vindex+6] = self.C_cord_sys_r * (p_cord_sys_r_ - p_v_) # right coronary distal volume rate
df_[vindex+7] = 0.
df_[vindex+8] = 0.
f_[vindex] = q_corp_sys_l_ - q_corp_sys_l_in_ # left coronary proximal flow balance
f_[vindex+1] = (p_cord_sys_l_ - p_ar_[0] + self.Z_corp_sys_l * q_corp_sys_l_in_)/self.R_corp_sys_l + q_corp_sys_l_ # left coronary proximal momentum
f_[vindex+2] = q_cord_sys_l_ - q_corp_sys_l_ # left coronary distal flow balance
f_[vindex+3] = (p_at_ - p_cord_sys_l_)/self.R_cord_sys_l + q_cord_sys_l_ # left coronary distal momentum
f_[vindex+4] = q_corp_sys_r_ - q_corp_sys_r_in_ # right coronary proximal flow balance
f_[vindex+5] = (p_cord_sys_r_ - p_ar_[1] + self.Z_corp_sys_r * q_corp_sys_r_in_)/self.R_corp_sys_r + q_corp_sys_r_ # right coronary proximal momentum
f_[vindex+6] = q_cord_sys_r_ - q_corp_sys_r_ # right coronary distal flow balance
f_[vindex+7] = (p_at_ - p_cord_sys_r_)/self.R_cord_sys_r + q_cord_sys_r_ # right coronary distal momentum
f_[vindex+8] = q_cord_sys_out_ - q_cord_sys_l_ - q_cord_sys_r_ # coronary sinus flow balance
# auxiliary map and variables
self.auxmap['V_corp_sys_l'] = aindex
self.auxmap['V_cord_sys_l'] = aindex+1
self.auxmap['V_corp_sys_r'] = aindex+2
self.auxmap['V_cord_sys_r'] = aindex+3
a_[self.auxmap['V_corp_sys_l']] = self.C_corp_sys_l * (p_ar_[0] - self.Z_corp_sys_l * q_corp_sys_l_in_) + self.V_corp_sys_l_u
a_[self.auxmap['V_cord_sys_l']] = self.C_cord_sys_l * (p_cord_sys_l_ - p_v_) + self.V_cord_sys_l_u
a_[self.auxmap['V_corp_sys_r']] = self.C_corp_sys_r * (p_ar_[1] - self.Z_corp_sys_r * q_corp_sys_r_in_) + self.V_corp_sys_r_u
a_[self.auxmap['V_cord_sys_r']] = self.C_cord_sys_r * (p_cord_sys_r_ - p_v_) + self.V_cord_sys_r_u
# safety check that we don't hand in a zero symbol for p_v
if p_v_ is sp.S.Zero: raise ValueError("Zero symbol for left ventricular pressure!")
return [q_corp_sys_l_in_,q_corp_sys_r_in_], q_cord_sys_out_
def initialize(self, var, iniparam):
try: var[self.varmap['q_corp_sys_l_in']] = iniparam['q_corp_sys_l_in_0']
except: var[self.varmap['q_corp_sys_l_in']] = iniparam['q_corp_sys_l_0']
var[self.varmap['q_corp_sys_l']] = iniparam['q_corp_sys_l_0']
var[self.varmap['p_cord_sys_l']] = iniparam['p_cord_sys_l_0']
var[self.varmap['q_cord_sys_l']] = iniparam['q_cord_sys_l_0']
try: var[self.varmap['q_corp_sys_r_in']] = iniparam['q_corp_sys_r_in_0']
except: var[self.varmap['q_corp_sys_r_in']] = iniparam['q_corp_sys_r_0']
var[self.varmap['q_corp_sys_r']] = iniparam['q_corp_sys_r_0']
var[self.varmap['p_cord_sys_r']] = iniparam['p_cord_sys_r_0']
var[self.varmap['q_cord_sys_r']] = iniparam['q_cord_sys_r_0']
try: var[self.varmap['q_ven'+str(self.vs+1)+'_sys']] = iniparam['q_ven'+str(self.vs+1)+'_sys_0']
except: var[self.varmap['q_ven'+str(self.vs+1)+'_sys']] = iniparam['q_ven_sys_0']
def print_to_screen(self, var_sq, aux):
print("Output of 0D coronary model (ZCRp_CRd_lr):")
print('{:<12s}{:<3s}{:<7.3f}{:<3s}{:<12s}{:<3s}{:<7.3f}'.format('p_cord_sys_l',' = ',var_sq[self.varmap['p_cord_sys_l']],' ','p_cord_sys_r',' = ',var_sq[self.varmap['p_cord_sys_r']]))
sys.stdout.flush()
|
[
"sympy.Symbol",
"sys.stdout.flush"
] |
[((2717, 2744), 'sympy.Symbol', 'sp.Symbol', (['"""q_corp_sys_in_"""'], {}), "('q_corp_sys_in_')\n", (2726, 2744), True, 'import sympy as sp\n'), ((2770, 2794), 'sympy.Symbol', 'sp.Symbol', (['"""q_corp_sys_"""'], {}), "('q_corp_sys_')\n", (2779, 2794), True, 'import sympy as sp\n'), ((2820, 2844), 'sympy.Symbol', 'sp.Symbol', (['"""p_cord_sys_"""'], {}), "('p_cord_sys_')\n", (2829, 2844), True, 'import sympy as sp\n'), ((5570, 5588), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5586, 5588), False, 'import sys\n'), ((9463, 9492), 'sympy.Symbol', 'sp.Symbol', (['"""q_corp_sys_l_in_"""'], {}), "('q_corp_sys_l_in_')\n", (9472, 9492), True, 'import sympy as sp\n'), ((9520, 9546), 'sympy.Symbol', 'sp.Symbol', (['"""q_corp_sys_l_"""'], {}), "('q_corp_sys_l_')\n", (9529, 9546), True, 'import sympy as sp\n'), ((9574, 9600), 'sympy.Symbol', 'sp.Symbol', (['"""p_cord_sys_l_"""'], {}), "('p_cord_sys_l_')\n", (9583, 9600), True, 'import sympy as sp\n'), ((9628, 9654), 'sympy.Symbol', 'sp.Symbol', (['"""q_cord_sys_l_"""'], {}), "('q_cord_sys_l_')\n", (9637, 9654), True, 'import sympy as sp\n'), ((9691, 9720), 'sympy.Symbol', 'sp.Symbol', (['"""q_corp_sys_r_in_"""'], {}), "('q_corp_sys_r_in_')\n", (9700, 9720), True, 'import sympy as sp\n'), ((9748, 9774), 'sympy.Symbol', 'sp.Symbol', (['"""q_corp_sys_r_"""'], {}), "('q_corp_sys_r_')\n", (9757, 9774), True, 'import sympy as sp\n'), ((9802, 9828), 'sympy.Symbol', 'sp.Symbol', (['"""p_cord_sys_r_"""'], {}), "('p_cord_sys_r_')\n", (9811, 9828), True, 'import sympy as sp\n'), ((9856, 9882), 'sympy.Symbol', 'sp.Symbol', (['"""q_cord_sys_r_"""'], {}), "('q_cord_sys_r_')\n", (9865, 9882), True, 'import sympy as sp\n'), ((15260, 15278), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (15276, 15278), False, 'import sys\n')]
|
import gensim
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from sklearn.metrics.pairwise import cosine_similarity
f = open('dataset.txt', 'r')
print(f.read())
f.close()
corpus = [
"This is first Sentence",
"This is second Sentence",
"This is third Sentence",
"This is fourth Sentence",
"This is fifth Sentence",
]
documents = [TaggedDocument(doc.split(), [i]) for i, doc in enumerate(corpus)]  # gensim expects a token list per document
model = Doc2Vec(documents , vector_size = 10 , window = 2 , min_count = 1 , workers =4)
model.save('sentenceEmbedderModel.pkl')
print('Model Creation Successful.' + repr(model))
vector = model.infer_vector('this is not a sentence'.split())
vector_2 = model.infer_vector('this is not a first sentence'.split())
vector_3 = model.infer_vector('this is not a sentence'.split())
print("vector is " + repr(vector))
print("1 vs 2 " + repr(cosine_similarity([vector],[vector_2])))
print("1 vs 3 " + repr(cosine_similarity([vector],[vector_3])))
|
[
"genism.models.doc2vec.Doc2Vec",
"sklearn.metrics.pairwise.cosine_similarity",
"genism.models.doc2vec.TaggedDocument"
] |
[((431, 499), 'gensim.models.doc2vec.Doc2Vec', 'Doc2Vec', (['documents'], {'vector_size': '(10)', 'window': '(2)', 'min_count': '(1)', 'workers': '(4)'}), '(documents, vector_size=10, window=2, min_count=1, workers=4)\n', (438, 499), False, 'from gensim.models.doc2vec import Doc2Vec, TaggedDocument\n'), ((365, 389), 'gensim.models.doc2vec.TaggedDocument', 'TaggedDocument', (['doc', '[i]'], {}), '(doc, [i])\n', (379, 389), False, 'from gensim.models.doc2vec import Doc2Vec, TaggedDocument\n'), ((850, 889), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['[vector]', '[vector_2]'], {}), '([vector], [vector_2])\n', (867, 889), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((915, 954), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['[vector]', '[vector_3]'], {}), '([vector], [vector_3])\n', (932, 954), False, 'from sklearn.metrics.pairwise import cosine_similarity\n')]
|
from helper import unittest, PillowTestCase, hopper
from PIL import Image, BmpImagePlugin
import io
class TestFileBmp(PillowTestCase):
def roundtrip(self, im):
outfile = self.tempfile("temp.bmp")
im.save(outfile, 'BMP')
reloaded = Image.open(outfile)
reloaded.load()
self.assertEqual(im.mode, reloaded.mode)
self.assertEqual(im.size, reloaded.size)
self.assertEqual(reloaded.format, "BMP")
def test_sanity(self):
self.roundtrip(hopper())
self.roundtrip(hopper("1"))
self.roundtrip(hopper("L"))
self.roundtrip(hopper("P"))
self.roundtrip(hopper("RGB"))
def test_invalid_file(self):
with open("Tests/images/flower.jpg", "rb") as fp:
self.assertRaises(SyntaxError,
BmpImagePlugin.BmpImageFile, fp)
def test_save_to_bytes(self):
output = io.BytesIO()
im = hopper()
im.save(output, "BMP")
output.seek(0)
reloaded = Image.open(output)
self.assertEqual(im.mode, reloaded.mode)
self.assertEqual(im.size, reloaded.size)
self.assertEqual(reloaded.format, "BMP")
def test_dpi(self):
dpi = (72, 72)
output = io.BytesIO()
im = hopper()
im.save(output, "BMP", dpi=dpi)
output.seek(0)
reloaded = Image.open(output)
self.assertEqual(reloaded.info["dpi"], dpi)
def test_save_bmp_with_dpi(self):
# Test for #1301
# Arrange
outfile = self.tempfile("temp.jpg")
im = Image.open("Tests/images/hopper.bmp")
# Act
im.save(outfile, 'JPEG', dpi=im.info['dpi'])
# Assert
reloaded = Image.open(outfile)
reloaded.load()
self.assertEqual(im.info['dpi'], reloaded.info['dpi'])
self.assertEqual(im.size, reloaded.size)
self.assertEqual(reloaded.format, "JPEG")
def test_load_dib(self):
# test for #1293, Imagegrab returning Unsupported Bitfields Format
im = BmpImagePlugin.DibImageFile('Tests/images/clipboard.dib')
target = Image.open('Tests/images/clipboard_target.png')
self.assert_image_equal(im, target)
if __name__ == '__main__':
unittest.main()
|
[
"io.BytesIO",
"helper.unittest.main",
"PIL.Image.open",
"PIL.BmpImagePlugin.DibImageFile",
"helper.hopper"
] |
[((2252, 2267), 'helper.unittest.main', 'unittest.main', ([], {}), '()\n', (2265, 2267), False, 'from helper import unittest, PillowTestCase, hopper\n'), ((265, 284), 'PIL.Image.open', 'Image.open', (['outfile'], {}), '(outfile)\n', (275, 284), False, 'from PIL import Image, BmpImagePlugin\n'), ((914, 926), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (924, 926), False, 'import io\n'), ((940, 948), 'helper.hopper', 'hopper', ([], {}), '()\n', (946, 948), False, 'from helper import unittest, PillowTestCase, hopper\n'), ((1023, 1041), 'PIL.Image.open', 'Image.open', (['output'], {}), '(output)\n', (1033, 1041), False, 'from PIL import Image, BmpImagePlugin\n'), ((1256, 1268), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1266, 1268), False, 'import io\n'), ((1282, 1290), 'helper.hopper', 'hopper', ([], {}), '()\n', (1288, 1290), False, 'from helper import unittest, PillowTestCase, hopper\n'), ((1374, 1392), 'PIL.Image.open', 'Image.open', (['output'], {}), '(output)\n', (1384, 1392), False, 'from PIL import Image, BmpImagePlugin\n'), ((1585, 1622), 'PIL.Image.open', 'Image.open', (['"""Tests/images/hopper.bmp"""'], {}), "('Tests/images/hopper.bmp')\n", (1595, 1622), False, 'from PIL import Image, BmpImagePlugin\n'), ((1728, 1747), 'PIL.Image.open', 'Image.open', (['outfile'], {}), '(outfile)\n', (1738, 1747), False, 'from PIL import Image, BmpImagePlugin\n'), ((2052, 2109), 'PIL.BmpImagePlugin.DibImageFile', 'BmpImagePlugin.DibImageFile', (['"""Tests/images/clipboard.dib"""'], {}), "('Tests/images/clipboard.dib')\n", (2079, 2109), False, 'from PIL import Image, BmpImagePlugin\n'), ((2127, 2174), 'PIL.Image.open', 'Image.open', (['"""Tests/images/clipboard_target.png"""'], {}), "('Tests/images/clipboard_target.png')\n", (2137, 2174), False, 'from PIL import Image, BmpImagePlugin\n'), ((507, 515), 'helper.hopper', 'hopper', ([], {}), '()\n', (513, 515), False, 'from helper import unittest, PillowTestCase, hopper\n'), ((541, 552), 'helper.hopper', 'hopper', (['"""1"""'], {}), "('1')\n", (547, 552), False, 'from helper import unittest, PillowTestCase, hopper\n'), ((577, 588), 'helper.hopper', 'hopper', (['"""L"""'], {}), "('L')\n", (583, 588), False, 'from helper import unittest, PillowTestCase, hopper\n'), ((613, 624), 'helper.hopper', 'hopper', (['"""P"""'], {}), "('P')\n", (619, 624), False, 'from helper import unittest, PillowTestCase, hopper\n'), ((649, 662), 'helper.hopper', 'hopper', (['"""RGB"""'], {}), "('RGB')\n", (655, 662), False, 'from helper import unittest, PillowTestCase, hopper\n')]
|
from keras.layers.convolutional import Convolution2D
from keras import backend as K
import tensorflow as tf
# Index permutation that moves the eight outer taps of a 3x3 kernel one step
# around the ring (a 45-degree rotation); the centre tap [1, 1] stays in place.
permutation = [[1, 0], [0, 0], [0, 1], [2, 0], [1, 1], [0, 2], [2, 1], [2, 2], [1, 2]]
def shift_rotate(w, shift=1):
    """Rotate a 3x3 kernel by `shift` * 45 degrees using tf.gather_nd."""
    shape = w.get_shape()
    for i in range(shift):
        w = tf.reshape(tf.gather_nd(w, permutation), shape)
    return w
class Convolution2D_4(Convolution2D):
def call(self, x, mask=None):
w = self.W
w_rot = [w]
for i in range(3):
w = shift_rotate(w, shift=2)
w_rot.append(w)
outputs = tf.stack([K.conv2d(x, w_i, strides=self.subsample,
border_mode=self.border_mode,
dim_ordering=self.dim_ordering,
filter_shape=self.W_shape) for w_i in w_rot])
output = K.max(outputs, 0)
if self.bias:
if self.dim_ordering == 'th':
output += K.reshape(self.b, (1, self.nb_filter, 1, 1))
elif self.dim_ordering == 'tf':
output += K.reshape(self.b, (1, 1, 1, self.nb_filter))
else:
raise ValueError('Invalid dim_ordering:', self.dim_ordering)
output = self.activation(output)
return output
class Convolution2D_8(Convolution2D):
def call(self, x, mask=None):
w = self.W
w_rot = [w]
for i in range(7):
w = shift_rotate(w)
w_rot.append(w)
outputs = tf.stack([K.conv2d(x, w_i, strides=self.subsample,
border_mode=self.border_mode,
dim_ordering=self.dim_ordering,
filter_shape=self.W_shape) for w_i in w_rot])
output = K.max(outputs, 0)
if self.bias:
if self.dim_ordering == 'th':
output += K.reshape(self.b, (1, self.nb_filter, 1, 1))
elif self.dim_ordering == 'tf':
output += K.reshape(self.b, (1, 1, 1, self.nb_filter))
else:
raise ValueError('Invalid dim_ordering:', self.dim_ordering)
output = self.activation(output)
return output
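# Hedged usage sketch (assumes the Keras 1.x Sequential API that the layers
# above are written against, with a TensorFlow backend and 'tf' dim ordering).
# Kernels must be 3x3, since `permutation` only describes a 3x3 neighbourhood.
def _example_model():
    from keras.models import Sequential
    from keras.layers import Flatten, Dense
    model = Sequential()
    model.add(Convolution2D_4(16, 3, 3, border_mode='same', activation='relu',
                              input_shape=(32, 32, 3)))
    model.add(Flatten())
    model.add(Dense(10, activation='softmax'))
    return model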
|
[
"tensorflow.gather_nd",
"keras.backend.reshape",
"keras.backend.conv2d",
"keras.backend.max"
] |
[((871, 888), 'keras.backend.max', 'K.max', (['outputs', '(0)'], {}), '(outputs, 0)\n', (876, 888), True, 'from keras import backend as K\n'), ((1805, 1822), 'keras.backend.max', 'K.max', (['outputs', '(0)'], {}), '(outputs, 0)\n', (1810, 1822), True, 'from keras import backend as K\n'), ((305, 333), 'tensorflow.gather_nd', 'tf.gather_nd', (['w', 'permutation'], {}), '(w, permutation)\n', (317, 333), True, 'import tensorflow as tf\n'), ((593, 726), 'keras.backend.conv2d', 'K.conv2d', (['x', 'w_i'], {'strides': 'self.subsample', 'border_mode': 'self.border_mode', 'dim_ordering': 'self.dim_ordering', 'filter_shape': 'self.W_shape'}), '(x, w_i, strides=self.subsample, border_mode=self.border_mode,\n dim_ordering=self.dim_ordering, filter_shape=self.W_shape)\n', (601, 726), True, 'from keras import backend as K\n'), ((980, 1024), 'keras.backend.reshape', 'K.reshape', (['self.b', '(1, self.nb_filter, 1, 1)'], {}), '(self.b, (1, self.nb_filter, 1, 1))\n', (989, 1024), True, 'from keras import backend as K\n'), ((1527, 1660), 'keras.backend.conv2d', 'K.conv2d', (['x', 'w_i'], {'strides': 'self.subsample', 'border_mode': 'self.border_mode', 'dim_ordering': 'self.dim_ordering', 'filter_shape': 'self.W_shape'}), '(x, w_i, strides=self.subsample, border_mode=self.border_mode,\n dim_ordering=self.dim_ordering, filter_shape=self.W_shape)\n', (1535, 1660), True, 'from keras import backend as K\n'), ((1914, 1958), 'keras.backend.reshape', 'K.reshape', (['self.b', '(1, self.nb_filter, 1, 1)'], {}), '(self.b, (1, self.nb_filter, 1, 1))\n', (1923, 1958), True, 'from keras import backend as K\n'), ((1095, 1139), 'keras.backend.reshape', 'K.reshape', (['self.b', '(1, 1, 1, self.nb_filter)'], {}), '(self.b, (1, 1, 1, self.nb_filter))\n', (1104, 1139), True, 'from keras import backend as K\n'), ((2029, 2073), 'keras.backend.reshape', 'K.reshape', (['self.b', '(1, 1, 1, self.nb_filter)'], {}), '(self.b, (1, 1, 1, self.nb_filter))\n', (2038, 2073), True, 'from keras import backend as K\n')]
|
import kahoot
import threading
import utils
class Flooder:
def __init__(self, gamepin, botname, amount, delay, window):
self.gamepin = gamepin
self.botname = botname
self.amount = amount
self.delay = delay
self.window = window
self.suffix = 0
self.bot = kahoot.client()
def loop(self):
if self.suffix < int(self.amount):
self.suffix += 1
self.bot.join(int(self.gamepin), f"{self.botname} [{self.suffix}]")
self.bot.on("joined")
self.window.after(int(self.delay), self.loop())
def start(self):
notifier = utils.Notifier()
notifier.send("Kahoot Flooder", f"Starting flood with {self.amount} bots, GUI may hang.")
self.window.after(int(self.delay), self.loop())
|
[
"utils.Notifier",
"kahoot.client"
] |
[((316, 331), 'kahoot.client', 'kahoot.client', ([], {}), '()\n', (329, 331), False, 'import kahoot\n'), ((653, 669), 'utils.Notifier', 'utils.Notifier', ([], {}), '()\n', (667, 669), False, 'import utils\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json as jsn
import os
import sys
import unicodedata
from utils import six
def read_json(filename):
def _convert_from_unicode(data):
new_data = dict()
for name, value in six.iteritems(data):
if isinstance(name, six.string_types):
name = unicodedata.normalize('NFKD', name).encode(
'ascii', 'ignore')
if isinstance(value, six.string_types):
value = unicodedata.normalize('NFKD', value).encode(
'ascii', 'ignore')
if isinstance(value, dict):
value = _convert_from_unicode(value)
new_data[name] = value
return new_data
output_dict = None
with open(filename, "r") as f:
lines = f.readlines()
        try:
            output_dict = jsn.loads(''.join(lines))
        except Exception:
            raise ValueError('Could not read %s. %s' % (filename, sys.exc_info()[1]))
output_dict = _convert_from_unicode(output_dict)
return output_dict
def _replace_quotes(x):
return x.replace("\'", "\"")
def _parse_value(value):
if isinstance(value, tuple):
value = list(value)
if value is None:
return "null"
if isinstance(value, str):
if value.lower() == "none":
return "null"
if value.lower() == "false":
return "false"
if value.lower() == "true":
return "true"
value = value.replace("\'", "\"")
return "\"%s\"" % _replace_quotes(value)
if isinstance(value, bool):
return str(value).lower()
if isinstance(value, list):
result = "["
for i, item in enumerate(value):
result += _parse_value(item)
if i < len(value) - 1:
result += ", "
result += "]"
return result
if isinstance(value, dict):
result = "{"
item_iterator = six.itersorteditems(value)
for i, (dict_key, dict_value) in enumerate(item_iterator):
result += "\"%s\": %s" % (dict_key, _parse_value(dict_value))
if i < len(value) - 1:
result += ", "
result += "}"
return result
return "%s" % _replace_quotes(str(value))
# ----------------------------------------------------------------------------
# Writes all key/value pairs to a file for bookkeeping
# (either .txt or .json, chosen by the file extension)
# ----------------------------------------------------------------------------
def write_dictionary_to_file(input_dict, filename, sortkeys=False):
# ensure dir
d = os.path.dirname(filename)
if not os.path.exists(d):
os.makedirs(d)
item_iterator = six.itersorteditems(input_dict) if sortkeys else six.iteritems(input_dict)
# check for json extension
ext = os.path.splitext(filename)[1]
if ext == ".json":
with open(filename, 'w') as file:
file.write("{\n")
for i, (key, value) in enumerate(item_iterator):
file.write(" \"%s\": %s" % (key, _parse_value(value)))
if i < len(input_dict) - 1:
file.write(',\n')
else:
file.write('\n')
file.write("}\n")
else:
with open(filename, 'w') as file:
for key, value in item_iterator:
file.write('%s: %s\n' % (key, value))
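# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module; the path and the
# settings are illustrative): round-trip a small configuration dict through
# the helpers above.
# ----------------------------------------------------------------------------
def _example_roundtrip(path='/tmp/run_config.json'):
    config = {'learning_rate': 1e-3, 'use_dropout': True,
              'layers': [64, 32], 'optimizer': 'adam'}
    write_dictionary_to_file(config, path, sortkeys=True)
    return read_json(path)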
|
[
"unicodedata.normalize",
"utils.six.iteritems",
"os.makedirs",
"os.path.dirname",
"os.path.exists",
"utils.six.itersorteditems",
"os.path.splitext",
"sys.exc_info"
] |
[((2698, 2723), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (2713, 2723), False, 'import os\n'), ((309, 328), 'utils.six.iteritems', 'six.iteritems', (['data'], {}), '(data)\n', (322, 328), False, 'from utils import six\n'), ((2047, 2073), 'utils.six.itersorteditems', 'six.itersorteditems', (['value'], {}), '(value)\n', (2066, 2073), False, 'from utils import six\n'), ((2735, 2752), 'os.path.exists', 'os.path.exists', (['d'], {}), '(d)\n', (2749, 2752), False, 'import os\n'), ((2762, 2776), 'os.makedirs', 'os.makedirs', (['d'], {}), '(d)\n', (2773, 2776), False, 'import os\n'), ((2798, 2829), 'utils.six.itersorteditems', 'six.itersorteditems', (['input_dict'], {}), '(input_dict)\n', (2817, 2829), False, 'from utils import six\n'), ((2847, 2872), 'utils.six.iteritems', 'six.iteritems', (['input_dict'], {}), '(input_dict)\n', (2860, 2872), False, 'from utils import six\n'), ((2915, 2941), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (2931, 2941), False, 'import os\n'), ((404, 439), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFKD"""', 'name'], {}), "('NFKD', name)\n", (425, 439), False, 'import unicodedata\n'), ((563, 599), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFKD"""', 'value'], {}), "('NFKD', value)\n", (584, 599), False, 'import unicodedata\n'), ((1053, 1067), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1065, 1067), False, 'import sys\n')]
|
import json
import numpy as np
import os
import pkg_resources
import re
from typing import Any, AnyStr, Dict, List, Optional, Tuple
def load_peripheral(pdata, templates=None):
"""Load a peripheral from a dict
This loads a peripheral with support for templates, as used in the board
definition file format
Args:
pdata: A dict containing the peripheral definition
templates: A dict mapping types to template definitions
"""
if not 'type' in pdata:
raise ValueError("Peripheral definition requires a type field")
template = None
if templates is not None and pdata['type'] in templates:
template = templates[pdata['type']]
periph = pdata
# Override electrodes with fields from template
def map_electrode(e):
eid = e['id']
if template is None:
return e
e_template = next((x for x in template['electrodes'] if x['id'] == eid), None)
if e_template is None:
return e
# Merge dicts, with values in e taking priority in case of duplicate keys
return {**e_template, **e}
periph['electrodes'] = [map_electrode(e) for e in periph['electrodes']]
return periph
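# Hedged illustration (the values are made up, not taken from any real board
# file): how a peripheral entry and a matching template are merged, with the
# per-board fields (pin, origin, rotation) overriding or extending the
# template's electrode geometry.
def _example_peripheral():
    templates = {'reservoir': {'electrodes': [
        {'id': 'A', 'polygon': [[0, 0], [1, 0], [1, 1], [0, 1]]}]}}
    pdata = {'type': 'reservoir', 'origin': [10.0, 2.0], 'rotation': 90,
             'electrodes': [{'id': 'A', 'pin': 42}]}
    return load_peripheral(pdata, templates)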
class Fiducial(object):
"""Represents a fiducial location
"""
def __init__(self, corners: List[List[int]], label: str=""):
self.corners = corners
self.label = label
@staticmethod
def from_dict(data):
return Fiducial(**data)
def to_dict(self):
return {
'corners': self.corners,
'label': self.label
}
class ControlPoint(object):
"""Represents a control point in an image
A control point is a pair of corresponding points -- one in image coordinates
and the other in grid coordinates -- used to calibrate the position of
the electrode grid relative to fiducials.
"""
def __init__(self, grid_coord: Tuple[float, float], image_coord: Tuple[float, float]):
self.grid = grid_coord
self.image = image_coord
    @staticmethod
    def from_dict(data):
if not 'grid' in data:
raise ValueError(f'A control point must have a `grid` and `image` attribute: {data}')
if not 'image' in data:
raise ValueError(f'A control point must have a `grid` and `image` attribute: {data}')
return ControlPoint(data['grid'], data['image'])
class Registration(object):
"""A registration is a collection of fiducials and control points which
    together define the relationship between the electrode locations and the fiducials
"""
def __init__(self, data: dict):
if not 'fiducials' in data:
raise ValueError(f'A Registration requires a fiducials attribute, not found in: {data}')
if not 'control_points' in data:
raise ValueError(f'A Registration requires a control points attribute, not found in: {data}')
if not isinstance(data['fiducials'], list):
raise ValueError(f'A Registration `fiducial` attribute must be a list: {data}')
if not isinstance(data['control_points'], list):
raise ValueError(f'a Registration `control_points` attribute must be a list: {data}')
self.fiducials = [Fiducial.from_dict(f) for f in data['fiducials']]
self.control_points = [ControlPoint.from_dict(cp) for cp in data['control_points']]
class Layout(object):
"""Represents the 'layout' property of a baord definition
A layout defines the placement and pin mapping for the electrodes on the
board.
"""
def __init__(self, layout_def: Dict[str, Any]):
self.peripherals = None
self.grids = []
def intify_pins(grid_pins):
result = []
for row in grid_pins:
new_row: List[Optional[int]] = []
for pin in row:
if pin == -1 or pin is None:
new_row.append(None)
else:
new_row.append(int(pin))
result.append(new_row)
return result
# Old format files use 'grid' to define a single grid
# New format uses an array of objects, under the key 'grids'
if 'grid' in layout_def:
self.grids.append({
'origin': [0.0, 0.0],
'pitch': 1.0,
'pins': intify_pins(layout_def['grid'])
})
elif 'grids' in layout_def:
for g in layout_def['grids']:
self.grids.append({
'origin': g['origin'],
'pitch': g['pitch'],
'pins': intify_pins(g['pins']),
})
if 'peripherals' in layout_def:
self.peripherals = [load_peripheral(p, layout_def.get('peripheral_templates', None)) for p in layout_def['peripherals']]
def grid_location_to_pin(self, x: int, y: int, grid_number:int =0):
"""Return the pin number at given grid location, or None if no pin is
defined there.
"""
if grid_number < len(self.grids):
grid = self.grids[grid_number]['pins']
else:
grid = [[]] # Empty grid
if y < 0 or y >= len(grid):
return None
row = grid[y]
if x < 0 or x >= len(row):
return None
return grid[y][x]
def pin_to_grid_location(self, pin: int) -> Optional[Tuple[Tuple[int, int], int]]:
"""Return the grid location of a given pin number
"""
for g, grid in enumerate(self.grids):
for y, row in enumerate(grid['pins']):
for x, p in enumerate(row):
if p == pin:
return ((x, y), g)
return None
def pin_polygon(self, pin: int) -> Optional[List[Tuple[int, int]]]:
"""Get the polygon defining a pin in board coordinates
"""
# Try to find the pin in a grid
grid_info = self.pin_to_grid_location(pin)
if grid_info is not None:
loc, grid_idx = grid_info
square = np.array([[0., 0.], [0., 1.], [1., 1.], [1., 0.]])
grid = self.grids[grid_idx]
polygon = (square + loc) * grid['pitch'] + grid['origin']
return polygon.tolist()
# Try to find the pin in a peripheral
if self.peripherals is None:
return None
for periph in self.peripherals:
for el in periph['electrodes']:
if el['pin'] == pin:
polygon = np.array(el['polygon'])
rotation = np.deg2rad(periph.get('rotation', 0.0))
R = np.array([[np.cos(rotation), -np.sin(rotation)], [np.sin(rotation), np.cos(rotation)]])
polygon = np.dot(R, polygon.T).T
return (polygon + periph['origin']).tolist()
return None
def as_dict(self) -> dict:
"""Return a serializable dict version of the board definition
"""
return {
"grids": self.grids,
"peripherals": self.peripherals
}
class Board(object):
"""Represents the top-level object in an electrode board definition file
"""
def __init__(self, board_def: Dict[str, Any]):
self.registration: Optional[Registration] = None
if not 'layout' in board_def:
raise RuntimeError("Board definition file must contain a 'layout' object")
self.layout = Layout(board_def['layout'])
self.oversized_electrodes = board_def.get('oversized_electrodes', [])
if 'registration' in board_def:
self.registration = Registration(board_def['registration'])
@staticmethod
def load_from_file(filepath):
"""Create a Board from a board definition file
"""
with open(filepath, 'r') as f:
data = json.loads(f.read())
return Board(data)
@staticmethod
def load_from_string(data: AnyStr) -> 'Board':
"""Create a board from a JSON string in memory
"""
return Board(json.loads(data))
def as_dict(self) -> dict:
"""Return a serializable dict representation of the board
"""
return {
'layout': self.layout.as_dict(),
'oversized_electrodes': self.oversized_electrodes,
}
def list_boards():
"""Find all available board definitions.
Uses same search rules as load_board; see :func:`load_board`.
Returns:
A list of board names, which can be passed to `load_board`
"""
config_dir = os.path.expanduser("~/.config/purpledrop/boards")
package_files = pkg_resources.resource_listdir('purpledrop', 'boards')
if os.path.isdir(config_dir):
config_files = os.listdir(config_dir)
else:
config_files = []
board_names = []
def add_files(files):
for f in files:
print(f"Checking {f}")
match = re.match(r'(.+).json', os.path.basename(f))
if match:
board_names.append(match.group(1))
# Config files take priority, if there are any duplicates
add_files(package_files)
add_files(config_files)
return board_names
def load_board(name) -> Optional[Board]:
"""Load a board definition by name or path
Attempt to load a board definition from the name, using the following
priorities (the first to succeed is returned):
1. Load as a full path
2. Load from ~/.config/purpledrop/boards/{name}.json
3. Load from package resources (`purpledrop/boards` in repo)
"""
if os.path.isfile(name):
return Board.load_from_file(name)
home_path = os.path.expanduser(f"~/.config/purpledrop/boards/{name}.json")
if os.path.isfile(home_path):
return Board.load_from_file(home_path)
try:
resource_data = pkg_resources.resource_string('purpledrop', f"boards/{name}.json")
return Board.load_from_string(resource_data)
except FileNotFoundError:
pass
return None
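# Illustrative usage sketch (added; not part of the original module). The board
# name below is a placeholder -- any name reported by list_boards() would work.
if __name__ == '__main__':
    for available in list_boards():
        print(available)
    board = load_board('example_board')  # placeholder name
    if board is None:
        print('no board definition found for that name')
    else:
        print(board.layout.grid_location_to_pin(0, 0))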
|
[
"pkg_resources.resource_listdir",
"json.loads",
"os.path.basename",
"os.path.isdir",
"os.path.isfile",
"numpy.sin",
"numpy.array",
"pkg_resources.resource_string",
"numpy.cos",
"numpy.dot",
"os.path.expanduser",
"os.listdir"
] |
[((8557, 8606), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.config/purpledrop/boards"""'], {}), "('~/.config/purpledrop/boards')\n", (8575, 8606), False, 'import os\n'), ((8627, 8681), 'pkg_resources.resource_listdir', 'pkg_resources.resource_listdir', (['"""purpledrop"""', '"""boards"""'], {}), "('purpledrop', 'boards')\n", (8657, 8681), False, 'import pkg_resources\n'), ((8689, 8714), 'os.path.isdir', 'os.path.isdir', (['config_dir'], {}), '(config_dir)\n', (8702, 8714), False, 'import os\n'), ((9567, 9587), 'os.path.isfile', 'os.path.isfile', (['name'], {}), '(name)\n', (9581, 9587), False, 'import os\n'), ((9648, 9710), 'os.path.expanduser', 'os.path.expanduser', (['f"""~/.config/purpledrop/boards/{name}.json"""'], {}), "(f'~/.config/purpledrop/boards/{name}.json')\n", (9666, 9710), False, 'import os\n'), ((9718, 9743), 'os.path.isfile', 'os.path.isfile', (['home_path'], {}), '(home_path)\n', (9732, 9743), False, 'import os\n'), ((8739, 8761), 'os.listdir', 'os.listdir', (['config_dir'], {}), '(config_dir)\n', (8749, 8761), False, 'import os\n'), ((9826, 9892), 'pkg_resources.resource_string', 'pkg_resources.resource_string', (['"""purpledrop"""', 'f"""boards/{name}.json"""'], {}), "('purpledrop', f'boards/{name}.json')\n", (9855, 9892), False, 'import pkg_resources\n'), ((6070, 6128), 'numpy.array', 'np.array', (['[[0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [1.0, 0.0]]'], {}), '([[0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [1.0, 0.0]])\n', (6078, 6128), True, 'import numpy as np\n'), ((8056, 8072), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (8066, 8072), False, 'import json\n'), ((8948, 8967), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (8964, 8967), False, 'import os\n'), ((6525, 6548), 'numpy.array', 'np.array', (["el['polygon']"], {}), "(el['polygon'])\n", (6533, 6548), True, 'import numpy as np\n'), ((6762, 6782), 'numpy.dot', 'np.dot', (['R', 'polygon.T'], {}), '(R, polygon.T)\n', (6768, 6782), True, 'import numpy as np\n'), ((6655, 6671), 'numpy.cos', 'np.cos', (['rotation'], {}), '(rotation)\n', (6661, 6671), True, 'import numpy as np\n'), ((6694, 6710), 'numpy.sin', 'np.sin', (['rotation'], {}), '(rotation)\n', (6700, 6710), True, 'import numpy as np\n'), ((6712, 6728), 'numpy.cos', 'np.cos', (['rotation'], {}), '(rotation)\n', (6718, 6728), True, 'import numpy as np\n'), ((6674, 6690), 'numpy.sin', 'np.sin', (['rotation'], {}), '(rotation)\n', (6680, 6690), True, 'import numpy as np\n')]
|
from multiprocessing import Queue, Process
from threading import Thread
import numpy as np
import utils
from agent import PPOAgent
from policy import get_policy
from worker import Worker
import environments
class SimpleMaster:
def __init__(self, env_producer):
self.env_name = env_producer.get_env_name()
self.config = environments.get_config(self.env_name)
self.worker_size = self.config["worker_num"]
self.env_producer = env_producer
self.queues = []
self.w_in_queue = Queue()
self.init_workers()
self.session = None
self.trainable_vars = None
self.accum_vars = None
self.p_opt_vars = None
self.v_opt_vars = None
self.assign_op = None
self.agent = None
self.saver = None
self.summary_writer = None
self.beta = 1
self.lr_multiplier = 1.0
self.iter_count = 1
self.variables_file_path = "models/%s/variables.txt" % self.env_name
self.model_path = "models/%s/model" % self.env_name
self.initialized = False
self.cur_step = -1
self.start()
def init_workers(self):
for i in range(self.worker_size):
q = Queue()
self.queues.append(q)
t = Process(target=make_worker, args=(self.env_producer, i, q, self.w_in_queue))
t.start()
def start(self):
import tensorflow as tf
env_opts = environments.get_env_options(self.env_name, self.env_producer.get_use_gpu())
self.summary_writer = tf.summary.FileWriter("logs/%s" % self.env_name)
self.session = utils.create_session(env_opts, True)
with tf.variable_scope("master-0"):
pol = get_policy(env_opts, self.session)
self.agent = PPOAgent(pol, self.session, "master-0", env_opts)
self.trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "master-0")
self.accum_vars = [tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False) for tv in
self.trainable_vars]
p_vars = self.agent.p_opt.variables()
v_vars = self.agent.v_opt.variables()
self.p_opt_vars = [tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False) for tv in p_vars]
self.v_opt_vars = [tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False) for tv in v_vars]
p_assign_ops = [p_vars[i].assign(self.p_opt_vars[i]) for i in range(len(p_vars))]
v_assign_ops = [v_vars[i].assign(self.v_opt_vars[i]) for i in range(len(v_vars))]
assign_ops = [self.trainable_vars[i].assign(self.accum_vars[i]) for i in
range(len(self.trainable_vars))]
self.assign_op = tf.group(assign_ops + p_assign_ops + v_assign_ops)
self.restore_variables()
self.saver = tf.train.Saver(max_to_keep=1)
self.session.run(tf.global_variables_initializer())
try:
self.saver = tf.train.import_meta_graph(
tf.train.latest_checkpoint("models/%s/" % env_opts["env_name"]) + ".meta")
self.saver.restore(self.session,
tf.train.latest_checkpoint("models/%s/" % env_opts["env_name"]))
except:
print("failed to restore model")
while True:
if self.iter_count % 10 == 0:
print("Saving model...")
self.save_variables()
self.saver.save(self.session, self.model_path, self.iter_count)
print("Model saved")
self.broadcast_weights()
self.merge_weights()
self.iter_count += 1
def restore_variables(self):
try:
lines = open(self.variables_file_path).readlines()
result = {}
for l in lines:
a, b = l.split("=")
b = b.strip()
result[a] = b
self.iter_count = int(result["global_step"]) + 1
self.beta = float(result["beta"])
self.lr_multiplier = float(result["lr_multiplier"])
except:
print("failed to restore variables")
def save_variables(self):
f = open(self.variables_file_path, "w")
lines = []
lines.append("global_step=%s\n" % self.iter_count)
lines.append("beta=%s\n" % self.beta)
lines.append("lr_multiplier=%s\n" % self.lr_multiplier)
f.writelines(lines)
f.close()
def broadcast_weights(self):
weights, p_opt_weights, v_opt_weights = self.session.run([self.trainable_vars,
self.agent.p_opt.variables(),
self.agent.v_opt.variables()])
arr = [self.beta, self.lr_multiplier, p_opt_weights, v_opt_weights, weights]
for q in self.queues:
q.put(arr)
def merge_weights(self):
results = []
for i in range(self.worker_size):
results.append(self.w_in_queue.get())
self.beta = np.mean([x[0] for x in results])
self.lr_multiplier = np.mean([x[1] for x in results])
p_opt_weights = self.make_means([x[2] for x in results])
v_opt_weights = self.make_means([x[3] for x in results])
weights = self.make_means([x[4] for x in results])
first_worker = [x for x in results if x[5]["idx"] == 0][0]
self.record_stats(first_worker[5])
fd = {}
for i, t in enumerate(self.accum_vars):
fd[t] = weights[i]
for i, t in enumerate(self.p_opt_vars):
fd[t] = p_opt_weights[i]
for i, t in enumerate(self.v_opt_vars):
fd[t] = v_opt_weights[i]
self.session.run(self.assign_op, feed_dict=fd)
def make_means(self, weights):
result = []
for i in range(len(weights[0])):
acc = []
for j in range(len(weights)):
acc.append(weights[j][i])
acc = np.mean(acc, axis=0)
result.append(acc)
return result
def record_stats(self, stats):
if self.cur_step == stats["step"]:
return
self.cur_step = stats["step"]
self.record_losses(stats["kl"], stats["entropy"], stats["hinge"], stats["src_policy_loss"],
stats["vloss"], stats["ploss"], stats["step"])
cum_rew = 0
for s in stats["stats"]:
self.log_summary(s["reward"], s["step"], s["a_probs"], s["picked_a"], s["a_dim"], s["discrete"])
cum_rew += s["reward"]
cum_rew /= max(1, len(stats["stats"]))
print("Average reward: %s" % cum_rew)
def record_losses(self, cur_kl, entropy, hinge, src_policy_loss, vloss, ploss, step):
import tensorflow as tf
summary = tf.Summary()
summary.value.add(tag='Losses/value_loss', simple_value=vloss)
summary.value.add(tag='Losses/policy_loss', simple_value=ploss)
summary.value.add(tag='Losses/kl_divergence', simple_value=cur_kl)
summary.value.add(tag='Losses/entropy', simple_value=entropy)
summary.value.add(tag='Losses/src_policy_loss', simple_value=src_policy_loss)
summary.value.add(tag='Losses/hinge', simple_value=hinge)
summary.value.add(tag='Vars/beta', simple_value=self.beta)
summary.value.add(tag='Vars/lr_multiplier', simple_value=self.lr_multiplier)
self.summary_writer.add_summary(summary, step)
self.summary_writer.flush()
def log_summary(self, reward, step, a_probs, picked_a, a_dim, discrete):
import tensorflow as tf
summary = tf.Summary()
summary.value.add(tag='Reward/per_episode', simple_value=float(reward))
if not discrete:
for i in range(a_dim):
prefix = "Action" + str(i)
summary.value.add(tag=prefix + '/mean', simple_value=float(a_probs[i]))
summary.value.add(tag=prefix + "/std", simple_value=float(a_probs[i + a_dim]))
summary.value.add(tag=prefix + '/picked', simple_value=float(picked_a[i]))
else:
for i in range(a_dim):
prefix = "Action" + str(i)
summary.value.add(tag=prefix + '/prob', simple_value=float(a_probs[i]))
summary.value.add(tag='Action/picked', simple_value=float(picked_a))
self.summary_writer.add_summary(summary, step)
self.summary_writer.flush()
def make_worker(env_producer, i, q, w_in_queue):
return Worker(env_producer, i, q, w_in_queue)
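# Small numeric sketch (added for illustration): the element-wise averaging that
# SimpleMaster.make_means applies across per-worker weight lists, using plain
# numpy only; the toy shapes below are placeholders.
def _demo_make_means():
    worker_a = [np.zeros(3), np.ones((2, 2))]
    worker_b = [np.ones(3), 3.0 * np.ones((2, 2))]
    merged = []
    for i in range(len(worker_a)):
        merged.append(np.mean([worker_a[i], worker_b[i]], axis=0))
    return merged  # [array([0.5, 0.5, 0.5]), array([[2., 2.], [2., 2.]])]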
|
[
"policy.get_policy",
"environments.get_config",
"utils.create_session",
"tensorflow.train.Saver",
"tensorflow.Summary",
"tensorflow.get_collection",
"tensorflow.global_variables_initializer",
"worker.Worker",
"tensorflow.variable_scope",
"tensorflow.summary.FileWriter",
"numpy.mean",
"agent.PPOAgent",
"multiprocessing.Queue",
"tensorflow.group",
"tensorflow.train.latest_checkpoint",
"multiprocessing.Process"
] |
[((8593, 8631), 'worker.Worker', 'Worker', (['env_producer', 'i', 'q', 'w_in_queue'], {}), '(env_producer, i, q, w_in_queue)\n', (8599, 8631), False, 'from worker import Worker\n'), ((343, 381), 'environments.get_config', 'environments.get_config', (['self.env_name'], {}), '(self.env_name)\n', (366, 381), False, 'import environments\n'), ((527, 534), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (532, 534), False, 'from multiprocessing import Queue, Process\n'), ((1561, 1609), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["('logs/%s' % self.env_name)"], {}), "('logs/%s' % self.env_name)\n", (1582, 1609), True, 'import tensorflow as tf\n'), ((1633, 1669), 'utils.create_session', 'utils.create_session', (['env_opts', '(True)'], {}), '(env_opts, True)\n', (1653, 1669), False, 'import utils\n'), ((2903, 2932), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(1)'}), '(max_to_keep=1)\n', (2917, 2932), True, 'import tensorflow as tf\n'), ((5136, 5168), 'numpy.mean', 'np.mean', (['[x[0] for x in results]'], {}), '([x[0] for x in results])\n', (5143, 5168), True, 'import numpy as np\n'), ((5198, 5230), 'numpy.mean', 'np.mean', (['[x[1] for x in results]'], {}), '([x[1] for x in results])\n', (5205, 5230), True, 'import numpy as np\n'), ((6885, 6897), 'tensorflow.Summary', 'tf.Summary', ([], {}), '()\n', (6895, 6897), True, 'import tensorflow as tf\n'), ((7709, 7721), 'tensorflow.Summary', 'tf.Summary', ([], {}), '()\n', (7719, 7721), True, 'import tensorflow as tf\n'), ((1224, 1231), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (1229, 1231), False, 'from multiprocessing import Queue, Process\n'), ((1282, 1358), 'multiprocessing.Process', 'Process', ([], {'target': 'make_worker', 'args': '(self.env_producer, i, q, self.w_in_queue)'}), '(target=make_worker, args=(self.env_producer, i, q, self.w_in_queue))\n', (1289, 1358), False, 'from multiprocessing import Queue, Process\n'), ((1683, 1712), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""master-0"""'], {}), "('master-0')\n", (1700, 1712), True, 'import tensorflow as tf\n'), ((1732, 1766), 'policy.get_policy', 'get_policy', (['env_opts', 'self.session'], {}), '(env_opts, self.session)\n', (1742, 1766), False, 'from policy import get_policy\n'), ((1792, 1841), 'agent.PPOAgent', 'PPOAgent', (['pol', 'self.session', '"""master-0"""', 'env_opts'], {}), "(pol, self.session, 'master-0', env_opts)\n", (1800, 1841), False, 'from agent import PPOAgent\n'), ((1876, 1939), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES', '"""master-0"""'], {}), "(tf.GraphKeys.TRAINABLE_VARIABLES, 'master-0')\n", (1893, 1939), True, 'import tensorflow as tf\n'), ((2797, 2847), 'tensorflow.group', 'tf.group', (['(assign_ops + p_assign_ops + v_assign_ops)'], {}), '(assign_ops + p_assign_ops + v_assign_ops)\n', (2805, 2847), True, 'import tensorflow as tf\n'), ((2958, 2991), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2989, 2991), True, 'import tensorflow as tf\n'), ((6070, 6090), 'numpy.mean', 'np.mean', (['acc'], {'axis': '(0)'}), '(acc, axis=0)\n', (6077, 6090), True, 'import numpy as np\n'), ((3226, 3289), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (["('models/%s/' % env_opts['env_name'])"], {}), "('models/%s/' % env_opts['env_name'])\n", (3252, 3289), True, 'import tensorflow as tf\n'), ((3075, 3138), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (["('models/%s/' % 
env_opts['env_name'])"], {}), "('models/%s/' % env_opts['env_name'])\n", (3101, 3138), True, 'import tensorflow as tf\n')]
|
import os
from flask import abort
from flask import request
from flask import send_from_directory
from app import app
from app.main.RequestParameters import RequestParameters
from app.main.Session import Session
from app.main.Session import Status
from app.main.exceptions.exceptions import InvalidSessionIdError
API_PATH_PREFIX = '/api'
def _get_session(session_id):
try:
return Session(session_id)
except InvalidSessionIdError:
abort(404)
def _get_status_url(session_id):
return API_PATH_PREFIX + '/status/' + session_id
def _get_result_file_url(session_id):
return API_PATH_PREFIX + '/results/' + session_id
@app.route(API_PATH_PREFIX + '/process', methods=['POST'])
def new_request():
try:
file = request.files['file']
if not file:
abort(400)
# TODO Validate file
session = Session()
session.store_input_file(file)
request_parameters = RequestParameters.parse(request.form)
session.start_processing(request_parameters)
return {
'status_url': _get_status_url(session.session_id),
}
except ValueError:
abort(400)
@app.route(API_PATH_PREFIX + '/health')
def health():
return 'health'
@app.route(API_PATH_PREFIX + '/status/<path:session_id>')
def check_status(session_id):
session = _get_session(session_id)
status_result = {
'status': 'in_progress',
}
status = session.get_status()
if status == Status.SUCCESS:
status_result['status'] = 'done'
elif status == Status.FAILURE:
status_result['status'] = 'error'
is_done = session.get_status() == Status.SUCCESS
if is_done:
status_result['result_url'] = _get_result_file_url(session_id)
return status_result
@app.route(API_PATH_PREFIX + '/results/<path:session_id>')
def results(session_id):
session = _get_session(session_id)
return send_from_directory(
os.path.join('..', session.get_session_directory()),
session.get_result_file_name(),
)
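# Illustrative client sketch (added; not part of the original app). Host, port
# and file name are placeholders; the endpoints match the routes above. The
# form fields expected by RequestParameters.parse are not shown in this file,
# so the request is left as a commented outline only.
#   import requests
#   with open('input.bin', 'rb') as f:
#       resp = requests.post('http://localhost:5000/api/process',
#                            files={'file': f}, data={...})
#   status_url = resp.json()['status_url']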
|
[
"app.app.route",
"app.main.Session.Session",
"flask.abort",
"app.main.RequestParameters.RequestParameters.parse"
] |
[((655, 712), 'app.app.route', 'app.route', (["(API_PATH_PREFIX + '/process')"], {'methods': "['POST']"}), "(API_PATH_PREFIX + '/process', methods=['POST'])\n", (664, 712), False, 'from app import app\n'), ((1175, 1213), 'app.app.route', 'app.route', (["(API_PATH_PREFIX + '/health')"], {}), "(API_PATH_PREFIX + '/health')\n", (1184, 1213), False, 'from app import app\n'), ((1251, 1307), 'app.app.route', 'app.route', (["(API_PATH_PREFIX + '/status/<path:session_id>')"], {}), "(API_PATH_PREFIX + '/status/<path:session_id>')\n", (1260, 1307), False, 'from app import app\n'), ((1793, 1850), 'app.app.route', 'app.route', (["(API_PATH_PREFIX + '/results/<path:session_id>')"], {}), "(API_PATH_PREFIX + '/results/<path:session_id>')\n", (1802, 1850), False, 'from app import app\n'), ((397, 416), 'app.main.Session.Session', 'Session', (['session_id'], {}), '(session_id)\n', (404, 416), False, 'from app.main.Session import Session\n'), ((870, 879), 'app.main.Session.Session', 'Session', ([], {}), '()\n', (877, 879), False, 'from app.main.Session import Session\n'), ((948, 985), 'app.main.RequestParameters.RequestParameters.parse', 'RequestParameters.parse', (['request.form'], {}), '(request.form)\n', (971, 985), False, 'from app.main.RequestParameters import RequestParameters\n'), ((459, 469), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (464, 469), False, 'from flask import abort\n'), ((811, 821), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (816, 821), False, 'from flask import abort\n'), ((1161, 1171), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (1166, 1171), False, 'from flask import abort\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the Windows Event Log message resource extractor class."""
import unittest
from dfvfs.helpers import fake_file_system_builder
from dfvfs.helpers import windows_path_resolver
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
from winevtrc import extractor
from winevtrc import resources
from tests import test_lib as shared_test_lib
class TestOutputWriter(object):
"""Class that defines a test output writer.
Attributes:
event_log_providers (list[EventLogProvider]): event log providers.
message_files (list[MessageFile]): message files.
"""
def __init__(self):
"""Initializes the test output writer."""
super(TestOutputWriter, self).__init__()
self.event_log_providers = []
self.message_files = []
def Close(self):
"""Closes the output writer."""
return
def Open(self):
"""Opens the output writer.
Returns:
bool: True if successful or False if not.
"""
return True
def WriteEventLogProvider(self, event_log_provider):
"""Writes the Event Log provider.
Args:
event_log_provider (EventLogProvider): event log provider.
"""
self.event_log_providers.append(event_log_provider)
# pylint: disable=unused-argument
def WriteMessageFile(
self, event_log_provider, message_resource_file, message_filename,
message_file_type):
"""Writes the Windows Message Resource file.
Args:
event_log_provider (EventLogProvider): event log provider.
message_resource_file (MessageResourceFile): message resource file.
message_filename (str): message filename.
message_file_type (str): message file type.
"""
self.message_files.append(message_resource_file)
class EventMessageStringRegistryFileReaderTest(shared_test_lib.BaseTestCase):
"""Tests for the Windows Registry file reader."""
def testOpen(self):
"""Tests the Open function."""
volume_scanner = extractor.EventMessageStringExtractor()
file_reader = extractor.EventMessageStringRegistryFileReader(
volume_scanner)
test_file_path = self._GetTestFilePath(['SOFTWARE'])
# TODO: implement tests.
# file_reader.Open(test_file_path)
# file_reader.Open('bogus')
_ = file_reader
_ = test_file_path
@shared_test_lib.skipUnlessHasTestFile(['SOFTWARE'])
@shared_test_lib.skipUnlessHasTestFile(['SYSTEM'])
class EventMessageStringExtractorTest(shared_test_lib.BaseTestCase):
"""Tests for the Windows Event Log message resource extractor."""
# pylint: disable=protected-access
def _CreateTestEventMessageStringExtractor(self):
"""Creates an event message string extractor for testing.
Returns:
EventMessageStringExtractor: an event message string extractor.
"""
file_system_builder = fake_file_system_builder.FakeFileSystemBuilder()
test_file_path = self._GetTestFilePath(['SOFTWARE'])
file_system_builder.AddFileReadData(
'/Windows/System32/config/SOFTWARE', test_file_path)
test_file_path = self._GetTestFilePath(['SYSTEM'])
file_system_builder.AddFileReadData(
'/Windows/System32/config/SYSTEM', test_file_path)
mount_point = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_FAKE, location='/')
extractor_object = extractor.EventMessageStringExtractor()
extractor_object._file_system = file_system_builder.file_system
extractor_object._path_resolver = (
windows_path_resolver.WindowsPathResolver(
file_system_builder.file_system, mount_point))
extractor_object._windows_directory = 'C:\\Windows'
extractor_object._path_resolver.SetEnvironmentVariable(
'SystemRoot', extractor_object._windows_directory)
extractor_object._path_resolver.SetEnvironmentVariable(
'WinDir', extractor_object._windows_directory)
return extractor_object
def testWindowsVersionProperty(self):
"""Tests the windows_version property."""
extractor_object = self._CreateTestEventMessageStringExtractor()
windows_version = extractor_object.windows_version
# TODO: improve test.
self.assertIsNone(windows_version)
def testCollectEventLogTypes(self):
"""Tests the _CollectEventLogTypes function."""
extractor_object = self._CreateTestEventMessageStringExtractor()
event_log_types = extractor_object._CollectEventLogTypes()
self.assertEqual(len(event_log_types), 3)
self.assertEqual(len(event_log_types['Application']), 65)
self.assertEqual(len(event_log_types['Security']), 7)
self.assertEqual(len(event_log_types['System']), 186)
# TODO: hide duplication warnings.
event_log_types = extractor_object._CollectEventLogTypes(
all_control_sets=True)
self.assertEqual(len(event_log_types), 3)
self.assertEqual(len(event_log_types['Application']), 65)
self.assertEqual(len(event_log_types['Security']), 7)
self.assertEqual(len(event_log_types['System']), 186)
def testCollectEventLogProvidersFromKey(self):
"""Tests the _CollectEventLogProvidersFromKey function."""
extractor_object = self._CreateTestEventMessageStringExtractor()
generator = extractor_object._CollectEventLogProvidersFromKey(None)
# TODO: fix generator method.
self.assertIsNotNone(generator)
def testExtractMessageFile(self):
"""Tests the _ExtractMessageFile function."""
extractor_object = self._CreateTestEventMessageStringExtractor()
# TODO: improve test.
output_writer = TestOutputWriter()
processed_message_filenames = []
event_log_provider = resources.EventLogProvider(
'log_type', 'log_source', 'provider_guid')
message_filename = ''
message_file_type = ''
extractor_object._ExtractMessageFile(
output_writer, processed_message_filenames, event_log_provider,
message_filename, message_file_type)
self.assertEqual(len(output_writer.event_log_providers), 0)
self.assertEqual(len(output_writer.message_files), 0)
def testGetEventLogProviders(self):
"""Tests the _GetEventLogProviders function."""
extractor_object = self._CreateTestEventMessageStringExtractor()
event_log_providers = list(extractor_object._GetEventLogProviders())
self.assertEqual(len(event_log_providers), 258)
event_log_providers = list(
extractor_object._GetEventLogProviders(all_control_sets=True))
self.assertEqual(len(event_log_providers), 516)
def testGetSystemRoot(self):
"""Tests the _GetSystemRoot function."""
extractor_object = self._CreateTestEventMessageStringExtractor()
system_root = extractor_object._GetSystemRoot()
self.assertEqual(system_root, 'C:\\WINDOWS')
def testGetWindowsVersion(self):
"""Tests the _GetWindowsVersion function."""
extractor_object = self._CreateTestEventMessageStringExtractor()
windows_version = extractor_object._GetWindowsVersion()
# TODO: improve test.
self.assertIsNone(windows_version)
def testOpenMessageResourceFile(self):
"""Tests the _OpenMessageResourceFile function."""
extractor_object = self._CreateTestEventMessageStringExtractor()
# TODO: improve test.
message_resource_file = extractor_object._OpenMessageResourceFile(
'C:\\Windows\\System32\\wrc_test.dll')
self.assertIsNone(message_resource_file)
# TODO: test _OpenMessageResourceFileByPathSpec
def testExtractEventLogMessageStrings(self):
"""Tests the ExtractEventLogMessageStrings function."""
extractor_object = self._CreateTestEventMessageStringExtractor()
output_writer = TestOutputWriter()
extractor_object.ExtractEventLogMessageStrings(output_writer)
self.assertEqual(len(output_writer.event_log_providers), 258)
self.assertEqual(len(output_writer.message_files), 0)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"dfvfs.helpers.windows_path_resolver.WindowsPathResolver",
"winevtrc.resources.EventLogProvider",
"dfvfs.helpers.fake_file_system_builder.FakeFileSystemBuilder",
"winevtrc.extractor.EventMessageStringRegistryFileReader",
"tests.test_lib.skipUnlessHasTestFile",
"dfvfs.path.factory.Factory.NewPathSpec",
"winevtrc.extractor.EventMessageStringExtractor"
] |
[((2358, 2409), 'tests.test_lib.skipUnlessHasTestFile', 'shared_test_lib.skipUnlessHasTestFile', (["['SOFTWARE']"], {}), "(['SOFTWARE'])\n", (2395, 2409), True, 'from tests import test_lib as shared_test_lib\n'), ((2411, 2460), 'tests.test_lib.skipUnlessHasTestFile', 'shared_test_lib.skipUnlessHasTestFile', (["['SYSTEM']"], {}), "(['SYSTEM'])\n", (2448, 2460), True, 'from tests import test_lib as shared_test_lib\n'), ((7880, 7895), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7893, 7895), False, 'import unittest\n'), ((2021, 2060), 'winevtrc.extractor.EventMessageStringExtractor', 'extractor.EventMessageStringExtractor', ([], {}), '()\n', (2058, 2060), False, 'from winevtrc import extractor\n'), ((2080, 2142), 'winevtrc.extractor.EventMessageStringRegistryFileReader', 'extractor.EventMessageStringRegistryFileReader', (['volume_scanner'], {}), '(volume_scanner)\n', (2126, 2142), False, 'from winevtrc import extractor\n'), ((2869, 2917), 'dfvfs.helpers.fake_file_system_builder.FakeFileSystemBuilder', 'fake_file_system_builder.FakeFileSystemBuilder', ([], {}), '()\n', (2915, 2917), False, 'from dfvfs.helpers import fake_file_system_builder\n'), ((3253, 3347), 'dfvfs.path.factory.Factory.NewPathSpec', 'path_spec_factory.Factory.NewPathSpec', (['dfvfs_definitions.TYPE_INDICATOR_FAKE'], {'location': '"""/"""'}), "(dfvfs_definitions.TYPE_INDICATOR_FAKE,\n location='/')\n", (3290, 3347), True, 'from dfvfs.path import factory as path_spec_factory\n'), ((3377, 3416), 'winevtrc.extractor.EventMessageStringExtractor', 'extractor.EventMessageStringExtractor', ([], {}), '()\n', (3414, 3416), False, 'from winevtrc import extractor\n'), ((3534, 3625), 'dfvfs.helpers.windows_path_resolver.WindowsPathResolver', 'windows_path_resolver.WindowsPathResolver', (['file_system_builder.file_system', 'mount_point'], {}), '(file_system_builder.file_system,\n mount_point)\n', (3575, 3625), False, 'from dfvfs.helpers import windows_path_resolver\n'), ((5647, 5716), 'winevtrc.resources.EventLogProvider', 'resources.EventLogProvider', (['"""log_type"""', '"""log_source"""', '"""provider_guid"""'], {}), "('log_type', 'log_source', 'provider_guid')\n", (5673, 5716), False, 'from winevtrc import resources\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import rospkg
import threading
import yaml
from copy import deepcopy
import message_filters
import numpy as np
import pyrobot.utils.util as prutil
import rospy
from pyrobot.core import Camera
from sensor_msgs.msg import CameraInfo
from sensor_msgs.msg import Image
from std_msgs.msg import Float64
import sys
ros_path = '/opt/ros/kinetic/lib/python2.7/dist-packages'
if ros_path in sys.path:
sys.path.remove(ros_path)
import cv2
sys.path.append(ros_path)
from cv_bridge import CvBridge, CvBridgeError
class Kinect2Camera(Camera):
"""
    This is a camera class that interfaces with the KinectV2 camera
"""
def __init__(self, configs):
"""
Constructor of the KinectV2Camera class.
:param configs: Camera specific configuration object
:type configs: YACS CfgNode
"""
super(Kinect2Camera, self).__init__(configs=configs)
self.cv_bridge = CvBridge()
self.camera_info_lock = threading.RLock()
self.camera_img_lock = threading.RLock()
self.rgb_img = None
self.depth_img = None
self.camera_info = None
self.camera_P = None
rospy.Subscriber(self.configs.CAMERA.ROSTOPIC_CAMERA_INFO_STREAM,
CameraInfo,
self._camera_info_callback)
rgb_topic = self.configs.CAMERA.ROSTOPIC_CAMERA_RGB_STREAM
self.rgb_sub = message_filters.Subscriber(rgb_topic, Image)
depth_topic = self.configs.CAMERA.ROSTOPIC_CAMERA_DEPTH_STREAM
self.depth_sub = message_filters.Subscriber(depth_topic, Image)
img_subs = [self.rgb_sub, self.depth_sub]
self.sync = message_filters.ApproximateTimeSynchronizer(img_subs,
queue_size=10,
slop=0.2)
self.sync.registerCallback(self._sync_callback)
self.DepthMapFactor = float(self.configs.CAMERA.DEPTH_MAP_FACTOR)
self.intrinsic_mat = None
def _sync_callback(self, rgb, depth):
self.camera_img_lock.acquire()
try:
self.rgb_img = self.cv_bridge.imgmsg_to_cv2(rgb, "bgr8")
self.rgb_img = self.rgb_img[:, :, ::-1]
self.depth_img = self.cv_bridge.imgmsg_to_cv2(depth, "passthrough")
except CvBridgeError as e:
rospy.logerr(e)
self.camera_img_lock.release()
def _camera_info_callback(self, msg):
self.camera_info_lock.acquire()
self.camera_info = msg
self.camera_P = np.array(msg.P).reshape((3, 4))
self.camera_info_lock.release()
def get_rgb(self):
'''
This function returns the RGB image perceived by the camera.
:rtype: np.ndarray or None
'''
self.camera_img_lock.acquire()
rgb = deepcopy(self.rgb_img)
self.camera_img_lock.release()
return rgb
def get_depth(self):
'''
This function returns the depth image perceived by the camera.
:rtype: np.ndarray or None
'''
self.camera_img_lock.acquire()
depth = deepcopy(self.depth_img)
self.camera_img_lock.release()
return depth
def get_rgb_depth(self):
'''
This function returns both the RGB and depth
images perceived by the camera.
:rtype: np.ndarray or None
'''
self.camera_img_lock.acquire()
rgb = deepcopy(self.rgb_img)
depth = deepcopy(self.depth_img)
self.camera_img_lock.release()
return rgb, depth
def get_intrinsics(self):
"""
This function returns the camera intrinsics.
:rtype: np.ndarray
"""
if self.camera_P is None:
return self.camera_P
self.camera_info_lock.acquire()
P = deepcopy(self.camera_P)
self.camera_info_lock.release()
return P[:3, :3]
def get_current_pcd(self):
"""
Return the point cloud at current time step (one frame only)
:returns: tuple (pts, colors)
pts: point coordinates in camera frame (shape: :math:`[N, 3]`)
colors: rgb values for pts_in_cam (shape: :math:`[N, 3]`)
:rtype: tuple(np.ndarray, np.ndarray)
"""
rgb_im, depth_im = self.get_rgb_depth()
depth = depth_im.reshape(-1) / self.DepthMapFactor
rgb = rgb_im.reshape(-1, 3)
if self.intrinsic_mat is None:
self.intrinsic_mat = self.get_intrinsics()
self.intrinsic_mat_inv = np.linalg.inv(self.intrinsic_mat)
#TODO: image height --> rgb_im.shape[0] and width--> rgb_im.shape[1]
img_pixs = np.mgrid[0: rgb_im.shape[0]: 1,
0: rgb_im.shape[1]: 1]
img_pixs = img_pixs.reshape(2, -1)
img_pixs[[0, 1], :] = img_pixs[[1, 0], :]
self.uv_one = np.concatenate((img_pixs,
np.ones((1, img_pixs.shape[1]))))
self.uv_one_in_cam = np.dot(self.intrinsic_mat_inv, self.uv_one)
pts_in_cam = np.multiply(self.uv_one_in_cam, depth)
pts_in_cam = np.concatenate((pts_in_cam,
np.ones((1, pts_in_cam.shape[1]))),
axis=0)
pts = pts_in_cam[:3, :].T
return pts, rgb
    def pix_to_3dpt(self, rs, cs, reduce='none', k=5):
"""
Get the 3D points of the pixels in RGB images.
:param rs: rows of interest in the RGB image.
It can be a list or 1D numpy array
which contains the row indices.
The default value is None,
which means all rows.
:param cs: columns of interest in the RGB image.
It can be a list or 1D numpy array
which contains the column indices.
The default value is None,
which means all columns.
:param reduce: whether to consider the depth at nearby pixels
'none': no neighbour consideration
'mean': depth based on the mean of kernel sized k centered at [rs,cs]
'max': depth based on the max of kernel sized k centered at [rs,cs]
'min': depth based on the min of kernel sized k centered at [rs,cs]
:param k: kernel size for reduce type['mean', 'max', 'min']
:type rs: list or np.ndarray
:type cs: list or np.ndarray
:type reduce: str
        :type k: int
:returns: tuple (pts, colors)
pts: point coordinates in world frame
(shape: :math:`[N, 3]`)
colors: rgb values for pts_in_cam
(shape: :math:`[N, 3]`)
:rtype: tuple(np.ndarray, np.ndarray)
"""
assert isinstance(rs,
int) or isinstance(rs,
list) or isinstance(rs,
np.ndarray)
assert isinstance(cs,
int) or isinstance(cs,
list) or isinstance(cs,
np.ndarray)
if isinstance(rs, int):
rs = [rs]
if isinstance(cs, int):
cs = [cs]
if isinstance(rs, np.ndarray):
rs = rs.flatten()
if isinstance(cs, np.ndarray):
cs = cs.flatten()
rgb_im, depth_im = self.get_rgb_depth()
R,C,_ = rgb_im.shape
if reduce == 'none':
depth_im = depth_im[rs, cs]
elif reduce == 'mean':
depth_im = np.array([np.mean(depth_im[max(i-k,0):min(i+k,R), max(j-k,0):min(j+k,C)]) for i,j in zip(rs,cs)])
elif reduce == 'max':
depth_im = np.array([np.max(depth_im[max(i-k,0):min(i+k,R), max(j-k,0):min(j+k,C)]) for i,j in zip(rs,cs)])
elif reduce == 'min':
depth_im = np.array([np.min(depth_im[max(i-k,0):min(i+k,R), max(j-k,0):min(j+k,C)]) for i,j in zip(rs,cs)])
else:
raise ValueError('Invalid reduce name provided, only the following'
' are currently available: [{}, {}, {}, {}]'.format('none','mean', 'max', 'min'))
#depth_im = depth_im[rs, cs]
depth = depth_im.reshape(-1) / self.DepthMapFactor
img_pixs = np.stack((rs, cs)).reshape(2, -1)
img_pixs[[0, 1], :] = img_pixs[[1, 0], :]
uv_one = np.concatenate((img_pixs,
np.ones((1, img_pixs.shape[1]))))
if self.intrinsic_mat is None:
self.intrinsic_mat = self.get_intrinsics()
self.intrinsic_mat_inv = np.linalg.inv(self.intrinsic_mat)
uv_one_in_cam = np.dot(self.intrinsic_mat_inv, uv_one)
pts_in_cam = np.multiply(uv_one_in_cam, depth)
pts_in_cam = np.concatenate((pts_in_cam,
np.ones((1, pts_in_cam.shape[1]))),
axis=0)
pts = pts_in_cam[:3, :].T
colors = rgb_im[rs, cs].reshape(-1, 3)
return pts, colors
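# Standalone numeric sketch (added for illustration; no ROS required) of the
# pinhole back-projection used above: pt_cam = inv(K) * [u, v, 1]^T * depth.
# The intrinsic matrix below is a placeholder, not read from a real camera.
def _demo_backproject():
    K = np.array([[525.0, 0.0, 319.5],
                  [0.0, 525.0, 239.5],
                  [0.0, 0.0, 1.0]])
    uv_one = np.array([320.0, 240.0, 1.0]).reshape(3, 1)  # pixel near the centre
    depth = 1.5  # metres
    pt_cam = np.dot(np.linalg.inv(K), uv_one) * depth
    return pt_cam.ravel()  # approximately [0.0014, 0.0014, 1.5]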
|
[
"sys.path.append",
"numpy.stack",
"cv_bridge.CvBridge",
"sys.path.remove",
"rospy.Subscriber",
"copy.deepcopy",
"numpy.multiply",
"rospy.logerr",
"threading.RLock",
"message_filters.ApproximateTimeSynchronizer",
"numpy.ones",
"numpy.linalg.inv",
"numpy.array",
"message_filters.Subscriber",
"numpy.dot"
] |
[((627, 652), 'sys.path.append', 'sys.path.append', (['ros_path'], {}), '(ros_path)\n', (642, 652), False, 'import sys\n'), ((586, 611), 'sys.path.remove', 'sys.path.remove', (['ros_path'], {}), '(ros_path)\n', (601, 611), False, 'import sys\n'), ((1101, 1111), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (1109, 1111), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((1144, 1161), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (1159, 1161), False, 'import threading\n'), ((1193, 1210), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (1208, 1210), False, 'import threading\n'), ((1338, 1447), 'rospy.Subscriber', 'rospy.Subscriber', (['self.configs.CAMERA.ROSTOPIC_CAMERA_INFO_STREAM', 'CameraInfo', 'self._camera_info_callback'], {}), '(self.configs.CAMERA.ROSTOPIC_CAMERA_INFO_STREAM,\n CameraInfo, self._camera_info_callback)\n', (1354, 1447), False, 'import rospy\n'), ((1585, 1629), 'message_filters.Subscriber', 'message_filters.Subscriber', (['rgb_topic', 'Image'], {}), '(rgb_topic, Image)\n', (1611, 1629), False, 'import message_filters\n'), ((1726, 1772), 'message_filters.Subscriber', 'message_filters.Subscriber', (['depth_topic', 'Image'], {}), '(depth_topic, Image)\n', (1752, 1772), False, 'import message_filters\n'), ((1843, 1921), 'message_filters.ApproximateTimeSynchronizer', 'message_filters.ApproximateTimeSynchronizer', (['img_subs'], {'queue_size': '(10)', 'slop': '(0.2)'}), '(img_subs, queue_size=10, slop=0.2)\n', (1886, 1921), False, 'import message_filters\n'), ((3035, 3057), 'copy.deepcopy', 'deepcopy', (['self.rgb_img'], {}), '(self.rgb_img)\n', (3043, 3057), False, 'from copy import deepcopy\n'), ((3327, 3351), 'copy.deepcopy', 'deepcopy', (['self.depth_img'], {}), '(self.depth_img)\n', (3335, 3351), False, 'from copy import deepcopy\n'), ((3647, 3669), 'copy.deepcopy', 'deepcopy', (['self.rgb_img'], {}), '(self.rgb_img)\n', (3655, 3669), False, 'from copy import deepcopy\n'), ((3686, 3710), 'copy.deepcopy', 'deepcopy', (['self.depth_img'], {}), '(self.depth_img)\n', (3694, 3710), False, 'from copy import deepcopy\n'), ((4030, 4053), 'copy.deepcopy', 'deepcopy', (['self.camera_P'], {}), '(self.camera_P)\n', (4038, 4053), False, 'from copy import deepcopy\n'), ((5306, 5344), 'numpy.multiply', 'np.multiply', (['self.uv_one_in_cam', 'depth'], {}), '(self.uv_one_in_cam, depth)\n', (5317, 5344), True, 'import numpy as np\n'), ((9059, 9097), 'numpy.dot', 'np.dot', (['self.intrinsic_mat_inv', 'uv_one'], {}), '(self.intrinsic_mat_inv, uv_one)\n', (9065, 9097), True, 'import numpy as np\n'), ((9119, 9152), 'numpy.multiply', 'np.multiply', (['uv_one_in_cam', 'depth'], {}), '(uv_one_in_cam, depth)\n', (9130, 9152), True, 'import numpy as np\n'), ((4759, 4792), 'numpy.linalg.inv', 'np.linalg.inv', (['self.intrinsic_mat'], {}), '(self.intrinsic_mat)\n', (4772, 4792), True, 'import numpy as np\n'), ((5232, 5275), 'numpy.dot', 'np.dot', (['self.intrinsic_mat_inv', 'self.uv_one'], {}), '(self.intrinsic_mat_inv, self.uv_one)\n', (5238, 5275), True, 'import numpy as np\n'), ((9001, 9034), 'numpy.linalg.inv', 'np.linalg.inv', (['self.intrinsic_mat'], {}), '(self.intrinsic_mat)\n', (9014, 9034), True, 'import numpy as np\n'), ((2565, 2580), 'rospy.logerr', 'rospy.logerr', (['e'], {}), '(e)\n', (2577, 2580), False, 'import rospy\n'), ((2758, 2773), 'numpy.array', 'np.array', (['msg.P'], {}), '(msg.P)\n', (2766, 2773), True, 'import numpy as np\n'), ((5431, 5464), 'numpy.ones', 'np.ones', (['(1, pts_in_cam.shape[1])'], {}), '((1, pts_in_cam.shape[1]))\n', (5438, 
5464), True, 'import numpy as np\n'), ((8676, 8694), 'numpy.stack', 'np.stack', (['(rs, cs)'], {}), '((rs, cs))\n', (8684, 8694), True, 'import numpy as np\n'), ((8836, 8867), 'numpy.ones', 'np.ones', (['(1, img_pixs.shape[1])'], {}), '((1, img_pixs.shape[1]))\n', (8843, 8867), True, 'import numpy as np\n'), ((9239, 9272), 'numpy.ones', 'np.ones', (['(1, pts_in_cam.shape[1])'], {}), '((1, pts_in_cam.shape[1]))\n', (9246, 9272), True, 'import numpy as np\n'), ((5165, 5196), 'numpy.ones', 'np.ones', (['(1, img_pixs.shape[1])'], {}), '((1, img_pixs.shape[1]))\n', (5172, 5196), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
from typing import List, Union
from ..storage import History
from .util import to_lists_or_default
def plot_sample_numbers(
histories: Union[List, History],
labels: Union[List, str] = None,
rotation: int = 0,
title: str = "Total required samples",
size: tuple = None):
"""
Plot required numbers of samples over all iterations.
Parameters
----------
histories: Union[List, History]
The histories to plot from. History ids must be set correctly.
    labels: Union[List, str], optional
Labels corresponding to the histories. If None are provided,
indices are used as labels.
rotation: int, optional (default = 0)
Rotation to apply to the plot's x tick labels. For longer labels,
a tilting of 45 or even 90 can be preferable.
title: str, optional (default = "Total required samples")
Title for the plot.
size: tuple of float, optional
The size of the plot in inches.
Returns
-------
ax: Axis of the generated plot.
"""
# preprocess input
histories, labels = to_lists_or_default(histories, labels)
# create figure
fig, ax = plt.subplots()
n_run = len(histories)
# extract sample numbers
samples = []
for history in histories:
# note: the first entry corresponds to the calibration and should
# be included here to be fair against methods not requiring
# calibration
samples.append(np.array(history.get_all_populations()['samples']))
# create matrix
n_pop = max(len(sample) for sample in samples)
matrix = np.zeros((n_pop, n_run))
for i_sample, sample in enumerate(samples):
matrix[:len(sample), i_sample] = sample
# plot bars
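    # each population adds one stacked bar segment per run; 'bottom' accumulates
    # the samples already drawn for earlier populations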
for i_pop in range(n_pop):
ax.bar(x=np.arange(n_run),
height=matrix[i_pop, :],
bottom=np.sum(matrix[:i_pop, :], axis=0))
# add labels
ax.set_xticks(np.arange(n_run))
ax.set_xticklabels(labels, rotation=rotation)
ax.set_title(title)
ax.set_ylabel("Samples")
ax.set_xlabel("Run")
# set size
if size is not None:
fig.set_size_inches(size)
fig.tight_layout()
return ax
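# Illustrative usage sketch (added; not part of the original module). The
# database path is a placeholder; History is the class imported above.
#   history = History("sqlite:///example.db")
#   ax = plot_sample_numbers([history], labels=["run 0"], rotation=45)
#   plt.show()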
|
[
"numpy.zeros",
"numpy.sum",
"matplotlib.pyplot.subplots",
"numpy.arange"
] |
[((1238, 1252), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1250, 1252), True, 'import matplotlib.pyplot as plt\n'), ((1682, 1706), 'numpy.zeros', 'np.zeros', (['(n_pop, n_run)'], {}), '((n_pop, n_run))\n', (1690, 1706), True, 'import numpy as np\n'), ((2019, 2035), 'numpy.arange', 'np.arange', (['n_run'], {}), '(n_run)\n', (2028, 2035), True, 'import numpy as np\n'), ((1868, 1884), 'numpy.arange', 'np.arange', (['n_run'], {}), '(n_run)\n', (1877, 1884), True, 'import numpy as np\n'), ((1948, 1981), 'numpy.sum', 'np.sum', (['matrix[:i_pop, :]'], {'axis': '(0)'}), '(matrix[:i_pop, :], axis=0)\n', (1954, 1981), True, 'import numpy as np\n')]
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Manages a debugging session with GDB.
This module is meant to be imported from inside GDB. Once loaded, the
|DebugSession| attaches GDB to a running Mojo Shell process on an Android
device using a remote gdbserver.
At startup and each time the execution stops, |DebugSession| associates
debugging symbols for every frame. For more information, see |DebugSession|
documentation.
"""
import gdb
import glob
import itertools
import logging
import os
import os.path
import shutil
import subprocess
import sys
import tempfile
import traceback
import urllib2
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import android_gdb.config as config
from android_gdb.remote_file_connection import RemoteFileConnection
from android_gdb.signatures import get_signature
logging.getLogger().setLevel(logging.INFO)
def _gdb_execute(command):
"""Executes a GDB command."""
return gdb.execute(command, to_string=True)
class Mapping(object):
"""Represents a mapped memory region."""
def __init__(self, line):
self.start = int(line[0], 16)
self.end = int(line[1], 16)
self.size = int(line[2], 16)
self.offset = int(line[3], 16)
self.filename = line[4]
def _get_mapped_files():
"""Retrieves all the files mapped into the debugged process memory.
Returns:
List of mapped memory regions grouped by files.
"""
# info proc map returns a space-separated table with the following fields:
# start address, end address, size, offset, file path.
mappings = [Mapping(x) for x in
[x.split() for x in
_gdb_execute("info proc map").split('\n')]
if len(x) == 5 and x[4][0] == '/']
res = {}
for m in mappings:
libname = m.filename[m.filename.rfind('/') + 1:]
res[libname] = res.get(libname, []) + [m]
return res.values()
class DebugSession(object):
def __init__(self, build_directory_list, package_name, pyelftools_dir, adb):
build_directories = build_directory_list.split(',')
if len(build_directories) == 0 or not all(map(os.path.exists,
build_directories)):
logging.fatal("Please pass a list of valid build directories")
sys.exit(1)
self._package_name = package_name
self._adb = adb
self._remote_file_cache = os.path.join(os.getenv('HOME'), '.mojosymbols')
if pyelftools_dir != None:
sys.path.append(pyelftools_dir)
try:
import elftools.elf.elffile as elffile
except ImportError:
logging.fatal("Unable to find elftools module; please install pyelftools "
"and specify its path on the command line using "
"--pyelftools-dir.")
sys.exit(1)
self._elffile_module = elffile
self._libraries = self._find_libraries(build_directories)
self._rfc = RemoteFileConnection('localhost', 10000)
self._remote_file_reader_process = None
if not os.path.exists(self._remote_file_cache):
os.makedirs(self._remote_file_cache)
self._done_mapping = set()
self._downloaded_files = []
def __del__(self):
# Note that, per python interpreter documentation, __del__ is not
# guaranteed to be called when the interpreter (GDB, in our case) quits.
# Also, most (all?) globals are no longer available at this time (launching
# a subprocess does not work).
self.stop()
def stop(self, _unused_return_value=None):
if self._remote_file_reader_process != None:
self._remote_file_reader_process.kill()
def _find_libraries(self, lib_dirs):
"""Finds all libraries in |lib_dirs| and key them by their signatures.
"""
res = {}
for lib_dir in lib_dirs:
for fn in glob.glob('%s/*.so' % lib_dir):
with open(fn, 'r') as f:
s = get_signature(f, self._elffile_module)
if s is not None:
res[s] = fn
return res
def _associate_symbols(self, mapping, local_file):
with open(local_file, "r") as f:
elf = self._elffile_module.ELFFile(f)
s = elf.get_section_by_name(".text")
text_address = mapping[0].start + s['sh_offset']
_gdb_execute("add-symbol-file %s 0x%x" % (local_file, text_address))
def _download_file(self, signature, remote):
"""Downloads a remote file either from the cloud or through GDB connection.
Returns:
The filename of the downloaded file
"""
temp_file = tempfile.NamedTemporaryFile()
logging.info("Trying to download symbols from the cloud.")
symbols_url = "http://storage.googleapis.com/mojo/symbols/%s" % signature
try:
symbol_file = urllib2.urlopen(symbols_url)
try:
with open(temp_file.name, "w") as dst:
shutil.copyfileobj(symbol_file, dst)
logging.info("Getting symbols for %s at %s." % (remote, symbols_url))
# This allows the deletion of temporary files on disk when the
# debugging session terminates.
self._downloaded_files.append(temp_file)
return temp_file.name
finally:
symbol_file.close()
except urllib2.HTTPError:
pass
logging.info("Downloading file %s" % remote)
_gdb_execute("remote get %s %s" % (remote, temp_file.name))
# This allows the deletion of temporary files on disk when the debugging
# session terminates.
self._downloaded_files.append(temp_file)
return temp_file.name
def _find_mapping_for_address(self, mappings, address):
"""Returns the list of all mappings of the file occupying the |address|
memory address.
"""
for file_mappings in mappings:
for mapping in file_mappings:
if address >= mapping.start and address <= mapping.end:
return file_mappings
return None
def _try_to_map(self, mapping):
remote_file = mapping[0].filename
if remote_file in self._done_mapping:
return False
self._done_mapping.add(remote_file)
self._rfc.open(remote_file)
signature = get_signature(self._rfc, self._elffile_module)
if signature is not None:
if signature in self._libraries:
self._associate_symbols(mapping, self._libraries[signature])
else:
# This library file is not known locally. Download it from the device or
# the cloud and put it in cache so, if it got symbols, we can see them.
local_file = os.path.join(self._remote_file_cache, signature)
if not os.path.exists(local_file):
tmp_output = self._download_file(signature, remote_file)
shutil.move(tmp_output, local_file)
self._associate_symbols(mapping, local_file)
return True
return False
def _map_symbols_on_current_thread(self, mapped_files):
"""Updates the symbols for the current thread using files from mapped_files.
"""
frame = gdb.newest_frame()
while frame and frame.is_valid():
if frame.name() is None:
m = self._find_mapping_for_address(mapped_files, frame.pc())
if m is not None and self._try_to_map(m):
# Force gdb to recompute its frames.
_gdb_execute("info threads")
frame = gdb.newest_frame()
assert frame.is_valid()
if (frame.older() is not None and
frame.older().is_valid() and
frame.older().pc() != frame.pc()):
frame = frame.older()
else:
frame = None
def update_symbols(self, current_thread_only):
"""Updates the mapping between symbols as seen from GDB and local library
files.
If current_thread_only is True, only update symbols for the current thread.
"""
logging.info("Updating symbols")
mapped_files = _get_mapped_files()
# Map all symbols from native libraries packages with the APK.
for file_mappings in mapped_files:
filename = file_mappings[0].filename
if ((filename.startswith('/data/data/') or
filename.startswith('/data/app')) and
not filename.endswith('.apk') and
not filename.endswith('.dex')):
logging.info('Pre-mapping: %s' % file_mappings[0].filename)
self._try_to_map(file_mappings)
if current_thread_only:
self._map_symbols_on_current_thread(mapped_files)
else:
logging.info('Updating all threads\' symbols')
current_thread = gdb.selected_thread()
nb_threads = len(_gdb_execute("info threads").split("\n")) - 2
for i in xrange(nb_threads):
try:
_gdb_execute("thread %d" % (i + 1))
self._map_symbols_on_current_thread(mapped_files)
except gdb.error:
traceback.print_exc()
current_thread.switch()
def _get_device_application_pid(self, application):
"""Gets the PID of an application running on a device."""
output = subprocess.check_output([self._adb, 'shell', 'ps'])
for line in output.split('\n'):
elements = line.split()
if len(elements) > 0 and elements[-1] == application:
return elements[1]
return None
def start(self):
"""Starts a debugging session."""
gdbserver_pid = self._get_device_application_pid('gdbserver')
if gdbserver_pid is not None:
subprocess.check_call([self._adb, 'shell', 'kill', gdbserver_pid])
shell_pid = self._get_device_application_pid(self._package_name)
if shell_pid is None:
raise Exception('Unable to find a running mojo shell.')
subprocess.check_call([self._adb, 'forward', 'tcp:9999', 'tcp:9999'])
subprocess.Popen(
[self._adb, 'shell', 'gdbserver', '--attach', ':9999', shell_pid],
# os.setpgrp ensures signals passed to this file (such as SIGINT) are
# not propagated to child processes.
preexec_fn = os.setpgrp)
# Kill stray remote reader processes. See __del__ comment for more info.
remote_file_reader_pid = self._get_device_application_pid(
config.REMOTE_FILE_READER_DEVICE_PATH)
if remote_file_reader_pid is not None:
subprocess.check_call([self._adb, 'shell', 'kill',
remote_file_reader_pid])
self._remote_file_reader_process = subprocess.Popen(
[self._adb, 'shell', config.REMOTE_FILE_READER_DEVICE_PATH],
stdout=subprocess.PIPE, preexec_fn = os.setpgrp)
port = int(self._remote_file_reader_process.stdout.readline())
subprocess.check_call([self._adb, 'forward', 'tcp:10000', 'tcp:%d' % port])
self._rfc.connect()
_gdb_execute('target remote localhost:9999')
self.update_symbols(current_thread_only=False)
def on_stop(_):
self.update_symbols(current_thread_only=True)
gdb.events.stop.connect(on_stop)
gdb.events.exited.connect(self.stop)
# Register the update-symbols command.
UpdateSymbols(self)
class UpdateSymbols(gdb.Command):
"""Command to update symbols loaded into GDB.
GDB usage: update-symbols [all|current]
"""
_UPDATE_COMMAND = "update-symbols"
def __init__(self, session):
super(UpdateSymbols, self).__init__(self._UPDATE_COMMAND, gdb.COMMAND_STACK)
self._session = session
def invoke(self, arg, _unused_from_tty):
if arg == 'current':
self._session.update_symbols(current_thread_only=True)
else:
self._session.update_symbols(current_thread_only=False)
def complete(self, text, _unused_word):
if text == self._UPDATE_COMMAND:
return ('all', 'current')
elif text in self._UPDATE_COMMAND + ' all':
return ['all']
elif text in self._UPDATE_COMMAND + ' current':
return ['current']
else:
return []
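# Illustrative GDB usage sketch (added; not part of the original module). The
# build directory, package name and adb path are placeholders; the wrapper
# script that normally constructs DebugSession is not shown in this file.
#   (gdb) source <path to this file>
#   (gdb) python session = DebugSession('out/android_Debug', 'example.package.name', None, 'adb')
#   (gdb) python session.start()
#   (gdb) update-symbols current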
|
[
"glob.glob",
"os.path.join",
"urllib2.urlopen",
"subprocess.check_call",
"sys.path.append",
"os.path.abspath",
"traceback.print_exc",
"os.path.exists",
"gdb.newest_frame",
"shutil.copyfileobj",
"subprocess.Popen",
"subprocess.check_output",
"android_gdb.remote_file_connection.RemoteFileConnection",
"logging.fatal",
"gdb.events.exited.connect",
"os.getenv",
"sys.exit",
"tempfile.NamedTemporaryFile",
"gdb.selected_thread",
"gdb.execute",
"gdb.events.stop.connect",
"os.makedirs",
"logging.info",
"android_gdb.signatures.get_signature",
"shutil.move",
"logging.getLogger"
] |
[((1069, 1105), 'gdb.execute', 'gdb.execute', (['command'], {'to_string': '(True)'}), '(command, to_string=True)\n', (1080, 1105), False, 'import gdb\n'), ((956, 975), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (973, 975), False, 'import logging\n'), ((2991, 3031), 'android_gdb.remote_file_connection.RemoteFileConnection', 'RemoteFileConnection', (['"""localhost"""', '(10000)'], {}), "('localhost', 10000)\n", (3011, 3031), False, 'from android_gdb.remote_file_connection import RemoteFileConnection\n'), ((4557, 4586), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (4584, 4586), False, 'import tempfile\n'), ((4591, 4649), 'logging.info', 'logging.info', (['"""Trying to download symbols from the cloud."""'], {}), "('Trying to download symbols from the cloud.')\n", (4603, 4649), False, 'import logging\n'), ((5257, 5301), 'logging.info', 'logging.info', (["('Downloading file %s' % remote)"], {}), "('Downloading file %s' % remote)\n", (5269, 5301), False, 'import logging\n'), ((6107, 6153), 'android_gdb.signatures.get_signature', 'get_signature', (['self._rfc', 'self._elffile_module'], {}), '(self._rfc, self._elffile_module)\n', (6120, 6153), False, 'from android_gdb.signatures import get_signature\n'), ((6939, 6957), 'gdb.newest_frame', 'gdb.newest_frame', ([], {}), '()\n', (6955, 6957), False, 'import gdb\n'), ((7722, 7754), 'logging.info', 'logging.info', (['"""Updating symbols"""'], {}), "('Updating symbols')\n", (7734, 7754), False, 'import logging\n'), ((8869, 8920), 'subprocess.check_output', 'subprocess.check_output', (["[self._adb, 'shell', 'ps']"], {}), "([self._adb, 'shell', 'ps'])\n", (8892, 8920), False, 'import subprocess\n'), ((9482, 9551), 'subprocess.check_call', 'subprocess.check_call', (["[self._adb, 'forward', 'tcp:9999', 'tcp:9999']"], {}), "([self._adb, 'forward', 'tcp:9999', 'tcp:9999'])\n", (9503, 9551), False, 'import subprocess\n'), ((9556, 9666), 'subprocess.Popen', 'subprocess.Popen', (["[self._adb, 'shell', 'gdbserver', '--attach', ':9999', shell_pid]"], {'preexec_fn': 'os.setpgrp'}), "([self._adb, 'shell', 'gdbserver', '--attach', ':9999',\n shell_pid], preexec_fn=os.setpgrp)\n", (9572, 9666), False, 'import subprocess\n'), ((10186, 10315), 'subprocess.Popen', 'subprocess.Popen', (["[self._adb, 'shell', config.REMOTE_FILE_READER_DEVICE_PATH]"], {'stdout': 'subprocess.PIPE', 'preexec_fn': 'os.setpgrp'}), "([self._adb, 'shell', config.REMOTE_FILE_READER_DEVICE_PATH\n ], stdout=subprocess.PIPE, preexec_fn=os.setpgrp)\n", (10202, 10315), False, 'import subprocess\n'), ((10401, 10476), 'subprocess.check_call', 'subprocess.check_call', (["[self._adb, 'forward', 'tcp:10000', 'tcp:%d' % port]"], {}), "([self._adb, 'forward', 'tcp:10000', 'tcp:%d' % port])\n", (10422, 10476), False, 'import subprocess\n'), ((10679, 10711), 'gdb.events.stop.connect', 'gdb.events.stop.connect', (['on_stop'], {}), '(on_stop)\n', (10702, 10711), False, 'import gdb\n'), ((10716, 10752), 'gdb.events.exited.connect', 'gdb.events.exited.connect', (['self.stop'], {}), '(self.stop)\n', (10741, 10752), False, 'import gdb\n'), ((772, 797), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (787, 797), False, 'import os\n'), ((2301, 2363), 'logging.fatal', 'logging.fatal', (['"""Please pass a list of valid build directories"""'], {}), "('Please pass a list of valid build directories')\n", (2314, 2363), False, 'import logging\n'), ((2370, 2381), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2378, 2381), False, 'import sys\n'), 
((2483, 2500), 'os.getenv', 'os.getenv', (['"""HOME"""'], {}), "('HOME')\n", (2492, 2500), False, 'import os\n'), ((2556, 2587), 'sys.path.append', 'sys.path.append', (['pyelftools_dir'], {}), '(pyelftools_dir)\n', (2571, 2587), False, 'import sys\n'), ((3087, 3126), 'os.path.exists', 'os.path.exists', (['self._remote_file_cache'], {}), '(self._remote_file_cache)\n', (3101, 3126), False, 'import os\n'), ((3134, 3170), 'os.makedirs', 'os.makedirs', (['self._remote_file_cache'], {}), '(self._remote_file_cache)\n', (3145, 3170), False, 'import os\n'), ((3856, 3886), 'glob.glob', 'glob.glob', (["('%s/*.so' % lib_dir)"], {}), "('%s/*.so' % lib_dir)\n", (3865, 3886), False, 'import glob\n'), ((4757, 4785), 'urllib2.urlopen', 'urllib2.urlopen', (['symbols_url'], {}), '(symbols_url)\n', (4772, 4785), False, 'import urllib2\n'), ((8336, 8381), 'logging.info', 'logging.info', (['"""Updating all threads\' symbols"""'], {}), '("Updating all threads\' symbols")\n', (8348, 8381), False, 'import logging\n'), ((8406, 8427), 'gdb.selected_thread', 'gdb.selected_thread', ([], {}), '()\n', (8425, 8427), False, 'import gdb\n'), ((9254, 9320), 'subprocess.check_call', 'subprocess.check_call', (["[self._adb, 'shell', 'kill', gdbserver_pid]"], {}), "([self._adb, 'shell', 'kill', gdbserver_pid])\n", (9275, 9320), False, 'import subprocess\n'), ((10042, 10117), 'subprocess.check_call', 'subprocess.check_call', (["[self._adb, 'shell', 'kill', remote_file_reader_pid]"], {}), "([self._adb, 'shell', 'kill', remote_file_reader_pid])\n", (10063, 10117), False, 'import subprocess\n'), ((2672, 2821), 'logging.fatal', 'logging.fatal', (['"""Unable to find elftools module; please install pyelftools and specify its path on the command line using --pyelftools-dir."""'], {}), "(\n 'Unable to find elftools module; please install pyelftools and specify its path on the command line using --pyelftools-dir.'\n )\n", (2685, 2821), False, 'import logging\n'), ((2864, 2875), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2872, 2875), False, 'import sys\n'), ((6486, 6534), 'os.path.join', 'os.path.join', (['self._remote_file_cache', 'signature'], {}), '(self._remote_file_cache, signature)\n', (6498, 6534), False, 'import os\n'), ((8135, 8194), 'logging.info', 'logging.info', (["('Pre-mapping: %s' % file_mappings[0].filename)"], {}), "('Pre-mapping: %s' % file_mappings[0].filename)\n", (8147, 8194), False, 'import logging\n'), ((3935, 3973), 'android_gdb.signatures.get_signature', 'get_signature', (['f', 'self._elffile_module'], {}), '(f, self._elffile_module)\n', (3948, 3973), False, 'from android_gdb.signatures import get_signature\n'), ((4854, 4890), 'shutil.copyfileobj', 'shutil.copyfileobj', (['symbol_file', 'dst'], {}), '(symbol_file, dst)\n', (4872, 4890), False, 'import shutil\n'), ((4901, 4970), 'logging.info', 'logging.info', (["('Getting symbols for %s at %s.' % (remote, symbols_url))"], {}), "('Getting symbols for %s at %s.' % (remote, symbols_url))\n", (4913, 4970), False, 'import logging\n'), ((6550, 6576), 'os.path.exists', 'os.path.exists', (['local_file'], {}), '(local_file)\n', (6564, 6576), False, 'import os\n'), ((6655, 6690), 'shutil.move', 'shutil.move', (['tmp_output', 'local_file'], {}), '(tmp_output, local_file)\n', (6666, 6690), False, 'import shutil\n'), ((7250, 7268), 'gdb.newest_frame', 'gdb.newest_frame', ([], {}), '()\n', (7266, 7268), False, 'import gdb\n'), ((8687, 8708), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (8706, 8708), False, 'import traceback\n')]
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="norma43parser",
version="1.1.2",
license="MIT",
author="<NAME>",
author_email="<EMAIL>",
description="Parser for Bank Account information files formatted in Norma 43",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/sergief/norma43parser",
packages=setuptools.find_packages(),
keywords=["norma43", "parser", "bank", "account", "n43", "csb"],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries :: Python Modules",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
python_requires=">=3.6",
)
|
[
"setuptools.find_packages"
] |
[((459, 485), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (483, 485), False, 'import setuptools\n')]
|
"""
Search for a good model for the
[MNIST](https://keras.io/datasets/#mnist-database-of-handwritten-digits) dataset.
"""
import argparse
import os
import autokeras as ak
import tensorflow_cloud as tfc
from tensorflow.keras.datasets import mnist
parser = argparse.ArgumentParser(description="Model save path arguments.")
parser.add_argument("--path", required=True, type=str, help="Keras model save path")
args = parser.parse_args()
tfc.run(
chief_config=tfc.COMMON_MACHINE_CONFIGS["V100_1X"],
docker_base_image="haifengjin/autokeras:1.0.3",
)
# Prepare the dataset.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape) # (60000, 28, 28)
print(y_train.shape) # (60000,)
print(y_train[:3])  # array([5, 0, 4], dtype=uint8)
# Initialize the ImageClassifier.
clf = ak.ImageClassifier(max_trials=2)
# Search for the best model.
clf.fit(x_train, y_train, epochs=10)
# Evaluate on the testing data.
print("Accuracy: {accuracy}".format(accuracy=clf.evaluate(x_test, y_test)[1]))
clf.export_model().save(os.path.join(args.path, "model.h5"))
|
[
"argparse.ArgumentParser",
"autokeras.ImageClassifier",
"tensorflow.keras.datasets.mnist.load_data",
"os.path.join",
"tensorflow_cloud.run"
] |
[((258, 323), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Model save path arguments."""'}), "(description='Model save path arguments.')\n", (281, 323), False, 'import argparse\n'), ((437, 548), 'tensorflow_cloud.run', 'tfc.run', ([], {'chief_config': "tfc.COMMON_MACHINE_CONFIGS['V100_1X']", 'docker_base_image': '"""haifengjin/autokeras:1.0.3"""'}), "(chief_config=tfc.COMMON_MACHINE_CONFIGS['V100_1X'],\n docker_base_image='haifengjin/autokeras:1.0.3')\n", (444, 548), True, 'import tensorflow_cloud as tfc\n'), ((619, 636), 'tensorflow.keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (634, 636), False, 'from tensorflow.keras.datasets import mnist\n'), ((803, 835), 'autokeras.ImageClassifier', 'ak.ImageClassifier', ([], {'max_trials': '(2)'}), '(max_trials=2)\n', (821, 835), True, 'import autokeras as ak\n'), ((1038, 1073), 'os.path.join', 'os.path.join', (['args.path', '"""model.h5"""'], {}), "(args.path, 'model.h5')\n", (1050, 1073), False, 'import os\n')]
|
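Editorial note, not part of the original record: a hedged sketch of reloading the model that the script above exports to <path>/model.h5. The path below is hypothetical, and some AutoKeras versions need custom_objects to deserialize the exported model.
import os
import numpy as np
import tensorflow as tf
model_path = os.path.join("/tmp/autokeras-run", "model.h5")  # hypothetical --path value
model = tf.keras.models.load_model(model_path)  # may need custom_objects (e.g. ak.CUSTOM_OBJECTS) on some versions
sample = np.zeros((1, 28, 28), dtype="float32")  # one MNIST-shaped input
print(model.predict(sample).argmax(axis=-1))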
from flask_restful import Resource, reqparse, request
from flask_restful import fields, marshal_with, marshal
from sqlalchemy.exc import IntegrityError
from sqlalchemy import or_, and_, text
from flask_jwt_extended import jwt_required
from models.course import Course
from app import db
from utils.util import max_res
from helpers.courses_resource_helper import *
class CoursesResource(Resource):
@jwt_required
def get(self, course_id=None):
if course_id:
course = Course.find_by_id(course_id)
return max_res(marshal(course, course_fields))
else:
conditions = []
args = course_query_parser.parse_args()
page = args['page']
per_page = args['pagesize']
if args['orderby'] not in sortable_fields:
return max_res('', code=500, errmsg='排序非法字段')
sort = args['orderby']
if args['desc']>0:
sort = args['orderby'] + ' desc'
conditions = make_conditions(conditions,args)
            # Add more query conditions here, for example:
# if args['name'] is not None:
# conditions.append(Course.name.like('%'+args['name']+'%'))
            if not conditions:
pagination = Course.query.order_by(text(sort)).paginate(page, per_page, error_out=False)
else:
pagination = Course.query.filter(*conditions).order_by(text(sort)).paginate(page, per_page, error_out=False)
paginate = {
'total':pagination.total,
'pageSize': pagination.per_page,
'current': pagination.page
}
print(pagination.items)
return max_res(marshal({
'pagination': paginate,
'list': [marshal(u, course_fields) for u in pagination.items]
}, course_list_fields))
@jwt_required
def post(self):
args = course_post_parser.parse_args()
course = Course(**args)
try:
course.add()
except IntegrityError:
return max_res('', code=401, errmsg='名称重复')
return max_res(marshal(course, course_fields))
def put(self, course_id=None):
course = Course.find_by_id(course_id)
args = course_update_parser.parse_args()
course = update_all_fields(args, course)
        # More fields that need updating can be added here, e.g.:
# if args['name']:
# o.name = args['name']
#
db.session.commit()
try:
course.update()
except Exception as e:
return max_res('',500, 'Failed to modify.')
return max_res(marshal(course, course_fields))
def delete(self, course_id=None):
course = Course.find_by_id(course_id)
try:
course.delete()
except Exception as e:
return max_res('',500, 'The record has already deleted.')
return max_res('The course has been deleted.')
|
[
"utils.util.max_res",
"models.course.Course.query.filter",
"sqlalchemy.text",
"app.db.session.commit",
"models.course.Course",
"flask_restful.marshal",
"models.course.Course.find_by_id"
] |
[((1993, 2007), 'models.course.Course', 'Course', ([], {}), '(**args)\n', (1999, 2007), False, 'from models.course import Course\n'), ((2247, 2275), 'models.course.Course.find_by_id', 'Course.find_by_id', (['course_id'], {}), '(course_id)\n', (2264, 2275), False, 'from models.course import Course\n'), ((2500, 2519), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2517, 2519), False, 'from app import db\n'), ((2764, 2792), 'models.course.Course.find_by_id', 'Course.find_by_id', (['course_id'], {}), '(course_id)\n', (2781, 2792), False, 'from models.course import Course\n'), ((2952, 2991), 'utils.util.max_res', 'max_res', (['"""The course has been deleted."""'], {}), "('The course has been deleted.')\n", (2959, 2991), False, 'from utils.util import max_res\n'), ((497, 525), 'models.course.Course.find_by_id', 'Course.find_by_id', (['course_id'], {}), '(course_id)\n', (514, 525), False, 'from models.course import Course\n'), ((2157, 2187), 'flask_restful.marshal', 'marshal', (['course', 'course_fields'], {}), '(course, course_fields)\n', (2164, 2187), False, 'from flask_restful import fields, marshal_with, marshal\n'), ((2672, 2702), 'flask_restful.marshal', 'marshal', (['course', 'course_fields'], {}), '(course, course_fields)\n', (2679, 2702), False, 'from flask_restful import fields, marshal_with, marshal\n'), ((553, 583), 'flask_restful.marshal', 'marshal', (['course', 'course_fields'], {}), '(course, course_fields)\n', (560, 583), False, 'from flask_restful import fields, marshal_with, marshal\n'), ((831, 869), 'utils.util.max_res', 'max_res', (['""""""'], {'code': '(500)', 'errmsg': '"""排序非法字段"""'}), "('', code=500, errmsg='排序非法字段')\n", (838, 869), False, 'from utils.util import max_res\n'), ((2096, 2132), 'utils.util.max_res', 'max_res', (['""""""'], {'code': '(401)', 'errmsg': '"""名称重复"""'}), "('', code=401, errmsg='名称重复')\n", (2103, 2132), False, 'from utils.util import max_res\n'), ((2611, 2648), 'utils.util.max_res', 'max_res', (['""""""', '(500)', '"""Failed to modify."""'], {}), "('', 500, 'Failed to modify.')\n", (2618, 2648), False, 'from utils.util import max_res\n'), ((2885, 2936), 'utils.util.max_res', 'max_res', (['""""""', '(500)', '"""The record has already deleted."""'], {}), "('', 500, 'The record has already deleted.')\n", (2892, 2936), False, 'from utils.util import max_res\n'), ((1289, 1299), 'sqlalchemy.text', 'text', (['sort'], {}), '(sort)\n', (1293, 1299), False, 'from sqlalchemy import or_, and_, text\n'), ((1432, 1442), 'sqlalchemy.text', 'text', (['sort'], {}), '(sort)\n', (1436, 1442), False, 'from sqlalchemy import or_, and_, text\n'), ((1799, 1824), 'flask_restful.marshal', 'marshal', (['u', 'course_fields'], {}), '(u, course_fields)\n', (1806, 1824), False, 'from flask_restful import fields, marshal_with, marshal\n'), ((1390, 1422), 'models.course.Course.query.filter', 'Course.query.filter', (['*conditions'], {}), '(*conditions)\n', (1409, 1422), False, 'from models.course import Course\n')]
|
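Editorial note, not part of the original record: a hedged sketch of how a flask_restful resource such as CoursesResource above is usually wired to an application. The app object, import path, and JWT setup are assumptions, not taken from the record.
from flask import Flask
from flask_restful import Api
from flask_jwt_extended import JWTManager
# from resources.courses import CoursesResource  # hypothetical import path for the class above
app = Flask(__name__)
app.config["JWT_SECRET_KEY"] = "change-me"  # jwt_required above needs a configured JWTManager
JWTManager(app)
api = Api(app)
# one route for the collection, one for a single record, matching get(self, course_id=None)
api.add_resource(CoursesResource, "/courses", "/courses/<int:course_id>")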
import random
import math
import numpy as np
import matplotlib.pyplot as plt
# Calculating Pi using Monte Carlo algorithm.
def montecarlo_pi(times:int):
inside = 0
total = times
for i in range(times):
x_i = random.random()
y_i = random.random()
delta = x_i ** 2 + y_i **2 - 1
if delta <= 0:
inside += 1
approx_pi = 4 * inside / total
print('\nRandom test: ' + str(times))
print('Approximation of pi is:{:.8f}'.format(approx_pi))
return approx_pi
if __name__ == '__main__':
numlist = [100, 500, 1000, 5000, 10000, 50000, 100000, 500000, 1000000, 5000000, 10000000, 30000000, 50000000, 75000000, 100000000]
x_list = list(np.log10(numlist))
pi_ = []
for times in numlist:
pi_.append(montecarlo_pi(times))
plt.figure()
plt.plot([min(x_list), max(x_list)], [math.pi, math.pi], color='red', label='true value')
plt.plot(x_list, pi_, 'b.-', label='approximation')
plt.legend()
plt.xlabel('log10(n)')
plt.ylabel('pi')
my_y_ticks = np.arange(3, 3.4, 0.02)
plt.yticks(my_y_ticks)
plt.ylim((min(pi_)-0.1, max(pi_)+0.1))
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.yticks",
"random.random",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"numpy.log10",
"matplotlib.pyplot.xlabel"
] |
[((816, 828), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (826, 828), True, 'import matplotlib.pyplot as plt\n'), ((927, 978), 'matplotlib.pyplot.plot', 'plt.plot', (['x_list', 'pi_', '"""b.-"""'], {'label': '"""approximation"""'}), "(x_list, pi_, 'b.-', label='approximation')\n", (935, 978), True, 'import matplotlib.pyplot as plt\n'), ((984, 996), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (994, 996), True, 'import matplotlib.pyplot as plt\n'), ((1001, 1023), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""log10(n)"""'], {}), "('log10(n)')\n", (1011, 1023), True, 'import matplotlib.pyplot as plt\n'), ((1028, 1044), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""pi"""'], {}), "('pi')\n", (1038, 1044), True, 'import matplotlib.pyplot as plt\n'), ((1063, 1086), 'numpy.arange', 'np.arange', (['(3)', '(3.4)', '(0.02)'], {}), '(3, 3.4, 0.02)\n', (1072, 1086), True, 'import numpy as np\n'), ((1091, 1113), 'matplotlib.pyplot.yticks', 'plt.yticks', (['my_y_ticks'], {}), '(my_y_ticks)\n', (1101, 1113), True, 'import matplotlib.pyplot as plt\n'), ((1162, 1172), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1170, 1172), True, 'import matplotlib.pyplot as plt\n'), ((231, 246), 'random.random', 'random.random', ([], {}), '()\n', (244, 246), False, 'import random\n'), ((261, 276), 'random.random', 'random.random', ([], {}), '()\n', (274, 276), False, 'import random\n'), ((712, 729), 'numpy.log10', 'np.log10', (['numlist'], {}), '(numlist)\n', (720, 729), True, 'import numpy as np\n')]
|
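Editorial note, not part of the original record: the per-sample loop in montecarlo_pi above can be vectorized with NumPy; a minimal equivalent sketch.
import numpy as np
def montecarlo_pi_vectorized(times: int) -> float:
    xy = np.random.random((times, 2))  # uniform points in the unit square
    inside = np.count_nonzero((xy ** 2).sum(axis=1) <= 1.0)  # points inside the quarter circle
    return 4 * inside / times
print(montecarlo_pi_vectorized(1_000_000))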
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# TODO: Remove this when https://github.com/parejkoj/astropy/tree/luptonRGB
# is in Astropy.
"""
Combine 3 images to produce a properly-scaled RGB image following Lupton et al. (2004).
For details, see : http://adsabs.harvard.edu/abs/2004PASP..116..133L
The three images must be aligned and have the same pixel scale and size.
Example usage:
imageR = np.random.random((100,100))
imageG = np.random.random((100,100))
imageB = np.random.random((100,100))
image = lupton_rgb.makeRGB(imageR, imageG, imageB, fileName='randoms.png')
lupton_rgb.displayRGB(image)
"""
import numpy as np
try:
import scipy.misc
HAVE_SCIPY_MISC = True
except ImportError:
HAVE_SCIPY_MISC = False
# from lsst.afw.display.displayLib import replaceSaturatedPixels, getZScale
def compute_intensity(imageR, imageG=None, imageB=None):
"""
Return a naive total intensity from the red, blue, and green intensities.
Parameters
----------
imageR : `~numpy.ndarray`
Intensity of image to be mapped to red; or total intensity if imageG and
imageB are None.
imageG : `~numpy.ndarray`
Intensity of image to be mapped to green; or None.
imageB : `~numpy.ndarray`
Intensity of image to be mapped to blue; or None.
"""
if imageG is None or imageB is None:
assert imageG is None and imageB is None, \
"Please specify either a single image or red, green, and blue images"
return imageR
intensity = (imageR + imageG + imageB)/3.0
# Repack into whatever type was passed to us
return np.array(intensity, dtype=imageR.dtype)
def zscale(image, nSamples=1000, contrast=0.25):
"""
TBD: replace with newly added astropy.zscale function.
This emulates ds9's zscale feature. Returns the suggested minimum and
maximum values to display.
Parameters
----------
image : `~numpy.ndarray`
The image to compute the scaling on.
nSamples : int
How many samples to take when building the histogram.
contrast : float
        The zscale contrast: the fitted intensity slope is divided by this value,
        so smaller values give a wider display range.
"""
    stride = max(1, image.size // nSamples)  # integer stride so the strided slice below works under Python 3
samples = image.flatten()[::stride]
samples.sort()
chop_size = int(0.10*len(samples))
subset = samples[chop_size:-chop_size]
i_midpoint = int(len(subset)/2)
I_mid = subset[i_midpoint]
fit = np.polyfit(np.arange(len(subset)) - i_midpoint, subset, 1)
# fit = [ slope, intercept]
z1 = I_mid + fit[0]/contrast * (1-i_midpoint)/1.0
z2 = I_mid + fit[0]/contrast * (len(subset)-i_midpoint)/1.0
return z1, z2
class Mapping(object):
"""Baseclass to map red, blue, green intensities into uint8 values"""
def __init__(self, minimum=None, image=None):
"""
Create a mapping
Parameters
----------
minimum : float or sequence(3)
Intensity that should be mapped to black (a scalar or array for R, G, B).
image : `~numpy.ndarray`
The image to be used to calculate the mapping.
If provided, it is also used as the default for makeRgbImage().
"""
self._uint8Max = float(np.iinfo(np.uint8).max)
try:
len(minimum)
        except TypeError:  # a scalar was passed
minimum = 3*[minimum]
assert len(minimum) == 3, "Please provide 1 or 3 values for minimum"
self.minimum = minimum
self._image = image
def makeRgbImage(self, imageR=None, imageG=None, imageB=None,
xSize=None, ySize=None, rescaleFactor=None):
"""
Convert 3 arrays, imageR, imageG, and imageB into a numpy RGB image.
Parameters
----------
imageR : `~numpy.ndarray`
Image to map to red (if None, use the image passed to the constructor).
imageG : `~numpy.ndarray`
Image to map to green (if None, use imageR).
imageB : `~numpy.ndarray`
Image to map to blue (if None, use imageR).
xSize : int
Desired width of RGB image (or None). If ySize is None, preserve
aspect ratio.
ySize : int
Desired height of RGB image (or None).
rescaleFactor : float
Make size of output image rescaleFactor*size of the input image.
Cannot be specified if xSize or ySize are given.
"""
if imageR is None:
if self._image is None:
raise RuntimeError("You must provide an image or pass one to the constructor")
imageR = self._image
if imageG is None:
imageG = imageR
if imageB is None:
imageB = imageR
if xSize is not None or ySize is not None:
assert rescaleFactor is None, "You may not specify a size and rescaleFactor"
h, w = imageR.shape
if ySize is None:
ySize = int(xSize*h/float(w) + 0.5)
elif xSize is None:
xSize = int(ySize*w/float(h) + 0.5)
# need to cast to int when passing tuple to imresize.
size = (int(ySize), int(xSize)) # n.b. y, x order for scipy
elif rescaleFactor is not None:
size = float(rescaleFactor) # a float is intepreted as a percentage
else:
size = None
if size is not None:
if not HAVE_SCIPY_MISC:
raise RuntimeError("Unable to rescale as scipy.misc is unavailable.")
imageR = scipy.misc.imresize(imageR, size, interp='bilinear', mode='F')
imageG = scipy.misc.imresize(imageG, size, interp='bilinear', mode='F')
imageB = scipy.misc.imresize(imageB, size, interp='bilinear', mode='F')
return np.dstack(self._convertImagesToUint8(imageR, imageG, imageB)).astype(np.uint8)
def intensity(self, imageR, imageG, imageB):
"""
Return the total intensity from the red, blue, and green intensities.
This is a naive computation, and may be overridden by subclasses.
"""
return compute_intensity(imageR, imageG, imageB)
def mapIntensityToUint8(self, I):
"""
Return an array which, when multiplied by an image, returns that image
mapped to the range of a uint8, [0, 255] (but not converted to uint8).
The intensity is assumed to have had minimum subtracted (as that can be
done per-band).
"""
with np.errstate(invalid='ignore', divide='ignore'): # n.b. np.where can't and doesn't short-circuit
return np.where(I <= 0, 0, np.where(I < self._uint8Max, I, self._uint8Max))
def _convertImagesToUint8(self, imageR, imageG, imageB):
"""Use the mapping to convert images imageR, imageG, and imageB to a triplet of uint8 images"""
imageR = imageR - self.minimum[0] # n.b. makes copy
imageG = imageG - self.minimum[1]
imageB = imageB - self.minimum[2]
fac = self.mapIntensityToUint8(self.intensity(imageR, imageG, imageB))
imageRGB = [imageR, imageG, imageB]
for c in imageRGB:
c *= fac
c[c < 0] = 0 # individual bands can still be < 0, even if fac isn't
pixmax = self._uint8Max
r0, g0, b0 = imageRGB # copies -- could work row by row to minimise memory usage
with np.errstate(invalid='ignore', divide='ignore'): # n.b. np.where can't and doesn't short-circuit
for i, c in enumerate(imageRGB):
c = np.where(r0 > g0,
np.where(r0 > b0,
np.where(r0 >= pixmax, c*pixmax/r0, c),
np.where(b0 >= pixmax, c*pixmax/b0, c)),
np.where(g0 > b0,
np.where(g0 >= pixmax, c*pixmax/g0, c),
np.where(b0 >= pixmax, c*pixmax/b0, c))).astype(np.uint8)
c[c > pixmax] = pixmax
imageRGB[i] = c
return imageRGB
class LinearMapping(Mapping):
"""A linear map map of red, blue, green intensities into uint8 values"""
def __init__(self, minimum=None, maximum=None, image=None):
"""
A linear stretch from [minimum, maximum].
If one or both are omitted use image min and/or max to set them.
Parameters
----------
minimum : float
Intensity that should be mapped to black (a scalar or array for R, G, B).
maximum : float
Intensity that should be mapped to white (a scalar).
"""
if minimum is None or maximum is None:
assert image is not None, "You must provide an image if you don't set both minimum and maximum"
if minimum is None:
minimum = image.min()
if maximum is None:
maximum = image.max()
Mapping.__init__(self, minimum=minimum, image=image)
self.maximum = maximum
if maximum is None:
self._range = None
else:
assert maximum - minimum != 0, "minimum and maximum values must not be equal"
self._range = float(maximum - minimum)
def mapIntensityToUint8(self, I):
with np.errstate(invalid='ignore', divide='ignore'): # n.b. np.where can't and doesn't short-circuit
return np.where(I <= 0, 0,
np.where(I >= self._range, self._uint8Max/I, self._uint8Max/self._range))
class ZScaleMapping(LinearMapping):
"""
A mapping for a linear stretch chosen by the zscale algorithm.
(preserving colours independent of brightness)
x = (I - minimum)/range
"""
def __init__(self, image, nSamples=1000, contrast=0.25):
"""
A linear stretch from [z1, z2] chosen by the zscale algorithm.
Parameters
----------
nSamples : int
The number of samples to use to estimate the zscale parameters.
contrast : float
            The zscale contrast parameter, passed through to the zscale function.
"""
z1, z2 = zscale(image, nSamples, contrast)
LinearMapping.__init__(self, z1, z2, image)
class AsinhMapping(Mapping):
"""
A mapping for an asinh stretch (preserving colours independent of brightness)
x = asinh(Q (I - minimum)/range)/Q
This reduces to a linear stretch if Q == 0
See http://adsabs.harvard.edu/abs/2004PASP..116..133L
"""
def __init__(self, minimum, dataRange, Q=8):
"""
asinh stretch from minimum to minimum + dataRange, scaled by Q, via:
x = asinh(Q (I - minimum)/dataRange)/Q
Parameters
----------
minimum : float
Intensity that should be mapped to black (a scalar or array for R, G, B).
dataRange : float
minimum+dataRange defines the white level of the image.
Q : float
The asinh softening parameter.
"""
Mapping.__init__(self, minimum)
epsilon = 1.0/2**23 # 32bit floating point machine epsilon; sys.float_info.epsilon is 64bit
if abs(Q) < epsilon:
Q = 0.1
else:
Qmax = 1e10
if Q > Qmax:
Q = Qmax
if False:
self._slope = self._uint8Max/Q # gradient at origin is self._slope
else:
frac = 0.1 # gradient estimated using frac*range is _slope
self._slope = frac*self._uint8Max/np.arcsinh(frac*Q)
self._soften = Q/float(dataRange)
def mapIntensityToUint8(self, I):
with np.errstate(invalid='ignore', divide='ignore'): # n.b. np.where can't and doesn't short-circuit
return np.where(I <= 0, 0, np.arcsinh(I*self._soften)*self._slope/I)
class AsinhZScaleMapping(AsinhMapping):
"""
A mapping for an asinh stretch, estimating the linear stretch by zscale.
x = asinh(Q (I - z1)/(z2 - z1))/Q
See AsinhMapping
"""
def __init__(self, image1, image2=None, image3=None, Q=8, pedestal=None):
"""
Create an asinh mapping from an image, setting the linear part of the
stretch using zscale.
Parameters
----------
image1 : `~numpy.ndarray`
The image to analyse,
# or a list of 3 images to be converted to an intensity image.
image2 : `~numpy.ndarray`
the second image to analyse (must be specified with image3).
image3 : `~numpy.ndarray`
the third image to analyse (must be specified with image2).
Q : float
The asinh softening parameter.
pedestal : float or sequence(3)
The value, or array of 3 values, to subtract from the images; or None.
pedestal, if not None, is removed from the images when calculating
the zscale stretch, and added back into Mapping.minimum.
"""
if image2 is None or image3 is None:
assert image2 is None and image3 is None, "Please specify either a single image or three images."
image = [image1]
else:
image = [image1, image2, image3]
if pedestal is not None:
try:
assert len(pedestal) in (1, 3,), "Please provide 1 or 3 pedestals."
except TypeError:
pedestal = 3*[pedestal]
image = list(image) # needs to be mutable
for i, im in enumerate(image):
if pedestal[i] != 0.0:
image[i] = im - pedestal[i] # n.b. a copy
else:
pedestal = len(image)*[0.0]
image = compute_intensity(*image)
zscale = ZScaleMapping(image)
dataRange = zscale.maximum - zscale.minimum[0] # zscale.minimum is always a triple
minimum = zscale.minimum
for i, level in enumerate(pedestal):
minimum[i] += level
AsinhMapping.__init__(self, minimum, dataRange, Q)
self._image = image
def makeRGB(imageR, imageG=None, imageB=None, minimum=0, dataRange=5, Q=8,
saturatedBorderWidth=0, saturatedPixelValue=None,
xSize=None, ySize=None, rescaleFactor=None,
fileName=None):
"""
Make an RGB color image from 3 images using an asinh stretch.
Parameters
----------
imageR : `~numpy.ndarray`
Image to map to red (if None, use the image passed to the constructor).
imageG : `~numpy.ndarray`
Image to map to green (if None, use imageR).
imageB : `~numpy.ndarray`
Image to map to blue (if None, use imageR).
minimum : float
Intensity that should be mapped to black (a scalar or array for R, G, B).
dataRange : float
minimum+dataRange defines the white level of the image.
Q : float
The asinh softening parameter.
saturatedBorderWidth : int
If saturatedBorderWidth is non-zero, replace saturated pixels with saturatedPixelValue.
Note that replacing saturated pixels requires that the input images be MaskedImages.
saturatedPixelValue : float
Value to replace saturated pixels with.
xSize : int
Desired width of RGB image (or None). If ySize is None, preserve aspect ratio.
ySize : int
Desired height of RGB image (or None).
rescaleFactor : float
Make size of output image rescaleFactor*size of the input image.
Cannot be specified if xSize or ySize are given.
"""
if imageG is None:
imageG = imageR
if imageB is None:
imageB = imageR
if saturatedBorderWidth:
if saturatedPixelValue is None:
raise ValueError("saturatedPixelValue must be set if saturatedBorderWidth is set")
msg = "Cannot do this until we extract replaceSaturatedPixels out of afw/display/saturated.cc"
raise NotImplementedError(msg)
# replaceSaturatedPixels(imageR, imageG, imageB, saturatedBorderWidth, saturatedPixelValue)
asinhMap = AsinhMapping(minimum, dataRange, Q)
rgb = asinhMap.makeRgbImage(imageR, imageG, imageB,
xSize=xSize, ySize=ySize, rescaleFactor=rescaleFactor)
if fileName:
writeRGB(fileName, rgb)
return rgb
def displayRGB(rgb, show=True, title=None):
"""
Display an rgb image using matplotlib.
Parameters
----------
rgb : `~numpy.ndarray`
The RGB image to display
show : bool
If true, call plt.show()
title : str
Title to use for the displayed image.
"""
import matplotlib.pyplot as plt
plt.imshow(rgb, interpolation='nearest', origin="lower")
if title:
plt.title(title)
if show:
plt.show()
return plt
def writeRGB(fileName, rgbImage):
"""
Write an RGB image to disk.
Most versions of matplotlib support png and pdf (although the eps/pdf/svg
writers may be buggy, possibly due an interaction with useTeX=True in the
matplotlib settings).
If your matplotlib bundles pil/pillow you should also be able to write jpeg
and tiff files.
Parameters
----------
fileName : str
The output file. The extension defines the format, and must be
supported by matplotlib.imsave().
rgbImage : `~numpy.ndarray`
The RGB image to save.
"""
import matplotlib.image
matplotlib.image.imsave(fileName, rgbImage)
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"numpy.iinfo",
"numpy.errstate",
"numpy.where",
"numpy.array",
"numpy.arcsinh"
] |
[((1660, 1699), 'numpy.array', 'np.array', (['intensity'], {'dtype': 'imageR.dtype'}), '(intensity, dtype=imageR.dtype)\n', (1668, 1699), True, 'import numpy as np\n'), ((16626, 16682), 'matplotlib.pyplot.imshow', 'plt.imshow', (['rgb'], {'interpolation': '"""nearest"""', 'origin': '"""lower"""'}), "(rgb, interpolation='nearest', origin='lower')\n", (16636, 16682), True, 'import matplotlib.pyplot as plt\n'), ((16705, 16721), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (16714, 16721), True, 'import matplotlib.pyplot as plt\n'), ((16743, 16753), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16751, 16753), True, 'import matplotlib.pyplot as plt\n'), ((6445, 6491), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""', 'divide': '"""ignore"""'}), "(invalid='ignore', divide='ignore')\n", (6456, 6491), True, 'import numpy as np\n'), ((7355, 7401), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""', 'divide': '"""ignore"""'}), "(invalid='ignore', divide='ignore')\n", (7366, 7401), True, 'import numpy as np\n'), ((9274, 9320), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""', 'divide': '"""ignore"""'}), "(invalid='ignore', divide='ignore')\n", (9285, 9320), True, 'import numpy as np\n'), ((11647, 11693), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""', 'divide': '"""ignore"""'}), "(invalid='ignore', divide='ignore')\n", (11658, 11693), True, 'import numpy as np\n'), ((3197, 3215), 'numpy.iinfo', 'np.iinfo', (['np.uint8'], {}), '(np.uint8)\n', (3205, 3215), True, 'import numpy as np\n'), ((6581, 6628), 'numpy.where', 'np.where', (['(I < self._uint8Max)', 'I', 'self._uint8Max'], {}), '(I < self._uint8Max, I, self._uint8Max)\n', (6589, 6628), True, 'import numpy as np\n'), ((9438, 9514), 'numpy.where', 'np.where', (['(I >= self._range)', '(self._uint8Max / I)', '(self._uint8Max / self._range)'], {}), '(I >= self._range, self._uint8Max / I, self._uint8Max / self._range)\n', (9446, 9514), True, 'import numpy as np\n'), ((11533, 11553), 'numpy.arcsinh', 'np.arcsinh', (['(frac * Q)'], {}), '(frac * Q)\n', (11543, 11553), True, 'import numpy as np\n'), ((11783, 11811), 'numpy.arcsinh', 'np.arcsinh', (['(I * self._soften)'], {}), '(I * self._soften)\n', (11793, 11811), True, 'import numpy as np\n'), ((7620, 7662), 'numpy.where', 'np.where', (['(r0 >= pixmax)', '(c * pixmax / r0)', 'c'], {}), '(r0 >= pixmax, c * pixmax / r0, c)\n', (7628, 7662), True, 'import numpy as np\n'), ((7698, 7740), 'numpy.where', 'np.where', (['(b0 >= pixmax)', '(c * pixmax / b0)', 'c'], {}), '(b0 >= pixmax, c * pixmax / b0, c)\n', (7706, 7740), True, 'import numpy as np\n'), ((7824, 7866), 'numpy.where', 'np.where', (['(g0 >= pixmax)', '(c * pixmax / g0)', 'c'], {}), '(g0 >= pixmax, c * pixmax / g0, c)\n', (7832, 7866), True, 'import numpy as np\n'), ((7902, 7944), 'numpy.where', 'np.where', (['(b0 >= pixmax)', '(c * pixmax / b0)', 'c'], {}), '(b0 >= pixmax, c * pixmax / b0, c)\n', (7910, 7944), True, 'import numpy as np\n')]
|
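Editorial note, not part of the original record: the module docstring above demonstrates makeRGB, but AsinhZScaleMapping has no usage example; a hedged sketch on synthetic data, assuming the module is importable as lupton_rgb (the name used in its own docstring).
import numpy as np
from lupton_rgb import AsinhZScaleMapping, displayRGB  # module name taken from the docstring above
imageR = np.random.random((100, 100))
imageG = np.random.random((100, 100))
imageB = np.random.random((100, 100))
mapping = AsinhZScaleMapping(imageR, imageG, imageB, Q=8)  # zscale chooses the linear part of the stretch
rgb = mapping.makeRgbImage(imageR, imageG, imageB)
displayRGB(rgb, title="synthetic asinh/zscale stretch")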
import os
import re
from mendeley.response import SessionResponseObject
class File(SessionResponseObject):
"""
A file attached to a document.
.. attribute:: id
.. attribute:: size
.. attribute:: file_name
.. attribute:: mime_type
.. attribute:: filehash
.. attribute:: download_url
"""
content_type = 'application/vnd.mendeley-file.1+json'
    filename_regex = re.compile(r'filename="(\S+)"')
@property
def download_url(self):
"""
the URL at which the file can be downloaded. This is only valid for a short time, so should not be cached.
"""
file_url = '/files/%s' % self.id
rsp = self.session.get(file_url, allow_redirects=False)
return rsp.headers['location']
def document(self, view=None):
"""
:param view: document view to return.
:return: a :class:`UserDocument <mendeley.models.documents.UserDocument>` or
:class:`CatalogDocument <mendeley.models.catalog.CatalogDocument>`, depending on which the document is
attached to.
"""
if 'document_id' in self.json:
return self.session.documents.get_lazy(self.json['document_id'], view=view)
elif 'catalog_id' in self.json:
return self.session.catalog.get_lazy(self.json['catalog_id'], view=view)
else:
return None
def download(self, directory):
"""
Downloads the file.
:param directory: the directory to download the file to. This must exist.
:return: the path to the downloaded file.
"""
rsp = self.session.get('/files/%s' % self.id, stream=True)
filename = self.filename_regex.search(rsp.headers['content-disposition']).group(1)
path = os.path.join(directory, filename)
with open(path, 'wb') as f:
for block in rsp.iter_content(1024):
if not block:
break
f.write(block)
return path
def delete(self):
"""
Deletes the file.
"""
self.session.delete('/files/%s' % self.id)
@classmethod
def fields(cls):
return ['id', 'size', 'file_name', 'mime_type', 'filehash']
|
[
"os.path.join",
"re.compile"
] |
[((405, 436), 're.compile', 're.compile', (['"""filename="(\\\\S+)\\""""'], {}), '(\'filename="(\\\\S+)"\')\n', (415, 436), False, 'import re\n'), ((1789, 1822), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (1801, 1822), False, 'import os\n')]
|
import tensorflow as tf
from . tf_net import TFNet
class Resnet(TFNet):
"""
"""
def __init__(self, data, data_format, num_classes, is_train=True):
dtype = data.dtype.base_dtype
super(Resnet, self).__init__(dtype, data_format, train=is_train)
self.net_out = tf.identity(data, name='data')
self.num_classes = num_classes
def _resnet_block(self, filters, kernel, stride=(1,1), act_fn='relu', conv_1x1=0, name=None):
""" """
data = self.net_out
shortcut = data
bn_out = self.batch_norm(data, act_fn, name=name+'_bn')
if conv_1x1:
shortcut = self.convolution(bn_out, filters, (1,1), stride, pad='same', act_fn='',
no_bias=True, name=name+'_1x1_conv')
net_out = self.convolution(bn_out, filters, kernel, stride, act_fn=act_fn, add_bn=True,
name=name+'_conv1')
net_out = self.convolution(net_out, filters, kernel, (1,1), act_fn='', no_bias=True,
name=name+'_conv2')
self.net_out = net_out + shortcut
def _resnet_unit(self, num_blocks, filters, kernel, stride=1, act_fn='relu', name=None):
""" """
strides = (stride, stride)
self._resnet_block(filters, kernel, strides, act_fn, conv_1x1=1, name=name+'_block0')
for i in range(1, num_blocks):
self._resnet_block(filters, kernel, (1,1), act_fn, name=name+'_block'+str(i))
def __call__(self, num_stages=3, num_blocks=3, filters=[16, 32, 64], strides=[1,2,2]):
""" """
self.net_out = self.convolution(self.net_out, filters[0], (3,3), (1,1), act_fn='',
no_bias=True, name='Conv0')
for k in range(num_stages):
self._resnet_unit(num_blocks, filters[k], (3,3), strides[k], name='stage'+str(k))
net_out = self.pooling(self.net_out, 'avg', (8,8), name="global_pool")
net_out = self.dropout(net_out, 0.5)
net_out = self.flatten(net_out)
net_out = self.Softmax(net_out, self.num_classes)
return net_out
def snpx_net_create(num_classes, input_data, data_format="NHWC", is_training=True):
""" """
net = Resnet(input_data, data_format, num_classes, is_training)
net_out = net(num_stages=3, num_blocks=3, filters=[16, 32, 64], strides=[1,2,2])
return net_out
|
[
"tensorflow.identity"
] |
[((294, 324), 'tensorflow.identity', 'tf.identity', (['data'], {'name': '"""data"""'}), "(data, name='data')\n", (305, 324), True, 'import tensorflow as tf\n')]
|
import copy
import typing
import splendor_sim.interfaces.action.i_action as i_action
import splendor_sim.interfaces.card.i_card as i_card
import splendor_sim.interfaces.coin.i_coin_type as i_coin_type
import splendor_sim.interfaces.game_state.i_game_state as i_game_state
import splendor_sim.interfaces.player.i_player as i_player
class ReserveCardAction(i_action.IAction):
def __init__(
self,
valid_coin_type_set: typing.Set[i_coin_type.ICoinType],
current_player: i_player.IPlayer,
coins: typing.Dict[i_coin_type.ICoinType, int],
card: i_card.ICard,
):
self._validate_input(valid_coin_type_set, coins)
self._card = card
self._coin_dictionary = copy.copy(coins)
self._current_player = current_player
def validate(self, game_state: i_game_state.IGameState) -> bool:
number_of_reserved_cards = (
self._current_player.get_card_inventory().get_number_of_reserved_cards()
)
max_number_of_reserved_cards = (
self._current_player.get_card_inventory().get_max_number_of_reserved_cards()
)
if number_of_reserved_cards < max_number_of_reserved_cards:
if self._card in game_state.get_card_reserve().get_cards_for_sale():
if game_state.get_coin_reserve().has_minimum(self._coin_dictionary):
return True
return False
def execute(self, game_state: i_game_state.IGameState) -> None:
if not self.validate(game_state):
raise ValueError("invalid action")
game_state.get_coin_reserve().remove_coins(self._coin_dictionary)
game_state.get_card_reserve().remove_card(self._card)
self._current_player.get_coin_inventory().add_coins(self._coin_dictionary)
self._current_player.get_card_inventory().add_card_to_reserved(self._card)
@staticmethod
def _validate_input(
valid_coin_type_set: typing.Set[i_coin_type.ICoinType],
coins: typing.Dict[i_coin_type.ICoinType, int],
):
if len(coins.keys()) != 1:
raise ValueError("can only take 1 type of coin")
for coin_type, number_of_coins in coins.items():
if number_of_coins != 1:
raise ValueError("can only take one coin")
if coin_type not in valid_coin_type_set:
raise ValueError("invalid coin type")
|
[
"copy.copy"
] |
[((721, 737), 'copy.copy', 'copy.copy', (['coins'], {}), '(coins)\n', (730, 737), False, 'import copy\n')]
|
import re
HUEVELS = {
'у': 'хую',
'У': 'хую',
'е': 'хуе',
'Е': 'хуе',
'ё': 'хуё',
'Ё': 'хуё',
'а': 'хуя',
'А': 'хуя',
'о': 'хуё',
'О': 'хуё',
'э': 'хуе',
'Э': 'хуе',
'я': 'хуя',
'Я': 'хуя',
'и': 'хуи',
'И': 'хуи',
'ы': 'хуы',
'Ы': 'хуы',
'ю': 'хую',
'Ю': 'хую'
}
PUNCT_MARKS = [',', '.', ';', ':']
def count_syllabiles(word):
count = 0
for letter in word:
if letter in HUEVELS:
count += 1
return count
def get_last_letter(word):
if word == '':
return word
last_letter = word[-1]
if last_letter in PUNCT_MARKS:
return get_last_letter(word[:-1])
return last_letter
def first_vowel(word):
res = re.search("[уеёыаоэяию]", word, re.IGNORECASE)
if res:
return res.start(), res.group()
return -1, ''
def huificator(word):
num_syl = count_syllabiles(word)
last_letter = get_last_letter(word)
if num_syl == 0:
return word
if num_syl == 1:
if last_letter in HUEVELS:
return word
pos, vow = first_vowel(word)
if pos == -1:
return word
repl = HUEVELS[vow].upper() if len(word) >= 2 and word[:2].isupper() else HUEVELS[vow]
result = repl + word[pos+1:]
if word.isupper():
result = result.upper()
elif word[:1].isupper():
result = result[:1].upper() + result[1:]
return result
|
[
"re.search"
] |
[((748, 794), 're.search', 're.search', (['"""[уеёыаоэяию]"""', 'word', 're.IGNORECASE'], {}), "('[уеёыаоэяию]', word, re.IGNORECASE)\n", (757, 794), False, 'import re\n')]
|
import model
import tensorflow as tf
import utils
def train(target,
num_param_servers,
is_chief,
lstm_size=64,
input_filenames=None,
sentence_length=128,
vocab_size=2**15,
learning_rate=0.01,
output_dir=None,
batch_size=1024,
embedding_size=128,
num_epochs=2):
graph = tf.Graph()
with graph.as_default():
sentences, scores = model.get_inputs(
input_filenames, batch_size, num_epochs, sentence_length)
with tf.device(tf.train.replica_device_setter()):
lstm = model.BasicRegressionLSTM(
sentences,
scores,
num_param_servers,
vocab_size,
learning_rate,
embedding_size,
lstm_size
)
tf.contrib.learn.train(
graph,
output_dir,
lstm.train_op,
lstm.loss,
global_step_tensor=lstm.global_step,
supervisor_is_chief=is_chief,
supervisor_master=target
)
if __name__ == "__main__":
parser = utils.base_parser()
parser.add_argument(
'--learning-rate',
type=float,
default=0.01
)
utils.dispatch(
train,
**parser.parse_args().__dict__
)
|
[
"model.get_inputs",
"utils.base_parser",
"tensorflow.contrib.learn.train",
"model.BasicRegressionLSTM",
"tensorflow.Graph",
"tensorflow.train.replica_device_setter"
] |
[((386, 396), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (394, 396), True, 'import tensorflow as tf\n'), ((869, 1037), 'tensorflow.contrib.learn.train', 'tf.contrib.learn.train', (['graph', 'output_dir', 'lstm.train_op', 'lstm.loss'], {'global_step_tensor': 'lstm.global_step', 'supervisor_is_chief': 'is_chief', 'supervisor_master': 'target'}), '(graph, output_dir, lstm.train_op, lstm.loss,\n global_step_tensor=lstm.global_step, supervisor_is_chief=is_chief,\n supervisor_master=target)\n', (891, 1037), True, 'import tensorflow as tf\n'), ((1134, 1153), 'utils.base_parser', 'utils.base_parser', ([], {}), '()\n', (1151, 1153), False, 'import utils\n'), ((454, 528), 'model.get_inputs', 'model.get_inputs', (['input_filenames', 'batch_size', 'num_epochs', 'sentence_length'], {}), '(input_filenames, batch_size, num_epochs, sentence_length)\n', (470, 528), False, 'import model\n'), ((620, 741), 'model.BasicRegressionLSTM', 'model.BasicRegressionLSTM', (['sentences', 'scores', 'num_param_servers', 'vocab_size', 'learning_rate', 'embedding_size', 'lstm_size'], {}), '(sentences, scores, num_param_servers, vocab_size,\n learning_rate, embedding_size, lstm_size)\n', (645, 741), False, 'import model\n'), ((566, 598), 'tensorflow.train.replica_device_setter', 'tf.train.replica_device_setter', ([], {}), '()\n', (596, 598), True, 'import tensorflow as tf\n')]
|
"""The main Klaxer server"""
import logging
import json
import hug
from falcon import HTTP_400, HTTP_500
from klaxer.rules import Rules
from klaxer.errors import AuthorizationError, NoRouteFoundError, ServiceNotDefinedError
from klaxer.lib import classify, enrich, filtered, route, send, validate
from klaxer.models import Alert
from klaxer.users import create_user, add_message, bootstrap, api_key_authentication, is_existing_user
CURRENT_FILTERS = []
RULES = Rules()
@hug.post('/alert/{service_name}/{token}')
def incoming(service_name: hug.types.text, token: hug.types.text, response, debug=False, body=None):
"""An incoming alert. The core API method"""
try:
validate(service_name, token)
alert = Alert.from_service(service_name, body)
alert = classify(alert, RULES.get_classification_rules(service_name))
# Filter based on rules (e.g. junk an alert if a string is in the body or if it came from a CI bot).
if filtered(alert, RULES.get_exclusion_rules(service_name)):
return
        # Filter based on user interactions (e.g. bail if we've snoozed this notification type).
if filtered(alert, CURRENT_FILTERS):
return
        # Enrich based on custom rules (e.g. all alerts with 'keepalive' have '@deborah' appended so Deborah gets an extra level of notification priority).
alert = enrich(alert, RULES.get_enrichment_rules(service_name))
# Determine where the message goes
alert = route(alert, RULES.get_routing_rules(service_name))
# Present relevant debug info without actually sending the Alert
if debug:
return alert.to_dict()
#The target channel gets queried for the most recent message. If it's identical, perform rollup. Otherwise, post the alert.
send(alert)
return {"status": "ok"}
except (AuthorizationError, NoRouteFoundError, ServiceNotDefinedError) as error:
logging.exception('Failed to serve an alert response')
response.status = HTTP_500
return {"status": error.message}
@hug.post('/user/register')
def register(response, body=None):
"""Register for Klaxer and get a key in return."""
if not body:
response.status = HTTP_400
return {"status": "No request body provided"}
email = body.get('email')
name = body.get('name')
if not email or not name:
response.status = HTTP_400
return {"status": "Please provide a valid name and email."}
if is_existing_user(email):
response.status = HTTP_400
return {"status": f"Email {email} is already registered"}
user = create_user(name=name, email=email)
return {
'id': user.id,
'api_key': user.api_key
}
@hug.get('/user/me', requires=api_key_authentication)
def profile(user: hug.directives.user, response, body=None):
"""If authenticated, give the user back their profile information."""
return user.to_dict()
@hug.startup()
def startup(api):
"""Bootstrap the database when the API starts."""
bootstrap()
|
[
"klaxer.lib.send",
"klaxer.rules.Rules",
"hug.get",
"hug.post",
"logging.exception",
"klaxer.models.Alert.from_service",
"klaxer.users.is_existing_user",
"hug.startup",
"klaxer.users.create_user",
"klaxer.lib.filtered",
"klaxer.lib.validate",
"klaxer.users.bootstrap"
] |
[((467, 474), 'klaxer.rules.Rules', 'Rules', ([], {}), '()\n', (472, 474), False, 'from klaxer.rules import Rules\n'), ((477, 518), 'hug.post', 'hug.post', (['"""/alert/{service_name}/{token}"""'], {}), "('/alert/{service_name}/{token}')\n", (485, 518), False, 'import hug\n'), ((2101, 2127), 'hug.post', 'hug.post', (['"""/user/register"""'], {}), "('/user/register')\n", (2109, 2127), False, 'import hug\n'), ((2772, 2824), 'hug.get', 'hug.get', (['"""/user/me"""'], {'requires': 'api_key_authentication'}), "('/user/me', requires=api_key_authentication)\n", (2779, 2824), False, 'import hug\n'), ((2989, 3002), 'hug.startup', 'hug.startup', ([], {}), '()\n', (3000, 3002), False, 'import hug\n'), ((2522, 2545), 'klaxer.users.is_existing_user', 'is_existing_user', (['email'], {}), '(email)\n', (2538, 2545), False, 'from klaxer.users import create_user, add_message, bootstrap, api_key_authentication, is_existing_user\n'), ((2659, 2694), 'klaxer.users.create_user', 'create_user', ([], {'name': 'name', 'email': 'email'}), '(name=name, email=email)\n', (2670, 2694), False, 'from klaxer.users import create_user, add_message, bootstrap, api_key_authentication, is_existing_user\n'), ((3079, 3090), 'klaxer.users.bootstrap', 'bootstrap', ([], {}), '()\n', (3088, 3090), False, 'from klaxer.users import create_user, add_message, bootstrap, api_key_authentication, is_existing_user\n'), ((686, 715), 'klaxer.lib.validate', 'validate', (['service_name', 'token'], {}), '(service_name, token)\n', (694, 715), False, 'from klaxer.lib import classify, enrich, filtered, route, send, validate\n'), ((732, 770), 'klaxer.models.Alert.from_service', 'Alert.from_service', (['service_name', 'body'], {}), '(service_name, body)\n', (750, 770), False, 'from klaxer.models import Alert\n'), ((1162, 1194), 'klaxer.lib.filtered', 'filtered', (['alert', 'CURRENT_FILTERS'], {}), '(alert, CURRENT_FILTERS)\n', (1170, 1194), False, 'from klaxer.lib import classify, enrich, filtered, route, send, validate\n'), ((1830, 1841), 'klaxer.lib.send', 'send', (['alert'], {}), '(alert)\n', (1834, 1841), False, 'from klaxer.lib import classify, enrich, filtered, route, send, validate\n'), ((1967, 2021), 'logging.exception', 'logging.exception', (['"""Failed to serve an alert response"""'], {}), "('Failed to serve an alert response')\n", (1984, 2021), False, 'import logging\n')]
|
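Editorial note, not part of the original record: a hedged client-side sketch for the hug endpoints above. Host, port, service name, token, and body fields are hypothetical and depend on how Klaxer is configured and served.
import requests
base = "http://localhost:8000"  # wherever the hug app is served
resp = requests.post(
    f"{base}/alert/myservice/secret-token",  # must pass validate(service_name, token)
    params={"debug": True},  # debug=True returns the routed alert instead of sending it
    json={"text": "keepalive failed on host-1"},
)
print(resp.json())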
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""HTTP2 Test Server"""
import argparse
import logging
import twisted
import twisted.internet
import twisted.internet.endpoints
import twisted.internet.reactor
import http2_base_server
import test_goaway
import test_max_streams
import test_ping
import test_rst_after_data
import test_rst_after_header
import test_rst_during_data
_TEST_CASE_MAPPING = {
'rst_after_header': test_rst_after_header.TestcaseRstStreamAfterHeader,
'rst_after_data': test_rst_after_data.TestcaseRstStreamAfterData,
'rst_during_data': test_rst_during_data.TestcaseRstStreamDuringData,
'goaway': test_goaway.TestcaseGoaway,
'ping': test_ping.TestcasePing,
'max_streams': test_max_streams.TestcaseSettingsMaxStreams,
}
class H2Factory(twisted.internet.protocol.Factory):
def __init__(self, testcase):
logging.info('Creating H2Factory for new connection.')
self._num_streams = 0
self._testcase = testcase
def buildProtocol(self, addr):
self._num_streams += 1
logging.info('New Connection: %d' % self._num_streams)
    if self._testcase not in _TEST_CASE_MAPPING:
logging.error('Unknown test case: %s' % self._testcase)
assert(0)
else:
t = _TEST_CASE_MAPPING[self._testcase]
if self._testcase == 'goaway':
return t(self._num_streams).get_base_server()
else:
return t().get_base_server()
if __name__ == '__main__':
logging.basicConfig(
format='%(levelname) -10s %(asctime)s %(module)s:%(lineno)s | %(message)s',
level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('--test_case', choices=sorted(_TEST_CASE_MAPPING.keys()),
help='test case to run', required=True)
parser.add_argument('--port', type=int, default=8080,
help='port to run the server (default: 8080)')
args = parser.parse_args()
logging.info('Running test case %s on port %d' % (args.test_case, args.port))
endpoint = twisted.internet.endpoints.TCP4ServerEndpoint(
twisted.internet.reactor, args.port, backlog=128)
endpoint.listen(H2Factory(args.test_case))
twisted.internet.reactor.run()
|
[
"logging.error",
"argparse.ArgumentParser",
"logging.basicConfig",
"logging.info",
"twisted.internet.reactor.run",
"twisted.internet.endpoints.TCP4ServerEndpoint"
] |
[((2906, 3030), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname) -10s %(asctime)s %(module)s:%(lineno)s | %(message)s"""', 'level': 'logging.INFO'}), "(format=\n '%(levelname) -10s %(asctime)s %(module)s:%(lineno)s | %(message)s',\n level=logging.INFO)\n", (2925, 3030), False, 'import logging\n'), ((3042, 3067), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3065, 3067), False, 'import argparse\n'), ((3330, 3407), 'logging.info', 'logging.info', (["('Running test case %s on port %d' % (args.test_case, args.port))"], {}), "('Running test case %s on port %d' % (args.test_case, args.port))\n", (3342, 3407), False, 'import logging\n'), ((3421, 3520), 'twisted.internet.endpoints.TCP4ServerEndpoint', 'twisted.internet.endpoints.TCP4ServerEndpoint', (['twisted.internet.reactor', 'args.port'], {'backlog': '(128)'}), '(twisted.internet.reactor,\n args.port, backlog=128)\n', (3466, 3520), False, 'import twisted\n'), ((3569, 3599), 'twisted.internet.reactor.run', 'twisted.internet.reactor.run', ([], {}), '()\n', (3597, 3599), False, 'import twisted\n'), ((2324, 2378), 'logging.info', 'logging.info', (['"""Creating H2Factory for new connection."""'], {}), "('Creating H2Factory for new connection.')\n", (2336, 2378), False, 'import logging\n'), ((2500, 2554), 'logging.info', 'logging.info', (["('New Connection: %d' % self._num_streams)"], {}), "('New Connection: %d' % self._num_streams)\n", (2512, 2554), False, 'import logging\n'), ((2616, 2671), 'logging.error', 'logging.error', (["('Unknown test case: %s' % self._testcase)"], {}), "('Unknown test case: %s' % self._testcase)\n", (2629, 2671), False, 'import logging\n')]
|
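Editorial note, not part of the original record: the server above is command-line driven; a hedged sketch of launching it from a Python test harness. The script filename is hypothetical; the flags match the argparse definition above.
import subprocess
proc = subprocess.Popen(
    ["python", "http2_test_server.py", "--test_case", "goaway", "--port", "8080"])
# ... run the HTTP/2 client under test against localhost:8080 ...
proc.terminate()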
"""This module implements the QFactor class."""
from __future__ import annotations
import logging
from typing import Any
from typing import TYPE_CHECKING
import numpy as np
import numpy.typing as npt
from bqskitrs import QFactorInstantiatorNative
from bqskit.ir.opt.instantiater import Instantiater
from bqskit.qis.state.state import StateVector
from bqskit.qis.unitary import LocallyOptimizableUnitary
from bqskit.qis.unitary.unitarymatrix import UnitaryMatrix
if TYPE_CHECKING:
from bqskit.ir.circuit import Circuit
_logger = logging.getLogger(__name__)
class QFactor(QFactorInstantiatorNative, Instantiater):
"""The QFactor circuit instantiater."""
def __new__(cls, **kwargs: dict[str, Any]) -> Any:
if 'cost_fn_gen' in kwargs:
del kwargs['cost_fn_gen']
return super().__new__(cls, **kwargs)
def instantiate(
self,
circuit: Circuit,
target: UnitaryMatrix | StateVector,
x0: npt.NDArray[np.float64],
) -> npt.NDArray[np.float64]:
"""Instantiate `circuit`, see Instantiater for more info."""
return super().instantiate(circuit, target, x0)
@staticmethod
def is_capable(circuit: Circuit) -> bool:
"""Return true if the circuit can be instantiated."""
return all(
isinstance(gate, LocallyOptimizableUnitary)
for gate in circuit.gate_set
)
@staticmethod
def get_violation_report(circuit: Circuit) -> str:
"""
Return a message explaining why `circuit` cannot be instantiated.
Args:
circuit (Circuit): Generate a report for this circuit.
Raises:
ValueError: If `circuit` can be instantiated with this
instantiater.
"""
invalid_gates = {
gate
for gate in circuit.gate_set
if not isinstance(gate, LocallyOptimizableUnitary)
}
if len(invalid_gates) == 0:
raise ValueError('Circuit can be instantiated.')
return (
'Cannot instantiate circuit with qfactor'
' because the following gates are not locally optimizable: %s.'
% ', '.join(str(g) for g in invalid_gates)
)
@staticmethod
def get_method_name() -> str:
"""Return the name of this method."""
return 'qfactor'
|
[
"logging.getLogger"
] |
[((538, 565), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (555, 565), False, 'import logging\n')]
|
import tensorflow as tf
def create_pb_model(pb_path, sz, bs):
def load_graph(frozen_graph_filename):
# We load the protobuf file from the disk and parse it to retrieve the
# unserialized graph_def
with tf.compat.v1.gfile.GFile(frozen_graph_filename, "rb") as f:
graph_def = tf.compat.v1.GraphDef()
graph_def.ParseFromString(f.read())
# Then, we can use again a convenient built-in function to import a graph_def into the
# current default Graph
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def,input_map=None,return_elements=None,name="prefix",op_dict=None,producer_op_list=None)
return graph, graph_def
def wrap_frozen_graph(graph_def, inputs, outputs):
def _imports_graph_def():
tf.compat.v1.import_graph_def(graph_def, name="")
wrapped_import = tf.compat.v1.wrap_function(_imports_graph_def, [])
import_graph = wrapped_import.graph
return wrapped_import.prune(
tf.nest.map_structure(import_graph.as_graph_element, inputs),
tf.nest.map_structure(import_graph.as_graph_element, outputs))
graph, graph_def = load_graph(pb_path)
model_fn = wrap_frozen_graph(graph_def, 'data:0', 'prob:0')
lam = tf.keras.layers.Lambda(model_fn)
inpt = tf.keras.layers.Input(sz, batch_size=bs)
out = lam(inpt)
model = tf.keras.models.Model(inpt, out)
return model
|
[
"tensorflow.compat.v1.wrap_function",
"tensorflow.compat.v1.gfile.GFile",
"tensorflow.keras.models.Model",
"tensorflow.nest.map_structure",
"tensorflow.keras.layers.Input",
"tensorflow.Graph",
"tensorflow.import_graph_def",
"tensorflow.compat.v1.GraphDef",
"tensorflow.keras.layers.Lambda",
"tensorflow.compat.v1.import_graph_def"
] |
[((1285, 1317), 'tensorflow.keras.layers.Lambda', 'tf.keras.layers.Lambda', (['model_fn'], {}), '(model_fn)\n', (1307, 1317), True, 'import tensorflow as tf\n'), ((1330, 1370), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (['sz'], {'batch_size': 'bs'}), '(sz, batch_size=bs)\n', (1351, 1370), True, 'import tensorflow as tf\n'), ((1404, 1436), 'tensorflow.keras.models.Model', 'tf.keras.models.Model', (['inpt', 'out'], {}), '(inpt, out)\n', (1425, 1436), True, 'import tensorflow as tf\n'), ((894, 944), 'tensorflow.compat.v1.wrap_function', 'tf.compat.v1.wrap_function', (['_imports_graph_def', '[]'], {}), '(_imports_graph_def, [])\n', (920, 944), True, 'import tensorflow as tf\n'), ((231, 284), 'tensorflow.compat.v1.gfile.GFile', 'tf.compat.v1.gfile.GFile', (['frozen_graph_filename', '"""rb"""'], {}), "(frozen_graph_filename, 'rb')\n", (255, 284), True, 'import tensorflow as tf\n'), ((315, 338), 'tensorflow.compat.v1.GraphDef', 'tf.compat.v1.GraphDef', ([], {}), '()\n', (336, 338), True, 'import tensorflow as tf\n'), ((573, 698), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'input_map': 'None', 'return_elements': 'None', 'name': '"""prefix"""', 'op_dict': 'None', 'producer_op_list': 'None'}), "(graph_def, input_map=None, return_elements=None, name=\n 'prefix', op_dict=None, producer_op_list=None)\n", (592, 698), True, 'import tensorflow as tf\n'), ((821, 870), 'tensorflow.compat.v1.import_graph_def', 'tf.compat.v1.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (850, 870), True, 'import tensorflow as tf\n'), ((1032, 1092), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['import_graph.as_graph_element', 'inputs'], {}), '(import_graph.as_graph_element, inputs)\n', (1053, 1092), True, 'import tensorflow as tf\n'), ((1104, 1165), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['import_graph.as_graph_element', 'outputs'], {}), '(import_graph.as_graph_element, outputs)\n', (1125, 1165), True, 'import tensorflow as tf\n'), ((527, 537), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (535, 537), True, 'import tensorflow as tf\n')]
|
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
from urllib.parse import urlparse
from cfn_policy_validator.application_error import ApplicationError
from cfn_policy_validator.parsers.output import Policy, Resource
class SqsQueuePolicyParser:
""" AWS::SQS::QueuePolicy
"""
def __init__(self):
self.queue_policies = []
def parse(self, _, resource):
evaluated_resource = resource.eval(sqs_queue_policy_schema)
properties = evaluated_resource['Properties']
queue_urls = properties['Queues']
policy_document = properties['PolicyDocument']
for queue in queue_urls:
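			# SQS queue URLs look like https://sqs.<region>.amazonaws.com/<account-id>/<queue-name>, so the queue name is the third path segment.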
parsed_url = urlparse(queue)
try:
queue_name = parsed_url.path.split('/')[2]
except IndexError:
raise ApplicationError(f'Invalid queue URL. Unable to parse name from URL. Invalid value: "{queue}"')
policy = Policy('QueuePolicy', policy_document)
resource = Resource(queue_name, 'AWS::SQS::Queue', policy)
self.queue_policies.append(resource)
def get_policies(self):
return self.queue_policies
sqs_queue_policy_schema = {
'type': 'object',
'properties': {
'Properties': {
'type': 'object',
'properties': {
'PolicyDocument': {
'type': 'object'
},
'Queues': {
'type': 'array',
'minItems': 1,
'items': {
'type': 'string'
}
}
},
'required': ['PolicyDocument', 'Queues']
}
},
'required': ['Properties']
}
|
[
"cfn_policy_validator.application_error.ApplicationError",
"cfn_policy_validator.parsers.output.Policy",
"urllib.parse.urlparse",
"cfn_policy_validator.parsers.output.Resource"
] |
[((713, 728), 'urllib.parse.urlparse', 'urlparse', (['queue'], {}), '(queue)\n', (721, 728), False, 'from urllib.parse import urlparse\n'), ((976, 1014), 'cfn_policy_validator.parsers.output.Policy', 'Policy', (['"""QueuePolicy"""', 'policy_document'], {}), "('QueuePolicy', policy_document)\n", (982, 1014), False, 'from cfn_policy_validator.parsers.output import Policy, Resource\n'), ((1038, 1085), 'cfn_policy_validator.parsers.output.Resource', 'Resource', (['queue_name', '"""AWS::SQS::Queue"""', 'policy'], {}), "(queue_name, 'AWS::SQS::Queue', policy)\n", (1046, 1085), False, 'from cfn_policy_validator.parsers.output import Policy, Resource\n'), ((858, 963), 'cfn_policy_validator.application_error.ApplicationError', 'ApplicationError', (['f"""Invalid queue URL. Unable to parse name from URL. Invalid value: "{queue}\\""""'], {}), '(\n f\'Invalid queue URL. Unable to parse name from URL. Invalid value: "{queue}"\'\n )\n', (874, 963), False, 'from cfn_policy_validator.application_error import ApplicationError\n')]
|
import torch
import torch.nn as nn
from torch.functional import F
from utils import *
class SequenceModel(nn.Module):
"""docstring for SequenceModel"""
def __init__(self, input_size, hidden_size, n_layers, **kwargs):
super(SequenceModel, self).__init__()
self.rnn = nn.LSTM(input_size, hidden_size, n_layers, **kwargs)
self.linear = nn.Sequential(linear(hidden_size, hidden_size, True, 0.5),
linear(hidden_size, hidden_size, True, 0.5))
self.fc = nn.Linear(hidden_size, 128)
# load
def forward(self, x):
x, _ = self.rnn(x)
x = x.mean(1)
x = self.linear(x)
return self.fc(x)
|
[
"torch.nn.LSTM",
"torch.nn.Linear"
] |
[((277, 329), 'torch.nn.LSTM', 'nn.LSTM', (['input_size', 'hidden_size', 'n_layers'], {}), '(input_size, hidden_size, n_layers, **kwargs)\n', (284, 329), True, 'import torch.nn as nn\n'), ((472, 499), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(128)'], {}), '(hidden_size, 128)\n', (481, 499), True, 'import torch.nn as nn\n')]
|
import pytest
import numpy as np
import pandas as pd
from .stats import IV, WOE, gini, gini_cond, entropy_cond, quality, _IV, VIF
np.random.seed(1)
feature = np.random.rand(500)
target = np.random.randint(2, size = 500)
A = np.random.randint(100, size = 500)
B = np.random.randint(100, size = 500)
mask = np.random.randint(8, size = 500)
df = pd.DataFrame({
'feature': feature,
'target': target,
'A': A,
'B': B,
})
def test_woe():
value = WOE(0.2, 0.3)
assert value == -0.4054651081081643
def test_iv_priv():
value, _ = _IV(df['feature'], df['target'])
assert value == 0.010385942643745403
def test_iv():
value = IV(df['feature'], df['target'], n_bins = 10, method = 'dt')
assert value == 0.2735917707743619
def test_iv_return_sub():
_, sub = IV(mask, df['target'], return_sub = True, n_bins = 10, method = 'dt')
assert len(sub) == 8
assert sub[4] == 0.006449386778057019
def test_iv_frame():
res = IV(df, 'target', n_bins = 10, method = 'chi')
assert res.loc[0, 'A'] == 0.226363832867123
def test_gini():
value = gini(df['target'])
assert value == 0.499352
def test_gini_cond():
value = gini_cond(df['feature'], df['target'])
assert value == 0.4970162601626016
def test_entropy_cond():
value = entropy_cond(df['feature'], df['target'])
assert value == 0.6924990371522171
def test_quality():
result = quality(df, 'target')
assert result.loc['feature', 'iv'] == 0.2735917707743619
assert result.loc['A', 'gini'] == 0.49284164671885444
assert result.loc['B', 'entropy'] == 0.6924956879070063
assert result.loc['feature', 'unique'] == 500
def test_quality_iv_only():
result = quality(df, 'target', iv_only = True)
assert np.isnan(result.loc['feature', 'gini'])
def test_quality_object_type_array_with_nan():
feature = np.array([np.nan, 'A', 'B', 'C', 'D', 'E', 'F', 'G'], dtype = 'O')[mask]
df = pd.DataFrame({
'feature': feature,
'target': target,
})
result = quality(df)
assert result.loc['feature', 'iv'] == 0.016379338180530334
def test_vif():
vif = VIF(df)
assert vif['A'] == 2.969336442640111
|
[
"pandas.DataFrame",
"numpy.random.seed",
"numpy.isnan",
"numpy.random.randint",
"numpy.array",
"numpy.random.rand"
] |
[((133, 150), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (147, 150), True, 'import numpy as np\n'), ((162, 181), 'numpy.random.rand', 'np.random.rand', (['(500)'], {}), '(500)\n', (176, 181), True, 'import numpy as np\n'), ((191, 221), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(500)'}), '(2, size=500)\n', (208, 221), True, 'import numpy as np\n'), ((228, 260), 'numpy.random.randint', 'np.random.randint', (['(100)'], {'size': '(500)'}), '(100, size=500)\n', (245, 260), True, 'import numpy as np\n'), ((267, 299), 'numpy.random.randint', 'np.random.randint', (['(100)'], {'size': '(500)'}), '(100, size=500)\n', (284, 299), True, 'import numpy as np\n'), ((309, 339), 'numpy.random.randint', 'np.random.randint', (['(8)'], {'size': '(500)'}), '(8, size=500)\n', (326, 339), True, 'import numpy as np\n'), ((348, 416), 'pandas.DataFrame', 'pd.DataFrame', (["{'feature': feature, 'target': target, 'A': A, 'B': B}"], {}), "({'feature': feature, 'target': target, 'A': A, 'B': B})\n", (360, 416), True, 'import pandas as pd\n'), ((1746, 1785), 'numpy.isnan', 'np.isnan', (["result.loc['feature', 'gini']"], {}), "(result.loc['feature', 'gini'])\n", (1754, 1785), True, 'import numpy as np\n'), ((1931, 1983), 'pandas.DataFrame', 'pd.DataFrame', (["{'feature': feature, 'target': target}"], {}), "({'feature': feature, 'target': target})\n", (1943, 1983), True, 'import pandas as pd\n'), ((1848, 1912), 'numpy.array', 'np.array', (["[np.nan, 'A', 'B', 'C', 'D', 'E', 'F', 'G']"], {'dtype': '"""O"""'}), "([np.nan, 'A', 'B', 'C', 'D', 'E', 'F', 'G'], dtype='O')\n", (1856, 1912), True, 'import numpy as np\n')]
|
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# *****************************************************************************
import abc
from contextlib import contextmanager
import os
from shutil import copyfile
import subprocess
from time import sleep
from scp import SCPClient
import paramiko
import six
from dlab_core.domain.helper import break_after
@six.add_metaclass(abc.ABCMeta)
class BaseCommandExecutor(object):
@abc.abstractmethod
def run(self, command):
"""Run cli command
:type command: str
:param command: cli command
"""
raise NotImplementedError
@abc.abstractmethod
def sudo(self, command):
"""Run cli sudo command
:type command: str
:param command: cli command
"""
raise NotImplementedError
@abc.abstractmethod
def cd(self, path):
"""Change work directory to path
:type path: str
:param path: directory location
"""
raise NotImplementedError
@abc.abstractmethod
def put(self, local_path, remote_path):
"""Copy file
:type local_path: str
:param local_path: path to local object
:type remote_path: str
:param remote_path: path to remote object
"""
raise NotImplementedError
class LocalCommandExecutor(BaseCommandExecutor):
def run(self, command): # pragma: no cover
"""Run cli command
:type command: str
:param command: cli command
:rtype: str
:return execution result
"""
lines = []
process = subprocess.Popen(
command, shell=True, universal_newlines=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while process.poll() is None:
line = process.stdout.readline()
lines.append(line)
# TODO: Add logging
return ' '.join(lines)
def sudo(self, command):
"""Run cli sudo command
:type command: str
:param command: cli command
:rtype: str
:return execution result
"""
raise NotImplementedError
@contextmanager
def cd(self, path):
"""Change work directory to path
:type path: str
:param path: directory location
"""
current_dir = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(current_dir)
def put(self, local_path, remote_path):
"""Copy file
:type local_path: str
:param local_path: path to local object
:type remote_path: str
:param remote_path: path to remote object
"""
copyfile(local_path, remote_path)
class ParamikoCommandExecutor(BaseCommandExecutor):
def __init__(self, host, name, identity_file):
"""
:type host: str
:param host: ip address or host name
:type name: str
:param name: user name
:type: str
:param identity_file: path to file
"""
self.current_dir = None
self._connection = None
self.host = host
self.name = name
self.identity_file = identity_file
@property
def connection(self):
"""Return paramiko connection"""
return self._connection or self.init_connection()
@break_after(180)
def init_connection(self):
"""Init connection"""
connection = paramiko.SSHClient()
connection.set_missing_host_key_policy(
paramiko.AutoAddPolicy())
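        # Keep retrying until an SSH connection is established and a test command succeeds.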
while True:
try:
connection.connect(self.host, username=self.name,
key_filename=self.identity_file)
connection.exec_command('ls')
return connection
except Exception:
sleep(10)
@property
def current_dir(self):
"""Default directory"""
return self._current_dir
@current_dir.setter
def current_dir(self, val):
"""Set default directory
:type val: str
:param val: new directory
"""
self._current_dir = val
def run(self, command):
"""Run cli command
:type command: str
:param command: cli command
:rtype: str
:return execution result
"""
if self.current_dir:
command = 'cd {}; {}'.format(self.current_dir, command)
stdin, stdout, stderr = self.connection.exec_command(command)
return stdout.read().decode('ascii').strip("\n")
def sudo(self, command):
"""Run sudo cli command
:type command: str
:param command: cli command
:rtype: str
:return execution result
"""
command = 'sudo {}'.format(command)
return self.run(command)
@contextmanager
def cd(self, path):
try:
self.current_dir = path
yield
finally:
self.current_dir = None
def put(self, local_path, remote_path):
"""Copy file
:type local_path: str
:param local_path: path to local object
:type remote_path: str
:param remote_path: path to remote object
"""
scp = SCPClient(self.connection.get_transport())
scp.put(local_path, recursive=True, remote_path=remote_path)
scp.close()
|
[
"subprocess.Popen",
"paramiko.SSHClient",
"os.getcwd",
"six.add_metaclass",
"paramiko.AutoAddPolicy",
"time.sleep",
"dlab_core.domain.helper.break_after",
"shutil.copyfile",
"os.chdir"
] |
[((1183, 1213), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (1200, 1213), False, 'import six\n'), ((4171, 4187), 'dlab_core.domain.helper.break_after', 'break_after', (['(180)'], {}), '(180)\n', (4182, 4187), False, 'from dlab_core.domain.helper import break_after\n'), ((2424, 2541), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'shell': '(True)', 'universal_newlines': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(command, shell=True, universal_newlines=True, stdout=\n subprocess.PIPE, stderr=subprocess.STDOUT)\n', (2440, 2541), False, 'import subprocess\n'), ((3150, 3161), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3159, 3161), False, 'import os\n'), ((3517, 3550), 'shutil.copyfile', 'copyfile', (['local_path', 'remote_path'], {}), '(local_path, remote_path)\n', (3525, 3550), False, 'from shutil import copyfile\n'), ((4270, 4290), 'paramiko.SSHClient', 'paramiko.SSHClient', ([], {}), '()\n', (4288, 4290), False, 'import paramiko\n'), ((3187, 3201), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (3195, 3201), False, 'import os\n'), ((3249, 3270), 'os.chdir', 'os.chdir', (['current_dir'], {}), '(current_dir)\n', (3257, 3270), False, 'import os\n'), ((4351, 4375), 'paramiko.AutoAddPolicy', 'paramiko.AutoAddPolicy', ([], {}), '()\n', (4373, 4375), False, 'import paramiko\n'), ((4674, 4683), 'time.sleep', 'sleep', (['(10)'], {}), '(10)\n', (4679, 4683), False, 'from time import sleep\n')]
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'acq4/modules/MultiPatch/pipetteTemplate.ui'
#
# Created by: PyQt5 UI code generator 5.8.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import Qt, QtCore, QtGui, QtWidgets
class Ui_PipetteControl(object):
def setupUi(self, PipetteControl):
PipetteControl.setObjectName("PipetteControl")
PipetteControl.resize(333, 75)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(PipetteControl.sizePolicy().hasHeightForWidth())
PipetteControl.setSizePolicy(sizePolicy)
self.gridLayout = QtWidgets.QGridLayout(PipetteControl)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(3)
self.gridLayout.setObjectName("gridLayout")
self.targetBtn = QtWidgets.QPushButton(PipetteControl)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.targetBtn.sizePolicy().hasHeightForWidth())
self.targetBtn.setSizePolicy(sizePolicy)
self.targetBtn.setMaximumSize(Qt.QSize(40, 16777215))
self.targetBtn.setObjectName("targetBtn")
self.gridLayout.addWidget(self.targetBtn, 1, 4, 1, 1)
self.stateCombo = QtWidgets.QComboBox(PipetteControl)
self.stateCombo.setObjectName("stateCombo")
self.stateCombo.addItem("")
self.stateCombo.addItem("")
self.stateCombo.addItem("")
self.stateCombo.addItem("")
self.stateCombo.addItem("")
self.stateCombo.addItem("")
self.stateCombo.addItem("")
self.stateCombo.addItem("")
self.gridLayout.addWidget(self.stateCombo, 0, 3, 1, 2)
self.plotLayoutWidget = QtWidgets.QWidget(PipetteControl)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.plotLayoutWidget.sizePolicy().hasHeightForWidth())
self.plotLayoutWidget.setSizePolicy(sizePolicy)
self.plotLayoutWidget.setObjectName("plotLayoutWidget")
self.plotLayout = QtWidgets.QHBoxLayout(self.plotLayoutWidget)
self.plotLayout.setContentsMargins(0, 0, 0, 0)
self.plotLayout.setSpacing(0)
self.plotLayout.setObjectName("plotLayout")
self.gridLayout.addWidget(self.plotLayoutWidget, 0, 5, 4, 1)
self.selectBtn = QtWidgets.QPushButton(PipetteControl)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.selectBtn.sizePolicy().hasHeightForWidth())
self.selectBtn.setSizePolicy(sizePolicy)
self.selectBtn.setMaximumSize(Qt.QSize(30, 16777215))
font = Qt.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.selectBtn.setFont(font)
self.selectBtn.setCheckable(True)
self.selectBtn.setObjectName("selectBtn")
self.gridLayout.addWidget(self.selectBtn, 0, 0, 4, 1)
self.tipBtn = QtWidgets.QPushButton(PipetteControl)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tipBtn.sizePolicy().hasHeightForWidth())
self.tipBtn.setSizePolicy(sizePolicy)
self.tipBtn.setMaximumSize(Qt.QSize(40, 16777215))
self.tipBtn.setObjectName("tipBtn")
self.gridLayout.addWidget(self.tipBtn, 2, 4, 1, 1)
self.soloBtn = QtWidgets.QPushButton(PipetteControl)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.soloBtn.sizePolicy().hasHeightForWidth())
self.soloBtn.setSizePolicy(sizePolicy)
self.soloBtn.setMaximumSize(Qt.QSize(30, 16777215))
self.soloBtn.setCheckable(True)
self.soloBtn.setObjectName("soloBtn")
self.gridLayout.addWidget(self.soloBtn, 2, 3, 1, 1)
self.lockBtn = QtWidgets.QPushButton(PipetteControl)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lockBtn.sizePolicy().hasHeightForWidth())
self.lockBtn.setSizePolicy(sizePolicy)
self.lockBtn.setMaximumSize(Qt.QSize(30, 16777215))
self.lockBtn.setCheckable(True)
self.lockBtn.setObjectName("lockBtn")
self.gridLayout.addWidget(self.lockBtn, 1, 3, 1, 1)
self.retranslateUi(PipetteControl)
Qt.QMetaObject.connectSlotsByName(PipetteControl)
def retranslateUi(self, PipetteControl):
_translate = Qt.QCoreApplication.translate
PipetteControl.setWindowTitle(_translate("PipetteControl", "Form"))
self.targetBtn.setText(_translate("PipetteControl", "target"))
self.stateCombo.setItemText(0, _translate("PipetteControl", "out"))
self.stateCombo.setItemText(1, _translate("PipetteControl", "bath"))
self.stateCombo.setItemText(2, _translate("PipetteControl", "approach"))
self.stateCombo.setItemText(3, _translate("PipetteControl", "seal"))
self.stateCombo.setItemText(4, _translate("PipetteControl", "attached"))
self.stateCombo.setItemText(5, _translate("PipetteControl", "break in"))
self.stateCombo.setItemText(6, _translate("PipetteControl", "whole cell"))
self.stateCombo.setItemText(7, _translate("PipetteControl", "outside-out"))
self.selectBtn.setText(_translate("PipetteControl", "1"))
self.tipBtn.setText(_translate("PipetteControl", "tip"))
self.soloBtn.setText(_translate("PipetteControl", "Solo"))
self.lockBtn.setText(_translate("PipetteControl", "Lock"))
|
[
"PyQt5.QtWidgets.QComboBox",
"PyQt5.QtWidgets.QSizePolicy",
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QGridLayout",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtWidgets.QHBoxLayout"
] |
[((458, 550), 'PyQt5.QtWidgets.QSizePolicy', 'QtWidgets.QSizePolicy', (['QtWidgets.QSizePolicy.Expanding', 'QtWidgets.QSizePolicy.Expanding'], {}), '(QtWidgets.QSizePolicy.Expanding, QtWidgets.\n QSizePolicy.Expanding)\n', (479, 550), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((791, 828), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', (['PipetteControl'], {}), '(PipetteControl)\n', (812, 828), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((999, 1036), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['PipetteControl'], {}), '(PipetteControl)\n', (1020, 1036), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1058, 1153), 'PyQt5.QtWidgets.QSizePolicy', 'QtWidgets.QSizePolicy', (['QtWidgets.QSizePolicy.MinimumExpanding', 'QtWidgets.QSizePolicy.Fixed'], {}), '(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.\n QSizePolicy.Fixed)\n', (1079, 1153), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1568, 1603), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['PipetteControl'], {}), '(PipetteControl)\n', (1587, 1603), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2039, 2072), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['PipetteControl'], {}), '(PipetteControl)\n', (2056, 2072), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2094, 2186), 'PyQt5.QtWidgets.QSizePolicy', 'QtWidgets.QSizePolicy', (['QtWidgets.QSizePolicy.Expanding', 'QtWidgets.QSizePolicy.Expanding'], {}), '(QtWidgets.QSizePolicy.Expanding, QtWidgets.\n QSizePolicy.Expanding)\n', (2115, 2186), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2505, 2549), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', (['self.plotLayoutWidget'], {}), '(self.plotLayoutWidget)\n', (2526, 2549), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2789, 2826), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['PipetteControl'], {}), '(PipetteControl)\n', (2810, 2826), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2848, 2936), 'PyQt5.QtWidgets.QSizePolicy', 'QtWidgets.QSizePolicy', (['QtWidgets.QSizePolicy.Maximum', 'QtWidgets.QSizePolicy.Minimum'], {}), '(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.\n Minimum)\n', (2869, 2936), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3536, 3573), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['PipetteControl'], {}), '(PipetteControl)\n', (3557, 3573), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3595, 3683), 'PyQt5.QtWidgets.QSizePolicy', 'QtWidgets.QSizePolicy', (['QtWidgets.QSizePolicy.Expanding', 'QtWidgets.QSizePolicy.Fixed'], {}), '(QtWidgets.QSizePolicy.Expanding, QtWidgets.\n QSizePolicy.Fixed)\n', (3616, 3683), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4077, 4114), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['PipetteControl'], {}), '(PipetteControl)\n', (4098, 4114), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4136, 4215), 'PyQt5.QtWidgets.QSizePolicy', 'QtWidgets.QSizePolicy', (['QtWidgets.QSizePolicy.Fixed', 'QtWidgets.QSizePolicy.Fixed'], {}), '(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n', (4157, 4215), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4660, 4697), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['PipetteControl'], {}), '(PipetteControl)\n', (4681, 4697), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4719, 4798), 'PyQt5.QtWidgets.QSizePolicy', 'QtWidgets.QSizePolicy', (['QtWidgets.QSizePolicy.Fixed', 
'QtWidgets.QSizePolicy.Fixed'], {}), '(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n', (4740, 4798), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')]
|
import sys
import string
import datetime
import logging
from nltk.tokenize import word_tokenize
from transformers import BertTokenizer
logger = logging.getLogger(__name__)
def decode_preprocessing(dataset, output_file, tokenizer, max_len, mode):
with open(dataset, 'r') as fin:
input_lines = fin.readlines()
starttime = datetime.datetime.now()
if mode == 'sent_line':
lines = []
for line in input_lines:
tok_line = word_tokenize(line.strip())
lines += [token + ' O\n' for token in tok_line] + ['\n']
else:
lines = input_lines
subword_len_counter = 0
last_punc_buffer = ""
output = ""
for line in lines:
line_copy = line
line = line.rstrip()
if not line:
# print(line)
output += line + '\n'
last_punc_buffer = ""
subword_len_counter = 0
continue
token = line.split()[0]
current_subwords_len = len(tokenizer.tokenize(token))
# Token contains strange control characters like \x96 or \x95
# Just filter out the complete line
if current_subwords_len == 0:
continue
if all(char in string.punctuation for char in token) and line.split()[1] == 'O':
last_punc_buffer = ""
else:
last_punc_buffer += line_copy
if (subword_len_counter + current_subwords_len) > max_len:
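            # Adding this token would exceed max_len subwords: end the current example and start a new one from the buffered tokens.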
# print("")
output += '\n'
# print(last_punc_buffer.rstrip())
output += last_punc_buffer.rstrip() + '\n'
subword_len_counter = len(last_punc_buffer.split('\n'))
last_punc_buffer = ""
continue
subword_len_counter += current_subwords_len
# print(line)
output += line + '\n'
endtime = datetime.datetime.now()
duration = (endtime-starttime).total_seconds()
logger.info(duration)
with open(output_file, 'w') as fout:
fout.write(output)
return
|
[
"datetime.datetime.now",
"logging.getLogger"
] |
[((145, 172), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (162, 172), False, 'import logging\n'), ((343, 366), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (364, 366), False, 'import datetime\n'), ((1844, 1867), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1865, 1867), False, 'import datetime\n')]
|
import socket
class VMUDPBase:
def __init__(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.settimeout(2.0)
|
[
"socket.socket"
] |
[((77, 125), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (90, 125), False, 'import socket\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-10 17:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('locations', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='locationformat',
name='char_definition',
field=models.CharField(help_text="Determine the character position definition where alpha='\\a', numeric='\\d', punctuation='\\p', or any hard coded character. ex. \\a\\d\\d\\d could be B001 or \\a@\\d\\d could be D@99.", max_length=250, verbose_name='Format'),
),
]
|
[
"django.db.models.CharField"
] |
[((409, 673), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Determine the character position definition where alpha=\'\\\\a\', numeric=\'\\\\d\', punctuation=\'\\\\p\', or any hard coded character. ex. \\\\a\\\\d\\\\d\\\\d could be B001 or \\\\a@\\\\d\\\\d could be D@99."""', 'max_length': '(250)', 'verbose_name': '"""Format"""'}), '(help_text=\n "Determine the character position definition where alpha=\'\\\\a\', numeric=\'\\\\d\', punctuation=\'\\\\p\', or any hard coded character. ex. \\\\a\\\\d\\\\d\\\\d could be B001 or \\\\a@\\\\d\\\\d could be D@99."\n , max_length=250, verbose_name=\'Format\')\n', (425, 673), False, 'from django.db import migrations, models\n')]
|
# This file will implement images
import os
from skimage.io import imsave
def visualize_image(image, image_name):
"""Given an image, will save the image to the figures directory
Parameters:
image: a [N,M,3] tensor
        image_name (str): name of the image (written to ../figures/<image_name>.jpg)
"""
image_path = os.path.join("../figures", image_name + ".jpg")
print(image_path)
imsave(image_path, image)
|
[
"os.path.join",
"skimage.io.imsave"
] |
[((301, 348), 'os.path.join', 'os.path.join', (['"""../figures"""', "(image_name + '.jpg')"], {}), "('../figures', image_name + '.jpg')\n", (313, 348), False, 'import os\n'), ((376, 401), 'skimage.io.imsave', 'imsave', (['image_path', 'image'], {}), '(image_path, image)\n', (382, 401), False, 'from skimage.io import imsave\n')]
|
import pickle
import numpy as np
with open('data/fake.pkl', 'rb') as f:
points, labels, scores, keys = pickle.load(f)
with open('data/fake_gt.pkl', 'rb') as f:
gt_points, gt_bboxes, gt_labels, gt_areas, gt_crowdeds = pickle.load(f)
gt_points_yx = []
gt_point_is_valids = []
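# Split each ground-truth keypoint array (y, x, is_valid) into coordinate pairs and validity flags.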
for gt_point in gt_points:
gt_point_yx = []
gt_point_is_valid = []
for pnt in gt_point:
gt_point_yx.append(pnt[:, :2])
gt_point_is_valid.append(pnt[:, 2])
gt_points_yx.append(gt_point_yx)
gt_point_is_valids.append(gt_point_is_valid)
points_yx = []
for point in points:
point_yx = []
for pnt in point:
point_yx.append(pnt[:, :2])
points_yx.append(point_yx)
np.savez('eval_point_coco_dataset_2019_02_18.npz',
points=gt_points_yx,
is_valids=gt_point_is_valids,
bboxes=gt_bboxes,
labels=gt_labels,
areas=gt_areas,
crowdeds=gt_crowdeds)
np.savez('eval_point_coco_result_2019_02_18.npz',
points=points_yx,
scores=scores,
labels=labels,)
|
[
"numpy.savez",
"pickle.load"
] |
[((700, 884), 'numpy.savez', 'np.savez', (['"""eval_point_coco_dataset_2019_02_18.npz"""'], {'points': 'gt_points_yx', 'is_valids': 'gt_point_is_valids', 'bboxes': 'gt_bboxes', 'labels': 'gt_labels', 'areas': 'gt_areas', 'crowdeds': 'gt_crowdeds'}), "('eval_point_coco_dataset_2019_02_18.npz', points=gt_points_yx,\n is_valids=gt_point_is_valids, bboxes=gt_bboxes, labels=gt_labels, areas\n =gt_areas, crowdeds=gt_crowdeds)\n", (708, 884), True, 'import numpy as np\n'), ((930, 1032), 'numpy.savez', 'np.savez', (['"""eval_point_coco_result_2019_02_18.npz"""'], {'points': 'points_yx', 'scores': 'scores', 'labels': 'labels'}), "('eval_point_coco_result_2019_02_18.npz', points=points_yx, scores=\n scores, labels=labels)\n", (938, 1032), True, 'import numpy as np\n'), ((109, 123), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (120, 123), False, 'import pickle\n'), ((228, 242), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (239, 242), False, 'import pickle\n')]
|
import pytest
from reggol import strip_prefix
from testfixtures import LogCapture
from ravestate import *
DEFAULT_MODULE_NAME = 'module'
DEFAULT_PROPERTY_NAME = 'property'
DEFAULT_PROPERTY_ID = f"{DEFAULT_MODULE_NAME}:{DEFAULT_PROPERTY_NAME}"
DEFAULT_PROPERTY_VALUE = 'Kruder'
DEFAULT_PROPERTY_CHANGED = f"{DEFAULT_PROPERTY_ID}:changed"
NEW_PROPERTY_VALUE = 'Dorfmeister'
DEFAULT_PROPERTY = Property(name=DEFAULT_PROPERTY_NAME, default_value=DEFAULT_PROPERTY_VALUE)
DEFAULT_PROPERTY.set_parent_path(DEFAULT_MODULE_NAME)
SIGNAL_A = SignalRef(f"{DEFAULT_MODULE_NAME}:a")
SIGNAL_B = SignalRef(f"{DEFAULT_MODULE_NAME}:b")
SIGNAL_C = SignalRef(f"{DEFAULT_MODULE_NAME}:c")
SIGNAL_D = SignalRef(f"{DEFAULT_MODULE_NAME}:d")
@pytest.fixture
def state_fixture(mocker):
@state(write=(DEFAULT_PROPERTY,), read=(DEFAULT_PROPERTY,))
def state_mock_fn(ctx):
ctx[DEFAULT_PROPERTY] = "test"
state_mock_fn.module_name = DEFAULT_MODULE_NAME
return state_mock_fn
@pytest.fixture
def state_signal_a_fixture(mocker):
@state(read=(DEFAULT_PROPERTY,), signal=SIGNAL_A)
def state_mock_a_fn(ctx):
pass
state_mock_a_fn.module_name = DEFAULT_MODULE_NAME
return state_mock_a_fn
@pytest.fixture
def state_signal_b_fixture(mocker):
@state(signal=SIGNAL_B, cond=SIGNAL_A)
def state_mock_b_fn(ctx):
pass
state_mock_b_fn.module_name = DEFAULT_MODULE_NAME
return state_mock_b_fn
@pytest.fixture
def state_signal_c_fixture(mocker):
@state(signal=SIGNAL_C, cond=SIGNAL_A)
def state_mock_c_fn(ctx):
pass
state_mock_c_fn.module_name = DEFAULT_MODULE_NAME
return state_mock_c_fn
@pytest.fixture
def state_signal_d_fixture(mocker):
@state(signal=SIGNAL_D, cond=SIGNAL_B | SIGNAL_C)
def state_mock_c_fn(ctx):
pass
state_mock_c_fn.module_name = DEFAULT_MODULE_NAME
return state_mock_c_fn
@pytest.fixture
def context_fixture(mocker):
return Context()
@pytest.fixture
def context_with_property_fixture(mocker, context_fixture) -> Context:
context_fixture.add_prop(prop=DEFAULT_PROPERTY)
mocker.patch.object(context_fixture, 'add_prop')
return context_fixture
@pytest.fixture
def context_with_property_and_state_fixture(mocker, context_with_property_fixture, state_fixture):
context_with_property_fixture.add_state(st=state_fixture)
mocker.patch.object(context_with_property_fixture, 'add_state')
return context_with_property_fixture
@pytest.fixture
def context_wrapper_fixture(context_with_property_fixture, state_fixture):
return ContextWrapper(ctx=context_with_property_fixture, state=state_fixture)
@pytest.fixture
def activation_fixture(state_fixture: State, context_with_property_and_state_fixture: Context):
return Activation(state_fixture, context_with_property_and_state_fixture)
@pytest.fixture
def activation_fixture_fallback(activation_fixture: Activation):
activation_fixture.state_to_activate.write_props = None
return activation_fixture
@pytest.fixture
def spike_fixture():
return Spike(sig=DEFAULT_PROPERTY_CHANGED)
@pytest.fixture
def triple_fixture(mocker):
token_mock = mocker.Mock()
token_mock.children = ()
from ravestate_nlp import Triple
return Triple(token_mock, token_mock, token_mock)
|
[
"ravestate_nlp.Triple"
] |
[((3237, 3279), 'ravestate_nlp.Triple', 'Triple', (['token_mock', 'token_mock', 'token_mock'], {}), '(token_mock, token_mock, token_mock)\n', (3243, 3279), False, 'from ravestate_nlp import Triple\n')]
|
from scipy.ndimage.filters import maximum_filter as _max_filter
from scipy.ndimage.morphology import binary_erosion as _binary_erosion
from skimage.feature import peak_local_max
def detect_skimage(image, neighborhood, threshold=1e-12):
"""Detect peaks using a local maximum filter (via skimage)
Parameters
----------
image : numpy.ndarray (2D)
The imagery to find the local maxima of
neighborhood : numpy.ndarray (2D)
A boolean matrix specifying a scanning window for maxima detection.
The neigborhood size is implicitly defined by the matrix dimensions.
threshold : float
The minimum acceptable value of a peak
Returns
-------
numpy.ndarray (2D)
A boolean matrix specifying maxima locations (True) and background
locations (False)
"""
return peak_local_max(image,
footprint=neighborhood,
threshold_abs=threshold,
indices=False)
def detect_maximum_filter(image, neighborhood, threshold=1e-12):
"""Detect peaks using a local maximum filter
Code courtesy https://stackoverflow.com/a/3689710 (adapted slightly).
Parameters
----------
image : numpy.ndarray (2D)
The imagery to find the local maxima of
neighborhood : numpy.ndarray (2D)
A boolean matrix specifying a scanning window for maxima detection.
The neigborhood size is implicitly defined by the matrix dimensions.
threshold : float
The minimum acceptable value of a peak
Returns
-------
numpy.ndarray (2D)
A boolean matrix specifying maxima locations (True) and background
locations (False)
"""
# Apply the local maximum filter, then remove any background (below
# threshold) values from our result.
detected_peaks = _max_filter(image, footprint=neighborhood) == image
detected_peaks[image < threshold] = False
return detected_peaks
|
[
"scipy.ndimage.filters.maximum_filter",
"skimage.feature.peak_local_max"
] |
[((839, 928), 'skimage.feature.peak_local_max', 'peak_local_max', (['image'], {'footprint': 'neighborhood', 'threshold_abs': 'threshold', 'indices': '(False)'}), '(image, footprint=neighborhood, threshold_abs=threshold,\n indices=False)\n', (853, 928), False, 'from skimage.feature import peak_local_max\n'), ((1856, 1898), 'scipy.ndimage.filters.maximum_filter', '_max_filter', (['image'], {'footprint': 'neighborhood'}), '(image, footprint=neighborhood)\n', (1867, 1898), True, 'from scipy.ndimage.filters import maximum_filter as _max_filter\n')]
|
# -*- coding: utf-8 -*-
import random
"""
Binary insertion sort, time complexity O(n^2)
"""
class BinaryInsertion(object):
def __init__(self, original_list):
self.original_list = original_list
def sort(self):
length = len(self.original_list)
for i in range(1, length):
self.binary(start=0, end=i-1, current=i)
def binary(self, start, end, current):
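        # Binary-search the sorted prefix [start, end] for the insertion point of the element at `current`,
        # insert a copy of it there, then delete the original element (now shifted to index current + 1).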
cursor = int((end + start) / 2) if end != start else end
if (end == start) or (cursor == 0) or (self.original_list[current] == self.original_list[cursor]):
if self.original_list[current] >= self.original_list[cursor]:
self.original_list.insert(cursor+1, self.original_list[current])
else:
self.original_list.insert(cursor, self.original_list[current])
del self.original_list[current+1]
elif self.original_list[current] > self.original_list[cursor]:
self.binary(cursor+1, end, current)
elif self.original_list[current] < self.original_list[cursor]:
self.binary(start, cursor-1, current)
if __name__ == '__main__':
my_list = [random.randint(0, 100) for _ in range(0, 10)]
print("before sort: {}".format(my_list))
BinaryInsertion(my_list).sort()
print("after sort: {}".format(my_list))
|
[
"random.randint"
] |
[((1127, 1149), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (1141, 1149), False, 'import random\n')]
|
"""models.py - Contains class definitions for Datastore entities
used by the Concentration Game API. Definitions for User, Game, and
Score classes, with associated methods. Additionally, contains
definitions for Forms used in transmitting messages to users."""
### Imports
import random
import pickle
from datetime import date
import httplib
import endpoints
from protorpc import messages
from google.appengine.ext import ndb
### Import game logic
import game as gm
### User Related Classes and Methods
class User(ndb.Model):
"""User profile"""
name = ndb.StringProperty(required=True)
email = ndb.StringProperty()
total_games = ndb.IntegerProperty(default = 0)
total_score = ndb.IntegerProperty(default = 0)
avg_score = ndb.FloatProperty(default = 0)
def to_form(self):
"""Returns a UserForm representation of a User"""
form = UserForm()
form.name = self.name
form.urlsafe_key = self.key.urlsafe()
form.total_games = self.total_games
form.total_score = self.total_score
form.avg_score = round(self.avg_score)
return form
def calc_score(self):
"""Calculate the player's average score -- to be
called whenever a new game is won"""
avg_score = self.total_score / self.total_games
return avg_score
### Game Related Class and Methods
class Game(ndb.Model):
"""Game object"""
board = ndb.StringProperty(repeated=True)
boardState = ndb.StringProperty(repeated=True)
guesses = ndb.IntegerProperty(required=True, default=0)
cards = ndb.IntegerProperty(required=True, default=52)
status = ndb.StringProperty(required=True, default='In Progress')
user = ndb.KeyProperty(required=True, kind='User')
history = ndb.PickleProperty(repeated=True)
score = ndb.FloatProperty()
@classmethod
def new_game(self, user, cards=52):
"""Creates and returns a new game"""
if cards < 8 or cards > 52 or cards % 2 != 0:
raise ValueError('Cards dealt must be an even number between 8 and 52')
newGame = Game(board=gm.constructBoard(cards),
boardState=gm.initialBoardState(cards),
guesses=0,
cards=cards,
status='In Progress',
user=user)
newGame.put()
return newGame
def to_form(self, message):
"""Returns a GameForm representation of the Game"""
form = GameForm()
form.urlsafe_key = self.key.urlsafe()
form.user_name = self.user.get().name
form.guesses = self.guesses
form.cards = self.cards
form.status = self.status
form.message = message
form.boardState = self.boardState
return form
def to_mini_form(self):
"""Return a MiniGameForm representation of a Game"""
form = MiniGameForm()
form.urlsafe_key = self.key.urlsafe()
form.guesses = self.guesses
form.cards = self.cards
form.status = self.status
return form
def to_history_form(self):
"""Returns a game history form after a game has been won"""
form = HistoryForm()
form.urlsafe_key = self.key.urlsafe()
form.cards = self.cards
form.guesses = self.guesses
form.board = self.board
form.score = self.score
form.history = [h for h in self.history]
return form
def win_game(self):
"""Updates score and user information once game is won"""
# Add the game to the score 'board'
total_score = int(round((self.cards ** 4) / self.guesses))
self.score = total_score
self.put()
score = Score(user=self.user, date=date.today(), cards=self.cards,
guesses=self.guesses, score=total_score)
score.put()
user = self.user.get()
# Add the current score to the user's total score, but handle error
# if user's current score is 0
try:
user.total_score += total_score
except TypeError:
user.total_score = total_score
user.put()
user.avg_score = user.calc_score()
user.put()
### Score Class and Methods
class Score(ndb.Model):
"""Score object"""
user = ndb.KeyProperty(required=True, kind='User')
date = ndb.DateProperty(required=True)
cards = ndb.IntegerProperty(required=True)
guesses = ndb.IntegerProperty(required=True)
score = ndb.FloatProperty(required=True)
def to_form(self):
return ScoreForm(user_name=self.user.get().name,
cards=self.cards,
date=str(self.date),
guesses=self.guesses,
score=self.score)
### Game Forms -- Display
class GameForm(messages.Message):
"""GameForm for outbound game state information"""
urlsafe_key = messages.StringField(1)
guesses = messages.IntegerField(2)
status = messages.StringField(3)
message = messages.StringField(4)
boardState = messages.StringField(5, repeated=True)
user_name = messages.StringField(6)
cards = messages.IntegerField(7)
class MiniGameForm(messages.Message):
"""Abbreviated Game Form for reporting, rather than play purposes"""
urlsafe_key = messages.StringField(1)
guesses = messages.IntegerField(2)
cards = messages.IntegerField(3)
status = messages.StringField(4)
class HistoryForm(messages.Message):
"""Form to display a game history, as well as score information"""
urlsafe_key = messages.StringField(1)
cards = messages.IntegerField(2)
guesses = messages.IntegerField(3)
board = messages.StringField(4, repeated=True)
score = messages.FloatField(5)
history = messages.StringField(6, repeated=True)
class MiniGameForms(messages.Message):
"""Hold a list of abbreviated Game Forms"""
games = messages.MessageField(MiniGameForm, 1, repeated=True)
class NewGameForm(messages.Message):
"""Used to create a new game"""
user_name = messages.StringField(1, required=True)
cards = messages.IntegerField(2, default=52)
### Gameplay Forms
class FlipCardForm(messages.Message):
"""Form to allow players to guess a card by supplying its index"""
queryCard = messages.IntegerField(1, required=True)
class CardForm(messages.Message):
"""Form to respond to player guess by revealing a card value"""
cardValue = messages.StringField(1)
class MakeGuessForm(messages.Message):
"""Used to make a move in an existing game"""
card1 = messages.IntegerField(1, required=True)
card2 = messages.IntegerField(2, required=True)
class HintForm(messages.Message):
"""Send the index of a matching card (hint) back to a user"""
hint = messages.IntegerField(1, required=True)
### Score Forms
class ScoreForm(messages.Message):
"""ScoreForm for outbound Score information"""
user_name = messages.StringField(1, required=True)
date = messages.StringField(2, required=True)
cards = messages.IntegerField(3, required=True)
guesses = messages.IntegerField(4, required=True)
score = messages.FloatField(5, required=True)
class ScoreForms(messages.Message):
"""Return multiple ScoreForms"""
items = messages.MessageField(ScoreForm, 1, repeated=True)
## User and Rankings Message Classes
class UserForm(messages.Message):
"""User detail form"""
name = messages.StringField(1)
urlsafe_key = messages.StringField(2)
total_games = messages.IntegerField(3)
total_score = messages.IntegerField(4)
avg_score = messages.FloatField(5)
class UserForms(messages.Message):
"""Return information mulitiple users for ranking"""
users = messages.MessageField(UserForm, 1, repeated=True)
### Assorted Message Classes
class StringMessage(messages.Message):
"""StringMessage-- outbound (single) string message"""
message = messages.StringField(1, required=True)
|
[
"google.appengine.ext.ndb.FloatProperty",
"protorpc.messages.FloatField",
"datetime.date.today",
"protorpc.messages.IntegerField",
"protorpc.messages.StringField",
"game.constructBoard",
"google.appengine.ext.ndb.PickleProperty",
"google.appengine.ext.ndb.IntegerProperty",
"google.appengine.ext.ndb.StringProperty",
"google.appengine.ext.ndb.DateProperty",
"google.appengine.ext.ndb.KeyProperty",
"protorpc.messages.MessageField",
"game.initialBoardState"
] |
[((568, 601), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {'required': '(True)'}), '(required=True)\n', (586, 601), False, 'from google.appengine.ext import ndb\n'), ((614, 634), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (632, 634), False, 'from google.appengine.ext import ndb\n'), ((653, 683), 'google.appengine.ext.ndb.IntegerProperty', 'ndb.IntegerProperty', ([], {'default': '(0)'}), '(default=0)\n', (672, 683), False, 'from google.appengine.ext import ndb\n'), ((704, 734), 'google.appengine.ext.ndb.IntegerProperty', 'ndb.IntegerProperty', ([], {'default': '(0)'}), '(default=0)\n', (723, 734), False, 'from google.appengine.ext import ndb\n'), ((753, 781), 'google.appengine.ext.ndb.FloatProperty', 'ndb.FloatProperty', ([], {'default': '(0)'}), '(default=0)\n', (770, 781), False, 'from google.appengine.ext import ndb\n'), ((1428, 1461), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {'repeated': '(True)'}), '(repeated=True)\n', (1446, 1461), False, 'from google.appengine.ext import ndb\n'), ((1479, 1512), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {'repeated': '(True)'}), '(repeated=True)\n', (1497, 1512), False, 'from google.appengine.ext import ndb\n'), ((1527, 1572), 'google.appengine.ext.ndb.IntegerProperty', 'ndb.IntegerProperty', ([], {'required': '(True)', 'default': '(0)'}), '(required=True, default=0)\n', (1546, 1572), False, 'from google.appengine.ext import ndb\n'), ((1585, 1631), 'google.appengine.ext.ndb.IntegerProperty', 'ndb.IntegerProperty', ([], {'required': '(True)', 'default': '(52)'}), '(required=True, default=52)\n', (1604, 1631), False, 'from google.appengine.ext import ndb\n'), ((1645, 1701), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {'required': '(True)', 'default': '"""In Progress"""'}), "(required=True, default='In Progress')\n", (1663, 1701), False, 'from google.appengine.ext import ndb\n'), ((1713, 1756), 'google.appengine.ext.ndb.KeyProperty', 'ndb.KeyProperty', ([], {'required': '(True)', 'kind': '"""User"""'}), "(required=True, kind='User')\n", (1728, 1756), False, 'from google.appengine.ext import ndb\n'), ((1771, 1804), 'google.appengine.ext.ndb.PickleProperty', 'ndb.PickleProperty', ([], {'repeated': '(True)'}), '(repeated=True)\n', (1789, 1804), False, 'from google.appengine.ext import ndb\n'), ((1817, 1836), 'google.appengine.ext.ndb.FloatProperty', 'ndb.FloatProperty', ([], {}), '()\n', (1834, 1836), False, 'from google.appengine.ext import ndb\n'), ((4300, 4343), 'google.appengine.ext.ndb.KeyProperty', 'ndb.KeyProperty', ([], {'required': '(True)', 'kind': '"""User"""'}), "(required=True, kind='User')\n", (4315, 4343), False, 'from google.appengine.ext import ndb\n'), ((4355, 4386), 'google.appengine.ext.ndb.DateProperty', 'ndb.DateProperty', ([], {'required': '(True)'}), '(required=True)\n', (4371, 4386), False, 'from google.appengine.ext import ndb\n'), ((4399, 4433), 'google.appengine.ext.ndb.IntegerProperty', 'ndb.IntegerProperty', ([], {'required': '(True)'}), '(required=True)\n', (4418, 4433), False, 'from google.appengine.ext import ndb\n'), ((4448, 4482), 'google.appengine.ext.ndb.IntegerProperty', 'ndb.IntegerProperty', ([], {'required': '(True)'}), '(required=True)\n', (4467, 4482), False, 'from google.appengine.ext import ndb\n'), ((4495, 4527), 'google.appengine.ext.ndb.FloatProperty', 'ndb.FloatProperty', ([], {'required': '(True)'}), '(required=True)\n', (4512, 4527), False, 'from 
google.appengine.ext import ndb\n'), ((4927, 4950), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {}), '(1)\n', (4947, 4950), False, 'from protorpc import messages\n'), ((4965, 4989), 'protorpc.messages.IntegerField', 'messages.IntegerField', (['(2)'], {}), '(2)\n', (4986, 4989), False, 'from protorpc import messages\n'), ((5003, 5026), 'protorpc.messages.StringField', 'messages.StringField', (['(3)'], {}), '(3)\n', (5023, 5026), False, 'from protorpc import messages\n'), ((5041, 5064), 'protorpc.messages.StringField', 'messages.StringField', (['(4)'], {}), '(4)\n', (5061, 5064), False, 'from protorpc import messages\n'), ((5082, 5120), 'protorpc.messages.StringField', 'messages.StringField', (['(5)'], {'repeated': '(True)'}), '(5, repeated=True)\n', (5102, 5120), False, 'from protorpc import messages\n'), ((5137, 5160), 'protorpc.messages.StringField', 'messages.StringField', (['(6)'], {}), '(6)\n', (5157, 5160), False, 'from protorpc import messages\n'), ((5173, 5197), 'protorpc.messages.IntegerField', 'messages.IntegerField', (['(7)'], {}), '(7)\n', (5194, 5197), False, 'from protorpc import messages\n'), ((5329, 5352), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {}), '(1)\n', (5349, 5352), False, 'from protorpc import messages\n'), ((5367, 5391), 'protorpc.messages.IntegerField', 'messages.IntegerField', (['(2)'], {}), '(2)\n', (5388, 5391), False, 'from protorpc import messages\n'), ((5404, 5428), 'protorpc.messages.IntegerField', 'messages.IntegerField', (['(3)'], {}), '(3)\n', (5425, 5428), False, 'from protorpc import messages\n'), ((5442, 5465), 'protorpc.messages.StringField', 'messages.StringField', (['(4)'], {}), '(4)\n', (5462, 5465), False, 'from protorpc import messages\n'), ((5593, 5616), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {}), '(1)\n', (5613, 5616), False, 'from protorpc import messages\n'), ((5629, 5653), 'protorpc.messages.IntegerField', 'messages.IntegerField', (['(2)'], {}), '(2)\n', (5650, 5653), False, 'from protorpc import messages\n'), ((5668, 5692), 'protorpc.messages.IntegerField', 'messages.IntegerField', (['(3)'], {}), '(3)\n', (5689, 5692), False, 'from protorpc import messages\n'), ((5705, 5743), 'protorpc.messages.StringField', 'messages.StringField', (['(4)'], {'repeated': '(True)'}), '(4, repeated=True)\n', (5725, 5743), False, 'from protorpc import messages\n'), ((5756, 5778), 'protorpc.messages.FloatField', 'messages.FloatField', (['(5)'], {}), '(5)\n', (5775, 5778), False, 'from protorpc import messages\n'), ((5793, 5831), 'protorpc.messages.StringField', 'messages.StringField', (['(6)'], {'repeated': '(True)'}), '(6, repeated=True)\n', (5813, 5831), False, 'from protorpc import messages\n'), ((5933, 5986), 'protorpc.messages.MessageField', 'messages.MessageField', (['MiniGameForm', '(1)'], {'repeated': '(True)'}), '(MiniGameForm, 1, repeated=True)\n', (5954, 5986), False, 'from protorpc import messages\n'), ((6078, 6116), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {'required': '(True)'}), '(1, required=True)\n', (6098, 6116), False, 'from protorpc import messages\n'), ((6129, 6165), 'protorpc.messages.IntegerField', 'messages.IntegerField', (['(2)'], {'default': '(52)'}), '(2, default=52)\n', (6150, 6165), False, 'from protorpc import messages\n'), ((6313, 6352), 'protorpc.messages.IntegerField', 'messages.IntegerField', (['(1)'], {'required': '(True)'}), '(1, required=True)\n', (6334, 6352), False, 'from protorpc import messages\n'), ((6473, 6496), 
'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {}), '(1)\n', (6493, 6496), False, 'from protorpc import messages\n'), ((6600, 6639), 'protorpc.messages.IntegerField', 'messages.IntegerField', (['(1)'], {'required': '(True)'}), '(1, required=True)\n', (6621, 6639), False, 'from protorpc import messages\n'), ((6652, 6691), 'protorpc.messages.IntegerField', 'messages.IntegerField', (['(2)'], {'required': '(True)'}), '(2, required=True)\n', (6673, 6691), False, 'from protorpc import messages\n'), ((6805, 6844), 'protorpc.messages.IntegerField', 'messages.IntegerField', (['(1)'], {'required': '(True)'}), '(1, required=True)\n', (6826, 6844), False, 'from protorpc import messages\n'), ((6966, 7004), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {'required': '(True)'}), '(1, required=True)\n', (6986, 7004), False, 'from protorpc import messages\n'), ((7016, 7054), 'protorpc.messages.StringField', 'messages.StringField', (['(2)'], {'required': '(True)'}), '(2, required=True)\n', (7036, 7054), False, 'from protorpc import messages\n'), ((7067, 7106), 'protorpc.messages.IntegerField', 'messages.IntegerField', (['(3)'], {'required': '(True)'}), '(3, required=True)\n', (7088, 7106), False, 'from protorpc import messages\n'), ((7121, 7160), 'protorpc.messages.IntegerField', 'messages.IntegerField', (['(4)'], {'required': '(True)'}), '(4, required=True)\n', (7142, 7160), False, 'from protorpc import messages\n'), ((7173, 7210), 'protorpc.messages.FloatField', 'messages.FloatField', (['(5)'], {'required': '(True)'}), '(5, required=True)\n', (7192, 7210), False, 'from protorpc import messages\n'), ((7298, 7348), 'protorpc.messages.MessageField', 'messages.MessageField', (['ScoreForm', '(1)'], {'repeated': '(True)'}), '(ScoreForm, 1, repeated=True)\n', (7319, 7348), False, 'from protorpc import messages\n'), ((7461, 7484), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {}), '(1)\n', (7481, 7484), False, 'from protorpc import messages\n'), ((7503, 7526), 'protorpc.messages.StringField', 'messages.StringField', (['(2)'], {}), '(2)\n', (7523, 7526), False, 'from protorpc import messages\n'), ((7545, 7569), 'protorpc.messages.IntegerField', 'messages.IntegerField', (['(3)'], {}), '(3)\n', (7566, 7569), False, 'from protorpc import messages\n'), ((7588, 7612), 'protorpc.messages.IntegerField', 'messages.IntegerField', (['(4)'], {}), '(4)\n', (7609, 7612), False, 'from protorpc import messages\n'), ((7629, 7651), 'protorpc.messages.FloatField', 'messages.FloatField', (['(5)'], {}), '(5)\n', (7648, 7651), False, 'from protorpc import messages\n'), ((7758, 7807), 'protorpc.messages.MessageField', 'messages.MessageField', (['UserForm', '(1)'], {'repeated': '(True)'}), '(UserForm, 1, repeated=True)\n', (7779, 7807), False, 'from protorpc import messages\n'), ((7952, 7990), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {'required': '(True)'}), '(1, required=True)\n', (7972, 7990), False, 'from protorpc import messages\n'), ((2107, 2131), 'game.constructBoard', 'gm.constructBoard', (['cards'], {}), '(cards)\n', (2124, 2131), True, 'import game as gm\n'), ((2164, 2191), 'game.initialBoardState', 'gm.initialBoardState', (['cards'], {}), '(cards)\n', (2184, 2191), True, 'import game as gm\n'), ((3742, 3754), 'datetime.date.today', 'date.today', ([], {}), '()\n', (3752, 3754), False, 'from datetime import date\n')]
|
import os
import sys
import shutil
from io import BytesIO
from functools import wraps
from textwrap import dedent
from random import choice, shuffle
from collections import defaultdict
import urllib.request
required = ["locmaker.py","locmaker_README.txt","Run.cmd"]
for item in required:
print('Downloading '+item+'...')
url = 'https://raw.github.com/Chupachu/LocMaker/master/'+item
urllib.request.urlretrieve(url, item)
optionals = ["countries.txt","ideologies.txt","out.yml"]
for item in optionals:
if not os.path.isfile(item):
print('Downloading '+item+'...')
url = 'https://raw.github.com/Chupachu/LocMaker/master/'+item
urllib.request.urlretrieve(url, item)
|
[
"os.path.isfile"
] |
[((531, 551), 'os.path.isfile', 'os.path.isfile', (['item'], {}), '(item)\n', (545, 551), False, 'import os\n')]
|
'''
This contains tests for the parse and get_bad_paths methods.
'''
import unittest
import parser
class TestParser(unittest.TestCase):
'''
This contains tests for the parse and get_bad_paths methods.
'''
def test_empty(self):
''' Parse an empty string.'''
self.assertEqual(parser.parse(''), [])
def test_one_disallow(self):
''' Parse a string with one disallow.'''
self.assertEqual(parser.parse("Disallow: /stuff/"), ['/stuff/'])
def test_two_disallows(self):
''' Parse a string with two disallows.'''
self.assertEqual(parser.parse("Disallow: /stuff/\nDisallow: /home/"), ['/stuff/', '/home/'])
def test_allow(self):
        ''' Parse a string with an allow statement.'''
self.assertEqual(parser.parse("Allow: /stuff/"), [])
def test_applicable_useragent(self):
''' Parse a string with a user-agent and a relevant disallow.'''
self.assertEqual(parser.parse("User-agent: * \nDisallow: /stuff/"), ['/stuff/'])
def test_not_applicable_useragent(self):
''' Parse a string with an unknown user-agent and a disallow that is ignored.'''
self.assertEqual(parser.parse("User-agent: someone else \nDisallow: /stuff/"), [])
def test_basic_page(self):
''' Test a simple robots.txt page. '''
expected_result = ['/cgi-bin/', '/rcs/', '/~sibel/poetry/poems/', '/~sibel/poetry/books/', '/~musser/dagproc']
self.assertEqual(parser.get_bad_paths("http://cs.rpi.edu/robots.txt"), expected_result)
def test_nonexistent_page(self):
''' Test a page that doesn't exist.'''
self.assertEqual(parser.get_bad_paths("http://rpi.edu/robots.taxt"), [])
def test_targeted_disallows(self):
''' Test a page that has targeted disallows.'''
expected_result = ['/feed/', '/c/accounts/', '/c/crontab/', '/c/graphics/', '/c/locale/', '/c.new/', '/c.bak/', '/c_hacks', '/c/pinc/', '/c/setup/', '/c/stats/', '/c/tools/', '/c/users/', '/down/', '/dpmail/', '/d', '/out', '/jpgraph/', '/jpgraph-1.14', '/archive', '/projects', '/mailman/', '/noncvs', '/phpbb2', '/phpbb3', '/phpbb-3.2.0', '/phpmyadmin', '/sawiki', '/squirrels', '/stats/', '/tools', '/w', '/wikiheiro']
self.assertEqual(parser.get_bad_paths("https://www.pgdp.net/robots.txt"), expected_result)
def test_allows(self):
''' Test a page that has allows.'''
self.assertEqual(parser.get_bad_paths("https://www.choiceofgames.com/robots.txt"), [])
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"parser.parse",
"parser.get_bad_paths"
] |
[((2541, 2556), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2554, 2556), False, 'import unittest\n'), ((317, 333), 'parser.parse', 'parser.parse', (['""""""'], {}), "('')\n", (329, 333), False, 'import parser\n'), ((447, 480), 'parser.parse', 'parser.parse', (['"""Disallow: /stuff/"""'], {}), "('Disallow: /stuff/')\n", (459, 480), False, 'import parser\n'), ((605, 661), 'parser.parse', 'parser.parse', (['"""Disallow: /stuff/\nDisallow: /home/"""'], {}), '("""Disallow: /stuff/\nDisallow: /home/""")\n', (617, 661), False, 'import parser\n'), ((790, 820), 'parser.parse', 'parser.parse', (['"""Allow: /stuff/"""'], {}), "('Allow: /stuff/')\n", (802, 820), False, 'import parser\n'), ((966, 1018), 'parser.parse', 'parser.parse', (['"""User-agent: * \nDisallow: /stuff/"""'], {}), '("""User-agent: * \nDisallow: /stuff/""")\n', (978, 1018), False, 'import parser\n'), ((1190, 1253), 'parser.parse', 'parser.parse', (['"""User-agent: someone else \nDisallow: /stuff/"""'], {}), '("""User-agent: someone else \nDisallow: /stuff/""")\n', (1202, 1253), False, 'import parser\n'), ((1479, 1531), 'parser.get_bad_paths', 'parser.get_bad_paths', (['"""http://cs.rpi.edu/robots.txt"""'], {}), "('http://cs.rpi.edu/robots.txt')\n", (1499, 1531), False, 'import parser\n'), ((1660, 1710), 'parser.get_bad_paths', 'parser.get_bad_paths', (['"""http://rpi.edu/robots.taxt"""'], {}), "('http://rpi.edu/robots.taxt')\n", (1680, 1710), False, 'import parser\n'), ((2267, 2322), 'parser.get_bad_paths', 'parser.get_bad_paths', (['"""https://www.pgdp.net/robots.txt"""'], {}), "('https://www.pgdp.net/robots.txt')\n", (2287, 2322), False, 'import parser\n'), ((2438, 2502), 'parser.get_bad_paths', 'parser.get_bad_paths', (['"""https://www.choiceofgames.com/robots.txt"""'], {}), "('https://www.choiceofgames.com/robots.txt')\n", (2458, 2502), False, 'import parser\n')]
|
from datetime import datetime
time_now = datetime.now()
print(time_now.strftime('%B/%d/%Y:%H/%M'))
|
[
"datetime.datetime.now"
] |
[((42, 56), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (54, 56), False, 'from datetime import datetime\n')]
|
"""
Test module for the Fedex Tools.
"""
import unittest
import logging
import sys
sys.path.insert(0, '..')
import fedex.config
import fedex.services.ship_service as service # Any request object will do.
import fedex.tools.conversion
logging.getLogger('suds').setLevel(logging.ERROR)
logging.getLogger('fedex').setLevel(logging.INFO)
class FedexToolsTests(unittest.TestCase):
"""
These tests verify that the fedex tools are working properly.
"""
def test_conversion_tools(self):
# Empty config, since we are not actually sending anything
config = fedex.config.FedexConfig(key='', password='',
account_number='', meter_number='',
use_test_server=True)
# We need a mock suds object, a request object or sub-object will do.
waybill_request = service.FedexProcessShipmentRequest(config)
obj = waybill_request.create_wsdl_object_of_type('ProcessShipmentRequest')
# Test basic sobject to dict.
dict_obj = fedex.tools.conversion.basic_sobject_to_dict(obj)
assert type(dict_obj) == dict
# Test with serialization and case conversion.
dict_obj = fedex.tools.conversion.sobject_to_dict(obj, key_to_lower=True, json_serialize=True)
assert type(dict_obj) == dict
# JSON string object test
dict_obj = fedex.tools.conversion.sobject_to_json(obj)
assert dict_obj, "Expecting a JSON string object."
if __name__ == "__main__":
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
unittest.main()
|
[
"unittest.main",
"logging.basicConfig",
"sys.path.insert",
"fedex.services.ship_service.FedexProcessShipmentRequest",
"logging.getLogger"
] |
[((85, 109), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (100, 109), False, 'import sys\n'), ((1540, 1598), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (1559, 1598), False, 'import logging\n'), ((1603, 1618), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1616, 1618), False, 'import unittest\n'), ((239, 264), 'logging.getLogger', 'logging.getLogger', (['"""suds"""'], {}), "('suds')\n", (256, 264), False, 'import logging\n'), ((289, 315), 'logging.getLogger', 'logging.getLogger', (['"""fedex"""'], {}), "('fedex')\n", (306, 315), False, 'import logging\n'), ((880, 923), 'fedex.services.ship_service.FedexProcessShipmentRequest', 'service.FedexProcessShipmentRequest', (['config'], {}), '(config)\n', (915, 923), True, 'import fedex.services.ship_service as service\n')]
|
from .algo import Algo
from .algo_code import AlgoCode
from .entity_slot import Slot
from .entity_space import Space
from . import log, show_adding_box_log
from .exception import DistributionException
import time
class AlgoSingle(Algo):
"""
pack items into a single
bin
for single bin packing we merely
need to operate on one bin. Don't
accept input bins larger than size one
@param item_collection: set of items
@returns one bin packed with items
"""
def run(self):
log.debug("Entering algorithm SINGLE")
bin_collection = self.bins
item_collection =self.items
if len(bin_collection.items) == 0 or len(bin_collection.items) > 1:
raise DistributionException("Single takes only one item")
bin = bin_collection.next()
"""
checks with the bin can continue within the space
for single algo
"""
def continue_fn(bin, space, item):
if bin.occupied_space(space, item):
return AlgoCode.NO_SPACE
m_y = bin.get_min_y_pos(space.y)
            if space.x + item.w > bin.w:
""" try z now """
space.z += item.d
space.x = 0
else:
space.x += 1
""" if space.z fails and so does space.x """
""" go up in height make sure y """
""" is at the proper juxtaposition """
if space.z + item.d > bin.d:
space.y += m_y.max_y
space.x = m_y.min_x
space.z = m_y.min_z
if int(space.y + item.h) > bin.h:
return AlgoCode.LAST_ITEM
return AlgoCode.FOUND_SPACE
while bin:
log.info("Trying to allocate items for bin: {0}".format(bin.id))
item_collection.reset()
bin.start_time = time.time()
item = item_collection.next()
while item:
item = item_collection.current()
if not bin.can_fit( item ) :
item_collection.next()
continue
space = Space(x=0, y=0, z=0)
""" if item.w > bin.w: """
""" self.binner.add_lost(item) """
can_continue = continue_fn(bin, space, item)
while can_continue == AlgoCode.NO_SPACE:
""" if were at the top of the box """
""" we cannot allocate any more space so we can move on """
space.compute_next_sequence()
can_continue = continue_fn(bin, space, item)
if can_continue == AlgoCode.LAST_ITEM:
continue
show_adding_box_log(space, item)
slot = Slot.from_space_and_item(space, item)
bin.append(slot)
item = item_collection.next()
bin.end_time = time.time()
bin = bin_collection.next()
return self.binner
|
[
"time.time"
] |
[((1717, 1728), 'time.time', 'time.time', ([], {}), '()\n', (1726, 1728), False, 'import time\n'), ((2598, 2609), 'time.time', 'time.time', ([], {}), '()\n', (2607, 2609), False, 'import time\n')]
|
import numpy as np
import time
from unityagents import UnityEnvironment
from agent_utils import env_initialize, env_reset, state_reward_done_unpack
from dqn_agent import DQN_Agent
from agent_utils import load_dqn
from agent_utils import load_params, load_weights
def demo_agent(env, agent, n_episodes, epsilon=0.05, seed=0, train_mode=False):
print(f'\r\nRunning demo of \'{agent.name}\' with epsilon={epsilon}')
scores = []
for i in range(1, n_episodes+1):
score = 0
state = env_reset(env, agent.brain_name, train_mode=train_mode)
while True:
action = int(agent.act(state, epsilon))
env_info = env.step(action)[agent.brain_name]
next_state, reward, done = state_reward_done_unpack(env_info)
score += reward
state = next_state
if done:
break
scores.append(score)
print(f'Episode {i}\tScore: {score:.2f}')
print('\r\nDemo complete! Scores:\tMin:{:.2f}\tMax:{:.2f}\tAvg:{:.3f}'.format(
np.min(scores), np.max(scores), np.mean(scores)))
return scores
def demo_saved_agent(env, agent_name, n_episodes=3, epsilon=0.05, seed=0,
train_mode=False, verbose=False):
# initialize environment and scenario info
brain, brain_name, state, action_size, state_size = env_initialize(env, train_mode=train_mode)
# load the agent params and create the agent
params, local_weights, target_weights = load_dqn(agent_name, verbose=verbose)
agent = DQN_Agent(state_size, action_size, brain_name, seed, params=params)
print(agent.display_params())
# set trained agent weights
agent.qnetwork_local.load_state_dict(local_weights)
agent.qnetwork_target.load_state_dict(target_weights)
# run demo
return demo_agent(env, agent,
n_episodes=n_episodes, epsilon=epsilon,
seed=seed, train_mode=train_mode)
def demo_random_agent_discrete(env, n_episodes=3, train_mode=False, verbose=False):
""" Runs the environment using a uniform random action selection policy. """
# setup the environment and get initial info
brain, brain_name, state, action_size, state_size = env_initialize(env, train_mode=train_mode, verbose=verbose)
start_time = time.time()
for n_episode in range(1, n_episodes+1):
# reset the environment for the new episode
state = env_reset(env, brain_name, train_mode=train_mode)
# track scores and the number of steps in an episode
score = 0
steps = 0
while True:
# choose a random action
action = np.random.randint(action_size)
# send action to environment and get updated info
env_info = env.step(action)[brain_name]
next_state, reward, done = state_reward_done_unpack(env_info)
score += reward
steps += 1
# set the state for next iteration
state = next_state
if done:
break # end episode if we get the done signal
print (f'Episode {n_episode} score: {score} in {steps} steps.')
end_time = time.time()
avg_episode_time = (end_time - start_time) / n_episodes
print (f'Random agent demo complete, avg episode duration: {avg_episode_time:.3f}s.')
|
[
"agent_utils.env_initialize",
"agent_utils.state_reward_done_unpack",
"agent_utils.load_dqn",
"dqn_agent.DQN_Agent",
"time.time",
"numpy.min",
"numpy.max",
"numpy.mean",
"numpy.random.randint",
"agent_utils.env_reset"
] |
[((1355, 1397), 'agent_utils.env_initialize', 'env_initialize', (['env'], {'train_mode': 'train_mode'}), '(env, train_mode=train_mode)\n', (1369, 1397), False, 'from agent_utils import env_initialize, env_reset, state_reward_done_unpack\n'), ((1496, 1533), 'agent_utils.load_dqn', 'load_dqn', (['agent_name'], {'verbose': 'verbose'}), '(agent_name, verbose=verbose)\n', (1504, 1533), False, 'from agent_utils import load_dqn\n'), ((1550, 1617), 'dqn_agent.DQN_Agent', 'DQN_Agent', (['state_size', 'action_size', 'brain_name', 'seed'], {'params': 'params'}), '(state_size, action_size, brain_name, seed, params=params)\n', (1559, 1617), False, 'from dqn_agent import DQN_Agent\n'), ((2243, 2302), 'agent_utils.env_initialize', 'env_initialize', (['env'], {'train_mode': 'train_mode', 'verbose': 'verbose'}), '(env, train_mode=train_mode, verbose=verbose)\n', (2257, 2302), False, 'from agent_utils import env_initialize, env_reset, state_reward_done_unpack\n'), ((2325, 2336), 'time.time', 'time.time', ([], {}), '()\n', (2334, 2336), False, 'import time\n'), ((3255, 3266), 'time.time', 'time.time', ([], {}), '()\n', (3264, 3266), False, 'import time\n'), ((508, 563), 'agent_utils.env_reset', 'env_reset', (['env', 'agent.brain_name'], {'train_mode': 'train_mode'}), '(env, agent.brain_name, train_mode=train_mode)\n', (517, 563), False, 'from agent_utils import env_initialize, env_reset, state_reward_done_unpack\n'), ((2450, 2499), 'agent_utils.env_reset', 'env_reset', (['env', 'brain_name'], {'train_mode': 'train_mode'}), '(env, brain_name, train_mode=train_mode)\n', (2459, 2499), False, 'from agent_utils import env_initialize, env_reset, state_reward_done_unpack\n'), ((733, 767), 'agent_utils.state_reward_done_unpack', 'state_reward_done_unpack', (['env_info'], {}), '(env_info)\n', (757, 767), False, 'from agent_utils import env_initialize, env_reset, state_reward_done_unpack\n'), ((1054, 1068), 'numpy.min', 'np.min', (['scores'], {}), '(scores)\n', (1060, 1068), True, 'import numpy as np\n'), ((1070, 1084), 'numpy.max', 'np.max', (['scores'], {}), '(scores)\n', (1076, 1084), True, 'import numpy as np\n'), ((1086, 1101), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (1093, 1101), True, 'import numpy as np\n'), ((2693, 2723), 'numpy.random.randint', 'np.random.randint', (['action_size'], {}), '(action_size)\n', (2710, 2723), True, 'import numpy as np\n'), ((2890, 2924), 'agent_utils.state_reward_done_unpack', 'state_reward_done_unpack', (['env_info'], {}), '(env_info)\n', (2914, 2924), False, 'from agent_utils import env_initialize, env_reset, state_reward_done_unpack\n')]
|
# Copyright (c) 2020 BlenderNPR and contributors. MIT license.
import os, time
import bpy
from BlenderMalt import MaltMaterial, MaltMeshes, MaltTextures
__BRIDGE = None
__PIPELINE_PARAMETERS = None
__INITIALIZED = False
TIMESTAMP = time.time()
def get_bridge(world=None):
global __BRIDGE
bridge = __BRIDGE
if bridge is None or bridge.lost_connection:
__BRIDGE = None
try:
if world is None:
bpy.context.scene.world.malt.update_pipeline(bpy.context)
else:
world.malt.update_pipeline(bpy.context)
except:
pass
return __BRIDGE
def set_bridge(bridge):
global __BRIDGE
__BRIDGE = bridge
def set_pipeline_parameters(parameters):
global __PIPELINE_PARAMETERS
__PIPELINE_PARAMETERS = parameters
def set_initialized(initialized):
global __INITIALIZED
__INITIALIZED = initialized
class MaltPipeline(bpy.types.PropertyGroup):
def update_pipeline(self, context):
global TIMESTAMP
TIMESTAMP = time.time()
#TODO: Sync all scenes. Only one active pipeline per Blender instance is supported atm.
pipeline = self.pipeline
if pipeline == '':
current_dir = os.path.dirname(os.path.abspath(__file__))
default_pipeline = os.path.join(current_dir,'.MaltPath','Malt','Pipelines','NPR_Pipeline','NPR_Pipeline.py')
pipeline = default_pipeline
debug_mode = bool(bpy.context.preferences.addons['BlenderMalt'].preferences.debug_mode)
path = bpy.path.abspath(pipeline, library=self.id_data.library)
import Bridge
bridge = Bridge.Client_API.Bridge(path, debug_mode)
import logging as log
log.info('Blender {} {} {}'.format(bpy.app.version_string, bpy.app.build_branch, bpy.app.build_hash))
params = bridge.get_parameters()
set_bridge(bridge)
set_pipeline_parameters(params)
MaltMaterial.reset_materials()
MaltMeshes.reset_meshes()
MaltTextures.reset_textures()
setup_all_ids()
set_initialized(True)
pipeline : bpy.props.StringProperty(name="Malt Pipeline", subtype='FILE_PATH', update=update_pipeline)
# There's no StringVectorProperty ?!?!?
overrides : bpy.props.StringProperty(name='Pipeline Overrides', default='Preview,Final Render')
def draw_ui(self, layout):
layout.prop(self, 'pipeline')
class MALT_PT_Pipeline(bpy.types.Panel):
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "world"
bl_label = "Pipeline Settings"
COMPAT_ENGINES = {'MALT'}
@classmethod
def poll(cls, context):
return context.scene.render.engine == 'MALT' and context.world is not None
def draw(self, context):
context.scene.world.malt.draw_ui(self.layout)
classes = (
MaltPipeline,
MALT_PT_Pipeline,
)
def setup_all_ids():
setup_parameters(bpy.data.scenes)
setup_parameters(bpy.data.worlds)
setup_parameters(bpy.data.cameras)
setup_parameters(bpy.data.objects)
setup_parameters(bpy.data.materials)
setup_parameters(bpy.data.meshes)
setup_parameters(bpy.data.curves)
setup_parameters(bpy.data.lights)
MaltMaterial.track_shader_changes(force_update=True)
def setup_parameters(ids):
global __PIPELINE_PARAMETERS
pipeline_parameters = __PIPELINE_PARAMETERS
class_parameters_map = {
bpy.types.Scene : pipeline_parameters.scene,
bpy.types.World : pipeline_parameters.world,
bpy.types.Camera : pipeline_parameters.camera,
bpy.types.Object : pipeline_parameters.object,
bpy.types.Material : pipeline_parameters.material,
bpy.types.Mesh : pipeline_parameters.mesh,
bpy.types.Curve : pipeline_parameters.mesh,
bpy.types.Light : pipeline_parameters.light,
}
for bid in ids:
for cls, parameters in class_parameters_map.items():
if isinstance(bid, cls):
bid.malt_parameters.setup(parameters)
@bpy.app.handlers.persistent
def depsgraph_update(scene, depsgraph):
global __INITIALIZED
if scene.render.engine != 'MALT':
# Don't do anything if Malt is not the active renderer,
# but make sure we setup all IDs the next time Malt is enabled
__INITIALIZED = False
return
if __INITIALIZED == False:
scene.world.malt.update_pipeline(bpy.context)
return
ids = []
class_data_map = {
bpy.types.Scene : bpy.data.scenes,
bpy.types.World : bpy.data.worlds,
bpy.types.Camera : bpy.data.cameras,
bpy.types.Object : bpy.data.objects,
bpy.types.Material : bpy.data.materials,
bpy.types.Mesh : bpy.data.meshes,
bpy.types.Curve : bpy.data.curves,
bpy.types.Light : bpy.data.lights,
}
for update in depsgraph.updates:
        # Try to avoid as many re-setups as possible.
# Ideally we would do it only on ID creation.
if update.is_updated_geometry == True or update.is_updated_transform == False:
for cls, data in class_data_map.items():
if isinstance(update.id, cls):
ids.append(data[update.id.name])
setup_parameters(ids)
redraw = False
for update in depsgraph.updates:
if update.is_updated_geometry:
if 'Object' in str(update.id.__class__):
MaltMeshes.unload_mesh(update.id)
if update.id.__class__ == bpy.types.Image:
MaltTextures.unload_texture(update.id)
redraw = True
elif update.id.__class__ == bpy.types.Material:
MaltTextures.unload_gradients(update.id)
redraw = True
if redraw:
for screen in bpy.data.screens:
for area in screen.areas:
area.tag_redraw()
@bpy.app.handlers.persistent
def load_scene(dummy1=None,dummy2=None):
global __INITIALIZED
__INITIALIZED = False
def track_pipeline_changes():
if bpy.context.scene.render.engine != 'MALT':
return 1
try:
scene = bpy.context.scene
malt = scene.world.malt
path = bpy.path.abspath(malt.pipeline, library=malt.id_data.library)
if os.path.exists(path):
stats = os.stat(path)
if stats.st_mtime > TIMESTAMP:
malt.update_pipeline(bpy.context)
except:
import traceback
print(traceback.format_exc())
return 1
def register():
for _class in classes: bpy.utils.register_class(_class)
bpy.types.World.malt = bpy.props.PointerProperty(type=MaltPipeline)
bpy.app.handlers.depsgraph_update_post.append(depsgraph_update)
bpy.app.handlers.load_post.append(load_scene)
bpy.app.timers.register(track_pipeline_changes, persistent=True)
def unregister():
for _class in classes: bpy.utils.unregister_class(_class)
del bpy.types.World.malt
bpy.app.handlers.depsgraph_update_post.remove(depsgraph_update)
bpy.app.handlers.load_post.remove(load_scene)
bpy.app.timers.unregister(track_pipeline_changes)
|
[
"BlenderMalt.MaltTextures.unload_texture",
"BlenderMalt.MaltMaterial.reset_materials",
"os.path.join",
"bpy.app.handlers.load_post.append",
"bpy.props.PointerProperty",
"BlenderMalt.MaltMaterial.track_shader_changes",
"bpy.context.scene.world.malt.update_pipeline",
"os.path.abspath",
"bpy.path.abspath",
"os.path.exists",
"bpy.utils.unregister_class",
"traceback.format_exc",
"BlenderMalt.MaltMeshes.reset_meshes",
"Bridge.Client_API.Bridge",
"bpy.app.handlers.depsgraph_update_post.append",
"BlenderMalt.MaltTextures.reset_textures",
"os.stat",
"bpy.app.handlers.depsgraph_update_post.remove",
"BlenderMalt.MaltTextures.unload_gradients",
"bpy.app.handlers.load_post.remove",
"bpy.app.timers.unregister",
"bpy.app.timers.register",
"BlenderMalt.MaltMeshes.unload_mesh",
"time.time",
"bpy.props.StringProperty",
"bpy.utils.register_class"
] |
[((237, 248), 'time.time', 'time.time', ([], {}), '()\n', (246, 248), False, 'import os, time\n'), ((2152, 2248), 'bpy.props.StringProperty', 'bpy.props.StringProperty', ([], {'name': '"""Malt Pipeline"""', 'subtype': '"""FILE_PATH"""', 'update': 'update_pipeline'}), "(name='Malt Pipeline', subtype='FILE_PATH', update=\n update_pipeline)\n", (2176, 2248), False, 'import bpy\n'), ((2305, 2393), 'bpy.props.StringProperty', 'bpy.props.StringProperty', ([], {'name': '"""Pipeline Overrides"""', 'default': '"""Preview,Final Render"""'}), "(name='Pipeline Overrides', default=\n 'Preview,Final Render')\n", (2329, 2393), False, 'import bpy\n'), ((3259, 3311), 'BlenderMalt.MaltMaterial.track_shader_changes', 'MaltMaterial.track_shader_changes', ([], {'force_update': '(True)'}), '(force_update=True)\n', (3292, 3311), False, 'from BlenderMalt import MaltMaterial, MaltMeshes, MaltTextures\n'), ((6604, 6648), 'bpy.props.PointerProperty', 'bpy.props.PointerProperty', ([], {'type': 'MaltPipeline'}), '(type=MaltPipeline)\n', (6629, 6648), False, 'import bpy\n'), ((6653, 6716), 'bpy.app.handlers.depsgraph_update_post.append', 'bpy.app.handlers.depsgraph_update_post.append', (['depsgraph_update'], {}), '(depsgraph_update)\n', (6698, 6716), False, 'import bpy\n'), ((6721, 6766), 'bpy.app.handlers.load_post.append', 'bpy.app.handlers.load_post.append', (['load_scene'], {}), '(load_scene)\n', (6754, 6766), False, 'import bpy\n'), ((6771, 6835), 'bpy.app.timers.register', 'bpy.app.timers.register', (['track_pipeline_changes'], {'persistent': '(True)'}), '(track_pipeline_changes, persistent=True)\n', (6794, 6835), False, 'import bpy\n'), ((6954, 7017), 'bpy.app.handlers.depsgraph_update_post.remove', 'bpy.app.handlers.depsgraph_update_post.remove', (['depsgraph_update'], {}), '(depsgraph_update)\n', (6999, 7017), False, 'import bpy\n'), ((7022, 7067), 'bpy.app.handlers.load_post.remove', 'bpy.app.handlers.load_post.remove', (['load_scene'], {}), '(load_scene)\n', (7055, 7067), False, 'import bpy\n'), ((7072, 7121), 'bpy.app.timers.unregister', 'bpy.app.timers.unregister', (['track_pipeline_changes'], {}), '(track_pipeline_changes)\n', (7097, 7121), False, 'import bpy\n'), ((1042, 1053), 'time.time', 'time.time', ([], {}), '()\n', (1051, 1053), False, 'import os, time\n'), ((1570, 1626), 'bpy.path.abspath', 'bpy.path.abspath', (['pipeline'], {'library': 'self.id_data.library'}), '(pipeline, library=self.id_data.library)\n', (1586, 1626), False, 'import bpy\n'), ((1666, 1708), 'Bridge.Client_API.Bridge', 'Bridge.Client_API.Bridge', (['path', 'debug_mode'], {}), '(path, debug_mode)\n', (1690, 1708), False, 'import Bridge\n'), ((1974, 2004), 'BlenderMalt.MaltMaterial.reset_materials', 'MaltMaterial.reset_materials', ([], {}), '()\n', (2002, 2004), False, 'from BlenderMalt import MaltMaterial, MaltMeshes, MaltTextures\n'), ((2013, 2038), 'BlenderMalt.MaltMeshes.reset_meshes', 'MaltMeshes.reset_meshes', ([], {}), '()\n', (2036, 2038), False, 'from BlenderMalt import MaltMaterial, MaltMeshes, MaltTextures\n'), ((2047, 2076), 'BlenderMalt.MaltTextures.reset_textures', 'MaltTextures.reset_textures', ([], {}), '()\n', (2074, 2076), False, 'from BlenderMalt import MaltMaterial, MaltMeshes, MaltTextures\n'), ((6189, 6250), 'bpy.path.abspath', 'bpy.path.abspath', (['malt.pipeline'], {'library': 'malt.id_data.library'}), '(malt.pipeline, library=malt.id_data.library)\n', (6205, 6250), False, 'import bpy\n'), ((6262, 6282), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6276, 6282), False, 'import os, 
time\n'), ((6544, 6576), 'bpy.utils.register_class', 'bpy.utils.register_class', (['_class'], {}), '(_class)\n', (6568, 6576), False, 'import bpy\n'), ((6886, 6920), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['_class'], {}), '(_class)\n', (6912, 6920), False, 'import bpy\n'), ((1319, 1417), 'os.path.join', 'os.path.join', (['current_dir', '""".MaltPath"""', '"""Malt"""', '"""Pipelines"""', '"""NPR_Pipeline"""', '"""NPR_Pipeline.py"""'], {}), "(current_dir, '.MaltPath', 'Malt', 'Pipelines', 'NPR_Pipeline',\n 'NPR_Pipeline.py')\n", (1331, 1417), False, 'import os, time\n'), ((5547, 5585), 'BlenderMalt.MaltTextures.unload_texture', 'MaltTextures.unload_texture', (['update.id'], {}), '(update.id)\n', (5574, 5585), False, 'from BlenderMalt import MaltMaterial, MaltMeshes, MaltTextures\n'), ((6304, 6317), 'os.stat', 'os.stat', (['path'], {}), '(path)\n', (6311, 6317), False, 'import os, time\n'), ((452, 509), 'bpy.context.scene.world.malt.update_pipeline', 'bpy.context.scene.world.malt.update_pipeline', (['bpy.context'], {}), '(bpy.context)\n', (496, 509), False, 'import bpy\n'), ((1261, 1286), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1276, 1286), False, 'import os, time\n'), ((5450, 5483), 'BlenderMalt.MaltMeshes.unload_mesh', 'MaltMeshes.unload_mesh', (['update.id'], {}), '(update.id)\n', (5472, 5483), False, 'from BlenderMalt import MaltMaterial, MaltMeshes, MaltTextures\n'), ((5680, 5720), 'BlenderMalt.MaltTextures.unload_gradients', 'MaltTextures.unload_gradients', (['update.id'], {}), '(update.id)\n', (5709, 5720), False, 'from BlenderMalt import MaltMaterial, MaltMeshes, MaltTextures\n'), ((6462, 6484), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (6482, 6484), False, 'import traceback\n')]
|
import os
from time import sleep
from datetime import datetime
MESSAGE_COUNT = int(os.getenv("MESSAGE_COUNT", 10000))
SIZE = int(os.getenv("SIZE", 128))
FREQ = float(os.getenv("FREQ", "1"))
MESSAGE_COUNT = max(MESSAGE_COUNT, 5)
MY_HOST = os.getenv("MY_HOST", os.uname()[1])
def print_beginning():
print("---begin---")
def print_ending():
later = datetime.now()
print("generated %d messages in %d seconds" % (MESSAGE_COUNT, int((later - now).total_seconds())))
print("EPS: %d" % (MESSAGE_COUNT / (later - now).total_seconds()))
print("---end---")
def print_log(i):
log_meta = " ".join(["num:", str(i), "|", MY_HOST, "|", datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f"), "|"])
print(log_meta, "r"*(max(1, SIZE-len(log_meta))))
sleep(FREQ)
now = datetime.now()
i = 1
if MESSAGE_COUNT <= 0:
print_beginning()
while True:
print_log(i)
i += 1
else:
print_beginning()
while i <= MESSAGE_COUNT - 4:
print_log(i)
i += 1
print_ending()
while True:
sleep(60)
|
[
"os.uname",
"datetime.datetime.now",
"os.getenv",
"time.sleep"
] |
[((789, 803), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (801, 803), False, 'from datetime import datetime\n'), ((84, 117), 'os.getenv', 'os.getenv', (['"""MESSAGE_COUNT"""', '(10000)'], {}), "('MESSAGE_COUNT', 10000)\n", (93, 117), False, 'import os\n'), ((130, 152), 'os.getenv', 'os.getenv', (['"""SIZE"""', '(128)'], {}), "('SIZE', 128)\n", (139, 152), False, 'import os\n'), ((167, 189), 'os.getenv', 'os.getenv', (['"""FREQ"""', '"""1"""'], {}), "('FREQ', '1')\n", (176, 189), False, 'import os\n'), ((360, 374), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (372, 374), False, 'from datetime import datetime\n'), ((765, 776), 'time.sleep', 'sleep', (['FREQ'], {}), '(FREQ)\n', (770, 776), False, 'from time import sleep\n'), ((1042, 1051), 'time.sleep', 'sleep', (['(60)'], {}), '(60)\n', (1047, 1051), False, 'from time import sleep\n'), ((261, 271), 'os.uname', 'os.uname', ([], {}), '()\n', (269, 271), False, 'import os\n'), ((652, 666), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (664, 666), False, 'from datetime import datetime\n')]
|
#!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
import logging
from geocoder.base import OneResult, MultipleResultsQuery
class GeolyticaResult(OneResult):
def __init__(self, json_content):
# create safe shortcuts
self._standard = json_content.get('standard', {})
# proceed with super.__init__
super(GeolyticaResult, self).__init__(json_content)
@property
def lat(self):
lat = self.raw.get('latt', '').strip()
if lat:
return float(lat)
@property
def lng(self):
lng = self.raw.get('longt', '').strip()
if lng:
return float(lng)
@property
def postal(self):
return self.raw.get('postal', '').strip()
@property
def housenumber(self):
return self._standard.get('stnumber', '').strip()
@property
def street(self):
return self._standard.get('staddress', '').strip()
@property
def city(self):
return self._standard.get('city', '').strip()
@property
def state(self):
return self._standard.get('prov', '').strip()
@property
def address(self):
if self.street_number:
return u'{0} {1}, {2}'.format(self.street_number, self.route, self.locality)
elif self.route and self.route != 'un-known':
return u'{0}, {1}'.format(self.route, self.locality)
else:
return self.locality
class GeolyticaQuery(MultipleResultsQuery):
"""
Geocoder.ca
===========
A Canadian and US location geocoder.
API Reference
-------------
http://geocoder.ca/?api=1
"""
provider = 'geolytica'
method = 'geocode'
_URL = 'http://geocoder.ca'
_RESULT_CLASS = GeolyticaResult
_KEY_MANDATORY = False
def _build_params(self, location, provider_key, **kwargs):
params = {
'json': 1,
'locate': location,
'geoit': 'xml'
}
if 'strictmode' in kwargs:
params.update({'strictmode': kwargs.pop('strictmode')})
if 'strict' in kwargs:
params.update({'strict': kwargs.pop('strict')})
if 'auth' in kwargs:
params.update({'auth': kwargs.pop('auth')})
return params
def _adapt_results(self, json_response):
return [json_response]
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
g = GeolyticaQuery('1552 Payette dr., Ottawa')
g.debug()
|
[
"logging.basicConfig"
] |
[((2385, 2424), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (2404, 2424), False, 'import logging\n')]
|