code (string, 22 to 1.05M chars) | apis (list, 1 to 3.31k items) | extract_api (string, 75 to 3.25M chars) |
---|---|---|
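Each row below pairs a Python source file (code) with the fully qualified names of the APIs it calls (apis) and a per-call extraction record (extract_api). The sketch below shows how such a row might be consumed. It assumes the extract_api cell is a Python literal whose tuples hold, in order, the call's character span in the original (indented) source, the fully qualified API name, the call expression as written, the parsed positional and keyword arguments, the argument source text, the argument span, a flag, and the originating import line; these field meanings are inferred from the rows rather than taken from an official schema, and parse_row is a hypothetical helper name.

import ast

def parse_row(code: str, apis: list, extract_api: str):
    # The extract_api cell is the repr of a list of call tuples.
    calls = ast.literal_eval(extract_api)
    # The second field of each tuple appears to be the fully qualified API name,
    # and the apis column appears to list exactly the set of those names.
    fq_names = {entry[1] for entry in calls}
    assert fq_names == set(apis)
    for (start, end), fq_name, *_rest in calls:
        # Spans index into the original source, so code[start:end] should be the call text.
        print(fq_name, repr(code[start:end]))
    return calls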
"""A DOM implementation that offers traversal and ranges on top of
minidom, using the 4DOM traversal implementation."""
import minidom, string
class DOMImplementation(minidom.DOMImplementation):
def hasFeature(self, feature, version):
if version not in ("1.0", "2.0"):
return 0
feature = string.lower(feature)
if feature in ['traversal','range']:
return 1
return minidom.DOMImplementation.hasFeature(self, feature, version)
def _createDocument(self):
return Document()
class Document(minidom.Document):
implementation = DOMImplementation()
def createNodeIterator(self, root, whatToShow, filter, entityReferenceExpansion):
from xml.dom import NodeIterator
nodi = NodeIterator.NodeIterator(root, whatToShow, filter, entityReferenceExpansion)
return nodi
def createTreeWalker(self, root, whatToShow, filter, entityReferenceExpansion):
from TreeWalker import TreeWalker
return TreeWalker(root, whatToShow, filter, entityReferenceExpansion)
def createRange(self):
import Range
return Range.Range(self)
def getDOMImplementation():
return Document.implementation
|
[
"minidom.DOMImplementation.hasFeature",
"xml.dom.NodeIterator.NodeIterator",
"string.lower",
"Range.Range",
"TreeWalker.TreeWalker"
] |
[((322, 343), 'string.lower', 'string.lower', (['feature'], {}), '(feature)\n', (334, 343), False, 'import minidom, string\n'), ((425, 485), 'minidom.DOMImplementation.hasFeature', 'minidom.DOMImplementation.hasFeature', (['self', 'feature', 'version'], {}), '(self, feature, version)\n', (461, 485), False, 'import minidom, string\n'), ((762, 839), 'xml.dom.NodeIterator.NodeIterator', 'NodeIterator.NodeIterator', (['root', 'whatToShow', 'filter', 'entityReferenceExpansion'], {}), '(root, whatToShow, filter, entityReferenceExpansion)\n', (787, 839), False, 'from xml.dom import NodeIterator\n'), ((1002, 1064), 'TreeWalker.TreeWalker', 'TreeWalker', (['root', 'whatToShow', 'filter', 'entityReferenceExpansion'], {}), '(root, whatToShow, filter, entityReferenceExpansion)\n', (1012, 1064), False, 'from TreeWalker import TreeWalker\n'), ((1129, 1146), 'Range.Range', 'Range.Range', (['self'], {}), '(self)\n', (1140, 1146), False, 'import Range\n')]
|
import redis
from tools.common import test_http_proxy
import threading
def http_task():
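# Pull one random proxy IP from Redis, test it over HTTP, and append it to pass.txt or fail.txt.
# Returns 0 when the Redis set is empty so the calling loop can stop, 1 otherwise.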
# Connect to the Redis database
POOL = redis.ConnectionPool(host='127.0.0.1', port=6379)
CONN_REDIS = redis.Redis(connection_pool=POOL)
# Take one random IP from the set for testing
# proxy = CONN_REDIS.("freeProxy:AfterVerifyOKhttp")
ip = CONN_REDIS.srandmember("freeProxy:AfterVerifyOKhttp",1)
# Check whether any IPs are left in Redis
if not ip:
return 0
else:
# print("INFO: Get proxy from Redis freeProxy:BeforeVerifyhttp list")
proxy = str(ip[0], encoding="utf-8")
flag = test_http_proxy(proxy)
if flag == True:
# CONN_REDIS.sadd("freeProxy:AfterVerifyOKhttp", proxy)
# print("INFO: Save this Proxy IP in freeProxy:AfterVerifyOKhttp")
with open("pass.txt", "a+") as f:
f.write(proxy + "/n")
print("Pass:", proxy)
else:
# CONN_REDIS.sadd("freeProxy_Bad:AfterVerifyFailhttp", proxy)
# print("INFO: Abandon this Proxy IP!")
with open("fail.txt", "a+") as f:
f.write(proxy + "+/n")
print("Fail:", proxy)
return 1
def loop_test(name):
print("*Start thread task %s" % name)
while True:
result = http_task()
print("\n")
if result == 0:
break
if __name__ == "__main__":
jobs = []
num = 8
for i in range(1, num+1):
name = "Thread-" + str(i)
jobs.append(threading.Thread(target=loop_test, args=(name,)))
# Start the worker threads
for t in jobs:
t.start()
for t in jobs:
t.join()
|
[
"redis.Redis",
"threading.Thread",
"tools.common.test_http_proxy",
"redis.ConnectionPool"
] |
[((118, 167), 'redis.ConnectionPool', 'redis.ConnectionPool', ([], {'host': '"""127.0.0.1"""', 'port': '(6379)'}), "(host='127.0.0.1', port=6379)\n", (138, 167), False, 'import redis\n'), ((185, 218), 'redis.Redis', 'redis.Redis', ([], {'connection_pool': 'POOL'}), '(connection_pool=POOL)\n', (196, 218), False, 'import redis\n'), ((561, 583), 'tools.common.test_http_proxy', 'test_http_proxy', (['proxy'], {}), '(proxy)\n', (576, 583), False, 'from tools.common import test_http_proxy\n'), ((1462, 1510), 'threading.Thread', 'threading.Thread', ([], {'target': 'loop_test', 'args': '(name,)'}), '(target=loop_test, args=(name,))\n', (1478, 1510), False, 'import threading\n')]
|
import pytesseract
from pytesseract import Output
import cv2
import os
from shapely.geometry import Polygon
pytesseract.pytesseract.tesseract_cmd = r"c:\Tesseract-OCR\tesseract"  # set this to your local Tesseract binary path
import sys
from os import chdir, listdir
from os.path import join
## Hyper Params
L = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
CHAR_THRESHOLD = 3
LINE_WIDTH = 2
LINE_COLOR = (0, 0, 0)
## Algo
def get_image_data(img_path):
img = cv2.imread(img_path)
image_to_data = pytesseract.image_to_data(img, output_type=Output.DICT)
Xmax = img.shape[1]
Ymax = img.shape[0]
return image_to_data, Xmax, Ymax
def draw_lines_v1(img_path, image_to_data):
img = cv2.imread(img_path)
Xmax = img.shape[1]
n_boxes = len(image_to_data['level'])
for i in range(n_boxes):
if filter_boxes(image_to_data, i) :
(x, y, w, h) = (image_to_data['left'][i], image_to_data['top'][i], image_to_data['width'][i], image_to_data['height'][i])
#cv2.line(img, (0 , y +h +5 ),(Xmax, y +h +5) ,(0, 0, 0), 3)
#cv2.line(img, (0 , y+h ), (Xmax + w, y + h), (0, 255, 0), 1)
cv2.rectangle(img, (x, y), ( x + w, y + h), LINE_COLOR, LINE_WIDTH)
"""
cv2.line(img, (0 , 0),(0, Ymax) ,(0, 0, 0), 5)
cv2.line(img, (0 , 0),(Xmax, 0) ,(0, 0, 0), 5)
cv2.line(img, (0, Ymax),(Xmax, Ymax) ,(0, 0, 0), 5)
cv2.line(img, (Xmax , 0),(Xmax, Ymax) ,(0, 0, 0), 5)
"""
cv2.namedWindow("output2", cv2.WINDOW_NORMAL)
cv2.imshow('output2', img)
def draw_lines(img_path, image_to_data, margin = 0):
"""
Draw extracted and filtered boxes
"""
img = cv2.imread(img_path)
Xmax = img.shape[1]
Ymax = img.shape[0]
n_boxes = len(image_to_data)
for i in range(n_boxes-1):
"""
For each detected line, draw a separator between the bottom of this line and the top of the next one.
"""
(x, y, w, h) = (image_to_data[i][0], image_to_data[i][1], image_to_data[i][2], image_to_data[i][3])
y_next = image_to_data[i+1][1]
y_middle = (y+h+y_next)//2
"""
To avoid drawing a line over a word, y_middle is capped at y + h + margin, in case a whole section was not detected.
"""
y_new = min(y_middle, y+h+margin)
cv2.line(img, (x , y_new),(w, y_new) ,LINE_COLOR, LINE_WIDTH)
#cv2.line(img, (0 , y+h ), (Xmax + w, y + h), (0, 255, 0), 1)
#cv2.rectangle(img, (x, y), ( x + w, y + h), (0, 255, 0), 1)
cv2.line(img, (0 , 0),(0, Ymax) ,LINE_COLOR, 5)
cv2.line(img, (0 , 0),(Xmax, 0) ,LINE_COLOR, 5)
cv2.line(img, (0, Ymax),(Xmax, Ymax) ,LINE_COLOR, 5)
cv2.line(img, (Xmax , 0),(Xmax, Ymax) ,LINE_COLOR, 5)
#cv2.namedWindow("output", cv2.WINDOW_NORMAL)
#cv2.imshow('output', img)
return img
def check_intersection(elem1, elem2):
for l in elem1:
if l in elem2:
return True
return False
## Processing extracted boxes
def check_polygon_intersection(p1, p2):
if p1.distance(p2) == 0 :
return True
return False
def create_polygon(x, y, w, h):
p = Polygon([(x, y),(x+w, y),(x+w, y + h),(x, y + h)])
return p
def filter_boxes(image_to_data, ind):
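# Keep only boxes whose recognized text is longer than CHAR_THRESHOLD and that are wider than tall (likely real text lines).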
text = image_to_data["text"][ind]
h = image_to_data["height"][ind]
w = image_to_data["width"][ind]
if len(text) > CHAR_THRESHOLD and w > h:
return True
return False
def process_image_to_data(image_to_data, Xmax, Ymax):
boxes_list = list()
boxes_list.append([0, 0, 0, 0])
all_zero_distance = list()
n_boxes = len(image_to_data['level'])
"""
A first loop to merge close boxes
"""
for i in range(n_boxes):
if filter_boxes(image_to_data, i) :
(y, h) = (image_to_data['top'][i], image_to_data['height'][i])
p1 = create_polygon(0, y, Xmax, h)
n_b = len(boxes_list)
flag = 0
zero_distance = list()
for j in range(n_b):
elem = boxes_list[j]
p2 = create_polygon(elem[0], elem[1], elem[2], elem[3])
if check_polygon_intersection(p1, p2):
zero_distance.append(j)
new_y = min(y, elem[1])
new_h = max(y+h, elem[1] + elem[3]) - min(y, elem[1])
new_elem = [0, new_y, Xmax, new_h]
boxes_list[j]=new_elem
flag = 1
if flag == 0 :
new_elem = [0, y, Xmax, h]
boxes_list.append(new_elem)
return boxes_list
def clean_loop(boxes_list):
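# Second pass over the merged boxes: group boxes that still intersect and collapse each group into one full-width band covering its combined vertical extent.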
Xmax = boxes_list[1][2]
n = len(boxes_list)
global_flag = 0
all_to_be_merged = list()
used_ind = list()
for i in range(n):
if i not in used_ind:
to_be_merged = list()
boxe1 = boxes_list[i]
p1 = create_polygon(boxe1[0],boxe1[1],boxe1[2],boxe1[3])
m = len(boxes_list)
for j in range(m):
if j not in used_ind:
boxe2=boxes_list[j]
p2 = create_polygon(boxe2[0],boxe2[1],boxe2[2],boxe2[3])
if check_polygon_intersection(p1, p2):
to_be_merged.append(boxe2)
used_ind.append(j)
all_to_be_merged.append(to_be_merged)
n_detected = len(all_to_be_merged)
new_boxes_list = list()
for i in range(n_detected):
small_list = all_to_be_merged[i]
p = len(small_list)
new_y = min([boxe[1] for boxe in small_list])
new_h = max([boxe[1] + boxe[3] - new_y for boxe in small_list])
new_elem = [0, new_y, Xmax, new_h]
new_boxes_list.append(new_elem)
return new_boxes_list
def process_table(img_path,draw_path):
#try:
image_to_data, Xmax, Ymax = get_image_data(img_path)
image_to_data = process_image_to_data(image_to_data, Xmax, Ymax)
image_to_data = clean_loop(image_to_data)
img = draw_lines(img_path, image_to_data, margin =2)
image_name = os.path.basename(img_path).split(os.extsep)[0].replace(" ", "_")
processed_im_path = os.path.join(draw_path, image_name + 'pro.png')
cv2.imwrite(processed_im_path, img)
def process_path(file_path,draw_path):
all_files = listdir(file_path)
n = len(all_files)
for i in range(n):
f = all_files[i]
img_path = join(file_path, f)
process_table(img_path,draw_path)
|
[
"cv2.line",
"shapely.geometry.Polygon",
"os.path.basename",
"cv2.imwrite",
"pytesseract.image_to_data",
"cv2.imread",
"cv2.rectangle",
"cv2.imshow",
"os.path.join",
"os.listdir",
"cv2.namedWindow"
] |
[((478, 498), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (488, 498), False, 'import cv2\n'), ((520, 575), 'pytesseract.image_to_data', 'pytesseract.image_to_data', (['img'], {'output_type': 'Output.DICT'}), '(img, output_type=Output.DICT)\n', (545, 575), False, 'import pytesseract\n'), ((722, 742), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (732, 742), False, 'import cv2\n'), ((1500, 1545), 'cv2.namedWindow', 'cv2.namedWindow', (['"""output2"""', 'cv2.WINDOW_NORMAL'], {}), "('output2', cv2.WINDOW_NORMAL)\n", (1515, 1545), False, 'import cv2\n'), ((1551, 1577), 'cv2.imshow', 'cv2.imshow', (['"""output2"""', 'img'], {}), "('output2', img)\n", (1561, 1577), False, 'import cv2\n'), ((1705, 1725), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (1715, 1725), False, 'import cv2\n'), ((2583, 2630), 'cv2.line', 'cv2.line', (['img', '(0, 0)', '(0, Ymax)', 'LINE_COLOR', '(5)'], {}), '(img, (0, 0), (0, Ymax), LINE_COLOR, 5)\n', (2591, 2630), False, 'import cv2\n'), ((2637, 2684), 'cv2.line', 'cv2.line', (['img', '(0, 0)', '(Xmax, 0)', 'LINE_COLOR', '(5)'], {}), '(img, (0, 0), (Xmax, 0), LINE_COLOR, 5)\n', (2645, 2684), False, 'import cv2\n'), ((2691, 2744), 'cv2.line', 'cv2.line', (['img', '(0, Ymax)', '(Xmax, Ymax)', 'LINE_COLOR', '(5)'], {}), '(img, (0, Ymax), (Xmax, Ymax), LINE_COLOR, 5)\n', (2699, 2744), False, 'import cv2\n'), ((2750, 2803), 'cv2.line', 'cv2.line', (['img', '(Xmax, 0)', '(Xmax, Ymax)', 'LINE_COLOR', '(5)'], {}), '(img, (Xmax, 0), (Xmax, Ymax), LINE_COLOR, 5)\n', (2758, 2803), False, 'import cv2\n'), ((3225, 3282), 'shapely.geometry.Polygon', 'Polygon', (['[(x, y), (x + w, y), (x + w, y + h), (x, y + h)]'], {}), '([(x, y), (x + w, y), (x + w, y + h), (x, y + h)])\n', (3232, 3282), False, 'from shapely.geometry import Polygon\n'), ((6383, 6418), 'cv2.imwrite', 'cv2.imwrite', (['processed_im_path', 'img'], {}), '(processed_im_path, img)\n', (6394, 6418), False, 'import cv2\n'), ((6480, 6498), 'os.listdir', 'listdir', (['file_path'], {}), '(file_path)\n', (6487, 6498), False, 'from os import chdir, listdir\n'), ((2368, 2429), 'cv2.line', 'cv2.line', (['img', '(x, y_new)', '(w, y_new)', 'LINE_COLOR', 'LINE_WIDTH'], {}), '(img, (x, y_new), (w, y_new), LINE_COLOR, LINE_WIDTH)\n', (2376, 2429), False, 'import cv2\n'), ((6595, 6613), 'os.path.join', 'join', (['file_path', 'f'], {}), '(file_path, f)\n', (6599, 6613), False, 'from os.path import join\n'), ((1184, 1250), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', 'LINE_COLOR', 'LINE_WIDTH'], {}), '(img, (x, y), (x + w, y + h), LINE_COLOR, LINE_WIDTH)\n', (1197, 1250), False, 'import cv2\n'), ((6250, 6276), 'os.path.basename', 'os.path.basename', (['img_path'], {}), '(img_path)\n', (6266, 6276), False, 'import os\n')]
|
from django.db import models
from django.db.models import Q
from django.utils.translation import gettext_lazy as _
from django.utils import timezone
from django.urls import reverse
from datetime import timedelta
from danceschool.core.models import (
Instructor, Location, Room, DanceRole, Event, PricingTier,
EventRegistration, Customer, StaffMember
)
from danceschool.core.constants import getConstant
from danceschool.core.mixins import EmailRecipientMixin
from danceschool.core.utils.timezone import ensure_localtime
class InstructorPrivateLessonDetails(models.Model):
instructor = models.OneToOneField(StaffMember, on_delete=models.CASCADE)
defaultPricingTier = models.ForeignKey(
PricingTier, verbose_name=_('Default Pricing Tier'), null=True,
blank=True, on_delete=models.SET_NULL
)
roles = models.ManyToManyField(DanceRole, blank=True)
couples = models.BooleanField(_('Private lessons for couples'), default=True)
smallGroups = models.BooleanField(_('Private lessons for small groups'), default=True)
def __str__(self):
return str(_('Instructor Private lesson details for %s' % self.instructor.fullName))
class Meta:
ordering = ('instructor__lastName', 'instructor__firstName')
verbose_name = _('Instructor private lesson details')
verbose_name_plural = _('Instructors\' private lesson details')
class PrivateLessonEvent(Event):
'''
This is the event object for which an individual registers. The event is created when the user books a lesson.
All of the registration logic is still handled by the core app, and this model inherits all of the fields
associated with other types of events (location, etc.)
'''
pricingTier = models.ForeignKey(
PricingTier, verbose_name=_('Pricing Tier'), null=True, blank=True,
on_delete=models.SET_NULL
)
participants = models.PositiveSmallIntegerField(_('Expected # of Participants'), null=True, blank=True, default=1)
comments = models.TextField(
_('Comments/Notes'), null=True, blank=True, help_text=_('For internal use and recordkeeping.')
)
def getBasePrice(self, **kwargs):
'''
This method overrides the method of the base Event class by
checking the pricingTier associated with this PrivateLessonEvent and getting
the appropriate price for it.
'''
if not self.pricingTier:
return None
return self.pricingTier.getBasePrice(**kwargs) * max(self.numSlots, 1)
def finalizeBooking(self, **kwargs):
notifyStudent = kwargs.get('notifyStudent', True)
notifyTeachers = kwargs.get('notifyTeachers', getConstant('privateLessons__notifyInstructor'))
eventRegistration = kwargs.get('eventRegistration', None)
affectedSlots = self.instructoravailabilityslot_set.all()
affectedSlots.update(
status=InstructorAvailabilitySlot.SlotStatus.booked,
eventRegistration=eventRegistration,
)
if notifyStudent:
# This is the email template used to notify students that their private lesson has been
# successfully scheduled
template = getConstant('privateLessons__lessonBookedEmailTemplate')
if template.defaultFromAddress and template.content:
for customer in self.customers:
customer.email_recipient(
template.subject,
template.content,
send_html=False,
from_address=template.defaultFromAddress,
from_name=template.defaultFromName,
cc=template.defaultCC,
to=customer.email,
lesson=self,
)
if notifyTeachers:
# This is the email template used to notify individuals who run registration
# that they have been compensated
template = getConstant('privateLessons__lessonBookedInstructorEmailTemplate')
if template.defaultFromAddress and template.content:
emailMixin = EmailRecipientMixin()
instructors = [
x.staffMember for x in
self.eventstaffmember_set.exclude(
Q(staffMember__privateEmail__isnull=True) & Q(staffMember__publicEmail__isnull=True)
)
]
for instructor in instructors:
if not instructor.privateEmail and not instructor.publicEmail:
# Without an email address, instructor cannot be notified
continue
emailMixin.email_recipient(
template.subject,
template.content,
send_html=False,
from_address=template.defaultFromAddress,
from_name=template.defaultFromName,
cc=template.defaultCC,
to=instructor.privateEmail or instructor.publicEmail,
lesson=self,
instructor=instructor,
customers=self.customers,
calendarUrl=reverse('privateCalendar'),
)
@property
def customers(self):
'''
List both any individuals signed up via the registration and payment system,
and any individuals signed up without payment.
'''
return Customer.objects.filter(
Q(privatelessoncustomer__lesson=self) |
Q(registration__eventregistration__event=self)
).distinct()
customers.fget.short_description = _('Customers')
@property
def numSlots(self):
''' Used for various pricing discounts related things '''
return self.instructoravailabilityslot_set.count()
@property
def discountPointsMultiplier(self):
'''
If installed, the discounts app looks for this property to determine
how many points this lesson is worth toward a discount. Since private
lesson points are based on the number of slots booked, this just returns
the number of slots associated with this event (or 1).
'''
return max(self.numSlots, 1)
def nameAndDate(self, withDate=True):
teacherNames = ' and '.join([x.staffMember.fullName for x in self.eventstaffmember_set.all()])
if self.customers:
customerNames = ' ' + ' and '.join([x.fullName for x in self.customers])
elif self.eventregistration_set.all():
names = ' and '.join([x.registration.fullName for x in self.eventregistration_set.all()])
customerNames = ' ' + names if names else ''
else:
customerNames = ''
if not teacherNames and not customerNames and not withDate:
return _('Private Lesson')
return _('Private Lesson: %s%s%s%s' % (
teacherNames,
_(' for ') if teacherNames and customerNames else '',
customerNames,
(
(', ' if (teacherNames or customerNames) else '') +
self.startTime.strftime('%Y-%m-%d')
) if withDate else ''
))
@property
def name(self):
return self.nameAndDate(withDate=True)
def save(self, *args, **kwargs):
''' Set registration status to hidden if it is not specified otherwise '''
if not self.status:
self.status = Event.RegStatus.hidden
super().save(*args, **kwargs)
def __str__(self):
return str(self.name)
class Meta:
permissions = (
('view_others_lessons', _('Can view scheduled private lessons for all instructors')),
)
verbose_name = _('Private lesson')
verbose_name_plural = _('Private lessons')
class PrivateLessonCustomer(models.Model):
'''
For private lessons that go through registration and payment, the customers
are the individuals who are registered. For private lessons that are booked
without payment, this just provides a record that they signed up for
the lesson.
'''
customer = models.ForeignKey(
Customer, verbose_name=_('Customer'), on_delete=models.CASCADE
)
lesson = models.ForeignKey(
PrivateLessonEvent, verbose_name=_('Lesson'), on_delete=models.CASCADE
)
def __str__(self):
return str(_('Private lesson customer: %s for lesson #%s' % (self.customer.fullName, self.lesson.id)))
class Meta:
unique_together = ('customer', 'lesson')
verbose_name = _('Private lesson customer')
verbose_name_plural = _('Private lesson customers')
class InstructorAvailabilitySlot(models.Model):
class SlotStatus(models.TextChoices):
available = ('A', _('Available'))
booked = ('B', _('Booked'))
tentative = ('T', _('Tentative Booking'))
unavailable = ('U', _('Unavailable'))
instructor = models.ForeignKey(Instructor, verbose_name=_('Instructor'), on_delete=models.CASCADE)
pricingTier = models.ForeignKey(
PricingTier, verbose_name=_('Pricing Tier'), null=True, blank=True, on_delete=models.SET_NULL
)
startTime = models.DateTimeField(_('Start time'))
duration = models.PositiveSmallIntegerField(_('Slot duration (minutes)'), default=30)
location = models.ForeignKey(
Location, verbose_name=_('Location'), null=True, blank=True, on_delete=models.SET_NULL,
)
room = models.ForeignKey(
Room, verbose_name=_('Room'), null=True, blank=True, on_delete=models.SET_NULL,
)
status = models.CharField(max_length=1, choices=SlotStatus.choices, default=SlotStatus.available)
# We need both a link to the registrations and a link to the event because
# in the event that an expired (temporary) Registration is deleted, we still want to
# be able to identify the Event that was created for this private lesson.
lessonEvent = models.ForeignKey(
PrivateLessonEvent, verbose_name=_('Scheduled lesson'), null=True, blank=True,
on_delete=models.SET_NULL,
)
eventRegistration = models.ForeignKey(
EventRegistration, verbose_name=_('event registration'),
null=True, blank=True, on_delete=models.SET_NULL, related_name='privateLessonSlots'
)
creationDate = models.DateTimeField(auto_now_add=True)
modifiedDate = models.DateTimeField(auto_now=True)
@property
def availableDurations(self):
'''
A lesson can always be booked for the length of a single slot, but this method
checks if multiple slots are available. This method requires that slots are
non-overlapping, which needs to be enforced on slot save.
'''
potential_slots = InstructorAvailabilitySlot.objects.filter(
instructor=self.instructor,
location=self.location,
room=self.room,
pricingTier=self.pricingTier,
startTime__gte=self.startTime,
startTime__lte=self.startTime + timedelta(minutes=getConstant('privateLessons__maximumLessonLength')),
).exclude(id=self.id).order_by('startTime')
duration_list = [self.duration, ]
last_start = self.startTime
last_duration = self.duration
max_duration = self.duration
for slot in potential_slots:
if max_duration + slot.duration > getConstant('privateLessons__maximumLessonLength'):
break
if (
slot.startTime == last_start + timedelta(minutes=last_duration) and
slot.isAvailable
):
duration_list.append(max_duration + slot.duration)
last_start = slot.startTime
last_duration = slot.duration
max_duration += slot.duration
return duration_list
@property
def availableRoles(self):
'''
Some instructors only offer private lessons for certain roles, so we should only allow booking
for the roles that have been selected for the instructor.
'''
if not hasattr(self.instructor, 'instructorprivatelessondetails'):
return []
return [
[x.id, x.name] for x in
self.instructor.instructorprivatelessondetails.roles.all()
]
def checkIfAvailable(self, dateTime=None):
'''
Available slots are available, but so are tentative slots whose hold has
passed its expiration date.
'''
# Evaluate the default per call; a default argument would be frozen at import time.
if dateTime is None:
dateTime = timezone.now()
return (
self.startTime >= dateTime + timedelta(days=getConstant('privateLessons__closeBookingDays')) and
self.startTime <= dateTime + timedelta(days=getConstant('privateLessons__openBookingDays')) and not
self.eventRegistration and (
self.status == self.SlotStatus.available or (
self.status == self.SlotStatus.tentative and
getattr(
getattr(
getattr(self.eventRegistration, 'invoiceItem', None),
'invoice', None
),
'expirationDate',
timezone.now()
) <= timezone.now()
)
)
)
# isAvailable indicates if a slot is currently available
isAvailable = property(fget=checkIfAvailable)
isAvailable.fget.short_description = _('Available')
@property
def name(self):
return _('%s: %s at %s') % (
self.instructor.fullName,
ensure_localtime(self.startTime).strftime('%b %-d, %Y %-I:%M %p'),
self.location
)
def __str__(self):
return str(self.name)
class Meta:
ordering = ('-startTime', 'instructor__lastName', 'instructor__firstName')
verbose_name = _('Private lesson availability slot')
verbose_name_plural = _('Private lesson availability slots')
permissions = (
('edit_own_availability', _('Can edit one\'s own private lesson availability.')),
('edit_others_availability', _('Can edit other instructors\' private lesson availability.')),
)
|
[
"django.db.models.OneToOneField",
"danceschool.core.mixins.EmailRecipientMixin",
"django.db.models.ManyToManyField",
"django.utils.translation.gettext_lazy",
"django.db.models.CharField",
"django.utils.timezone.now",
"django.db.models.Q",
"danceschool.core.utils.timezone.ensure_localtime",
"django.urls.reverse",
"datetime.timedelta",
"django.db.models.DateTimeField",
"danceschool.core.constants.getConstant"
] |
[((601, 660), 'django.db.models.OneToOneField', 'models.OneToOneField', (['StaffMember'], {'on_delete': 'models.CASCADE'}), '(StaffMember, on_delete=models.CASCADE)\n', (621, 660), False, 'from django.db import models\n'), ((841, 886), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['DanceRole'], {'blank': '(True)'}), '(DanceRole, blank=True)\n', (863, 886), False, 'from django.db import models\n'), ((5789, 5803), 'django.utils.translation.gettext_lazy', '_', (['"""Customers"""'], {}), "('Customers')\n", (5790, 5803), True, 'from django.utils.translation import gettext_lazy as _\n'), ((9751, 9844), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1)', 'choices': 'SlotStatus.choices', 'default': 'SlotStatus.available'}), '(max_length=1, choices=SlotStatus.choices, default=\n SlotStatus.available)\n', (9767, 9844), False, 'from django.db import models\n'), ((10478, 10517), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (10498, 10517), False, 'from django.db import models\n'), ((10537, 10572), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (10557, 10572), False, 'from django.db import models\n'), ((13622, 13636), 'django.utils.translation.gettext_lazy', '_', (['"""Available"""'], {}), "('Available')\n", (13623, 13636), True, 'from django.utils.translation import gettext_lazy as _\n'), ((922, 954), 'django.utils.translation.gettext_lazy', '_', (['"""Private lessons for couples"""'], {}), "('Private lessons for couples')\n", (923, 954), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1008, 1045), 'django.utils.translation.gettext_lazy', '_', (['"""Private lessons for small groups"""'], {}), "('Private lessons for small groups')\n", (1009, 1045), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1287, 1325), 'django.utils.translation.gettext_lazy', '_', (['"""Instructor private lesson details"""'], {}), "('Instructor private lesson details')\n", (1288, 1325), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1356, 1396), 'django.utils.translation.gettext_lazy', '_', (['"""Instructors\' private lesson details"""'], {}), '("Instructors\' private lesson details")\n', (1357, 1396), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1940, 1971), 'django.utils.translation.gettext_lazy', '_', (['"""Expected # of Participants"""'], {}), "('Expected # of Participants')\n", (1941, 1971), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2048, 2067), 'django.utils.translation.gettext_lazy', '_', (['"""Comments/Notes"""'], {}), "('Comments/Notes')\n", (2049, 2067), True, 'from django.utils.translation import gettext_lazy as _\n'), ((7894, 7913), 'django.utils.translation.gettext_lazy', '_', (['"""Private lesson"""'], {}), "('Private lesson')\n", (7895, 7913), True, 'from django.utils.translation import gettext_lazy as _\n'), ((7944, 7964), 'django.utils.translation.gettext_lazy', '_', (['"""Private lessons"""'], {}), "('Private lessons')\n", (7945, 7964), True, 'from django.utils.translation import gettext_lazy as _\n'), ((8728, 8756), 'django.utils.translation.gettext_lazy', '_', (['"""Private lesson customer"""'], {}), "('Private lesson customer')\n", (8729, 8756), True, 'from django.utils.translation import gettext_lazy as _\n'), ((8787, 8816), 'django.utils.translation.gettext_lazy', '_', (['"""Private lesson customers"""'], {}), "('Private 
lesson customers')\n", (8788, 8816), True, 'from django.utils.translation import gettext_lazy as _\n'), ((9370, 9385), 'django.utils.translation.gettext_lazy', '_', (['"""Start time"""'], {}), "('Start time')\n", (9371, 9385), True, 'from django.utils.translation import gettext_lazy as _\n'), ((9435, 9463), 'django.utils.translation.gettext_lazy', '_', (['"""Slot duration (minutes)"""'], {}), "('Slot duration (minutes)')\n", (9436, 9463), True, 'from django.utils.translation import gettext_lazy as _\n'), ((12513, 12527), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (12525, 12527), False, 'from django.utils import timezone\n'), ((14039, 14076), 'django.utils.translation.gettext_lazy', '_', (['"""Private lesson availability slot"""'], {}), "('Private lesson availability slot')\n", (14040, 14076), True, 'from django.utils.translation import gettext_lazy as _\n'), ((14107, 14145), 'django.utils.translation.gettext_lazy', '_', (['"""Private lesson availability slots"""'], {}), "('Private lesson availability slots')\n", (14108, 14145), True, 'from django.utils.translation import gettext_lazy as _\n'), ((739, 764), 'django.utils.translation.gettext_lazy', '_', (['"""Default Pricing Tier"""'], {}), "('Default Pricing Tier')\n", (740, 764), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1104, 1176), 'django.utils.translation.gettext_lazy', '_', (["('Instructor Private lesson details for %s' % self.instructor.fullName)"], {}), "('Instructor Private lesson details for %s' % self.instructor.fullName)\n", (1105, 1176), True, 'from django.utils.translation import gettext_lazy as _\n'), ((1806, 1823), 'django.utils.translation.gettext_lazy', '_', (['"""Pricing Tier"""'], {}), "('Pricing Tier')\n", (1807, 1823), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2102, 2142), 'django.utils.translation.gettext_lazy', '_', (['"""For internal use and recordkeeping."""'], {}), "('For internal use and recordkeeping.')\n", (2103, 2142), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2693, 2740), 'danceschool.core.constants.getConstant', 'getConstant', (['"""privateLessons__notifyInstructor"""'], {}), "('privateLessons__notifyInstructor')\n", (2704, 2740), False, 'from danceschool.core.constants import getConstant\n'), ((3217, 3273), 'danceschool.core.constants.getConstant', 'getConstant', (['"""privateLessons__lessonBookedEmailTemplate"""'], {}), "('privateLessons__lessonBookedEmailTemplate')\n", (3228, 3273), False, 'from danceschool.core.constants import getConstant\n'), ((4020, 4086), 'danceschool.core.constants.getConstant', 'getConstant', (['"""privateLessons__lessonBookedInstructorEmailTemplate"""'], {}), "('privateLessons__lessonBookedInstructorEmailTemplate')\n", (4031, 4086), False, 'from danceschool.core.constants import getConstant\n'), ((6981, 7000), 'django.utils.translation.gettext_lazy', '_', (['"""Private Lesson"""'], {}), "('Private Lesson')\n", (6982, 7000), True, 'from django.utils.translation import gettext_lazy as _\n'), ((8341, 8354), 'django.utils.translation.gettext_lazy', '_', (['"""Customer"""'], {}), "('Customer')\n", (8342, 8354), True, 'from django.utils.translation import gettext_lazy as _\n'), ((8460, 8471), 'django.utils.translation.gettext_lazy', '_', (['"""Lesson"""'], {}), "('Lesson')\n", (8461, 8471), True, 'from django.utils.translation import gettext_lazy as _\n'), ((8547, 8641), 'django.utils.translation.gettext_lazy', '_', (["('Private lesson customer: %s for lesson #%s' % 
(self.customer.fullName,\n self.lesson.id))"], {}), "('Private lesson customer: %s for lesson #%s' % (self.customer.fullName,\n self.lesson.id))\n", (8548, 8641), True, 'from django.utils.translation import gettext_lazy as _\n'), ((8936, 8950), 'django.utils.translation.gettext_lazy', '_', (['"""Available"""'], {}), "('Available')\n", (8937, 8950), True, 'from django.utils.translation import gettext_lazy as _\n'), ((8975, 8986), 'django.utils.translation.gettext_lazy', '_', (['"""Booked"""'], {}), "('Booked')\n", (8976, 8986), True, 'from django.utils.translation import gettext_lazy as _\n'), ((9014, 9036), 'django.utils.translation.gettext_lazy', '_', (['"""Tentative Booking"""'], {}), "('Tentative Booking')\n", (9015, 9036), True, 'from django.utils.translation import gettext_lazy as _\n'), ((9066, 9082), 'django.utils.translation.gettext_lazy', '_', (['"""Unavailable"""'], {}), "('Unavailable')\n", (9067, 9082), True, 'from django.utils.translation import gettext_lazy as _\n'), ((9145, 9160), 'django.utils.translation.gettext_lazy', '_', (['"""Instructor"""'], {}), "('Instructor')\n", (9146, 9160), True, 'from django.utils.translation import gettext_lazy as _\n'), ((9259, 9276), 'django.utils.translation.gettext_lazy', '_', (['"""Pricing Tier"""'], {}), "('Pricing Tier')\n", (9260, 9276), True, 'from django.utils.translation import gettext_lazy as _\n'), ((9542, 9555), 'django.utils.translation.gettext_lazy', '_', (['"""Location"""'], {}), "('Location')\n", (9543, 9555), True, 'from django.utils.translation import gettext_lazy as _\n'), ((9670, 9679), 'django.utils.translation.gettext_lazy', '_', (['"""Room"""'], {}), "('Room')\n", (9671, 9679), True, 'from django.utils.translation import gettext_lazy as _\n'), ((10165, 10186), 'django.utils.translation.gettext_lazy', '_', (['"""Scheduled lesson"""'], {}), "('Scheduled lesson')\n", (10166, 10186), True, 'from django.utils.translation import gettext_lazy as _\n'), ((10335, 10358), 'django.utils.translation.gettext_lazy', '_', (['"""event registration"""'], {}), "('event registration')\n", (10336, 10358), True, 'from django.utils.translation import gettext_lazy as _\n'), ((13687, 13704), 'django.utils.translation.gettext_lazy', '_', (['"""%s: %s at %s"""'], {}), "('%s: %s at %s')\n", (13688, 13704), True, 'from django.utils.translation import gettext_lazy as _\n'), ((4182, 4203), 'danceschool.core.mixins.EmailRecipientMixin', 'EmailRecipientMixin', ([], {}), '()\n', (4201, 4203), False, 'from danceschool.core.mixins import EmailRecipientMixin\n'), ((7798, 7857), 'django.utils.translation.gettext_lazy', '_', (['"""Can view scheduled private lessons for all instructors"""'], {}), "('Can view scheduled private lessons for all instructors')\n", (7799, 7857), True, 'from django.utils.translation import gettext_lazy as _\n'), ((11547, 11597), 'danceschool.core.constants.getConstant', 'getConstant', (['"""privateLessons__maximumLessonLength"""'], {}), "('privateLessons__maximumLessonLength')\n", (11558, 11597), False, 'from danceschool.core.constants import getConstant\n'), ((14209, 14261), 'django.utils.translation.gettext_lazy', '_', (['"""Can edit one\'s own private lesson availability."""'], {}), '("Can edit one\'s own private lesson availability.")\n', (14210, 14261), True, 'from django.utils.translation import gettext_lazy as _\n'), ((14306, 14367), 'django.utils.translation.gettext_lazy', '_', (['"""Can edit other instructors\' private lesson availability."""'], {}), '("Can edit other instructors\' private lesson availability.")\n', 
(14307, 14367), True, 'from django.utils.translation import gettext_lazy as _\n'), ((5630, 5667), 'django.db.models.Q', 'Q', ([], {'privatelessoncustomer__lesson': 'self'}), '(privatelessoncustomer__lesson=self)\n', (5631, 5667), False, 'from django.db.models import Q\n'), ((5682, 5728), 'django.db.models.Q', 'Q', ([], {'registration__eventregistration__event': 'self'}), '(registration__eventregistration__event=self)\n', (5683, 5728), False, 'from django.db.models import Q\n'), ((7088, 7098), 'django.utils.translation.gettext_lazy', '_', (['""" for """'], {}), "(' for ')\n", (7089, 7098), True, 'from django.utils.translation import gettext_lazy as _\n'), ((11685, 11717), 'datetime.timedelta', 'timedelta', ([], {'minutes': 'last_duration'}), '(minutes=last_duration)\n', (11694, 11717), False, 'from datetime import timedelta\n'), ((13413, 13427), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (13425, 13427), False, 'from django.utils import timezone\n'), ((13759, 13791), 'danceschool.core.utils.timezone.ensure_localtime', 'ensure_localtime', (['self.startTime'], {}), '(self.startTime)\n', (13775, 13791), False, 'from danceschool.core.utils.timezone import ensure_localtime\n'), ((5324, 5350), 'django.urls.reverse', 'reverse', (['"""privateCalendar"""'], {}), "('privateCalendar')\n", (5331, 5350), False, 'from django.urls import reverse\n'), ((12759, 12806), 'danceschool.core.constants.getConstant', 'getConstant', (['"""privateLessons__closeBookingDays"""'], {}), "('privateLessons__closeBookingDays')\n", (12770, 12806), False, 'from danceschool.core.constants import getConstant\n'), ((12868, 12914), 'danceschool.core.constants.getConstant', 'getConstant', (['"""privateLessons__openBookingDays"""'], {}), "('privateLessons__openBookingDays')\n", (12879, 12914), False, 'from danceschool.core.constants import getConstant\n'), ((13373, 13387), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (13385, 13387), False, 'from django.utils import timezone\n'), ((4359, 4400), 'django.db.models.Q', 'Q', ([], {'staffMember__privateEmail__isnull': '(True)'}), '(staffMember__privateEmail__isnull=True)\n', (4360, 4400), False, 'from django.db.models import Q\n'), ((4403, 4443), 'django.db.models.Q', 'Q', ([], {'staffMember__publicEmail__isnull': '(True)'}), '(staffMember__publicEmail__isnull=True)\n', (4404, 4443), False, 'from django.db.models import Q\n'), ((11204, 11254), 'danceschool.core.constants.getConstant', 'getConstant', (['"""privateLessons__maximumLessonLength"""'], {}), "('privateLessons__maximumLessonLength')\n", (11215, 11254), False, 'from danceschool.core.constants import getConstant\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Description: Templatetags test units.
"""
__author__ = "<NAME> (<EMAIL>)"
from decimal import Decimal
from django.test import TestCase
from currency.models import Currency
from currency.templatetags.to_currency import to_currency
class ToCurrencyTestCase(TestCase):
"""
Test unit for ```to_currency``` template tag.
"""
def setUp(self):
super(ToCurrencyTestCase, self).setUp()
def tearDown(self):
super(ToCurrencyTestCase, self).tearDown()
def test_to_currency(self):
"""
Tests a successful currency conversion when the scale exists in the model.
"""
currency = Currency.objects.get(code='ARS')
self.assertEquals(Decimal('13'),
to_currency(Decimal('1.55'), currency))
currency = Currency.objects.get(code='USD')
self.assertEquals(Decimal('2.0'),
to_currency(Decimal('1.55'), currency))
|
[
"currency.models.Currency.objects.get",
"decimal.Decimal"
] |
[((690, 722), 'currency.models.Currency.objects.get', 'Currency.objects.get', ([], {'code': '"""ARS"""'}), "(code='ARS')\n", (710, 722), False, 'from currency.models import Currency\n'), ((850, 882), 'currency.models.Currency.objects.get', 'Currency.objects.get', ([], {'code': '"""USD"""'}), "(code='USD')\n", (870, 882), False, 'from currency.models import Currency\n'), ((749, 762), 'decimal.Decimal', 'Decimal', (['"""13"""'], {}), "('13')\n", (756, 762), False, 'from decimal import Decimal\n'), ((909, 923), 'decimal.Decimal', 'Decimal', (['"""2.0"""'], {}), "('2.0')\n", (916, 923), False, 'from decimal import Decimal\n'), ((802, 817), 'decimal.Decimal', 'Decimal', (['"""1.55"""'], {}), "('1.55')\n", (809, 817), False, 'from decimal import Decimal\n'), ((963, 978), 'decimal.Decimal', 'Decimal', (['"""1.55"""'], {}), "('1.55')\n", (970, 978), False, 'from decimal import Decimal\n')]
|
import math
import click
import os.path
import shutil
import atoms_simulator
import numpy
import matplotlib.pyplot as plt
def get_project_path():
return os.path.dirname(atoms_simulator.__file__)
def get_path(path):
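# Return the first numbered variant f"{path}{i}" (i = 1, 2, ...) that does not exist yet.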
i = 1
while True:
if not os.path.lexists(f"{path}{i}"):
return f"{path}{i}"
i += 1
@click.group()
def ats():
"""Allows to perform detailed tests using atoms_simulator module."""
pass
@ats.command()
def init():
"""Creates a settings_ats.toml file in the current directory."""
if not os.path.isfile("settings_ats.toml"):
source = os.path.join(get_project_path(), "assets/settings_source.toml")
target = os.path.join(os.getcwd(), "settings_ats.toml")
shutil.copy(source, target)
click.echo("Settings file generated successfully.")
else:
click.echo("Settings file already exists. Please delete it in order to generate a new configuration file.")
@ats.command()
@click.option("-g", "--graphics", "graphics", help="Turn on pygame simulation", is_flag=True)
@click.option("--no-save", "no_save", help="Disable saving the results of the test.", is_flag=True)
def test(graphics, no_save):
"""Performs a series of tests based on the data in the settings_ats.toml file."""
settings_ats = atoms_simulator.Settings("settings_ats.toml")
if not settings_ats.load():
click.echo("No settings file detected. Generate the file first.")
return
if settings_ats["N_min"] is None:
click.echo("The settings file is corrupted, please generate a new settings file.")
return
if settings_ats["N_step"] is None:
click.echo("The settings file is corrupted, please generate a new settings file.")
return
if settings_ats["N_number"] is None:
click.echo("The settings file is corrupted, please generate a new settings file.")
return
if settings_ats["R"] is None:
click.echo("The settings file is corrupted, please generate a new settings file.")
return
click.echo("Starting simulation...")
n_stop = settings_ats["N_min"] + settings_ats["N_step"] * (settings_ats["N_number"] - 1)
# size = max([settings_ats['h'], settings_ats['w'], math.ceil((4 * (n_stop + 1)) ** 0.5)])
# settings_ats['h'] = size
# settings_ats['w'] = size
test_cases = [
[i for _ in range(settings_ats['R'])] for i in range(settings_ats["N_min"], n_stop + 1, settings_ats["N_step"])
]
bounce = numpy.empty((len(test_cases), settings_ats['R']), dtype=int)
bounce_results = numpy.empty(len(test_cases), dtype=int)
cop = numpy.empty((len(test_cases), settings_ats['R']), dtype=float)
cop_results = numpy.empty(len(test_cases), dtype=float)
settings_ats.new('N', settings_ats["N_min"])
with click.progressbar(
range(len(test_cases) * settings_ats['R'] - 1, -1, -1), label="Performing simulations:", show_eta=False
) as progress:
for i in progress:
settings_ats['N'] = test_cases[i // settings_ats['R']][i % settings_ats['R']]
try:
bounce[i // settings_ats['R']][i % settings_ats['R']], \
cop[i // settings_ats['R']][i % settings_ats['R']] = atoms_simulator.simulate(settings_ats, graphics)
except ValueError as error:
click.echo(f"\n{error} Please generate a new settings file.")
return
if i % settings_ats['R'] == 0:
bounce_results[i // settings_ats['R']] = int(bounce[i // settings_ats['R']].mean())
cop_results[i // settings_ats['R']] = cop[i // settings_ats['R']].mean()
if not no_save:
if not os.path.isdir(results_path := os.path.join(os.getcwd(), "ats_results")):
os.mkdir(results_path)
target_path = get_path(os.path.join(results_path, "data_batch"))
os.mkdir(target_path)
numpy.savetxt(os.path.join(target_path, "bounces.csv"), bounce_results)
numpy.savetxt(os.path.join(target_path, "change_of_position.csv"), cop_results)
settings_ats.save(target=os.path.join(target_path, "used.toml"))
@ats.command()
@click.option("-b", "--data_batch", "data_batch", prompt=True, help="Name of the previously generated data batch.")
def plot(data_batch):
"""Plots the previously generated data."""
if not os.path.isdir(results_path := os.path.join(os.getcwd(), "ats_results")):
click.echo(
"The ats_results catalog doesn't exist within the current working directory. Generate some data first."
)
return
if not os.path.isdir(path := os.path.join(os.getcwd(), "ats_results", data_batch)):
click.echo(
f"The ats_results/{data_batch} catalog doesn't exist within the current working directory."
)
return
target_path = get_path(os.path.join(results_path, "figures_batch"))
os.mkdir(target_path)
settings_ats = atoms_simulator.Settings(os.path.join(path, "used.toml"))
if not (settings_ats.load() and os.path.isfile(os.path.join(path, "bounces.csv"))
and os.path.isfile(os.path.join(path, "change_of_position.csv"))):
click.echo("This data batch is corrupted.")
return
n_stop = settings_ats["N_min"] + settings_ats["N_step"] * (settings_ats["N_number"] - 1)
x = numpy.arange(settings_ats["N_min"], n_stop + 1, settings_ats["N_step"])
bounce = numpy.loadtxt(os.path.join(path, "bounces.csv"))
plt.plot(x, bounce, marker='o')
plt.title(f"Zależność liczby zderzeń od ilości atomów, M = {settings_ats['M']}")
plt.xlabel("Liczba atomów w pojemniku")
plt.ylabel("Liczba odbić atomu czerownego")
plt.grid(True)
plt.savefig(os.path.join(target_path, "bounces.png"))
plt.clf()
cop = numpy.loadtxt(os.path.join(path, "change_of_position.csv"))
plt.plot(x, cop, marker='o')
plt.title(f"Zależność średniej drogi swobodnej od ilości atomów, M = {settings_ats['M']}")
plt.xlabel("Liczba atomów w pojemniku")
plt.ylabel("Średnia droga swobodna atomu czerwonego")
plt.grid(True)
plt.savefig(os.path.join(target_path, "change_of_position.png"))
plt.clf()
settings_ats.save(os.path.join(target_path, "used.toml"))
click.echo("Figures created successfullly.")
|
[
"matplotlib.pyplot.title",
"atoms_simulator.Settings",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"atoms_simulator.simulate",
"click.option",
"click.echo",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"click.group",
"matplotlib.pyplot.grid",
"shutil.copy",
"matplotlib.pyplot.xlabel"
] |
[((345, 358), 'click.group', 'click.group', ([], {}), '()\n', (356, 358), False, 'import click\n'), ((983, 1080), 'click.option', 'click.option', (['"""-g"""', '"""--graphics"""', '"""graphics"""'], {'help': '"""Turn on pygame simulation"""', 'is_flag': '(True)'}), "('-g', '--graphics', 'graphics', help=\n 'Turn on pygame simulation', is_flag=True)\n", (995, 1080), False, 'import click\n'), ((1077, 1180), 'click.option', 'click.option', (['"""--no-save"""', '"""no_save"""'], {'help': '"""Disable saving the results of the test."""', 'is_flag': '(True)'}), "('--no-save', 'no_save', help=\n 'Disable saving the results of the test.', is_flag=True)\n", (1089, 1180), False, 'import click\n'), ((4172, 4291), 'click.option', 'click.option', (['"""-b"""', '"""--data_batch"""', '"""data_batch"""'], {'prompt': '(True)', 'help': '"""Name of the previously generated data batch."""'}), "('-b', '--data_batch', 'data_batch', prompt=True, help=\n 'Name of the previously generated data batch.')\n", (4184, 4291), False, 'import click\n'), ((1310, 1355), 'atoms_simulator.Settings', 'atoms_simulator.Settings', (['"""settings_ats.toml"""'], {}), "('settings_ats.toml')\n", (1334, 1355), False, 'import atoms_simulator\n'), ((2057, 2093), 'click.echo', 'click.echo', (['"""Starting simulation..."""'], {}), "('Starting simulation...')\n", (2067, 2093), False, 'import click\n'), ((5346, 5417), 'numpy.arange', 'numpy.arange', (["settings_ats['N_min']", '(n_stop + 1)', "settings_ats['N_step']"], {}), "(settings_ats['N_min'], n_stop + 1, settings_ats['N_step'])\n", (5358, 5417), False, 'import numpy\n'), ((5484, 5515), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'bounce'], {'marker': '"""o"""'}), "(x, bounce, marker='o')\n", (5492, 5515), True, 'import matplotlib.pyplot as plt\n'), ((5520, 5605), 'matplotlib.pyplot.title', 'plt.title', (['f"""Zależność liczby zderzeń od ilości atomów, M = {settings_ats[\'M\']}"""'], {}), '(f"Zależność liczby zderzeń od ilości atomów, M = {settings_ats[\'M\']}"\n )\n', (5529, 5605), True, 'import matplotlib.pyplot as plt\n'), ((5605, 5644), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Liczba atomów w pojemniku"""'], {}), "('Liczba atomów w pojemniku')\n", (5615, 5644), True, 'import matplotlib.pyplot as plt\n'), ((5649, 5692), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Liczba odbić atomu czerownego"""'], {}), "('Liczba odbić atomu czerownego')\n", (5659, 5692), True, 'import matplotlib.pyplot as plt\n'), ((5697, 5711), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5705, 5711), True, 'import matplotlib.pyplot as plt\n'), ((5774, 5783), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5781, 5783), True, 'import matplotlib.pyplot as plt\n'), ((5859, 5887), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'cop'], {'marker': '"""o"""'}), "(x, cop, marker='o')\n", (5867, 5887), True, 'import matplotlib.pyplot as plt\n'), ((5892, 5992), 'matplotlib.pyplot.title', 'plt.title', (['f"""Zależność średniej drogi swobodnej od ilości atomów, M = {settings_ats[\'M\']}"""'], {}), '(\n f"Zależność średniej drogi swobodnej od ilości atomów, M = {settings_ats[\'M\']}"\n )\n', (5901, 5992), True, 'import matplotlib.pyplot as plt\n'), ((5987, 6026), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Liczba atomów w pojemniku"""'], {}), "('Liczba atomów w pojemniku')\n", (5997, 6026), True, 'import matplotlib.pyplot as plt\n'), ((6031, 6084), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Średnia droga swobodna atomu czerwonego"""'], {}), "('Średnia droga swobodna atomu 
czerwonego')\n", (6041, 6084), True, 'import matplotlib.pyplot as plt\n'), ((6089, 6103), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6097, 6103), True, 'import matplotlib.pyplot as plt\n'), ((6177, 6186), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6184, 6186), True, 'import matplotlib.pyplot as plt\n'), ((6254, 6298), 'click.echo', 'click.echo', (['"""Figures created successfullly."""'], {}), "('Figures created successfullly.')\n", (6264, 6298), False, 'import click\n'), ((751, 778), 'shutil.copy', 'shutil.copy', (['source', 'target'], {}), '(source, target)\n', (762, 778), False, 'import shutil\n'), ((787, 838), 'click.echo', 'click.echo', (['"""Settings file generated successfully."""'], {}), "('Settings file generated successfully.')\n", (797, 838), False, 'import click\n'), ((857, 974), 'click.echo', 'click.echo', (['"""Settings file already exists. Please delete it in order to generate a new configuration file."""'], {}), "(\n 'Settings file already exists. Please delete it in order to generate a new configuration file.'\n )\n", (867, 974), False, 'import click\n'), ((1396, 1461), 'click.echo', 'click.echo', (['"""No settings file detected. Generate the file first."""'], {}), "('No settings file detected. Generate the file first.')\n", (1406, 1461), False, 'import click\n'), ((1523, 1610), 'click.echo', 'click.echo', (['"""The settings file is corrupted, please generate a new settings file."""'], {}), "(\n 'The settings file is corrupted, please generate a new settings file.')\n", (1533, 1610), False, 'import click\n'), ((1668, 1755), 'click.echo', 'click.echo', (['"""The settings file is corrupted, please generate a new settings file."""'], {}), "(\n 'The settings file is corrupted, please generate a new settings file.')\n", (1678, 1755), False, 'import click\n'), ((1815, 1902), 'click.echo', 'click.echo', (['"""The settings file is corrupted, please generate a new settings file."""'], {}), "(\n 'The settings file is corrupted, please generate a new settings file.')\n", (1825, 1902), False, 'import click\n'), ((1955, 2042), 'click.echo', 'click.echo', (['"""The settings file is corrupted, please generate a new settings file."""'], {}), "(\n 'The settings file is corrupted, please generate a new settings file.')\n", (1965, 2042), False, 'import click\n'), ((4448, 4573), 'click.echo', 'click.echo', (['"""The ats_results catalog doesn\'t exist within the current working directory. Generate some data first."""'], {}), '(\n "The ats_results catalog doesn\'t exist within the current working directory. Generate some data first."\n )\n', (4458, 4573), False, 'import click\n'), ((4697, 4810), 'click.echo', 'click.echo', (['f"""The ats_results/{data_batch} catalog doesn\'t exist within the current working directory."""'], {}), '(\n f"The ats_results/{data_batch} catalog doesn\'t exist within the current working directory."\n )\n', (4707, 4810), False, 'import click\n'), ((5186, 5229), 'click.echo', 'click.echo', (['"""This data batch is corrupted."""'], {}), "('This data batch is corrupted.')\n", (5196, 5229), False, 'import click\n'), ((3245, 3293), 'atoms_simulator.simulate', 'atoms_simulator.simulate', (['settings_ats', 'graphics'], {}), '(settings_ats, graphics)\n', (3269, 3293), False, 'import atoms_simulator\n'), ((3350, 3414), 'click.echo', 'click.echo', (['f"""\n{error} Please generate a new settings file."""'], {}), '(f"""\n{error} Please generate a new settings file.""")\n', (3360, 3414), False, 'import click\n')]
|
import numpy as np
from scipy.optimize import linprog
c = [10, 3.8, 1.5]
A_ub = [
[1, 1, 1],
[-1, -1, -1],
[-1, -1. / 3., -1. / 6.]]
b_ub = [18, -12, -9]
res = linprog(c, A_ub=A_ub, b_ub=b_ub)
print(res)
|
[
"scipy.optimize.linprog"
] |
[((175, 207), 'scipy.optimize.linprog', 'linprog', (['c'], {'A_ub': 'A_ub', 'b_ub': 'b_ub'}), '(c, A_ub=A_ub, b_ub=b_ub)\n', (182, 207), False, 'from scipy.optimize import linprog\n')]
|
#!/usr/bin/env python
# Processes OpenEthereum warp snapshot and collects 4-byte code prefixes of all accounts.
#
# openethereum --chain=kovan snapshot --snapshot-threads=8 snapshot.warp
# warp2code-prefixes.py snapshot.warp
import sys
import rlp
import snappy
import collections
prefix_map = collections.defaultdict(int)
filename = sys.argv[1]
print(f"{filename=}")
with open(filename, 'rb') as f:
f.seek(0,2)
size = f.tell()
print(f"{size=}")
f.seek(-8,2)
manifest_end = f.tell()
manifest_off_bytes = f.read(8)
print(f"{manifest_off_bytes=}")
manifest_off = int.from_bytes(manifest_off_bytes, 'little')
print(f"{manifest_off=}")
f.seek(manifest_off,0)
manifest_bytes = f.read(manifest_end-manifest_off)
manifest = rlp.decode(manifest_bytes)
manifest_ver = int.from_bytes(manifest[0], 'big')
block_number = int.from_bytes(manifest[4], 'big')
block_hash = manifest[5]
print(f"{manifest_ver=}")
print(f"{block_number=}")
print(f"block_hash={block_hash.hex()}")
state_chunks = manifest[1]
num_chunks=len(state_chunks)
print(f"{num_chunks=}")
for i in range(num_chunks):
info = state_chunks[i]
chunk_len = int.from_bytes(info[1], 'big')
chunk_pos = int.from_bytes(info[2], 'big')
print(f"{i}/{num_chunks}: {chunk_pos=} {chunk_len=}", end='')
f.seek(chunk_pos)
chunk_compressed = f.read(chunk_len)
chunk_bytes = snappy.uncompress(chunk_compressed)
chunk = rlp.decode(chunk_bytes)
print(f" uncompressed_len={len(chunk_bytes)} num_accounts={len(chunk)}", flush=True)
for entry in chunk:
acc = entry[1]
has_code = acc[2] == b'\x01'
if has_code:
code_prefix = bytes(acc[3][:4])
prefix_map[code_prefix] += 1
for k,v in prefix_map.items():
print(f"{k.hex()} : {v}")
|
[
"collections.defaultdict",
"snappy.uncompress",
"rlp.decode"
] |
[((302, 330), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (325, 330), False, 'import collections\n'), ((743, 769), 'rlp.decode', 'rlp.decode', (['manifest_bytes'], {}), '(manifest_bytes)\n', (753, 769), False, 'import rlp\n'), ((1362, 1397), 'snappy.uncompress', 'snappy.uncompress', (['chunk_compressed'], {}), '(chunk_compressed)\n', (1379, 1397), False, 'import snappy\n'), ((1409, 1432), 'rlp.decode', 'rlp.decode', (['chunk_bytes'], {}), '(chunk_bytes)\n', (1419, 1432), False, 'import rlp\n')]
|
"""Web Worker script."""
# In web workers, "window" is replaced by "self".
from browser import bind, self
import contextlib
import traceback
class OutputWriter:
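"""Buffer that captures printed text and forwards each chunk to the main script as an 'output' message."""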
def __init__(self, id, window):
self.id = id
self.window = window
self.buf = []
def write(self, text):
"""Write output to the screen."""
self.buf.append(text)
self.window.send([self.id, 'output', text])
def getvalue(self):
"""Get everything that was printed."""
return ''.join(self.buf)
@bind(self, "message")
def on_message(event):
"""Handle a message sent by the main script.
evt.data is the message body.
"""
msg = event.data
try:
id = msg['id']
except KeyError:
return
source = msg['source']
mode = msg['mode']
buff = OutputWriter(id, self)
with contextlib.redirect_stdout(buff), contextlib.redirect_stderr(buff):
self.send([id, 'ready', 0])
try:
code = compile(source, filename='python-now', mode=mode)
namespace = {
'__name__': '__main__',
'__filename__': '<python-now>'
}
result = eval(code, namespace)  # eval() runs both 'eval'- and 'exec'-mode code objects; exec() would always return None
except BaseException:
self.send([id, 'err', traceback.format_exc()])
else:
if result is not None:
print(repr(result))
# If we have exercises, run them as tests
if msg['exercises']:
if mode == 'exec':
test_ns = namespace.copy()
else:
test_ns = {}
test_ns.update(
source=source,
result=result,
output=buff.getvalue(),
)
exec(msg['exercises'], test_ns)
tests = []
for name, test in test_ns.items():
if name.startswith('test_') and callable(test):
tests.append(test)
for test_id, test in enumerate(tests):
try:
test()
except BaseException:
err = traceback.format_exc() + repr(test_ns)
else:
err = None
self.send([id, 'ex_result', (test_id, err)])
|
[
"browser.bind",
"contextlib.redirect_stderr",
"browser.self.buf.append",
"contextlib.redirect_stdout",
"traceback.format_exc",
"browser.self.window.send",
"browser.self.send"
] |
[((532, 553), 'browser.bind', 'bind', (['self', '"""message"""'], {}), "(self, 'message')\n", (536, 553), False, 'from browser import bind, self\n'), ((350, 371), 'browser.self.buf.append', 'self.buf.append', (['text'], {}), '(text)\n', (365, 371), False, 'from browser import bind, self\n'), ((380, 423), 'browser.self.window.send', 'self.window.send', (["[self.id, 'output', text]"], {}), "([self.id, 'output', text])\n", (396, 423), False, 'from browser import bind, self\n'), ((852, 884), 'contextlib.redirect_stdout', 'contextlib.redirect_stdout', (['buff'], {}), '(buff)\n', (878, 884), False, 'import contextlib\n'), ((886, 918), 'contextlib.redirect_stderr', 'contextlib.redirect_stderr', (['buff'], {}), '(buff)\n', (912, 918), False, 'import contextlib\n'), ((928, 955), 'browser.self.send', 'self.send', (["[id, 'ready', 0]"], {}), "([id, 'ready', 0])\n", (937, 955), False, 'from browser import bind, self\n'), ((2251, 2295), 'browser.self.send', 'self.send', (["[id, 'ex_result', (test_id, err)]"], {}), "([id, 'ex_result', (test_id, err)])\n", (2260, 2295), False, 'from browser import bind, self\n'), ((1272, 1294), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (1292, 1294), False, 'import traceback\n'), ((2143, 2165), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2163, 2165), False, 'import traceback\n')]
|
import numpy as np
import pandas as pd
from typing import Union, Callable
from pandas.core.frame import DataFrame
from NitroFE.time_based_features.indicator_features._AbsolutePriceOscillator import (
AbsolutePriceOscillator,
)
from NitroFE.time_based_features.moving_average_features.moving_average_features import (
ExponentialMovingFeature,
)
class MovingAverageConvergenceDivergence:
"""
Provided dataframe must be in ascending order.
"""
def __init__(
self,
fast_period: int = 26,
slow_period: int = 12,
smoothing_period: int = 9,
fast_operation: str = "mean",
slow_operation: str = "mean",
smoothing_operation: str = "mean",
initialize_using_operation: bool = False,
initialize_span: int = None,
min_periods: int = 0,
ignore_na: bool = False,
axis: int = 0,
times: str = None,
return_histogram=False,
):
"""
Parameters
----------
        fast_period : int, optional
            specify decay in terms of span, for the fast moving feature, by default 26
        slow_period : int, optional
            specify decay in terms of span, for the slow moving feature, by default 12
smoothing_period : int, optional
specify decay in terms of span, for the smoothing moving feature, by default 9
fast_operation : str, {'mean','var','std'}
operation to be performed for the fast moving feature, by default 'mean'
slow_operation : str, {'mean','var','std'}
operation to be performed for the slow moving feature, by default 'mean'
smoothing_operation : str, optional
operation to be performed for the smoothing moving feature, by default 'mean'
initialize_using_operation : bool, optional
If True, then specified 'operation' is performed on the first 'initialize_span' values, and then the exponential moving average is calculated, by default False
initialize_span : int, optional
the span over which 'operation' would be performed for initialization, by default None
min_periods : int, optional
Minimum number of observations in window required to have a value (otherwise result is NA), by default 0
ignore_na : bool, optional
Ignore missing values when calculating weights; specify True to reproduce pre-0.15.0 behavior, by default False
axis : int, optional
The axis to use. The value 0 identifies the rows, and 1 identifies the columns, by default 0
times : str, optional
Times corresponding to the observations. Must be monotonically increasing and datetime64[ns] dtype, by default None
"""
self.span_fast = fast_period
self.span_slow = slow_period
self.min_periods = min_periods
self.ignore_na = ignore_na
self.axis = axis
self.times = times
self.fast_operation = fast_operation
self.slow_operation = slow_operation
self.smoothing_operation = smoothing_operation
self.smoothing_period = smoothing_period
self.return_histogram = return_histogram
self.initialize_using_operation = initialize_using_operation
self.initialize_span = initialize_span
def fit(
self,
dataframe: Union[pd.DataFrame, pd.Series],
first_fit: bool = True,
):
"""
        For your training/initial fit phase (very first fit) use first_fit=True, and for any production/test implementation pass first_fit=False
        Returns --> the MACD histogram if return_histogram is True, otherwise the smoothed signal line
Parameters
----------
dataframe : Union[pd.DataFrame, pd.Series]
dataframe containing column values to create feature over
first_fit : bool, optional
Indicator features require past values for calculation.
Use True, when calculating for training data (very first fit)
Use False, when calculating for subsequent testing/production data { in which case the values, which
were saved during the last phase, will be utilized for calculation }, by default True
"""
if first_fit:
self._raw_macd_object = AbsolutePriceOscillator(
fast_period=self.span_fast,
slow_period=self.span_slow,
fast_operation=self.fast_operation,
slow_operation=self.slow_operation,
min_periods=self.min_periods,
initialize_using_operation=self.initialize_using_operation,
initialize_span=self.initialize_span,
ignore_na=self.ignore_na,
axis=self.axis,
times=self.times,
)
self._macd_object = ExponentialMovingFeature(
span=self.smoothing_period,
ignore_na=self.ignore_na,
axis=self.axis,
times=self.times,
operation=self.smoothing_operation,
initialize_using_operation=self.initialize_using_operation,
initialize_span=self.initialize_span,
)
raw_macd = self._raw_macd_object.fit(dataframe, first_fit=first_fit)
macd = self._macd_object.fit(dataframe=raw_macd, first_fit=first_fit)
return raw_macd - macd if self.return_histogram else macd
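# --- Editor's usage sketch; a minimal illustration, not part of the original module. ---
# The series below are hypothetical; any ascending numeric pd.Series/pd.DataFrame works.
if __name__ == '__main__':
    train = pd.Series(range(1, 101), dtype='float64')    # training data (very first fit)
    live = pd.Series(range(101, 111), dtype='float64')   # later production/test data

    macd = MovingAverageConvergenceDivergence(
        fast_period=26, slow_period=12, smoothing_period=9
    )
    signal_train = macd.fit(train, first_fit=True)
    signal_live = macd.fit(live, first_fit=False)     # reuses state saved during the first fit
    print(signal_live)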
|
[
"NitroFE.time_based_features.indicator_features._AbsolutePriceOscillator.AbsolutePriceOscillator",
"NitroFE.time_based_features.moving_average_features.moving_average_features.ExponentialMovingFeature"
] |
[((4388, 4750), 'NitroFE.time_based_features.indicator_features._AbsolutePriceOscillator.AbsolutePriceOscillator', 'AbsolutePriceOscillator', ([], {'fast_period': 'self.span_fast', 'slow_period': 'self.span_slow', 'fast_operation': 'self.fast_operation', 'slow_operation': 'self.slow_operation', 'min_periods': 'self.min_periods', 'initialize_using_operation': 'self.initialize_using_operation', 'initialize_span': 'self.initialize_span', 'ignore_na': 'self.ignore_na', 'axis': 'self.axis', 'times': 'self.times'}), '(fast_period=self.span_fast, slow_period=self.\n span_slow, fast_operation=self.fast_operation, slow_operation=self.\n slow_operation, min_periods=self.min_periods,\n initialize_using_operation=self.initialize_using_operation,\n initialize_span=self.initialize_span, ignore_na=self.ignore_na, axis=\n self.axis, times=self.times)\n', (4411, 4750), False, 'from NitroFE.time_based_features.indicator_features._AbsolutePriceOscillator import AbsolutePriceOscillator\n'), ((4949, 5210), 'NitroFE.time_based_features.moving_average_features.moving_average_features.ExponentialMovingFeature', 'ExponentialMovingFeature', ([], {'span': 'self.smoothing_period', 'ignore_na': 'self.ignore_na', 'axis': 'self.axis', 'times': 'self.times', 'operation': 'self.smoothing_operation', 'initialize_using_operation': 'self.initialize_using_operation', 'initialize_span': 'self.initialize_span'}), '(span=self.smoothing_period, ignore_na=self.\n ignore_na, axis=self.axis, times=self.times, operation=self.\n smoothing_operation, initialize_using_operation=self.\n initialize_using_operation, initialize_span=self.initialize_span)\n', (4973, 5210), False, 'from NitroFE.time_based_features.moving_average_features.moving_average_features import ExponentialMovingFeature\n')]
|
from pathlib import Path
from typing import Union, List, Dict, Optional
import pandas as pd
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from torchvision.transforms import transforms
from src.utils.utils import get_logger
class ArcheryBowlingDataModule(pl.LightningDataModule):
def __init__(self,
data_root: str,
test: bool = False,
val_ratio: float = None,
batch_size: int = 1,
window_size: int = 10,
normalisation: str = 'WithoutNormalization',
szenario: str = 'Archery',
features: List[str] = ['CenterEyeAnchor_pos_X', 'LeftVirtualHand_pos_X', 'RightVirtualHand_pos_X'],
identifier_col: str = 'seq_id',
label_col: str = 'ParticipantID',
sorting_cols: List[str] = None,
num_workers: int = 1,
shuffle_windows=False
):
super(ArcheryBowlingDataModule, self).__init__()
self.num_workers = num_workers
self.logger = get_logger(name='A-B-DataModule')
self.szenario = szenario
self.features = features
self.identifier_col = identifier_col if identifier_col is not None else 'seq_id'
self.label_col = label_col if label_col is not None else 'ParticipantID'
self.sorting_cols = sorting_cols
self.normalisation = normalisation
self.window_size = window_size
self.batch_size = batch_size
self.val_ratio = val_ratio
self.separate = test
self.data_root = Path(data_root) # Path is just more convenient
self.transform = transforms.Compose([
transforms.ToTensor(),
])
self.shuffle_windows = shuffle_windows
self.num_features = len(features)
self.dims = (self.window_size, self.num_features)
self.train_dataset, self.val_dataset, self.test_dataset = None, None, None
self.logger.info('__init__ done.')
def setup(self, stage: Optional[str] = None) -> None:
# do i want to load all data at once, and slice afterwords?
# slice all modulo matching repetitions to validation
# keep the remaining as train
# drop unused columns
# initiate DatasetObjects and return them
# return ArcheryBowlingDataset(None, 1, 1), ArcheryBowlingDataset(None, 1, 1), ArcheryBowlingDataset(None, 1, 1)
if stage in (None, 'fit'): # TODO no validation set throws a Nonetype Error on val data loader...
self.logger.info(f'stage:{stage}. creating Dataset...')
# regexing or sorting the file path seems to be a pain. therefore ill load all relevant (normalized + session1)
train_val_files = self.get_file_list(session=1)
train_val_files = list(train_val_files)
self.logger.info(f'found {len(train_val_files)} files.')
train_val_df = ArcheryBowlingDataModule.load_dataframe_from_multiple_files(train_val_files)
# TODO refactor this ifelse structure to a neat structure
if self.val_ratio and self.val_ratio > 0: # not none and > 0
modulo = int(1 / self.val_ratio)
if modulo > 12 or modulo < 2:
self.logger.info(
f'validation split ratio({self.val_ratio}) was set, '
f'but would result in either all or no data being available for training. '
f'Therefore all Data will be used as train-set!')
from src.datamodules.datasets.archery_bowling_dataset import ArcheryBowlingDataset
self.train_dataset = ArcheryBowlingDataset.create_from_dataframe(train_val_df, self.window_size,
self.batch_size, name='TRAIN',
feature_cols=self.features,
identifier_col=self.identifier_col,
label_col=self.label_col,
shuffle_windows=self.shuffle_windows,
sorting_cols=self.sorting_cols
)
else:
val_df = train_val_df[train_val_df['repetition'] % modulo == 0]
from src.datamodules.datasets.archery_bowling_dataset import ArcheryBowlingDataset
self.val_dataset = ArcheryBowlingDataset.create_from_dataframe(val_df, self.window_size,
self.batch_size, name='VAL',
feature_cols=self.features,
identifier_col=self.identifier_col,
label_col=self.label_col,
shuffle_windows=self.shuffle_windows,
sorting_cols=self.sorting_cols
)
del val_df
train_df = train_val_df[train_val_df['repetition'] % modulo != 0]
del train_val_df
self.train_dataset = ArcheryBowlingDataset.create_from_dataframe(train_df, self.window_size,
self.batch_size, name='TRAIN',
feature_cols=self.features,
identifier_col=self.identifier_col,
label_col=self.label_col,
shuffle_windows=self.shuffle_windows,
sorting_cols=self.sorting_cols
)
del train_df
else:
from src.datamodules.datasets.archery_bowling_dataset import ArcheryBowlingDataset
self.train_dataset = ArcheryBowlingDataset.create_from_dataframe(train_val_df, self.window_size,
self.batch_size, name='TRAIN',
feature_cols=self.features,
identifier_col=self.identifier_col,
label_col=self.label_col,
shuffle_windows=self.shuffle_windows,
sorting_cols=self.sorting_cols
)
self.val_dataset = None
self.logger.info('train/val Data initialized!')
if stage in (None, 'test'):
# slice all 'session2' entries for test data
# create a list of paths for test data files (basically everything with session 2
self.logger.info(f'stage:{stage}. creating Dataset...')
test_files = self.get_file_list(session=2)
test_files = (list(test_files))
self.logger.info(f'found {len(test_files)} test-files.')
# create test Dataset
from src.datamodules.datasets.archery_bowling_dataset import ArcheryBowlingDataset
test_df = ArcheryBowlingDataModule.load_dataframe_from_multiple_files(test_files)
computed_batch_size = self.batch_size
rest = len(test_df) % self.batch_size
computed_batch_size -= rest
self.test_dataset = ArcheryBowlingDataset.create_from_dataframe(test_df, self.window_size, computed_batch_size,
name='TEST', feature_cols=self.features,
identifier_col=self.identifier_col,
label_col=self.label_col,
shuffle_windows=False,
sorting_cols=self.sorting_cols
)
self.logger.info('test Data initialized!')
self.logger.info(f'Datasets are setup.')
self.logger.info(self)
def get_file_list(self, session=1):
train_val_files = self.data_root.glob(f'{self.szenario}*{self.normalisation}*session{session}*.csv')
return train_val_files
@staticmethod
def load_dataframe_from_multiple_files(file_list: List[Path]):
df_list = []
for i in file_list:
tmp = pd.read_csv(i)
df_list.append(tmp)
return pd.concat(df_list, ignore_index=True)
def _create_info_dict(self):
return {
'train dataset': None if not self.train_dataset else str(self.train_dataset),
'val dataset': None if not self.val_dataset else str(self.val_dataset),
'test dataset': None if not self.test_dataset else str(self.test_dataset),
'dims': self.dims,
'#batches': len(self.test_dataset),
'window size': self.window_size,
'batch size': self.batch_size,
'normalisation name': self.normalisation
}
def train_dataloader(self) -> Union[DataLoader, List[DataLoader], Dict[str, DataLoader]]:
return DataLoader(self.train_dataset, batch_size=None, num_workers=self.num_workers
)
def val_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
return DataLoader(self.val_dataset, batch_size=None, num_workers=self.num_workers
)
def test_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
# TODO handle num_workers...
return DataLoader(self.test_dataset, batch_size=None, num_workers=self.num_workers
)
def __repr__(self):
return f"DataModule(train_dataset={self.train_dataset!r}, " \
f"val_dataset={self.val_dataset!r}, " \
f"test_dataset={self.test_dataset!r}, " \
f"dims={self.dims!r}, " \
f"normalisation_name={self.normalisation!r}), " \
f"Szenario={self.szenario})"
def __rich_repr__(self):
yield "train_dataset", self.train_dataset
yield "val_dataset", self.val_dataset
yield "test_dataset", self.test_dataset
yield "dims", self.dims
yield "normalisation_name", self.normalisation
yield "szenario", self.szenario
|
[
"src.utils.utils.get_logger",
"torch.utils.data.DataLoader",
"pandas.read_csv",
"src.datamodules.datasets.archery_bowling_dataset.ArcheryBowlingDataset.create_from_dataframe",
"torchvision.transforms.transforms.ToTensor",
"pathlib.Path",
"pandas.concat"
] |
[((1110, 1143), 'src.utils.utils.get_logger', 'get_logger', ([], {'name': '"""A-B-DataModule"""'}), "(name='A-B-DataModule')\n", (1120, 1143), False, 'from src.utils.utils import get_logger\n'), ((1629, 1644), 'pathlib.Path', 'Path', (['data_root'], {}), '(data_root)\n', (1633, 1644), False, 'from pathlib import Path\n'), ((9918, 9955), 'pandas.concat', 'pd.concat', (['df_list'], {'ignore_index': '(True)'}), '(df_list, ignore_index=True)\n', (9927, 9955), True, 'import pandas as pd\n'), ((10608, 10685), 'torch.utils.data.DataLoader', 'DataLoader', (['self.train_dataset'], {'batch_size': 'None', 'num_workers': 'self.num_workers'}), '(self.train_dataset, batch_size=None, num_workers=self.num_workers)\n', (10618, 10685), False, 'from torch.utils.data import DataLoader\n'), ((10798, 10873), 'torch.utils.data.DataLoader', 'DataLoader', (['self.val_dataset'], {'batch_size': 'None', 'num_workers': 'self.num_workers'}), '(self.val_dataset, batch_size=None, num_workers=self.num_workers)\n', (10808, 10873), False, 'from torch.utils.data import DataLoader\n'), ((11024, 11100), 'torch.utils.data.DataLoader', 'DataLoader', (['self.test_dataset'], {'batch_size': 'None', 'num_workers': 'self.num_workers'}), '(self.test_dataset, batch_size=None, num_workers=self.num_workers)\n', (11034, 11100), False, 'from torch.utils.data import DataLoader\n'), ((8679, 8940), 'src.datamodules.datasets.archery_bowling_dataset.ArcheryBowlingDataset.create_from_dataframe', 'ArcheryBowlingDataset.create_from_dataframe', (['test_df', 'self.window_size', 'computed_batch_size'], {'name': '"""TEST"""', 'feature_cols': 'self.features', 'identifier_col': 'self.identifier_col', 'label_col': 'self.label_col', 'shuffle_windows': '(False)', 'sorting_cols': 'self.sorting_cols'}), "(test_df, self.window_size,\n computed_batch_size, name='TEST', feature_cols=self.features,\n identifier_col=self.identifier_col, label_col=self.label_col,\n shuffle_windows=False, sorting_cols=self.sorting_cols)\n", (8722, 8940), False, 'from src.datamodules.datasets.archery_bowling_dataset import ArcheryBowlingDataset\n'), ((9856, 9870), 'pandas.read_csv', 'pd.read_csv', (['i'], {}), '(i)\n', (9867, 9870), True, 'import pandas as pd\n'), ((1735, 1756), 'torchvision.transforms.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1754, 1756), False, 'from torchvision.transforms import transforms\n'), ((6923, 7201), 'src.datamodules.datasets.archery_bowling_dataset.ArcheryBowlingDataset.create_from_dataframe', 'ArcheryBowlingDataset.create_from_dataframe', (['train_val_df', 'self.window_size', 'self.batch_size'], {'name': '"""TRAIN"""', 'feature_cols': 'self.features', 'identifier_col': 'self.identifier_col', 'label_col': 'self.label_col', 'shuffle_windows': 'self.shuffle_windows', 'sorting_cols': 'self.sorting_cols'}), "(train_val_df, self.window_size,\n self.batch_size, name='TRAIN', feature_cols=self.features,\n identifier_col=self.identifier_col, label_col=self.label_col,\n shuffle_windows=self.shuffle_windows, sorting_cols=self.sorting_cols)\n", (6966, 7201), False, 'from src.datamodules.datasets.archery_bowling_dataset import ArcheryBowlingDataset\n'), ((3729, 4007), 'src.datamodules.datasets.archery_bowling_dataset.ArcheryBowlingDataset.create_from_dataframe', 'ArcheryBowlingDataset.create_from_dataframe', (['train_val_df', 'self.window_size', 'self.batch_size'], {'name': '"""TRAIN"""', 'feature_cols': 'self.features', 'identifier_col': 'self.identifier_col', 'label_col': 'self.label_col', 'shuffle_windows': 'self.shuffle_windows', 
'sorting_cols': 'self.sorting_cols'}), "(train_val_df, self.window_size,\n self.batch_size, name='TRAIN', feature_cols=self.features,\n identifier_col=self.identifier_col, label_col=self.label_col,\n shuffle_windows=self.shuffle_windows, sorting_cols=self.sorting_cols)\n", (3772, 4007), False, 'from src.datamodules.datasets.archery_bowling_dataset import ArcheryBowlingDataset\n'), ((4840, 5113), 'src.datamodules.datasets.archery_bowling_dataset.ArcheryBowlingDataset.create_from_dataframe', 'ArcheryBowlingDataset.create_from_dataframe', (['val_df', 'self.window_size', 'self.batch_size'], {'name': '"""VAL"""', 'feature_cols': 'self.features', 'identifier_col': 'self.identifier_col', 'label_col': 'self.label_col', 'shuffle_windows': 'self.shuffle_windows', 'sorting_cols': 'self.sorting_cols'}), "(val_df, self.window_size, self.\n batch_size, name='VAL', feature_cols=self.features, identifier_col=self\n .identifier_col, label_col=self.label_col, shuffle_windows=self.\n shuffle_windows, sorting_cols=self.sorting_cols)\n", (4883, 5113), False, 'from src.datamodules.datasets.archery_bowling_dataset import ArcheryBowlingDataset\n'), ((5877, 6151), 'src.datamodules.datasets.archery_bowling_dataset.ArcheryBowlingDataset.create_from_dataframe', 'ArcheryBowlingDataset.create_from_dataframe', (['train_df', 'self.window_size', 'self.batch_size'], {'name': '"""TRAIN"""', 'feature_cols': 'self.features', 'identifier_col': 'self.identifier_col', 'label_col': 'self.label_col', 'shuffle_windows': 'self.shuffle_windows', 'sorting_cols': 'self.sorting_cols'}), "(train_df, self.window_size,\n self.batch_size, name='TRAIN', feature_cols=self.features,\n identifier_col=self.identifier_col, label_col=self.label_col,\n shuffle_windows=self.shuffle_windows, sorting_cols=self.sorting_cols)\n", (5920, 6151), False, 'from src.datamodules.datasets.archery_bowling_dataset import ArcheryBowlingDataset\n')]
|
import sys
import click
from bulk_import_rename.commands.detect_modifications import track_modifications
from bulk_import_rename.commands.rename_import import run_rename
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group(context_settings=CONTEXT_SETTINGS)
@click.version_option(version='0.0.1')
def app():
pass
@app.command()
@click.argument('project_path', type=click.Path(exists=True))
@click.option('--origin_branch', default='master', help='Branch to start the evaluation')
@click.option('--work_branch', default=False, help='Name of the branch that has the modifications')
@click.option('--output_file', default='list_output.py', help='Change the name of the output file')
def track(**kwargs):
track_modifications(**kwargs)
@app.command()
@click.argument('project_path', nargs=-1, type=click.Path(exists=True))
@click.argument('moved_imports_file', type=click.Path(exists=True, resolve_path=True))
def rename(**kwargs):
run_rename(**kwargs)
if __name__ == '__main__':
# The sys.argv[1:] is necessary for debug on python2
# Link: https://goo.gl/vp5hfz
app(sys.argv[1:])
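# --- Editor's note (illustrative, not part of the original module). ---
# Typical invocations implied by the click declarations above; the module/entry-point
# name and the project paths are placeholders:
#   python -m bulk_import_rename track ./my_project --work_branch refactor
#   python -m bulk_import_rename rename ./my_project list_output.py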
|
[
"click.version_option",
"click.option",
"bulk_import_rename.commands.detect_modifications.track_modifications",
"bulk_import_rename.commands.rename_import.run_rename",
"click.Path",
"click.group"
] |
[((246, 292), 'click.group', 'click.group', ([], {'context_settings': 'CONTEXT_SETTINGS'}), '(context_settings=CONTEXT_SETTINGS)\n', (257, 292), False, 'import click\n'), ((295, 332), 'click.version_option', 'click.version_option', ([], {'version': '"""0.0.1"""'}), "(version='0.0.1')\n", (315, 332), False, 'import click\n'), ((440, 533), 'click.option', 'click.option', (['"""--origin_branch"""'], {'default': '"""master"""', 'help': '"""Branch to start the evaluation"""'}), "('--origin_branch', default='master', help=\n 'Branch to start the evaluation')\n", (452, 533), False, 'import click\n'), ((531, 634), 'click.option', 'click.option', (['"""--work_branch"""'], {'default': '(False)', 'help': '"""Name of the branch that has the modifications"""'}), "('--work_branch', default=False, help=\n 'Name of the branch that has the modifications')\n", (543, 634), False, 'import click\n'), ((632, 735), 'click.option', 'click.option', (['"""--output_file"""'], {'default': '"""list_output.py"""', 'help': '"""Change the name of the output file"""'}), "('--output_file', default='list_output.py', help=\n 'Change the name of the output file')\n", (644, 735), False, 'import click\n'), ((758, 787), 'bulk_import_rename.commands.detect_modifications.track_modifications', 'track_modifications', ([], {}), '(**kwargs)\n', (777, 787), False, 'from bulk_import_rename.commands.detect_modifications import track_modifications\n'), ((997, 1017), 'bulk_import_rename.commands.rename_import.run_rename', 'run_rename', ([], {}), '(**kwargs)\n', (1007, 1017), False, 'from bulk_import_rename.commands.rename_import import run_rename\n'), ((413, 436), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (423, 436), False, 'import click\n'), ((856, 879), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (866, 879), False, 'import click\n'), ((925, 967), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'resolve_path': '(True)'}), '(exists=True, resolve_path=True)\n', (935, 967), False, 'import click\n')]
|
import thread
import threading
import abc
from time import sleep
class AttackMethod:
"""
The AttackMethod class represents a DOS attack. The AttackMethod class is an abstract class and needs to be
extended by other classes. An AttackMethod runs in its own thread. The thread loop starts when the
start_attack() function is called and stops when the stop_attack() function is called.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, proxy, target):
"""
Constructor. Creates a new AttackMethod instance.
:type target: Destination
:type proxy: Proxy
"""
self._proxy = proxy
self._attack_is_active = False
self._innerThread = None
self._attack_lock = threading.Lock()
self._attack_target = target
self._loop_delay = 0.050
self.exception = None
def start_attack(self):
"""
Starts the DOS attack.
"""
self._attack_lock.acquire()
if not self._attack_is_active:
self._attack_is_active = True
self._attack_lock.release()
self._innerThread = thread.start_new_thread(self._thread_loop, ())
else:
self._attack_lock.release()
def stop_attack(self):
"""
Stops the attack loop.
"""
self._set_attack_active(False)
def has_exception(self):
if not self.is_active():
return self.exception is not None
else:
return False
def get_exception(self):
return self.exception
def _thread_loop(self):
"""
        The main loop of the attack thread. This function is called by the attack thread and should not be called
        directly.
"""
while self.is_active():
try:
self._attack_loop()
sleep(self._loop_delay)
except Exception as ex:
self.exception = ex
self.stop_attack()
def is_active(self):
"""
        Checks the value of _attack_is_active in a thread-safe way.
Use this function to get the value of _attack_is_active instead of checking the value directly.
:return: True if the attack is active and False otherwise
"""
self._attack_lock.acquire()
attack_active = self._attack_is_active
self._attack_lock.release()
return attack_active
def _set_attack_active(self, value):
"""
Thread-safe setter for the _attack_is_active value. This function is only for internal use.
        :param value: New value of the _attack_is_active value (True or False)
"""
if not isinstance(value, bool):
            raise ValueError('set_attack_active value has to be a boolean and not a ' + str(type(value)))
self._attack_lock.acquire()
self._attack_is_active = value
self._attack_lock.release()
@abc.abstractmethod
def _attack_loop(self):
"""
Part of the _thread_loop. This function has to be implemented by the class which extends from the
AttackMethod class. The function gets called repeatedly until the stop_attack function gets called.
        The class which extends from this class has to implement its attack logic in this function.
"""
return
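# --- Editor's sketch of a concrete subclass; illustrative only, not part of the original module. ---
# It only demonstrates the extension contract: implement _attack_loop(), which the thread
# loop above calls repeatedly until stop_attack() is invoked. Instantiating it still
# requires the Proxy and Destination objects mentioned in the constructor docstring.
class NullAttack(AttackMethod):
    """A do-nothing attack used purely to illustrate the AttackMethod contract."""

    def _attack_loop(self):
        # One unit of "attack" work per loop iteration; a real subclass would use
        # self._proxy and self._attack_target here.
        pass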
|
[
"threading.Lock",
"thread.start_new_thread",
"time.sleep"
] |
[((777, 793), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (791, 793), False, 'import threading\n'), ((1171, 1217), 'thread.start_new_thread', 'thread.start_new_thread', (['self._thread_loop', '()'], {}), '(self._thread_loop, ())\n', (1194, 1217), False, 'import thread\n'), ((1899, 1922), 'time.sleep', 'sleep', (['self._loop_delay'], {}), '(self._loop_delay)\n', (1904, 1922), False, 'from time import sleep\n')]
|
# This file is needed to initialize the models and migrations
import os
import sys
from nopea.dbobject import DbObject
from nopea import fields
from nopea.migrations import Migration
if 'sqlite' in sys.argv:
from nopea.adaptors.sqlite import SQLiteAdaptor
DbObject.adaptor = SQLiteAdaptor('sqless.db')
elif 'mysql' in sys.argv:
from nopea.adaptors.mysql import MySQLAdaptor
DbObject.adaptor = MySQLAdaptor({
'host': 'localhost',
'user': 'sqless',
'db': 'sqless',
'use_unicode': True,
'charset': 'utf8'
})
elif 'postgres' in sys.argv:
from nopea.adaptors.postgres import PostgreSQLAdaptor
DbObject.adaptor = PostgreSQLAdaptor({
'host': 'localhost',
'user': 'sqless',
'database': 'sqless',
'password': '<PASSWORD>'
})
class User(DbObject):
username = fields.CharField(max_length=20)
email = fields.CharField(max_length=100)
failed_logins = fields.IntegerField(default=0)
class Post(DbObject):
title = fields.CharField(max_length=100)
content = fields.TextField()
class Song(DbObject):
title = fields.CharField(max_length=100)
artist = fields.CharField(max_length=100)
album = fields.CharField(max_length=100)
in_collection = fields.BooleanField(default=False)
Migration.migration_dir = os.path.join(os.getcwd(), 'utils/migrations')
migrations = Migration()
migrations.create_migrations()
migrations.run_migrations()
users = [
{"username": "TestUser1", "email": "<EMAIL>"},
{"username": "TestUser2", "email": "<EMAIL>"},
{"username": "TestUser3", "email": "<EMAIL>", "failed_logins": 12}
]
for user in users:
User.objects.get_or_create(**user)
posts = [
{"title": "TestPosting", "content": "Lorem Ipsum Dolor Sit"},
{"title": "SomeOtherStuff", "content": "hello, world!"},
]
for post in posts:
Post.objects.get_or_create(**post)
songs = [
{
"title": "Love Like Cyanide",
"artist": "Sirenia",
"album": "Arcane Astral Aeons"
},
{
"title": "The Greatest Show On Earth",
"artist": "Nightwish",
"album": "Decades"
},
{
"title": "Ghost Love Score",
"artist": "Nightwish",
"album": "Decades"
},
{
"title": "Devil And The Deep Dark Ocean",
"artist": "Nightwish",
"album": "Decades"
},
{
"title": "One By One",
"artist": "Immortal_",
"album": "Sons Of Northern Darkness"
},
{
"title": "Sons Of Northern Darkness",
"artist": "Immortal_",
"album": "Sons Of Northern Darkness"
}
]
for song in songs:
Song.objects.get_or_create(**song)
|
[
"nopea.adaptors.sqlite.SQLiteAdaptor",
"nopea.adaptors.postgres.PostgreSQLAdaptor",
"nopea.fields.TextField",
"os.getcwd",
"nopea.fields.IntegerField",
"nopea.fields.CharField",
"nopea.adaptors.mysql.MySQLAdaptor",
"nopea.migrations.Migration",
"nopea.fields.BooleanField"
] |
[((1397, 1408), 'nopea.migrations.Migration', 'Migration', ([], {}), '()\n', (1406, 1408), False, 'from nopea.migrations import Migration\n'), ((287, 313), 'nopea.adaptors.sqlite.SQLiteAdaptor', 'SQLiteAdaptor', (['"""sqless.db"""'], {}), "('sqless.db')\n", (300, 313), False, 'from nopea.adaptors.sqlite import SQLiteAdaptor\n'), ((865, 896), 'nopea.fields.CharField', 'fields.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (881, 896), False, 'from nopea import fields\n'), ((909, 941), 'nopea.fields.CharField', 'fields.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (925, 941), False, 'from nopea import fields\n'), ((962, 992), 'nopea.fields.IntegerField', 'fields.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (981, 992), False, 'from nopea import fields\n'), ((1029, 1061), 'nopea.fields.CharField', 'fields.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1045, 1061), False, 'from nopea import fields\n'), ((1076, 1094), 'nopea.fields.TextField', 'fields.TextField', ([], {}), '()\n', (1092, 1094), False, 'from nopea import fields\n'), ((1131, 1163), 'nopea.fields.CharField', 'fields.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1147, 1163), False, 'from nopea import fields\n'), ((1177, 1209), 'nopea.fields.CharField', 'fields.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1193, 1209), False, 'from nopea import fields\n'), ((1222, 1254), 'nopea.fields.CharField', 'fields.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1238, 1254), False, 'from nopea import fields\n'), ((1275, 1309), 'nopea.fields.BooleanField', 'fields.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1294, 1309), False, 'from nopea import fields\n'), ((1351, 1362), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1360, 1362), False, 'import os\n'), ((414, 527), 'nopea.adaptors.mysql.MySQLAdaptor', 'MySQLAdaptor', (["{'host': 'localhost', 'user': 'sqless', 'db': 'sqless', 'use_unicode': True,\n 'charset': 'utf8'}"], {}), "({'host': 'localhost', 'user': 'sqless', 'db': 'sqless',\n 'use_unicode': True, 'charset': 'utf8'})\n", (426, 527), False, 'from nopea.adaptors.mysql import MySQLAdaptor\n'), ((681, 791), 'nopea.adaptors.postgres.PostgreSQLAdaptor', 'PostgreSQLAdaptor', (["{'host': 'localhost', 'user': 'sqless', 'database': 'sqless', 'password':\n '<PASSWORD>'}"], {}), "({'host': 'localhost', 'user': 'sqless', 'database':\n 'sqless', 'password': '<PASSWORD>'})\n", (698, 791), False, 'from nopea.adaptors.postgres import PostgreSQLAdaptor\n')]
|
import os
import math
import pandas as pd
import datetime
variables = [
'date_stamp',
'age_group',
'cnt_confirmed',
'pct_confirmed'
]
def cleanData(data, fileName):
# source data frame from csv file
source = pd.DataFrame(data)
source.columns = ['v1','v2','v3']
print(source)
# the target data frame
df = pd.DataFrame(columns = variables)
df['age_group'] = source['v1'].map({ '0-10':'00', '11-20': '11', '21-30': '21', '31-40': '31', '41-50': '41', '51-60': '51', '61-70': '61', '71-80': '71', '81-90': '81', '90+': '91', 'Age Unknown': '99' })
df['cnt_confirmed'] = source['v2']
df['pct_confirmed'] = list(map(lambda x: x[:-1], source['v3'].values))
df['date_stamp'] = fileName[0:-4]
# apply data types
df['date_stamp'] = pd.to_datetime(df['date_stamp']).dt.strftime('%Y-%m-%d')
df['cnt_confirmed'] = df['cnt_confirmed'].astype(pd.Int32Dtype())
return df
def deleteFiles(path):
    today = datetime.date.today()
one_week = datetime.timedelta(days=7)
week = today - one_week
week_ago = datetime.datetime.combine(week, datetime.time(0, 0))
for filename in os.listdir(path):
if(filename.endswith('.csv')):
            newFilename = filename.replace('.csv', '')
filedate = datetime.datetime.strptime(newFilename, '%Y-%m-%d')
if(filedate < week_ago):
print('removing files that are more than a week old: ',path,'/',filename)
os.remove(f"{path}/{filename}")
return None
if __name__ == "__main__":
path = os.path
# Loop over the files within the folder
for filename in sorted(os.listdir('./data/us-tn/co-knox/covid_age/raw')):
if filename.endswith('.csv') and path.exists(f"./data/us-tn/co-knox/covid_age/clean/{filename}") == False:
print(filename)
# For each csv file, map the transformed data to its respective file in the harvested folder
data = pd.read_csv(f"./data/us-tn/co-knox/covid_age/raw/{filename}")
df = cleanData(data, filename)
df.to_csv(f"./data/us-tn/co-knox/covid_age/clean/{filename}", index=False)
# if there is no aggregate file create one, otherwise append to it.
if path.exists(f"./data/us-tn/co-knox/covid_age/latest.csv"):
df.to_csv(f"./data/us-tn/co-knox/covid_age/latest.csv", mode='a', header=False, index=False)
else:
df.to_csv(f"./data/us-tn/co-knox/covid_age/latest.csv", index=False)
deleteFiles('./data/us-tn/co-knox/covid_age/raw')
deleteFiles('./data/us-tn/co-knox/covid_age/clean')
|
[
"pandas.DataFrame",
"os.remove",
"pandas.read_csv",
"datetime.date.today",
"pandas.Int32Dtype",
"datetime.datetime.strptime",
"datetime.timedelta",
"pandas.to_datetime",
"datetime.time",
"os.listdir"
] |
[((225, 243), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (237, 243), True, 'import pandas as pd\n'), ((339, 370), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'variables'}), '(columns=variables)\n', (351, 370), True, 'import pandas as pd\n'), ((954, 975), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (973, 975), False, 'import datetime\n'), ((989, 1015), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(7)'}), '(days=7)\n', (1007, 1015), False, 'import datetime\n'), ((1123, 1139), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1133, 1139), False, 'import os\n'), ((889, 904), 'pandas.Int32Dtype', 'pd.Int32Dtype', ([], {}), '()\n', (902, 904), True, 'import pandas as pd\n'), ((1085, 1104), 'datetime.time', 'datetime.time', (['(0)', '(0)'], {}), '(0, 0)\n', (1098, 1104), False, 'import datetime\n'), ((1566, 1614), 'os.listdir', 'os.listdir', (['"""./data/us-tn/co-knox/covid_age/raw"""'], {}), "('./data/us-tn/co-knox/covid_age/raw')\n", (1576, 1614), False, 'import os\n'), ((1235, 1286), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['newFilename', '"""%Y-%m-%d"""'], {}), "(newFilename, '%Y-%m-%d')\n", (1261, 1286), False, 'import datetime\n'), ((1885, 1946), 'pandas.read_csv', 'pd.read_csv', (['f"""./data/us-tn/co-knox/covid_age/raw/{filename}"""'], {}), "(f'./data/us-tn/co-knox/covid_age/raw/{filename}')\n", (1896, 1946), True, 'import pandas as pd\n'), ((779, 811), 'pandas.to_datetime', 'pd.to_datetime', (["df['date_stamp']"], {}), "(df['date_stamp'])\n", (793, 811), True, 'import pandas as pd\n'), ((1403, 1434), 'os.remove', 'os.remove', (['f"""{path}/{filename}"""'], {}), "(f'{path}/{filename}')\n", (1412, 1434), False, 'import os\n')]
|
import pytest
import requests
from project import create_app, db
from flask import current_app
from project.models import Stock, User
from datetime import datetime
########################
#### Helper Classes ####
########################
class MockSuccessResponse(object):
def __init__(self, url):
self.status_code = 200
self.url = url
self.headers = {'blaa': '1234'}
def json(self):
return {
'Meta Data': {
"2. Symbol": "MSFT",
"3. Last Refreshed": "2022-02-10"
},
'Time Series (Daily)': {
"2022-02-10": {
"4. close": "302.3800",
},
"2022-02-09": {
"4. close": "301.9800",
}
}
}
class MockFailedResponse(object):
def __init__(self, url):
self.status_code = 404
self.url = url
self.headers = {'blaa': '1234'}
def json(self):
return {'error': 'bad'}
class MockSuccessResponseDaily(object):
def __init__(self, url):
self.status_code = 200
self.url = url
def json(self):
return {
'Meta Data': {
"2. Symbol": "AAPL",
"3. Last Refreshed": "2020-03-24"
},
'Time Series (Daily)': {
"2022-02-10": {
"4. close": "148.3400",
},
"2022-02-09": {
"4. close": "135.9800",
}
}
}
class MockApiRateLimitExceededResponse(object):
def __init__(self, url):
self.status_code = 200
self.url = url
def json(self):
return {
'Note': 'Thank you for using Alpha Vantage! Our standard API call frequency is ' +
'5 calls per minute and 500 calls per day.'
}
class MockFailedResponse(object):
def __init__(self, url):
self.status_code = 404
self.url = url
def json(self):
return {'error': 'bad'}
class MockSuccessResponseWeekly(object):
def __init__(self, url):
self.status_code = 200
self.url = url
def json(self):
return {
'Meta Data': {
"2. Symbol": "AAPL",
"3. Last Refreshed": "2020-07-28"
},
'Weekly Adjusted Time Series': {
"2020-07-24": {
"4. close": "379.2400",
},
"2020-07-17": {
"4. close": "362.7600",
},
"2020-06-11": {
"4. close": "354.3400",
},
"2020-02-25": {
"4. close": "432.9800",
}
}
}
@pytest.fixture(scope='module')
def test_client():
flask_app = create_app()
flask_app.config.from_object('config.TestingConfig')
flask_app.extensions['mail'].suppress = True #to avoid sending emails during the tests
# Create a test client using the Flask application configured for testing
with flask_app.test_client() as testing_client:
        # establish an app context before accessing the logger
with flask_app.app_context():
flask_app.logger.info('Creating database tables in test_client fixture...')
yield testing_client #where the test happens
@pytest.fixture(scope='function')
def new_stock():
stock = Stock('AAPL', '16', '406.78', 7, datetime(2022, 2, 12))
return stock
@pytest.fixture(scope='module')
def new_user():
user = User('<EMAIL>', '<PASSWORD>')
return user
# to register a default user
@pytest.fixture(scope='module')
def register_default_user(test_client):
# Register the default user
test_client.post('/users/register',
data={'name':'<NAME>', 'email': '<EMAIL>',
'password': '<PASSWORD>'},
follow_redirects=True)
return
# is default user logged in?
@pytest.fixture(scope='function')
def log_in_default_user(test_client, register_default_user):
# Log in the default user
test_client.post('/users/login',
data={'email': '<EMAIL>',
'password': '<PASSWORD>'},
follow_redirects=True)
yield # this is where the testing happens!
# Log out the default user
test_client.get('/users/logout', follow_redirects=True)
@pytest.fixture(scope='function')
def confirm_email_default_user(test_client, log_in_default_user):
# Mark the user as having their email address confirmed
user = User.query.filter_by(email='<EMAIL>').first()
user.email_confirmed = True
user.email_confirmed_on = datetime(2020, 7, 8)
db.session.add(user)
db.session.commit()
yield user # this is where the testing happens!
# Mark the user as not having their email address confirmed (clean up)
user = User.query.filter_by(email='<EMAIL>').first()
user.email_confirmed = False
user.email_confirmed_on = None
db.session.add(user)
db.session.commit()
@pytest.fixture(scope='function')
def afterwards_reset_default_user_password():
yield # this is where the testing happens!
# Since a test using this fixture could change the password for the default user,
# reset the password back to the default password
user = User.query.filter_by(email='<EMAIL>').first()
user.set_password('<PASSWORD>')
db.session.add(user)
db.session.commit()
@pytest.fixture(scope='function')
def add_stocks_for_default_user(test_client, log_in_default_user):
# Add three stocks for the default user
test_client.post('/add_stock', data={'stock_symbol': 'SAM',
'number_of_shares': '27',
'purchase_price': '301.23',
'purchase_date': '2020-07-01'})
test_client.post('/add_stock', data={'stock_symbol': 'COST',
'number_of_shares': '76',
'purchase_price': '14.67',
'purchase_date': '2019-05-26'})
test_client.post('/add_stock', data={'stock_symbol': 'TWTR',
'number_of_shares': '146',
'purchase_price': '34.56',
'purchase_date': '2020-02-03'})
return
# ***fixtures for moking requests.get()***
@pytest.fixture(scope='function')
def mock_requests_get_success_daily(monkeypatch):
# Create a mock for the requests.get() call to prevent making the actual API call
def mock_get(url):
return MockSuccessResponseDaily(url)
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=MSFT&apikey=demo'
monkeypatch.setattr(requests, 'get', mock_get)
@pytest.fixture(scope='function')
def mock_requests_get_api_rate_limit_exceeded(monkeypatch):
def mock_get(url):
return MockApiRateLimitExceededResponse(url)
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=MSFT&apikey=demo'
monkeypatch.setattr(requests, 'get', mock_get)
@pytest.fixture(scope='function')
def mock_requests_get_failure(monkeypatch):
def mock_get(url):
return MockFailedResponse(url)
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=MSFT&apikey=demo'
monkeypatch.setattr(requests, 'get', mock_get)
@pytest.fixture(scope='function')
def mock_requests_get_success_weekly(monkeypatch):
# Create a mock for the requests.get() call to prevent making the actual API call
def mock_get(url):
return MockSuccessResponseWeekly(url)
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_WEEKLY_ADJUSTED&symbol=MSFT&apikey=demo'
monkeypatch.setattr(requests, 'get', mock_get)
# ***register-login-logout 2nd user***
@pytest.fixture(scope='module')
def register_second_user(test_client):
"""Registers the second user using the '/users/register' route."""
test_client.post('/users/register',
data={'name':'<NAME>', 'email': '<EMAIL>',
'password': '<PASSWORD>'})
@pytest.fixture(scope='function')
def log_in_second_user(test_client, register_second_user):
# Log in the user
test_client.post('/users/login',
data={'email': '<EMAIL>',
'password': '<PASSWORD>'})
yield # this is where the testing happens!
# Log out the user
test_client.get('/users/logout', follow_redirects=True)
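# --- Editor's note (illustrative, not part of the original conftest). ---
# A test module elsewhere in the suite is assumed to consume these fixtures by name, e.g.:
#
#   def test_logout_redirects(test_client, log_in_default_user):
#       response = test_client.get('/users/logout', follow_redirects=True)
#       assert response.status_code == 200
#
# The route '/users/logout' is taken from the fixtures above; the assertion itself is hypothetical.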
|
[
"project.db.session.add",
"project.db.session.commit",
"project.models.User.query.filter_by",
"project.create_app",
"pytest.fixture",
"datetime.datetime",
"project.models.User"
] |
[((2827, 2857), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (2841, 2857), False, 'import pytest\n'), ((3430, 3462), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (3444, 3462), False, 'import pytest\n'), ((3567, 3597), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (3581, 3597), False, 'import pytest\n'), ((3702, 3732), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (3716, 3732), False, 'import pytest\n'), ((4004, 4036), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (4018, 4036), False, 'import pytest\n'), ((4397, 4429), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (4411, 4429), False, 'import pytest\n'), ((5051, 5083), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (5065, 5083), False, 'import pytest\n'), ((5463, 5495), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (5477, 5495), False, 'import pytest\n'), ((6150, 6182), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (6164, 6182), False, 'import pytest\n'), ((6538, 6570), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (6552, 6570), False, 'import pytest\n'), ((6858, 6890), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (6872, 6890), False, 'import pytest\n'), ((7148, 7180), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (7162, 7180), False, 'import pytest\n'), ((7587, 7617), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (7601, 7617), False, 'import pytest\n'), ((7849, 7881), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (7863, 7881), False, 'import pytest\n'), ((2893, 2905), 'project.create_app', 'create_app', ([], {}), '()\n', (2903, 2905), False, 'from project import create_app, db\n'), ((3625, 3654), 'project.models.User', 'User', (['"""<EMAIL>"""', '"""<PASSWORD>"""'], {}), "('<EMAIL>', '<PASSWORD>')\n", (3629, 3654), False, 'from project.models import Stock, User\n'), ((4675, 4695), 'datetime.datetime', 'datetime', (['(2020)', '(7)', '(8)'], {}), '(2020, 7, 8)\n', (4683, 4695), False, 'from datetime import datetime\n'), ((4700, 4720), 'project.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (4714, 4720), False, 'from project import create_app, db\n'), ((4725, 4744), 'project.db.session.commit', 'db.session.commit', ([], {}), '()\n', (4742, 4744), False, 'from project import create_app, db\n'), ((5004, 5024), 'project.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (5018, 5024), False, 'from project import create_app, db\n'), ((5029, 5048), 'project.db.session.commit', 'db.session.commit', ([], {}), '()\n', (5046, 5048), False, 'from project import create_app, db\n'), ((5416, 5436), 'project.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (5430, 5436), False, 'from project import create_app, db\n'), ((5441, 5460), 'project.db.session.commit', 'db.session.commit', ([], {}), '()\n', (5458, 5460), False, 'from project import create_app, db\n'), ((3525, 3546), 'datetime.datetime', 'datetime', (['(2022)', '(2)', '(12)'], {}), '(2022, 2, 12)\n', 
(3533, 3546), False, 'from datetime import datetime\n'), ((4567, 4604), 'project.models.User.query.filter_by', 'User.query.filter_by', ([], {'email': '"""<EMAIL>"""'}), "(email='<EMAIL>')\n", (4587, 4604), False, 'from project.models import Stock, User\n'), ((4886, 4923), 'project.models.User.query.filter_by', 'User.query.filter_by', ([], {'email': '"""<EMAIL>"""'}), "(email='<EMAIL>')\n", (4906, 4923), False, 'from project.models import Stock, User\n'), ((5330, 5367), 'project.models.User.query.filter_by', 'User.query.filter_by', ([], {'email': '"""<EMAIL>"""'}), "(email='<EMAIL>')\n", (5350, 5367), False, 'from project.models import Stock, User\n')]
|
import email
import pandas as pd
def extract(data, structured_fields=[], extract_payload=True):
r"""This function extracts data for the given header list from the Enron email dataset.
    It provides flexibility to choose which fields need to be extracted.
    The headers provided by the user are the tags in the emails of the Enron dataset, e.g. Date, Subject etc.
    By default, if no header is provided, this function returns only the email text body of the Enron dataset.
    Arguments:
    1) data: DataFrame. The Enron dataset with column headings. This argument cannot be kept empty.
    2) structured_fields: List. A list of tags for which data needs to be extracted. Example: ['Date', 'Subject', 'X-To']. This argument can be dropped if not required.
    3) extract_payload: Boolean. True if the email text body is required, False if only structured_fields need to be extracted. This field can also be dropped while calling the function; in that case the default value True is used.
    return: DataFrame. A dataframe with the specified fields along with the original columns passed as the data argument.
    This function is created to take off the burden of extracting the desired fields from the Enron dataset. However, it does not clean the data, e.g. it does not remove empty rows or columns. Nor does it do pre-processing such as lowercasing or removal of unwanted characters.
    In order to make it more powerful, the above functions can be added.
"""
headers=data.columns
emails = data.rename(columns={headers[0]:'email_path', headers[1]:'email'})
#getting structured text
def create_dict(dictionary, key, value):
if key in dictionary:
values = dictionary.get(key)
values.append(value)
dictionary[key] = values
else:
dictionary[key] = [value]
return dictionary
def get_structured_data(df, fields):
structured_data = {}
messages = df["email"]
for message in messages:
e = email.message_from_string(message)
for header in fields:
header_data = e.get(header)
create_dict(dictionary = structured_data, key = header, value = header_data)
return pd.DataFrame(structured_data)
#getting unstructured text
def get_unstructured_email(df):
messages = []
for item in df["email"]:
e = email.message_from_string(item)
message_body = e.get_payload()
#message_body = message_body.lower()
messages.append(message_body)
return messages
if extract_payload == True:
email_body = get_unstructured_email(emails)
emails["Message-Body"] = email_body
structured_data = get_structured_data(emails, structured_fields)
emails = pd.concat([emails, structured_data], axis=1)
else:
structured_data = get_structured_data(emails, structured_fields)
emails = pd.concat([emails, structured_data], axis=1)
return emails
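# --- Editor's usage sketch; illustrative only, not part of the original module. ---
# 'emails.csv' is a hypothetical two-column file (path, raw email text) such as the
# public Enron dump; the header names follow the docstring above.
if __name__ == '__main__':
    raw = pd.read_csv('emails.csv')
    parsed = extract(raw, structured_fields=['Date', 'Subject', 'X-To'], extract_payload=True)
    print(parsed[['Date', 'Subject', 'Message-Body']].head())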
|
[
"pandas.DataFrame",
"email.message_from_string",
"pandas.concat"
] |
[((2387, 2416), 'pandas.DataFrame', 'pd.DataFrame', (['structured_data'], {}), '(structured_data)\n', (2399, 2416), True, 'import pandas as pd\n'), ((2981, 3025), 'pandas.concat', 'pd.concat', (['[emails, structured_data]'], {'axis': '(1)'}), '([emails, structured_data], axis=1)\n', (2990, 3025), True, 'import pandas as pd\n'), ((3136, 3180), 'pandas.concat', 'pd.concat', (['[emails, structured_data]'], {'axis': '(1)'}), '([emails, structured_data], axis=1)\n', (3145, 3180), True, 'import pandas as pd\n'), ((2166, 2200), 'email.message_from_string', 'email.message_from_string', (['message'], {}), '(message)\n', (2191, 2200), False, 'import email\n'), ((2563, 2594), 'email.message_from_string', 'email.message_from_string', (['item'], {}), '(item)\n', (2588, 2594), False, 'import email\n')]
|
"""
domonic.events
====================================
dom events
"""
# from typing import *
import time
# TODO - bring EventTarget here and get rid of this one?
class EventDispatcher(object):
""" EventDispatcher is a class you can extend to give your obj event dispatching abilities """
def __init__(self, *args, **kwargs):
self.listeners = {}
def hasEventListener(self, _type):
return _type in self.listeners
# TODO - event: str, function, useCapture: bool
# def addEventListener(self, event: str, function, useCapture: bool) -> None:
def addEventListener(self, _type, callback, *args, **kwargs):
if _type not in self.listeners:
self.listeners[_type] = []
self.listeners[_type].append(callback)
def removeEventListener(self, _type, callback):
if _type not in self.listeners:
return
stack = self.listeners[_type]
for thing in stack:
if thing == callback:
stack.remove(thing)
return
def dispatchEvent(self, event):
if event.type not in self.listeners:
return True # huh?. surely false?
stack = self.listeners[event.type]
# .slice()
event.target = self # TODO/NOTE - is this correct? - cant think where else would set it
for thing in stack:
try:
thing(event)
# type(thing, (Event,), self)
except Exception as e:
print(e)
thing() # try calling without params, user may not create param
return not event.defaultPrevented
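# --- Editor's usage sketch; illustrative only, not part of the original module. ---
# Wrapped in a function so nothing runs at import time (the Event class is defined below).
def _example_dispatcher_usage():
    class Ticker(EventDispatcher):
        pass

    ticker = Ticker()
    ticker.addEventListener(Event.CHANGE, lambda e: print('changed at', e.timeStamp))
    ticker.dispatchEvent(Event(Event.CHANGE))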
class Event(object):
""" event """
EMPTIED = "emptied" #:
ABORT = "abort" #:
AFTERPRINT = "afterprint" #:
BEFOREPRINT = "beforeprint" #:
BEFOREUNLOAD = "beforeunload" #:
CANPLAY = "canplay" #:
CANPLAYTHROUGH = "canplaythrough" #:
CHANGE = "change" #:
DURATIONCHANGE = "durationchange" #:
ENDED = "ended" #:
ERROR = "error" #:
FULLSCREENCHANGE = "fullscreenchange" #:
FULLSCREENERROR = "fullscreenerror" #:
INPUT = "input" #:
INVALID = "invalid" #:
LOAD = "load" #:
LOADEDDATA = "loadeddata" #:
LOADEDMETADATA = "loadedmetadata" #:
MESSAGE = "message" #:
OFFLINE = "offline" #:
ONLINE = "online" #:
OPEN = "open" #:
PAUSE = "pause" #:
PLAY = "play" #:
PLAYING = "playing" #:
PROGRESS = "progress" #:
RATECHANGE = "ratechange" #:
RESIZE = "resize" #:
RESET = "reset" #:
SCROLL = "scroll" #:
SEARCH = "search" #:
SEEKED = "seeked" #:
SEEKING = "seeking" #:
SELECT = "select" #:
SHOW = "show" #:
STALLED = "stalled" #:
SUBMIT = "submit" #:
SUSPEND = "suspend" #:
TOGGLE = "toggle" #:
UNLOAD = "unload" #:
VOLUMECHANGE = "volumechange" #:
WAITING = "waiting" #:
# Event("look", {"bubbles":true, "cancelable":false});
def __init__(self, _type=None, *args, **kwargs):
# print('type', _type)
self.type = _type
self.bubbles = None
self.cancelable = None
self.cancelBubble = None
self.composed = None
self.currentTarget = None
self.defaultPrevented = False
self.eventPhase = None
self.explicitOriginalTarget = None
self.isTrusted = None
self.originalTarget = None
self.returnValue = None
self.srcElement = None
self.target = None
        # ms = time.time_ns() // 1000000  (time_ns is available from Python 3.7 up)
self.timeStamp = int(round(time.time() * 1000))
def composedPath(self):
return self.type + ":" + str(self.timeStamp)
def initEvent(self, _type=None, *args, **kwargs):
self.__init__(_type, args, kwargs)
def stopPropagation(self):
"""[prevents further propagation of the current event in the capturing and bubbling phases]"""
# self.defaultPrevented = True
# self.returnValue = None
# self.originalTarget = None
# self.explicitOriginalTarget = None
# self.target = None
# self.srcElement = None
# self.bubbles = None
# self.cancelable = None
# self.cancelBubble = None
# self.composed = None
# self.currentTarget = None
# self.eventPhase = None
# self.isTrusted = None
# self.returnValue = None
# self.timeStamp = int(round(time.time() * 1000))
# self.type = None
pass
def msConvertURL(self):
pass
def preventDefault(self):
pass
def stopImmediatePropagation(self):
pass
class MouseEvent(Event):
""" mouse events """
CLICK = "click" #:
CONTEXTMENU = "contextmenu" #:
DBLCLICK = "dblclick" #:
MOUSEDOWN = "mousedown" #:
MOUSEENTER = "mouseenter" #:
MOUSELEAVE = "mouseleave" #:
MOUSEMOVE = "mousemove" #:
MOUSEOVER = "mouseover" #:
MOUSEOUT = "mouseout" #:
MOUSEUP = "mouseup" #:
def __init__(self, _type, *args, **kwargs):
# self.args = args
# self.kwargs = kwargs
self.x = 0
self.y = 0
self._clientX = 0
        self._clientY = 0
self._altKey = False
self._ctrlKey = False
self._shiftKey = False
self._metaKey = False
self._button = None
self._buttons = []
super().__init__(_type, *args, **kwargs)
def initMouseEvent(self, _type=None, canBubble=True, cancelable=True, view=None,
detail=None, screenX=0, screenY=0, clientX=0, clientY=0,
ctrlKey=False, altKey=False, shiftKey=False, metaKey=False,
button=None, relatedTarget=None, from_json={}, *args, **kwargs):
# print('initMouseEvent')
self._type = _type
self.canBubble = canBubble
self.cancelable = cancelable
self.view = view
self.detail = detail
self.screenX = screenX
self.screenY = screenY
self._clientX = clientX
self._clientY = clientY
self._ctrlKey = ctrlKey
self._altKey = altKey
self._shiftKey = shiftKey
self._metaKey = metaKey
self._button = button
self.relatedTarget = relatedTarget
# TODO - parse from_json - so can relay
@property
def clientX(self):
return self.x
@property
def clientY(self):
return self.y
@property
def altKey(self):
return self._altKey
@property
def ctrlKey(self):
return self._ctrlKey
@property
def shiftKey(self):
return self._shiftKey
@property
def metaKey(self):
return self._metaKey
@property
def button(self):
return self._button
@property
def buttons(self):
return self._buttons
@property
def which(self):
return self._button
# MOUSE_EVENT
    # getModifierState() Returns true if the specified modifier key is currently active MouseEvent
# MovementX Returns the horizontal coordinate of the mouse pointer relative to the position of the last mousemove event MouseEvent
# MovementY Returns the vertical coordinate of the mouse pointer relative to the position of the last mousemove event MouseEvent
# offsetX Returns the horizontal coordinate of the mouse pointer relative to the position of the edge of the target element MouseEvent
# offsetY Returns the vertical coordinate of the mouse pointer relative to the position of the edge of the target element MouseEvent
# pageX Returns the horizontal coordinate of the mouse pointer, relative to the document, when the mouse event was triggered MouseEvent
# pageY Returns the vertical coordinate of the mouse pointer, relative to the document, when the mouse event was triggered MouseEvent
# region MouseEvent
# relatedTarget Returns the element related to the element that triggered the mouse event MouseEvent, FocusEvent
class KeyboardEvent(Event):
""" keyboard events """
KEYDOWN = "keydown" #:
KEYPRESS = "keypress" #:
KEYUP = "keyup" #:
def __init__(self, _type, *args, **kwargs):
# self.args = args
# self.kwargs = kwargs
self._altKey = False
self._ctrlKey = False
self._shiftKey = False
self._metaKey = False
self.charCode = None
self.code = None
self.key = None
self.keyCode = None
super().__init__(_type, *args, **kwargs)
def initKeyboardEvent(self, typeArg, canBubbleArg, cancelableArg, viewArg, charArg, keyArg,
locationArg, modifiersListArg, repeat):
self._type = typeArg
self.canBubbleArg = canBubbleArg
self.cancelableArg = cancelableArg
self.viewArg = viewArg
self.charArg = charArg
self.keyArg = keyArg
self.locationArg = locationArg
self.modifiersListArg = modifiersListArg
self.repeat = repeat
@property
def altKey(self):
return self._altKey
@property
def ctrlKey(self):
return self._ctrlKey
@property
def shiftKey(self):
return self._shiftKey
@property
def metaKey(self):
return self._metaKey
@property
def unicode(self):
return self.key
# @property
# def keyCode(self):
# return self.keyCode
# @property
# def charCode(self):
# return self.charCode
# @property
# def code(self):
# return self.code
# @property
# def key(self):
# return self.key
# def isComposing(self, *args, **kwargs):
# pass
# KeyboardEvent
# isComposing Returns whether the state of the event is composing or not InputEvent, KeyboardEvent
# repeat Returns whether a key is being hold down repeatedly, or not KeyboardEvent
# location Returns the location of a key on the keyboard or device KeyboardEvent
class UIEvent(Event):
""" UIEvent """
def __init__(self, _type, *args, **kwargs):
self.detail = None
self.view = None
super().__init__(_type, *args, **kwargs)
class CompositionEvent(UIEvent):
""" CompositionEvent """
START = "compositionstart"
END = "compositionend"
UPDATE = "compositionupdate"
def __init__(self, _type, *args, **kwargs):
self.data = None #: Returns the characters generated by the input method that raised the event
self.locale = None
super().__init__(_type, *args, **kwargs)
class FocusEvent(Event):
""" FocusEvent """
BLUR = "blur" #:
FOCUS = "focus" #:
FOCUSIN = "focusin" #:
FOCUSOUT = "focusout" #:
def __init__(self, _type, *args, **kwargs):
self.relatedTarget = None
super().__init__(_type, *args, **kwargs)
class TouchEvent(Event):
""" TouchEvent """
TOUCHCANCEL = "touchcancel" #:
TOUCHEND = "touchend" #:
TOUCHMOVE = "touchmove" #:
TOUCHSTART = "touchstart" #:
def __init__(self, _type, *args, **kwargs):
        self.altKey = None
        self.changedTouches = None
        self.ctrlKey = None
        self.metaKey = None
        self.shiftKey = None
self.targetTouches = None
self.touches = None
super().__init__(_type, *args, **kwargs)
class WheelEvent(Event):
""" WheelEvent """
MOUSEWHEEL = "mousewheel" # DEPRECATED - USE WHEEL #:
WHEEL = "wheel" #:
def __init__(self, _type, *args, **kwargs):
self.deltaX = None
self.deltaY = None
self.deltaZ = None
self.deltaMode = None
super().__init__(_type, *args, **kwargs)
class AnimationEvent(Event):
""" AnimationEvent """
ANIMATIONEND = "animationend" #:
ANIMATIONITERATION = "animationiteration" #:
ANIMATIONSTART = "animationstart" #:
def __init__(self, _type, *args, **kwargs):
self.animationName = None
""" Returns the name of the animation """
self.elapsedTime = None
""" Returns the number of seconds an animation has been running """
self.pseudoElement = None
""" Returns the name of the pseudo-element of the animation """
super().__init__(_type, *args, **kwargs)
class ClipboardEvent(Event):
""" ClipboardEvent """
COPY = "copy" #:
CUT = "cut" #:
PASTE = "paste" #:
def __init__(self, _type, *args, **kwargs):
self.clipboardData = None
""" Returns an object containing the data affected by the clipboard operation """
super().__init__(_type, *args, **kwargs)
class ErrorEvent(Event):
""" ErrorEvent """
ERROR = "error" #:
def __init__(self, _type, *args, **kwargs):
self.message = None
# self.filename=None
# self.lineno=0
# self.colno=0
# self.error={}
super().__init__(_type, *args, **kwargs)
class SubmitEvent(Event):
""" SubmitEvent """
SUBMIT = "submit" #:
def __init__(self, _type, *args, **kwargs):
super().__init__(_type, *args, **kwargs)
class PointerEvent(Event):
""" PointerEvent """
POINTER = "pointer" #:
def __init__(self, _type, *args, **kwargs):
self.pointerId = None
self.width = None
self.height = None
self.pressure = None
self.tangentialPressure = None
self.tiltX = None
self.tiltY = None
self.twist = None
self.pointerType = None
self.isPrimary = None
super().__init__(_type, *args, **kwargs)
class BeforeUnloadEvent(Event):
BEFOREUNLOAD = "beforeunload" #:
""" BeforeUnloadEvent """
def __init__(self, _type, *args, **kwargs):
super().__init__(_type, *args, **kwargs)
class SVGEvent(Event):
""" SVGEvent """
def __init__(self, _type, *args, **kwargs):
super().__init__(_type, *args, **kwargs)
class TimerEvent(Event):
TIMER = "timer" #:
""" TimerEvent """
def __init__(self, _type, *args, **kwargs):
super().__init__(_type, *args, **kwargs)
class DragEvent(Event):
""" DragEvent """
DRAG = "drag" #:
END = "dragend" #:
ENTER = "dragenter" #:
EXIT = "dragexit" #:
LEAVE = "dragleave" #:
OVER = "dragover" #:
START = "dragstart" #:
DROP = "drop" #:
def __init__(self, _type, *args, **kwargs):
self.dataTransfer = None
""" Returns the data that is dragged/dropped """
super().__init__(_type, *args, **kwargs)
class HashChangeEvent(Event):
""" HashChangeEvent """
CHANGE = "hashchange" #:
def __init__(self, _type, *args, **kwargs):
self.newURL = None
self.oldURL = None
super().__init__(_type, *args, **kwargs)
class InputEvent(Event):
""" InputEvent """
def __init__(self, _type, *args, **kwargs):
self.data = None
""" Returns the inserted characters """
        self.dataTransfer = None
        """ Returns an object containing information about the inserted/deleted data """
        self.getTargetRanges = None
        """ Returns an array containing target ranges that will be affected by the insertion/deletion """
        self.inputType = None
        """ Returns the type of the change (i.e "inserting" or "deleting") """
        self.isComposing = None
""" Returns whether the state of the event is composing or not """
super().__init__(_type, *args, **kwargs)
class PageTransitionEvent(Event):
""" PageTransitionEvent """
PAGEHIDE = "pagehide" #:
PAGESHOW = "pageshow" #:
def __init__(self, _type, *args, **kwargs):
self.persisted = None
""" Returns whether the webpage was cached by the browser """
super().__init__(_type, *args, **kwargs)
class PopStateEvent(Event):
""" PopStateEvent """
def __init__(self, _type, *args, **kwargs):
self.state = None
""" Returns an object containing a copy of the history entries """
super().__init__(_type, *args, **kwargs)
class StorageEvent(Event):
""" StorageEvent """
def __init__(self, _type, *args, **kwargs):
self.key = None
""" Returns the key of the changed storage item """
self.newValue = None
""" Returns the new value of the changed storage item """
self.oldValue = None
""" Returns the old value of the changed storage item """
self.storageArea = None
""" Returns an object representing the affected storage object """
self.url = None
""" Returns the URL of the changed item's document """
super().__init__(_type, *args, **kwargs)
class TransitionEvent(Event):
""" TransitionEvent """
TRANSITIONEND = "transitionend" #:
def __init__(self, _type, *args, **kwargs):
self.propertyName = None
""" Returns the name of the transition"""
self.elapsedTime = None
""" Returns the number of seconds a transition has been running """
self.pseudoElement = None
""" Returns the name of the pseudo-element of the transition """
super().__init__(_type, *args, **kwargs)
class ProgressEvent(Event):
""" ProgressEvent """
LOADSTART = "loadstart" #:
def __init__(self, _type, *args, **kwargs):
super().__init__(_type, *args, **kwargs)
class CustomEvent(Event):
""" CustomEvent """
def __init__(self, _type, *args, **kwargs):
self.detail = None
super().__init__(_type, *args, **kwargs)
def initCustomEvent(self):
pass
class GamePadEvent(Event):
""" GamePadEvent """
START = "gamepadconnected" #:
STOP = "gamepaddisconnected" #:
def __init__(self, _type, *args, **kwargs):
self.gamepad = None
super().__init__(_type, *args, **kwargs)
class TweenEvent(Event):
""" TweenEvent """
START = "onStart" #:
STOP = "onStop" #:
RESET = "onReset" #:
PAUSE = "onPause" #:
UNPAUSE = "onUnPause" #:
UPDATE_START = "onUpdateStart" #:
UPDATE_END = "onUpdateEnd" #:
COMPLETE = "onComplete" #:
TIMER = "onTimer" #:
_source = None
@property
def source(self):
return self._source
@source.setter
def source(self, source):
self._source = source
def __init__(self, _type, source=None, bubbles=False, cancelable=False):
# super.__init__(self, type, bubbles, cancelable)
super().__init__(_type) # TODO -
self.source = source
class GlobalEventHandler: # (EventDispatcher):
# def __init__(self):
# super().__init__(self)
# self.addEventListener(KeyboardEvent.KEYDOWN, self.onkeydown)
# self.addEventListener(KeyboardEvent.KEYUP, self.onkeyup)
# self.addEventListener(MouseEvent.MOUSEMOVE, self.onmousemove)
# self.addEventListener(MouseEvent.MOUSEDOWN, self.onmousedown)
# self.addEventListener(MouseEvent.MOUSEUP, self.onmouseup)
# self.addEventListener(DragEvent.DRAG, self.ondrag)
# self.addEventListener(DragEvent.END, self.ondragend)
# self.addEventListener(DragEvent.ENTER, self.ondragenter)
# self.addEventListener(DragEvent.EXIT, self.ondragexit)
# self.addEventListener(DragEvent.LEAVE, self.ondragleave)
# self.addEventListener(DragEvent.OVER, self.ondragover)
# self.addEventListener(DragEvent.START, self.ondragstart)
# self.addEventListener(DragEvent.DROP, self.ondrop)
# self.addEventListener(ClipboardEvent.CUT, self.oncut)
# self.addEventListener(ClipboardEvent.COPY, self.oncopy)
# self.addEventListener(ClipboardEvent.PASTE, self.onpaste)
def onabort(self, event):
print(event)
raise NotImplementedError
def onblur(self, event):
print(event)
raise NotImplementedError
def oncancel(self, event):
print(event)
raise NotImplementedError
def oncanplay(self, event):
print(event)
raise NotImplementedError
def oncanplaythrough(self, event):
print(event)
raise NotImplementedError
def onchange(self, event):
print(event)
raise NotImplementedError
def onclick(self, event):
print(event)
raise NotImplementedError
def onclose(self, event):
print(event)
raise NotImplementedError
def oncontextmenu(self, event):
print(event)
raise NotImplementedError
def oncuechange(self, event):
print(event)
raise NotImplementedError
def ondblclick(self, event):
print(event)
raise NotImplementedError
def ondrag(self, event):
print(event)
raise NotImplementedError
def ondragend(self, event):
print(event)
raise NotImplementedError
def ondragenter(self, event):
print(event)
raise NotImplementedError
def ondragexit(self, event):
print(event)
raise NotImplementedError
def ondragleave(self, event):
print(event)
raise NotImplementedError
def ondragover(self, event):
print(event)
raise NotImplementedError
def ondragstart(self, event):
print(event)
raise NotImplementedError
def ondrop(self, event):
print(event)
raise NotImplementedError
def ondurationchange(self, event):
print(event)
raise NotImplementedError
def onemptied(self, event):
print(event)
raise NotImplementedError
def onended(self, event):
print(event)
raise NotImplementedError
def onerror(self, event):
print(event)
raise NotImplementedError
def onfocus(self, event):
print(event)
raise NotImplementedError
def ongotpointercapture(self, event):
print(event)
raise NotImplementedError
def oninput(self, event):
print(event)
raise NotImplementedError
def oninvalid(self, event):
print(event)
raise NotImplementedError
def onkeydown(self, event):
print(event)
raise NotImplementedError
def onkeypress(self, event):
print(event)
raise NotImplementedError
def onkeyup(self, event):
print(event)
raise NotImplementedError
def onload(self, event):
print(event)
raise NotImplementedError
def onloadeddata(self, event):
print(event)
raise NotImplementedError
def onloadedmetadata(self, event):
print(event)
raise NotImplementedError
def onloadend(self, event):
print(event)
raise NotImplementedError
def onloadstart(self, event):
print(event)
raise NotImplementedError
def onlostpointercapture(self, event):
print(event)
raise NotImplementedError
def onmouseenter(self, event):
print(event)
raise NotImplementedError
def onmouseleave(self, event):
print(event)
raise NotImplementedError
def onmousemove(self, event):
print(event)
raise NotImplementedError
def onmouseout(self, event):
print(event)
raise NotImplementedError
def onmouseover(self, event):
print(event)
raise NotImplementedError
def onmouseup(self, event):
print(event)
raise NotImplementedError
def onpause(self, event):
print(event)
raise NotImplementedError
def onplay(self, event):
print(event)
raise NotImplementedError
def onplaying(self, event):
print(event)
raise NotImplementedError
def onpointercancel(self, event):
print(event)
raise NotImplementedError
def onpointerdown(self, event):
print(event)
raise NotImplementedError
def onpointerenter(self, event):
print(event)
raise NotImplementedError
def onpointerleave(self, event):
print(event)
raise NotImplementedError
def onpointermove(self, event):
print(event)
raise NotImplementedError
def onpointerout(self, event):
print(event)
raise NotImplementedError
def onpointerover(self, event):
print(event)
raise NotImplementedError
def onpointerup(self, event):
print(event)
raise NotImplementedError
def onprogress(self, event):
print(event)
raise NotImplementedError
def onratechange(self, event):
print(event)
raise NotImplementedError
def onreset(self, event):
print(event)
raise NotImplementedError
def onresize(self, event):
print(event)
raise NotImplementedError
def onscroll(self, event):
print(event)
raise NotImplementedError
def onseeked(self, event):
print(event)
raise NotImplementedError
def onseeking(self, event):
print(event)
raise NotImplementedError
def onselect(self, event):
print(event)
raise NotImplementedError
def onselectionchange(self, event):
print(event)
raise NotImplementedError
def onselectstart(self, event):
print(event)
raise NotImplementedError
def onshow(self, event):
print(event)
raise NotImplementedError
def onstalled(self, event):
print(event)
raise NotImplementedError
def onsubmit(self, event):
print(event)
raise NotImplementedError
def onsuspend(self, event):
print(event)
raise NotImplementedError
def ontimeupdate(self, event):
print(event)
raise NotImplementedError
def onvolumechange(self, event):
print(event)
raise NotImplementedError
def onwaiting(self, event):
print(event)
raise NotImplementedError
def onwheel(self, event):
print(event)
raise NotImplementedError
def onanimationcancel(self, event):
print(event)
raise NotImplementedError
def onanimationend(self, event):
print(event)
raise NotImplementedError
def onanimationiteration(self, event):
print(event)
raise NotImplementedError
def onauxclick(self, event):
print(event)
raise NotImplementedError
def onformdata(self, event):
print(event)
raise NotImplementedError
def onmousedown(self, event):
print(event)
raise NotImplementedError
def ontouchcancel(self, event):
print(event)
raise NotImplementedError
def ontouchstart(self, event):
print(event)
raise NotImplementedError
def ontransitioncancel(self, event):
print(event)
raise NotImplementedError
def ontransitionend(self, event):
print(event)
raise NotImplementedError
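

# Illustrative usage sketch only, not part of the event classes above: it
# assumes this module is used as-is and simply shows how a MouseEvent might be
# constructed and inspected; a real dispatcher would fill in x/y before the
# clientX/clientY properties return anything other than their defaults.
if __name__ == "__main__":
    click = MouseEvent(MouseEvent.CLICK)
    click.initMouseEvent(MouseEvent.CLICK, canBubble=True, cancelable=True,
                         clientX=10, clientY=20, button=0)
    print(click.type, click.clientX, click.clientY, click.button)
    print(click.composedPath())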
|
[
"time.time"
] |
[((3599, 3610), 'time.time', 'time.time', ([], {}), '()\n', (3608, 3610), False, 'import time\n')]
|
import pandas as pd
import numpy as np
import torch
def min_max_x(x):
for index, col in enumerate(x.T):
min_col = np.min(col)
max_col = np.max(col)
if min_col != max_col:
x.T[index] = (x.T[index] - min_col)/(max_col - min_col)
else:
x.T[index] = x.T[index] - min_col
return x
def load_dataset(path='./processed_dataset/data.csv', split=0.8, shuffle=True, seed=0):
np.random.seed(seed)
df = pd.read_csv(path)
df = df.values
if shuffle:
np.random.shuffle(df)
train = df[:int(df.shape[0]*split)]
validation = df[int(df.shape[0]*split):]
train_x, train_y = train.T[:12].T, train.T[12:].T
validation_x, validation_y = validation.T[:12].T, validation.T[12:].T
train_x, validation_x = min_max_x(train_x), min_max_x(validation_x)
train_x, train_y, validation_x, validation_y = train_x.astype(np.float32), train_y.astype(np.float32), validation_x.astype(np.float32), validation_y.astype(np.float32)
train_x, train_y, validation_x, validation_y = torch.from_numpy(train_x), torch.from_numpy(train_y), torch.from_numpy(validation_x), torch.from_numpy(validation_y)
return train_x, train_y, validation_x, validation_y
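
# Hedged follow-on sketch (not part of the original script): the tensors
# returned by load_dataset() can be wrapped in a DataLoader for mini-batch
# training. The batch size and shuffle flag below are arbitrary illustrative
# choices, not values taken from the original code.
def make_loader(x, y, batch_size=32, shuffle=True):
    from torch.utils.data import TensorDataset, DataLoader
    # pair features with labels so they are batched together
    return DataLoader(TensorDataset(x, y), batch_size=batch_size, shuffle=shuffle)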
if __name__ == '__main__':
train_x, train_y, validation_x, validation_y = load_dataset()
print(train_x.shape, train_y.shape, validation_x.shape, validation_y.shape)
|
[
"numpy.random.seed",
"pandas.read_csv",
"numpy.min",
"numpy.max",
"numpy.random.shuffle",
"torch.from_numpy"
] |
[((384, 404), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (398, 404), True, 'import numpy as np\n'), ((411, 428), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (422, 428), True, 'import pandas as pd\n'), ((118, 129), 'numpy.min', 'np.min', (['col'], {}), '(col)\n', (124, 129), True, 'import numpy as np\n'), ((142, 153), 'numpy.max', 'np.max', (['col'], {}), '(col)\n', (148, 153), True, 'import numpy as np\n'), ((460, 481), 'numpy.random.shuffle', 'np.random.shuffle', (['df'], {}), '(df)\n', (477, 481), True, 'import numpy as np\n'), ((973, 998), 'torch.from_numpy', 'torch.from_numpy', (['train_x'], {}), '(train_x)\n', (989, 998), False, 'import torch\n'), ((1000, 1025), 'torch.from_numpy', 'torch.from_numpy', (['train_y'], {}), '(train_y)\n', (1016, 1025), False, 'import torch\n'), ((1027, 1057), 'torch.from_numpy', 'torch.from_numpy', (['validation_x'], {}), '(validation_x)\n', (1043, 1057), False, 'import torch\n'), ((1059, 1089), 'torch.from_numpy', 'torch.from_numpy', (['validation_y'], {}), '(validation_y)\n', (1075, 1089), False, 'import torch\n')]
|
"""
This code returns a DFA that is equivalent to the Tree constructed by compressing all the traces into one tree.
"""
import read_traces, DFA_utils_tree_only, time, tree_utils
def solve_tree_only(g_pos, G, Sigma, T, timeout, info, be_quiet=False):
    assert g_pos in G, f"Error, {g_pos} not in G"
# creating the auxiliary tree structure
tree = tree_utils.create_tree(g_pos, G, Sigma, T, prune=False)
nodes = tree_utils.get_reachable_nodes(tree)
# creating an equivalent DFA
q_0 = 0
q_pos = 1
q_neg = 2
# assigning ids to each node
n_current = 3
for n in nodes:
if n.is_root():
n.assign_id(q_0)
elif n.is_positive_node():
n.assign_id(q_pos)
elif n.is_negative_node():
n.assign_id(q_neg)
else:
n.assign_id(n_current)
n_current += 1
# creating the dfa
dfa = {}
for ni in nodes:
if ni.is_terminal():
continue
ni_id = ni.get_id()
for nj in ni.get_children():
nj_id = nj.get_id()
ni_sigma = nj.get_psigma()
dfa[(ni_id,ni_sigma)] = nj_id
DFA_utils_tree_only.clean_dfa(q_0, dfa, T)
# Adding the probabilities
pos_prob = DFA_utils_tree_only.add_probabilities(q_0, dfa, T, g_pos)
return q_0, dfa, pos_prob
|
[
"DFA_utils_tree_only.add_probabilities",
"tree_utils.get_reachable_nodes",
"tree_utils.create_tree",
"DFA_utils_tree_only.clean_dfa"
] |
[((348, 403), 'tree_utils.create_tree', 'tree_utils.create_tree', (['g_pos', 'G', 'Sigma', 'T'], {'prune': '(False)'}), '(g_pos, G, Sigma, T, prune=False)\n', (370, 403), False, 'import read_traces, DFA_utils_tree_only, time, tree_utils\n'), ((413, 449), 'tree_utils.get_reachable_nodes', 'tree_utils.get_reachable_nodes', (['tree'], {}), '(tree)\n', (443, 449), False, 'import read_traces, DFA_utils_tree_only, time, tree_utils\n'), ((999, 1041), 'DFA_utils_tree_only.clean_dfa', 'DFA_utils_tree_only.clean_dfa', (['q_0', 'dfa', 'T'], {}), '(q_0, dfa, T)\n', (1028, 1041), False, 'import read_traces, DFA_utils_tree_only, time, tree_utils\n'), ((1083, 1140), 'DFA_utils_tree_only.add_probabilities', 'DFA_utils_tree_only.add_probabilities', (['q_0', 'dfa', 'T', 'g_pos'], {}), '(q_0, dfa, T, g_pos)\n', (1120, 1140), False, 'import read_traces, DFA_utils_tree_only, time, tree_utils\n')]
|
# Source:
# https://www.tensorflow.org/api_guides/python/reading_data
import tensorflow as tf
# creates a FIFO queue for holding the filenames until the reader needs them.
# The following line is equivalent to :
# filename_queue = tf.train.string_input_producer(["file0.csv", "file1.csv"])
filename_queue = tf.train.string_input_producer([("file%d" % i) for i in range(2)])
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)
# Default values, in case of empty columns. Also specifies the type of the decoded result.
# Try a simpler expression:
# col1, col2, col3, col4, col5 = tf.decode_csv(value, record_defaults=[[1]]*5)
record_defaults = [[1], [1], [1], [1], [1]]
col1, col2, col3, col4, col5 = tf.decode_csv(
value, record_defaults=record_defaults)
features = tf.stack([col1, col2, col3, col4])
with tf.Session() as sess:
# Start populating the filename queue.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
for i in range(1200):
# Retrieve a single instance:
example, label = sess.run([features, col5])
coord.request_stop()
coord.join(threads)
|
[
"tensorflow.train.Coordinator",
"tensorflow.Session",
"tensorflow.stack",
"tensorflow.train.start_queue_runners",
"tensorflow.TextLineReader",
"tensorflow.decode_csv"
] |
[((394, 413), 'tensorflow.TextLineReader', 'tf.TextLineReader', ([], {}), '()\n', (411, 413), True, 'import tensorflow as tf\n'), ((736, 789), 'tensorflow.decode_csv', 'tf.decode_csv', (['value'], {'record_defaults': 'record_defaults'}), '(value, record_defaults=record_defaults)\n', (749, 789), True, 'import tensorflow as tf\n'), ((808, 842), 'tensorflow.stack', 'tf.stack', (['[col1, col2, col3, col4]'], {}), '([col1, col2, col3, col4])\n', (816, 842), True, 'import tensorflow as tf\n'), ((851, 863), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (861, 863), True, 'import tensorflow as tf\n'), ((930, 952), 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), '()\n', (950, 952), True, 'import tensorflow as tf\n'), ((968, 1009), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'coord': 'coord'}), '(coord=coord)\n', (996, 1009), True, 'import tensorflow as tf\n')]
|
"""
Author: <NAME>
Github: github.com/yashbmewada
Program for demonstrating simple line fitting using Tensorflow and Gradient Descent Algorithm
This program trains the model to fit two values, slope(m) and x-intercept(b) in the equation
of line y=mx+b. Here we would provide very small dataset of randomly generated pointset xs and ys
and train the tensorflow model to adjust the values of m and b in order to fit a straight line.
This straight line can further be used to predict any unknown value Y for a given unknown X based on the
learned value of m and b.
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2' # called in order to minimize the warnings about SSE4.1 instructions.
import tensorflow as tf
"""
Random points of X and Y form the training data. aka Dataset (only training. no validation or test)
"""
xs = [0.00,2.00,4.00,6.00,8.00,10.00,12.00,14.00] #features
ys = [-0.82,-0.90,-0.12,0.26,0.31,0.64,1.02,1.00] #labels (actual outputs)
"""
Initial values for m and b. These values would be adjusted to fit the above dataset point
"""
m_initial = -0.50
b_initial = 1.00
"""
tf.Variable : allows us to create variables whose values can be adjusted in order to learn at each pass on the dataset.
"""
m = tf.Variable(m_initial)
b = tf.Variable(b_initial)
"""
In order to adjust and fit the line, we try to minimize the "error" between two given values of (x,y) so that the
line can be fit properly as we minimize the value of distances between our m and b i.e. predicted_y and actual y
(from "ys").
"""
error = 0.0
"""
We write an operation for calculation of error and also iteration over the value of X and Y from the Dataset [xs,ys].
Running this over around 1000 times, we should be able to minimize the error to a respectable fit for the line.
"""
for x,y in zip(xs,ys):
predicted_y = m*x + b
error += (y-predicted_y)**2 # this is the square of difference of error added to the total error 'cost' which we minimize.
"""
Now, in order to train over the operation set we defined above, we use TensorFlow's Gradient Descent Optimizer, which allows
us to train over this data set; we pass the "error" to the minimize() function of this optimizer as a parameter.
While initializing the Gradient Descent optimizer, we define a learning_rate = 0.001.
This learning rate defines the magnitude, or "how big" a jump, we want to make while minimizing the "cost" / "error".
Remember: too small a learning rate would make your training very slow, and too big a learning rate would make the training never find
an optimum solution. The best learning rate can be found by trying different values. Here we take 0.001 as it usually works in
most cases.
"""
optimizer_op = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(error)
"""
Tensorflow uses a "session" to run the above mentioned training steps.
So before starting the session it is always advisable to add an operation that initializes all variables.
"""
init_op = tf.global_variables_initializer()
"""
All the calculations would now be done in a Session
"""
with tf.Session() as session:
session.run(init_op)
_ITERATIONS = 1000 #number of passes on the dataset
for iteration in range(_ITERATIONS):
session.run(optimizer_op) #calling our optimization operator to minimize error
slope, intercept = session.run((m,b)) #calling our adjusted values
print('slope: ', slope , 'Intercept: ', intercept)
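
# The header comment says the fitted line can be used to predict an unknown Y
# for a given X. A small illustrative sketch of that final step, using the
# slope and intercept computed above (x_new is an arbitrary example value):
x_new = 9.00
y_predicted = slope * x_new + intercept
print('prediction for x =', x_new, ':', y_predicted)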
|
[
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.Variable",
"tensorflow.Session"
] |
[((1227, 1249), 'tensorflow.Variable', 'tf.Variable', (['m_initial'], {}), '(m_initial)\n', (1238, 1249), True, 'import tensorflow as tf\n'), ((1254, 1276), 'tensorflow.Variable', 'tf.Variable', (['b_initial'], {}), '(b_initial)\n', (1265, 1276), True, 'import tensorflow as tf\n'), ((2965, 2998), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2996, 2998), True, 'import tensorflow as tf\n'), ((3066, 3078), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3076, 3078), True, 'import tensorflow as tf\n'), ((2715, 2769), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (2748, 2769), True, 'import tensorflow as tf\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 23 17:20:22 2019
convert txt to excel
@author: zyb_as
"""
import os
import argparse, textwrap
import xlwt
# set options
parser = argparse.ArgumentParser(description = 'convert txt to excel',
usage = textwrap.dedent('''\
command example:
python %(prog)s --file_name='test.txt' --splitter='\\t' '''),
formatter_class = argparse.RawTextHelpFormatter)
parser.add_argument('--file_name', type = str, default = 'test.txt',
help = 'the path of the txt file')
parser.add_argument('--splitter', type = str, default = '\t',
help = 'the splitter for each line in the txt file.')
#parser.add_argument('--fields_num', type = int, default = 1,
# help = 'the fields number each line.')
parser.add_argument('--max_lines', type = int, default = 50000,
help = 'max lines number in one excel')
def download_from_txt():
# get options
args = parser.parse_args()
file_name = args.file_name
splitter = args.splitter
#fields_num = args.fields_num
max_lines = args.max_lines
    if not os.path.exists(file_name):
        print("ERROR! the file to be converted doesn't exist")
        return
excel_file = ''
if file_name[-4:] == '.txt':
excel_file = file_name[:-4] + '.xls'
else:
excel_file = file_name + '.xls'
if splitter == '\\t':
splitter = '\t'
cnt = 0
xls_index = 0
cur_excel_file = excel_file[:-4] + '_' + str(xls_index) + '.xls'
    # create the workbook and sheet
workbook = xlwt.Workbook(encoding = 'utf-8')
worksheet = workbook.add_sheet('temp', cell_overwrite_ok = True)
worksheet.write(0, 0, label = 'Row 0, Column 0 Value')
for line in open(file_name, 'r').readlines():
if cnt == max_lines:
workbook.save(cur_excel_file)
xls_index += 1
cur_excel_file = excel_file[:-4] + '_' + str(xls_index) + '.xls'
workbook = xlwt.Workbook(encoding = 'utf-8')
worksheet = workbook.add_sheet('temp')
cnt = 0
item = line.split(splitter)
print(cnt)
for idx, it in enumerate(item):
worksheet.write(cnt, idx, it.decode('utf-8', 'ignore'))
cnt += 1
if cnt <= max_lines:
workbook.save(cur_excel_file)
if __name__ == "__main__":
download_from_txt()
|
[
"textwrap.dedent",
"xlwt.Workbook",
"os.path.exists"
] |
[((1536, 1567), 'xlwt.Workbook', 'xlwt.Workbook', ([], {'encoding': '"""utf-8"""'}), "(encoding='utf-8')\n", (1549, 1567), False, 'import xlwt\n'), ((258, 380), 'textwrap.dedent', 'textwrap.dedent', (['""" command example:\n python %(prog)s --file_name=\'test.txt\' --splitter=\'\\\\t\' """'], {}), '(\n """ command example:\n python %(prog)s --file_name=\'test.txt\' --splitter=\'\\\\t\' """\n )\n', (273, 380), False, 'import argparse, textwrap\n'), ((1110, 1135), 'os.path.exists', 'os.path.exists', (['file_name'], {}), '(file_name)\n', (1124, 1135), False, 'import os\n'), ((1946, 1977), 'xlwt.Workbook', 'xlwt.Workbook', ([], {'encoding': '"""utf-8"""'}), "(encoding='utf-8')\n", (1959, 1977), False, 'import xlwt\n')]
|
from pybuilder.core import init, use_plugin, Author
use_plugin('python.core')
use_plugin('python.flake8')
use_plugin('python.unittest')
use_plugin('python.coverage')
use_plugin('python.distutils')
use_plugin("python.install_dependencies")
authors = [Author('Dachaz', '<EMAIL>')]
license = 'MIT'
name = 'scenery'
summary = 'A pattern-based scene release renamer'
description = """A command-line tool that automates renaming of so-called "Scene Release"
files by fetching episode names (from TVMaze) and which uses pattern-based generic building
blocks (show name, season number, episode number, episode title) to format the output.
"""
url = 'https://github.com/dachaz/scenery'
version = '1.0.1'
requires_python = ">=2.7"
default_task = ["install_dependencies", "analyze", "publish"]
@init
def initialize(project):
project.build_depends_on('mockito')
project.set_property('dir_source_main_python', 'src')
project.set_property('dir_source_unittest_python', 'test')
project.set_property('flake8_break_build', True)
project.set_property('flake8_include_test_sources', True)
project.set_property('flake8_include_scripts', True)
# relevant tests are in Scenery_tests.py
project.get_property('coverage_exceptions').append('scenery.__main__')
project.get_property('coverage_exceptions').append('scenery')
project.set_property('distutils_console_scripts', ['scenery = scenery:main'])
project.set_property('distutils_classifiers', [
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Topic :: Communications :: File Sharing',
'Topic :: Multimedia',
'Topic :: Multimedia :: Video',
'Topic :: Utilities'
])
|
[
"pybuilder.core.Author",
"pybuilder.core.use_plugin"
] |
[((53, 78), 'pybuilder.core.use_plugin', 'use_plugin', (['"""python.core"""'], {}), "('python.core')\n", (63, 78), False, 'from pybuilder.core import init, use_plugin, Author\n'), ((79, 106), 'pybuilder.core.use_plugin', 'use_plugin', (['"""python.flake8"""'], {}), "('python.flake8')\n", (89, 106), False, 'from pybuilder.core import init, use_plugin, Author\n'), ((107, 136), 'pybuilder.core.use_plugin', 'use_plugin', (['"""python.unittest"""'], {}), "('python.unittest')\n", (117, 136), False, 'from pybuilder.core import init, use_plugin, Author\n'), ((137, 166), 'pybuilder.core.use_plugin', 'use_plugin', (['"""python.coverage"""'], {}), "('python.coverage')\n", (147, 166), False, 'from pybuilder.core import init, use_plugin, Author\n'), ((167, 197), 'pybuilder.core.use_plugin', 'use_plugin', (['"""python.distutils"""'], {}), "('python.distutils')\n", (177, 197), False, 'from pybuilder.core import init, use_plugin, Author\n'), ((198, 239), 'pybuilder.core.use_plugin', 'use_plugin', (['"""python.install_dependencies"""'], {}), "('python.install_dependencies')\n", (208, 239), False, 'from pybuilder.core import init, use_plugin, Author\n'), ((252, 279), 'pybuilder.core.Author', 'Author', (['"""Dachaz"""', '"""<EMAIL>"""'], {}), "('Dachaz', '<EMAIL>')\n", (258, 279), False, 'from pybuilder.core import init, use_plugin, Author\n')]
|
from packetraven.packets.structures import DoublyLinkedList
def test_index():
list_1 = DoublyLinkedList([0, 5, 4, 'foo', 5, 6])
assert list_1[0] == 0
assert list_1[0] is list_1.head.value
assert list_1[3] == 'foo'
assert list_1[-2] == 5
assert list_1[-1] == 6
assert list_1[-1] is list_1.tail.value
assert list_1[:2] == [0, 5]
assert list_1[[1, 3, 0]] == [5, 'foo', 0]
def test_length():
list_1 = DoublyLinkedList()
assert len(list_1) == 0
list_2 = DoublyLinkedList([0, 'foo'])
assert len(list_2) == 2
def test_extend():
list_1 = DoublyLinkedList([0])
list_1.extend(['foo', 5])
assert list_1 == [0, 'foo', 5]
assert list_1.head is not list_1.tail
def test_append():
list_1 = DoublyLinkedList()
list_1.append(0)
assert list_1[0] == 0
assert list_1[-1] == 0
assert list_1.head is list_1.tail
def test_insert():
list_1 = DoublyLinkedList([0, 'foo'])
list_1.insert('bar', 0)
assert list_1 == ['bar', 0, 'foo']
def test_equality():
list_1 = DoublyLinkedList([5, 4, 'foo'])
assert list_1 == [5, 4, 'foo']
assert list_1 == (5, 4, 'foo')
assert list_1 != [5, 4, 'foo', 6, 2]
def test_remove():
list_1 = DoublyLinkedList(['a', 'a'])
list_1.remove('a')
assert len(list_1) == 0
assert list_1.head is None
assert list_1.tail is None
list_2 = DoublyLinkedList(['a', 'b', 'c'])
del list_2[0]
del list_2[-1]
assert len(list_2) == 1
assert list_2[0] == 'b'
assert list_2[-1] == 'b'
list_3 = DoublyLinkedList([0, 5, 4, 'foo', 0, 0])
list_3.remove(0)
assert list_3 == [5, 4, 'foo']
assert list_3[0] == 5
assert list_3[-1] == 'foo'
|
[
"packetraven.packets.structures.DoublyLinkedList"
] |
[((93, 133), 'packetraven.packets.structures.DoublyLinkedList', 'DoublyLinkedList', (["[0, 5, 4, 'foo', 5, 6]"], {}), "([0, 5, 4, 'foo', 5, 6])\n", (109, 133), False, 'from packetraven.packets.structures import DoublyLinkedList\n'), ((442, 460), 'packetraven.packets.structures.DoublyLinkedList', 'DoublyLinkedList', ([], {}), '()\n', (458, 460), False, 'from packetraven.packets.structures import DoublyLinkedList\n'), ((504, 532), 'packetraven.packets.structures.DoublyLinkedList', 'DoublyLinkedList', (["[0, 'foo']"], {}), "([0, 'foo'])\n", (520, 532), False, 'from packetraven.packets.structures import DoublyLinkedList\n'), ((596, 617), 'packetraven.packets.structures.DoublyLinkedList', 'DoublyLinkedList', (['[0]'], {}), '([0])\n', (612, 617), False, 'from packetraven.packets.structures import DoublyLinkedList\n'), ((760, 778), 'packetraven.packets.structures.DoublyLinkedList', 'DoublyLinkedList', ([], {}), '()\n', (776, 778), False, 'from packetraven.packets.structures import DoublyLinkedList\n'), ((926, 954), 'packetraven.packets.structures.DoublyLinkedList', 'DoublyLinkedList', (["[0, 'foo']"], {}), "([0, 'foo'])\n", (942, 954), False, 'from packetraven.packets.structures import DoublyLinkedList\n'), ((1059, 1090), 'packetraven.packets.structures.DoublyLinkedList', 'DoublyLinkedList', (["[5, 4, 'foo']"], {}), "([5, 4, 'foo'])\n", (1075, 1090), False, 'from packetraven.packets.structures import DoublyLinkedList\n'), ((1237, 1265), 'packetraven.packets.structures.DoublyLinkedList', 'DoublyLinkedList', (["['a', 'a']"], {}), "(['a', 'a'])\n", (1253, 1265), False, 'from packetraven.packets.structures import DoublyLinkedList\n'), ((1394, 1427), 'packetraven.packets.structures.DoublyLinkedList', 'DoublyLinkedList', (["['a', 'b', 'c']"], {}), "(['a', 'b', 'c'])\n", (1410, 1427), False, 'from packetraven.packets.structures import DoublyLinkedList\n'), ((1565, 1605), 'packetraven.packets.structures.DoublyLinkedList', 'DoublyLinkedList', (["[0, 5, 4, 'foo', 0, 0]"], {}), "([0, 5, 4, 'foo', 0, 0])\n", (1581, 1605), False, 'from packetraven.packets.structures import DoublyLinkedList\n')]
|
"""Test functions loading."""
import inspect
from griffe.loader import GriffeLoader
from tests import FIXTURES_DIR
loader = GriffeLoader()
def test_loading_functions_arguments(): # noqa: WPS218
"""Test functions arguments loading."""
module = loader.load_module(FIXTURES_DIR / "functions" / "arguments.py")
assert module.members
assert len(module.members) == 11 # noqa: WPS432
function = module["f_posonly"]
assert len(function.arguments) == 1
arg = function.arguments[0]
assert arg is function.arguments["posonly"]
assert arg.name == "posonly"
assert arg.kind is inspect.Parameter.POSITIONAL_ONLY
assert arg.default is None
function = module["f_posonly_default"]
assert len(function.arguments) == 1
arg = function.arguments[0]
assert arg is function.arguments["posonly"]
assert arg.name == "posonly"
assert arg.kind is inspect.Parameter.POSITIONAL_ONLY
assert arg.default == "0"
function = module["f_posonly_poskw"]
assert len(function.arguments) == 2
arg = function.arguments[0]
assert arg is function.arguments["posonly"]
assert arg.name == "posonly"
assert arg.kind is inspect.Parameter.POSITIONAL_ONLY
assert arg.default is None
arg = function.arguments[1]
assert arg is function.arguments["poskw"]
assert arg.name == "poskw"
assert arg.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD
assert arg.default is None
function = module["f_posonly_poskw_default"]
assert len(function.arguments) == 2
arg = function.arguments[0]
assert arg is function.arguments["posonly"]
assert arg.name == "posonly"
assert arg.kind is inspect.Parameter.POSITIONAL_ONLY
assert arg.default is None
arg = function.arguments[1]
assert arg is function.arguments["poskw"]
assert arg.name == "poskw"
assert arg.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD
assert arg.default == "0"
function = module["f_posonly_default_poskw_default"]
assert len(function.arguments) == 2
arg = function.arguments[0]
assert arg is function.arguments["posonly"]
assert arg.name == "posonly"
assert arg.kind is inspect.Parameter.POSITIONAL_ONLY
assert arg.default == "0"
arg = function.arguments[1]
assert arg is function.arguments["poskw"]
assert arg.name == "poskw"
assert arg.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD
assert arg.default == "1"
function = module["f_posonly_poskw_kwonly"]
assert len(function.arguments) == 3
arg = function.arguments[0]
assert arg is function.arguments["posonly"]
assert arg.name == "posonly"
assert arg.kind is inspect.Parameter.POSITIONAL_ONLY
assert arg.default is None
arg = function.arguments[1]
assert arg is function.arguments["poskw"]
assert arg.name == "poskw"
assert arg.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD
assert arg.default is None
arg = function.arguments[2]
assert arg is function.arguments["kwonly"]
assert arg.name == "kwonly"
assert arg.kind is inspect.Parameter.KEYWORD_ONLY
assert arg.default is None
function = module["f_posonly_poskw_kwonly_default"]
assert len(function.arguments) == 3
arg = function.arguments[0]
assert arg is function.arguments["posonly"]
assert arg.name == "posonly"
assert arg.kind is inspect.Parameter.POSITIONAL_ONLY
assert arg.default is None
arg = function.arguments[1]
assert arg is function.arguments["poskw"]
assert arg.name == "poskw"
assert arg.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD
assert arg.default is None
arg = function.arguments[2]
assert arg is function.arguments["kwonly"]
assert arg.name == "kwonly"
assert arg.kind is inspect.Parameter.KEYWORD_ONLY
assert arg.default == "0"
function = module["f_posonly_poskw_default_kwonly_default"]
assert len(function.arguments) == 3
arg = function.arguments[0]
assert arg is function.arguments["posonly"]
assert arg.name == "posonly"
assert arg.kind is inspect.Parameter.POSITIONAL_ONLY
assert arg.default is None
arg = function.arguments[1]
assert arg is function.arguments["poskw"]
assert arg.name == "poskw"
assert arg.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD
assert arg.default == "0"
arg = function.arguments[2]
assert arg is function.arguments["kwonly"]
assert arg.name == "kwonly"
assert arg.kind is inspect.Parameter.KEYWORD_ONLY
assert arg.default == "1"
function = module["f_posonly_default_poskw_default_kwonly_default"]
arg = function.arguments[0]
assert arg is function.arguments["posonly"]
assert arg.name == "posonly"
assert arg.kind is inspect.Parameter.POSITIONAL_ONLY
assert arg.default == "0"
arg = function.arguments[1]
assert arg is function.arguments["poskw"]
assert arg.name == "poskw"
assert arg.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD
assert arg.default == "1"
arg = function.arguments[2]
assert arg is function.arguments["kwonly"]
assert arg.name == "kwonly"
assert arg.kind is inspect.Parameter.KEYWORD_ONLY
assert arg.default == "2"
function = module["f_var"]
assert len(function.arguments) == 3
arg = function.arguments[0]
assert arg.name == "*args"
assert arg.annotation == "str"
arg = function.arguments[1]
assert arg.annotation is None
arg = function.arguments[2]
assert arg.name == "**kwargs"
assert arg.annotation == "int"
function = module["f_annorations"]
assert len(function.arguments) == 4
arg = function.arguments[0]
assert arg.annotation == "str"
arg = function.arguments[1]
assert arg.annotation == "Any"
arg = function.arguments[2]
assert arg.annotation == "typing.Optional[typing.List[int]]"
arg = function.arguments[3]
assert arg.annotation == "float | None"
|
[
"griffe.loader.GriffeLoader"
] |
[((127, 141), 'griffe.loader.GriffeLoader', 'GriffeLoader', ([], {}), '()\n', (139, 141), False, 'from griffe.loader import GriffeLoader\n')]
|
from flask_wtf import FlaskForm, RecaptchaField
from wtforms import StringField , PasswordField, SubmitField
from wtforms.validators import DataRequired, Email, Length
class SignupForm(FlaskForm):
firstname = StringField("First name", validators=[DataRequired("Enter your name")])
lastname = StringField("Last name", validators=[DataRequired("Enter your last name")])
email = StringField("Email", validators=[DataRequired("Provide your email"), Email("Please enter a valid email")])
    password = PasswordField("Password", validators=[DataRequired("Enter a valid password"), Length(min=8, message="Password must be a minimum of 8 characters")])
submit = SubmitField("Submit", validators=[DataRequired()])
#recaptcha = RecaptchaField({'hl': 'zh', 'render': 'explicit'})
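

# Hedged usage sketch (not part of the original module): it assumes Flask is
# installed and that a 'signup.html' template rendering the form fields exists;
# the secret key below is an illustrative placeholder required for CSRF.
if __name__ == "__main__":
    from flask import Flask, render_template

    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'replace-me'

    @app.route('/signup', methods=['GET', 'POST'])
    def signup():
        form = SignupForm()
        if form.validate_on_submit():
            pass  # e.g. persist form.email.data / form.password.data here
        return render_template('signup.html', form=form)

    app.run(debug=True)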
|
[
"wtforms.validators.Length",
"wtforms.validators.DataRequired",
"wtforms.validators.Email"
] |
[((253, 284), 'wtforms.validators.DataRequired', 'DataRequired', (['"""Enter your name"""'], {}), "('Enter your name')\n", (265, 284), False, 'from wtforms.validators import DataRequired, Email, Length\n'), ((339, 375), 'wtforms.validators.DataRequired', 'DataRequired', (['"""Enter your last name"""'], {}), "('Enter your last name')\n", (351, 375), False, 'from wtforms.validators import DataRequired, Email, Length\n'), ((424, 458), 'wtforms.validators.DataRequired', 'DataRequired', (['"""Provide your email"""'], {}), "('Provide your email')\n", (436, 458), False, 'from wtforms.validators import DataRequired, Email, Length\n'), ((460, 495), 'wtforms.validators.Email', 'Email', (['"""Please enter a valid email"""'], {}), "('Please enter a valid email')\n", (465, 495), False, 'from wtforms.validators import DataRequired, Email, Length\n'), ((551, 589), 'wtforms.validators.DataRequired', 'DataRequired', (['"""Enter a valid password"""'], {}), "('Enter a valid password')\n", (563, 589), False, 'from wtforms.validators import DataRequired, Email, Length\n'), ((591, 657), 'wtforms.validators.Length', 'Length', ([], {'min': '(8)', 'message': '"""Password must be a minimum of 8 charaters"""'}), "(min=8, message='Password must be a minimum of 8 charaters')\n", (597, 657), False, 'from wtforms.validators import DataRequired, Email, Length\n'), ((707, 721), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (719, 721), False, 'from wtforms.validators import DataRequired, Email, Length\n')]
|
import sys
import os
from os.path import *
def pl():
return dirname(dirname(dirname(dirname(os.path.abspath(__file__)))))
class SparkEnv:
def __init__(self, name):
os.environ['HADOOP_HOME'] = dirname(dirname(dirname(dirname(os.path.abspath(__file__))))) + r'/hadoopdir'
os.environ['SPARK_HOME'] = r"D:\assistlibs\hadoop\spark-2.2.3-bin-hadoop2.6"
sys.path.append(r"D:\assistlibs\hadoop\spark-2.2.3-bin-hadoop2.6\python")
from pyspark import SparkContext
self.sc = SparkContext("local", name)
self.sc.setLogLevel("WARN")
from pyspark.sql import SparkSession
self.ss = SparkSession.builder.appName(name).getOrCreate()
def postInit(self):
return (self, self.sc, self.ss)
def projLoc(self):
return dirname(dirname(dirname(dirname(os.path.abspath(__file__)))))
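

# Hedged usage sketch (not part of the original module): it assumes the
# hard-coded HADOOP_HOME / SPARK_HOME paths above exist on the machine.
if __name__ == "__main__":
    env, sc, ss = SparkEnv("example-app").postInit()
    print(sc.parallelize(range(10)).sum())
    sc.stop()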
|
[
"sys.path.append",
"os.path.abspath",
"pyspark.sql.SparkSession.builder.appName",
"pyspark.SparkContext"
] |
[((383, 459), 'sys.path.append', 'sys.path.append', (['"""D:\\\\assistlibs\\\\hadoop\\\\spark-2.2.3-bin-hadoop2.6\\\\python"""'], {}), "('D:\\\\assistlibs\\\\hadoop\\\\spark-2.2.3-bin-hadoop2.6\\\\python')\n", (398, 459), False, 'import sys\n'), ((516, 543), 'pyspark.SparkContext', 'SparkContext', (['"""local"""', 'name'], {}), "('local', name)\n", (528, 543), False, 'from pyspark import SparkContext\n'), ((643, 677), 'pyspark.sql.SparkSession.builder.appName', 'SparkSession.builder.appName', (['name'], {}), '(name)\n', (671, 677), False, 'from pyspark.sql import SparkSession\n'), ((98, 123), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (113, 123), False, 'import os\n'), ((828, 853), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (843, 853), False, 'import os\n'), ((244, 269), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (259, 269), False, 'import os\n')]
|
from scipy.optimize import root
from scipy.integrate import solve_ivp
import numpy as np
def phase_cond(u, dudt):
res= np.array(dudt(0,u))
return res
def periodicity_cond(u, dudt, T):
# integrate the ode for time t from starting position U
res = np.array(u - solve_ivp(dudt, (0, T), u).y[:,-1])
return res
def g(state_vec, dudt):
T = state_vec[-1]
u=state_vec[0:-1]
res = np.concatenate((
periodicity_cond(u, dudt, T),
phase_cond(u, dudt),
))
return res
def shooting(state_vec, dudt):
"""
A function that returns an estimation of the starting condition of a BVP
subject to the first order differential equations
USAGE: shooting(state_vec, dudt)
INPUTS:
    state_vec : ndarray
        the state vector to solve for, [u0...uN, T]; the final entry should be
        an estimate of the period of the limit cycle.
    dudt : callable
        returns the first order differential equations to be solved
    ----------
    OUTPUT : an ndarray containing the corrected initial values for the limit cycle.
    NOTE: This function is currently having issues when used with npc; however, it
    is also currently passing all of its tests.
"""
sol = root(g, state_vec, args=(dudt,), method="lm")
if sol["success"] == True:
print("Root finder found the solution u={} after {} function calls"
.format(sol["x"], sol["nfev"]))
return sol["x"]
else:
print("Root finder failed with error message: {}".format(sol["message"]))
return None
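

# Hedged usage sketch following the docstring's USAGE line. The ODE below
# (the Hopf normal form with beta = 1, whose limit cycle has period 2*pi) is
# an assumed example and is not part of the original module; whether the root
# finder converges still depends on the quality of the initial guess.
if __name__ == "__main__":
    def hopf(t, u):
        beta = 1.0
        u1, u2 = u
        return [beta * u1 - u2 - u1 * (u1 ** 2 + u2 ** 2),
                u1 + beta * u2 - u2 * (u1 ** 2 + u2 ** 2)]

    # initial guess: a point near the unit-radius cycle and a period near 2*pi
    print(shooting(np.array([1.0, 0.0, 6.3]), hopf))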
|
[
"scipy.optimize.root",
"scipy.integrate.solve_ivp"
] |
[((1291, 1336), 'scipy.optimize.root', 'root', (['g', 'state_vec'], {'args': '(dudt,)', 'method': '"""lm"""'}), "(g, state_vec, args=(dudt,), method='lm')\n", (1295, 1336), False, 'from scipy.optimize import root\n'), ((277, 303), 'scipy.integrate.solve_ivp', 'solve_ivp', (['dudt', '(0, T)', 'u'], {}), '(dudt, (0, T), u)\n', (286, 303), False, 'from scipy.integrate import solve_ivp\n')]
|
import logging
import os
from datetime import datetime
import pandas as pd
from analysis import calibrationreport, resource_usage, cpuefficiency, sampling
from analysis import jobreportanalysis
from analysis import jobreportcleaning
from analysis import nodeanalysis
from analysis.demandextraction import FilteredJobClassifier, JobDemandExtractor
from data.dataset import Metric
from exporters.datasetexport import ReferenceWalltimeExporter
from importers.dataset_import import DatasetImporter
from importers.gridkadata import GridKaNodeDataImporter, ColumnCoreUsageImporter, \
CPUEfficiencyReferenceImporter
from importers.jmimport import JMImporter
from importers.jobcounts import JobCountImporter
from importers.wmaimport import SummarizedWMAImporter
from interfaces.workflow import CalibrationWorkflow
from merge import job_node
from merge.merge_datasets import UnionDatasetMerge
from merge.reportmatching import JobReportMatcher
from utils import config, visualization
from utils import report as rp
from utils.report import ReportBuilder
from workflows.workflowutils import export_job_counts, export_parameters
# Todo Split this up into smaller methods
class GridKaCalibration(CalibrationWorkflow):
def __init__(self):
self.report = ReportBuilder(base_path=config.outputDirectory, filename='calibration-report.md')
def run(self):
self.report.append('# GridKa Calibration Run')
time_now = datetime.now().strftime('%Y-%m-%d, %H:%M:%S')
self.report.append('at {}'.format(time_now))
logging.info("Model Calibration run at {}".format(time_now))
start_date = pd.to_datetime(config.startDate)
end_date = pd.to_datetime(config.endDate)
day_count = (end_date - start_date).days
self.report.append()
self.report.append("Start date: {} \nEnd date: {}".format(start_date, end_date))
# Import data sets
##################
        # Timezone correction to correct for errors in the timestamps of JobMonitoring data
dataset_importer = DatasetImporter(
JMImporter(timezone_correction='Europe/Berlin', hostname_suffix='.gridka.de', with_files=False))
jm_dataset = dataset_importer.import_dataset(config.inputPaths['jm'], start_date, end_date)
wm_dataset = DatasetImporter(SummarizedWMAImporter(with_files=False)) \
.import_dataset(config.inputPaths['wma'], start_date, end_date)
cached_matches = None
use_caching = config.cacheDir is not None
if use_caching:
match_cache_file = os.path.join(config.cacheDir, 'jm-wma-matches.csv')
if os.path.isfile(match_cache_file):
try:
cached_matches = pd.read_csv(match_cache_file,
usecols=[jm_dataset.df.index.name, wm_dataset.df.index.name])
logging.info(
"Loaded {} matches from match cache {}!".format(cached_matches.shape[0], match_cache_file))
except Exception:
logging.warning("No match cache found at {}!".format(match_cache_file))
# Match Jobmonitoring and WMArchive job reports
matcher = JobReportMatcher(timestamp_tolerance=10, time_grouping_freq='D')
matches = matcher.match_reports(jm_dataset, wm_dataset, use_files=False, previous_matches=cached_matches)
if use_caching:
match_cache_file = os.path.join(config.cacheDir, 'jm-wma-matches.csv')
logging.info("Writing {} matches to file {}".format(matches.shape[0], match_cache_file))
matches.to_csv(match_cache_file)
jobs_dataset = UnionDatasetMerge().merge_datasets(matches, jm_dataset, wm_dataset, left_index='UniqueID',
right_index='wmaid', left_suffix='jm', right_suffix='wma')
jobs_dataset.df = jobreportcleaning.clean_job_reports(jobs_dataset.df)
# Import node information
nodes = GridKaNodeDataImporter().import_file(config.inputPaths['nodeInfo'])
nodes = nodeanalysis.add_performance_data(nodes, simulated_cores=config.workflowOptions['coreSimulationMethod'],
thread_rate_method=config.workflowOptions['threadPerformanceMethod'])
# Match jobs to nodes
matched_jobs = job_node.match_jobs_to_node(jobs_dataset.df, nodes)
matched_jobs = jobreportanalysis.add_missing_node_info(matched_jobs, nodes)
jm_dataset.df = jobreportanalysis.add_performance_data(matched_jobs)
job_data = jm_dataset.df
# Import additional information for usage of GridKa site
core_importer = ColumnCoreUsageImporter()
core_df = core_importer.import_file(config.inputPaths['coreUsage'], start_date, end_date)
cms_avg_cores = core_df['cms'].mean()
avg_jobslots_reports = self.draw_jobslot_usage(jm_dataset, core_df)
# Visualize number of jobs in calibration report
job_counts_reference_summary = self.add_jobs_over_time(start_date, end_date)
# CPU Efficiencies
self.add_cpu_efficiency(job_data, start_date, end_date)
# Compute calibration parameters
node_types = nodeanalysis.extract_node_types(nodes)
# Scale the resource environment with both information from the job reports and the Pilot jobs
scaled_nodes_pilots = nodeanalysis.scale_site_by_jobslots(node_types, cms_avg_cores)
scaled_nodes_reports = nodeanalysis.scale_site_by_jobslots(node_types, avg_jobslots_reports)
type_split_cols = config.workflowOptions['typeSplitCols']
split_types = None
if 'splitTypes' in config.workflowOptions:
split_types = list(map(tuple, config.workflowOptions['splitTypes']))
job_classifier = FilteredJobClassifier(type_split_cols, split_types=split_types)
job_groups = job_classifier.split(job_data)
job_demand_extractor = JobDemandExtractor(self.report, equal_width=False, bin_count=60,
cutoff_quantile=0.95,
overflow_agg=config.workflowOptions['overflowAggregationMethod'],
additional_job_options=config.workflowOptions['additionalJobOptions'],
drop_overflow=config.workflowOptions.get('dropOverflow', False))
demands, partitions = job_demand_extractor.extract_job_demands(job_groups)
export_parameters('parameters_slots_from_pilots', scaled_nodes_pilots, demands)
export_parameters('parameters_slots_from_reports', scaled_nodes_reports, demands)
# Sample half of the reports, fix random state for reproducibility
reports_train, reports_test = sampling.split_samples(job_data, frac=0.5, random_state=38728)
sampling_report = ReportBuilder(base_path=config.outputDirectory, filename='calibration-report-sampled.md',
resource_dir='figures-sampling')
job_groups_train = job_classifier.split(reports_train)
job_demand_extractor.report = sampling_report
sample_demands, sample_partitions = job_demand_extractor.extract_job_demands(job_groups_train)
sampling_report.write()
export_parameters('parameters_slots_from_pilots_sampled0.5', scaled_nodes_pilots, sample_demands)
export_parameters('parameters_slots_from_reports_sampled0.5', scaled_nodes_reports, sample_demands)
# Export job throughputs from analyzed jobs
jobs_from_reports = job_data.copy()
jobs_from_reports[Metric.JOB_TYPE.value] = jobs_from_reports[Metric.JOB_TYPE.value].fillna('unknown')
job_counts_reports = jobs_from_reports.groupby(Metric.JOB_TYPE.value).size().reset_index()
job_counts_reports.columns = ['type', 'count']
job_counts_reports['throughput_day'] = job_counts_reports['count'].divide(day_count)
export_job_counts(job_counts_reports, 'parameters_slots_from_pilots',
config.outputPaths['jobCountReports'])
# Export walltimes
walltime_path = os.path.join(config.outputDirectory, 'parameters_slots_from_pilots',
config.outputPaths['walltimeReference'])
ReferenceWalltimeExporter().export_to_json_file(partitions, walltime_path)
# Write jobs to report
calibrationreport.add_jobs_report_section(jm_dataset, self.report)
# Write report out to disk
self.report.write()
def draw_jobslot_usage(self, jm_dataset, core_reference):
jobslot_timeseries = resource_usage.calculate_jobslot_usage(jm_dataset.df, jm_dataset.start, jm_dataset.end,
start_ts_col=Metric.START_TIME.value,
end_ts_col=Metric.STOP_TIME.value,
slot_col=Metric.USED_CORES.value)
jobslots_from_reports = jobslot_timeseries['totalSlots'].resample('s').pad().resample('H').mean()
avg_jobslots_reports = jobslots_from_reports.mean()
fig, axes = calibrationreport.multiple_jobslot_usage(
{'Extracted from job reports': jobslots_from_reports,
'Allocated to GridKa CMS pilots': core_reference['cms']})
self.report.add_figure(fig, axes, 'jobslot_usage_reference')
return avg_jobslots_reports
def add_jobs_over_time(self, start_date, end_date):
self.report.append("## Number of jobs completed over time")
job_counts = JobCountImporter().import_file(config.inputPaths['jobCountsReference'], start_date, end_date)
fig, axes = calibrationreport.jobtypes_over_time_df(job_counts, 'date', 'type')
self.report.add_figure(fig, axes, 'job_counts_reference', tight_layout=False)
job_counts_reference_summary = job_counts.groupby('type')['count'].sum().reset_index()
job_counts_reference_summary.columns = ['type', 'count']
job_counts_reference_summary['share'] = job_counts_reference_summary['count'] / job_counts_reference_summary[
'count'].sum()
job_counts_reference_summary['throughput_day'] = job_counts_reference_summary['count'].divide(
(end_date - start_date).days)
self.report.append("Job throughput from CMS Dashboard:")
self.report.append()
self.report.append_paragraph(rp.CodeBlock().append(job_counts_reference_summary.to_string()))
return job_counts_reference_summary
def add_cpu_efficiency(self, job_data, start_date, end_date):
efficiency_reference = CPUEfficiencyReferenceImporter(col='cms', output_column='value').import_file(
config.inputPaths['CPUEfficiencyReference'], start_date, end_date)
efficiency_timeseries, reports_average = cpuefficiency.calculate_efficiencies(job_data, freq='12h')
reference = efficiency_reference['value'].resample('12h').mean().rename('reference')
reference_mean = efficiency_reference['value'].mean()
from_reports = efficiency_timeseries.rename('measured')
# cpu_eff = pd.concat([reference, from_reports], axis=1)
fig, axes = visualization.draw_efficiency_timeseries(
{'extracted from job reports': from_reports, 'reference from GridKa monitoring': reference})
axes.set_ylabel("CPU efficiency (CPU time / wall time)")
axes.legend(['Extracted from job reports (average {:.2f}%)'.format(reports_average * 100),
'Reference from GridKa monitoring (average {:.2f}%)'.format(reference_mean * 100)])
axes.set_title("CPU efficiencies ({}, {} days)".format(config.runName, (end_date - start_date).days))
axes.set_xlim(left=start_date, right=(end_date - pd.Timedelta('1 days')))
fig.set_size_inches(8, 4.5)
self.report.add_figure(fig, axes, 'cpu_efficiencies_reference')
self.report.append("Efficiency from job reports: {} ".format(reports_average))
self.report.append("Efficiency from GridKa: {}".format(reference_mean))
|
[
"importers.gridkadata.GridKaNodeDataImporter",
"pandas.read_csv",
"analysis.cpuefficiency.calculate_efficiencies",
"analysis.jobreportanalysis.add_missing_node_info",
"importers.wmaimport.SummarizedWMAImporter",
"analysis.calibrationreport.multiple_jobslot_usage",
"os.path.isfile",
"analysis.nodeanalysis.scale_site_by_jobslots",
"analysis.resource_usage.calculate_jobslot_usage",
"merge.job_node.match_jobs_to_node",
"exporters.datasetexport.ReferenceWalltimeExporter",
"os.path.join",
"workflows.workflowutils.export_parameters",
"workflows.workflowutils.export_job_counts",
"utils.visualization.draw_efficiency_timeseries",
"analysis.nodeanalysis.extract_node_types",
"analysis.sampling.split_samples",
"pandas.Timedelta",
"datetime.datetime.now",
"analysis.jobreportanalysis.add_performance_data",
"analysis.calibrationreport.add_jobs_report_section",
"importers.gridkadata.ColumnCoreUsageImporter",
"analysis.calibrationreport.jobtypes_over_time_df",
"analysis.demandextraction.FilteredJobClassifier",
"analysis.jobreportcleaning.clean_job_reports",
"pandas.to_datetime",
"utils.report.CodeBlock",
"importers.jmimport.JMImporter",
"importers.gridkadata.CPUEfficiencyReferenceImporter",
"importers.jobcounts.JobCountImporter",
"merge.reportmatching.JobReportMatcher",
"analysis.nodeanalysis.add_performance_data",
"utils.config.workflowOptions.get",
"utils.report.ReportBuilder",
"merge.merge_datasets.UnionDatasetMerge"
] |
[((1261, 1347), 'utils.report.ReportBuilder', 'ReportBuilder', ([], {'base_path': 'config.outputDirectory', 'filename': '"""calibration-report.md"""'}), "(base_path=config.outputDirectory, filename=\n 'calibration-report.md')\n", (1274, 1347), False, 'from utils.report import ReportBuilder\n'), ((1629, 1661), 'pandas.to_datetime', 'pd.to_datetime', (['config.startDate'], {}), '(config.startDate)\n', (1643, 1661), True, 'import pandas as pd\n'), ((1681, 1711), 'pandas.to_datetime', 'pd.to_datetime', (['config.endDate'], {}), '(config.endDate)\n', (1695, 1711), True, 'import pandas as pd\n'), ((3222, 3286), 'merge.reportmatching.JobReportMatcher', 'JobReportMatcher', ([], {'timestamp_tolerance': '(10)', 'time_grouping_freq': '"""D"""'}), "(timestamp_tolerance=10, time_grouping_freq='D')\n", (3238, 3286), False, 'from merge.reportmatching import JobReportMatcher\n'), ((3915, 3967), 'analysis.jobreportcleaning.clean_job_reports', 'jobreportcleaning.clean_job_reports', (['jobs_dataset.df'], {}), '(jobs_dataset.df)\n', (3950, 3967), False, 'from analysis import jobreportcleaning\n'), ((4103, 4287), 'analysis.nodeanalysis.add_performance_data', 'nodeanalysis.add_performance_data', (['nodes'], {'simulated_cores': "config.workflowOptions['coreSimulationMethod']", 'thread_rate_method': "config.workflowOptions['threadPerformanceMethod']"}), "(nodes, simulated_cores=config.\n workflowOptions['coreSimulationMethod'], thread_rate_method=config.\n workflowOptions['threadPerformanceMethod'])\n", (4136, 4287), False, 'from analysis import nodeanalysis\n'), ((4382, 4433), 'merge.job_node.match_jobs_to_node', 'job_node.match_jobs_to_node', (['jobs_dataset.df', 'nodes'], {}), '(jobs_dataset.df, nodes)\n', (4409, 4433), False, 'from merge import job_node\n'), ((4457, 4517), 'analysis.jobreportanalysis.add_missing_node_info', 'jobreportanalysis.add_missing_node_info', (['matched_jobs', 'nodes'], {}), '(matched_jobs, nodes)\n', (4496, 4517), False, 'from analysis import jobreportanalysis\n'), ((4543, 4595), 'analysis.jobreportanalysis.add_performance_data', 'jobreportanalysis.add_performance_data', (['matched_jobs'], {}), '(matched_jobs)\n', (4581, 4595), False, 'from analysis import jobreportanalysis\n'), ((4719, 4744), 'importers.gridkadata.ColumnCoreUsageImporter', 'ColumnCoreUsageImporter', ([], {}), '()\n', (4742, 4744), False, 'from importers.gridkadata import GridKaNodeDataImporter, ColumnCoreUsageImporter, CPUEfficiencyReferenceImporter\n'), ((5264, 5302), 'analysis.nodeanalysis.extract_node_types', 'nodeanalysis.extract_node_types', (['nodes'], {}), '(nodes)\n', (5295, 5302), False, 'from analysis import nodeanalysis\n'), ((5437, 5499), 'analysis.nodeanalysis.scale_site_by_jobslots', 'nodeanalysis.scale_site_by_jobslots', (['node_types', 'cms_avg_cores'], {}), '(node_types, cms_avg_cores)\n', (5472, 5499), False, 'from analysis import nodeanalysis\n'), ((5531, 5600), 'analysis.nodeanalysis.scale_site_by_jobslots', 'nodeanalysis.scale_site_by_jobslots', (['node_types', 'avg_jobslots_reports'], {}), '(node_types, avg_jobslots_reports)\n', (5566, 5600), False, 'from analysis import nodeanalysis\n'), ((5854, 5917), 'analysis.demandextraction.FilteredJobClassifier', 'FilteredJobClassifier', (['type_split_cols'], {'split_types': 'split_types'}), '(type_split_cols, split_types=split_types)\n', (5875, 5917), False, 'from analysis.demandextraction import FilteredJobClassifier, JobDemandExtractor\n'), ((6584, 6663), 'workflows.workflowutils.export_parameters', 'export_parameters', 
(['"""parameters_slots_from_pilots"""', 'scaled_nodes_pilots', 'demands'], {}), "('parameters_slots_from_pilots', scaled_nodes_pilots, demands)\n", (6601, 6663), False, 'from workflows.workflowutils import export_job_counts, export_parameters\n'), ((6672, 6757), 'workflows.workflowutils.export_parameters', 'export_parameters', (['"""parameters_slots_from_reports"""', 'scaled_nodes_reports', 'demands'], {}), "('parameters_slots_from_reports', scaled_nodes_reports,\n demands)\n", (6689, 6757), False, 'from workflows.workflowutils import export_job_counts, export_parameters\n'), ((6868, 6930), 'analysis.sampling.split_samples', 'sampling.split_samples', (['job_data'], {'frac': '(0.5)', 'random_state': '(38728)'}), '(job_data, frac=0.5, random_state=38728)\n', (6890, 6930), False, 'from analysis import calibrationreport, resource_usage, cpuefficiency, sampling\n'), ((6958, 7085), 'utils.report.ReportBuilder', 'ReportBuilder', ([], {'base_path': 'config.outputDirectory', 'filename': '"""calibration-report-sampled.md"""', 'resource_dir': '"""figures-sampling"""'}), "(base_path=config.outputDirectory, filename=\n 'calibration-report-sampled.md', resource_dir='figures-sampling')\n", (6971, 7085), False, 'from utils.report import ReportBuilder\n'), ((7385, 7486), 'workflows.workflowutils.export_parameters', 'export_parameters', (['"""parameters_slots_from_pilots_sampled0.5"""', 'scaled_nodes_pilots', 'sample_demands'], {}), "('parameters_slots_from_pilots_sampled0.5',\n scaled_nodes_pilots, sample_demands)\n", (7402, 7486), False, 'from workflows.workflowutils import export_job_counts, export_parameters\n'), ((7491, 7594), 'workflows.workflowutils.export_parameters', 'export_parameters', (['"""parameters_slots_from_reports_sampled0.5"""', 'scaled_nodes_reports', 'sample_demands'], {}), "('parameters_slots_from_reports_sampled0.5',\n scaled_nodes_reports, sample_demands)\n", (7508, 7594), False, 'from workflows.workflowutils import export_job_counts, export_parameters\n'), ((8055, 8167), 'workflows.workflowutils.export_job_counts', 'export_job_counts', (['job_counts_reports', '"""parameters_slots_from_pilots"""', "config.outputPaths['jobCountReports']"], {}), "(job_counts_reports, 'parameters_slots_from_pilots',\n config.outputPaths['jobCountReports'])\n", (8072, 8167), False, 'from workflows.workflowutils import export_job_counts, export_parameters\n'), ((8242, 8356), 'os.path.join', 'os.path.join', (['config.outputDirectory', '"""parameters_slots_from_pilots"""', "config.outputPaths['walltimeReference']"], {}), "(config.outputDirectory, 'parameters_slots_from_pilots', config\n .outputPaths['walltimeReference'])\n", (8254, 8356), False, 'import os\n'), ((8512, 8578), 'analysis.calibrationreport.add_jobs_report_section', 'calibrationreport.add_jobs_report_section', (['jm_dataset', 'self.report'], {}), '(jm_dataset, self.report)\n', (8553, 8578), False, 'from analysis import calibrationreport, resource_usage, cpuefficiency, sampling\n'), ((8736, 8939), 'analysis.resource_usage.calculate_jobslot_usage', 'resource_usage.calculate_jobslot_usage', (['jm_dataset.df', 'jm_dataset.start', 'jm_dataset.end'], {'start_ts_col': 'Metric.START_TIME.value', 'end_ts_col': 'Metric.STOP_TIME.value', 'slot_col': 'Metric.USED_CORES.value'}), '(jm_dataset.df, jm_dataset.start,\n jm_dataset.end, start_ts_col=Metric.START_TIME.value, end_ts_col=Metric\n .STOP_TIME.value, slot_col=Metric.USED_CORES.value)\n', (8774, 8939), False, 'from analysis import calibrationreport, resource_usage, cpuefficiency, sampling\n'), ((9323, 
9484), 'analysis.calibrationreport.multiple_jobslot_usage', 'calibrationreport.multiple_jobslot_usage', (["{'Extracted from job reports': jobslots_from_reports,\n 'Allocated to GridKa CMS pilots': core_reference['cms']}"], {}), "({'Extracted from job reports':\n jobslots_from_reports, 'Allocated to GridKa CMS pilots': core_reference\n ['cms']})\n", (9363, 9484), False, 'from analysis import calibrationreport, resource_usage, cpuefficiency, sampling\n'), ((9870, 9937), 'analysis.calibrationreport.jobtypes_over_time_df', 'calibrationreport.jobtypes_over_time_df', (['job_counts', '"""date"""', '"""type"""'], {}), "(job_counts, 'date', 'type')\n", (9909, 9937), False, 'from analysis import calibrationreport, resource_usage, cpuefficiency, sampling\n'), ((11023, 11081), 'analysis.cpuefficiency.calculate_efficiencies', 'cpuefficiency.calculate_efficiencies', (['job_data'], {'freq': '"""12h"""'}), "(job_data, freq='12h')\n", (11059, 11081), False, 'from analysis import calibrationreport, resource_usage, cpuefficiency, sampling\n'), ((11390, 11527), 'utils.visualization.draw_efficiency_timeseries', 'visualization.draw_efficiency_timeseries', (["{'extracted from job reports': from_reports,\n 'reference from GridKa monitoring': reference}"], {}), "({'extracted from job reports':\n from_reports, 'reference from GridKa monitoring': reference})\n", (11430, 11527), False, 'from utils import config, visualization\n'), ((2079, 2179), 'importers.jmimport.JMImporter', 'JMImporter', ([], {'timezone_correction': '"""Europe/Berlin"""', 'hostname_suffix': '""".gridka.de"""', 'with_files': '(False)'}), "(timezone_correction='Europe/Berlin', hostname_suffix=\n '.gridka.de', with_files=False)\n", (2089, 2179), False, 'from importers.jmimport import JMImporter\n'), ((2570, 2621), 'os.path.join', 'os.path.join', (['config.cacheDir', '"""jm-wma-matches.csv"""'], {}), "(config.cacheDir, 'jm-wma-matches.csv')\n", (2582, 2621), False, 'import os\n'), ((2638, 2670), 'os.path.isfile', 'os.path.isfile', (['match_cache_file'], {}), '(match_cache_file)\n', (2652, 2670), False, 'import os\n'), ((3457, 3508), 'os.path.join', 'os.path.join', (['config.cacheDir', '"""jm-wma-matches.csv"""'], {}), "(config.cacheDir, 'jm-wma-matches.csv')\n", (3469, 3508), False, 'import os\n'), ((1439, 1453), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1451, 1453), False, 'from datetime import datetime\n'), ((3680, 3699), 'merge.merge_datasets.UnionDatasetMerge', 'UnionDatasetMerge', ([], {}), '()\n', (3697, 3699), False, 'from merge.merge_datasets import UnionDatasetMerge\n'), ((4019, 4043), 'importers.gridkadata.GridKaNodeDataImporter', 'GridKaNodeDataImporter', ([], {}), '()\n', (4041, 4043), False, 'from importers.gridkadata import GridKaNodeDataImporter, ColumnCoreUsageImporter, CPUEfficiencyReferenceImporter\n'), ((6440, 6489), 'utils.config.workflowOptions.get', 'config.workflowOptions.get', (['"""dropOverflow"""', '(False)'], {}), "('dropOverflow', False)\n", (6466, 6489), False, 'from utils import config, visualization\n'), ((8397, 8424), 'exporters.datasetexport.ReferenceWalltimeExporter', 'ReferenceWalltimeExporter', ([], {}), '()\n', (8422, 8424), False, 'from exporters.datasetexport import ReferenceWalltimeExporter\n'), ((9756, 9774), 'importers.jobcounts.JobCountImporter', 'JobCountImporter', ([], {}), '()\n', (9772, 9774), False, 'from importers.jobcounts import JobCountImporter\n'), ((10816, 10880), 'importers.gridkadata.CPUEfficiencyReferenceImporter', 'CPUEfficiencyReferenceImporter', ([], {'col': '"""cms"""', 
'output_column': '"""value"""'}), "(col='cms', output_column='value')\n", (10846, 10880), False, 'from importers.gridkadata import GridKaNodeDataImporter, ColumnCoreUsageImporter, CPUEfficiencyReferenceImporter\n'), ((2314, 2353), 'importers.wmaimport.SummarizedWMAImporter', 'SummarizedWMAImporter', ([], {'with_files': '(False)'}), '(with_files=False)\n', (2335, 2353), False, 'from importers.wmaimport import SummarizedWMAImporter\n'), ((2730, 2826), 'pandas.read_csv', 'pd.read_csv', (['match_cache_file'], {'usecols': '[jm_dataset.df.index.name, wm_dataset.df.index.name]'}), '(match_cache_file, usecols=[jm_dataset.df.index.name, wm_dataset\n .df.index.name])\n', (2741, 2826), True, 'import pandas as pd\n'), ((10608, 10622), 'utils.report.CodeBlock', 'rp.CodeBlock', ([], {}), '()\n', (10620, 10622), True, 'from utils import report as rp\n'), ((11976, 11998), 'pandas.Timedelta', 'pd.Timedelta', (['"""1 days"""'], {}), "('1 days')\n", (11988, 11998), True, 'import pandas as pd\n')]
|
from proboscis.asserts import assert_equal
from proboscis import test
from proboscis import before_class
from trove.common.utils import poll_until
from trove.tests.util import create_client
class InstanceGenerator(object):
def __init__(self, client, status=None, name=None, flavor=None,
account_id=None, created_at=None, databases=None, users=None,
volume_size=None):
self.client = client
self.status = status
self.name = name
self.flavor = flavor
self.account_id = account_id
self.databases = databases
self.users = users
self.volume_size = volume_size
self.id = None
def create_instance(self):
#make the call to create the instance
instance = self.client.instances.create(self.name, self.flavor,
self.volume_size, self.databases, self.users)
self.client.assert_http_code(200)
#verify we are in a build state
assert_equal(instance.status, "BUILD")
#pull out the ID
self.id = instance.id
return instance
def wait_for_build_to_finish(self):
poll_until(lambda: self.client.instance.get(self.id),
lambda instance: instance.status != "BUILD",
time_out=600)
def get_active_instance(self):
instance = self.client.instance.get(self.id)
self.client.assert_http_code(200)
#check the container name
assert_equal(instance.name, self.name)
#pull out volume info and verify
assert_equal(str(instance.volume_size), str(self.volume_size))
#pull out the flavor and verify
assert_equal(str(instance.flavor), str(self.flavor))
return instance
@test(groups=['smoke', 'positive'])
class CreateInstance(object):
@before_class
def set_up(self):
client = create_client(is_admin=False)
name = 'test_createInstance_container'
flavor = 1
volume_size = 1
db_name = 'test_db'
databases = [
{
"name": db_name
}
]
users = [
{
"name": "lite",
"password": "<PASSWORD>",
"databases": [{"name": db_name}]
}
]
#create the Instance
        instance = InstanceGenerator(client, name=name,
                                      flavor=flavor,
                                      volume_size=volume_size,
                                      databases=databases, users=users)
instance.create_instance()
#wait for the instance
instance.wait_for_build_to_finish()
#get the active instance
inst = instance.get_active_instance()
#list out the databases for our instance and verify the db name
dbs = client.databases.list(inst.id)
client.assert_http_code(200)
assert_equal(len(dbs), 1)
        assert_equal(dbs[0].name, db_name)
client.instance.delete(inst.id)
client.assert_http_code(202)
|
[
"proboscis.test",
"trove.tests.util.create_client",
"proboscis.asserts.assert_equal"
] |
[((1778, 1812), 'proboscis.test', 'test', ([], {'groups': "['smoke', 'positive']"}), "(groups=['smoke', 'positive'])\n", (1782, 1812), False, 'from proboscis import test\n'), ((1003, 1041), 'proboscis.asserts.assert_equal', 'assert_equal', (['instance.status', '"""BUILD"""'], {}), "(instance.status, 'BUILD')\n", (1015, 1041), False, 'from proboscis.asserts import assert_equal\n'), ((1496, 1534), 'proboscis.asserts.assert_equal', 'assert_equal', (['instance.name', 'self.name'], {}), '(instance.name, self.name)\n', (1508, 1534), False, 'from proboscis.asserts import assert_equal\n'), ((1901, 1930), 'trove.tests.util.create_client', 'create_client', ([], {'is_admin': '(False)'}), '(is_admin=False)\n', (1914, 1930), False, 'from trove.tests.util import create_client\n'), ((2990, 3033), 'proboscis.asserts.assert_equal', 'assert_equal', (['dbs[0].name', 'instance.db_name'], {}), '(dbs[0].name, instance.db_name)\n', (3002, 3033), False, 'from proboscis.asserts import assert_equal\n')]
|
#<NAME>
#CS4375: OS
#3 methods
from os import read #from os library import read method
next = 0
limit = 0
#This method calls read to fill a buffer, and gets one char at a time
def my_getChar(): #define = creating method : use method, loops, tryCatch
global next, limit #initializing 2 variables
if next == limit:
next = 0
limit = read(0,1000) #
if limit == 0:
return "EOF"
    if next < len(limit) - 1:  # Check to make sure limit[next] won't go out of bounds.
c = chr(limit[next])#converting from ascii to char
next += 1
return c
else:
return "EOF"
def my_getLine():
global next
global limit
line = ""
char = my_getChar()
while (char != '' and char != "EOF"):
line += char
char = my_getChar()
next = 0
limit = 0
return line
def my_readLines():
numLines = 0
inLine = my_getLine()
while len(inLine):
numLines += 1
print(f"### Line {numLines}: <{str(inLine)}> ###\n")
inLine = my_getLine()
print(f"EOF after {numLines}\n")
|
[
"os.read"
] |
[((359, 372), 'os.read', 'read', (['(0)', '(1000)'], {}), '(0, 1000)\n', (363, 372), False, 'from os import read\n')]
|
# Generated by Django 3.1.4 on 2020-12-01 15:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0006_suggestions_from_django_doctor'),
]
operations = [
migrations.AlterField(
model_name='datapackage',
name='name',
field=models.CharField(blank=True, default='', max_length=500),
),
]
|
[
"django.db.models.CharField"
] |
[((348, 404), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '""""""', 'max_length': '(500)'}), "(blank=True, default='', max_length=500)\n", (364, 404), False, 'from django.db import migrations, models\n')]
|
import random
import logging
import networkx as nx
from tenet.message import (
Message,
DictTransport, MessageSerializer, MessageTypes
)
from tenet.peer import Peer, Friend
from tenet.utils import weighted_choice
log = logging.getLogger(__name__)
class SimulatedPeer(object):
def __init__(self, peer):
self.peer = peer
self.connected = True
def simulate(self, transport, env):
actions = [('friend_post', 4), ('send_msg', 4)]
while True:
available_actions = list(actions)
if self.peer.connected:
available_actions.append(('disconnect', 2))
else:
# NOTE: maybe simulate offline posts
# so that connection behaviour and a sudden egress of messages
# doesn't mess things up
available_actions = [('connect', 1), ('none', 3)]
a = weighted_choice(available_actions)
if a == 'send_msg':
log.debug("{} will send a message.".format(self.peer))
self.random_message(transport)
elif a == 'friend_post':
log.debug("{} will make a post.".format(self.peer))
self.random_post(transport)
elif a == 'disconnect':
log.info("{} disconnecting".format(self.peer))
self.peer.connected = False
elif a == 'connect':
log.info("{} reconnecting".format(self.peer))
self.peer.connected = True
self.peer.on_connect(transport)
wait_duration = random.randint(1,4)
yield env.timeout(wait_duration)
def random_post(self, transport):
sender = self.peer
recipients = set()
if not sender.friends:
log.debug("{} has no friends :-(".format(sender))
return
num_recipients = random.randint(1, len(list(sender.friends.values())))
while len(recipients) < num_recipients:
r = random.choice(list(sender.friends.values()))
recipients.add(r)
msg = Message(sender.address, [r.address for r in recipients], MessageTypes.SHARE, text="This is a general post to mah friends!")
sender.send(msg, transport)
def random_message(self, transport):
sender = self.peer
recipient = None
if not sender.friends:
log.debug("{} has no friends :-(".format(sender))
return
while recipient is None or recipient == sender:
recipient = random.choice(list(sender.friends.values()))
msg = Message(sender.address, [recipient.address], MessageTypes.MESSAGE, text="Hello {}!".format(recipient))
sender.send(msg, transport)
def random_address(i):
names = ['Ariel', 'Boris', 'Carrie', 'Daniel', 'Ezekiel', 'Fiona', 'Harold', 'Indiana']
hosts = ['example.com', 'gmail.com', 'robot.com', 'zombo.com', 'yahoo.com', 'geocities.com']
return random.choice(names) + '_' + str(i) + '@' + random.choice(hosts)
def generate_random_peers(number=100):
for i in range(0, number):
p = Peer(random_address(i))
yield SimulatedPeer(p)
def random_friendships(peers, G=None, density=0.1):
x = len(peers)
links = int(x*x*density)
for i in range(0, links):
p1 = random.choice(peers)
p2 = None
while p2 is None or p1 == p2:
p2 = random.choice(peers)
G.add_edge(p1.address, p2.address)
# TODO exchange keys too
p1.friends[p2.address] = Friend(p2.address, p2.key)
p2.friends[p1.address] = Friend(p1.address, p1.key)
#log.debug('{} and {} are now friends'.format(p1, p2))
def gen_social_graph_1(num_people=10):
G=nx.Graph()
peers = [x for x in generate_random_peers(num_people)]
[log.debug(x) for x in peers]
for p in peers:
G.add_node(p.address)
random_friendships([p.peer for p in peers], G)
return (peers, G)
def gen_social_graph_2(num_people=10):
G=nx.random_geometric_graph(num_people,0.325)
peer_by_id = {}
for n in G.nodes():
peer_by_id[n] = SimulatedPeer(Peer(random_address(n)))
for e in G.edges():
p1 = peer_by_id[e[0]]
p2 = peer_by_id[e[1]]
p1.peer.friends[p2.peer.address] = Friend(p2.peer.address, p2.peer.key)
p2.peer.friends[p1.peer.address] = Friend(p1.peer.address, p1.peer.key)
return peer_by_id.values(), G
def draw_graph(G):
try:
from networkx import graphviz_layout
except ImportError:
raise ImportError("This example needs Graphviz and either PyGraphviz or Pydot")
import matplotlib.pyplot as plt
plt.figure(1, figsize=(8,8))
# layout graphs with positions using graphviz neato
#pos=nx.graphviz_layout(G, prog="neato")
pos=nx.get_node_attributes(G,'pos')
nx.draw_networkx_edges(G,pos,alpha=0.4)
nx.draw_networkx_nodes(G,pos,
node_size=80,
cmap=plt.cm.Reds_r)
#nx.draw(G,
#pos,
#node_size=40,
##node_color=c,
#vmin=0.0,
#vmax=1.0,
#with_labels=False
#)
plt.savefig("tenet.png",dpi=75)
|
[
"matplotlib.pyplot.savefig",
"tenet.peer.Friend",
"networkx.draw_networkx_edges",
"tenet.message.Message",
"random.randint",
"tenet.utils.weighted_choice",
"random.choice",
"networkx.random_geometric_graph",
"matplotlib.pyplot.figure",
"networkx.Graph",
"networkx.draw_networkx_nodes",
"networkx.get_node_attributes",
"logging.getLogger"
] |
[((246, 273), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (263, 273), False, 'import logging\n'), ((3767, 3777), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (3775, 3777), True, 'import networkx as nx\n'), ((4044, 4088), 'networkx.random_geometric_graph', 'nx.random_geometric_graph', (['num_people', '(0.325)'], {}), '(num_people, 0.325)\n', (4069, 4088), True, 'import networkx as nx\n'), ((4704, 4733), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(8, 8)'}), '(1, figsize=(8, 8))\n', (4714, 4733), True, 'import matplotlib.pyplot as plt\n'), ((4842, 4874), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['G', '"""pos"""'], {}), "(G, 'pos')\n", (4864, 4874), True, 'import networkx as nx\n'), ((4878, 4919), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['G', 'pos'], {'alpha': '(0.4)'}), '(G, pos, alpha=0.4)\n', (4900, 4919), True, 'import networkx as nx\n'), ((4922, 4986), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['G', 'pos'], {'node_size': '(80)', 'cmap': 'plt.cm.Reds_r'}), '(G, pos, node_size=80, cmap=plt.cm.Reds_r)\n', (4944, 4986), True, 'import networkx as nx\n'), ((5204, 5236), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""tenet.png"""'], {'dpi': '(75)'}), "('tenet.png', dpi=75)\n", (5215, 5236), True, 'import matplotlib.pyplot as plt\n'), ((2122, 2249), 'tenet.message.Message', 'Message', (['sender.address', '[r.address for r in recipients]', 'MessageTypes.SHARE'], {'text': '"""This is a general post to mah friends!"""'}), "(sender.address, [r.address for r in recipients], MessageTypes.SHARE,\n text='This is a general post to mah friends!')\n", (2129, 2249), False, 'from tenet.message import Message, DictTransport, MessageSerializer, MessageTypes\n'), ((3039, 3059), 'random.choice', 'random.choice', (['hosts'], {}), '(hosts)\n', (3052, 3059), False, 'import random\n'), ((3344, 3364), 'random.choice', 'random.choice', (['peers'], {}), '(peers)\n', (3357, 3364), False, 'import random\n'), ((3569, 3595), 'tenet.peer.Friend', 'Friend', (['p2.address', 'p2.key'], {}), '(p2.address, p2.key)\n', (3575, 3595), False, 'from tenet.peer import Peer, Friend\n'), ((3629, 3655), 'tenet.peer.Friend', 'Friend', (['p1.address', 'p1.key'], {}), '(p1.address, p1.key)\n', (3635, 3655), False, 'from tenet.peer import Peer, Friend\n'), ((4324, 4360), 'tenet.peer.Friend', 'Friend', (['p2.peer.address', 'p2.peer.key'], {}), '(p2.peer.address, p2.peer.key)\n', (4330, 4360), False, 'from tenet.peer import Peer, Friend\n'), ((4404, 4440), 'tenet.peer.Friend', 'Friend', (['p1.peer.address', 'p1.peer.key'], {}), '(p1.peer.address, p1.peer.key)\n', (4410, 4440), False, 'from tenet.peer import Peer, Friend\n'), ((924, 958), 'tenet.utils.weighted_choice', 'weighted_choice', (['available_actions'], {}), '(available_actions)\n', (939, 958), False, 'from tenet.utils import weighted_choice\n'), ((1617, 1637), 'random.randint', 'random.randint', (['(1)', '(4)'], {}), '(1, 4)\n', (1631, 1637), False, 'import random\n'), ((3438, 3458), 'random.choice', 'random.choice', (['peers'], {}), '(peers)\n', (3451, 3458), False, 'import random\n'), ((2995, 3015), 'random.choice', 'random.choice', (['names'], {}), '(names)\n', (3008, 3015), False, 'import random\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This module defines :class:`LicenseMaker` class."""
from __future__ import absolute_import, print_function
import os
import datetime
from . import settings
from .base import BaseMaker
#: Supported licenses, corresponding template file names, and descriptions
_LICENSES = {
"APACHE": ["license_apache", "Apache License"],
"CC0": ["license_cc0_1.0", "Creative Commons License for public domain"],
"GPL2": ["license_gpl_2.0", "GNU General Public License v2.0"],
"GPL3": ["license_gpl_3.0", "GNU General Public License v3.0"],
"LGPL2": ["license_lgpl_2.1", "GNU Lesser General Public License v2.1"],
"LGPL3": ["license_lgpl_3.0", "GNU Lesser General Public License v3.0"],
"MIT": ["license_mit", "MIT License, Default"],
"MOZILLA": ["license_mozilla", "Mozilla Public License v2.0"],
"NEW-BSD": ["license_new_bsd", "New BSD(Berkeley Software Distribution) License"],
"SIMPLE-BSD": ["license_simplified_bsd", "Simplified BSD(Berkeley Software Distribution) License"],
"PROPRIETARY": ["license_proprietary", "Proprietary License"],
}
class LicenseMaker(BaseMaker):
"""*Maker* class to create ``LICENSE`` file in the project directory
    ``LicenseMaker`` basically chooses the license specified in the setup.cfg file.
    But if it cannot retrieve a license from that file--for example, when the
    user did not specify a license in setup.cfg--it creates the default license,
    which is the `MIT license <https://opensource.org/licenses/MIT>`_.
Args:
projectDir (str): absolute path of project directory to create
force (bool): option for overwriting if the file exists.
license (str): license to create.
Attributes:
default_license (str): default license(class variable)
"""
default_license = 'MIT'
def __init__(self, projectDir, force, license, **kwargs):
self.projectDir = projectDir
self.force = force
self.license = license
self._update_settings()
def _update_settings(self):
"""update :attr:`maker.settings` dictionary"""
info = {
'today': datetime.date.today().isoformat(),
'year': str(datetime.date.today().year),
}
settings.update(info)
@staticmethod
def is_supported_license(license):
"""check to see if the license given is supported by *skelpy* or not
license name is case-insensitive.
.. Note::
Currently supported licenses are::
* APACHE: Apace License
* CC0: Creative Commons License for public domain
* GPL2: GNU General Public License v2.0
* GPL3: GNU General Public License v3.0
* LGPL: GNU Lesser General Public License v2.1
* LGPL3: GNU Lesser General Public License v3.0
* MIT: MIT License, **Default**
* MOZILLA: Mozilla Public License v2.0
* NEW-BSD: New BSD(Berkeley Software Distribution) License
* SIMPLE-BSD: Simplified BSD License
* PROPRIETARY: Proprietary License
Args:
license (str): license name
Returns:
bool: True if the license given is supported, False otherwise
"""
return bool(_LICENSES.get(license.upper()))
@staticmethod
def print_licenses():
"""print supported licenses
Returns:
None
"""
print('Supported licenses are as follows:')
indent = " " * 4
for k, v in _LICENSES.items():
print('{0}{1}: {2}'.format(indent, k, v[1]))
def generate(self):
"""Worker method of :class:`LicenseMaker`
Returns:
bool: True if successful, False otherwise
"""
licFile = os.path.join(self.projectDir, 'LICENSE')
ret = self.write_file(_LICENSES[self.license][0], licFile)
if not ret:
self.logger.info(
"* You can change the license with 'license' sub-command.\n"
"For help, see 'skelpy license -h or --help'.")
return bool(ret)
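
# Illustrative usage sketch (not part of skelpy): the call follows the __init__
# signature above, '/tmp/myproject' is only a placeholder path, and the helpers
# write_file/logger are assumed to be provided by BaseMaker, as generate() expects.
if __name__ == '__main__':
    LicenseMaker.print_licenses()
    maker = LicenseMaker('/tmp/myproject', force=True,
                         license=LicenseMaker.default_license)
    maker.generate()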
|
[
"os.path.join",
"datetime.date.today"
] |
[((3874, 3914), 'os.path.join', 'os.path.join', (['self.projectDir', '"""LICENSE"""'], {}), "(self.projectDir, 'LICENSE')\n", (3886, 3914), False, 'import os\n'), ((2168, 2189), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (2187, 2189), False, 'import datetime\n'), ((2227, 2248), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (2246, 2248), False, 'import datetime\n')]
|
import unittest
from nes.processors.cpu import Cpu
from nes.bus import Bus
from nes.bus.devices.memory import Ram
class CpuIncrementInstructionsTestCase(unittest.TestCase):
def setUp(self):
bus = Bus()
bus.attach_device('RAM', Ram(256), 0, 256)
self.cpu = Cpu(bus)
def test_inc(self):
self.cpu.write(0x0000, 0x00)
instruction = self.cpu.decode(0xEE)
self.cpu.execute(instruction)
self.assertEqual(self.cpu.read(0x0000), 0x01)
self.assertFalse(self.cpu.p.z)
self.assertFalse(self.cpu.p.n)
def test_inx(self):
self.cpu.x.value = 0x00
instruction = self.cpu.decode(0xE8)
self.cpu.execute(instruction)
self.assertEqual(self.cpu.x.value, 0x01)
self.assertFalse(self.cpu.p.z)
self.assertFalse(self.cpu.p.n)
def test_iny(self):
self.cpu.y.value = 0x00
instruction = self.cpu.decode(0xC8)
self.cpu.execute(instruction)
self.assertEqual(self.cpu.y.value, 0x01)
self.assertFalse(self.cpu.p.z)
self.assertFalse(self.cpu.p.n)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"nes.processors.cpu.Cpu",
"nes.bus.devices.memory.Ram",
"nes.bus.Bus"
] |
[((1136, 1151), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1149, 1151), False, 'import unittest\n'), ((210, 215), 'nes.bus.Bus', 'Bus', ([], {}), '()\n', (213, 215), False, 'from nes.bus import Bus\n'), ((286, 294), 'nes.processors.cpu.Cpu', 'Cpu', (['bus'], {}), '(bus)\n', (289, 294), False, 'from nes.processors.cpu import Cpu\n'), ((249, 257), 'nes.bus.devices.memory.Ram', 'Ram', (['(256)'], {}), '(256)\n', (252, 257), False, 'from nes.bus.devices.memory import Ram\n')]
|
import hashlib
import json
import numpy as np
from jina import Executor, DocumentArray, requests
class TagsHasher(Executor):
"""Convert an arbitrary set of tags into a fixed-dimensional matrix using the hashing trick.
    Unlike FeatureHasher, you should only use Jaccard/Hamming distance when searching documents
    embedded with TagsHasher. This is because the closeness of the value of each feature is meaningless;
    it is basically the result of a hash function. Hence, only the identity of a value matters.
More info: https://en.wikipedia.org/wiki/Feature_hashing
"""
def __init__(self, n_dim: int = 256, max_val: int = 65536, sparse: bool = False, **kwargs):
"""
:param n_dim: the dimensionality of each document in the output embedding.
Small numbers of features are likely to cause hash collisions,
but large numbers will cause larger overall parameter dimensions.
:param sparse: whether the resulting feature matrix should be a sparse csr_matrix or dense ndarray.
Note that this feature requires ``scipy``
        :param max_val: upper bound (applied via modulo) for each hashed tag value before it is added to the embedding.
:param kwargs:
"""
super().__init__(**kwargs)
self.n_dim = n_dim
self.max_val = max_val
self.hash = hashlib.md5
self.sparse = sparse
def _any_hash(self, v):
try:
return int(v) # parse int parameter
except ValueError:
try:
return float(v) # parse float parameter
except ValueError:
if not v:
# ignore it when the parameter is empty
return 0
if isinstance(v, str):
v = v.strip()
if v.lower() in {'true', 'yes'}: # parse boolean parameter
return 1
if v.lower() in {'false', 'no'}:
return 0
if isinstance(v, (tuple, dict, list)):
v = json.dumps(v, sort_keys=True)
return int(self.hash(str(v).encode('utf-8')).hexdigest(), base=16)
@requests
def encode(self, docs: DocumentArray, **kwargs):
if self.sparse:
from scipy.sparse import csr_matrix
for idx, doc in enumerate(docs):
if doc.tags:
idxs, data = [], [] # sparse
table = np.zeros(self.n_dim) # dense
for k, v in doc.tags.items():
h = self._any_hash(k)
sign_h = np.sign(h)
col = h % self.n_dim
val = self._any_hash(v)
sign_v = np.sign(val)
val = val % self.max_val
idxs.append((0, col))
val = sign_h * sign_v * val
data.append(val)
table[col] += val
if self.sparse:
doc.embedding = csr_matrix(
(data, zip(*idxs)), shape=(1, self.n_dim)
)
else:
doc.embedding = table
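
# Minimal usage sketch (not from the original source), assuming a jina 2.x-style
# Document API where tags can be passed to the constructor: hash the tags of two
# documents into 256-dimensional dense embeddings by calling encode() directly.
if __name__ == '__main__':
    from jina import Document

    docs = DocumentArray([
        Document(tags={'city': 'Berlin', 'year': 2021, 'verified': 'yes'}),
        Document(tags={'city': 'Berlin', 'year': 2020, 'verified': 'no'}),
    ])
    TagsHasher(n_dim=256).encode(docs)
    print(docs[0].embedding.shape)  # (256,) because sparse=False by default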
|
[
"numpy.zeros",
"json.dumps",
"numpy.sign"
] |
[((2435, 2455), 'numpy.zeros', 'np.zeros', (['self.n_dim'], {}), '(self.n_dim)\n', (2443, 2455), True, 'import numpy as np\n'), ((2582, 2592), 'numpy.sign', 'np.sign', (['h'], {}), '(h)\n', (2589, 2592), True, 'import numpy as np\n'), ((2707, 2719), 'numpy.sign', 'np.sign', (['val'], {}), '(val)\n', (2714, 2719), True, 'import numpy as np\n'), ((2052, 2081), 'json.dumps', 'json.dumps', (['v'], {'sort_keys': '(True)'}), '(v, sort_keys=True)\n', (2062, 2081), False, 'import json\n')]
|
from pdfbuilder import registry
from pdfbuilder.basetemplates import BaseDocTemplateWithHeaderAndFooter as BaseDocTemplate
from pdfbuilder.basetemplates import OneColumnBaseDocTemplateWithHeaderAndFooter as OneColumnDocTemplate
from pdfbuilder.basetemplates import PDFTemplate
from reportlab.lib import colors
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph
import random
class ThreeColumnDown(PDFTemplate):
doctemplatefactory = BaseDocTemplate
def get_stylesheet(self):
style = getSampleStyleSheet()['Normal']
style.spaceAfter = style.fontSize
return style
class ThreeColumnAcross(PDFTemplate):
def generate_flowable_from_entry(self, entry, entry_prefix, stylesheet, bucket):
try:
row = bucket[-1]
except IndexError: # it's an empty list
row = []
bucket.append(row)
if len(row) == 3:
# If the row is full (has 3 elements already) we make a new row
row = []
bucket.append(row)
data = "%s%s" % (entry_prefix, str(entry))
row.append(Paragraph(data, stylesheet))
def post_generate_flowables(self, flowables_buckets):
style = TableStyle([
("VALIGN", (0,0), (-1,-1), "TOP"),
("LINEBELOW", (0,0), (-1,-1), 1, colors.gray),
("LINEABOVE", (0,0), (-1,0), 1, colors.gray),
])
tables = {}
for key, rows in flowables_buckets.items():
t = Table(rows)
t.setStyle(style)
tables[key] = [t]
return tables
class OneColumn(PDFTemplate):
doctemplatefactory = OneColumnDocTemplate
def get_stylesheet(self):
styles = getSampleStyleSheet()
styles['Heading1'].spaceAfter = 12
styles['Heading1'].fontName = "Helvetica"
return styles['Heading1']
registry.register_template(ThreeColumnDown, "threecolumn_down",
"Three column layout, flowing down the page (newspaper style)")
registry.register_template(ThreeColumnAcross, "threecolumn_across",
"Three column layout, filling data across in rows with lines separating the rows")
registry.register_template(OneColumn, "onecolumn_withcomments",
"One column layout")
|
[
"reportlab.lib.styles.getSampleStyleSheet",
"reportlab.platypus.TableStyle",
"reportlab.platypus.Paragraph",
"reportlab.platypus.Table",
"pdfbuilder.registry.register_template"
] |
[((1947, 2078), 'pdfbuilder.registry.register_template', 'registry.register_template', (['ThreeColumnDown', '"""threecolumn_down"""', '"""Three column layout, flowing down the page (newspaper style)"""'], {}), "(ThreeColumnDown, 'threecolumn_down',\n 'Three column layout, flowing down the page (newspaper style)')\n", (1973, 2078), False, 'from pdfbuilder import registry\n'), ((2102, 2261), 'pdfbuilder.registry.register_template', 'registry.register_template', (['ThreeColumnAcross', '"""threecolumn_across"""', '"""Three column layout, filling data across in rows with lines separating the rows"""'], {}), "(ThreeColumnAcross, 'threecolumn_across',\n 'Three column layout, filling data across in rows with lines separating the rows'\n )\n", (2128, 2261), False, 'from pdfbuilder import registry\n'), ((2280, 2368), 'pdfbuilder.registry.register_template', 'registry.register_template', (['OneColumn', '"""onecolumn_withcomments"""', '"""One column layout"""'], {}), "(OneColumn, 'onecolumn_withcomments',\n 'One column layout')\n", (2306, 2368), False, 'from pdfbuilder import registry\n'), ((1276, 1426), 'reportlab.platypus.TableStyle', 'TableStyle', (["[('VALIGN', (0, 0), (-1, -1), 'TOP'), ('LINEBELOW', (0, 0), (-1, -1), 1,\n colors.gray), ('LINEABOVE', (0, 0), (-1, 0), 1, colors.gray)]"], {}), "([('VALIGN', (0, 0), (-1, -1), 'TOP'), ('LINEBELOW', (0, 0), (-1,\n -1), 1, colors.gray), ('LINEABOVE', (0, 0), (-1, 0), 1, colors.gray)])\n", (1286, 1426), False, 'from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph\n'), ((1796, 1817), 'reportlab.lib.styles.getSampleStyleSheet', 'getSampleStyleSheet', ([], {}), '()\n', (1815, 1817), False, 'from reportlab.lib.styles import getSampleStyleSheet\n'), ((583, 604), 'reportlab.lib.styles.getSampleStyleSheet', 'getSampleStyleSheet', ([], {}), '()\n', (602, 604), False, 'from reportlab.lib.styles import getSampleStyleSheet\n'), ((1172, 1199), 'reportlab.platypus.Paragraph', 'Paragraph', (['data', 'stylesheet'], {}), '(data, stylesheet)\n', (1181, 1199), False, 'from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph\n'), ((1572, 1583), 'reportlab.platypus.Table', 'Table', (['rows'], {}), '(rows)\n', (1577, 1583), False, 'from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph\n')]
|
# Chapter05-04
# Advanced Python
# Decorators
# Advantages
# 1. Removes duplication, keeps code concise, centralizes common functions
# 2. Logging, frameworks, validation checks..... -> shared functionality
# 3. Easy to combine and reuse
# Disadvantages
# 1. Reduced readability?
# 2. A function limited to one specific feature -> better written as a single plain function
# 3. Inconvenient to debug
# Decorator practice
import time
def perf_clock(func):
    def perf_clocked(*args):
        # function start time
        st = time.perf_counter()
        result = func(*args)
        # elapsed time once the function returns
        et = time.perf_counter() - st
        # name of the executed function
        name = func.__name__
        # function arguments
        arg_str = ', '.join(repr(arg) for arg in args)
        # print the result
        print('[%0.5fs] %s(%s) -> %r' % (et, name, arg_str, result))
        return result
    return perf_clocked
@perf_clock
def time_func(seconds):
time.sleep(seconds)
@perf_clock
def sum_func(*numbers):
return sum(numbers)
# Without using decorator syntax (manual wrapping)
none_deco1 = perf_clock(time_func)
none_deco2 = perf_clock(sum_func)
print(none_deco1, none_deco1.__code__.co_freevars)
print(none_deco2, none_deco2.__code__.co_freevars)
print('-' * 40, 'Called None Decorator -> time_func')
print()
none_deco1(1.5)
print('-' * 40, 'Called None Decorator -> sum_func')
print()
none_deco2(100, 150, 250, 300, 350)
print()
print()
# Using the decorators
print('*' * 40, 'Called Decorator -> time_func')
print()
time_func(1.5)
print('*' * 40, 'Called Decorator -> sum_func')
print()
sum_func(100, 150, 250, 300, 350)
print()
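
# Small illustrative addition (not in the original lesson): decorators can be
# stacked, which is the "easy to combine" advantage listed above. The same
# perf_clock decorator is reused together with a second, trivial decorator.
def shout_result(func):
    def wrapper(*args):
        result = func(*args)
        print('>>> result:', result)
        return result
    return wrapper

@perf_clock
@shout_result
def mul_func(*numbers):
    product = 1
    for n in numbers:
        product *= n
    return product

mul_func(2, 3, 4)
print()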
|
[
"time.perf_counter",
"time.sleep"
] |
[((706, 725), 'time.sleep', 'time.sleep', (['seconds'], {}), '(seconds)\n', (716, 725), False, 'import time\n'), ((301, 320), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (318, 320), False, 'import time\n'), ((386, 405), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (403, 405), False, 'import time\n')]
|
"""A class with static methods which can be used to access the data about
experiments.
This includes reading logs to parse success cases, reading images, costs
and speed.
"""
import numpy as np
from glob import glob
import torch
import pandas
import re
import json
from functools import lru_cache
import imageio
EPISODES = 561
class DataReader:
"""Container class for the static data access methods"""
EXPERIMENTS_MAPPING_FILE = 'experiments_mapping.json'
@staticmethod
@lru_cache(maxsize=1)
def get_experiments_mapping():
"""Reads the experiments mapping from a json file
EXPERIMENTS_MAPPING_FILE
"""
with open(DataReader.EXPERIMENTS_MAPPING_FILE, 'r') as f:
x = json.load(f)
return x
@staticmethod
def get_images(experiment, seed, checkpoint, episode):
"""Get simulator images for a given model evaluation on a
given episode"""
path = DataReader.get_experiments_mapping()[experiment][0]
model_name = DataReader.get_experiments_mapping()[experiment][1]
image_paths = f'{path}/planning_results/videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/ego/*.png'
images = []
for image_path in sorted(glob(image_paths)):
with open(image_path, 'rb') as f:
images.append(f.read())
return images
@staticmethod
def get_gradients(experiment, seed, checkpoint, episode):
"""Get gradients for a given model evaluation on a given episode"""
path = DataReader.get_experiments_mapping()[experiment][0]
model_name = DataReader.get_experiments_mapping()[experiment][1]
gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png'
images = []
for image_path in sorted(glob(gradient_paths)):
with open(image_path, 'rb') as f:
images.append(f.read())
return images
@staticmethod
def get_last_gradient(experiment, seed, checkpoint, episode):
"""Get the last gradient for the model and episode
Returns:
(value, x, y) - tuple, where value is the max value of the
gradient, x, y are the location of this max
value in the gradient image.
"""
path = DataReader.get_experiments_mapping()[experiment][0]
model_name = DataReader.get_experiments_mapping()[experiment][1]
gradient_paths = f'{path}/planning_results/grad_videos_simulator/{model_name}-seed={seed}-novaluestep{checkpoint}.model/ep{episode}/*.png'
images = sorted(glob(gradient_paths))
if len(images) == 0:
return (0, 0, 0)
image_path = sorted(glob(gradient_paths))[-1]
image = imageio.imread(image_path)
mx_index = np.argmax(image)
value = image.flatten()[mx_index]
middle_x = image.shape[0] / 2
middle_y = image.shape[1] / 2
x = mx_index // image.shape[1]
x -= middle_x
y = mx_index % image.shape[1]
y -= middle_y
if value == 0:
return (0, 0, 0)
else:
return (value, x, y)
@staticmethod
def get_evaluation_log_file(experiment, seed, step):
"""Retuns a path to the eval logs for given model"""
path = DataReader.get_experiments_mapping()[experiment]
regex = path[0] + 'planning_results/' + path[1] + \
f'-seed={seed}-novaluestep{step}' + '.model.log'
paths = glob(regex)
assert len(paths) == 1, \
f'paths for {regex} is not length of 1, and is equal to {paths}'
return paths[0]
@staticmethod
def get_training_log_file(experiment, seed):
"""Retuns a path to the eval logs for given model"""
path = DataReader.get_experiments_mapping()[experiment]
regex = path[0] + 'policy_networks/' + path[1] + \
f'-seed={seed}-novalue' + '.log'
paths = glob(regex)
assert len(paths) == 1, \
f'paths for {regex} is not length of 1, and is equal to {paths}'
return paths[0]
@staticmethod
@lru_cache(maxsize=100)
def find_option_values(option,
experiment=None,
seed=None,
checkpoint=None):
"""Returns possible values for selected option.
Depending on option, returns:
if option == 'seed' - returns all seeds for given experiment.
experiment has to passed.
if option == 'checkpoint' - returns all checkpoints for given
experiment and seed.
experiment and seed have to be
passed.
if option == 'episode' - returns all episodes for given
model
experiment, seed, and checkpoint have
to be passed.
"""
if option == 'seed':
path = DataReader.get_experiments_mapping()[experiment]
logs = glob(path[0] + 'planning_results/' + path[1] + '*.log')
regexp = r"seed=(\d+)-"
elif option == 'checkpoint':
path = DataReader.get_experiments_mapping()[experiment]
logs = glob(path[0] + 'planning_results/' +
path[1] + f'-seed={seed}' + '*.model.log')
regexp = r'-novaluestep(\d+)\.'
elif option == 'episode':
path = DataReader.get_experiments_mapping()[experiment]
logs = glob(path[0] +
'planning_results/videos_simulator/' +
path[1] +
f'-seed={seed}-novaluestep{checkpoint}.model/ep*')
regexp = r'model/ep(\d+)'
values = []
for log in logs:
m = re.search(regexp, log)
if m:
result = m.group(1)
values.append(int(result))
else:
print(f'{log} doesn\'t contain {option}')
# log files for each step are generated for seeds
values = list(set(values))
values.sort()
return values
@staticmethod
def get_success_rate(experiment, seed, step):
"""get the success rate for a given model"""
log_file = DataReader.get_evaluation_log_file(experiment, seed, step)
with open(log_file, 'r') as f:
last_line = f.readlines()[-1]
last_colon = last_line.rfind(':')
success_rate = float(last_line[(last_colon + 2):])
return success_rate
@staticmethod
def get_success_rates_for_experiment(experiment):
"""get success rate arrays for each seed for the given experiment
across all checkpoints.
The resulting shape of the np array is
(seeds, checkpoints), where seeds is the number of seeds,
        and checkpoints is the number of checkpoints.
"""
seeds = DataReader.find_option_values('seed', experiment)
result = {}
steps = []
min_length = 100
max_length = 0
for seed in seeds:
result[seed] = []
checkpoints = DataReader.find_option_values(
'checkpoint', experiment, seed)
if len(steps) < len(checkpoints):
steps = checkpoints
for checkpoint in checkpoints:
success = DataReader.get_success_rate(
experiment, seed, checkpoint)
result[seed].append(success)
min_length = min(min_length, len(result[seed]))
max_length = max(max_length, len(result[seed]))
if len(result) > 0:
result = np.stack([np.pad(np.array(result[seed]), (0, max_length - len(result[seed])), 'edge')
for seed in result])
steps = np.array(steps)
return steps, result
else:
return None, None
@staticmethod
def get_learning_curves_for_seed(experiment, seed):
"""Gets the training and validation total losses for a given experiment
and seed.
"""
path = DataReader.get_training_log_file(experiment, seed)
with open(path, 'r') as f:
lines = f.readlines()
regex = re.compile(".*step\s(\d+).*\s\[.*\π\:\s(.*)\].*\[.*\π\:\s(.*)\]")
steps = []
train_losses = []
validation_losses = []
for line in lines:
match = regex.match(line)
if match:
steps.append(int(match.group(1)))
train_losses.append(float(match.group(2)))
validation_losses.append(float(match.group(3)))
result = dict(
steps=steps,
train_losses=train_losses,
validation_losses=validation_losses,
)
return result
@staticmethod
def get_learning_curves_for_experiment(experiment):
seeds = DataReader.find_option_values('seed', experiment)
result = {}
steps = []
min_length = 100
max_length = 0
train = {}
validation = {}
for seed in seeds:
result[seed] = []
curves = DataReader.get_learning_curves_for_seed(experiment, seed)
for i, step in enumerate(curves['steps']):
train.setdefault(step, []).append(curves['train_losses'][i])
validation.setdefault(step, []).append(curves['validation_losses'][i])
train_means = []
train_stds = []
validation_means = []
validation_stds = []
for key in train:
train_means.append(float(np.mean(train[key])))
train_stds.append(float(np.std(train[key])))
validation_means.append(float(np.mean(validation[key])))
validation_stds.append(float(np.std(validation[key])))
result = dict(
steps=list(train.keys()),
train=(train_means, train_stds),
validation=(validation_means, validation_stds),
)
return result
@staticmethod
def get_episodes_with_outcome(experiment, seed, step, outcome):
"""Gets episodes with given outcome for a given model.
If outcome == 1, returns successful episodes,
if outcome == 0, returns failing episodes.
"""
path = DataReader.get_evaluation_log_file(experiment, seed, step)
with open(path, 'r') as f:
lines = f.readlines()
regex = re.compile(".*ep:\s+(\d+).*\|\ssuccess:\s+(\d).*")
result = []
for line in lines:
match = regex.match(line)
if match:
if int(match.group(2)) == outcome:
result.append(int(match.group(1)))
return result
@staticmethod
def get_episode_success_map(experiment, seed, step):
"""Gets a 0-1 array of shape (episodes) where episodes is
the number of episodes.
        The i-th value in the result is 0 if the i-th episode failed,
        and 1 otherwise.
"""
successes = DataReader.get_episodes_with_outcome(experiment,
seed,
step,
1)
successes = np.array(successes) - 1
result = np.zeros(EPISODES)
result[successes] = 1
return result
@staticmethod
def get_episodes_success_counts(experiment):
"""For a given experiment, for all episodes checks performance of all
the models with all possible seeds and checkpoints, and returns
an array of shape (episodes) where episodes is the number of episodes,
        where the i-th value is the number of models in this experiment that
succeeded in this episode.
"""
seeds = DataReader.find_option_values('seed', experiment)
result = np.zeros(EPISODES)
for seed in seeds:
checkpoints = DataReader.find_option_values(
'checkpoint', experiment, seed)
for checkpoint in checkpoints:
success = DataReader.get_episodes_with_outcome(experiment,
seed,
checkpoint,
1)
success = np.array(success)
success = success - 1
one_hot = np.zeros((len(success), EPISODES))
one_hot[np.arange(len(success)), success] = 1
                one_hot = np.sum(one_hot, axis=0)
one_hot = np.squeeze(one_hot)
result += one_hot
return result
@staticmethod
def get_episode_speeds(experiment, seed, checkpoint, episode):
""" Returns an array of speeds for given model and given episode"""
return DataReader.get_model_speeds(experiment,
seed,
checkpoint)[episode - 1]
@staticmethod
def get_episode_costs(experiment, seed, checkpoint, episode):
""" Returns an array of data frames with all the costs for
given evaluation """
costs = DataReader.get_model_costs(experiment,
seed,
checkpoint)
if costs is not None:
return costs[episode - 1]
else:
return None
@staticmethod
@lru_cache(maxsize=10)
def get_model_costs(experiment, seed, checkpoint):
""" Returns an array of costs for given model for all episodes"""
path = DataReader.get_experiments_mapping()[experiment]
regex = path[0] + 'planning_results/' + path[1] + \
f'-seed={seed}-novaluestep{checkpoint}' + '.model.costs'
costs_paths = glob(regex)
if len(costs_paths) == 0:
print(
f'costs_paths for {regex} is {costs_paths} and it\'s length is not 1')
return None
else:
raw_costs = torch.load(costs_paths[0])
# list of DataFrame, one per episode
costs = [pandas.DataFrame(cost if type(cost) == type([]) else cost.tolist()) for cost in raw_costs]
return costs
@staticmethod
@lru_cache(maxsize=10)
def get_model_speeds(experiment, seed, checkpoint):
""" Returns an array of speeds for given model for all episodes"""
path = DataReader.get_experiments_mapping()[experiment]
regex = path[0] + 'planning_results/' + path[1] + \
f'-seed={seed}-novaluestep{checkpoint}' + '.model.states'
states_paths = glob(regex)
assert len(states_paths) == 1, \
f'states_paths for {regex} is {states_paths} and it\'s length is not 1'
states_path = states_paths[0]
states = torch.load(states_path)
result = []
for i in range(len(states)):
episode_states = states[i]
episode_states = list(map(lambda x: x[-1], episode_states))
episode_states = torch.stack(episode_states)
result.append(episode_states[:, 2:].norm(dim=1)) # is it correct
return result
@staticmethod
@lru_cache(maxsize=10)
def get_model_states(experiment, seed, checkpoint):
""" Returns an array of states for given model for all episodes"""
path = DataReader.get_experiments_mapping()[experiment]
regex = path[0] + 'planning_results/' + path[1] + \
f'-seed={seed}-novaluestep{checkpoint}' + '.model.states'
states_paths = glob(regex)
assert len(states_paths) == 1, \
f'states_paths for {regex} is {states_paths} and it\'s length is not 1'
states_path = states_paths[0]
states = torch.load(states_path)
result = []
for i in range(len(states)):
episode_states = states[i]
episode_states = list(map(lambda x: x[-1], episode_states))
episode_states = torch.stack(episode_states)
result.append(episode_states)
return result
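
# Illustrative usage sketch (not part of the original module): 'some_experiment'
# is a placeholder (valid experiment names are the keys of
# experiments_mapping.json). It prints the mean success rate per checkpoint,
# averaged over seeds, for one experiment.
if __name__ == '__main__':
    steps, rates = DataReader.get_success_rates_for_experiment('some_experiment')
    if steps is not None:
        for step, mean_rate in zip(steps, rates.mean(axis=0)):
            print(f'checkpoint {step}: mean success rate {mean_rate:.3f}')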
|
[
"json.load",
"numpy.sum",
"torch.stack",
"numpy.argmax",
"numpy.std",
"imageio.imread",
"torch.load",
"numpy.zeros",
"numpy.mean",
"numpy.array",
"glob.glob",
"numpy.squeeze",
"functools.lru_cache",
"re.search",
"re.compile"
] |
[((494, 514), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(1)'}), '(maxsize=1)\n', (503, 514), False, 'from functools import lru_cache\n'), ((4233, 4255), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(100)'}), '(maxsize=100)\n', (4242, 4255), False, 'from functools import lru_cache\n'), ((13831, 13852), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (13840, 13852), False, 'from functools import lru_cache\n'), ((14648, 14669), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (14657, 14669), False, 'from functools import lru_cache\n'), ((15584, 15605), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (15593, 15605), False, 'from functools import lru_cache\n'), ((2863, 2889), 'imageio.imread', 'imageio.imread', (['image_path'], {}), '(image_path)\n', (2877, 2889), False, 'import imageio\n'), ((2909, 2925), 'numpy.argmax', 'np.argmax', (['image'], {}), '(image)\n', (2918, 2925), True, 'import numpy as np\n'), ((3602, 3613), 'glob.glob', 'glob', (['regex'], {}), '(regex)\n', (3606, 3613), False, 'from glob import glob\n'), ((4062, 4073), 'glob.glob', 'glob', (['regex'], {}), '(regex)\n', (4066, 4073), False, 'from glob import glob\n'), ((8529, 8607), 're.compile', 're.compile', (['""".*step\\\\s(\\\\d+).*\\\\s\\\\[.*\\\\π\\\\:\\\\s(.*)\\\\].*\\\\[.*\\\\π\\\\:\\\\s(.*)\\\\]"""'], {}), "('.*step\\\\s(\\\\d+).*\\\\s\\\\[.*\\\\π\\\\:\\\\s(.*)\\\\].*\\\\[.*\\\\π\\\\:\\\\s(.*)\\\\]')\n", (8539, 8607), False, 'import re\n'), ((10741, 10797), 're.compile', 're.compile', (['""".*ep:\\\\s+(\\\\d+).*\\\\|\\\\ssuccess:\\\\s+(\\\\d).*"""'], {}), "('.*ep:\\\\s+(\\\\d+).*\\\\|\\\\ssuccess:\\\\s+(\\\\d).*')\n", (10751, 10797), False, 'import re\n'), ((11619, 11637), 'numpy.zeros', 'np.zeros', (['EPISODES'], {}), '(EPISODES)\n', (11627, 11637), True, 'import numpy as np\n'), ((12189, 12207), 'numpy.zeros', 'np.zeros', (['EPISODES'], {}), '(EPISODES)\n', (12197, 12207), True, 'import numpy as np\n'), ((14197, 14208), 'glob.glob', 'glob', (['regex'], {}), '(regex)\n', (14201, 14208), False, 'from glob import glob\n'), ((15018, 15029), 'glob.glob', 'glob', (['regex'], {}), '(regex)\n', (15022, 15029), False, 'from glob import glob\n'), ((15210, 15233), 'torch.load', 'torch.load', (['states_path'], {}), '(states_path)\n', (15220, 15233), False, 'import torch\n'), ((15954, 15965), 'glob.glob', 'glob', (['regex'], {}), '(regex)\n', (15958, 15965), False, 'from glob import glob\n'), ((16146, 16169), 'torch.load', 'torch.load', (['states_path'], {}), '(states_path)\n', (16156, 16169), False, 'import torch\n'), ((735, 747), 'json.load', 'json.load', (['f'], {}), '(f)\n', (744, 747), False, 'import json\n'), ((1271, 1288), 'glob.glob', 'glob', (['image_paths'], {}), '(image_paths)\n', (1275, 1288), False, 'from glob import glob\n'), ((1896, 1916), 'glob.glob', 'glob', (['gradient_paths'], {}), '(gradient_paths)\n', (1900, 1916), False, 'from glob import glob\n'), ((2713, 2733), 'glob.glob', 'glob', (['gradient_paths'], {}), '(gradient_paths)\n', (2717, 2733), False, 'from glob import glob\n'), ((5274, 5329), 'glob.glob', 'glob', (["(path[0] + 'planning_results/' + path[1] + '*.log')"], {}), "(path[0] + 'planning_results/' + path[1] + '*.log')\n", (5278, 5329), False, 'from glob import glob\n'), ((6047, 6069), 're.search', 're.search', (['regexp', 'log'], {}), '(regexp, log)\n', (6056, 6069), False, 'import re\n'), ((8100, 8115), 'numpy.array', 'np.array', (['steps'], {}), '(steps)\n', (8108, 8115), True, 
'import numpy as np\n'), ((11578, 11597), 'numpy.array', 'np.array', (['successes'], {}), '(successes)\n', (11586, 11597), True, 'import numpy as np\n'), ((14411, 14437), 'torch.load', 'torch.load', (['costs_paths[0]'], {}), '(costs_paths[0])\n', (14421, 14437), False, 'import torch\n'), ((15432, 15459), 'torch.stack', 'torch.stack', (['episode_states'], {}), '(episode_states)\n', (15443, 15459), False, 'import torch\n'), ((16368, 16395), 'torch.stack', 'torch.stack', (['episode_states'], {}), '(episode_states)\n', (16379, 16395), False, 'import torch\n'), ((2821, 2841), 'glob.glob', 'glob', (['gradient_paths'], {}), '(gradient_paths)\n', (2825, 2841), False, 'from glob import glob\n'), ((5490, 5569), 'glob.glob', 'glob', (["(path[0] + 'planning_results/' + path[1] + f'-seed={seed}' + '*.model.log')"], {}), "(path[0] + 'planning_results/' + path[1] + f'-seed={seed}' + '*.model.log')\n", (5494, 5569), False, 'from glob import glob\n'), ((12694, 12711), 'numpy.array', 'np.array', (['success'], {}), '(success)\n', (12702, 12711), True, 'import numpy as np\n'), ((12950, 12969), 'numpy.squeeze', 'np.squeeze', (['one_hot'], {}), '(one_hot)\n', (12960, 12969), True, 'import numpy as np\n'), ((5759, 5877), 'glob.glob', 'glob', (["(path[0] + 'planning_results/videos_simulator/' + path[1] +\n f'-seed={seed}-novaluestep{checkpoint}.model/ep*')"], {}), "(path[0] + 'planning_results/videos_simulator/' + path[1] +\n f'-seed={seed}-novaluestep{checkpoint}.model/ep*')\n", (5763, 5877), False, 'from glob import glob\n'), ((9900, 9919), 'numpy.mean', 'np.mean', (['train[key]'], {}), '(train[key])\n', (9907, 9919), True, 'import numpy as np\n'), ((9958, 9976), 'numpy.std', 'np.std', (['train[key]'], {}), '(train[key])\n', (9964, 9976), True, 'import numpy as np\n'), ((10021, 10045), 'numpy.mean', 'np.mean', (['validation[key]'], {}), '(validation[key])\n', (10028, 10045), True, 'import numpy as np\n'), ((10089, 10112), 'numpy.std', 'np.std', (['validation[key]'], {}), '(validation[key])\n', (10095, 10112), True, 'import numpy as np\n'), ((12899, 12922), 'numpy.sum', 'np.sum', (['one_hot'], {'axis': '(0)'}), '(one_hot, axis=0)\n', (12905, 12922), True, 'import numpy as np\n'), ((7959, 7981), 'numpy.array', 'np.array', (['result[seed]'], {}), '(result[seed])\n', (7967, 7981), True, 'import numpy as np\n')]
|
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mutable QNode, complicated primary parameters benchmark.
"""
# pylint: disable=invalid-name
import numpy as np
import pennylane as qml
import benchmark_utils as bu
def circuit(p, *, aux=0):
"""A very simple, lightweight mutable quantum circuit."""
qml.RX(p[aux][2], wires=[0])
return qml.expval(qml.PauliZ(0))
class Benchmark(bu.BaseBenchmark):
"""
This benchmark attempts to measure the efficiency of :meth:`JacobianQNode._construct` for
mutable QNodes, using an extreme case where the QNode has lots of primary parameters with
a complicated nested structure, but relatively few auxiliary parameters, and only a few
of the primary parameters are actually used in the circuit.
When the QNode is constructed, a VariableRef is built for each primary parameter,
    and the qfunc is re-evaluated. In this test that re-evaluation is meant to be time-consuming, but it is only
strictly necessary if the auxiliary parameters change.
The main reasons why there are significant differences in the execution speed of this test
between different PL commits:
* :meth:`BaseQNode._construct` should only reconstruct the QNode if the auxiliary params
have changed.
* Most of the primary params are not used in the circuit, hence
:meth:`JacobianQNode._construct` should efficiently figure out that partial derivatives
wrt. them are always zero.
"""
name = "mutable qnode, complicated primary params"
min_wires = 1
n_vals = range(6, 13, 1)
def __init__(self, device=None, verbose=False):
super().__init__(device, verbose)
self.qnode = None
def setup(self):
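        # mutable QNodes can be reconstructed on later evaluations, which is exactly what this benchmark exercises (see the class docstring)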
self.qnode = bu.create_qnode(circuit, self.device, mutable=True, interface=None)
def benchmark(self, n=8):
# n is the number of levels in the primary parameter tree.
# Hence the number of primary parameters depends exponentially on n.
def create_params(n):
"""Recursively builds a tree structure with n levels."""
if n <= 0:
# the leaves are arrays
return np.random.randn(2)
# the other nodes have two branches and a scalar
return [create_params(n - 1), create_params(n - 1), np.random.randn()]
p = create_params(n)
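        # p is a nested tree with 2**n leaf arrays, but the circuit only ever reads the scalar p[aux][2]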
def evaluate(aux):
"""Evaluates the qnode using the given auxiliary params."""
res = self.qnode(p, aux=aux)
# check the result
assert np.allclose(res, np.cos(p[aux][2]))
# first evaluation and construction
evaluate(0)
# evaluate the node several times more with a different auxiliary argument
# (it does not matter if p changes or not, the VariableRefs handle it)
for _ in range(1, 10):
# If we had evaluate(i % 2) here instead the auxiliary arguments would change
# every time, which would negate most possible speedups.
evaluate(1)
return True
|
[
"benchmark_utils.create_qnode",
"numpy.random.randn",
"pennylane.RX",
"numpy.cos",
"pennylane.PauliZ"
] |
[((861, 889), 'pennylane.RX', 'qml.RX', (['p[aux][2]'], {'wires': '[0]'}), '(p[aux][2], wires=[0])\n', (867, 889), True, 'import pennylane as qml\n'), ((912, 925), 'pennylane.PauliZ', 'qml.PauliZ', (['(0)'], {}), '(0)\n', (922, 925), True, 'import pennylane as qml\n'), ((2281, 2348), 'benchmark_utils.create_qnode', 'bu.create_qnode', (['circuit', 'self.device'], {'mutable': '(True)', 'interface': 'None'}), '(circuit, self.device, mutable=True, interface=None)\n', (2296, 2348), True, 'import benchmark_utils as bu\n'), ((2710, 2728), 'numpy.random.randn', 'np.random.randn', (['(2)'], {}), '(2)\n', (2725, 2728), True, 'import numpy as np\n'), ((2854, 2871), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (2869, 2871), True, 'import numpy as np\n'), ((3111, 3128), 'numpy.cos', 'np.cos', (['p[aux][2]'], {}), '(p[aux][2])\n', (3117, 3128), True, 'import numpy as np\n')]
|
from rest_framework.decorators import (
api_view,
permission_classes,
authentication_classes,
renderer_classes,
)
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.authentication import BaseAuthentication
from rest_framework.renderers import JSONRenderer
from django.conf.urls import url
class AnyAuthentication(BaseAuthentication):
def authenticate(self, request):
return
class JSONPRenderer(JSONRenderer):
"""
    JSONP renderer: wraps the JSON payload in the requested JavaScript callback.
"""
media_type = "application/javascript"
def render(self, data, accepted_media_type=None, renderer_context=None):
renderer_context = renderer_context or {}
request = renderer_context.get("request", None)
callback = request.query_params.get("callback", "callback")
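        # JSONP: wrap the JSON body in the requested callback name so it can be loaded cross-origin via a <script> tag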
json = super(JSONPRenderer, self).render(
data, accepted_media_type, renderer_context
)
return callback.encode("utf-8") + b"(" + json + b");"
@api_view(["GET"])
@authentication_classes((AnyAuthentication,))
@permission_classes((AllowAny,))
@renderer_classes(
(JSONPRenderer,),
)
def jsonp(request):
token = request.COOKIES.get("auth", "")
cookies = {
"token": token,
"host": request.get_host(),
}
response = Response(cookies)
return response
@api_view(["POST"])
@authentication_classes((AnyAuthentication,))
@permission_classes((AllowAny,))
def login(request):
token = request.COOKIES.get("auth", "auth")
password = request.data.get("password", "")
username = request.data.get("username", "")
    # the user center would validate the username and password here
response = Response({"user": "user_info", "token": token})
    response.set_cookie("auth", token, domain="0.0.0.0", max_age=30 * 24 * 60 * 60)
return response
@api_view(["GET"])
@authentication_classes((AnyAuthentication,))
@permission_classes((AllowAny,))
def check_token(request, token):
token = request.COOKIES.get("auth")
    # the user center would validate the token here
data = {"user_info": {"username": "admin", "user_id": 1}, "token": token}
return Response(data)
mock_urls = [
url("^jsonp/", jsonp),
url("^login/", login),
url(r"^check_token/(?P<token>[A-Za-z0-9]+)/$", check_token),
]
|
[
"rest_framework.decorators.renderer_classes",
"rest_framework.decorators.authentication_classes",
"rest_framework.response.Response",
"django.conf.urls.url",
"rest_framework.decorators.permission_classes",
"rest_framework.decorators.api_view"
] |
[((1015, 1032), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (1023, 1032), False, 'from rest_framework.decorators import api_view, permission_classes, authentication_classes, renderer_classes\n'), ((1034, 1078), 'rest_framework.decorators.authentication_classes', 'authentication_classes', (['(AnyAuthentication,)'], {}), '((AnyAuthentication,))\n', (1056, 1078), False, 'from rest_framework.decorators import api_view, permission_classes, authentication_classes, renderer_classes\n'), ((1080, 1111), 'rest_framework.decorators.permission_classes', 'permission_classes', (['(AllowAny,)'], {}), '((AllowAny,))\n', (1098, 1111), False, 'from rest_framework.decorators import api_view, permission_classes, authentication_classes, renderer_classes\n'), ((1113, 1147), 'rest_framework.decorators.renderer_classes', 'renderer_classes', (['(JSONPRenderer,)'], {}), '((JSONPRenderer,))\n', (1129, 1147), False, 'from rest_framework.decorators import api_view, permission_classes, authentication_classes, renderer_classes\n'), ((1358, 1376), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (1366, 1376), False, 'from rest_framework.decorators import api_view, permission_classes, authentication_classes, renderer_classes\n'), ((1378, 1422), 'rest_framework.decorators.authentication_classes', 'authentication_classes', (['(AnyAuthentication,)'], {}), '((AnyAuthentication,))\n', (1400, 1422), False, 'from rest_framework.decorators import api_view, permission_classes, authentication_classes, renderer_classes\n'), ((1424, 1455), 'rest_framework.decorators.permission_classes', 'permission_classes', (['(AllowAny,)'], {}), '((AllowAny,))\n', (1442, 1455), False, 'from rest_framework.decorators import api_view, permission_classes, authentication_classes, renderer_classes\n'), ((1833, 1850), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (1841, 1850), False, 'from rest_framework.decorators import api_view, permission_classes, authentication_classes, renderer_classes\n'), ((1852, 1896), 'rest_framework.decorators.authentication_classes', 'authentication_classes', (['(AnyAuthentication,)'], {}), '((AnyAuthentication,))\n', (1874, 1896), False, 'from rest_framework.decorators import api_view, permission_classes, authentication_classes, renderer_classes\n'), ((1898, 1929), 'rest_framework.decorators.permission_classes', 'permission_classes', (['(AllowAny,)'], {}), '((AllowAny,))\n', (1916, 1929), False, 'from rest_framework.decorators import api_view, permission_classes, authentication_classes, renderer_classes\n'), ((1317, 1334), 'rest_framework.response.Response', 'Response', (['cookies'], {}), '(cookies)\n', (1325, 1334), False, 'from rest_framework.response import Response\n'), ((1678, 1725), 'rest_framework.response.Response', 'Response', (["{'user': 'user_info', 'token': token}"], {}), "({'user': 'user_info', 'token': token})\n", (1686, 1725), False, 'from rest_framework.response import Response\n'), ((2130, 2144), 'rest_framework.response.Response', 'Response', (['data'], {}), '(data)\n', (2138, 2144), False, 'from rest_framework.response import Response\n'), ((2165, 2186), 'django.conf.urls.url', 'url', (['"""^jsonp/"""', 'jsonp'], {}), "('^jsonp/', jsonp)\n", (2168, 2186), False, 'from django.conf.urls import url\n'), ((2192, 2213), 'django.conf.urls.url', 'url', (['"""^login/"""', 'login'], {}), "('^login/', login)\n", (2195, 2213), False, 'from django.conf.urls import url\n'), ((2219, 2277), 
'django.conf.urls.url', 'url', (['"""^check_token/(?P<token>[A-Za-z0-9]+)/$"""', 'check_token'], {}), "('^check_token/(?P<token>[A-Za-z0-9]+)/$', check_token)\n", (2222, 2277), False, 'from django.conf.urls import url\n')]
|
#
# Copyright (C) 2020 Arm Mbed. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Text snippet extractor."""
from typing import List, Optional
from snippet import exceptions
from snippet.config import Config
class Example:
"""An example."""
def __init__(self, path: str, line_num: int, example_name: str, line: str) -> None:
"""Initialiser."""
self._key = (path, line_num, example_name)
self._strip = len(line) - len(line.lstrip())
self._text: List[str] = list()
self._cloaking = False
def add_line(self, line: str) -> None:
"""Adds a line."""
if self._cloaking:
return
self._text.append(line)
def cloak(self, line_num: int) -> None:
"""Starts cloaking."""
if self._cloaking:
raise exceptions.CloakMismatch(f"Already cloaked at {self.debug_id} ({line_num})")
self._cloaking = True
def uncloak(self, line_num: int) -> None:
"""Stops cloaking."""
if not self._cloaking:
raise exceptions.CloakMismatch(f"Already uncloaked at {self.debug_id} ({line_num})")
self._cloaking = False
@property
def is_cloaking(self) -> bool:
"""States whether it's in cloaking mode."""
return self._cloaking
@property
def is_empty(self) -> bool:
"""States whether the example is empty or not."""
return len(self._text) == 0
@property
def text(self) -> List[str]:
"""Gets example text."""
return self._text
@property
def strip_number(self) -> int:
"""Gets the example strip number."""
return self._strip
@property
def key(self) -> tuple:
"""Gets the example key."""
return self._key
@property
def debug_id(self) -> str:
"""Gets some debug information about the example."""
return str(self.key)
class Examples:
"""All the examples in a file."""
def __init__(self) -> None:
"""Initialiser."""
self._examples: List[Example] = list()
self._current_example: Optional[Example] = None
def set_current(self, example: Example, line_num: int) -> None:
"""Sets current example."""
if self._current_example:
raise exceptions.StartEndMismatch(f"Already capturing at {self._current_example.debug_id} ({line_num})")
self._current_example = example
def store_current(self, line_num: int) -> None:
"""Stores current example."""
if not self._current_example:
raise exceptions.StartEndMismatch(f"Not yet capturing at {line_num}")
if self._current_example.is_cloaking:
raise exceptions.CloakMismatch(
f"End of example reached whilst still cloaked {self._current_example.debug_id} ({line_num})"
)
if not self._current_example.is_empty:
self._examples.append(self._current_example)
self._current_example = None
def cloak(self, line_num: int) -> None:
"""Start cloaking."""
if self._current_example:
self._current_example.cloak(line_num)
def uncloak(self, line_num: int) -> None:
"""Stops cloaking."""
if self._current_example:
self._current_example.uncloak(line_num)
def end(self, line_num: int) -> None:
"""Ends."""
if self._current_example:
raise exceptions.StartEndMismatch(
f"EOF reached whilst still capturing {self._current_example.debug_id} ({line_num})"
)
def add_line(self, line: str) -> None:
"""Adds a line."""
if self._current_example:
self._current_example.add_line(line)
def validate_dedent(self, line: str, line_num: int) -> None:
"""Validates dedent."""
if not self._current_example:
return
if any(line[: self._current_example.strip_number].lstrip()):
raise exceptions.ValidationFailure(
f"Unexpected dedent whilst capturing {self._current_example.debug_id} ({line_num})"
)
def validate_line(self, fail_on_contains: List[str], line: str, line_num: int) -> None:
"""Validates line."""
for trigger in fail_on_contains:
if trigger in line:
debug_info = self._current_example.debug_id if self._current_example else ""
raise exceptions.ValidationFailure(f"Unexpected phrase {repr(trigger)} at {debug_info} ({line_num})")
def clean_line(self, line: str) -> str:
"""Cleans a line."""
if not self._current_example:
return line
start = self._current_example.strip_number
return line[start:].rstrip()
@property
def all(self) -> list:
"""Gets all the examples."""
return self._examples
def extract_snippets_from_text(config: Config, lines: list, path: str) -> dict:
"""Finds snippets in lines of text."""
examples = Examples()
line_index = 0
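    # line_index remembers the last line seen so end() can report where EOF was reached if a block is still open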
for line_num, line in enumerate(lines):
line_index = line_num
if config.start_flag in line:
# start capturing code from the next line
examples.set_current(
Example(path=path, line_num=line_num, example_name=line.rsplit(":")[-1].strip(), line=line), line_num
)
continue
if config.end_flag in line:
# stop capturing, and discard empty blocks
examples.store_current(line_num)
continue
if config.uncloak_flag in line:
examples.uncloak(line_num)
continue
if config.cloak_flag in line:
examples.cloak(line_num)
continue
# whilst capturing, append code lines to the current block
if config.fail_on_dedent:
examples.validate_dedent(line, line_num)
clean_line = examples.clean_line(line)
if any(match in clean_line for match in config.drop_lines):
continue
for r_before, r_after in config.replacements.items():
clean_line = clean_line.replace(r_before, r_after)
examples.validate_line(config.fail_on_contains, clean_line, line_num)
# add this line of code to the example block
examples.add_line(clean_line)
examples.end(line_index)
return {example.key: example.text for example in examples.all}
|
[
"snippet.exceptions.CloakMismatch",
"snippet.exceptions.StartEndMismatch",
"snippet.exceptions.ValidationFailure"
] |
[((823, 899), 'snippet.exceptions.CloakMismatch', 'exceptions.CloakMismatch', (['f"""Already cloaked at {self.debug_id} ({line_num})"""'], {}), "(f'Already cloaked at {self.debug_id} ({line_num})')\n", (847, 899), False, 'from snippet import exceptions\n'), ((1056, 1134), 'snippet.exceptions.CloakMismatch', 'exceptions.CloakMismatch', (['f"""Already uncloaked at {self.debug_id} ({line_num})"""'], {}), "(f'Already uncloaked at {self.debug_id} ({line_num})')\n", (1080, 1134), False, 'from snippet import exceptions\n'), ((2284, 2387), 'snippet.exceptions.StartEndMismatch', 'exceptions.StartEndMismatch', (['f"""Already capturing at {self._current_example.debug_id} ({line_num})"""'], {}), "(\n f'Already capturing at {self._current_example.debug_id} ({line_num})')\n", (2311, 2387), False, 'from snippet import exceptions\n'), ((2570, 2633), 'snippet.exceptions.StartEndMismatch', 'exceptions.StartEndMismatch', (['f"""Not yet capturing at {line_num}"""'], {}), "(f'Not yet capturing at {line_num}')\n", (2597, 2633), False, 'from snippet import exceptions\n'), ((2698, 2826), 'snippet.exceptions.CloakMismatch', 'exceptions.CloakMismatch', (['f"""End of example reached whilst still cloaked {self._current_example.debug_id} ({line_num})"""'], {}), "(\n f'End of example reached whilst still cloaked {self._current_example.debug_id} ({line_num})'\n )\n", (2722, 2826), False, 'from snippet import exceptions\n'), ((3425, 3547), 'snippet.exceptions.StartEndMismatch', 'exceptions.StartEndMismatch', (['f"""EOF reached whilst still capturing {self._current_example.debug_id} ({line_num})"""'], {}), "(\n f'EOF reached whilst still capturing {self._current_example.debug_id} ({line_num})'\n )\n", (3452, 3547), False, 'from snippet import exceptions\n'), ((3964, 4087), 'snippet.exceptions.ValidationFailure', 'exceptions.ValidationFailure', (['f"""Unexpected dedent whilst capturing {self._current_example.debug_id} ({line_num})"""'], {}), "(\n f'Unexpected dedent whilst capturing {self._current_example.debug_id} ({line_num})'\n )\n", (3992, 4087), False, 'from snippet import exceptions\n')]
|
import exhaust
def test_double_iteration():
def gen(state: exhaust.State):
return state.maybe()
space = exhaust.space(gen)
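    # the space must be re-iterable: both passes over it should yield the same two generated values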
assert len(set(space)) == 2
assert len(set(space)) == 2
|
[
"exhaust.space"
] |
[((122, 140), 'exhaust.space', 'exhaust.space', (['gen'], {}), '(gen)\n', (135, 140), False, 'import exhaust\n')]
|
# -*- coding: utf-8 -*-
# vim: sw=4 ts=4 fenc=utf-8
# =============================================================================
# $Id: ispman_helpers.py 84 2006-11-27 04:12:13Z s0undt3ch $
# =============================================================================
# $URL: http://ispmanccp.ufsoft.org/svn/branches/PythonPerl/ispmanccp/lib/ispman_helpers.py $
# $LastChangedDate: 2006-11-27 04:12:13 +0000 (Mon, 27 Nov 2006) $
# $Rev: 84 $
# $LastChangedBy: s0undt3ch $
# =============================================================================
# Copyright (C) 2006 Ufsoft.org - <NAME> <<EMAIL>>
#
# Please view LICENSE for additional licensing information.
# =============================================================================
from string import join
from formencode.variabledecode import variable_decode
from pylons import request, g, cache
from pylons.decorators.cache import beaker_cache
from ispmanccp.lib.helpers import to_unicode, asbool
from ispmanccp.lib.decorators import perlexcept
APP_CONF = g.pylons_config.app_conf
ispman_cache = cache.get_cache('ispman')
allowed_user_attributes = (
'dn', 'dialupAccess', 'radiusProfileDn', 'uid', 'uidNumber', 'gidNumber',
'homeDirectory', 'loginShell', 'ispmanStatus', 'ispmanCreateTimestamp',
'ispmanUserId', 'ispmanDomain', 'DestinationAddress', 'DestinationPort',
'mailQuota', 'mailHost', 'fileHost', 'cn', 'mailRoutingAddress',
'FTPStatus', 'FTPQuotaMBytes', 'mailAlias', 'sn', 'mailLocalAddress',
'userPassword', 'mailForwardingAddress', 'givenName')
updatable_attributes = (
'ispmanStatus', 'mailQuota', 'mailAlias', 'sn', 'userPassword',
'givenName', 'updateUser', 'uid', 'mailForwardingAddress', 'ispmanDomain',
'FTPQuotaMBytes', 'FTPStatus', 'mailHost', 'fileHost', 'dialupAccess',
'radiusProfileDN'
)
def get_cache(domain):
return cache.get_cache(domain)
def get_domain_users(domain, attr_list):
"""Function to get the `attr_list` from all users on `domain`"""
if attr_list.count('ispmanUserId') < 1:
attr_list.append('ispmanUserId')
userlist = to_unicode(g.ispman.getUsers(domain, attr_list))
decorated = [(dict_['ispmanUserId'], dict_) for dict_ in userlist.values()]
decorated.sort()
result = [dict_ for (key, dict_) in decorated]
return result
def address_exists_on_domain(domain, address):
users = get_domain_users(
domain,
[
"ispmanUserId",
"mailAlias",
"mailLocalAddress",
#"mailForwardingAddress"
]
)
for user in users:
for key, val, in user.iteritems():
if isinstance(val, list):
for n in range(len(val)):
if val[n] == address:
return user["ispmanUserId"]
elif val == address:
return user["ispmanUserId"]
return None
def get_users_list(domain, letter, sortby=None, sort_ascending=True):
domain_users = get_domain_users(
domain, [
"dn",
"givenName",
"sn",
"cn",
"ispmanCreateTimestamp",
"ispmanUserId",
"mailLocalAddress",
"mailForwardingAddress",
"userPassword",
"mailQuota",
"mailAlias",
"FTPQuotaMBytes",
"FTPStatus"
]
)
userlist = []
lengths = {}
for user in domain_users:
user_id = user['ispmanUserId']
lengths[user_id] = {}
        # Apparently Genshi converts what it can to strings,
# we have to make these lists
if 'mailAlias' in user:
lengths[user_id]['aliases'] = len(user['mailAlias'])
if 'mailForwardingAddress' in user:
lengths[user_id]['forwards'] = len(user['mailForwardingAddress'])
if letter == 'All' or user_id.upper().startswith(letter):
userlist.append(user)
# let's save some time and return right away if we don't need any sorting
if len(userlist) <= 1:
return lengths, userlist
decorated = [(dict_[sortby], dict_) for dict_ in userlist]
decorated.sort()
if not sort_ascending:
decorated.reverse()
result = [dict_ for (key, dict_) in decorated]
return lengths, result
def get_user_info(uid, domain):
user_info = to_unicode(g.ispman.getUserInfo(uid + '@' + domain, domain))
lengths = {}
lengths[uid] = {}
if 'mailAlias' in user_info:
lengths[uid]['aliases'] = len(user_info['mailAlias'])
if 'mailForwardingAddress' in user_info:
lengths[uid]['forwards'] = len(user_info['mailForwardingAddress'])
user_info['mailQuota'] = int(user_info['mailQuota'])/1024
return lengths, user_info
def get_perl_cgi(params_dict):
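    # build a Perl "new CGI({...})" statement from the submitted parameters and evaluate it in the embedded Perl interpreter (g.perl)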
attrib_tpl = """ '%(key)s' => ['%(val)s'], """
params_dict = variable_decode(params_dict)
cgi_params = "$q = new CGI({"
for key, val in params_dict.iteritems():
if key in updatable_attributes:
if isinstance(val, list):
cgi_params += attrib_tpl % ( {'key': key, 'val': join(val)} )
else:
cgi_params += attrib_tpl % ( {'key': key, 'val': val} )
cgi_params += """}) or die "$@";"""
cgi = g.perl.eval(cgi_params)
g.perl.eval('$q->header(-charset => "UTF-8");')
return cgi
@perlexcept
def update_user_info(attrib_dict):
cgi = get_perl_cgi(attrib_dict)
return asbool(g.ispman.update_user(cgi))
def get_user_attribute_values(id, domain, attribute):
return to_unicode(
g.ispman.getUserAttributeValues(id, domain, attribute)
)
@perlexcept
def delete_user(post_dict):
cgi = get_perl_cgi(post_dict)
return asbool(g.ispman.deleteUser(cgi))
def user_exists(user_id):
uid = user_id + '@' + request.POST['ispmanDomain']
return bool(int(g.ispman.userExists(uid)))
# cache it for 5 minutes
@beaker_cache(expire=300, query_args=True)
def get_domain_info(domain):
return to_unicode(dict(
g.ispman.getDomainInfo(domain, 2))
)
def get_domain_vhost_count(domain):
return to_unicode(g.ispman.getVhostCount(domain))
def get_domain_user_count(domain):
return to_unicode(g.ispman.getUserCount(domain))
# cache it for 1 hour
@beaker_cache(expire=3600, query_args=True)
def get_default_acount_vars():
defaults = {}
defaults['defaultUserFtpQuota'] = to_unicode(
g.ispman.getConf('defaultUserFtpQuota')
)
defaults['defaultUserMailQuota'] = to_unicode(
g.ispman.getConf('defaultUserMailQuota')
)
return defaults
@perlexcept
def add_user(attrib_dict):
cgi = get_perl_cgi(attrib_dict)
return g.ispman.addUser(cgi)
def ldap_search(ldap_filter="objectClass=*",
attrs=None,
scope="sub",
sort='ispmanUserId',
ascending=True):
base = APP_CONF['ispman_ldap_base_dn']
if attrs is not None:
results = to_unicode(
g.ispman.getEntriesAsHashRef(base, ldap_filter, attrs, scope)
)
else:
results = to_unicode(
g.ispman.getEntriesAsHashRef(base, ldap_filter)
)
entries = []
if not results:
return None
for dn in results:
vals = results[dn]
vals['dn'] = dn
entries.append(vals)
if len(entries) <= 1:
return entries
decorated = [(dict_[sort], dict_) for dict_ in entries]
decorated.sort()
if not ascending:
decorated.reverse()
result = [dict_ for (key, dict_) in decorated]
return result
|
[
"pylons.g.ispman.getVhostCount",
"pylons.g.ispman.update_user",
"pylons.g.ispman.getDomainInfo",
"string.join",
"pylons.g.ispman.getUserAttributeValues",
"pylons.g.ispman.deleteUser",
"pylons.g.ispman.userExists",
"pylons.g.ispman.getEntriesAsHashRef",
"pylons.decorators.cache.beaker_cache",
"pylons.g.perl.eval",
"pylons.g.ispman.getConf",
"pylons.g.ispman.addUser",
"pylons.cache.get_cache",
"pylons.g.ispman.getUserInfo",
"formencode.variabledecode.variable_decode",
"pylons.g.ispman.getUserCount",
"pylons.g.ispman.getUsers"
] |
[((1095, 1120), 'pylons.cache.get_cache', 'cache.get_cache', (['"""ispman"""'], {}), "('ispman')\n", (1110, 1120), False, 'from pylons import request, g, cache\n'), ((5957, 5998), 'pylons.decorators.cache.beaker_cache', 'beaker_cache', ([], {'expire': '(300)', 'query_args': '(True)'}), '(expire=300, query_args=True)\n', (5969, 5998), False, 'from pylons.decorators.cache import beaker_cache\n'), ((6312, 6354), 'pylons.decorators.cache.beaker_cache', 'beaker_cache', ([], {'expire': '(3600)', 'query_args': '(True)'}), '(expire=3600, query_args=True)\n', (6324, 6354), False, 'from pylons.decorators.cache import beaker_cache\n'), ((1890, 1913), 'pylons.cache.get_cache', 'cache.get_cache', (['domain'], {}), '(domain)\n', (1905, 1913), False, 'from pylons import request, g, cache\n'), ((4908, 4936), 'formencode.variabledecode.variable_decode', 'variable_decode', (['params_dict'], {}), '(params_dict)\n', (4923, 4936), False, 'from formencode.variabledecode import variable_decode\n'), ((5312, 5335), 'pylons.g.perl.eval', 'g.perl.eval', (['cgi_params'], {}), '(cgi_params)\n', (5323, 5335), False, 'from pylons import request, g, cache\n'), ((5340, 5387), 'pylons.g.perl.eval', 'g.perl.eval', (['"""$q->header(-charset => "UTF-8");"""'], {}), '(\'$q->header(-charset => "UTF-8");\')\n', (5351, 5387), False, 'from pylons import request, g, cache\n'), ((6721, 6742), 'pylons.g.ispman.addUser', 'g.ispman.addUser', (['cgi'], {}), '(cgi)\n', (6737, 6742), False, 'from pylons import request, g, cache\n'), ((2164, 2200), 'pylons.g.ispman.getUsers', 'g.ispman.getUsers', (['domain', 'attr_list'], {}), '(domain, attr_list)\n', (2181, 2200), False, 'from pylons import request, g, cache\n'), ((4410, 4458), 'pylons.g.ispman.getUserInfo', 'g.ispman.getUserInfo', (["(uid + '@' + domain)", 'domain'], {}), "(uid + '@' + domain, domain)\n", (4430, 4458), False, 'from pylons import request, g, cache\n'), ((5505, 5530), 'pylons.g.ispman.update_user', 'g.ispman.update_user', (['cgi'], {}), '(cgi)\n', (5525, 5530), False, 'from pylons import request, g, cache\n'), ((5619, 5673), 'pylons.g.ispman.getUserAttributeValues', 'g.ispman.getUserAttributeValues', (['id', 'domain', 'attribute'], {}), '(id, domain, attribute)\n', (5650, 5673), False, 'from pylons import request, g, cache\n'), ((5773, 5797), 'pylons.g.ispman.deleteUser', 'g.ispman.deleteUser', (['cgi'], {}), '(cgi)\n', (5792, 5797), False, 'from pylons import request, g, cache\n'), ((6165, 6195), 'pylons.g.ispman.getVhostCount', 'g.ispman.getVhostCount', (['domain'], {}), '(domain)\n', (6187, 6195), False, 'from pylons import request, g, cache\n'), ((6256, 6285), 'pylons.g.ispman.getUserCount', 'g.ispman.getUserCount', (['domain'], {}), '(domain)\n', (6277, 6285), False, 'from pylons import request, g, cache\n'), ((6462, 6501), 'pylons.g.ispman.getConf', 'g.ispman.getConf', (['"""defaultUserFtpQuota"""'], {}), "('defaultUserFtpQuota')\n", (6478, 6501), False, 'from pylons import request, g, cache\n'), ((6567, 6607), 'pylons.g.ispman.getConf', 'g.ispman.getConf', (['"""defaultUserMailQuota"""'], {}), "('defaultUserMailQuota')\n", (6583, 6607), False, 'from pylons import request, g, cache\n'), ((5902, 5926), 'pylons.g.ispman.userExists', 'g.ispman.userExists', (['uid'], {}), '(uid)\n', (5921, 5926), False, 'from pylons import request, g, cache\n'), ((6064, 6097), 'pylons.g.ispman.getDomainInfo', 'g.ispman.getDomainInfo', (['domain', '(2)'], {}), '(domain, 2)\n', (6086, 6097), False, 'from pylons import request, g, cache\n'), ((7029, 7090), 
'pylons.g.ispman.getEntriesAsHashRef', 'g.ispman.getEntriesAsHashRef', (['base', 'ldap_filter', 'attrs', 'scope'], {}), '(base, ldap_filter, attrs, scope)\n', (7057, 7090), False, 'from pylons import request, g, cache\n'), ((7153, 7200), 'pylons.g.ispman.getEntriesAsHashRef', 'g.ispman.getEntriesAsHashRef', (['base', 'ldap_filter'], {}), '(base, ldap_filter)\n', (7181, 7200), False, 'from pylons import request, g, cache\n'), ((5159, 5168), 'string.join', 'join', (['val'], {}), '(val)\n', (5163, 5168), False, 'from string import join\n')]
|
import json
from os.path import join
import requests
from django.conf import settings
def remove_unneeded_properties(feature):
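    # drop the osm:/result: prefixed properties and other analysis metadata before the feature is pushed to MapRoulette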
keys_to_remove = [
key for key in feature['properties'].keys()
if key.startswith('osm:') or key.startswith('result:')
]
for key in keys_to_remove:
feature['properties'].pop(key)
if feature['properties'].get('oldVersion'):
feature['properties'].pop('oldVersion')
if feature['properties'].get('suspicions'):
feature['properties'].pop('suspicions')
return feature
def format_challenge_task_payload(feature, challenge_id, name, reasons=[]):
    if reasons:
        feature['properties']['osmcha_reasons'] = ", ".join(reasons)
payload = {
"parent": challenge_id,
"name": "{}".format(name),
"geometries": {"features": [remove_unneeded_properties(feature)]}
}
return json.dumps(payload)
def push_feature_to_maproulette(feature, challenge_id, name, reasons=[]):
if (settings.MAP_ROULETTE_API_KEY is not None and
settings.MAP_ROULETTE_API_URL is not None):
payload = format_challenge_task_payload(
feature, challenge_id, name, reasons
)
headers = {
"Content-Type": "application/json",
"apiKey": settings.MAP_ROULETTE_API_KEY
}
return requests.post(
join(settings.MAP_ROULETTE_API_URL, 'task'),
headers=headers,
data=payload
)
|
[
"os.path.join",
"json.dumps"
] |
[((920, 939), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (930, 939), False, 'import json\n'), ((1414, 1457), 'os.path.join', 'join', (['settings.MAP_ROULETTE_API_URL', '"""task"""'], {}), "(settings.MAP_ROULETTE_API_URL, 'task')\n", (1418, 1457), False, 'from os.path import join\n')]
|
#!/bin/env python3
import argparse
import zmq
import threading
import json
import time
from libs.mylib import is_prime
def parse_args():
parser = argparse.ArgumentParser(description='Find all prime number in a range (from 2).')
parser.add_argument('max', type=int, default=1000,
help='from 2 to MAX')
return parser.parse_args()
def worker_routine(worker_url, control_url, context=None):
"""Worker routine"""
print('thread started')
context = context or zmq.Context.instance()
w_socket = context.socket(zmq.REP)
w_socket.connect(worker_url)
c_sub = context.socket(zmq.SUB)
c_sub.connect(control_url)
c_sub.setsockopt(zmq.SUBSCRIBE, b"S")
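    # subscribe to the "S" control topic; the main thread publishes [b'S', b'1'] to tell the workers to stop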
while True:
try:
[address, stop_bit] = c_sub.recv_multipart(flags=zmq.NOBLOCK)
print('==> %s, %s'%(address, stop_bit))
if int(stop_bit) == 1:
break
except zmq.Again as e:
pass
try:
string = w_socket.recv(flags=zmq.NOBLOCK)
data = json.loads(string)
value = data['value']
known_primes = data['known_primes']
isPrime = is_prime(value, known_primes)
#print('%d: %d', value, isPrime)
#send reply back to client
w_socket.send(b"%d"%isPrime)
#w_socket.send(b'%d'%True)
except zmq.Again as e:
pass
print('thread terminated')
#w_socket.close()
#context.close()
def main(num_threads=2, num_ceil=10, known_primes=None):
    # avoid a mutable default argument: a shared default list would keep growing across calls
    if known_primes is None:
        known_primes = [2]
worker_url = "inproc://workers"
control_url = "inproc://control"
context = zmq.Context.instance()
w_socket = context.socket(zmq.REQ)
w_socket.bind(worker_url)
c_pub = context.socket(zmq.PUB)
c_pub.bind(control_url)
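    # the PUB socket broadcasts the stop signal to every worker thread over the in-process control channel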
print('Start threads')
for i in range(num_threads):
thread = threading.Thread(target=worker_routine,
args=(worker_url, control_url, ))
thread.start()
print('Find primes')
for i in range(3, num_ceil+1):
data = {'value': i, 'known_primes':known_primes}
str_data = json.dumps(data)
        b_data = str_data.encode('ascii')
w_socket.send(b_data)
y_n = w_socket.recv()
if int(y_n) == 1:
known_primes.append(i)
print('Done finding')
c_pub.send_multipart([b'S', b'1'])
time.sleep(1)
w_socket.close()
c_pub.close()
context.term()
return known_primes
if __name__ == '__main__':
args = parse_args()
known_primes = main(2, args.max)
print(known_primes)
|
[
"threading.Thread",
"argparse.ArgumentParser",
"json.loads",
"json.dumps",
"time.sleep",
"zmq.Context.instance",
"libs.mylib.is_prime"
] |
[((153, 239), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Find all prime number in a range (from 2)."""'}), "(description=\n 'Find all prime number in a range (from 2).')\n", (176, 239), False, 'import argparse\n'), ((1663, 1685), 'zmq.Context.instance', 'zmq.Context.instance', ([], {}), '()\n', (1683, 1685), False, 'import zmq\n'), ((2397, 2410), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2407, 2410), False, 'import time\n'), ((508, 530), 'zmq.Context.instance', 'zmq.Context.instance', ([], {}), '()\n', (528, 530), False, 'import zmq\n'), ((1897, 1968), 'threading.Thread', 'threading.Thread', ([], {'target': 'worker_routine', 'args': '(worker_url, control_url)'}), '(target=worker_routine, args=(worker_url, control_url))\n', (1913, 1968), False, 'import threading\n'), ((2144, 2160), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2154, 2160), False, 'import json\n'), ((1075, 1093), 'json.loads', 'json.loads', (['string'], {}), '(string)\n', (1085, 1093), False, 'import json\n'), ((1198, 1227), 'libs.mylib.is_prime', 'is_prime', (['value', 'known_primes'], {}), '(value, known_primes)\n', (1206, 1227), False, 'from libs.mylib import is_prime\n')]
|
# Copyright 2019 Katteli Inc.
# TestFlows.com Open-Source Software Testing Framework (http://testflows.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import json
import time
import base64
import threading
import importlib.util
from datetime import datetime
from functools import partial
import testflows.settings as settings
import testflows._core.cli.arg.type as argtype
from testflows._core import __version__
from testflows._core.flags import Flags, SKIP
from testflows._core.testtype import TestType
from testflows._core.cli.arg.common import epilog
from testflows._core.cli.arg.common import HelpFormatter
from testflows._core.cli.arg.handlers.handler import Handler as HandlerBase
from testflows._core.cli.arg.handlers.report.copyright import copyright
from testflows._core.transform.log.pipeline import ResultsLogPipeline
from testflows._core.transform.log.short import format_test, format_result
from testflows._core.utils.timefuncs import localfromtimestamp, strftimedelta
from testflows._core.utils.string import title as make_title
from testflows._core.transform.log.report.totals import Counts
from testflows._core.objects import Requirement
logo = '<img class="logo" src="data:image/png;base64,%(data)s" alt="logo"/>'
testflows = '<span class="testflows-logo"></span> [<span class="logo-test">Test</span><span class="logo-flows">Flows</span>]'
testflows_em = testflows.replace("[", "").replace("]", "")
FailResults = ["Fail", "Error", "Null"]
XoutResults = ["XOK", "XFail", "XError", "XNull"]
template = f"""
<section class="clearfix">%(logo)s%(confidential)s%(copyright)s</section>
---
# Requirements Coverage Report%(title)s
%(body)s
---
Generated by {testflows} Open-Source Test Framework
[<span class="logo-test">Test</span><span class="logo-flows">Flows</span>]: https://testflows.com
[ClickHouse]: https://clickhouse.yandex
<script>
%(script)s
</script>
"""
script = """
window.onload = function(){
// Toggle requirement description on click
document.querySelectorAll('.requirement').forEach(
function(item){
item.addEventListener('click', function(){
item.nextElementSibling.classList.toggle('show');
item.children[0].classList.toggle('active');
});
});
// Toggle test procedure on click
document.querySelectorAll('.test').forEach(
function(item){
item.addEventListener('click', function(){
item.nextElementSibling.classList.toggle('show');
item.classList.toggle('active');
});
});
}
"""
class Formatter:
utf_icons = {
"satisfied": "\u2714",
"unsatisfied": "\u2718",
"untested": "\u270E"
}
icon_colors = {
"satisfied": "color-ok",
"unsatisfied": "color-fail",
"untested": "color-error"
}
def format_logo(self, data):
if not data["company"].get("logo"):
return ""
data = base64.b64encode(data["company"]["logo"]).decode("utf-8")
return '\n<p>' + logo % {"data": data} + "</p>\n"
def format_confidential(self, data):
if not data["company"].get("confidential"):
return ""
return f'\n<p class="confidential">Document status - Confidential</p>\n'
def format_copyright(self, data):
if not data["company"].get("name"):
return ""
return (f'\n<p class="copyright">\n'
f'{copyright(data["company"]["name"])}\n'
"</p>\n")
def format_metadata(self, data):
metadata = data["metadata"]
s = (
"\n\n"
f"||**Date**||{localfromtimestamp(metadata['date']):%b %d, %Y %-H:%M}||\n"
f'||**Framework**||'
f'{testflows} {metadata["version"]}||\n'
)
return s + "\n"
def format_summary(self, data):
counts = data["counts"]
def template(value, title, color):
return (
f'<div class="c100 p{value} {color} smaller-title">'
f'<span>{value}%</span>'
f'<span class="title">{title}</span>'
'<div class="slice">'
'<div class="bar"></div>'
'<div class="fill"></div>'
'</div>'
'</div>\n')
s = "\n## Summary\n"
if counts.units <= 0:
s += "No tests"
else:
s += '<div class="chart">'
if counts.satisfied > 0:
s += template(f"{counts.satisfied / float(counts.units) * 100:.0f}", "Satisfied", "green")
if counts.unsatisfied > 0:
s += template(f"{counts.unsatisfied / float(counts.units) * 100:.0f}", "Unsatisfied", "red")
if counts.untested > 0:
s += template(f"{counts.untested / float(counts.units) * 100:.0f}", "Untested", "orange")
s += '</div>\n'
return s
def format_statistics(self, data):
counts = data["counts"]
result_map = {
"OK": "Satisfied",
"Fail": "Unsatisfied",
"Error": "Untested"
}
s = "\n\n## Statistics\n"
s += "||" + "||".join(
["<span></span>", "Units"]
+ [f'<span class="result result-{k.lower()}">{v}</span>' for k, v in result_map.items()]
) + "||\n"
s += "||" + "||".join([f"<center>{i}</center>" for i in ["**Requirements**",
str(counts.units), str(counts.satisfied),
str(counts.unsatisfied), str(counts.untested)]]) + "||\n"
return s + "\n"
def format_table(self, data):
reqs = data["requirements"]
s = "\n\n## Coverage\n"
for r in reqs.values():
s += f'\n<section class="requirement"><span class="requirement-inline"><i class="utf-icon {self.icon_colors[r["status"]]}">{self.utf_icons[r["status"]]}</i>{r["requirement"].name}</span></section>'
description = r["requirement"].description.replace("\\n","\n")
if description:
s += f'\n<div markdown="1" class="requirement-description hidden">\n{description}\n</div>'
for test in r["tests"]:
result = test["result"]
cls = result["result_type"].lower()
s += f'\n<div class="test"><span class="result result-inline result-{cls}">{result["result_type"]}</span><span class="time time-inline">{strftimedelta(result["message_rtime"])}</span>{test["test"]["test_name"]}</div>'
s += f'\n<div class="test-procedure hidden">\n```testflows\n{test["messages"]}\n```\n</div>'
if not r["tests"]:
s += f'\n<div class="no-tests">\n<span class="result-inline">\u270E</span>\nNo tests\n</div>'
s += "\n"
return s + "\n"
def format_title(self, data):
if data["title"]:
return "<br>" + make_title(data["title"])
return ""
def format(self, data):
body = ""
body += self.format_metadata(data)
body += self.format_summary(data)
body += self.format_statistics(data)
body += self.format_table(data)
return template.strip() % {
"logo": self.format_logo(data),
"confidential": self.format_confidential(data),
"copyright": self.format_copyright(data),
"body": body,
"script": script,
"title": self.format_title(data)
}
class Counts(object):
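    # note: this local Counts shadows the Counts imported from report.totals above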
def __init__(self, name, units, satisfied, unsatisfied, untested):
self.name = name
self.units = units
self.satisfied = satisfied
self.unsatisfied = unsatisfied
self.untested = untested
def __bool__(self):
return self.units > 0
class Handler(HandlerBase):
@classmethod
def add_command(cls, commands):
parser = commands.add_parser("coverage", help="requirements coverage report", epilog=epilog(),
description="Generate requirements coverage report.",
formatter_class=HelpFormatter)
parser.add_argument("requirements", metavar="requirements", type=partial(argtype.path, special=["-"]),
help="requirements source file, default: '-' (from input log)", nargs="?", default="-")
parser.add_argument("input", metavar="input", type=argtype.logfile("r", bufsize=1, encoding="utf-8"),
nargs="?", help="input log, default: stdin", default="-")
parser.add_argument("output", metavar="output", type=argtype.file("w", bufsize=1, encoding="utf-8"),
nargs="?", help='output file, default: stdout', default="-")
parser.add_argument("--show", metavar="status", type=str, nargs="+", help="verification status. Choices: 'satisfied', 'unsatisfied', 'untested'",
choices=["satisfied", "unsatisfied", "untested"],
default=["satisfied", "unsatisfied", "untested"])
parser.add_argument("--input-link", metavar="attribute",
help="attribute that is used as a link to the input log, default: job.url",
type=str, default="job.url")
parser.add_argument("--format", metavar="type", type=str,
help="output format, default: md (Markdown)", choices=["md"], default="md")
parser.add_argument("--copyright", metavar="name", help="add copyright notice", type=str)
parser.add_argument("--confidential", help="mark as confidential", action="store_true")
parser.add_argument("--logo", metavar="path", type=argtype.file("rb"),
help='use logo image (.png)')
parser.add_argument("--title", metavar="name", help="custom title", type=str)
parser.add_argument("--only", metavar="name", type=str, default=[], nargs="+",
help=("name of one or more specifications for which to generate coverage report"
", default: include all specifications. Only a unique part of the name can be specified."
))
parser.set_defaults(func=cls())
def get_attribute(self, result, name, default=None):
tests = list(result["tests"].values())
if not tests:
return default
test = tests[0]["test"]
for attr in test["attributes"]:
if attr["attribute_name"] == name:
return attr["attribute_value"]
return default
def table(self, results):
table = {
"header": ["Requirement", "Tests"],
"rows": [],
}
return table
def metadata(self, results):
return {
"date": time.time(),
"version": __version__,
}
def requirements(self, spec_names, path, results):
_requirements = {}
_specs = []
if path == "-":
for spec in results["specifications"]:
if spec_names:
matched = False
for name in spec_names:
if name in spec["specification_name"]:
matched = True
break
if not matched:
continue
_specs.append(spec)
for req in spec["specification_requirements"]:
_requirements[req["name"]] = {"requirement": Requirement(**req), "tests": []}
else:
spec = importlib.util.spec_from_file_location("requirements", path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
for name, value in vars(module).items():
if not isinstance(value, Requirement):
continue
_requirements[value.name] = {"requirement": value, "tests": []}
return (_specs, _requirements)
def add_test_messages(self, test, idx, tests, tests_by_parent, tests_by_id):
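        # gather the formatted log lines for this test (and any nested tests) between its start message and its result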
started = test["test"]["message_time"]
ended = test["result"]["message_time"]
messages = [format_test(test["test"], "", tests_by_parent, tests_by_id, no_colors=True)]
if getattr(TestType, test["test"]["test_type"]) > TestType.Test:
for t in tests[idx + 1:]:
flags = Flags(t["test"]["test_flags"])
if flags & SKIP and settings.show_skipped is False:
continue
if t["test"]["message_time"] > ended:
break
if getattr(TestType, t["test"]["test_type"]) >= TestType.Test \
and t["test"]["test_id"].startswith(test["test"]["test_id"]):
messages.append(format_test(t["test"], "", tests_by_parent, tests_by_id, no_colors=True))
messages.append(format_result(t["result"], no_colors=True))
else:
for t in tests[idx + 1:]:
flags = Flags(t["test"]["test_flags"])
if flags & SKIP and settings.show_skipped is False:
continue
if t["test"]["message_time"] > ended:
break
if t["test"]["test_id"].startswith(test["test"]["test_id"]):
messages.append(format_test(t["test"], "", tests_by_parent, tests_by_id, no_colors=True))
messages.append(format_result(t["result"], no_colors=True))
messages.append(format_result(test["result"], no_colors=True))
test["messages"] = "".join(messages)
return test
def add_tests(self, requirements, results):
tests = list(results["tests"].values())
for i, test in enumerate(tests):
flags = Flags(test["test"]["test_flags"])
if flags & SKIP and settings.show_skipped is False:
continue
result = test["result"]
for requirement in test["test"]["requirements"]:
if requirement["requirement_name"] in requirements:
requirements[requirement["requirement_name"]]["tests"].append(self.add_test_messages(test, i, tests, results["tests_by_parent"], results["tests_by_id"]))
return requirements
def counts(self, requirements):
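        # a requirement is satisfied when all of its linked tests are OK, unsatisfied when any test is not OK, and untested when it has no tests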
counts = Counts("requirements", *([0] * 4))
for req in requirements.values():
counts.units += 1
tests = req["tests"]
if not tests:
counts.untested += 1
req["status"] = "untested"
else:
satisfied = True
for test in tests:
result = test["result"]
if result["result_type"] != "OK":
satisfied = False
if satisfied:
counts.satisfied += 1
req["status"] = "satisfied"
else:
counts.unsatisfied += 1
req["status"] = "unsatisfied"
return counts
def company(self, args):
d = {}
if args.copyright:
d["name"] = args.copyright
if args.confidential:
d["confidential"] = True
if args.logo:
d["logo"] = args.logo.read()
return d
def data(self, source, results, args):
d = dict()
specs, requirements = self.requirements(args.only, source, results)
# if custom title was not specified generate a title
# that include all specification names
title = args.title
if title is None and specs:
title = "<br>".join([spec["specification_name"] for spec in specs])
d["title"] = title
d["requirements"] = self.add_tests(requirements, results)
d["metadata"] = self.metadata(results)
d["counts"] = self.counts(d["requirements"])
d["company"] = self.company(args)
counts = d["counts"]
return d
def generate(self, formatter, results, args):
output = args.output
output.write(
formatter.format(self.data(args.requirements, results, args))
)
output.write("\n")
def handle(self, args):
results = {}
formatter = Formatter()
ResultsLogPipeline(args.input, results).run()
self.generate(formatter, results, args)
|
[
"functools.partial",
"testflows._core.cli.arg.type.logfile",
"testflows._core.cli.arg.type.file",
"testflows._core.utils.timefuncs.strftimedelta",
"time.time",
"testflows._core.transform.log.report.totals.Counts",
"testflows._core.utils.timefuncs.localfromtimestamp",
"testflows._core.objects.Requirement",
"testflows._core.cli.arg.handlers.report.copyright.copyright",
"base64.b64encode",
"testflows._core.utils.string.title",
"testflows._core.flags.Flags",
"testflows._core.transform.log.short.format_result",
"testflows._core.transform.log.pipeline.ResultsLogPipeline",
"testflows._core.cli.arg.common.epilog",
"testflows._core.transform.log.short.format_test"
] |
[((14721, 14755), 'testflows._core.transform.log.report.totals.Counts', 'Counts', (['"""requirements"""', '*([0] * 4)'], {}), "('requirements', *([0] * 4))\n", (14727, 14755), False, 'from testflows._core.transform.log.report.totals import Counts\n'), ((11143, 11154), 'time.time', 'time.time', ([], {}), '()\n', (11152, 11154), False, 'import time\n'), ((12550, 12625), 'testflows._core.transform.log.short.format_test', 'format_test', (["test['test']", '""""""', 'tests_by_parent', 'tests_by_id'], {'no_colors': '(True)'}), "(test['test'], '', tests_by_parent, tests_by_id, no_colors=True)\n", (12561, 12625), False, 'from testflows._core.transform.log.short import format_test, format_result\n'), ((13903, 13948), 'testflows._core.transform.log.short.format_result', 'format_result', (["test['result']"], {'no_colors': '(True)'}), "(test['result'], no_colors=True)\n", (13916, 13948), False, 'from testflows._core.transform.log.short import format_test, format_result\n'), ((14174, 14207), 'testflows._core.flags.Flags', 'Flags', (["test['test']['test_flags']"], {}), "(test['test']['test_flags'])\n", (14179, 14207), False, 'from testflows._core.flags import Flags, SKIP\n'), ((3489, 3530), 'base64.b64encode', 'base64.b64encode', (["data['company']['logo']"], {}), "(data['company']['logo'])\n", (3505, 3530), False, 'import base64\n'), ((3967, 4001), 'testflows._core.cli.arg.handlers.report.copyright.copyright', 'copyright', (["data['company']['name']"], {}), "(data['company']['name'])\n", (3976, 4001), False, 'from testflows._core.cli.arg.handlers.report.copyright import copyright\n'), ((4162, 4198), 'testflows._core.utils.timefuncs.localfromtimestamp', 'localfromtimestamp', (["metadata['date']"], {}), "(metadata['date'])\n", (4180, 4198), False, 'from testflows._core.utils.timefuncs import localfromtimestamp, strftimedelta\n'), ((7438, 7463), 'testflows._core.utils.string.title', 'make_title', (["data['title']"], {}), "(data['title'])\n", (7448, 7463), True, 'from testflows._core.utils.string import title as make_title\n'), ((8487, 8495), 'testflows._core.cli.arg.common.epilog', 'epilog', ([], {}), '()\n', (8493, 8495), False, 'from testflows._core.cli.arg.common import epilog\n'), ((8680, 8716), 'functools.partial', 'partial', (['argtype.path'], {'special': "['-']"}), "(argtype.path, special=['-'])\n", (8687, 8716), False, 'from functools import partial\n'), ((8881, 8930), 'testflows._core.cli.arg.type.logfile', 'argtype.logfile', (['"""r"""'], {'bufsize': '(1)', 'encoding': '"""utf-8"""'}), "('r', bufsize=1, encoding='utf-8')\n", (8896, 8930), True, 'import testflows._core.cli.arg.type as argtype\n'), ((9067, 9113), 'testflows._core.cli.arg.type.file', 'argtype.file', (['"""w"""'], {'bufsize': '(1)', 'encoding': '"""utf-8"""'}), "('w', bufsize=1, encoding='utf-8')\n", (9079, 9113), True, 'import testflows._core.cli.arg.type as argtype\n'), ((10071, 10089), 'testflows._core.cli.arg.type.file', 'argtype.file', (['"""rb"""'], {}), "('rb')\n", (10083, 10089), True, 'import testflows._core.cli.arg.type as argtype\n'), ((12763, 12793), 'testflows._core.flags.Flags', 'Flags', (["t['test']['test_flags']"], {}), "(t['test']['test_flags'])\n", (12768, 12793), False, 'from testflows._core.flags import Flags, SKIP\n'), ((13403, 13433), 'testflows._core.flags.Flags', 'Flags', (["t['test']['test_flags']"], {}), "(t['test']['test_flags'])\n", (13408, 13433), False, 'from testflows._core.flags import Flags, SKIP\n'), ((16684, 16723), 'testflows._core.transform.log.pipeline.ResultsLogPipeline', 'ResultsLogPipeline', 
(['args.input', 'results'], {}), '(args.input, results)\n', (16702, 16723), False, 'from testflows._core.transform.log.pipeline import ResultsLogPipeline\n'), ((6972, 7010), 'testflows._core.utils.timefuncs.strftimedelta', 'strftimedelta', (["result['message_rtime']"], {}), "(result['message_rtime'])\n", (6985, 7010), False, 'from testflows._core.utils.timefuncs import localfromtimestamp, strftimedelta\n'), ((11865, 11883), 'testflows._core.objects.Requirement', 'Requirement', ([], {}), '(**req)\n', (11876, 11883), False, 'from testflows._core.objects import Requirement\n'), ((13173, 13245), 'testflows._core.transform.log.short.format_test', 'format_test', (["t['test']", '""""""', 'tests_by_parent', 'tests_by_id'], {'no_colors': '(True)'}), "(t['test'], '', tests_by_parent, tests_by_id, no_colors=True)\n", (13184, 13245), False, 'from testflows._core.transform.log.short import format_test, format_result\n'), ((13283, 13325), 'testflows._core.transform.log.short.format_result', 'format_result', (["t['result']"], {'no_colors': '(True)'}), "(t['result'], no_colors=True)\n", (13296, 13325), False, 'from testflows._core.transform.log.short import format_test, format_result\n'), ((13724, 13796), 'testflows._core.transform.log.short.format_test', 'format_test', (["t['test']", '""""""', 'tests_by_parent', 'tests_by_id'], {'no_colors': '(True)'}), "(t['test'], '', tests_by_parent, tests_by_id, no_colors=True)\n", (13735, 13796), False, 'from testflows._core.transform.log.short import format_test, format_result\n'), ((13834, 13876), 'testflows._core.transform.log.short.format_result', 'format_result', (["t['result']"], {'no_colors': '(True)'}), "(t['result'], no_colors=True)\n", (13847, 13876), False, 'from testflows._core.transform.log.short import format_test, format_result\n')]
|
import copy
import os
from http import HTTPStatus
from unittest.mock import MagicMock, patch
import pytest
import responses
from lighthouse import create_app
from lighthouse.constants.events import PE_BECKMAN_SOURCE_ALL_NEGATIVES, PE_BECKMAN_SOURCE_COMPLETED
from lighthouse.constants.fields import (
FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER,
FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME,
FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER,
FIELD_CHERRYTRACK_USER_ID,
FIELD_SAMPLE_ID,
)
from lighthouse.db.dart import load_sql_server_script
from lighthouse.helpers.dart import create_dart_connection
from lighthouse.helpers.mysql import create_mysql_connection_engine, get_table
from lighthouse.messages.message import Message
from lighthouse.types import EventMessage
from tests.fixtures.data.biosero.destination_plate_wells import build_cherrytrack_destination_plate_response
from tests.fixtures.data.biosero.source_plate_wells import build_cherrytrack_source_plates_response
from tests.fixtures.data.centres import CENTRES
from tests.fixtures.data.dart import DART_MONGO_MERGED_SAMPLES
from tests.fixtures.data.event_wh import EVENT_WH_DATA
from tests.fixtures.data.mlwh import (
COG_UK_IDS,
MLWH_LH_SAMPLES,
MLWH_LH_SAMPLES_MULTIPLE,
MLWH_SAMPLE_LIGHTHOUSE_SAMPLE,
MLWH_SAMPLE_STOCK_RESOURCE,
SAMPLES_FOR_MLWH_UPDATE,
cherrytrack_mlwh_example,
)
from tests.fixtures.data.plate_events import PLATE_EVENTS
from tests.fixtures.data.plates_lookup import PLATES_LOOKUP_WITH_SAMPLES, PLATES_LOOKUP_WITHOUT_SAMPLES
from tests.fixtures.data.priority_samples import PRIORITY_SAMPLES
from tests.fixtures.data.samples import SAMPLES, rows_for_samples_in_cherrytrack
from tests.fixtures.data.source_plates import SOURCE_PLATES
@pytest.fixture
def app():
# set the 'EVE_SETTINGS' env variable to easily switch to the testing environment when creating an app
os.environ["EVE_SETTINGS"] = "test.py"
app = create_app()
yield app
@pytest.fixture
def client(app):
return app.test_client()
@pytest.fixture
def biosero_auth_headers(app):
with app.app_context():
return {"Authorization": app.config.get("API_TOKENS_EVENTS").get("biosero_read_write")}
@pytest.fixture
def lighthouse_ui_auth_headers(app):
with app.app_context():
return {"Authorization": app.config.get("API_TOKENS_EVENTS").get("lighthouse_ui_read_write")}
@pytest.fixture
def centres(app):
with app.app_context():
centres_collection = app.data.driver.db.centres
_ = centres_collection.insert_many(CENTRES)
    # yield a copy so that the test can change it however it wants
yield copy.deepcopy(CENTRES)
# clear up after the fixture is used
with app.app_context():
centres_collection.delete_many({})
@pytest.fixture
def samples(app):
with app.app_context():
samples_collection = app.data.driver.db.samples
inserted_samples = samples_collection.insert_many(SAMPLES)
    # yield a copy so that the test can change it however it wants
yield copy.deepcopy(SAMPLES), inserted_samples
# clear up after the fixture is used
with app.app_context():
samples_collection.delete_many({})
@pytest.fixture
def clear_events(app):
try:
yield
finally:
with app.app_context():
events_collection = app.data.driver.db.events
events_collection.delete_many({})
@pytest.fixture
def priority_samples(app, samples):
_, samples = samples
# create a copy so that the test can change it however it needs
priority_samples = copy.deepcopy(PRIORITY_SAMPLES)
    # update the priority samples with the _id of the samples inserted into mongo; this only uses the first
    # len(PRIORITY_SAMPLES) inserted ids, so PRIORITY_SAMPLES must not contain more entries than SAMPLES
for count, priority_sample in enumerate(priority_samples):
priority_sample[FIELD_SAMPLE_ID] = samples.inserted_ids[count]
with app.app_context():
priority_samples_collection = app.data.driver.db.priority_samples
_ = priority_samples_collection.insert_many(priority_samples)
yield priority_samples
# clear up after the fixture is used
with app.app_context():
priority_samples_collection.delete_many({})
@pytest.fixture
def source_plates(app):
with app.app_context():
source_plates_collection = app.data.driver.db.source_plates
_ = source_plates_collection.insert_many(SOURCE_PLATES)
    # yield a copy so that the test can change it however it wants
yield copy.deepcopy(SOURCE_PLATES)
# clear up after the fixture is used
with app.app_context():
source_plates_collection.delete_many({})
@pytest.fixture
def plate_events(app):
with app.app_context():
events_collection = app.data.driver.db.events
inserted_events = events_collection.insert_many(PLATE_EVENTS)
    # yield a copy so that the test can change it however it wants
yield copy.deepcopy(PLATE_EVENTS), inserted_events
# clear up after the fixture is used
with app.app_context():
events_collection.delete_many({})
@pytest.fixture
def mocked_responses():
"""Easily mock responses from HTTP calls.
https://github.com/getsentry/responses#responses-as-a-pytest-fixture"""
with responses.RequestsMock() as rsps:
yield rsps
@pytest.fixture
def labwhere_samples_simple(app, mocked_responses):
labwhere_url = f"{app.config['LABWHERE_URL']}/api/labwares_by_barcode"
body = [
{
"barcode": "plate_123",
"location_barcode": "location_123",
}
]
mocked_responses.add(responses.POST, labwhere_url, json=body, status=HTTPStatus.OK)
@pytest.fixture
def samples_for_mlwh_update():
return SAMPLES_FOR_MLWH_UPDATE
@pytest.fixture
def cog_uk_ids():
return COG_UK_IDS
# ********************** WAREHOUSE DATA ************************** #
@pytest.fixture
def mlwh_lh_samples(app, mlwh_sql_engine):
insert_into_mlwh(app, MLWH_LH_SAMPLES, mlwh_sql_engine, app.config["MLWH_LIGHTHOUSE_SAMPLE_TABLE"])
@pytest.fixture
def mlwh_lh_samples_multiple(app, mlwh_sql_engine):
insert_into_mlwh(app, MLWH_LH_SAMPLES_MULTIPLE, mlwh_sql_engine, app.config["MLWH_LIGHTHOUSE_SAMPLE_TABLE"])
@pytest.fixture
def mlwh_sentinel_cherrypicked(app, mlwh_sql_engine):
def delete_data():
delete_from_mlwh(app, mlwh_sql_engine, app.config["MLWH_STOCK_RESOURCES_TABLE"])
delete_from_mlwh(app, mlwh_sql_engine, app.config["MLWH_SAMPLE_TABLE"])
delete_from_mlwh(app, mlwh_sql_engine, app.config["MLWH_STUDY_TABLE"])
try:
delete_data()
# inserts
insert_into_mlwh(
app,
MLWH_SAMPLE_STOCK_RESOURCE["sample"],
mlwh_sql_engine,
app.config["MLWH_SAMPLE_TABLE"],
)
insert_into_mlwh(
app,
MLWH_SAMPLE_STOCK_RESOURCE["study"],
mlwh_sql_engine,
app.config["MLWH_STUDY_TABLE"],
)
insert_into_mlwh(
app,
MLWH_SAMPLE_STOCK_RESOURCE["stock_resource"],
mlwh_sql_engine,
app.config["MLWH_STOCK_RESOURCES_TABLE"],
)
yield
finally:
delete_data()
@pytest.fixture
def mlwh_beckman_cherrypicked(app, mlwh_sql_engine):
def delete_data():
delete_from_mlwh(app, mlwh_sql_engine, app.config["MLWH_SAMPLE_TABLE"])
delete_from_mlwh(app, mlwh_sql_engine, app.config["MLWH_LIGHTHOUSE_SAMPLE_TABLE"])
try:
delete_data()
# inserts
insert_into_mlwh(
app,
MLWH_SAMPLE_LIGHTHOUSE_SAMPLE["lighthouse_sample"],
mlwh_sql_engine,
app.config["MLWH_LIGHTHOUSE_SAMPLE_TABLE"],
)
insert_into_mlwh(
app,
MLWH_SAMPLE_LIGHTHOUSE_SAMPLE["sample"],
mlwh_sql_engine,
app.config["MLWH_SAMPLE_TABLE"],
)
yield
finally:
delete_data()
@pytest.fixture
def mlwh_sentinel_and_beckman_cherrypicked(app, mlwh_sql_engine):
def delete_data():
delete_from_mlwh(app, mlwh_sql_engine, app.config["MLWH_STOCK_RESOURCES_TABLE"])
delete_from_mlwh(app, mlwh_sql_engine, app.config["MLWH_SAMPLE_TABLE"])
delete_from_mlwh(app, mlwh_sql_engine, app.config["MLWH_STUDY_TABLE"])
delete_from_mlwh(app, mlwh_sql_engine, app.config["MLWH_LIGHTHOUSE_SAMPLE_TABLE"])
try:
delete_data()
# inserts
insert_into_mlwh(
app,
MLWH_SAMPLE_LIGHTHOUSE_SAMPLE["lighthouse_sample"],
mlwh_sql_engine,
app.config["MLWH_LIGHTHOUSE_SAMPLE_TABLE"],
)
insert_into_mlwh(
app,
MLWH_SAMPLE_STOCK_RESOURCE["sample"] + MLWH_SAMPLE_LIGHTHOUSE_SAMPLE["sample"], # type: ignore
mlwh_sql_engine,
app.config["MLWH_SAMPLE_TABLE"],
)
insert_into_mlwh(
app,
MLWH_SAMPLE_STOCK_RESOURCE["study"],
mlwh_sql_engine,
app.config["MLWH_STUDY_TABLE"],
)
insert_into_mlwh(
app,
MLWH_SAMPLE_STOCK_RESOURCE["stock_resource"],
mlwh_sql_engine,
app.config["MLWH_STOCK_RESOURCES_TABLE"],
)
yield
finally:
delete_data()
def insert_into_mlwh(app, data, mlwh_sql_engine, table_name):
table = get_table(mlwh_sql_engine, table_name)
with mlwh_sql_engine.begin() as connection:
connection.execute(table.delete()) # delete all rows from table first
print("Inserting MLWH test data")
connection.execute(table.insert(), data)
def delete_from_mlwh(app, mlwh_sql_engine, table_name):
table = get_table(mlwh_sql_engine, table_name)
with mlwh_sql_engine.begin() as connection:
print("Deleting MLWH test data")
connection.execute(table.delete())
@pytest.fixture
def event_wh_data(app, event_wh_sql_engine):
try:
subjects_table = get_table(event_wh_sql_engine, app.config["EVENT_WH_SUBJECTS_TABLE"])
roles_table = get_table(event_wh_sql_engine, app.config["EVENT_WH_ROLES_TABLE"])
events_table = get_table(event_wh_sql_engine, app.config["EVENT_WH_EVENTS_TABLE"])
event_types_table = get_table(event_wh_sql_engine, app.config["EVENT_WH_EVENT_TYPES_TABLE"])
subject_types_table = get_table(event_wh_sql_engine, app.config["EVENT_WH_SUBJECT_TYPES_TABLE"])
role_types_table = get_table(event_wh_sql_engine, app.config["EVENT_WH_ROLE_TYPES_TABLE"])
def delete_event_warehouse_data():
with event_wh_sql_engine.begin() as connection:
connection.execute(roles_table.delete())
connection.execute(subjects_table.delete())
connection.execute(events_table.delete())
connection.execute(event_types_table.delete())
connection.execute(subject_types_table.delete())
connection.execute(role_types_table.delete())
delete_event_warehouse_data()
with event_wh_sql_engine.begin() as connection:
print("Inserting Events Warehouse test data")
connection.execute(role_types_table.insert(), EVENT_WH_DATA["role_types"])
connection.execute(event_types_table.insert(), EVENT_WH_DATA["event_types"])
connection.execute(subject_types_table.insert(), EVENT_WH_DATA["subject_types"])
connection.execute(subjects_table.insert(), EVENT_WH_DATA["subjects"])
connection.execute(events_table.insert(), EVENT_WH_DATA["events"])
connection.execute(roles_table.insert(), EVENT_WH_DATA["roles"])
yield
finally:
delete_event_warehouse_data()
@pytest.fixture
def mlwh_sql_engine(app):
return create_mysql_connection_engine(app.config["WAREHOUSES_RW_CONN_STRING"], app.config["MLWH_DB"])
@pytest.fixture
def dart_connection(app):
return create_dart_connection()
@pytest.fixture
def dart_schema_create(app):
with app.app_context():
load_sql_server_script("tests/data/dart/schema.sql")
@pytest.fixture
def dart_samples(app, dart_schema_create):
with app.app_context():
load_sql_server_script("tests/data/dart/seed.sql")
@pytest.fixture
def dart_mongo_merged_samples():
return DART_MONGO_MERGED_SAMPLES
@pytest.fixture
def event_wh_sql_engine(app):
return create_mysql_connection_engine(app.config["WAREHOUSES_RW_CONN_STRING"], app.config["EVENTS_WH_DB"])
@pytest.fixture
def message_unknown():
message_content: EventMessage = {
"event": {
"uuid": "1770dbcd-0abf-4293-ac62-dd26964f80b0",
"event_type": "no_callbacks",
"occured_at": "2020-11-26T15:58:20",
"user_identifier": "test1",
"subjects": [],
"metadata": {},
},
"lims": "LH_TEST",
}
return Message(message_content)
@pytest.fixture
def message_source_complete():
message_content: EventMessage = {
"event": {
"uuid": "1770dbcd-0abf-4293-ac62-dd26964f80b0",
"event_type": PE_BECKMAN_SOURCE_COMPLETED,
"occured_at": "2020-11-26T15:58:20",
"user_identifier": "test1",
"subjects": [
{
"role_type": "sample",
"subject_type": "sample",
"friendly_name": "friendly_name",
"uuid": "00000000-1111-2222-3333-555555555555",
},
{
"role_type": "cherrypicking_source_labware",
"subject_type": "plate",
"friendly_name": "plate-barcode",
"uuid": "00000000-1111-2222-3333-555555555556",
},
{
"role_type": "robot",
"subject_type": "robot",
"friendly_name": "robot-serial",
"uuid": "00000000-1111-2222-3333-555555555557",
},
],
"metadata": {},
},
"lims": "LH_TEST",
}
return Message(message_content)
@pytest.fixture
def message_source_all_negative():
message_content: EventMessage = {
"event": {
"uuid": "1770dbcd-0abf-4293-ac62-dd26964f80b0",
"event_type": PE_BECKMAN_SOURCE_ALL_NEGATIVES,
"occured_at": "2020-11-26T15:58:20",
"user_identifier": "test1",
"subjects": [
{
"role_type": "cherrypicking_source_labware",
"subject_type": "plate",
"friendly_name": "plate-barcode",
"uuid": "00000000-1111-2222-3333-555555555556",
},
{
"role_type": "robot",
"subject_type": "robot",
"friendly_name": "robot-serial",
"uuid": "00000000-1111-2222-3333-555555555557",
},
],
"metadata": {},
},
"lims": "LH_TEST",
}
return Message(message_content)
@pytest.fixture
def plates_lookup_with_samples(samples, priority_samples):
return PLATES_LOOKUP_WITH_SAMPLES
@pytest.fixture
def plates_lookup_without_samples(samples, priority_samples):
return PLATES_LOOKUP_WITHOUT_SAMPLES
@pytest.fixture
def mocked_rabbit_channel(app):
with app.app_context():
mocked_broker = MagicMock()
with patch("lighthouse.classes.services.warehouse.Broker", return_value=mocked_broker):
mocked_channel = MagicMock()
mocked_broker.__enter__.return_value = mocked_channel
yield mocked_channel
@pytest.fixture
def cherrytrack_mock_run_info(
app, mocked_responses, run_id, cherrytrack_run_info_response, cherrytrack_mock_run_info_status
):
run_url = f"{app.config['CHERRYTRACK_URL']}/automation-system-runs/{run_id}"
mocked_responses.add(
responses.GET,
run_url,
json=cherrytrack_run_info_response,
status=cherrytrack_mock_run_info_status,
)
yield
@pytest.fixture
def baracoda_mock_barcodes_group(app, mocked_responses, baracoda_mock_responses, baracoda_mock_status):
for centre_prefix in baracoda_mock_responses.keys():
if baracoda_mock_responses[centre_prefix] is not None:
num_samples = len(baracoda_mock_responses[centre_prefix]["barcodes_group"]["barcodes"])
baracoda_url = (
f"http://{app.config['BARACODA_URL']}" f"/barcodes_group/{centre_prefix}/new?count={num_samples}"
)
mocked_responses.add(
responses.POST,
baracoda_url,
json=baracoda_mock_responses[centre_prefix],
status=baracoda_mock_status,
)
yield
@pytest.fixture
def cherrytrack_mock_source_plates_status():
return HTTPStatus.OK
@pytest.fixture
def cherrytrack_mock_run_info_status():
return HTTPStatus.OK
@pytest.fixture
def cherrytrack_mock_destination_plate_status():
return HTTPStatus.OK
@pytest.fixture
def baracoda_mock_status():
return HTTPStatus.CREATED
@pytest.fixture
def cherrytrack_mock_source_plates(
app,
mocked_responses,
source_barcode,
destination_barcode,
cherrytrack_source_plates_response,
cherrytrack_mock_source_plates_status,
):
source_plates_url = f"{app.config['CHERRYTRACK_URL']}/source-plates/{source_barcode}"
mocked_responses.add(
responses.GET,
source_plates_url,
json=cherrytrack_source_plates_response,
status=cherrytrack_mock_source_plates_status,
)
yield
@pytest.fixture
def cherrytrack_mock_destination_plate(
app,
mocked_responses,
destination_barcode,
cherrytrack_destination_plate_response,
cherrytrack_mock_destination_plate_status,
):
destination_plate_url = f"{app.config['CHERRYTRACK_URL']}/destination-plates/{destination_barcode}"
mocked_responses.add(
responses.GET,
destination_plate_url,
json=cherrytrack_destination_plate_response,
status=cherrytrack_mock_destination_plate_status,
)
yield
@pytest.fixture
def cherrytrack_run_info_response(run_id):
return {
"data": {
"id": run_id,
FIELD_CHERRYTRACK_USER_ID: "user1",
FIELD_CHERRYTRACK_LIQUID_HANDLER_SERIAL_NUMBER: "aLiquidHandlerSerialNumber",
FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_MANUFACTURER: "biosero",
FIELD_CHERRYTRACK_AUTOMATION_SYSTEM_NAME: "CPA",
}
}
@pytest.fixture
def cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id):
return build_cherrytrack_destination_plate_response(destination_barcode, source_barcode, run_id)
def cherrytrack_destination_plate_response_duplicated_wells(cherrytrack_destination_plate_response):
cherrytrack_destination_plate_response["wells"][0]["destination_coordinate"] = "H12"
return cherrytrack_destination_plate_response
@pytest.fixture
def cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode):
return build_cherrytrack_source_plates_response(run_id, source_barcode, destination_barcode)
@pytest.fixture
def samples_from_cherrytrack_into_mongo(app, source_barcode):
try:
samples = rows_for_samples_in_cherrytrack(source_barcode)
with app.app_context():
samples_collection = app.data.driver.db.samples
inserted_samples = samples_collection.insert_many(samples)
            # yield a copy so that the test can change it however it wants
yield copy.deepcopy(samples), inserted_samples
# clear up after the fixture is used
finally:
samples_collection.delete_many({})
@pytest.fixture
def mlwh_samples_in_cherrytrack(app, source_barcode, mlwh_sql_engine):
def delete_data():
delete_from_mlwh(app, mlwh_sql_engine, app.config["MLWH_SAMPLE_TABLE"])
delete_from_mlwh(app, mlwh_sql_engine, app.config["MLWH_LIGHTHOUSE_SAMPLE_TABLE"])
try:
delete_data()
example = cherrytrack_mlwh_example(source_barcode)
# inserts
insert_into_mlwh(
app,
example["lighthouse_sample"],
mlwh_sql_engine,
app.config["MLWH_LIGHTHOUSE_SAMPLE_TABLE"],
)
insert_into_mlwh(
app,
example["sample"],
mlwh_sql_engine,
app.config["MLWH_SAMPLE_TABLE"],
)
yield
finally:
delete_data()
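# --- Illustrative sketch (not part of this conftest): how a test module would consume the
# --- tuple-yielding fixtures above. The test below is hypothetical and belongs in a
# --- test_*.py file; it only relies on pymongo's InsertManyResult.inserted_ids attribute.
def test_samples_fixture_shape_example(samples):
    samples_copy, insert_result = samples
    assert len(insert_result.inserted_ids) == len(samples_copy)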
|
[
"lighthouse.helpers.mysql.create_mysql_connection_engine",
"copy.deepcopy",
"lighthouse.helpers.dart.create_dart_connection",
"tests.fixtures.data.biosero.destination_plate_wells.build_cherrytrack_destination_plate_response",
"tests.fixtures.data.biosero.source_plate_wells.build_cherrytrack_source_plates_response",
"responses.RequestsMock",
"unittest.mock.MagicMock",
"lighthouse.create_app",
"tests.fixtures.data.samples.rows_for_samples_in_cherrytrack",
"tests.fixtures.data.mlwh.cherrytrack_mlwh_example",
"lighthouse.db.dart.load_sql_server_script",
"unittest.mock.patch",
"lighthouse.messages.message.Message",
"lighthouse.helpers.mysql.get_table"
] |
[((1959, 1971), 'lighthouse.create_app', 'create_app', ([], {}), '()\n', (1969, 1971), False, 'from lighthouse import create_app\n'), ((3595, 3626), 'copy.deepcopy', 'copy.deepcopy', (['PRIORITY_SAMPLES'], {}), '(PRIORITY_SAMPLES)\n', (3608, 3626), False, 'import copy\n'), ((9441, 9479), 'lighthouse.helpers.mysql.get_table', 'get_table', (['mlwh_sql_engine', 'table_name'], {}), '(mlwh_sql_engine, table_name)\n', (9450, 9479), False, 'from lighthouse.helpers.mysql import create_mysql_connection_engine, get_table\n'), ((9769, 9807), 'lighthouse.helpers.mysql.get_table', 'get_table', (['mlwh_sql_engine', 'table_name'], {}), '(mlwh_sql_engine, table_name)\n', (9778, 9807), False, 'from lighthouse.helpers.mysql import create_mysql_connection_engine, get_table\n'), ((11845, 11944), 'lighthouse.helpers.mysql.create_mysql_connection_engine', 'create_mysql_connection_engine', (["app.config['WAREHOUSES_RW_CONN_STRING']", "app.config['MLWH_DB']"], {}), "(app.config['WAREHOUSES_RW_CONN_STRING'], app\n .config['MLWH_DB'])\n", (11875, 11944), False, 'from lighthouse.helpers.mysql import create_mysql_connection_engine, get_table\n'), ((11995, 12019), 'lighthouse.helpers.dart.create_dart_connection', 'create_dart_connection', ([], {}), '()\n', (12017, 12019), False, 'from lighthouse.helpers.dart import create_dart_connection\n'), ((12451, 12555), 'lighthouse.helpers.mysql.create_mysql_connection_engine', 'create_mysql_connection_engine', (["app.config['WAREHOUSES_RW_CONN_STRING']", "app.config['EVENTS_WH_DB']"], {}), "(app.config['WAREHOUSES_RW_CONN_STRING'], app\n .config['EVENTS_WH_DB'])\n", (12481, 12555), False, 'from lighthouse.helpers.mysql import create_mysql_connection_engine, get_table\n'), ((12951, 12975), 'lighthouse.messages.message.Message', 'Message', (['message_content'], {}), '(message_content)\n', (12958, 12975), False, 'from lighthouse.messages.message import Message\n'), ((14172, 14196), 'lighthouse.messages.message.Message', 'Message', (['message_content'], {}), '(message_content)\n', (14179, 14196), False, 'from lighthouse.messages.message import Message\n'), ((15153, 15177), 'lighthouse.messages.message.Message', 'Message', (['message_content'], {}), '(message_content)\n', (15160, 15177), False, 'from lighthouse.messages.message import Message\n'), ((18781, 18874), 'tests.fixtures.data.biosero.destination_plate_wells.build_cherrytrack_destination_plate_response', 'build_cherrytrack_destination_plate_response', (['destination_barcode', 'source_barcode', 'run_id'], {}), '(destination_barcode,\n source_barcode, run_id)\n', (18825, 18874), False, 'from tests.fixtures.data.biosero.destination_plate_wells import build_cherrytrack_destination_plate_response\n'), ((19227, 19316), 'tests.fixtures.data.biosero.source_plate_wells.build_cherrytrack_source_plates_response', 'build_cherrytrack_source_plates_response', (['run_id', 'source_barcode', 'destination_barcode'], {}), '(run_id, source_barcode,\n destination_barcode)\n', (19267, 19316), False, 'from tests.fixtures.data.biosero.source_plate_wells import build_cherrytrack_source_plates_response\n'), ((2656, 2678), 'copy.deepcopy', 'copy.deepcopy', (['CENTRES'], {}), '(CENTRES)\n', (2669, 2678), False, 'import copy\n'), ((4554, 4582), 'copy.deepcopy', 'copy.deepcopy', (['SOURCE_PLATES'], {}), '(SOURCE_PLATES)\n', (4567, 4582), False, 'import copy\n'), ((5303, 5327), 'responses.RequestsMock', 'responses.RequestsMock', ([], {}), '()\n', (5325, 5327), False, 'import responses\n'), ((10038, 10107), 'lighthouse.helpers.mysql.get_table', 
'get_table', (['event_wh_sql_engine', "app.config['EVENT_WH_SUBJECTS_TABLE']"], {}), "(event_wh_sql_engine, app.config['EVENT_WH_SUBJECTS_TABLE'])\n", (10047, 10107), False, 'from lighthouse.helpers.mysql import create_mysql_connection_engine, get_table\n'), ((10130, 10196), 'lighthouse.helpers.mysql.get_table', 'get_table', (['event_wh_sql_engine', "app.config['EVENT_WH_ROLES_TABLE']"], {}), "(event_wh_sql_engine, app.config['EVENT_WH_ROLES_TABLE'])\n", (10139, 10196), False, 'from lighthouse.helpers.mysql import create_mysql_connection_engine, get_table\n'), ((10220, 10287), 'lighthouse.helpers.mysql.get_table', 'get_table', (['event_wh_sql_engine', "app.config['EVENT_WH_EVENTS_TABLE']"], {}), "(event_wh_sql_engine, app.config['EVENT_WH_EVENTS_TABLE'])\n", (10229, 10287), False, 'from lighthouse.helpers.mysql import create_mysql_connection_engine, get_table\n'), ((10316, 10388), 'lighthouse.helpers.mysql.get_table', 'get_table', (['event_wh_sql_engine', "app.config['EVENT_WH_EVENT_TYPES_TABLE']"], {}), "(event_wh_sql_engine, app.config['EVENT_WH_EVENT_TYPES_TABLE'])\n", (10325, 10388), False, 'from lighthouse.helpers.mysql import create_mysql_connection_engine, get_table\n'), ((10419, 10493), 'lighthouse.helpers.mysql.get_table', 'get_table', (['event_wh_sql_engine', "app.config['EVENT_WH_SUBJECT_TYPES_TABLE']"], {}), "(event_wh_sql_engine, app.config['EVENT_WH_SUBJECT_TYPES_TABLE'])\n", (10428, 10493), False, 'from lighthouse.helpers.mysql import create_mysql_connection_engine, get_table\n'), ((10521, 10592), 'lighthouse.helpers.mysql.get_table', 'get_table', (['event_wh_sql_engine', "app.config['EVENT_WH_ROLE_TYPES_TABLE']"], {}), "(event_wh_sql_engine, app.config['EVENT_WH_ROLE_TYPES_TABLE'])\n", (10530, 10592), False, 'from lighthouse.helpers.mysql import create_mysql_connection_engine, get_table\n'), ((12103, 12155), 'lighthouse.db.dart.load_sql_server_script', 'load_sql_server_script', (['"""tests/data/dart/schema.sql"""'], {}), "('tests/data/dart/schema.sql')\n", (12125, 12155), False, 'from lighthouse.db.dart import load_sql_server_script\n'), ((12253, 12303), 'lighthouse.db.dart.load_sql_server_script', 'load_sql_server_script', (['"""tests/data/dart/seed.sql"""'], {}), "('tests/data/dart/seed.sql')\n", (12275, 12303), False, 'from lighthouse.db.dart import load_sql_server_script\n'), ((15516, 15527), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (15525, 15527), False, 'from unittest.mock import MagicMock, patch\n'), ((19420, 19467), 'tests.fixtures.data.samples.rows_for_samples_in_cherrytrack', 'rows_for_samples_in_cherrytrack', (['source_barcode'], {}), '(source_barcode)\n', (19451, 19467), False, 'from tests.fixtures.data.samples import SAMPLES, rows_for_samples_in_cherrytrack\n'), ((20191, 20231), 'tests.fixtures.data.mlwh.cherrytrack_mlwh_example', 'cherrytrack_mlwh_example', (['source_barcode'], {}), '(source_barcode)\n', (20215, 20231), False, 'from tests.fixtures.data.mlwh import COG_UK_IDS, MLWH_LH_SAMPLES, MLWH_LH_SAMPLES_MULTIPLE, MLWH_SAMPLE_LIGHTHOUSE_SAMPLE, MLWH_SAMPLE_STOCK_RESOURCE, SAMPLES_FOR_MLWH_UPDATE, cherrytrack_mlwh_example\n'), ((3057, 3079), 'copy.deepcopy', 'copy.deepcopy', (['SAMPLES'], {}), '(SAMPLES)\n', (3070, 3079), False, 'import copy\n'), ((4973, 5000), 'copy.deepcopy', 'copy.deepcopy', (['PLATE_EVENTS'], {}), '(PLATE_EVENTS)\n', (4986, 5000), False, 'import copy\n'), ((15541, 15627), 'unittest.mock.patch', 'patch', (['"""lighthouse.classes.services.warehouse.Broker"""'], {'return_value': 'mocked_broker'}), 
"('lighthouse.classes.services.warehouse.Broker', return_value=\n mocked_broker)\n", (15546, 15627), False, 'from unittest.mock import MagicMock, patch\n'), ((15653, 15664), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (15662, 15664), False, 'from unittest.mock import MagicMock, patch\n'), ((19718, 19740), 'copy.deepcopy', 'copy.deepcopy', (['samples'], {}), '(samples)\n', (19731, 19740), False, 'import copy\n')]
|
from botocore.exceptions import ClientError
from osdu_commons.utils.throttle import throttle_exception, ThrottledBotoResource
class Counter:
def __init__(self):
self.counter = 0
def count(self):
self.counter += 1
def test_throttle_exception():
class BogusException(Exception):
pass
max_retries = 1
retry_on_bogus = throttle_exception([BogusException], max_sleep=0.1, max_retries=max_retries)
@retry_on_bogus
def bogus_function(counter):
counter.count()
if counter.counter <= max_retries:
raise BogusException
counter = Counter()
bogus_function(counter)
assert counter.counter == max_retries + 1
def test_throttled_boto_resource():
max_retries = 1
class BogusResource:
def __init__(self, counter, max_retries):
self.counter = counter
self._max_retries = max_retries
def bogus_function(self):
self.counter.count()
if self.counter.counter <= self._max_retries:
raise ClientError(
error_response={
'Error': {
'Code': 'ThrottlingException'
}
},
operation_name='bogus'
)
counter = Counter()
bogus_resource = ThrottledBotoResource(BogusResource(counter, max_retries))
bogus_resource.bogus_function()
assert counter.counter == max_retries + 1
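# --- Illustrative sketch (not part of the tests): wrapping a real boto3 table resource so
# --- that ThrottlingException errors are retried transparently. boto3, the DynamoDB service
# --- and the table/key arguments are assumptions made only for this example.
def get_item_with_retries(table_name, key):
    import boto3  # assumed to be available alongside botocore

    table = ThrottledBotoResource(boto3.resource('dynamodb').Table(table_name))
    return table.get_item(Key=key)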
|
[
"botocore.exceptions.ClientError",
"osdu_commons.utils.throttle.throttle_exception"
] |
[((366, 442), 'osdu_commons.utils.throttle.throttle_exception', 'throttle_exception', (['[BogusException]'], {'max_sleep': '(0.1)', 'max_retries': 'max_retries'}), '([BogusException], max_sleep=0.1, max_retries=max_retries)\n', (384, 442), False, 'from osdu_commons.utils.throttle import throttle_exception, ThrottledBotoResource\n'), ((1058, 1156), 'botocore.exceptions.ClientError', 'ClientError', ([], {'error_response': "{'Error': {'Code': 'ThrottlingException'}}", 'operation_name': '"""bogus"""'}), "(error_response={'Error': {'Code': 'ThrottlingException'}},\n operation_name='bogus')\n", (1069, 1156), False, 'from botocore.exceptions import ClientError\n')]
|
# -*- coding: utf-8 -*-
"""
Navigation properties
---------------------
The entity can define properties that link to other entities. These are known
as navigation properties and are supported in this library.
.. code-block:: python
>>> order = Service.query(Order).first()
>>> order.Shipper
<Entity(Shipper:3)>
>>> order.Shipper.CompanyName
'Federal Shipping'
When creating new instances, relationships can be assigned via navigation
properties:
.. code-block:: python
# query a shipper instance, just for this example
Shipper = Service.entities['Shipper']
my_shipper = Service.query(Shipper).first()
# assign for the new Order
order.Shipper = my_shipper
Service.save(order)
"""
try:
# noinspection PyUnresolvedReferences
from urllib.parse import urljoin
except ImportError:
# noinspection PyUnresolvedReferences
from urlparse import urljoin
class NavigationProperty(object):
"""
A Property-like object for marking relationships between entities, but does
not inherit from PropertyBase.
"""
def __init__(self, name, entitycls, collection=False, foreign_key=None, containment=False):
from odata.property import PropertyBase
self.name = name
self.entitycls = entitycls
self.is_collection = collection
self.is_containment = containment
if isinstance(foreign_key, PropertyBase):
self.foreign_key = foreign_key.name
else:
self.foreign_key = foreign_key
def __repr__(self):
return u'<NavigationProperty to {0}>'.format(self.entitycls)
def instances_from_data(self, raw_data, connection):
if self.is_collection:
return [self.instance_from_data(d, connection) for d in raw_data['value']] if raw_data['value'] else []
else:
return self.instance_from_data(raw_data, connection) if raw_data else None
    def instance_from_data(self, raw_data, connection):  # mwa: this needs to be separated from navproperty
entitycls = self._getClass_by_response_type(self.entitycls, raw_data.get('@odata.type'))
e = entitycls.__new__(entitycls, from_data=raw_data)
es = e.__odata__
es.connection = connection
return e
def _getClass_by_response_type(self, matched_class, odata_type):
if not odata_type: return matched_class
for subclass in matched_class.__subclasses__():
if subclass.__odata_type__ == odata_type[1:]: return self._getClass_by_response_type(subclass, odata_type)
return matched_class
def _get_parent_cache(self, instance):
es = instance.__odata__
ic = es.nav_cache
if self.name not in ic:
cache = {}
ic[self.name] = cache
else:
cache = ic[self.name]
return cache
def _get_instances_from_server(self, instance):
es = instance.__odata__
connection = es.connection
parent_url = es.instance_url
parent_url += '/'
url = urljoin(parent_url, self.name)
raw_data = connection.execute_get(url)
instances = self.instances_from_data(raw_data, connection)
while '@odata.nextLink' in raw_data:
url = raw_data.get('@odata.nextLink')
raw_data = connection.execute_get(url)
instances.extend(self.instances_from_data(raw_data, connection))
return instances
def __set__(self, instance, value):
"""
:type instance: odata.entity.EntityBase
"""
cache = self._get_parent_cache(instance)
if self.is_collection:
cache['collection'] = value
else:
cache['single'] = value
instance.__odata__.set_property_dirty(self)
def __get__(self, instance, owner):
"""
:type instance: odata.entity.EntityBase
"""
if instance is None:
return self
es = instance.__odata__
cache = self._get_parent_cache(instance)
if es.instance_url is None:
if self.is_collection:
return cache.get('collection', [])
return cache.get('single', None)
cache_type = 'collection' if self.is_collection else 'single'
try:
return cache[cache_type]
except KeyError:
cache[cache_type] = self._get_instances_from_server(instance)
return cache[cache_type]
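# --- Illustrative check (not part of the library source): how _getClass_by_response_type
# --- resolves a subclass from the '@odata.type' annotation in a payload. The _Base and
# --- _Derived classes are hypothetical and exist only for this demonstration.
def _example_subclass_resolution():
    class _Base(object):
        __odata_type__ = 'Example.Base'

    class _Derived(_Base):
        __odata_type__ = 'Example.Derived'

    prop = NavigationProperty('Example', _Base)
    # '#Example.Derived' mimics the '@odata.type' value returned by the service
    assert prop._getClass_by_response_type(_Base, '#Example.Derived') is _Derived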
|
[
"urlparse.urljoin"
] |
[((3062, 3092), 'urlparse.urljoin', 'urljoin', (['parent_url', 'self.name'], {}), '(parent_url, self.name)\n', (3069, 3092), False, 'from urlparse import urljoin\n')]
|
from watchdog.events import PatternMatchingEventHandler
from utils import debug
from urllib.parse import unquote
from rtf.Rtf2Markdown import getMarkdown
import watchdog.events
import olefile
import sqlite3
import configparser
import codecs
import threading
class FileHandlerInterface(PatternMatchingEventHandler):
"""Base class for all the Sticky Notes file handlers."""
sync_engine = None
idle_timeout = None
def __init__(self, sync_engine, patterns=None):
self.sync_engine = sync_engine
super().__init__(ignore_directories=True, patterns=patterns)
def is_valid_event(self, event):
"""Check if event is a valid event to be proceesed by the file handler."""
if self.sync_engine.sticky_notes_file_path != event.src_path:
return False
if event.event_type == watchdog.events.EVENT_TYPE_MODIFIED:
return True
elif event.event_type == watchdog.events.EVENT_TYPE_DELETED:
debug(self.sync_engine.sticky_notes_file_path + ' was unexpectedly deleted', err=True, terminate=True)
elif event.event_type == watchdog.events.EVENT_TYPE_MOVED:
debug(self.sync_engine.sticky_notes_file_path + ' was unexpectedly moved to ' + event.dest_path, err=True,
terminate=True)
else:
debug('Unhandled event type: ' + event.event_type, err=True)
return False
def on_any_event(self, event):
        if not self.is_valid_event(event):
            return
# Restart the idle timeout
if self.idle_timeout:
self.idle_timeout.cancel()
self.idle_timeout = threading.Timer(5.0, self.sync_engine.sync_notes, args=[self.get_notes()])
self.idle_timeout.start()
def get_notes(self):
"""Must be overridden to return a list of notes regarding the filetype we are watching."""
raise Exception('get_notes must be overridden')
class SNTFileHandler(FileHandlerInterface):
"""StickyNotes.snt file handler"""
snt_file = None
def __init__(self, sync_engine):
if not olefile.isOleFile(sync_engine.sticky_notes_file_path):
debug(sync_engine.sticky_notes_file_path + ' isn\'t a valid Sticky Notes file', err=True, terminate=True)
super().__init__(patterns=['*.snt'], sync_engine=sync_engine)
def get_notes(self):
notes = []
self.snt_file = olefile.OleFileIO(self.sync_engine.sticky_notes_file_path)
for storage in self.snt_file.listdir(storages=True, streams=False):
note_id = storage[0] # UUID-like string representing the note ID
note_text_rtf_file = '0' # RTF content of the note
with self.snt_file.openstream([note_id, note_text_rtf_file]) as note_content:
                note_text_rtf = note_content.read().decode('latin-1')  # RTF streams are 8-bit text
notes.append({'text': getMarkdown(note_text_rtf), 'color': None})
self.snt_file.close()
return notes
class SQLiteFileHandler(FileHandlerInterface):
"""plum.sqlite file handler"""
colors_map = {
'Yellow': 'yellow',
'Green': 'green',
'Blue': 'blue',
'Purple': 'purple',
'Pink': 'pink'
}
database = None
def __init__(self, sync_engine):
super().__init__(patterns=['*.sqlite'], sync_engine=sync_engine)
def get_notes(self):
self.database = sqlite3.connect('file:' + self.sync_engine.sticky_notes_file_path + '?mode=ro', uri=True)
self.database.row_factory = sqlite3.Row
notes_in_db = self.database.execute('SELECT Text, Theme FROM Note')
notes = [{'text': getMarkdown(note['Text']), 'color': self.get_note_color(note['Theme'])} for note in notes_in_db]
self.database.close()
return notes
    def get_note_color(self, theme):
        return self.colors_map[theme] if theme in self.colors_map else None
class INIFileHandler(FileHandlerInterface):
"""Settings.ini file handler"""
sidebar_config = None
def __init__(self, sync_engine):
super().__init__(patterns=['*.ini'], sync_engine=sync_engine)
def get_notes(self):
notes = []
        # This masquerade decodes the ugly file content from UTF-16 (UCS-2) LE with BOM to unicode
with open(self.sync_engine.sticky_notes_file_path, 'rb') as sidebar_config_file:
sidebar_config_file_content = sidebar_config_file.read()
sidebar_config_file_content = sidebar_config_file_content[len(codecs.BOM_UTF16_LE):] # Remove the BOM
self.sidebar_config = configparser.ConfigParser(delimiters=('='), interpolation=None)
self.sidebar_config.read_string(sidebar_config_file_content.decode('utf-16-le'))
notes_color = None
for section in self.sidebar_config.sections():
if not section.startswith('Section '):
continue
if 'NoteCount' not in self.sidebar_config[section]:
continue
notes_color = self.sidebar_config[section]['ColorSaved'].strip('"') if 'ColorSaved' in self.sidebar_config[
section] and notes_color is None else None
for key in self.sidebar_config[section]:
if key.isdigit():
notes.append({'text': unquote(self.sidebar_config[section][key].strip('"')), 'color': notes_color})
break
return notes
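# --- Illustrative sketch (not part of this module): how a sync engine typically plugs one of
# --- the handlers above into a watchdog Observer. The sync_engine and directory arguments
# --- are assumptions; only watchdog's documented schedule()/start() API is used.
def watch_sticky_notes(sync_engine, directory):
    """Start watching `directory` with an INIFileHandler and return the Observer (illustrative only)."""
    from watchdog.observers import Observer

    observer = Observer()
    observer.schedule(INIFileHandler(sync_engine), directory, recursive=False)
    observer.start()
    return observer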
|
[
"rtf.Rtf2Markdown.getMarkdown",
"olefile.OleFileIO",
"sqlite3.connect",
"utils.debug",
"olefile.isOleFile",
"configparser.ConfigParser"
] |
[((2407, 2465), 'olefile.OleFileIO', 'olefile.OleFileIO', (['self.sync_engine.sticky_notes_file_path'], {}), '(self.sync_engine.sticky_notes_file_path)\n', (2424, 2465), False, 'import olefile\n'), ((3399, 3492), 'sqlite3.connect', 'sqlite3.connect', (["('file:' + self.sync_engine.sticky_notes_file_path + '?mode=ro')"], {'uri': '(True)'}), "('file:' + self.sync_engine.sticky_notes_file_path +\n '?mode=ro', uri=True)\n", (3414, 3492), False, 'import sqlite3\n'), ((4585, 4646), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {'delimiters': '"""="""', 'interpolation': 'None'}), "(delimiters='=', interpolation=None)\n", (4610, 4646), False, 'import configparser\n'), ((2093, 2146), 'olefile.isOleFile', 'olefile.isOleFile', (['sync_engine.sticky_notes_file_path'], {}), '(sync_engine.sticky_notes_file_path)\n', (2110, 2146), False, 'import olefile\n'), ((2160, 2268), 'utils.debug', 'debug', (['(sync_engine.sticky_notes_file_path + " isn\'t a valid Sticky Notes file")'], {'err': '(True)', 'terminate': '(True)'}), '(sync_engine.sticky_notes_file_path +\n " isn\'t a valid Sticky Notes file", err=True, terminate=True)\n', (2165, 2268), False, 'from utils import debug\n'), ((978, 1084), 'utils.debug', 'debug', (["(self.sync_engine.sticky_notes_file_path + ' was unexpectedly deleted')"], {'err': '(True)', 'terminate': '(True)'}), "(self.sync_engine.sticky_notes_file_path + ' was unexpectedly deleted',\n err=True, terminate=True)\n", (983, 1084), False, 'from utils import debug\n'), ((3641, 3666), 'rtf.Rtf2Markdown.getMarkdown', 'getMarkdown', (["note['Text']"], {}), "(note['Text'])\n", (3652, 3666), False, 'from rtf.Rtf2Markdown import getMarkdown\n'), ((1160, 1286), 'utils.debug', 'debug', (["(self.sync_engine.sticky_notes_file_path + ' was unexpectedly moved to ' +\n event.dest_path)"], {'err': '(True)', 'terminate': '(True)'}), "(self.sync_engine.sticky_notes_file_path +\n ' was unexpectedly moved to ' + event.dest_path, err=True, terminate=True)\n", (1165, 1286), False, 'from utils import debug\n'), ((1327, 1387), 'utils.debug', 'debug', (["('Unhandled event type: ' + event.event_type)"], {'err': '(True)'}), "('Unhandled event type: ' + event.event_type, err=True)\n", (1332, 1387), False, 'from utils import debug\n'), ((2881, 2907), 'rtf.Rtf2Markdown.getMarkdown', 'getMarkdown', (['note_text_rtf'], {}), '(note_text_rtf)\n', (2892, 2907), False, 'from rtf.Rtf2Markdown import getMarkdown\n')]
|
# -*- coding: utf-8 -*-
import logging
from scrapy.spiders import Spider
from scrapy.http import Request
logger = logging.getLogger(__name__)
# This spider is a base for those attempting to crawl and parse a specified
# list of URLs rather than using an RSS feed or a sitemap. It needs the
# SPECIFIED_URLS_FILE setting set up to point to a file with a list of URLs.
class NewsSpecifiedSpider(Spider):
start_urls = []
def start_requests(self):
if self.crawler.settings.get('REFETCHCONTROL_ENABLED') == True:
logger.warning('RefetchControl is incompatible with '
'NewsSpecifiedSpider and will give spurious '
'warnings. Try setting REFETCHCONTROL_ENABLED to '
'False in settings.py.')
startfn = self.crawler.settings.get('SPECIFIED_URLS_FILE')
if not startfn:
logger.critical("SPECIFIED_URLS_FILE must be configured (e.g. in "
"settings.py) to point to a file containing a "
"list of URLs.")
return
for url in self.start_urls:
yield Request(url, dont_filter=True)
with open(startfn, 'r') as f:
urls = [u.strip() for u in f.readlines()]
logger.debug(f"URLs read from SPECIFIED_URL_FILE: {urls}")
for url in urls:
if url != '':
yield Request(url, dont_filter=True)
def parse(self, response):
return self.parse_page(response)
def parse_page(self, response):
raise NotImplementedError
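# --- Illustrative sketch (not part of this module): a concrete spider overriding parse_page,
# --- which the base class above deliberately leaves unimplemented. The spider name and the
# --- CSS selector are hypothetical examples.
class ExampleNewsSpider(NewsSpecifiedSpider):
    name = 'example_news'

    def parse_page(self, response):
        # Yield a minimal item for each fetched page.
        yield {
            'url': response.url,
            'title': response.css('title::text').get(),
        }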
|
[
"scrapy.http.Request",
"logging.getLogger"
] |
[((115, 142), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (132, 142), False, 'import logging\n'), ((1162, 1192), 'scrapy.http.Request', 'Request', (['url'], {'dont_filter': '(True)'}), '(url, dont_filter=True)\n', (1169, 1192), False, 'from scrapy.http import Request\n'), ((1442, 1472), 'scrapy.http.Request', 'Request', (['url'], {'dont_filter': '(True)'}), '(url, dont_filter=True)\n', (1449, 1472), False, 'from scrapy.http import Request\n')]
|
# -*- coding: utf-8 -*-
"""
Practical Algorithms
Problem set: Unit 5, 1.1
Problem statement:
4. Modify your binary search algorithm (from #3) to work with words rather
than integers. Test it on a small list of words, e.g.,
["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"].
The search should be case-insensitive.
7. Now, test your binary search algorithm from #6 on a list of all
English words in the dictionary. See this for a tip on how to get a list of
all dictionary words. Note the time taken to search for a word.
Compare it with your timing result from #5, and comment on your findings.
https://www.datasciencebytes.com/bytes/2014/11/03/get-a-list-of-all-english-words-in-python/
"""
#%% binary search
def binary_search_word(list1, word):
"""
Carry out a binary search of the given sorted list for a given word
Parameters
----------
list1: input list, sorted
word: the value to be searched
Returns
-------
True/False
"""
size = len(list1)
mid = size // 2
#debug message, remove when not debugging
#if(size):
# print ("While searching for word: ", word, ", binary search called on this list starting at : ", list1[0], " of size ", size)
#base case
if size == 0:
return False
#item found
if(list1[mid].lower()==word.lower()):
return True
#recursive call
if(list1[mid].lower() < word.lower()):
return binary_search_word(list1[mid+1:size], word)
else:
return binary_search_word(list1[0:mid], word)
#%% test binary search
mylist = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
assert (binary_search_word(mylist, "monDay")==True)
assert (binary_search_word(mylist, "Funday")==False)
print("Binary search test passed")
#%% testing binary search on list of dictionary words
from nltk.corpus import words
word_list = words.words()
# prints 236736
print (len(word_list))
from timeit import default_timer as timer
list_of_words_to_search = ["yesterday", "omuamua", "waqar", "different", "obtuse", "zoo", "aardvark", "style", "zaazoozum", "aaaaaaaa"]
n = len(list_of_words_to_search)
results = {}
cumulative_time = 0
for word in list_of_words_to_search:
start = timer()
found = binary_search_word(word_list, word)
end = timer()
time_taken = end-start
results[word] = (round(time_taken,5), found)
cumulative_time += time_taken
print("\n** Binary Search of word list**")
print("Search for these words: ", list_of_words_to_search)
print("\nTime taken to search various words and the result:")
for k,v in results.items():
print(k, v)
print("\nTotal time to carry out search of ", n, " words = ", round(cumulative_time,5), " seconds")
print("Average search time per word = ", round(cumulative_time/n,5), " seconds")
|
[
"timeit.default_timer",
"nltk.corpus.words.words"
] |
[((1964, 1977), 'nltk.corpus.words.words', 'words.words', ([], {}), '()\n', (1975, 1977), False, 'from nltk.corpus import words\n'), ((2314, 2321), 'timeit.default_timer', 'timer', ([], {}), '()\n', (2319, 2321), True, 'from timeit import default_timer as timer\n'), ((2380, 2387), 'timeit.default_timer', 'timer', ([], {}), '()\n', (2385, 2387), True, 'from timeit import default_timer as timer\n')]
|
import numpy as np
a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
dot_product = np.dot(a, b)
print(dot_product)
|
[
"numpy.dot",
"numpy.array"
] |
[((24, 43), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (32, 43), True, 'import numpy as np\n'), ((48, 67), 'numpy.array', 'np.array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (56, 67), True, 'import numpy as np\n'), ((83, 95), 'numpy.dot', 'np.dot', (['a', 'b'], {}), '(a, b)\n', (89, 95), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# File: calculate.py
# Initial date: 4 Sept 2020
# Author: <NAME>
# School assignment: For Assignment #0 of class MCDB6440: Software Engineering for Scientists
# Description: Intro to using GitHub Classroom. Practice with creating and uploading files to github etc.
# This file imports functions div and add from math_lib.py
import sys
import math_lib as ml
## --- input parameters ---(from command line or shell script)
cmd = sys.argv[1] #command (can be 'add' or 'div')
a = float(sys.argv[2]) #numerical
b = float(sys.argv[3]) #numerical
## print statements:
print('input values are '+ str(a) + ' and ' + str(b))
## ---
if cmd == 'add':
foo = ml.add(a,b)
print('the sum is: '+ str(foo))
if cmd == 'div':
foo = ml.div(a,b)
print('first/second is: '+ str(foo))
|
[
"math_lib.add",
"math_lib.div"
] |
[((680, 692), 'math_lib.add', 'ml.add', (['a', 'b'], {}), '(a, b)\n', (686, 692), True, 'import math_lib as ml\n'), ((756, 768), 'math_lib.div', 'ml.div', (['a', 'b'], {}), '(a, b)\n', (762, 768), True, 'import math_lib as ml\n')]
|
import yaml
import os
from opnsense_cli.facades.commands.base import CommandFacade
from opnsense_cli.factories.cli_output_format import CliOutputFormatFactory
from opnsense_cli.formats.base import Format
"""
Click callback methods
See: https://click.palletsprojects.com/en/8.0.x/advanced/#parameter-modifications
"""
def defaults_from_configfile(ctx, param, filename):
def dict_from_yaml(path):
with open(path, 'r') as yaml_file:
data = yaml.load(yaml_file, Loader=yaml.SafeLoader)
return data
options = dict_from_yaml(os.path.expanduser(filename))
ctx.default_map = options
def expand_path(ctx, param, filename):
return os.path.expanduser(filename)
def available_formats():
return CliOutputFormatFactory._keymap.keys()
def formatter_from_formatter_name(ctx, param, format_name) -> Format:
factory = CliOutputFormatFactory(format_name)
return factory.get_class()
def bool_as_string(ctx, param, value):
if type(value) == bool:
return str(int(value))
return value
def tuple_to_csv(ctx, param, value):
if param.multiple and not value:
return None
if type(value) == tuple:
return ",".join(value)
return value
def comma_to_newline(ctx, param, value):
if type(value) == str and "," in value:
return value.replace(",", "\n")
return value
def int_as_string(ctx, param, value):
if type(value) == int:
return str(value)
return value
def resolve_linked_names_to_uuids(ctx, param, value):
option_name = param.opts[0].replace("--", "")
resolve_map = ctx.obj.uuid_resolver_map[option_name]
if value and isinstance(ctx.obj, CommandFacade):
return ctx.obj.resolve_linked_uuids(resolve_map, value)
return value
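# --- Illustrative sketch (not part of this module): wiring the callbacks above into a Click
# --- command. The command name, option names and default values are hypothetical; only
# --- Click's documented callback/is_eager/expose_value parameters are assumed.
import click


@click.command()
@click.option('--config', default='config.yaml', is_eager=True, expose_value=False,
              callback=defaults_from_configfile,
              help='Load option defaults from a YAML file before other options are parsed.')
@click.option('--output', default='json', callback=formatter_from_formatter_name,
              help='Output format name, resolved to a Format class by the callback.')
def example_command(output):
    click.echo(output)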
|
[
"yaml.load",
"opnsense_cli.factories.cli_output_format.CliOutputFormatFactory._keymap.keys",
"os.path.expanduser",
"opnsense_cli.factories.cli_output_format.CliOutputFormatFactory"
] |
[((671, 699), 'os.path.expanduser', 'os.path.expanduser', (['filename'], {}), '(filename)\n', (689, 699), False, 'import os\n'), ((738, 775), 'opnsense_cli.factories.cli_output_format.CliOutputFormatFactory._keymap.keys', 'CliOutputFormatFactory._keymap.keys', ([], {}), '()\n', (773, 775), False, 'from opnsense_cli.factories.cli_output_format import CliOutputFormatFactory\n'), ((862, 897), 'opnsense_cli.factories.cli_output_format.CliOutputFormatFactory', 'CliOutputFormatFactory', (['format_name'], {}), '(format_name)\n', (884, 897), False, 'from opnsense_cli.factories.cli_output_format import CliOutputFormatFactory\n'), ((559, 587), 'os.path.expanduser', 'os.path.expanduser', (['filename'], {}), '(filename)\n', (577, 587), False, 'import os\n'), ((465, 509), 'yaml.load', 'yaml.load', (['yaml_file'], {'Loader': 'yaml.SafeLoader'}), '(yaml_file, Loader=yaml.SafeLoader)\n', (474, 509), False, 'import yaml\n')]
|
import psycopg2
import os
class DBHandler:
_max_id_length = 255
_max_record_length = 255
def __init__(self, table_name="intake_records", error_table_name="scan_errors"):
"""
:param table_name: (str) Optional string name of the main db table.
:param error_table_name: (str) Optional string name for the errors db table.
"""
self.connection_info = os.environ.get("CEDA_INTAKE_DB_SETTINGS")
if not self.connection_info:
raise KeyError('Please create environment variable CEDA_INTAKE_DB_SETTINGS'
'in for format of "dbname=<db_name> user=<user_name>'
'host=<host_name> password=<password>"')
self._test_connection()
self.table_name = table_name
self.error_table_name = error_table_name
self._create_tables()
def _test_connection(self):
try:
conn = psycopg2.connect(self.connection_info)
except psycopg2.Error as err:
print(err)
raise ValueError('CEDA_INTAKE_DB_SETTINGS string is incorrect. Should be'
'in for format of "dbname=<db_name> user=<user_name>'
'host=<host_name> password=<password>"')
conn.close()
def _create_tables(self):
"""
Creates tables if they don't already exist.
"""
# Create main table
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(f"CREATE TABLE IF NOT EXISTS {self.table_name} ("
f" id varchar({self._max_id_length}) PRIMARY KEY, "
f" record varchar({self._max_record_length}) NOT NULL"
f");")
conn.commit()
cur.execute(f"CREATE TABLE IF NOT EXISTS {self.error_table_name} ("
f" id varchar({self._max_id_length}) PRIMARY KEY, "
f" record varchar({self._max_record_length}) NOT NULL"
f");")
conn.commit()
def _delete_tables(self):
"""
Drops the database tables
"""
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(f"DROP TABLE {self.table_name};")
conn.commit()
cur.execute(f"DROP TABLE {self.error_table_name};")
conn.commit()
def get_record(self, identifier):
"""
        Selects and returns the record of the job with the given identifier
:param identifier: (str) Identifier of the job record
:return: (str) Record of job
"""
query = f"SELECT record FROM {self.table_name} " \
f"WHERE id='{identifier}';"
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(query)
if cur.rowcount > 0:
return cur.fetchone()[0]
return None
def get_all_records(self):
"""
:return: (dict) Dictionary of all job identifiers mapped to their respective records
"""
query = f"SELECT * FROM {self.table_name}"
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(query)
record_dict = {}
for (name, record) in cur:
record_dict[name] = record
return record_dict
def get_successful_runs(self):
"""
:return: (str list) Returns a list of the identifiers of all successful runs
"""
query = f"SELECT id FROM {self.table_name} " \
"WHERE record='success';"
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(query)
return [name[0] for name in cur]
def get_failed_runs(self):
"""
        :return: (dict) Dictionary of error types mapped to the lists of job identifiers that recorded them
"""
query = f"SELECT id, record FROM {self.table_name} " \
"WHERE record<>'success';"
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(query)
failures = {}
for (name, record) in cur:
failures.setdefault(record, [])
failures[record].append(name)
return failures
def delete_record(self, identifier):
"""
Deletes entry specified by the given identifier
from the database
:param identifier: (str) Identifier of the job
"""
query = f"DELETE FROM {self.table_name} " \
f"WHERE id='{identifier}';"
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(query)
conn.commit()
def delete_all_records(self):
"""
Deletes all entries from the table
"""
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(f"DELETE FROM {self.table_name};")
conn.commit()
def ran_successfully(self, identifier):
"""
Returns true / false on whether the record with this
identifier is successful
:param identifier: (str) Identifier of the job record
:return: (bool) Boolean on if job ran successfully
"""
query = f"SELECT record FROM {self.table_name} " \
f"WHERE id='{identifier}';"
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(query)
record = cur.fetchone()
if record is not None:
return record[0] == 'success'
return False
def count_records(self):
"""
:return: (int) Number of records in the table
"""
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(f"SELECT COUNT(*) FROM {self.table_name};")
return cur.fetchone()[0]
def count_successes(self):
"""
        :return: (int) Number of successful records in the table
"""
query = f"SELECT COUNT(*) FROM {self.table_name} " \
"WHERE record='success';"
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(query)
return cur.fetchone()[0]
def count_failures(self):
"""
:return: (int) Number of failed records in the table
"""
query = f"SELECT COUNT(*) FROM {self.table_name} " \
"WHERE record<>'success';"
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(query)
return cur.fetchone()[0]
def batch_insert(self, records):
"""Batch insert records.
>>> execute_values(cur,
... "INSERT INTO test (id, v1, v2) VALUES %s",
... [(1, 2, 3), (4, 5, 6), (7, 8, 9)])
"""
raise NotImplementedError
def insert_success(self, identifier):
"""
Inserts an entry into the table with a given identifier
and the record 'success'
:param identifier: (str) Identifier of the job
"""
if self.get_record(identifier):
self.delete_record(identifier)
query = f"INSERT INTO {self.table_name} " \
f"VALUES ('{identifier}', 'success');"
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(query)
conn.commit()
def insert_failure(self, identifier, error_type='failure'):
"""
Inserts an entry into the table with a given identifier
and erroneous record
:param identifier: (str) Identifier of the job
:param error_type: (str) Record of the job
"""
if self.get_record(identifier):
self.delete_record(identifier)
error_type = error_type[:self._max_record_length]
query = f"INSERT INTO {self.table_name} " \
f"VALUES ('{identifier}', '{error_type}');"
with psycopg2.connect(self.connection_info) as conn:
with conn.cursor() as cur:
cur.execute(query)
conn.commit()
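# --- Illustrative sketch (not part of the class above): one way the unimplemented
# --- batch_insert could work, following the execute_values example in its docstring.
# --- `records` is assumed to be an iterable of (identifier, record) tuples.
from psycopg2.extras import execute_values


def batch_insert_records(handler, records):
    """Insert many (identifier, record) rows in a single round trip."""
    with psycopg2.connect(handler.connection_info) as conn:
        with conn.cursor() as cur:
            execute_values(cur,
                           f"INSERT INTO {handler.table_name} VALUES %s",
                           records)
        conn.commit()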
|
[
"os.environ.get",
"psycopg2.connect"
] |
[((407, 448), 'os.environ.get', 'os.environ.get', (['"""CEDA_INTAKE_DB_SETTINGS"""'], {}), "('CEDA_INTAKE_DB_SETTINGS')\n", (421, 448), False, 'import os\n'), ((946, 984), 'psycopg2.connect', 'psycopg2.connect', (['self.connection_info'], {}), '(self.connection_info)\n', (962, 984), False, 'import psycopg2\n'), ((1457, 1495), 'psycopg2.connect', 'psycopg2.connect', (['self.connection_info'], {}), '(self.connection_info)\n', (1473, 1495), False, 'import psycopg2\n'), ((2280, 2318), 'psycopg2.connect', 'psycopg2.connect', (['self.connection_info'], {}), '(self.connection_info)\n', (2296, 2318), False, 'import psycopg2\n'), ((2936, 2974), 'psycopg2.connect', 'psycopg2.connect', (['self.connection_info'], {}), '(self.connection_info)\n', (2952, 2974), False, 'import psycopg2\n'), ((3377, 3415), 'psycopg2.connect', 'psycopg2.connect', (['self.connection_info'], {}), '(self.connection_info)\n', (3393, 3415), False, 'import psycopg2\n'), ((3916, 3954), 'psycopg2.connect', 'psycopg2.connect', (['self.connection_info'], {}), '(self.connection_info)\n', (3932, 3954), False, 'import psycopg2\n'), ((4372, 4410), 'psycopg2.connect', 'psycopg2.connect', (['self.connection_info'], {}), '(self.connection_info)\n', (4388, 4410), False, 'import psycopg2\n'), ((5011, 5049), 'psycopg2.connect', 'psycopg2.connect', (['self.connection_info'], {}), '(self.connection_info)\n', (5027, 5049), False, 'import psycopg2\n'), ((5280, 5318), 'psycopg2.connect', 'psycopg2.connect', (['self.connection_info'], {}), '(self.connection_info)\n', (5296, 5318), False, 'import psycopg2\n'), ((5864, 5902), 'psycopg2.connect', 'psycopg2.connect', (['self.connection_info'], {}), '(self.connection_info)\n', (5880, 5902), False, 'import psycopg2\n'), ((6260, 6298), 'psycopg2.connect', 'psycopg2.connect', (['self.connection_info'], {}), '(self.connection_info)\n', (6276, 6298), False, 'import psycopg2\n'), ((6702, 6740), 'psycopg2.connect', 'psycopg2.connect', (['self.connection_info'], {}), '(self.connection_info)\n', (6718, 6740), False, 'import psycopg2\n'), ((7102, 7140), 'psycopg2.connect', 'psycopg2.connect', (['self.connection_info'], {}), '(self.connection_info)\n', (7118, 7140), False, 'import psycopg2\n'), ((7922, 7960), 'psycopg2.connect', 'psycopg2.connect', (['self.connection_info'], {}), '(self.connection_info)\n', (7938, 7960), False, 'import psycopg2\n'), ((8642, 8680), 'psycopg2.connect', 'psycopg2.connect', (['self.connection_info'], {}), '(self.connection_info)\n', (8658, 8680), False, 'import psycopg2\n')]
|
from contextlib import ExitStack as DoesNotRaise
from typing import Tuple, Optional
import numpy as np
import pytest
from onemetric.cv.utils.iou import box_iou, mask_iou, box_iou_batch
@pytest.mark.parametrize(
"box_true, box_detection, expected_result, exception",
[
(None, None, None, pytest.raises(ValueError)),
((0., 0., 1.), (0., 0., 1., 1.), None, pytest.raises(ValueError)),
((0., 0., 1., 1.), (0., 0., 1.), None, pytest.raises(ValueError)),
([0., 0., 1., 1.], [0., 0., 1., 1.], None, pytest.raises(ValueError)),
((0., 0., 1., 1.), (0., 1., 1., 2.), 0., DoesNotRaise()),
((0, 1., 1., 2.), (0., 0., 1., 1.), 0., DoesNotRaise()),
((0., 0., 1., 1.), (1., 0., 2., 1.), 0., DoesNotRaise()),
((1., 0., 2., 1.), (0., 0., 1., 1.), 0., DoesNotRaise()),
((0., 0., 1., 1.), (0.25, 0., 1.25, 1.), 0.6, DoesNotRaise()),
((0.25, 0., 1.25, 1.), (0., 0., 1., 1.), 0.6, DoesNotRaise()),
((0., 0., 1., 1.), (0., 0.25, 1., 1.25), 0.6, DoesNotRaise()),
((0., 0.25, 1., 1.25), (0., 0., 1., 1.), 0.6, DoesNotRaise()),
((0., 0., 1., 1.), (0., 0., 1., 1.), 1., DoesNotRaise()),
((0., 0., 3., 3.), (1., 1., 2., 2.), 1/9, DoesNotRaise()),
((1., 1., 2., 2.), (0., 0., 3., 3.), 1/9, DoesNotRaise())
]
)
def test_box_iou(
box_true: Tuple[float, float, float, float],
box_detection: Tuple[float, float, float, float],
expected_result: Optional[float],
exception: Exception
) -> None:
with exception:
result = box_iou(box_true=box_true, box_detection=box_detection)
assert result == expected_result
@pytest.mark.parametrize(
"boxes_true, boxes_detection, expected_result, exception",
[
(
None,
np.array([
[0., 0.25, 1., 1.25]
]),
None,
pytest.raises(ValueError)
),
(
np.array([
[0., 0.25, 1., 1.25]
]),
None,
None,
pytest.raises(ValueError)
),
(
np.array([
[0., 0., 1., 1.],
[2., 2., 2.5, 2.5]
]),
np.array([
[0., 0., 1., 1.],
[2., 2., 2.5, 2.5]
]),
np.array([
[1., 0.],
[0., 1.]
]),
DoesNotRaise()
),
(
np.array([
[0., 0., 1., 1.],
[0., 0.75, 1., 1.75]
]),
np.array([
[0., 0.25, 1., 1.25]
]),
np.array([
[0.6],
[1/3]
]),
DoesNotRaise()
),
(
np.array([
[0., 0., 1., 1.],
[0., 0.75, 1., 1.75]
]),
np.array([
[0., 0.25, 1., 1.25],
[0., 0.75, 1., 1.75],
[1., 1., 2., 2.]
]),
np.array([
[0.6, 1/7, 0],
[1/3, 1., 0]
]),
DoesNotRaise()
)
]
)
def test_box_iou_batch(
boxes_true: np.ndarray,
boxes_detection: np.ndarray,
expected_result: Optional[float],
exception: Exception
) -> None:
with exception:
result = box_iou_batch(boxes_true=boxes_true, boxes_detection=boxes_detection)
np.testing.assert_array_equal(result, expected_result)
QUARTER_MASK = np.zeros((10, 10)).astype('uint8')
QUARTER_MASK[0:5, 0:5] = 1
@pytest.mark.parametrize(
"mask_true, mask_detection, expected_result, exception",
[
(None, None, None, pytest.raises(ValueError)),
(np.zeros((10, 10)).astype('uint8'), np.zeros((20, 20)).astype('uint8'), None, pytest.raises(ValueError)),
(np.zeros((20, 20)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), None, pytest.raises(ValueError)),
(np.ones((10, 10)).astype('int16'), np.zeros((10, 10)).astype('int16'), None, pytest.raises(ValueError)),
(np.ones((10, 10)).astype('uint8') * 2, np.zeros((10, 10)).astype('uint8'), 0., pytest.raises(ValueError)),
(np.ones((10, 10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), 0., DoesNotRaise()),
(np.zeros((10, 10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 0., DoesNotRaise()),
(np.zeros((10, 10)).astype('uint8'), np.zeros((10, 10)).astype('uint8'), None, DoesNotRaise()),
(np.ones((10, 10)).astype('uint8'), np.ones((10, 10)).astype('uint8'), 1., DoesNotRaise()),
(np.ones((10, 10)).astype('uint8'), QUARTER_MASK, 0.25, DoesNotRaise())
]
)
def test_mask_iou(mask_true: np.array, mask_detection: np.array, expected_result: float, exception: Exception) -> None:
with exception:
result = mask_iou(mask_true=mask_true, mask_detection=mask_detection)
assert result == expected_result
|
[
"numpy.testing.assert_array_equal",
"numpy.zeros",
"numpy.ones",
"contextlib.ExitStack",
"pytest.raises",
"numpy.array",
"onemetric.cv.utils.iou.mask_iou",
"onemetric.cv.utils.iou.box_iou_batch",
"onemetric.cv.utils.iou.box_iou"
] |
[((1550, 1605), 'onemetric.cv.utils.iou.box_iou', 'box_iou', ([], {'box_true': 'box_true', 'box_detection': 'box_detection'}), '(box_true=box_true, box_detection=box_detection)\n', (1557, 1605), False, 'from onemetric.cv.utils.iou import box_iou, mask_iou, box_iou_batch\n'), ((3366, 3435), 'onemetric.cv.utils.iou.box_iou_batch', 'box_iou_batch', ([], {'boxes_true': 'boxes_true', 'boxes_detection': 'boxes_detection'}), '(boxes_true=boxes_true, boxes_detection=boxes_detection)\n', (3379, 3435), False, 'from onemetric.cv.utils.iou import box_iou, mask_iou, box_iou_batch\n'), ((3444, 3498), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'expected_result'], {}), '(result, expected_result)\n', (3473, 3498), True, 'import numpy as np\n'), ((3516, 3534), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (3524, 3534), True, 'import numpy as np\n'), ((4839, 4899), 'onemetric.cv.utils.iou.mask_iou', 'mask_iou', ([], {'mask_true': 'mask_true', 'mask_detection': 'mask_detection'}), '(mask_true=mask_true, mask_detection=mask_detection)\n', (4847, 4899), False, 'from onemetric.cv.utils.iou import box_iou, mask_iou, box_iou_batch\n'), ((307, 332), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (320, 332), False, 'import pytest\n'), ((382, 407), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (395, 407), False, 'import pytest\n'), ((457, 482), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (470, 482), False, 'import pytest\n'), ((536, 561), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (549, 561), False, 'import pytest\n'), ((613, 627), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (625, 627), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((678, 692), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (690, 692), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((744, 758), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (756, 758), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((810, 824), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (822, 824), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((881, 895), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (893, 895), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((952, 966), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (964, 966), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((1023, 1037), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (1035, 1037), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((1094, 1108), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (1106, 1108), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((1160, 1174), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (1172, 1174), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((1227, 1241), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (1239, 1241), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((1294, 1308), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (1306, 1308), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((1784, 1818), 'numpy.array', 'np.array', (['[[0.0, 0.25, 1.0, 1.25]]'], {}), '([[0.0, 0.25, 1.0, 1.25]])\n', (1792, 1818), True, 'import numpy as np\n'), ((1878, 1903), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1891, 1903), 
False, 'import pytest\n'), ((1937, 1971), 'numpy.array', 'np.array', (['[[0.0, 0.25, 1.0, 1.25]]'], {}), '([[0.0, 0.25, 1.0, 1.25]])\n', (1945, 1971), True, 'import numpy as np\n'), ((2049, 2074), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2062, 2074), False, 'import pytest\n'), ((2108, 2162), 'numpy.array', 'np.array', (['[[0.0, 0.0, 1.0, 1.0], [2.0, 2.0, 2.5, 2.5]]'], {}), '([[0.0, 0.0, 1.0, 1.0], [2.0, 2.0, 2.5, 2.5]])\n', (2116, 2162), True, 'import numpy as np\n'), ((2216, 2270), 'numpy.array', 'np.array', (['[[0.0, 0.0, 1.0, 1.0], [2.0, 2.0, 2.5, 2.5]]'], {}), '([[0.0, 0.0, 1.0, 1.0], [2.0, 2.0, 2.5, 2.5]])\n', (2224, 2270), True, 'import numpy as np\n'), ((2324, 2358), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0]])\n', (2332, 2358), True, 'import numpy as np\n'), ((2414, 2428), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (2426, 2428), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((2462, 2518), 'numpy.array', 'np.array', (['[[0.0, 0.0, 1.0, 1.0], [0.0, 0.75, 1.0, 1.75]]'], {}), '([[0.0, 0.0, 1.0, 1.0], [0.0, 0.75, 1.0, 1.75]])\n', (2470, 2518), True, 'import numpy as np\n'), ((2572, 2606), 'numpy.array', 'np.array', (['[[0.0, 0.25, 1.0, 1.25]]'], {}), '([[0.0, 0.25, 1.0, 1.25]])\n', (2580, 2606), True, 'import numpy as np\n'), ((2648, 2674), 'numpy.array', 'np.array', (['[[0.6], [1 / 3]]'], {}), '([[0.6], [1 / 3]])\n', (2656, 2674), True, 'import numpy as np\n'), ((2732, 2746), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (2744, 2746), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((2780, 2836), 'numpy.array', 'np.array', (['[[0.0, 0.0, 1.0, 1.0], [0.0, 0.75, 1.0, 1.75]]'], {}), '([[0.0, 0.0, 1.0, 1.0], [0.0, 0.75, 1.0, 1.75]])\n', (2788, 2836), True, 'import numpy as np\n'), ((2890, 2975), 'numpy.array', 'np.array', (['[[0.0, 0.25, 1.0, 1.25], [0.0, 0.75, 1.0, 1.75], [1.0, 1.0, 2.0, 2.0]]'], {}), '([[0.0, 0.25, 1.0, 1.25], [0.0, 0.75, 1.0, 1.75], [1.0, 1.0, 2.0, 2.0]]\n )\n', (2898, 2975), True, 'import numpy as np\n'), ((3038, 3082), 'numpy.array', 'np.array', (['[[0.6, 1 / 7, 0], [1 / 3, 1.0, 0]]'], {}), '([[0.6, 1 / 7, 0], [1 / 3, 1.0, 0]])\n', (3046, 3082), True, 'import numpy as np\n'), ((3137, 3151), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (3149, 3151), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((3700, 3725), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3713, 3725), False, 'import pytest\n'), ((3815, 3840), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3828, 3840), False, 'import pytest\n'), ((3930, 3955), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3943, 3955), False, 'import pytest\n'), ((4044, 4069), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4057, 4069), False, 'import pytest\n'), ((4160, 4185), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4173, 4185), False, 'import pytest\n'), ((4272, 4286), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (4284, 4286), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((4373, 4387), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (4385, 4387), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((4477, 4491), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (4489, 4491), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((4577, 4591), 'contextlib.ExitStack', 
'DoesNotRaise', ([], {}), '()\n', (4589, 4591), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((4658, 4672), 'contextlib.ExitStack', 'DoesNotRaise', ([], {}), '()\n', (4670, 4672), True, 'from contextlib import ExitStack as DoesNotRaise\n'), ((3737, 3755), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (3745, 3755), True, 'import numpy as np\n'), ((3773, 3791), 'numpy.zeros', 'np.zeros', (['(20, 20)'], {}), '((20, 20))\n', (3781, 3791), True, 'import numpy as np\n'), ((3852, 3870), 'numpy.zeros', 'np.zeros', (['(20, 20)'], {}), '((20, 20))\n', (3860, 3870), True, 'import numpy as np\n'), ((3888, 3906), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (3896, 3906), True, 'import numpy as np\n'), ((3967, 3984), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (3974, 3984), True, 'import numpy as np\n'), ((4002, 4020), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (4010, 4020), True, 'import numpy as np\n'), ((4120, 4138), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (4128, 4138), True, 'import numpy as np\n'), ((4197, 4214), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (4204, 4214), True, 'import numpy as np\n'), ((4232, 4250), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (4240, 4250), True, 'import numpy as np\n'), ((4298, 4316), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (4306, 4316), True, 'import numpy as np\n'), ((4334, 4351), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (4341, 4351), True, 'import numpy as np\n'), ((4399, 4417), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (4407, 4417), True, 'import numpy as np\n'), ((4435, 4453), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (4443, 4453), True, 'import numpy as np\n'), ((4503, 4520), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (4510, 4520), True, 'import numpy as np\n'), ((4538, 4555), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (4545, 4555), True, 'import numpy as np\n'), ((4603, 4620), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (4610, 4620), True, 'import numpy as np\n'), ((4081, 4098), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (4088, 4098), True, 'import numpy as np\n')]
|
"""
@author: <NAME>,<NAME>
"""
import numpy as np
import streamlit as st
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
st.title("Synapse Unsupervised Models")
uploaded_file = st.file_uploader("Choose a csv file", type="csv")
if uploaded_file is not None:
data = pd.read_csv(uploaded_file)
st.write(data)
if uploaded_file is not None:
drop_column = st.sidebar.multiselect('X : Features (Selected will be dropped)', data.columns.to_list())
X = data.drop(drop_column,axis = 1)
st.header('X : Features')
st.write(X)
if uploaded_file is not None:
if st.sidebar.checkbox("Feature Normalization"):
X = (X - np.mean(X))/np.std(X)
st.header("X : Features (Normalized)")
st.write(X)
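# The Kmeans class below is a plain Lloyd-style k-means (description inferred
# from the code): centroids start as draws from a standard normal, then a fixed
# number of assign-and-update passes run in assignment_move(); test() returns
# the centroids together with the points and their cluster labels.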
class Kmeans:
def initialize_var(self,X,K=3):
X = np.array(X)
m,n = X.shape
c = np.random.randn(K,n)
return X,c,K
def assignment_move(self,X,c,K):
m = X.shape[0]
idx = np.zeros(m)
for o in range(10):
for i in range(m):
temp = np.zeros(K)
for j in range(K):
temp[j] = np.sum((X[i,:] - c[j,:]) ** 2)
idx[i] = np.argmin(temp)
for p in range(K):
points = [X[j] for j in range(len(X)) if idx[j] == p]
c[p] = np.mean(points, axis=0)
return idx,c
def test(self,X,K=3):
self.X,c,self.K = self.initialize_var(X,K)
self.idx,self.c = self.assignment_move(self.X,c,self.K)
X_ = pd.DataFrame(self.X)
idx_ = pd.DataFrame(self.idx)
data = pd.concat([X_,idx_],axis =1)
return self.c,data
def plot_clusters(self,d):
a={}
if self.X.shape[1]==2:
for i in range(2):
a['a'+str(i+1)] = self.X[:,i:i+1]
a['a1'] = np.reshape(a['a1'],(a['a1']).shape[0],)
a['a2'] = np.reshape(a['a2'],(a['a2']).shape[0],)
fig = go.Figure(data=go.Scatter(x=a['a1'],
y=a['a2'],
mode='markers',
marker=dict(color=self.idx)
))
st.plotly_chart(fig)
elif self.X.shape[1]==3:
d.columns = ['x','y','z','l']
fig = px.scatter_3d(d, x='x', y='y', z='z',color = 'l')
st.plotly_chart(fig)
        else:
            # plotting is only implemented for 2 or 3 feature columns
            st.error("Plotting is only supported for data with 2 or 3 features")
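# The PCA class below works from the covariance matrix (description inferred
# from the code): train() runs an SVD of np.cov(X.T) and accumulates the
# explained-variance ratios, and K_components(n) projects X onto the first n
# left singular vectors.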
class PCA:
def initialization(self,X):
X = np.array(X)
return X
def train(self,X):
self.X = self.initialization(X)
self.covariance_matrix = np.cov(X.T)
self.u,s,v = np.linalg.svd(self.covariance_matrix)
sum_s = np.sum(s)
self.variance_exp= []
k = 0
for i in s:
k = i+k
variance = k/sum_s
self.variance_exp.append(variance)
def K_components(self,n=2):
self.X= np.dot(self.X,self.u[:,:n])
return self.X
def variance_explained(self):
return self.variance_exp
if uploaded_file is not None:
Algorithms = st.sidebar.selectbox(
'Algorithm',
('None','K-means Clustering','Principal Component Analysis')
)
if uploaded_file is not None:
if Algorithms == 'K-means Clustering':
k_value = st.sidebar.number_input('Enter K value',value = 3)
train_button = st.sidebar.checkbox("Click Here for training")
if train_button:
d = Kmeans()
c,data = d.test(X,k_value)
st.subheader("Centroids")
st.write(c)
st.subheader("Clustering Data with labels")
st.write(data)
d.plot_clusters(data)
#except : raise ValueError('graph not computed with NaN values or no. of K value exceeds try again')
if Algorithms == 'Principal Component Analysis':
k_value = st.sidebar.number_input('Enter K components value',value = 3)
train_button = st.sidebar.checkbox("Click Here for training")
if train_button:
d = PCA()
d.train(X)
st.header('Variance Explained')
st.markdown(d.variance_explained())
st.info('Always Use Feature Normalization when applying PCA')
X_pca = d.K_components(k_value)
st.header('X : Feature (PCA)')
st.write(X_pca)
|
[
"numpy.sum",
"pandas.read_csv",
"streamlit.title",
"numpy.argmin",
"streamlit.sidebar.selectbox",
"numpy.linalg.svd",
"numpy.mean",
"pandas.DataFrame",
"streamlit.subheader",
"numpy.random.randn",
"streamlit.sidebar.checkbox",
"numpy.std",
"streamlit.info",
"numpy.reshape",
"numpy.cov",
"pandas.concat",
"streamlit.error",
"streamlit.plotly_chart",
"streamlit.header",
"plotly.express.scatter_3d",
"streamlit.file_uploader",
"numpy.dot",
"streamlit.sidebar.number_input",
"numpy.zeros",
"streamlit.write",
"numpy.array"
] |
[((159, 198), 'streamlit.title', 'st.title', (['"""Synapse Unsupervised Models"""'], {}), "('Synapse Unsupervised Models')\n", (167, 198), True, 'import streamlit as st\n'), ((216, 265), 'streamlit.file_uploader', 'st.file_uploader', (['"""Choose a csv file"""'], {'type': '"""csv"""'}), "('Choose a csv file', type='csv')\n", (232, 265), True, 'import streamlit as st\n'), ((308, 334), 'pandas.read_csv', 'pd.read_csv', (['uploaded_file'], {}), '(uploaded_file)\n', (319, 334), True, 'import pandas as pd\n'), ((339, 353), 'streamlit.write', 'st.write', (['data'], {}), '(data)\n', (347, 353), True, 'import streamlit as st\n'), ((546, 571), 'streamlit.header', 'st.header', (['"""X : Features"""'], {}), "('X : Features')\n", (555, 571), True, 'import streamlit as st\n'), ((576, 587), 'streamlit.write', 'st.write', (['X'], {}), '(X)\n', (584, 587), True, 'import streamlit as st\n'), ((629, 673), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', (['"""Feature Normalization"""'], {}), "('Feature Normalization')\n", (648, 673), True, 'import streamlit as st\n'), ((3334, 3435), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Algorithm"""', "('None', 'K-means Clustering', 'Principal Component Analysis')"], {}), "('Algorithm', ('None', 'K-means Clustering',\n 'Principal Component Analysis'))\n", (3354, 3435), True, 'import streamlit as st\n'), ((722, 760), 'streamlit.header', 'st.header', (['"""X : Features (Normalized)"""'], {}), "('X : Features (Normalized)')\n", (731, 760), True, 'import streamlit as st\n'), ((769, 780), 'streamlit.write', 'st.write', (['X'], {}), '(X)\n', (777, 780), True, 'import streamlit as st\n'), ((849, 860), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (857, 860), True, 'import numpy as np\n'), ((895, 916), 'numpy.random.randn', 'np.random.randn', (['K', 'n'], {}), '(K, n)\n', (910, 916), True, 'import numpy as np\n'), ((1012, 1023), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (1020, 1023), True, 'import numpy as np\n'), ((1588, 1608), 'pandas.DataFrame', 'pd.DataFrame', (['self.X'], {}), '(self.X)\n', (1600, 1608), True, 'import pandas as pd\n'), ((1624, 1646), 'pandas.DataFrame', 'pd.DataFrame', (['self.idx'], {}), '(self.idx)\n', (1636, 1646), True, 'import pandas as pd\n'), ((1662, 1691), 'pandas.concat', 'pd.concat', (['[X_, idx_]'], {'axis': '(1)'}), '([X_, idx_], axis=1)\n', (1671, 1691), True, 'import pandas as pd\n'), ((2677, 2688), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (2685, 2688), True, 'import numpy as np\n'), ((2817, 2828), 'numpy.cov', 'np.cov', (['X.T'], {}), '(X.T)\n', (2823, 2828), True, 'import numpy as np\n'), ((2850, 2887), 'numpy.linalg.svd', 'np.linalg.svd', (['self.covariance_matrix'], {}), '(self.covariance_matrix)\n', (2863, 2887), True, 'import numpy as np\n'), ((2904, 2913), 'numpy.sum', 'np.sum', (['s'], {}), '(s)\n', (2910, 2913), True, 'import numpy as np\n'), ((3141, 3170), 'numpy.dot', 'np.dot', (['self.X', 'self.u[:, :n]'], {}), '(self.X, self.u[:, :n])\n', (3147, 3170), True, 'import numpy as np\n'), ((3569, 3618), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', (['"""Enter K value"""'], {'value': '(3)'}), "('Enter K value', value=3)\n", (3592, 3618), True, 'import streamlit as st\n'), ((3652, 3698), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', (['"""Click Here for training"""'], {}), "('Click Here for training')\n", (3671, 3698), True, 'import streamlit as st\n'), ((4161, 4221), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', (['"""Enter K components value"""'], {'value': 
'(3)'}), "('Enter K components value', value=3)\n", (4184, 4221), True, 'import streamlit as st\n'), ((4246, 4292), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', (['"""Click Here for training"""'], {}), "('Click Here for training')\n", (4265, 4292), True, 'import streamlit as st\n'), ((704, 713), 'numpy.std', 'np.std', (['X'], {}), '(X)\n', (710, 713), True, 'import numpy as np\n'), ((1901, 1938), 'numpy.reshape', 'np.reshape', (["a['a1']", "a['a1'].shape[0]"], {}), "(a['a1'], a['a1'].shape[0])\n", (1911, 1938), True, 'import numpy as np\n'), ((1963, 2000), 'numpy.reshape', 'np.reshape', (["a['a2']", "a['a2'].shape[0]"], {}), "(a['a2'], a['a2'].shape[0])\n", (1973, 2000), True, 'import numpy as np\n'), ((2255, 2275), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig'], {}), '(fig)\n', (2270, 2275), True, 'import streamlit as st\n'), ((3801, 3826), 'streamlit.subheader', 'st.subheader', (['"""Centroids"""'], {}), "('Centroids')\n", (3813, 3826), True, 'import streamlit as st\n'), ((3839, 3850), 'streamlit.write', 'st.write', (['c'], {}), '(c)\n', (3847, 3850), True, 'import streamlit as st\n'), ((3863, 3906), 'streamlit.subheader', 'st.subheader', (['"""Clustering Data with labels"""'], {}), "('Clustering Data with labels')\n", (3875, 3906), True, 'import streamlit as st\n'), ((3919, 3933), 'streamlit.write', 'st.write', (['data'], {}), '(data)\n', (3927, 3933), True, 'import streamlit as st\n'), ((4384, 4415), 'streamlit.header', 'st.header', (['"""Variance Explained"""'], {}), "('Variance Explained')\n", (4393, 4415), True, 'import streamlit as st\n'), ((4476, 4537), 'streamlit.info', 'st.info', (['"""Always Use Feature Normalization when applying PCA"""'], {}), "('Always Use Feature Normalization when applying PCA')\n", (4483, 4537), True, 'import streamlit as st\n'), ((4594, 4624), 'streamlit.header', 'st.header', (['"""X : Feature (PCA)"""'], {}), "('X : Feature (PCA)')\n", (4603, 4624), True, 'import streamlit as st\n'), ((4637, 4652), 'streamlit.write', 'st.write', (['X_pca'], {}), '(X_pca)\n', (4645, 4652), True, 'import streamlit as st\n'), ((692, 702), 'numpy.mean', 'np.mean', (['X'], {}), '(X)\n', (699, 702), True, 'import numpy as np\n'), ((1106, 1117), 'numpy.zeros', 'np.zeros', (['K'], {}), '(K)\n', (1114, 1117), True, 'import numpy as np\n'), ((1384, 1407), 'numpy.mean', 'np.mean', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (1391, 1407), True, 'import numpy as np\n'), ((2378, 2426), 'plotly.express.scatter_3d', 'px.scatter_3d', (['d'], {'x': '"""x"""', 'y': '"""y"""', 'z': '"""z"""', 'color': '"""l"""'}), "(d, x='x', y='y', z='z', color='l')\n", (2391, 2426), True, 'import plotly.express as px\n'), ((2440, 2460), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig'], {}), '(fig)\n', (2455, 2460), True, 'import streamlit as st\n'), ((1183, 1215), 'numpy.sum', 'np.sum', (['((X[i, :] - c[j, :]) ** 2)'], {}), '((X[i, :] - c[j, :]) ** 2)\n', (1189, 1215), True, 'import numpy as np\n'), ((1244, 1259), 'numpy.argmin', 'np.argmin', (['temp'], {}), '(temp)\n', (1253, 1259), True, 'import numpy as np\n'), ((2565, 2615), 'streamlit.error', 'st.error', (['"""Your data is in Higher Dimension state"""'], {}), "('Your data is in Higher Dimension state')\n", (2573, 2615), True, 'import streamlit as st\n')]
|
import spam
def TestSystem():
r = spam.system("ls -l")
assert r == 0
def TestNothingDone():
r = spam.nothing_done()
assert r is None
|
[
"spam.system",
"spam.nothing_done"
] |
[((39, 59), 'spam.system', 'spam.system', (['"""ls -l"""'], {}), "('ls -l')\n", (50, 59), False, 'import spam\n'), ((110, 129), 'spam.nothing_done', 'spam.nothing_done', ([], {}), '()\n', (127, 129), False, 'import spam\n')]
|
"""
Checks to see if Makefile follows standards
"""
import re
import os
import pytest
from pytest_repo_health import add_key_to_metadata
from repo_health import get_file_content
module_dict_key = "makefile"
@pytest.fixture(name='makefile')
def fixture_makefile(repo_path):
"""Fixture containing the text content of Makefile"""
full_path = os.path.join(repo_path, "Makefile")
return get_file_content(full_path)
@add_key_to_metadata((module_dict_key, "upgrade"))
def check_has_upgrade(makefile, all_results):
"""
upgrade: makefile target that upgrades our dependencies to newer released versions
"""
regex_pattern = "upgrade:"
match = re.search(regex_pattern, makefile)
all_results[module_dict_key]["upgrade"] = False
if match is not None:
all_results[module_dict_key]["upgrade"] = True
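# A sketch of how a further check could reuse the same fixture/metadata pattern.
# The "quality" target name below is a hypothetical example, not something the
# standard above actually requires:
@add_key_to_metadata((module_dict_key, "quality"))
def check_has_quality(makefile, all_results):
    """
    quality: makefile target that runs static analysis / linting
    """
    all_results[module_dict_key]["quality"] = re.search("quality:", makefile) is not None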
|
[
"pytest.fixture",
"repo_health.get_file_content",
"re.search",
"os.path.join",
"pytest_repo_health.add_key_to_metadata"
] |
[((213, 244), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""makefile"""'}), "(name='makefile')\n", (227, 244), False, 'import pytest\n'), ((430, 479), 'pytest_repo_health.add_key_to_metadata', 'add_key_to_metadata', (["(module_dict_key, 'upgrade')"], {}), "((module_dict_key, 'upgrade'))\n", (449, 479), False, 'from pytest_repo_health import add_key_to_metadata\n'), ((352, 387), 'os.path.join', 'os.path.join', (['repo_path', '"""Makefile"""'], {}), "(repo_path, 'Makefile')\n", (364, 387), False, 'import os\n'), ((399, 426), 'repo_health.get_file_content', 'get_file_content', (['full_path'], {}), '(full_path)\n', (415, 426), False, 'from repo_health import get_file_content\n'), ((672, 706), 're.search', 're.search', (['regex_pattern', 'makefile'], {}), '(regex_pattern, makefile)\n', (681, 706), False, 'import re\n')]
|
# --------------------------------------------------------------------------------------------------------------------------------
# Imports and Executables
# --------------------------------------------------------------------------------------------------------------------------------
from splinter import Browser
from bs4 import BeautifulSoup as soupy
import pandas as pd
import datetime as dt
# --------------------------------------------------------------------------------------------------------------------------------
# Gathered Data
# --------------------------------------------------------------------------------------------------------------------------------
def scrape_all():
# Set the executable path and initialize the chrome browser in splinter
browser = Browser('chrome', **{'executable_path':'chromedriver'}, headless=True)
    # headless = True, doesn't show the automated script in action
# pylint: disable=unbalanced-tuple-unpacking
# news_title, news_teaser_sum, news_date = mars_news(browser)
news_title, news_teaser_sum = mars_news(browser)
# Runs all separate scraping functions and stores results in a dictionary
mars_total_data = {
"news_title" : news_title,
"news_paragraph_summary" : news_teaser_sum,
# "news_latest_date" : news_date,
# "news_latest_link" : latest_art_link,
"featured_image" : featured_image(browser),
"facts" : mars_facts(),
"img_and_url": get_url(browser),
"last_modified" : dt.datetime.now()}
browser.quit()
return mars_total_data
# --------------------------------------------------------------------------------------------------------------------------------
# News Title and Paragraph
# --------------------------------------------------------------------------------------------------------------------------------
def mars_news(browser):
# defined outside of the function, basically a catalyst to get the function started, like a grandfather variable
# browser function already defined outside
# Visit the mars nasa news site
nasa_url = 'https://mars.nasa.gov/news/'
browser.visit(nasa_url)
# optional delay for loading page
browser.is_element_present_by_css("ul.item_list li.slide", wait_time=1)
# Convert the browser html to a soup object and then quit the browser
parse_html = browser.html
news_soup = soupy(parse_html, 'html.parser')
try:
        # add error handling, especially for AttributeErrors, with try/except
        # with try/except the code keeps running; on an AttributeError, None values are returned instead
        slide_elem = news_soup.select_one('ul.item_list li.slide') # parent element, holds other elements to filter further
# Use the parent element to find the first a tag and save it as `news_title`
news_title = slide_elem.find('div',class_='content_title').get_text()
# news_date = slide_elem.find('div',class_='list_date').get_text()
# latest_art_link = f"https://mars.nasa.gov{slide_elem.select_one('ul li a').get('href')}"
# Use the parent element to find the paragraph text
news_teaser_sum = slide_elem.find('div',class_='article_teaser_body').get_text()
except AttributeError:
return None, None
# return news_title, news_teaser_sum, news_date, latest_art_link
return news_title, news_teaser_sum
# --------------------------------------------------------------------------------------------------------------------------------
# JPL Featured Space Image
# --------------------------------------------------------------------------------------------------------------------------------
# Visit URL
def featured_image(browser):
url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
browser.visit(url)
# Find and click the full image button
full_image_elem = browser.find_by_id('full_image')
full_image_elem.click()
# Find the more info button and click that
browser.is_element_present_by_text('more info', wait_time=1)
more_info_elem = browser.links.find_by_partial_text('more info')
more_info_elem.click()
# Parse the resulting html with soup
parse_html = browser.html
full_img_soup = soupy(parse_html, 'html.parser' )
try:
# find the relative image url
latest_image_full = full_img_soup.select_one('figure.lede a img').get("src")
except AttributeError:
return None
# Use the base url to create an absolute url
latest_imgurl = f"https://www.jpl.nasa.gov{latest_image_full}"
return latest_imgurl
# --------------------------------------------------------------------------------------------------------------------------------
# Mars Fact Table
# --------------------------------------------------------------------------------------------------------------------------------
def mars_facts():
try:
mars_df = pd.read_html('https://space-facts.com/mars/')[0]
except BaseException:
# covers all exception errors
return None
# Assign columns and set index of dataframe
mars_df.columns = ['Description', 'Mars'] # adds column names
mars_df.set_index('Description', inplace=True) # set column index
# Convert dataframe into HTML format, add bootstrap
return mars_df.to_html(classes= "table")
# --------------------------------------------------------------------------------------------------------------------------------
# Mars Hemispheres
# --------------------------------------------------------------------------------------------------------------------------------
def get_url(browser):
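    # Flow of this function (summarising the loop below): reload the USGS
    # Astrogeology search page once per hemisphere name, click the matching
    # link, parse the page, and keep the first 'ul li a' href as that
    # hemisphere's image URL.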
hemis_search_list = ['Cerberus Hemisphere Enhanced',
'Schiaparelli Hemisphere Enhanced',
'Syrtis Major Hemisphere Enhanced',
'Valles Marineris Hemisphere Enhanced']
names_n_url = []
Hemisphere = "Hemisphere"
Urlid = "URL"
for x in range(len(hemis_search_list)):
url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(url)
try:
browser.is_element_present_by_text((f'{hemis_search_list[x]}'), wait_time=2)
hemi_click = browser.links.find_by_partial_text(f'{hemis_search_list[x]}')
hemi_click.click()
parse_html = browser.html
hemi_parse_html = soupy(parse_html, 'html.parser' )
hemi_img_url = hemi_parse_html.select_one('ul li a').get("href")
names_n_url.append({Hemisphere:hemis_search_list[x],Urlid:hemi_img_url})
except IndexError:
return f"Search result not found"
except AttributeError:
return None
# df_hemi_urls = pd.DataFrame.from_dict(names_n_url, orient='columns')
# df_hemi_urls.set_index('Hemisphere', inplace=True)
# df_hemi_urls['URL']=str(df_hemi_urls['URL'])
# pd.set_option('display.max_colwidth', -1)
return names_n_url
if __name__ == "__main__":
# if running as script, print scraped data
print(scrape_all())
|
[
"bs4.BeautifulSoup",
"pandas.read_html",
"splinter.Browser",
"datetime.datetime.now"
] |
[((871, 942), 'splinter.Browser', 'Browser', (['"""chrome"""'], {'headless': '(True)'}), "('chrome', **{'executable_path': 'chromedriver'}, headless=True)\n", (878, 942), False, 'from splinter import Browser\n'), ((2537, 2569), 'bs4.BeautifulSoup', 'soupy', (['parse_html', '"""html.parser"""'], {}), "(parse_html, 'html.parser')\n", (2542, 2569), True, 'from bs4 import BeautifulSoup as soupy\n'), ((4447, 4479), 'bs4.BeautifulSoup', 'soupy', (['parse_html', '"""html.parser"""'], {}), "(parse_html, 'html.parser')\n", (4452, 4479), True, 'from bs4 import BeautifulSoup as soupy\n'), ((1610, 1627), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (1625, 1627), True, 'import datetime as dt\n'), ((5183, 5228), 'pandas.read_html', 'pd.read_html', (['"""https://space-facts.com/mars/"""'], {}), "('https://space-facts.com/mars/')\n", (5195, 5228), True, 'import pandas as pd\n'), ((6742, 6774), 'bs4.BeautifulSoup', 'soupy', (['parse_html', '"""html.parser"""'], {}), "(parse_html, 'html.parser')\n", (6747, 6774), True, 'from bs4 import BeautifulSoup as soupy\n')]
|
''' models for storing different kinds of Activities '''
from django.utils import timezone
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from model_utils.managers import InheritanceManager
from bookwyrm import activitypub
from .base_model import ActivitypubMixin, OrderedCollectionPageMixin
from .base_model import BookWyrmModel, PrivacyLevels
from . import fields
from .fields import image_serializer
class Status(OrderedCollectionPageMixin, BookWyrmModel):
''' any post, like a reply to a review, etc '''
user = fields.ForeignKey(
'User', on_delete=models.PROTECT, activitypub_field='attributedTo')
content = fields.TextField(blank=True, null=True)
mention_users = fields.TagField('User', related_name='mention_user')
mention_books = fields.TagField('Edition', related_name='mention_book')
local = models.BooleanField(default=True)
privacy = models.CharField(
max_length=255,
default='public',
choices=PrivacyLevels.choices
)
sensitive = fields.BooleanField(default=False)
# the created date can't be this, because of receiving federated posts
published_date = fields.DateTimeField(
default=timezone.now, activitypub_field='published')
deleted = models.BooleanField(default=False)
deleted_date = models.DateTimeField(blank=True, null=True)
favorites = models.ManyToManyField(
'User',
symmetrical=False,
through='Favorite',
through_fields=('status', 'user'),
related_name='user_favorites'
)
reply_parent = fields.ForeignKey(
'self',
null=True,
on_delete=models.PROTECT,
activitypub_field='inReplyTo',
)
objects = InheritanceManager()
activity_serializer = activitypub.Note
serialize_reverse_fields = [('attachments', 'attachment')]
deserialize_reverse_fields = [('attachments', 'attachment')]
#----- replies collection activitypub ----#
@classmethod
def replies(cls, status):
''' load all replies to a status. idk if there's a better way
to write this so it's just a property '''
return cls.objects.filter(reply_parent=status).select_subclasses()
@property
def status_type(self):
''' expose the type of status for the ui using activity type '''
return self.activity_serializer.__name__
def to_replies(self, **kwargs):
''' helper function for loading AP serialized replies to a status '''
return self.to_ordered_collection(
self.replies(self),
remote_id='%s/replies' % self.remote_id,
**kwargs
)
def to_activity(self, pure=False):
''' return tombstone if the status is deleted '''
if self.deleted:
return activitypub.Tombstone(
id=self.remote_id,
url=self.remote_id,
deleted=self.deleted_date.isoformat(),
published=self.deleted_date.isoformat()
).serialize()
activity = ActivitypubMixin.to_activity(self)
activity['replies'] = self.to_replies()
# privacy controls
public = 'https://www.w3.org/ns/activitystreams#Public'
mentions = [u.remote_id for u in self.mention_users.all()]
# this is a link to the followers list:
followers = self.user.__class__._meta.get_field('followers')\
.field_to_activity(self.user.followers)
if self.privacy == 'public':
activity['to'] = [public]
activity['cc'] = [followers] + mentions
elif self.privacy == 'unlisted':
activity['to'] = [followers]
activity['cc'] = [public] + mentions
elif self.privacy == 'followers':
activity['to'] = [followers]
activity['cc'] = mentions
if self.privacy == 'direct':
activity['to'] = mentions
activity['cc'] = []
# "pure" serialization for non-bookwyrm instances
if pure:
activity['content'] = self.pure_content
if 'name' in activity:
activity['name'] = self.pure_name
activity['type'] = self.pure_type
activity['attachment'] = [
image_serializer(b.cover) for b in self.mention_books.all() \
if b.cover]
if hasattr(self, 'book'):
activity['attachment'].append(
image_serializer(self.book.cover)
)
return activity
def save(self, *args, **kwargs):
''' update user active time '''
if self.user.local:
self.user.last_active_date = timezone.now()
self.user.save()
return super().save(*args, **kwargs)
class GeneratedNote(Status):
''' these are app-generated messages about user activity '''
@property
def pure_content(self):
''' indicate the book in question for mastodon (or w/e) users '''
message = self.content
books = ', '.join(
'<a href="%s">"%s"</a>' % (book.remote_id, book.title) \
for book in self.mention_books.all()
)
return '%s %s %s' % (self.user.display_name, message, books)
activity_serializer = activitypub.GeneratedNote
pure_type = 'Note'
class Comment(Status):
''' like a review but without a rating and transient '''
book = fields.ForeignKey(
'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook')
@property
def pure_content(self):
''' indicate the book in question for mastodon (or w/e) users '''
return self.content + '<br><br>(comment on <a href="%s">"%s"</a>)' % \
(self.book.remote_id, self.book.title)
activity_serializer = activitypub.Comment
pure_type = 'Note'
class Quotation(Status):
''' like a review but without a rating and transient '''
quote = fields.TextField()
book = fields.ForeignKey(
'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook')
@property
def pure_content(self):
''' indicate the book in question for mastodon (or w/e) users '''
return '"%s"<br>-- <a href="%s">"%s"</a><br><br>%s' % (
self.quote,
self.book.remote_id,
self.book.title,
self.content,
)
activity_serializer = activitypub.Quotation
pure_type = 'Note'
class Review(Status):
''' a book review '''
name = fields.CharField(max_length=255, null=True)
book = fields.ForeignKey(
'Edition', on_delete=models.PROTECT, activitypub_field='inReplyToBook')
rating = fields.IntegerField(
default=None,
null=True,
blank=True,
validators=[MinValueValidator(1), MaxValueValidator(5)]
)
@property
def pure_name(self):
''' clarify review names for mastodon serialization '''
if self.rating:
return 'Review of "%s" (%d stars): %s' % (
self.book.title,
self.rating,
self.name
)
return 'Review of "%s": %s' % (
self.book.title,
self.name
)
@property
def pure_content(self):
''' indicate the book in question for mastodon (or w/e) users '''
return self.content + '<br><br>(<a href="%s">"%s"</a>)' % \
(self.book.remote_id, self.book.title)
activity_serializer = activitypub.Review
pure_type = 'Article'
class Favorite(ActivitypubMixin, BookWyrmModel):
''' fav'ing a post '''
user = fields.ForeignKey(
'User', on_delete=models.PROTECT, activitypub_field='actor')
status = fields.ForeignKey(
'Status', on_delete=models.PROTECT, activitypub_field='object')
activity_serializer = activitypub.Like
def save(self, *args, **kwargs):
''' update user active time '''
self.user.last_active_date = timezone.now()
self.user.save()
super().save(*args, **kwargs)
class Meta:
''' can't fav things twice '''
unique_together = ('user', 'status')
class Boost(Status):
''' boost'ing a post '''
boosted_status = fields.ForeignKey(
'Status',
on_delete=models.PROTECT,
related_name='boosters',
activitypub_field='object',
)
activity_serializer = activitypub.Boost
# This constraint can't work as it would cross tables.
# class Meta:
# unique_together = ('user', 'boosted_status')
class ReadThrough(BookWyrmModel):
''' Store progress through a book in the database. '''
user = models.ForeignKey('User', on_delete=models.PROTECT)
book = models.ForeignKey('Book', on_delete=models.PROTECT)
pages_read = models.IntegerField(
null=True,
blank=True)
start_date = models.DateTimeField(
blank=True,
null=True)
finish_date = models.DateTimeField(
blank=True,
null=True)
def save(self, *args, **kwargs):
''' update user active time '''
self.user.last_active_date = timezone.now()
self.user.save()
super().save(*args, **kwargs)
NotificationType = models.TextChoices(
'NotificationType',
'FAVORITE REPLY MENTION TAG FOLLOW FOLLOW_REQUEST BOOST IMPORT')
class Notification(BookWyrmModel):
''' you've been tagged, liked, followed, etc '''
user = models.ForeignKey('User', on_delete=models.PROTECT)
related_book = models.ForeignKey(
'Edition', on_delete=models.PROTECT, null=True)
related_user = models.ForeignKey(
'User',
on_delete=models.PROTECT, null=True, related_name='related_user')
related_status = models.ForeignKey(
'Status', on_delete=models.PROTECT, null=True)
related_import = models.ForeignKey(
'ImportJob', on_delete=models.PROTECT, null=True)
read = models.BooleanField(default=False)
notification_type = models.CharField(
max_length=255, choices=NotificationType.choices)
class Meta:
        ''' checks if notification is in enum list for valid types '''
constraints = [
models.CheckConstraint(
check=models.Q(notification_type__in=NotificationType.values),
name="notification_type_valid",
)
]
|
[
"django.db.models.ManyToManyField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.utils.timezone.now",
"model_utils.managers.InheritanceManager",
"django.core.validators.MinValueValidator",
"django.db.models.Q",
"django.db.models.BooleanField",
"django.db.models.IntegerField",
"django.db.models.TextChoices",
"django.db.models.DateTimeField",
"django.core.validators.MaxValueValidator"
] |
[((9232, 9339), 'django.db.models.TextChoices', 'models.TextChoices', (['"""NotificationType"""', '"""FAVORITE REPLY MENTION TAG FOLLOW FOLLOW_REQUEST BOOST IMPORT"""'], {}), "('NotificationType',\n 'FAVORITE REPLY MENTION TAG FOLLOW FOLLOW_REQUEST BOOST IMPORT')\n", (9250, 9339), False, 'from django.db import models\n'), ((889, 922), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (908, 922), False, 'from django.db import models\n'), ((937, 1023), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'default': '"""public"""', 'choices': 'PrivacyLevels.choices'}), "(max_length=255, default='public', choices=PrivacyLevels.\n choices)\n", (953, 1023), False, 'from django.db import models\n'), ((1293, 1327), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1312, 1327), False, 'from django.db import models\n'), ((1347, 1390), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1367, 1390), False, 'from django.db import models\n'), ((1407, 1546), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['"""User"""'], {'symmetrical': '(False)', 'through': '"""Favorite"""', 'through_fields': "('status', 'user')", 'related_name': '"""user_favorites"""'}), "('User', symmetrical=False, through='Favorite',\n through_fields=('status', 'user'), related_name='user_favorites')\n", (1429, 1546), False, 'from django.db import models\n'), ((1755, 1775), 'model_utils.managers.InheritanceManager', 'InheritanceManager', ([], {}), '()\n', (1773, 1775), False, 'from model_utils.managers import InheritanceManager\n'), ((8669, 8720), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""User"""'], {'on_delete': 'models.PROTECT'}), "('User', on_delete=models.PROTECT)\n", (8686, 8720), False, 'from django.db import models\n'), ((8732, 8783), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Book"""'], {'on_delete': 'models.PROTECT'}), "('Book', on_delete=models.PROTECT)\n", (8749, 8783), False, 'from django.db import models\n'), ((8801, 8843), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (8820, 8843), False, 'from django.db import models\n'), ((8878, 8921), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (8898, 8921), False, 'from django.db import models\n'), ((8957, 9000), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (8977, 9000), False, 'from django.db import models\n'), ((9445, 9496), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""User"""'], {'on_delete': 'models.PROTECT'}), "('User', on_delete=models.PROTECT)\n", (9462, 9496), False, 'from django.db import models\n'), ((9516, 9581), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Edition"""'], {'on_delete': 'models.PROTECT', 'null': '(True)'}), "('Edition', on_delete=models.PROTECT, null=True)\n", (9533, 9581), False, 'from django.db import models\n'), ((9610, 9706), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""User"""'], {'on_delete': 'models.PROTECT', 'null': '(True)', 'related_name': '"""related_user"""'}), "('User', on_delete=models.PROTECT, null=True, related_name\n ='related_user')\n", (9627, 9706), False, 'from django.db import 
models\n'), ((9740, 9804), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Status"""'], {'on_delete': 'models.PROTECT', 'null': '(True)'}), "('Status', on_delete=models.PROTECT, null=True)\n", (9757, 9804), False, 'from django.db import models\n'), ((9835, 9902), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""ImportJob"""'], {'on_delete': 'models.PROTECT', 'null': '(True)'}), "('ImportJob', on_delete=models.PROTECT, null=True)\n", (9852, 9902), False, 'from django.db import models\n'), ((9923, 9957), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (9942, 9957), False, 'from django.db import models\n'), ((9982, 10048), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'choices': 'NotificationType.choices'}), '(max_length=255, choices=NotificationType.choices)\n', (9998, 10048), False, 'from django.db import models\n'), ((7987, 8001), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (7999, 8001), False, 'from django.utils import timezone\n'), ((9133, 9147), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (9145, 9147), False, 'from django.utils import timezone\n'), ((4716, 4730), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (4728, 4730), False, 'from django.utils import timezone\n'), ((6799, 6819), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(1)'], {}), '(1)\n', (6816, 6819), False, 'from django.core.validators import MaxValueValidator, MinValueValidator\n'), ((6821, 6841), 'django.core.validators.MaxValueValidator', 'MaxValueValidator', (['(5)'], {}), '(5)\n', (6838, 6841), False, 'from django.core.validators import MaxValueValidator, MinValueValidator\n'), ((10227, 10282), 'django.db.models.Q', 'models.Q', ([], {'notification_type__in': 'NotificationType.values'}), '(notification_type__in=NotificationType.values)\n', (10235, 10282), False, 'from django.db import models\n')]
|
from __future__ import absolute_import
from unittest import TestCase
from mock import MagicMock, patch
import socket
import tempfile
import os
import shutil
from sfmutils.consumer import MqConfig
from sfmutils.stream_consumer import StreamConsumer
from sfmutils.supervisor import HarvestSupervisor
class TestStreamConsumer(TestCase):
def setUp(self):
self.patcher = patch("sfmutils.stream_consumer.HarvestSupervisor")
mock_supervisor_class = self.patcher.start()
self.mock_supervisor = MagicMock(spec=HarvestSupervisor)
mock_supervisor_class.side_effect = [self.mock_supervisor]
self.working_path = tempfile.mkdtemp()
self.stream_consumer = StreamConsumer("/opt/sfm/test.py", self.working_path,
mq_config=MqConfig(None, None, None, None,
{"test_queue": [
"harvest.start.test.test_usertimeline",
"harvest.start.test.test_search"]}), )
def tearDown(self):
        self.patcher.stop()
if os.path.exists(self.working_path):
shutil.rmtree(self.working_path)
def test_stop_queue(self):
stop_queue = "test_queue_{}".format(socket.gethostname())
self.assertSetEqual({"test_queue", stop_queue},
set(self.stream_consumer.mq_config.queues.keys()))
self.assertListEqual(["harvest.stop.test.test_usertimeline", "harvest.stop.test.test_search"],
self.stream_consumer.mq_config.queues[stop_queue])
def test_start(self):
message = {
"id": "test:1",
"collection_set": {
"id": "test_collection_set"
}
}
self.stream_consumer.message = message
self.stream_consumer.routing_key = "harvest.start.test.test_usertimeline"
self.stream_consumer.on_message()
        self.mock_supervisor.start.assert_called_once_with(message, "harvest.start.test.test_usertimeline")
def test_remove(self):
message = {
"id": "test:1"
}
self.stream_consumer.message = message
self.stream_consumer.routing_key = "harvest.stop.test.test_usertimeline"
self.stream_consumer.on_message()
        self.mock_supervisor.remove.assert_called_once_with("test:1")
|
[
"sfmutils.consumer.MqConfig",
"os.path.exists",
"mock.patch",
"socket.gethostname",
"tempfile.mkdtemp",
"shutil.rmtree",
"mock.MagicMock"
] |
[((380, 431), 'mock.patch', 'patch', (['"""sfmutils.stream_consumer.HarvestSupervisor"""'], {}), "('sfmutils.stream_consumer.HarvestSupervisor')\n", (385, 431), False, 'from mock import MagicMock, patch\n'), ((516, 549), 'mock.MagicMock', 'MagicMock', ([], {'spec': 'HarvestSupervisor'}), '(spec=HarvestSupervisor)\n', (525, 549), False, 'from mock import MagicMock, patch\n'), ((645, 663), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (661, 663), False, 'import tempfile\n'), ((1205, 1238), 'os.path.exists', 'os.path.exists', (['self.working_path'], {}), '(self.working_path)\n', (1219, 1238), False, 'import os\n'), ((1252, 1284), 'shutil.rmtree', 'shutil.rmtree', (['self.working_path'], {}), '(self.working_path)\n', (1265, 1284), False, 'import shutil\n'), ((1361, 1381), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (1379, 1381), False, 'import socket\n'), ((805, 934), 'sfmutils.consumer.MqConfig', 'MqConfig', (['None', 'None', 'None', 'None', "{'test_queue': ['harvest.start.test.test_usertimeline',\n 'harvest.start.test.test_search']}"], {}), "(None, None, None, None, {'test_queue': [\n 'harvest.start.test.test_usertimeline', 'harvest.start.test.test_search']})\n", (813, 934), False, 'from sfmutils.consumer import MqConfig\n')]
|
from itertools import count
from aocd import lines
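# Overview (inferred from the logic below): '>' cucumbers move east and 'v'
# cucumbers move south in alternating half-steps with wrap-around, and the
# loop stops on the first step in which nothing moves, which matches
# Advent of Code 2021, day 25, part 1.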
rows = len(lines)
cols = len(lines[0])
_map = {}
east = []
south = []
for y, line in enumerate(lines):
for x, sc in enumerate(line):
if sc == '>':
east.append((x,y))
elif sc == 'v':
south.append((x,y))
_map[(x,y)] = sc
for step in count(1):
east_move = False
south_move = False
new_map = {}
new_east = []
for sc in east:
x, y = sc
nx = x + 1 if x + 1 < cols else 0
if _map.get((nx,y), '.') == '.':
new_map[(nx,y)] = '>'
new_east.append((nx, y))
east_move = True
else:
new_map[(x,y)] = '>'
new_east.append((x,y))
east = new_east
new_south = []
for sc in south:
x, y = sc
ny = y + 1 if y + 1 < rows else 0
if new_map.get((x,ny), '.') == '.' and _map.get((x,ny), '.') != 'v':
new_map[(x,ny)] = 'v'
new_south.append((x, ny))
south_move = True
else:
new_map[(x,y)] = 'v'
new_south.append((x,y))
south = new_south
_map = new_map
if not east_move and not south_move:
break
print('Part 1:', step)
|
[
"itertools.count"
] |
[((338, 346), 'itertools.count', 'count', (['(1)'], {}), '(1)\n', (343, 346), False, 'from itertools import count\n')]
|
from __future__ import print_function
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
try:
import cPickle as pickle
except ImportError:
import pickle
import copy
import numpy as np
from src.SpectralAnalysis import utils
from src.SpectralAnalysis import powerspectrum
from src.SpectralAnalysis import mcmc
from src.SpectralAnalysis import mle
from src.SpectralAnalysis import posterior
##########################################
#
# class Bayes: Bayesian data analysis for time series
#
# This class defines a Bayes object that can:
# - pick between two models using likelihood ratio tests
# - find periodicities by picking out the largest power in
# an observation/set of fake periodograms
# - search for QPOs via a model selection approach using LRTs
#
#
# TO DO: Need to add smoothing for picking out narrow signals
#
#
#
class Bayes(object):
""" Bayesian time series analysis
This class defines a Bayes object that can:
- pick between two models using likelihood ratio tests
- find periodicities by picking out the largest power in
an observation/set of fake periodograms
- search for QPOs via a model selection approach using LRTs
Parameters
----------
ps : powerspectrum.Powerspectrum
A periodogram object that is to be searched for QPOs
namestr: string, optional, default "test"
The string that will be used to identify this periodogram when
saving output (text files and plots)
plot: boolean, optional, default True
If True, several diagnostic plots will be saved to disk
m: integer, optional, default 1
If the periodogram used is the result of averaging several
individual periodograms (or bins), this changes the statistical
distributions. Set m to the number of periodograms
averaged to be sure to use the right distribution
Attributes
----------
Examples
--------
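    A minimal, illustrative sketch (``ps`` must be a powerspectrum.PowerSpectrum
    built elsewhere; the model functions and start parameters handed to
    choose_noise_model below are placeholders):
    >>> b = Bayes(ps, namestr="test", plot=False, m=1)
    >>> # b.choose_noise_model(func1, par1, func2, par2, nsim=1000)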
"""
def __init__(self, ps, namestr='test', plot=True, m=1):
assert isinstance(ps, powerspectrum.PowerSpectrum), "ps must be of type powerspectrum.PowerSpectrum!"
self.ps = ps
self.namestr = namestr
self.plot = plot
self.m = m
def choose_noise_model(self, func1, par1, func2, par2,
fitmethod='bfgs',
nchain=10,
niter=5000,
nsim=1000,
covfactor=1.0,
use_emcee=True,
parname=None,
noise1=-1,
noise2=-1,
writefile=True):
"""
Fit two models func1 and func2, compute the likelihood
        ratio at the maximum-a-posteriori parameters.
If func1 and func2 differ in complexity, the less complex
should be func1.
        Then sample the posterior distribution for the simpler
model (func1), pick parameter sets from the posterior
to create fake periodograms.
Fit each fake periodogram with the same models as the data, and
compute the likelihood ratios such that it is possible to
build up a posterior distribution for the likelihood
        ratios and compute a posterior predictive p-value for whether
        the data can be explained sufficiently well by the simpler
        model.
Parameters
----------
func1 : function
Parametric model for the periodogram.
Needs to be a function that takes an array of frequencies and
k parameters, and returns an array of model powers.
The function should include a parameter setting a constant background
level, and this parameter should be last!
par1 : {list, array-like}
Input guesses for the MAP fit using func1.
The number of elements *must* equal the number of parameters k
taken by func1.
func2 : function
Parametric model for the periodogram.
Needs to be a function that takes an array of frequencies and n
parameters, and returns an array of model powers
The function should include a parameter setting a constant background
level, and this parameter should be last!
par2 : {list, array-like}
Input guesses for the MAP fit using func2.
The number of elements *must* equal the number of parameters n
taken by func2.
fitmethod : string, optional, default bfgs
Allows the choice of different minimization algorithms.
Default uses BFGS, which is pretty robust for most purposes.
nchain : int, optional, default 10
The number of chains or walkers to use in MCMC.
For Metropolis-Hastings, use ~10-20 and many samples
For emcee, use as many as you can afford (~500) and fewer samples
niter : int, optional, default 5000
Sets the length of the Markov chains.
For Metropolis-Hastings, this needs to be large (>10000)
For emcee, this can be smaller, but it's a good idea to
verify that the chains have mixed.
nsim : int, optional, default 1000
The number of simulations to use when computing the
posterior distribution of the likelihood ratio.
Note that this also sets the maximum precision of the
posterior predictive p-value (for 1000 simulations, the
p-value can be constrained only to 0.001).
covfactor : float, optional, default 1.0
A tuning parameter for the MCMC step. Used only in
Metropolis-Hastings.
use_emcee : boolean, optional, default True
If True (STRONGLY RECOMMENDED), use the emcee package
for running MCMC. If False, use Metropolis-Hastings.
parname : list, optional, default None
Include a list of strings here to set parameter names for
plotting
noise1, noise2 : int, optional, default -1
The index for the noise parameter in func1 and func2.
In the pre-defined models, this index is *always* -1.
"""
resfilename = self.namestr + "_choosenoisemodel.dat"
resfile = utils.TwoPrint(resfilename)
### make strings for function names from function definition
func1name = "model1"
func2name = "model2"
### step 1: fit both models to observation and compute LRT
psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True)
obslrt = psfit.compute_lrt(func1, par1, func2, par2, noise1=noise1, noise2=noise2, m=self.m)
### get out best fit parameters and associated quantities
fitpars1 = getattr(psfit, func1name + 'fit')
fitpars2 = getattr(psfit, func2name + 'fit')
if self.plot:
### plot the periodogram and best fit models
psfit.plotfits(fitpars1, fitpars2, namestr=self.namestr, log=True)
if self.m == 1:
lpost = posterior.PerPosterior(self.ps, func1)
else:
lpost = posterior.StackPerPosterior(self.ps, func1, self.m)
### Step 2: Set up Markov Chain Monte Carlo Simulations
### of model 1:
mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost,
topt=fitpars1['popt'],
tcov=fitpars1['cov'],
covfactor=covfactor,
niter=niter,
nchain=nchain,
parname=parname,
check_conv=True,
namestr=self.namestr,
use_emcee=use_emcee,
plot=self.plot,
printobj=resfile,
m=self.m)
### Step 3: create fake periodograms out of MCMCs
fakeper = mcobs.simulate_periodogram(nsim=nsim)
### empty lists for simulated quantities of interest:
sim_lrt, sim_deviance, sim_ksp, sim_maxpow, sim_merit, sim_fpeak, sim_y0, sim_srat = [], [], [], [], [], [], [], []
### Step 4: Fit fake periodograms and read out parameters of interest from each fit:
for i, x in enumerate(fakeper):
try:
fitfake = mle.PerMaxLike(x, fitmethod=fitmethod, obs=False)
lrt = fitfake.compute_lrt(func1, par1, func2, par2, noise1=noise1, noise2=noise2, m=self.m)
# resfile('Fitting of fake periodogram ' + str(i) + ' failed! Returning ...')
# return psfit, fakeper, mcobs
sim_pars1 = getattr(fitfake, func1name + 'fit')
sim_pars2 = getattr(fitfake, func2name + 'fit')
# if lrt > 20:
# fitfake.plotfits(sim_pars1, sim_pars2, namestr=self.namestr+'_'+str(i))
sim_lrt.append(lrt)
sim_deviance.append(sim_pars1['deviance'])
sim_ksp.append(sim_pars1['ksp'])
sim_maxpow.append(sim_pars1['maxpow'])
sim_merit.append(sim_pars1['merit'])
sim_fpeak.append(sim_pars1['maxfreq'])
sim_y0.append(sim_pars1['mfit'][sim_pars1['maxind']])
sim_srat.append(sim_pars1['sobs'])
except KeyboardInterrupt:
break
if len(sim_maxpow) == 0:
resfile("Analysis of Burst failed! Returning ...")
return False, False, False
else:
### Step 5: Compute Bayesian posterior probabilities of individual quantities
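            ### Each posterior predictive p-value below is the fraction of simulated
            ### periodograms whose statistic exceeds the observed one; with nsim simulations
            ### the smallest resolvable p-value is therefore 1/nsim.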
p_maxpow = float(len([x for x in sim_maxpow if x > fitpars1['maxpow']])) / float(len(sim_maxpow))
p_deviance = float(len([x for x in sim_deviance if x > fitpars1['deviance']])) / float(len(sim_deviance))
p_ksp = float(len([x for x in sim_ksp if x > fitpars1['ksp']])) / float(len(sim_ksp))
p_merit = float(len([x for x in sim_merit if x > fitpars1['merit']])) / float(len(sim_merit))
p_lrt = float(len([x for x in sim_lrt if x > obslrt])) / float(len(sim_lrt))
p_srat = float(len([x for x in sim_srat if x > fitpars1['sobs']])) / float(len(sim_srat))
resfile('simulated srat: ' + str(sim_srat))
resfile('observed srat: ' + str(fitpars1['sobs']))
resfile("p(LRT) = " + str(p_lrt))
resfile("KSP(obs) = " + str(fitpars1['ksp']))
resfile("mean(sim_ksp) = " + str(np.mean(sim_ksp)))
resfile("Merit(obs) = " + str(fitpars1['merit']))
resfile("mean(sim_merit) = " + str(np.mean(sim_merit)))
resfile("Srat(obs) = " + str(fitpars1['sobs']))
resfile("mean(sim_srat) = " + str(np.mean(sim_srat)))
### Step 6: Compute errors of Bayesian posterior probabilities
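            ### The uncertainties below are binomial standard errors, sqrt(p*(1-p)/n),
            ### with n taken as the number of successful simulations.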
pmaxpow_err = np.sqrt(p_maxpow * (1.0 - p_maxpow) / float(len(sim_ksp)))
pdeviance_err = np.sqrt(p_deviance * (1.0 - p_deviance) / float(len(sim_ksp)))
pksp_err = np.sqrt(p_ksp * (1.0 - p_ksp) / float(len(sim_ksp)))
pmerit_err = np.sqrt(p_merit * (1.0 - p_merit) / float(len(sim_ksp)))
plrt_err = np.sqrt(p_lrt * (1.0 - p_lrt) / float(len(sim_ksp)))
psrat_err = np.sqrt(p_srat * (1.0 - p_srat) / float(len(sim_ksp)))
### Display results on screen and make funky plots
resfile("Bayesian p-value for maximum power P_max = " + str(p_maxpow) + " +/- " + str(pmaxpow_err))
resfile("Bayesian p-value for deviance D = " + str(p_deviance) + " +/- " + str(pdeviance_err))
resfile("Bayesian p-value for KS test: " + str(p_ksp) + " +/- " + str(pksp_err))
resfile("Bayesian p-value for Merit function: " + str(p_merit) + " +/- " + str(pmerit_err))
resfile("Bayesian p-value for the np.sum of residuals: " + str(p_srat) + " +/- " + str(psrat_err))
resfile("Bayesian p-value for Likelihood Ratio: " + str(p_lrt) + " +/- " + str(plrt_err))
if self.plot:
n, bins, patches = plt.hist(sim_lrt, bins=100, normed=True, color="cyan", histtype='stepfilled')
plt.vlines(obslrt, 0.0, 0.8 * max(n), lw=4, color='navy')
plt.savefig(self.namestr + '_lrt.png', format='png')
plt.close()
summary = {"p_lrt": [p_lrt, plrt_err], "p_maxpow": [p_maxpow, pmaxpow_err],
"p_deviance": [p_deviance, pdeviance_err], "p_ksp": [p_ksp, pksp_err],
"p_merit": [p_merit, pmerit_err], "p_srat": [p_srat, psrat_err], "postmean": mcobs.mean,
"posterr": mcobs.std, "postquantiles": mcobs.ci, "rhat": mcobs.rhat, "acor": mcobs.acor,
"acceptance": mcobs.acceptance}
return psfit, fakeper, summary
def find_periodicity(self, func, par,
fitmethod='bfgs',
nchain=10,
niter=5000,
nsim=1000,
covfactor=1.0,
parname=None,
noise=-1,
use_emcee=True,
searchfreq=None):
"""
Find periodicities in observed data and compute significance via MCMCs.
First, fit the periodogram with func and compute the
maximum-a-posteriori (MAP) estimate.
Divide the data by the MAP model; for a perfect data-model fit,
the resulting residuals should follow a chi-square distribution
with two degrees of freedom.
Find the highest power in the residuals and its frequency.
Sample the posterior distribution of parameters for func using MCMC,
and create fake periodograms from samples of the posterior.
For each fake periodogram, find the MAP estimate, divide out the
MAP model and find the highest power in that periodogram.
Create a posterior distribution of maximum powers and compute
a posterior predictive p-value of seeing the maximum power
in the data under the null hypothesis (no QPO).
Parameters
----------
func : function
Parametric model for the periodogram.
Needs to be a function that takes an array of frequencies and
k parameters, and returns an array of model powers.
The function should include a parameter setting a constant background
level, and this parameter should be last!
par : {list, array-like}
Input guesses for the parameters taken by func.
The number of elements in this list or array must match the
number of parameters k taken by func.
fitmethod : string, optional, default "bfgs"
Choose the optimization algorithm used when minimizing the
-log-likelihood. Choices are listed in mle.py, but the default
(bfgs) should be sufficient for most applications.
nchain : int, optional, default 10
The number of chains or walkers to use in MCMC.
For Metropolis-Hastings, use ~10-20 and many samples
For emcee, use as many as you can afford (~500) and fewer samples
niter : int, optional, default 5000
Sets the length of the Markov chains.
For Metropolis-Hastings, this needs to be large (>10000)
For emcee, this can be smaller, but it's a good idea to
verify that the chains have mixed.
nsim : int, optional, default 1000
The number of simulations to use when computing the
posterior distribution of the likelihood ratio.
Note that this also sets the maximum precision of the
posterior predictive p-value (for 1000 simulations, the
p-value can be constrained only to 0.001).
covfactor : float, optional, default 1.0
A tuning parameter for the MCMC step. Used only in
Metropolis-Hastings.
parname : list, optional, default None
Include a list of strings here to set parameter names for
plotting
noise: int, optional, default -1
The index for the noise parameter in func.
In the pre-defined models, this index is *always* -1.
use_emcee : boolean, optional, default True
If True (STRONGLY RECOMMENDED), use the emcee package
for running MCMC. If False, use Metropolis-Hastings.
"""
## the file name where the output will be stored
resfilename = self.namestr + "_findperiodicity_results.dat"
## open the output log file
resfile = utils.TwoPrint(resfilename)
### step 1: fit model to observation
psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True)
fitpars = psfit.mlest(func, par, obs=True, noise=noise, m=self.m)
bindict = fitpars['bindict']
# print('popt: ' + str(fitpars['popt']))
## which posterior do I need to use?
if self.m == 1:
lpost = posterior.PerPosterior(self.ps, func)
else:
lpost = posterior.StackPerPosterior(self.ps, func, self.m)
### Step 2: Set up Markov Chain Monte Carlo Simulations
### of model 1:
mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost,
topt=fitpars['popt'],
tcov=fitpars['cov'],
covfactor=covfactor,
niter=niter,
nchain=nchain,
parname=parname,
check_conv=True,
namestr=self.namestr,
use_emcee=True,
plot=self.plot,
printobj=resfile,
m=self.m)
### Step 3: create fake periodograms out of MCMCs
fakeper = mcobs.simulate_periodogram(nsim=nsim)
sim_pars_all, sim_deviance, sim_ksp, sim_fpeak, sim_srat, \
sim_maxpow, sim_merit, sim_y0, sim_s3max, sim_s5max, sim_s11max = [], [], [], [], [], [], [], [], [], [], []
bmax = int(self.ps.freq[-1] / (2.0 * (self.ps.freq[1] - self.ps.freq[0])))
bins = [1, 3, 5, 7, 10, 15, 20, 30, 50, 70, 100, 200, 300, 500, 700, 1000]
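        ### bins: rebinning factors (in units of the native frequency resolution) used to
        ### search for broader features; nbins below assumes bindict stores roughly four
        ### entries per binning factor, so only the binnings actually computed are looped over.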
binlist = [r for r in fitpars["bindict"].keys()]
        nbins = len(binlist) // 4
sain = copy.copy(fitpars['popt'])
# print('popt2: ' + str(fitpars['popt']))
### Step 4: Fit fake periodograms:
for i, x in enumerate(fakeper):
try:
# print('popt' + str(i) + 'a : ' + str(fitpars['popt']))
fitfake = mle.PerMaxLike(x, fitmethod=fitmethod, obs=False)
# print('popt' + str(i) + 'b : ' + str(fitpars['popt']))
sim_pars = fitfake.mlest(func, sain, obs=False, noise=noise, m=self.m)
# print('popt' + str(i) + 'c : ' + str(fitpars['popt']))
sim_pars_all.append(sim_pars)
sim_deviance.append(sim_pars['deviance'])
sim_ksp.append(sim_pars['ksp'])
sim_maxpow.append(sim_pars['maxpow'])
sim_merit.append(sim_pars['merit'])
sim_fpeak.append(sim_pars['maxfreq'])
sim_y0.append(sim_pars['mfit'][sim_pars['maxind']])
sim_srat.append(sim_pars['sobs'])
sim_s3max.append(sim_pars['s3max'])
sim_s5max.append(sim_pars['s5max'])
sim_s11max.append(sim_pars['s11max'])
except KeyboardInterrupt:
break
# except:
# print("Simulation failed! Continuing ...")
# continue
# print('popt' + str(i) + 'd : ' + str(fitpars['popt']))
# print('popt3: ' + str(fitpars['popt']))
### upper limit is the power in the sorted array where p_maxpow would be 0.05
### i.e. when only 0.05*nsim simulations are higher than this
### note: sometimes simulations fail, therefore the 5% limit should be 0.05*len(sims)
fiveperlim = int(0.05 * len(sim_maxpow))
if fiveperlim == 0:
resfile('Warning! Too few simulations to compute five percent limit reliably!')
fiveperlim = 1
ninetyfiveperlim = len(sim_maxpow) - fiveperlim
# print('popt4: ' + str(fitpars['popt']))
bindicts = [x["bindict"] for x in sim_pars_all]
### get out binned powers:
maxpows_all = {}
binprob = {}
for b in bins[:nbins]:
binps = fitpars['bindict']['bin' + str(b)]
bmaxpow = np.array([x["bmax" + str(b)] for x in bindicts])
maxpows_all["bin" + str(b)] = bmaxpow
bindict['sim_bmaxpow' + str(b)] = bmaxpow
p_bmaxpow = float(len([x for x in bmaxpow if x > fitpars['bindict']["bmax" + str(b)]])) / float(
len(bmaxpow))
bindict["p_maxpow" + str(b)] = p_bmaxpow
bmaxpow_err = np.sqrt(p_bmaxpow * (1.0 - p_bmaxpow) / float(len(bmaxpow)))
bindict['p_maxpow' + str(b) + 'err'] = bmaxpow_err
sim_bmaxpow_sort = np.msort(bmaxpow)
### note: this is the limit for 2*I/S --> multiply by S to get powers for each frequency
### Like everything else, this is n-trial corrected!
# print('len(bmaxpow_sort) : ' + str(len(sim_bmaxpow_sort)))
resfile('ninetyfiveperlim: ' + str(ninetyfiveperlim))
bmaxpow_ul = sim_bmaxpow_sort[ninetyfiveperlim]
bindict['bmax' + str(b) + '_ul'] = bmaxpow_ul
resfile('The posterior p-value for the maximum residual power for a binning of ' + str(
self.ps.df * b) + 'Hz is p = ' + str(p_bmaxpow) + ' +/- ' + str(bmaxpow_err))
resfile('The corresponding value of the T_R statistic at frequency f = ' + str(
fitpars["bindict"]["bmaxfreq" + str(b)]) + ' is 2I/S = ' + str(fitpars['bindict']["bmax" + str(b)]))
resfile('The upper limit on the T_R statistic is 2I/S = ' + str(bmaxpow_ul))
### now turn upper limit into an rms amplitude:
## first compute broadband noise model for binned frequencies
bintemplate = func(fitpars['bindict']['bin' + str(b)].freq, *fitpars['popt'])
resfile("bintemplate[0]: " + str(bintemplate[0]))
## then compute upper limits for powers I_j depending on frequency
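            ## (the T_R = 2*I/S upper limit is converted into an upper limit on the excess
            ##  power above the noise model: P_j = bmaxpow_ul * S_j / 2 - S_j, with S_j the model power)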
binpowers = bmaxpow_ul * bintemplate / 2.0 - bintemplate
## now compute rms amplitude at 40, 70, 100 and 300 Hz
## first, convert powers into rms normalization, if they're not already
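            ## (for Leahy-normalized powers this divides by df*b*nphots; for variance-normalized
            ##  powers it rescales by n^2 / (df*b*nphots^2), as implemented below)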
if self.ps.norm == 'leahy':
binpowers = binpowers / (self.ps.df * b * self.ps.nphots)
elif self.ps.norm == 'variance':
binpowers = binpowers * self.ps.n ** 2.0 / (self.ps.df * b * self.ps.nphots ** 2.0)
# print('len(binps.freq): ' + str(len(binps.freq)))
# print('len(binpowers): ' + str(len(binpowers)))
if searchfreq is None:
searchfreq = [40.0, 70.0, 100.0, 300.0, 500.0, 1000.0]
## for 40 Hz:
print(searchfreq)
for bc in searchfreq:
if bc > (binps.freq[1] - binps.freq[0]):
bind = np.searchsorted(binps.freq, bc) - 1
bpow = binpowers[bind]
brms = np.sqrt(bpow * b * self.ps.df)
resfile('The upper limit on the power at ' + str(bc) +
'Hz for a binning of ' + str(b) + ' is P = ' +
str(bpow * (self.ps.df * b * self.ps.nphots)))
resfile('The upper limit on the rms amplitude at ' + str(bc) +
'Hz for a binning of ' + str(b) + ' is rms = ' + str(brms))
bindict['bin' + str(b) + '_ul_%.4fHz' % bc] = brms
else:
continue
### Step 5: Compute Bayesian posterior probabilities of individual quantities
p_maxpow = float(len([x for x in sim_maxpow if x > fitpars['maxpow']])) / float(len(sim_maxpow))
p_deviance = float(len([x for x in sim_deviance if x > fitpars['deviance']])) / float(len(sim_deviance))
p_ksp = float(len([x for x in sim_ksp if x > fitpars['ksp']])) / float(len(sim_ksp))
p_merit = float(len([x for x in sim_merit if x > fitpars['merit']])) / float(len(sim_merit))
p_srat = float(len([x for x in sim_srat if x > fitpars['sobs']])) / float(len(sim_srat))
p_s3max = float(len([x for x in sim_s3max if x > fitpars['s3max']])) / float(len(sim_s3max))
p_s5max = float(len([x for x in sim_s5max if x > fitpars['s5max']])) / float(len(sim_s5max))
p_s11max = float(len([x for x in sim_s11max if x > fitpars['s11max']])) / float(len(sim_s11max))
### sort maximum powers from lowest to highest
sim_maxpow_sort = np.msort(sim_maxpow)
sim_s3max_sort = np.msort(sim_s3max)
sim_s5max_sort = np.msort(sim_s5max)
sim_s11max_sort = np.msort(sim_s11max)
### note: this is the limit for 2*I/S --> multiply by S to get powers for each frequency
### Like everything else, this is n-trial corrected!
maxpow_ul = sim_maxpow_sort[ninetyfiveperlim]
### Step 6: Compute errors of Bayesian posterior probabilities
pmaxpow_err = np.sqrt(p_maxpow * (1.0 - p_maxpow) / float(len(sim_ksp)))
pdeviance_err = np.sqrt(p_deviance * (1.0 - p_deviance) / float(len(sim_ksp)))
pksp_err = np.sqrt(p_ksp * (1.0 - p_ksp) / float(len(sim_ksp)))
pmerit_err = np.sqrt(p_merit * (1.0 - p_merit) / float(len(sim_ksp)))
psrat_err = np.sqrt(p_srat * (1.0 - p_srat) / float(len(sim_ksp)))
ps3max_err = np.sqrt(p_s3max * (1.0 - p_s3max) / float(len(sim_ksp)))
ps5max_err = np.sqrt(p_s5max * (1.0 - p_s5max) / float(len(sim_ksp)))
ps11max_err = np.sqrt(p_s11max * (1.0 - p_s11max) / float(len(sim_ksp)))
### Display results on screen and make funky plots
resfile("Bayesian p-value for maximum power P_max = " + str(p_maxpow) + " +/- " + str(pmaxpow_err))
# resfile('Upper limit on maximum signal power P_max_ul = ' + str(maxpow_ul))
resfile("Bayesian p-value for maximum power P_max = " + str(p_s3max) + " +/- " + str(ps3max_err))
# resfile('Upper limit on maximum signal power P_max_ul = ' + str(s3max_ul))
resfile("Bayesian p-value for maximum power P_max = " + str(p_s5max) + " +/- " + str(ps5max_err))
# resfile('Upper limit on maximum signal power P_max_ul = ' + str(s5max_ul))
resfile("Bayesian p-value for maximum power P_max = " + str(p_s11max) + " +/- " + str(ps11max_err))
# resfile('Upper limit on maximum signal power P_max_ul = ' + str(s11max_ul))
resfile("Bayesian p-value for deviance D = " + str(p_deviance) + " +/- " + str(pdeviance_err))
resfile("Bayesian p-value for KS test: " + str(p_ksp) + " +/- " + str(pksp_err))
resfile("Bayesian p-value for Merit function: " + str(p_merit) + " +/- " + str(pmerit_err))
resfile("Bayesian p-value for the np.sum of residuals: " + str(p_srat) + " +/- " + str(psrat_err))
if self.plot:
plt.subplot(2, 2, 1)
n, bins, patches = plt.hist(sim_maxpow, bins=100, normed=True, color="cyan", histtype='stepfilled')
xmin, xmax = min(min(bins), fitpars['maxpow']) / 1.2, max(25, fitpars['maxpow'] * 1.2)
plt.axis([xmin, xmax, 0.0, max(n)])
plt.vlines(fitpars['maxpow'], 0.0, max(n), lw=2, color='navy')
plt.title('unsmoothed data', fontsize=12)
plt.subplot(2, 2, 2)
n, bins, patches = plt.hist(sim_s3max, bins=100, normed=True, color="cyan", histtype='stepfilled')
xmin, xmax = min(min(bins), fitpars['s3max']) / 1.2, max(25, fitpars['s3max'] * 1.2)
plt.axis([xmin, xmax, 0.0, max(n)])
plt.vlines(fitpars['s3max'], 0.0, max(n), lw=2, color='navy')
plt.title('smoothed (3) data', fontsize=12)
plt.subplot(2, 2, 3)
            n, bins, patches = plt.hist(sim_s5max, bins=100, normed=True, color="cyan", histtype='stepfilled')
xmin, xmax = min(min(bins), fitpars['s5max']) / 1.2, max(25, fitpars['s5max'] * 1.2)
plt.axis([xmin, xmax, 0.0, max(n)])
plt.vlines(fitpars['s5max'], 0.0, max(n), lw=2, color='navy')
plt.title('smoothed (5) data/model outlier', fontsize=12)
plt.subplot(2, 2, 4)
            n, bins, patches = plt.hist(sim_s11max, bins=100, normed=True, color="cyan", histtype='stepfilled')
            xmin, xmax = min(min(bins), fitpars['s11max']) / 1.2, max(25, fitpars['s11max'] * 1.2)
plt.axis([xmin, xmax, 0.0, max(n)])
plt.vlines(fitpars['s11max'], 0.0, max(n), lw=2, color='navy')
plt.title('smoothed (11) data', fontsize=12)
plt.savefig(self.namestr + '_maxpow.png', format='png')
plt.close()
results = {"fitpars": fitpars, 'bindict': bindict, 'maxpows_all': maxpows_all, 'mcobs': mcobs,
'p_maxpow': [sim_maxpow, p_maxpow, pmaxpow_err], 'maxpow_ul': maxpow_ul,
'p_s3max': [sim_s3max, p_s3max, ps3max_err], 'p_s5max': [sim_s5max, p_s5max, ps5max_err],
'p_s11max': [sim_s11max, p_s11max, ps11max_err], 'p_merit': [p_merit, pmerit_err],
                   'p_srat': [p_srat, psrat_err], 'p_deviance': [p_deviance, pdeviance_err],
"postmean": mcobs.mean, "posterr": mcobs.std, "postquantiles": mcobs.ci, "rhat": mcobs.rhat,
"acor": mcobs.acor, "acceptance": mcobs.acceptance}
return results
def find_qpo(self, func, ain,
fitmethod='constbfgs',
nchain=10,
niter=5000,
nsim=1000,
covfactor=1.0,
parname=None,
plotstr=None,
use_emcee=True):
"""
Find QPOs by fitting a QPO + background model to *every*
frequency.
NOTE: I rarely ever use this because it's really computationally
expensive.
Parameters
----------
func : function
Parametric model for the periodogram.
Needs to be a function that takes an array of frequencies and
k parameters, and returns an array of model powers.
The function should include a parameter setting a constant background
level, and this parameter should be last!
        ain : {list, array-like}
Input guesses for the parameters taken by func.
The number of elements in this list or array must match the
number of parameters k taken by func.
        fitmethod : string, optional, default "constbfgs"
            Choose the optimization algorithm used when minimizing the
            -log-likelihood. Choices are listed in mle.py, but the default
            (constbfgs) should be sufficient for most applications.
nchain : int, optional, default 10
The number of chains or walkers to use in MCMC.
For Metropolis-Hastings, use ~10-20 and many samples
For emcee, use as many as you can afford (~500) and fewer samples
niter : int, optional, default 5000
Sets the length of the Markov chains.
For Metropolis-Hastings, this needs to be large (>10000)
For emcee, this can be smaller, but it's a good idea to
verify that the chains have mixed.
nsim : int, optional, default 1000
The number of simulations to use when computing the
posterior distribution of the likelihood ratio.
Note that this also sets the maximum precision of the
posterior predictive p-value (for 1000 simulations, the
p-value can be constrained only to 0.001).
covfactor : float, optional, default 1.0
A tuning parameter for the MCMC step. Used only in
Metropolis-Hastings.
parname : list, optional, default None
Include a list of strings here to set parameter names for
plotting
        plotstr : string, optional, default None
            Base string for the file names of diagnostic plots produced
            during the QPO search. If None, self.namestr is used.
use_emcee : boolean, optional, default True
If True (STRONGLY RECOMMENDED), use the emcee package
for running MCMC. If False, use Metropolis-Hastings.
"""
        if plotstr is None:
plotstr = self.namestr
funcname = str(func).split()[1]
# print("<< --- len(self.ps beginning): " + str(len(self.ps.ps)))
### step 1: fit model to observation
psfit = mle.PerMaxLike(self.ps, fitmethod=fitmethod, obs=True)
fitpars = psfit.mlest(func, ain, obs=True, noise=-1, m=self.m)
# print("<< --- len(self.ps beginning): " + str(len(self.ps.ps)))
if self.m == 1:
lpost = posterior.PerPosterior(self.ps, func)
else:
lpost = posterior.StackPerPosterior(self.ps, func, self.m)
### Step 2: Set up Markov Chain Monte Carlo Simulations
### of model 1:
mcobs = mcmc.MarkovChainMonteCarlo(self.ps.freq, self.ps.ps, lpost,
topt=fitpars['popt'],
tcov=fitpars['cov'],
covfactor=covfactor,
niter=niter,
nchain=nchain,
parname=parname,
check_conv=True,
namestr=self.namestr,
use_emcee=True,
plot=self.plot,
m=self.m)
### find optimum QPO values for the real data
obslrt, optpars, qpopars = psfit.find_qpo(func, ain, plot=True, obs=True, plotname=self.namestr + '_loglikes')
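        ### obslrt is the observed likelihood ratio between the noise-only fit and
        ### (presumably) the best QPO+noise fit over all trial frequencies; the same
        ### search is repeated below on simulated periodograms to calibrate its significance.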
### simulate lots of realizations of the broadband noise model from MCMCs
funcfake = mcobs.simulate_periodogram(nsim=nsim)
### empty lists to store simulated LRTS and parameters in
sim_lrt, sim_optpars, sim_qpopars, sim_deviance, sim_ksp, sim_merit, sim_srat = [], [], [], [], [], [], []
simno = 0
### run QPO search on each and return likelihood ratios parameters for each
for x in funcfake:
try:
simno = simno + 1
sim_psfit = mle.PerMaxLike(x, fitmethod='constbfgs', obs=False)
slrt, soptpars, sqpopars = sim_psfit.find_qpo(func, ain, obs=False, plot=True,
plotname=plotstr + '_sim' + str(simno) + '_qposearch')
sim_lrt.append(slrt)
sim_optpars.append(soptpars)
sim_qpopars.append(sqpopars)
sim_deviance.append(soptpars['deviance'])
sim_ksp.append(soptpars['ksp'])
sim_merit.append(soptpars['merit'])
sim_srat.append(soptpars['sobs'])
except KeyboardInterrupt:
break
### Step 5: Compute Bayesian posterior probabilities of individual quantities
p_deviance = float(len([x for x in sim_deviance if x > optpars['deviance']])) / float(len(sim_deviance))
p_ksp = float(len([x for x in sim_ksp if x > optpars['ksp']])) / float(len(sim_ksp))
p_merit = float(len([x for x in sim_merit if x > optpars['merit']])) / float(len(sim_merit))
p_lrt = float(len([x for x in sim_lrt if x > obslrt])) / float(len(sim_lrt))
p_srat = float(len([x for x in sim_srat if x > optpars['sobs']])) / float(len(sim_srat))
print("p(LRT) = " + str(p_lrt))
# print("LRT(obs) = " + str(obslrt))
# print("mean(sim_lrt) = " + str(np.mean(sim_lrt)))
# print("Deviance(obs) = " + str(fitpars1['deviance']))
# print("mean(sim_deviance) = " + str(np.mean(sim_deviance)))
print("KSP(obs) = " + str(optpars['ksp']))
print("mean(sim_ksp) = " + str(np.mean(sim_ksp)))
print("Merit(obs) = " + str(optpars['merit']))
print("mean(sim_merit) = " + str(np.mean(sim_merit)))
print("Srat(obs) = " + str(optpars['sobs']))
print("mean(sim_srat) = " + str(np.mean(sim_srat)))
### Step 6: Compute errors of Bayesian posterior probabilities
pdeviance_err = np.sqrt(p_deviance * (1.0 - p_deviance) / float(len(sim_ksp)))
pksp_err = np.sqrt(p_ksp * (1.0 - p_ksp) / float(len(sim_ksp)))
pmerit_err = np.sqrt(p_merit * (1.0 - p_merit) / float(len(sim_ksp)))
plrt_err = np.sqrt(p_lrt * (1.0 - p_lrt) / float(len(sim_ksp)))
psrat_err = np.sqrt(p_srat * (1.0 - p_srat) / float(len(sim_ksp)))
### Display results on screen and make funky plots
print("Bayesian p-value for deviance D = " + str(p_deviance) + " +/- " + str(pdeviance_err))
print("Bayesian p-value for KS test: " + str(p_ksp) + " +/- " + str(pksp_err))
print("Bayesian p-value for Merit function: " + str(p_merit) + " +/- " + str(pmerit_err))
print("Bayesian p-value for the np.sum of residuals: " + str(p_srat) + " +/- " + str(psrat_err))
print("Bayesian p-value for Likelihood Ratio: " + str(p_lrt) + " +/- " + str(plrt_err))
if self.plot:
n, bins, patches = plt.hist(sim_lrt, bins=100, normed=True, histtype='stepfilled')
plt.vlines(obslrt, 0.0, 0.8 * max(n), lw=4, color='m')
plt.savefig(self.namestr + '_qpolrt.png', format='png')
plt.close()
summary = {"p_lrt": [p_lrt, plrt_err],
"p_deviance": [p_deviance, pdeviance_err],
"p_ksp": [p_ksp, pksp_err],
"p_merit": [p_merit, pmerit_err],
"p_srat": [p_srat, psrat_err],
"postmean": mcobs.mean,
"posterr": mcobs.std,
"postquantiles": mcobs.ci,
"rhat": mcobs.rhat,
"acor": mcobs.acor,
"acceptance": mcobs.acceptance}
return summary
def print_summary(self, summary):
"""
Print a summary of the results.
NOT USED!
"""
try:
keys = summary.keys()
except AttributeError:
raise Exception("Summary must be a dictionary!")
probs = dict()
postpars = dict()
### sort out p-values and posterior distribution of parameters
for x in keys:
if x[:2] == 'p_':
probs[x] = summary[x]
else:
postpars[x] = summary[x]
print("The ensemble acceptance rate is " + str(postpars["acceptance"]) + " .")
try:
print("The autocorrelation times are: " + str(postpars["acor"]))
except KeyError:
print("Module Acor not found. Cannot compute autocorrelation times for the parameters")
for i, x in enumerate(postpars["rhat"]):
print("The $R_hat$ value for Parameter " + str(i) + " is " + str(x))
### print posterior summary of parameters:
print("-- Posterior Summary of Parameters: \n")
print("parameter \t mean \t\t sd \t\t 5% \t\t 95% \n")
print("---------------------------------------------\n")
for i in range(len(postpars['postmean'])):
print("theta[" + str(i) + "] \t " + str(postpars['postmean'][i]) + "\t" + str(
postpars['posterr'][i]) + "\t" + str(postpars['postquantiles'][i][0]) + "\t" + str(
postpars["postquantiles"][i][1]) + "\n")
for x in probs.keys():
if x == 'p_lrt':
print("Bayesian p-value for Likelihood Ratio: " + str(probs[x][0]) + " +/- " + str(probs[x][1]))
elif x == 'p_deviance':
print("Bayesian p-value for deviance D = " + str(probs[x][0]) + " +/- " + str(probs[x][1]))
elif x == 'p_ksp':
print("Bayesian p-value for KS test: " + str(probs[x][0]) + " +/- " + str(probs[x][1]))
elif x == 'p_merit':
print("Bayesian p-value for Merit function: " + str(probs[x][0]) + " +/- " + str(probs[x][1]))
elif x == 'p_srat':
print("Bayesian p-value for the sum of residuals: " + str(probs[x][0]) + " +/- " + str(probs[x][1]))
elif x == 'p_maxpow':
if "fitpars" in probs.keys():
print("Highest [unsmoothed] data/model outlier at frequency F=" + str(
probs["fitpars"]["maxfreq"]) + "Hz with power P=" + str(probs["fitpars"]["maxpow"]))
print("Bayesian p-value for the highest [unsmoothed] data/model outlier: " + str(
probs[x][0]) + " +/- " + str(probs[x][1]))
elif x == 'p_s3max':
if "fitpars" in probs.keys():
print("Highest [3 bin smoothed] data/model outlier at frequency F=" + str(
probs["fitpars"]["s3maxfreq"]) + "Hz with power P=" + str(probs["fitpars"]["s3max"]))
print("Bayesian p-value for the highest [3 bin smoothed] data/model outlier: " + str(
probs[x][0]) + " +/- " + str(probs[x][1]))
elif x == 'p_s5max':
if "fitpars" in probs.keys():
print("Highest [5 bin smoothed] data/model outlier at frequency F=" + str(
probs["fitpars"]["s5maxfreq"]) + "Hz with power P=" + str(probs["fitpars"]["s5max"]))
print("Bayesian p-value for the highest [5 bin smoothed] data/model outlier: " + str(
probs[x][0]) + " +/- " + str(probs[x][1]))
elif x == 'p_s11max':
if "fitpars" in probs.keys():
print("Highest [11 bin smoothed] data/model outlier at frequency F=" + str(
probs["fitpars"]["s11maxfreq"]) + "Hz with power P=" + str(probs["fitpars"]["s11max"]))
print("Bayesian p-value for the highest [11 bin smoothed] data/model outlier: " + str(
probs[x][0]) + " +/- " + str(probs[x][1]))
return
def write_summary(self, summary, namestr=None):
"""
Write a summary of the analysis to file.
NOT USED!
:param summary:
:param namestr:
:return:
"""
if not namestr:
namestr = self.namestr
try:
keys = summary.keys()
except AttributeError:
raise Exception("Summary must be a dictionary!")
probs = dict()
postpars = dict()
### sort out p-values and posterior distribution of parameters
for x in keys:
if x[:2] == 'p_':
probs[x] = summary[x]
else:
postpars[x] = summary[x]
picklefile = open(namestr + "_summary_pickle.dat", "w")
pickle.dump(summary, picklefile)
picklefile.close()
file = open(namestr + "_summary.dat", "w")
file.write("The ensemble acceptance rate is " + str(postpars["acceptance"]) + " .\n")
try:
file.write("The autocorrelation times are: " + str(postpars["acor"]) + "\n")
except KeyError:
file.write("Module Acor not found. Cannot compute autocorrelation times for the parameters \n")
for i, x in enumerate(postpars["rhat"]):
file.write("The $R_hat$ value for Parameter " + str(i) + " is " + str(x) + "\n")
### print posterior summary of parameters:
file.write("-- Posterior Summary of Parameters: \n")
file.write("parameter \t mean \t\t sd \t\t 5% \t\t 95% \n")
file.write("---------------------------------------------\n")
for i in range(len(postpars['postmean'])):
file.write("theta[" + str(i) + "] \t " + str(postpars['postmean'][i]) + "\t" + str(
postpars['posterr'][i]) + "\t" + str(postpars['postquantiles'][i][0]) + "\t" + str(
postpars["postquantiles"][i][1]) + "\n")
for x in probs.keys():
if x == 'p_lrt':
file.write(
"Bayesian p-value for Likelihood Ratio: " + str(probs[x][0]) + " +/- " + str(probs[x][1]) + "\n")
elif x == 'p_deviance':
file.write("Bayesian p-value for deviance D = " + str(probs[x][0]) + " +/- " + str(probs[x][1]) + "\n")
elif x == 'p_ksp':
file.write("Bayesian p-value for KS test: " + str(probs[x][0]) + " +/- " + str(probs[x][1]) + "\n")
elif x == 'p_merit':
file.write(
"Bayesian p-value for Merit function: " + str(probs[x][0]) + " +/- " + str(probs[x][1]) + "\n")
elif x == 'p_srat':
file.write("Bayesian p-value for the sum of residuals: " + str(probs[x][0]) + " +/- " + str(
probs[x][1]) + "\n")
elif x == 'p_maxpow':
file.write("Bayesian p-value for the highest [unsmoothed] data/model outlier: " + str(
probs[x][0]) + " +/- " + str(probs[x][1]) + "\n")
file.write(
"Upper limit for highest [unsmoothed] data/model outlier: " + str(summary['maxpow_ul']) + "\n")
elif x == 'p_s3max':
file.write("Bayesian p-value for the highest [3 bin smoothed] data/model outlier: " + str(
probs[x][0]) + " +/- " + str(probs[x][1]) + "\n")
file.write(
"Upper limit for highest [unsmoothed] data/model outlier: " + str(summary['s3max_ul']) + "\n")
elif x == 'p_s5max':
file.write("Bayesian p-value for the highest [5 bin smoothed] data/model outlier: " + str(
probs[x][0]) + " +/- " + str(probs[x][1]) + "\n")
file.write(
"Upper limit for highest [unsmoothed] data/model outlier: " + str(summary['s5max_ul']) + "\n")
elif x == 'p_s11max':
file.write("Bayesian p-value for the highest [11 bin smoothed] data/model outlier: " + str(
probs[x][0]) + " +/- " + str(probs[x][1]) + "\n")
file.write(
"Upper limit for highest [unsmoothed] data/model outlier: " + str(summary['s11max_ul']) + "\n")
return
def plot_posteriors(namestr='test', **pars):
        plotkeys = list(pars.keys())
N = len(plotkeys)
### number of parameters
fig = plt.figure(figsize=(2, N / 2 + 1))
plt.subplots_adjust(top=0.95, bottom=0.05, left=0.05, right=0.95, wspace=0.2, hspace=0.2)
for i in range(N):
            ax = fig.add_subplot(N // 2 + 1, 2, i + 1)
            n, bins, patches = ax.hist(pars[plotkeys[i]][0], 30)
            ax.vlines(pars[plotkeys[i]][0], 0.0, 0.8 * max(n), lw=4)
            ax.text(pars[plotkeys[i]][0] + 0.01 * pars[plotkeys[i]][0], 0.8 * max(n), "p = " + str(pars[plotkeys[i]][1]))
            ax.set_title("Posterior for " + plotkeys[i])
return
|
[
"matplotlib.pyplot.title",
"src.SpectralAnalysis.posterior.StackPerPosterior",
"pickle.dump",
"src.SpectralAnalysis.utils.TwoPrint",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.close",
"src.SpectralAnalysis.mle.PerMaxLike",
"src.SpectralAnalysis.posterior.PerPosterior",
"copy.copy",
"numpy.searchsorted",
"matplotlib.pyplot.figure",
"numpy.mean",
"src.SpectralAnalysis.mcmc.MarkovChainMonteCarlo",
"matplotlib.pyplot.subplots_adjust",
"numpy.msort",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((6349, 6376), 'src.SpectralAnalysis.utils.TwoPrint', 'utils.TwoPrint', (['resfilename'], {}), '(resfilename)\n', (6363, 6376), False, 'from src.SpectralAnalysis import utils\n'), ((6589, 6643), 'src.SpectralAnalysis.mle.PerMaxLike', 'mle.PerMaxLike', (['self.ps'], {'fitmethod': 'fitmethod', 'obs': '(True)'}), '(self.ps, fitmethod=fitmethod, obs=True)\n', (6603, 6643), False, 'from src.SpectralAnalysis import mle\n'), ((7352, 7640), 'src.SpectralAnalysis.mcmc.MarkovChainMonteCarlo', 'mcmc.MarkovChainMonteCarlo', (['self.ps.freq', 'self.ps.ps', 'lpost'], {'topt': "fitpars1['popt']", 'tcov': "fitpars1['cov']", 'covfactor': 'covfactor', 'niter': 'niter', 'nchain': 'nchain', 'parname': 'parname', 'check_conv': '(True)', 'namestr': 'self.namestr', 'use_emcee': 'use_emcee', 'plot': 'self.plot', 'printobj': 'resfile', 'm': 'self.m'}), "(self.ps.freq, self.ps.ps, lpost, topt=fitpars1[\n 'popt'], tcov=fitpars1['cov'], covfactor=covfactor, niter=niter, nchain\n =nchain, parname=parname, check_conv=True, namestr=self.namestr,\n use_emcee=use_emcee, plot=self.plot, printobj=resfile, m=self.m)\n", (7378, 7640), False, 'from src.SpectralAnalysis import mcmc\n'), ((17049, 17076), 'src.SpectralAnalysis.utils.TwoPrint', 'utils.TwoPrint', (['resfilename'], {}), '(resfilename)\n', (17063, 17076), False, 'from src.SpectralAnalysis import utils\n'), ((17139, 17193), 'src.SpectralAnalysis.mle.PerMaxLike', 'mle.PerMaxLike', (['self.ps'], {'fitmethod': 'fitmethod', 'obs': '(True)'}), '(self.ps, fitmethod=fitmethod, obs=True)\n', (17153, 17193), False, 'from src.SpectralAnalysis import mle\n'), ((17672, 17953), 'src.SpectralAnalysis.mcmc.MarkovChainMonteCarlo', 'mcmc.MarkovChainMonteCarlo', (['self.ps.freq', 'self.ps.ps', 'lpost'], {'topt': "fitpars['popt']", 'tcov': "fitpars['cov']", 'covfactor': 'covfactor', 'niter': 'niter', 'nchain': 'nchain', 'parname': 'parname', 'check_conv': '(True)', 'namestr': 'self.namestr', 'use_emcee': '(True)', 'plot': 'self.plot', 'printobj': 'resfile', 'm': 'self.m'}), "(self.ps.freq, self.ps.ps, lpost, topt=fitpars[\n 'popt'], tcov=fitpars['cov'], covfactor=covfactor, niter=niter, nchain=\n nchain, parname=parname, check_conv=True, namestr=self.namestr,\n use_emcee=True, plot=self.plot, printobj=resfile, m=self.m)\n", (17698, 17953), False, 'from src.SpectralAnalysis import mcmc\n'), ((19030, 19056), 'copy.copy', 'copy.copy', (["fitpars['popt']"], {}), "(fitpars['popt'])\n", (19039, 19056), False, 'import copy\n'), ((25727, 25747), 'numpy.msort', 'np.msort', (['sim_maxpow'], {}), '(sim_maxpow)\n', (25735, 25747), True, 'import numpy as np\n'), ((25773, 25792), 'numpy.msort', 'np.msort', (['sim_s3max'], {}), '(sim_s3max)\n', (25781, 25792), True, 'import numpy as np\n'), ((25818, 25837), 'numpy.msort', 'np.msort', (['sim_s5max'], {}), '(sim_s5max)\n', (25826, 25837), True, 'import numpy as np\n'), ((25864, 25884), 'numpy.msort', 'np.msort', (['sim_s11max'], {}), '(sim_s11max)\n', (25872, 25884), True, 'import numpy as np\n'), ((33685, 33739), 'src.SpectralAnalysis.mle.PerMaxLike', 'mle.PerMaxLike', (['self.ps'], {'fitmethod': 'fitmethod', 'obs': '(True)'}), '(self.ps, fitmethod=fitmethod, obs=True)\n', (33699, 33739), False, 'from src.SpectralAnalysis import mle\n'), ((34163, 34426), 'src.SpectralAnalysis.mcmc.MarkovChainMonteCarlo', 'mcmc.MarkovChainMonteCarlo', (['self.ps.freq', 'self.ps.ps', 'lpost'], {'topt': "fitpars['popt']", 'tcov': "fitpars['cov']", 'covfactor': 'covfactor', 'niter': 'niter', 'nchain': 'nchain', 'parname': 'parname', 'check_conv': '(True)', 'namestr': 
'self.namestr', 'use_emcee': '(True)', 'plot': 'self.plot', 'm': 'self.m'}), "(self.ps.freq, self.ps.ps, lpost, topt=fitpars[\n 'popt'], tcov=fitpars['cov'], covfactor=covfactor, niter=niter, nchain=\n nchain, parname=parname, check_conv=True, namestr=self.namestr,\n use_emcee=True, plot=self.plot, m=self.m)\n", (34189, 34426), False, 'from src.SpectralAnalysis import mcmc\n'), ((44092, 44124), 'pickle.dump', 'pickle.dump', (['summary', 'picklefile'], {}), '(summary, picklefile)\n', (44103, 44124), False, 'import pickle\n'), ((47690, 47724), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2, N / 2 + 1)'}), '(figsize=(2, N / 2 + 1))\n', (47700, 47724), True, 'import matplotlib.pyplot as plt\n'), ((47733, 47827), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.95)', 'bottom': '(0.05)', 'left': '(0.05)', 'right': '(0.95)', 'wspace': '(0.2)', 'hspace': '(0.2)'}), '(top=0.95, bottom=0.05, left=0.05, right=0.95, wspace=\n 0.2, hspace=0.2)\n', (47752, 47827), True, 'import matplotlib.pyplot as plt\n'), ((7122, 7160), 'src.SpectralAnalysis.posterior.PerPosterior', 'posterior.PerPosterior', (['self.ps', 'func1'], {}), '(self.ps, func1)\n', (7144, 7160), False, 'from src.SpectralAnalysis import posterior\n'), ((7195, 7246), 'src.SpectralAnalysis.posterior.StackPerPosterior', 'posterior.StackPerPosterior', (['self.ps', 'func1', 'self.m'], {}), '(self.ps, func1, self.m)\n', (7222, 7246), False, 'from src.SpectralAnalysis import posterior\n'), ((17444, 17481), 'src.SpectralAnalysis.posterior.PerPosterior', 'posterior.PerPosterior', (['self.ps', 'func'], {}), '(self.ps, func)\n', (17466, 17481), False, 'from src.SpectralAnalysis import posterior\n'), ((17516, 17566), 'src.SpectralAnalysis.posterior.StackPerPosterior', 'posterior.StackPerPosterior', (['self.ps', 'func', 'self.m'], {}), '(self.ps, func, self.m)\n', (17543, 17566), False, 'from src.SpectralAnalysis import posterior\n'), ((21893, 21910), 'numpy.msort', 'np.msort', (['bmaxpow'], {}), '(bmaxpow)\n', (21901, 21910), True, 'import numpy as np\n'), ((28075, 28095), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (28086, 28095), True, 'import matplotlib.pyplot as plt\n'), ((28127, 28212), 'matplotlib.pyplot.hist', 'plt.hist', (['sim_maxpow'], {'bins': '(100)', 'normed': '(True)', 'color': '"""cyan"""', 'histtype': '"""stepfilled"""'}), "(sim_maxpow, bins=100, normed=True, color='cyan', histtype='stepfilled'\n )\n", (28135, 28212), True, 'import matplotlib.pyplot as plt\n'), ((28442, 28483), 'matplotlib.pyplot.title', 'plt.title', (['"""unsmoothed data"""'], {'fontsize': '(12)'}), "('unsmoothed data', fontsize=12)\n", (28451, 28483), True, 'import matplotlib.pyplot as plt\n'), ((28497, 28517), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (28508, 28517), True, 'import matplotlib.pyplot as plt\n'), ((28549, 28628), 'matplotlib.pyplot.hist', 'plt.hist', (['sim_s3max'], {'bins': '(100)', 'normed': '(True)', 'color': '"""cyan"""', 'histtype': '"""stepfilled"""'}), "(sim_s3max, bins=100, normed=True, color='cyan', histtype='stepfilled')\n", (28557, 28628), True, 'import matplotlib.pyplot as plt\n'), ((28860, 28903), 'matplotlib.pyplot.title', 'plt.title', (['"""smoothed (3) data"""'], {'fontsize': '(12)'}), "('smoothed (3) data', fontsize=12)\n", (28869, 28903), True, 'import matplotlib.pyplot as plt\n'), ((28917, 28937), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (28928, 28937), True, 
'import matplotlib.pyplot as plt\n'), ((28969, 29048), 'matplotlib.pyplot.hist', 'plt.hist', (['sim_s3max'], {'bins': '(100)', 'normed': '(True)', 'color': '"""cyan"""', 'histtype': '"""stepfilled"""'}), "(sim_s3max, bins=100, normed=True, color='cyan', histtype='stepfilled')\n", (28977, 29048), True, 'import matplotlib.pyplot as plt\n'), ((29281, 29338), 'matplotlib.pyplot.title', 'plt.title', (['"""smoothed (5) data/model outlier"""'], {'fontsize': '(12)'}), "('smoothed (5) data/model outlier', fontsize=12)\n", (29290, 29338), True, 'import matplotlib.pyplot as plt\n'), ((29352, 29372), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (29363, 29372), True, 'import matplotlib.pyplot as plt\n'), ((29404, 29483), 'matplotlib.pyplot.hist', 'plt.hist', (['sim_s3max'], {'bins': '(100)', 'normed': '(True)', 'color': '"""cyan"""', 'histtype': '"""stepfilled"""'}), "(sim_s3max, bins=100, normed=True, color='cyan', histtype='stepfilled')\n", (29412, 29483), True, 'import matplotlib.pyplot as plt\n'), ((29718, 29762), 'matplotlib.pyplot.title', 'plt.title', (['"""smoothed (11) data"""'], {'fontsize': '(12)'}), "('smoothed (11) data', fontsize=12)\n", (29727, 29762), True, 'import matplotlib.pyplot as plt\n'), ((29776, 29831), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.namestr + '_maxpow.png')"], {'format': '"""png"""'}), "(self.namestr + '_maxpow.png', format='png')\n", (29787, 29831), True, 'import matplotlib.pyplot as plt\n'), ((29844, 29855), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (29853, 29855), True, 'import matplotlib.pyplot as plt\n'), ((33935, 33972), 'src.SpectralAnalysis.posterior.PerPosterior', 'posterior.PerPosterior', (['self.ps', 'func'], {}), '(self.ps, func)\n', (33957, 33972), False, 'from src.SpectralAnalysis import posterior\n'), ((34007, 34057), 'src.SpectralAnalysis.posterior.StackPerPosterior', 'posterior.StackPerPosterior', (['self.ps', 'func', 'self.m'], {}), '(self.ps, func, self.m)\n', (34034, 34057), False, 'from src.SpectralAnalysis import posterior\n'), ((38509, 38572), 'matplotlib.pyplot.hist', 'plt.hist', (['sim_lrt'], {'bins': '(100)', 'normed': '(True)', 'histtype': '"""stepfilled"""'}), "(sim_lrt, bins=100, normed=True, histtype='stepfilled')\n", (38517, 38572), True, 'import matplotlib.pyplot as plt\n'), ((38652, 38707), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.namestr + '_qpolrt.png')"], {'format': '"""png"""'}), "(self.namestr + '_qpolrt.png', format='png')\n", (38663, 38707), True, 'import matplotlib.pyplot as plt\n'), ((38720, 38731), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (38729, 38731), True, 'import matplotlib.pyplot as plt\n'), ((8622, 8671), 'src.SpectralAnalysis.mle.PerMaxLike', 'mle.PerMaxLike', (['x'], {'fitmethod': 'fitmethod', 'obs': '(False)'}), '(x, fitmethod=fitmethod, obs=False)\n', (8636, 8671), False, 'from src.SpectralAnalysis import mle\n'), ((12413, 12490), 'matplotlib.pyplot.hist', 'plt.hist', (['sim_lrt'], {'bins': '(100)', 'normed': '(True)', 'color': '"""cyan"""', 'histtype': '"""stepfilled"""'}), "(sim_lrt, bins=100, normed=True, color='cyan', histtype='stepfilled')\n", (12421, 12490), True, 'import matplotlib.pyplot as plt\n'), ((12581, 12633), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.namestr + '_lrt.png')"], {'format': '"""png"""'}), "(self.namestr + '_lrt.png', format='png')\n", (12592, 12633), True, 'import matplotlib.pyplot as plt\n'), ((12650, 12661), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12659, 
12661), True, 'import matplotlib.pyplot as plt\n'), ((19326, 19375), 'src.SpectralAnalysis.mle.PerMaxLike', 'mle.PerMaxLike', (['x'], {'fitmethod': 'fitmethod', 'obs': '(False)'}), '(x, fitmethod=fitmethod, obs=False)\n', (19340, 19375), False, 'from src.SpectralAnalysis import mle\n'), ((35592, 35643), 'src.SpectralAnalysis.mle.PerMaxLike', 'mle.PerMaxLike', (['x'], {'fitmethod': '"""constbfgs"""', 'obs': '(False)'}), "(x, fitmethod='constbfgs', obs=False)\n", (35606, 35643), False, 'from src.SpectralAnalysis import mle\n'), ((24189, 24219), 'numpy.sqrt', 'np.sqrt', (['(bpow * b * self.ps.df)'], {}), '(bpow * b * self.ps.df)\n', (24196, 24219), True, 'import numpy as np\n'), ((37200, 37216), 'numpy.mean', 'np.mean', (['sim_ksp'], {}), '(sim_ksp)\n', (37207, 37216), True, 'import numpy as np\n'), ((37316, 37334), 'numpy.mean', 'np.mean', (['sim_merit'], {}), '(sim_merit)\n', (37323, 37334), True, 'import numpy as np\n'), ((37431, 37448), 'numpy.mean', 'np.mean', (['sim_srat'], {}), '(sim_srat)\n', (37438, 37448), True, 'import numpy as np\n'), ((10814, 10830), 'numpy.mean', 'np.mean', (['sim_ksp'], {}), '(sim_ksp)\n', (10821, 10830), True, 'import numpy as np\n'), ((10943, 10961), 'numpy.mean', 'np.mean', (['sim_merit'], {}), '(sim_merit)\n', (10950, 10961), True, 'import numpy as np\n'), ((11071, 11088), 'numpy.mean', 'np.mean', (['sim_srat'], {}), '(sim_srat)\n', (11078, 11088), True, 'import numpy as np\n'), ((24083, 24114), 'numpy.searchsorted', 'np.searchsorted', (['binps.freq', 'bc'], {}), '(binps.freq, bc)\n', (24098, 24114), True, 'import numpy as np\n')]
|
import torch.nn as nn
import torch
import torch.nn.functional as F
from torch.autograd import Variable
class FeatureExtractor(nn.Module):
def __init__(self, voacb_size, embedding_dim=300, hidden_dim=300):
super(FeatureExtractor, self).__init__()
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.embedding = nn.Embedding(voacb_size, embedding_dim)
self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)
def forward(self, sentence):
x = self.embedding(sentence)
lstm_out, lstm_hidden = self.lstm(x)
return lstm_out
class Classifier(nn.Module):
def __init__(self, target_size=2, hidden_dim=300):
super(Classifier, self).__init__()
self.lstm1 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True)
self.lstm2 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True)
self.fc = nn.Sequential(
nn.Linear(hidden_dim, 150),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(150, target_size)
)
def forward(self, x, sentence_length):
self.lstm1.flatten_parameters()
self.lstm2.flatten_parameters()
lstm1_out, lstm1_hidden = self.lstm1(x)
lstm2_out, lstm2_hidden = self.lstm2(lstm1_out)
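        # gather the output at the last valid (non-padded) time step of each sequence
        # before passing it to the fully connected classifier head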
out = torch.stack([lstm2_out[i, sentence_length[i] - 1] for i in range(len(lstm2_out))], dim=0)
out = self.fc(out)
return out
class MutlInfo(nn.Module):
def __init__(self, voacb_size, target_size=2, embedding_dim=300, hidden_dim=300):
super(MutlInfo, self).__init__()
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.embedding = nn.Embedding(voacb_size, embedding_dim)
self.lstm1 = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)
self.lstm2 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True)
self.lstm3 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True)
self.fc = nn.Sequential(
nn.Linear(2 * hidden_dim + target_size, 150),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(150, 1)
)
def forward(self, sentence, z, u, sentence_length):
self.lstm1.flatten_parameters()
self.lstm2.flatten_parameters()
self.lstm3.flatten_parameters()
x = self.embedding(sentence)
lstm1_out, lstm1_hidden = self.lstm1(x)
lstm2_out, lstm2_hidden = self.lstm2(lstm1_out)
lstm3_out, lstm3_hidden = self.lstm3(lstm2_out)
x_new = torch.stack([lstm3_out[i, sentence_length[i]-1] for i in range(len(lstm3_out))], dim=0)
lstm2_out_z, lstm2_hidden_z = self.lstm2(z)
lstm3_out_z, lstm3_hidden_z = self.lstm3(lstm2_out_z)
z_new = torch.stack([lstm3_out_z[i, sentence_length[i]-1] for i in range(len(lstm3_out_z))], dim=0)
out = torch.cat((x_new, z_new, u), dim=1)
out = self.fc(out)
return out
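# info_loss appears to implement a Jensen-Shannon-style mutual-information lower bound
# (as used in Deep InfoMax-type objectives): Ej averages -softplus(-T) over "joint"
# samples (x, z, u), Em averages softplus(T) over "marginal" samples (x_prime, z, u),
# and the estimate is Ej - Em.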
def info_loss(MI, x, x_length, z, u, x_prime, x_prime_length):
Ej = -F.softplus(-MI(x, z, u, x_length)).mean()
Em = F.softplus(MI(x_prime, z, u, x_prime_length)).mean()
return Ej - Em
|
[
"torch.nn.Dropout",
"torch.nn.ReLU",
"torch.nn.Embedding",
"torch.cat",
"torch.nn.Linear",
"torch.nn.LSTM"
] |
[((368, 407), 'torch.nn.Embedding', 'nn.Embedding', (['voacb_size', 'embedding_dim'], {}), '(voacb_size, embedding_dim)\n', (380, 407), True, 'import torch.nn as nn\n'), ((428, 480), 'torch.nn.LSTM', 'nn.LSTM', (['embedding_dim', 'hidden_dim'], {'batch_first': '(True)'}), '(embedding_dim, hidden_dim, batch_first=True)\n', (435, 480), True, 'import torch.nn as nn\n'), ((771, 820), 'torch.nn.LSTM', 'nn.LSTM', (['hidden_dim', 'hidden_dim'], {'batch_first': '(True)'}), '(hidden_dim, hidden_dim, batch_first=True)\n', (778, 820), True, 'import torch.nn as nn\n'), ((842, 891), 'torch.nn.LSTM', 'nn.LSTM', (['hidden_dim', 'hidden_dim'], {'batch_first': '(True)'}), '(hidden_dim, hidden_dim, batch_first=True)\n', (849, 891), True, 'import torch.nn as nn\n'), ((1716, 1755), 'torch.nn.Embedding', 'nn.Embedding', (['voacb_size', 'embedding_dim'], {}), '(voacb_size, embedding_dim)\n', (1728, 1755), True, 'import torch.nn as nn\n'), ((1777, 1829), 'torch.nn.LSTM', 'nn.LSTM', (['embedding_dim', 'hidden_dim'], {'batch_first': '(True)'}), '(embedding_dim, hidden_dim, batch_first=True)\n', (1784, 1829), True, 'import torch.nn as nn\n'), ((1852, 1901), 'torch.nn.LSTM', 'nn.LSTM', (['hidden_dim', 'hidden_dim'], {'batch_first': '(True)'}), '(hidden_dim, hidden_dim, batch_first=True)\n', (1859, 1901), True, 'import torch.nn as nn\n'), ((1923, 1972), 'torch.nn.LSTM', 'nn.LSTM', (['hidden_dim', 'hidden_dim'], {'batch_first': '(True)'}), '(hidden_dim, hidden_dim, batch_first=True)\n', (1930, 1972), True, 'import torch.nn as nn\n'), ((2882, 2917), 'torch.cat', 'torch.cat', (['(x_new, z_new, u)'], {'dim': '(1)'}), '((x_new, z_new, u), dim=1)\n', (2891, 2917), False, 'import torch\n'), ((937, 963), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', '(150)'], {}), '(hidden_dim, 150)\n', (946, 963), True, 'import torch.nn as nn\n'), ((977, 998), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (984, 998), True, 'import torch.nn as nn\n'), ((1012, 1024), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (1022, 1024), True, 'import torch.nn as nn\n'), ((1038, 1065), 'torch.nn.Linear', 'nn.Linear', (['(150)', 'target_size'], {}), '(150, target_size)\n', (1047, 1065), True, 'import torch.nn as nn\n'), ((2019, 2063), 'torch.nn.Linear', 'nn.Linear', (['(2 * hidden_dim + target_size)', '(150)'], {}), '(2 * hidden_dim + target_size, 150)\n', (2028, 2063), True, 'import torch.nn as nn\n'), ((2077, 2098), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2084, 2098), True, 'import torch.nn as nn\n'), ((2112, 2124), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (2122, 2124), True, 'import torch.nn as nn\n'), ((2138, 2155), 'torch.nn.Linear', 'nn.Linear', (['(150)', '(1)'], {}), '(150, 1)\n', (2147, 2155), True, 'import torch.nn as nn\n')]
|
import warnings
import matplotlib.pyplot as plt
import pandas as pd
import pytask
import seaborn as sns
from src.config import BLD
from src.config import PLOT_END_DATE
from src.config import PLOT_SIZE
from src.config import PLOT_START_DATE
from src.config import SRC
from src.plotting.plotting import style_plot
from src.testing.shared import get_piecewise_linear_interpolation
@pytask.mark.depends_on(
{
"params": BLD / "params.pkl",
"rki": BLD / "data" / "processed_time_series" / "rki.pkl",
"plotting.py": SRC / "plotting" / "plotting.py",
"testing_shared.py": SRC / "testing" / "shared.py",
}
)
@pytask.mark.produces(
BLD / "figures" / "data" / "testing" / "private_test_demand_shares.pdf"
)
def task_plot_private_test_demand_shares(depends_on, produces):
params = pd.read_pickle(depends_on["params"])
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message="indexing past lexsort depth may impact performance."
)
params_slice = params.loc[("rapid_test_demand", "private_demand")]
private_demand_shares = get_piecewise_linear_interpolation(params_slice)
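    # params_slice presumably holds dated support points for the private rapid test
    # demand share; get_piecewise_linear_interpolation appears to expand them into a
    # date-indexed series, which is then restricted to the plotting window below.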
private_demand_shares = private_demand_shares.loc[PLOT_START_DATE:PLOT_END_DATE]
fig, ax = plt.subplots(figsize=PLOT_SIZE)
sns.lineplot(
x=private_demand_shares.index,
y=private_demand_shares,
ax=ax,
)
ax.set_title(
"Private Rapid Test Demand\n"
"(Share of Individuals who Do a Rapid Test \n"
"When a Household Member Tests Positive Or Becomes Symptomatic Or \n"
"When Developing Symptoms but not Receiving a Rapid Test Or \n"
"When Participating in Some Private Events)"
)
fig, ax = style_plot(fig, ax)
fig.tight_layout()
fig.savefig(produces)
plt.close()
|
[
"seaborn.lineplot",
"pytask.mark.produces",
"warnings.filterwarnings",
"matplotlib.pyplot.close",
"pytask.mark.depends_on",
"src.testing.shared.get_piecewise_linear_interpolation",
"src.plotting.plotting.style_plot",
"warnings.catch_warnings",
"pandas.read_pickle",
"matplotlib.pyplot.subplots"
] |
[((383, 605), 'pytask.mark.depends_on', 'pytask.mark.depends_on', (["{'params': BLD / 'params.pkl', 'rki': BLD / 'data' /\n 'processed_time_series' / 'rki.pkl', 'plotting.py': SRC / 'plotting' /\n 'plotting.py', 'testing_shared.py': SRC / 'testing' / 'shared.py'}"], {}), "({'params': BLD / 'params.pkl', 'rki': BLD / 'data' /\n 'processed_time_series' / 'rki.pkl', 'plotting.py': SRC / 'plotting' /\n 'plotting.py', 'testing_shared.py': SRC / 'testing' / 'shared.py'})\n", (405, 605), False, 'import pytask\n'), ((644, 741), 'pytask.mark.produces', 'pytask.mark.produces', (["(BLD / 'figures' / 'data' / 'testing' / 'private_test_demand_shares.pdf')"], {}), "(BLD / 'figures' / 'data' / 'testing' /\n 'private_test_demand_shares.pdf')\n", (664, 741), False, 'import pytask\n'), ((821, 857), 'pandas.read_pickle', 'pd.read_pickle', (["depends_on['params']"], {}), "(depends_on['params'])\n", (835, 857), True, 'import pandas as pd\n'), ((1125, 1173), 'src.testing.shared.get_piecewise_linear_interpolation', 'get_piecewise_linear_interpolation', (['params_slice'], {}), '(params_slice)\n', (1159, 1173), False, 'from src.testing.shared import get_piecewise_linear_interpolation\n'), ((1274, 1305), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'PLOT_SIZE'}), '(figsize=PLOT_SIZE)\n', (1286, 1305), True, 'import matplotlib.pyplot as plt\n'), ((1310, 1385), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': 'private_demand_shares.index', 'y': 'private_demand_shares', 'ax': 'ax'}), '(x=private_demand_shares.index, y=private_demand_shares, ax=ax)\n', (1322, 1385), True, 'import seaborn as sns\n'), ((1751, 1770), 'src.plotting.plotting.style_plot', 'style_plot', (['fig', 'ax'], {}), '(fig, ax)\n', (1761, 1770), False, 'from src.plotting.plotting import style_plot\n'), ((1825, 1836), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1834, 1836), True, 'import matplotlib.pyplot as plt\n'), ((867, 892), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (890, 892), False, 'import warnings\n'), ((902, 1003), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'message': '"""indexing past lexsort depth may impact performance."""'}), "('ignore', message=\n 'indexing past lexsort depth may impact performance.')\n", (925, 1003), False, 'import warnings\n')]
|
import json
import logging
from typing import Dict
import numpy
class ClientSet:
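    """
    Registry of connected clients, keeping bidirectional mappings between
    host IDs and websocket connections, plus the route ID currently assigned
    to each host.
    """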
def __init__(self):
self.sockets_by_host_id = {}
self.route_ids_by_host_id = {}
self.host_ids_by_socket = {}
def add(self, host_id, socket):
logging.info(f"Registered hostID '{host_id}'")
self.sockets_by_host_id[host_id], self.host_ids_by_socket[socket] = socket, host_id
def remove(self, host_id=None, socket=None):
if socket:
host_id = self.host_ids_by_socket[socket]
if not socket:
socket = self.sockets_by_host_id[host_id]
logging.info(f"Deleting hostID '{host_id}'")
self.host_ids_by_socket.pop(socket, None)
self.route_ids_by_host_id.pop(host_id, None)
self.sockets_by_host_id.pop(host_id, None)
def get_socket(self, host_id):
return self.sockets_by_host_id.get(host_id)
def get_host_id(self, socket):
return self.host_ids_by_socket.get(socket)
def get_route_id(self, host_id=None, socket=None):
if socket:
host_id = self.host_ids_by_socket[socket]
return self.route_ids_by_host_id.get(host_id)
def set_route_id(self, host_id=None, socket=None, route_id=None):
if socket:
host_id = self.host_ids_by_socket[socket]
logging.info(f"Assigning routeID '{route_id}' to hostID '{host_id}'")
self.route_ids_by_host_id[host_id] = route_id
def clear_route_id(self, host_id=None, socket=None):
if socket:
host_id = self.host_ids_by_socket[socket]
route_id = self.route_ids_by_host_id[host_id]
logging.info(f"Removing routeID '{route_id}' from hostID '{host_id}'")
del self.route_ids_by_host_id[host_id]
def connected_hosts_count(self):
return len(self.host_ids_by_socket)
def current_state(self):
return self.route_ids_by_host_id
async def send_json(websocket, event: str, msg: Dict = None):
"""
Send a JSON event with optional additional fields via the given websocket connection.
:param websocket: the websocket to send the message on
:param event: the desired value of the "event" field inside the JSON message
:param msg: a dict containing any additional fields for the JSON message to contain
:return:
"""
if msg is None:
msg = {}
msg['event'] = event
json_msg = json.dumps(msg, default=default_json_encoder)
await websocket.send(json_msg)
async def recv_json(websocket):
response = await websocket.recv()
return json.loads(response)
def default_json_encoder(o):
if isinstance(o, numpy.int64):
return int(o)
    raise TypeError("Object of type %s is not JSON serializable" % type(o).__name__)
|
[
"logging.info",
"json.loads",
"json.dumps"
] |
[((2398, 2443), 'json.dumps', 'json.dumps', (['msg'], {'default': 'default_json_encoder'}), '(msg, default=default_json_encoder)\n', (2408, 2443), False, 'import json\n'), ((2562, 2582), 'json.loads', 'json.loads', (['response'], {}), '(response)\n', (2572, 2582), False, 'import json\n'), ((266, 312), 'logging.info', 'logging.info', (['f"""Registered hostID \'{host_id}\'"""'], {}), '(f"Registered hostID \'{host_id}\'")\n', (278, 312), False, 'import logging\n'), ((613, 657), 'logging.info', 'logging.info', (['f"""Deleting hostID \'{host_id}\'"""'], {}), '(f"Deleting hostID \'{host_id}\'")\n', (625, 657), False, 'import logging\n'), ((1322, 1391), 'logging.info', 'logging.info', (['f"""Assigning routeID \'{route_id}\' to hostID \'{host_id}\'"""'], {}), '(f"Assigning routeID \'{route_id}\' to hostID \'{host_id}\'")\n', (1334, 1391), False, 'import logging\n'), ((1639, 1709), 'logging.info', 'logging.info', (['f"""Removing routeID \'{route_id}\' from hostID \'{host_id}\'"""'], {}), '(f"Removing routeID \'{route_id}\' from hostID \'{host_id}\'")\n', (1651, 1709), False, 'import logging\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_utils
----------------------------------
Tests for the various utility operations employed by Magpie.
"""
import os
import unittest
from distutils.version import LooseVersion
import mock
import six
from pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPInternalServerError, HTTPOk
from pyramid.settings import asbool
from magpie import __meta__, constants
from magpie.api import exception as ax
from magpie.api import generic as ag
from magpie.api import requests as ar
from magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url
from tests import runner, utils
class DummyEnum(ExtendedEnum):
VALUE1 = "value-1"
VALUE2 = "value-2"
@runner.MAGPIE_TEST_LOCAL
@runner.MAGPIE_TEST_UTILS
class TestUtils(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.version = __meta__.__version__ # only local test
def test_magpie_prefix_direct_request(self):
base_url = "http://localhost"
for url in ["http://localhost", "http://localhost/magpie"]:
app = utils.get_test_magpie_app({"magpie.url": url})
path = "/version"
resp = utils.test_request(app, "GET", path)
utils.check_response_basic_info(resp)
utils.check_val_equal(resp.request.url, base_url + path,
"Proxied path should have been auto-resolved [URL: {}].".format(url))
def test_magpie_prefix_request_with_multiple_route_url(self):
"""
Test multiple request routing with fixed "MAGPIE_URL" within the API application.
Signin with invalid credentials will call "/signin" followed by sub-request "/signin_internal" and finally
"ZigguratSignInBadAuth". Both "/signin" and "ZigguratSignInBadAuth" use "get_multiformat_body".
"""
from magpie.api.requests import get_value_multiformat_body_checked as real_multiform_post_checked
base_url = "http://localhost"
def mock_get_post(real_func, *args, **kwargs):
if args[1] != "password":
return real_func(*args, **kwargs)
request, args = args[0], args[1:]
utils.check_val_equal(request.url, base_url + _paths.pop(0),
"Proxied path should have been auto-resolved [URL: {}].".format(url))
return real_func(request, *args, **kwargs)
for url in ["http://localhost", "http://localhost/magpie"]:
# paths are reduced (pop in mock) each time a post to get the 'password' is called in 'login' module
# this combination should happen twice, one in signin route and another on the redirected internal login
_paths = ["/signin", "/signin_internal"]
app = utils.get_test_magpie_app({"magpie.url": url})
with mock.patch("magpie.api.requests.get_value_multiformat_body_checked",
side_effect=lambda *_, **__: mock_get_post(real_multiform_post_checked, *_, **__)):
data = {"user_name": "foo", "password": "<PASSWORD>"}
headers = {"Content-Type": CONTENT_TYPE_JSON, "Accept": CONTENT_TYPE_JSON}
resp = utils.test_request(app, "POST", _paths[0], json=data, headers=headers, expect_errors=True)
if LooseVersion(self.version) < LooseVersion("0.10.0"):
# user name doesn't exist
utils.check_response_basic_info(resp, expected_code=406, expected_method="POST")
else:
# invalid username/password credentials
utils.check_response_basic_info(resp, expected_code=401, expected_method="POST")
def test_get_header_split(self):
headers = {"Content-Type": "{}; charset=UTF-8".format(CONTENT_TYPE_JSON)}
for name in ["content_type", "content-type", "Content_Type", "Content-Type", "CONTENT_TYPE", "CONTENT-TYPE"]:
for split in [";,", ",;", ";", (",", ";"), [";", ","]]:
utils.check_val_equal(get_header(name, headers, split=split), CONTENT_TYPE_JSON)
def test_get_query_param(self):
resp = utils.mock_request("/some/path")
v = ar.get_query_param(resp, "value")
utils.check_val_equal(v, None)
resp = utils.mock_request("/some/path?other=test")
v = ar.get_query_param(resp, "value")
utils.check_val_equal(v, None)
resp = utils.mock_request("/some/path?other=test")
v = ar.get_query_param(resp, "value", True)
utils.check_val_equal(v, True)
resp = utils.mock_request("/some/path?value=test")
v = ar.get_query_param(resp, "value", True)
utils.check_val_equal(v, "test")
resp = utils.mock_request("/some/path?query=value")
v = ar.get_query_param(resp, "query")
utils.check_val_equal(v, "value")
resp = utils.mock_request("/some/path?QUERY=VALUE")
v = ar.get_query_param(resp, "query")
utils.check_val_equal(v, "VALUE")
resp = utils.mock_request("/some/path?QUERY=VALUE")
v = asbool(ar.get_query_param(resp, "query"))
utils.check_val_equal(v, False)
resp = utils.mock_request("/some/path?Query=TRUE")
v = asbool(ar.get_query_param(resp, "query"))
utils.check_val_equal(v, True)
def test_verify_param_proper_verifications_raised(self):
# with default error
utils.check_raises(lambda: ax.verify_param("b", param_compare=["a", "b"], not_in=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param("x", param_compare=["a", "b"], is_in=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param("1", param_compare=int, is_type=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param(1.0, param_compare=six.string_types, is_type=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param("x", param_compare="x", not_equal=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param("x", param_compare="y", is_equal=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param(False, is_true=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param(True, is_false=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param(None, not_none=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param(1, is_none=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param("", not_empty=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param("abc", is_empty=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param("abc", matches=True, param_compare=r"[A-Z]+"), HTTPBadRequest)
# with requested error
utils.check_raises(lambda:
ax.verify_param("b", param_compare=["a", "b"], not_in=True, http_error=HTTPForbidden),
HTTPForbidden)
utils.check_raises(lambda: ax.verify_param("x", param_compare=["a", "b"], is_in=True, http_error=HTTPForbidden),
HTTPForbidden)
utils.check_raises(lambda: ax.verify_param("1", param_compare=int, is_type=True, http_error=HTTPForbidden),
HTTPForbidden)
utils.check_raises(lambda: ax.verify_param("x", param_compare="x", not_equal=True, http_error=HTTPForbidden),
HTTPForbidden)
utils.check_raises(lambda: ax.verify_param("x", param_compare="y", is_equal=True, http_error=HTTPForbidden),
HTTPForbidden)
utils.check_raises(lambda: ax.verify_param(False, is_true=True, http_error=HTTPForbidden), HTTPForbidden)
utils.check_raises(lambda: ax.verify_param(True, is_false=True, http_error=HTTPForbidden), HTTPForbidden)
utils.check_raises(lambda: ax.verify_param(None, not_none=True, http_error=HTTPForbidden), HTTPForbidden)
utils.check_raises(lambda: ax.verify_param(1, is_none=True, http_error=HTTPForbidden), HTTPForbidden)
utils.check_raises(lambda: ax.verify_param("", not_empty=True, http_error=HTTPForbidden), HTTPForbidden)
utils.check_raises(lambda: ax.verify_param("abc", is_empty=True, http_error=HTTPForbidden), HTTPForbidden)
utils.check_raises(lambda:
ax.verify_param("abc", matches=True, param_compare=r"[A-Z]+", http_error=HTTPForbidden),
HTTPForbidden)
def test_verify_param_proper_verifications_passed(self):
ax.verify_param("x", param_compare=["a", "b"], not_in=True)
ax.verify_param("b", param_compare=["a", "b"], is_in=True)
ax.verify_param(1, param_compare=int, is_type=True)
ax.verify_param("x", param_compare=six.string_types, is_type=True)
ax.verify_param("x", param_compare=str, is_type=True)
ax.verify_param("x", param_compare="y", not_equal=True)
ax.verify_param("x", param_compare="x", is_equal=True)
ax.verify_param(True, is_true=True)
ax.verify_param(False, is_false=True)
ax.verify_param(1, not_none=True)
ax.verify_param(None, is_none=True)
ax.verify_param("abc", not_empty=True)
ax.verify_param("", is_empty=True)
ax.verify_param("abc", matches=True, param_compare=r"[a-z]+")
def test_verify_param_args_incorrect_usage(self):
"""
Invalid usage of function raises internal server error instead of 'normal HTTP error'.
"""
utils.check_raises(lambda: ax.verify_param("b", param_compare=["a", "b"]),
HTTPInternalServerError, msg="missing any flag specification should be caught")
utils.check_raises(lambda: ax.verify_param("b", param_compare=["a", "b"], not_in=None), # noqa
HTTPInternalServerError, msg="flag specified with incorrect type should be caught")
utils.check_raises(lambda: ax.verify_param("b", not_in=True),
HTTPInternalServerError, msg="missing 'param_compare' for flag needing it should be caught")
utils.check_raises(lambda: ax.verify_param("b", param_compare=["b"], not_in=True, http_error=HTTPOk), # noqa
HTTPInternalServerError, msg="incorrect HTTP class to raise error should be caught")
utils.check_raises(lambda: ax.verify_param([1], param_compare=1, is_in=True),
HTTPInternalServerError, msg="incorrect non-iterable compare should raise invalid type")
for flag in ["not_none", "not_empty", "not_in", "not_equal", "is_none", "is_empty", "is_in", "is_equal",
"is_true", "is_false", "is_type", "matches"]:
utils.check_raises(lambda: ax.verify_param("x", **{flag: 1}),
HTTPInternalServerError, msg="invalid flag '{}' type should be caught".format(flag))
def test_verify_param_compare_types(self):
"""
Arguments ``param`` and ``param_compare`` must be of same type for valid comparison, except for ``is_type``
where compare parameter must be the type directly.
.. versionchanged:: 2.0
Since ``param`` can come from user input, we should **NOT** raise ``HTTPInternalServerError`` because the
whole point of the method is to ensure that values are compared accordingly in a controlled fashion.
Therefore, error to be raised is an 'expected' validation failure (``HTTPBadRequest`` or whichever
``http_error`` provided) instead of runtime 'unexpected' processing error.
On the other hand, when ``is_type`` flag is requested, we know that ``param_compare`` must be a type.
Inversely, ``param_compare`` must not be a type if ``is_type`` is not requested, but other flags require
some form of comparison between values. We evaluate these use cases here.
.. seealso::
- :func:`test_verify_param_args_incorrect_usage` for invalid input use-cases
"""
# compare flags expecting a value (can only consider it bad request because comparison values are valid)
utils.check_raises(lambda: ax.verify_param("1", param_compare=1, is_equal=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param("1", param_compare=True, is_equal=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param(1, param_compare="1", is_equal=True), HTTPBadRequest)
utils.check_raises(lambda: ax.verify_param(1, param_compare=True, is_equal=True), HTTPBadRequest)
# when compare flags expect a value but type is provided, should still detect incorrect input
utils.check_raises(lambda: ax.verify_param(1, param_compare=int, is_equal=True), HTTPInternalServerError)
utils.check_raises(lambda: ax.verify_param("1", param_compare=str, is_equal=True), HTTPInternalServerError)
# compare flags expecting param_compare to be a type while value provided is not
utils.check_raises(lambda: ax.verify_param(1, param_compare="x", is_type=True), HTTPInternalServerError)
utils.check_raises(lambda: ax.verify_param(1, param_compare=True, is_type=True), HTTPInternalServerError)
utils.check_raises(lambda: ax.verify_param("1", param_compare=None, is_type=True), HTTPInternalServerError)
# compare flags expecting param_compare to be some container instance while value provided is not
utils.check_raises(lambda: ax.verify_param(1, param_compare=1, is_in=True), HTTPInternalServerError)
utils.check_raises(lambda: ax.verify_param(1, param_compare=list, is_in=True), HTTPInternalServerError)
utils.check_raises(lambda: ax.verify_param("1", param_compare=str, is_in=True), HTTPInternalServerError)
utils.check_raises(lambda: ax.verify_param(1, param_compare=1, not_in=True), HTTPInternalServerError)
utils.check_raises(lambda: ax.verify_param(1, param_compare=list, not_in=True), HTTPInternalServerError)
utils.check_raises(lambda: ax.verify_param("1", param_compare=str, not_in=True), HTTPInternalServerError)
# strings cases handled correctly (no raise)
utils.check_no_raise(lambda: ax.verify_param("1", param_compare="1", is_equal=True))
def test_enum_values_listing(self):
utils.check_all_equal(DummyEnum.values(), ["value-1", "value-2"], any_order=True)
def test_enum_get_by_value(self):
utils.check_val_equal(DummyEnum.get("value-1"), DummyEnum.VALUE1)
utils.check_val_equal(DummyEnum.get("VALUE1"), DummyEnum.VALUE1)
utils.check_val_equal(DummyEnum.get("random"), None)
utils.check_val_equal(DummyEnum.get("random", "something"), "something")
def test_enum_other(self):
class OtherEnum(ExtendedEnum):
VALUE1 = DummyEnum.VALUE1.value # copy internal string representation
utils.check_val_not_equal(DummyEnum.VALUE1, OtherEnum.VALUE1, msg="concrete enum elements should be different")
def test_evaluate_call_callable_incorrect_usage(self):
"""
Verifies that incorrect usage of utility is raised accordingly.
"""
utils.check_raises(lambda: ax.evaluate_call(int),
HTTPInternalServerError, msg="invalid callable non-lambda 'call' should raise")
utils.check_raises(lambda: ax.evaluate_call(lambda: int, fallback=int), # noqa
HTTPInternalServerError, msg="invalid callable non-lambda 'fallback' should raise")
def test_evaluate_call_recursive_safeguard(self):
"""
Validate use case if internal function that handles formatting and generation of a resulting HTTP response
raises itself an error (because of implementation issue), while it is processing another pre-raised error, that
it does not end up into an endless recursive call stack of raised errors.
"""
mock_calls = {"counter": 0}
def mock_raise(*_, **__):
# avoid raising forever if the real safeguard fails doing its job
if mock_calls["counter"] >= 2 * ax.RAISE_RECURSIVE_SAFEGUARD_MAX:
return TypeError()
mock_calls["counter"] += 1
raise TypeError()
def mock_lambda_call(*_, **__):
ax.evaluate_call(lambda: int("x"))
try:
app = utils.get_test_magpie_app()
with mock.patch("magpie.api.exception.generate_response_http_format", side_effect=mock_raise):
with mock.patch("magpie.api.login.login.get_session", side_effect=mock_lambda_call):
# Call request that ends up calling the response formatter via 'evaluate_call' itself raising to
# trigger 'mock_raise' recursively within 'raise_http' function.
# Since tweens are set up to format all response prior to return, the raised error will itself
# call 'raise_http' again each time operation fails, creating recursive raises.
# If recursive safeguard does its job, it should end up raising 'HTTPInternalServerError' directly
# (without further formatting attempt when reaching the MAX value), stopping the endless loop.
utils.test_request(app, "GET", "/session", expect_errors=True)
except AssertionError:
# Request called with above 'test_request' should catch the final 'HTTPInternalServerError' that is
# raised directly instead of usual TestResponse returned. That error is again re-raised as 'AssertionError'
pass
except Exception as exc:
self.fail("unexpected error during request creation should not raise: {}".format(exc))
# if our counter reached higher than the MAX (i.e.: 2*MAX from mock), the safeguard did not do its job
        # if it did not get called more than once, the use cases did not really get tested
utils.check_val_is_in(mock_calls["counter"], list(range(2, ax.RAISE_RECURSIVE_SAFEGUARD_MAX + 1))) # noqa
def test_format_content_json_str_invalid_usage(self):
non_json_serializable_content = {"key": HTTPInternalServerError()}
utils.check_raises(
lambda: ax.format_content_json_str(200, "", non_json_serializable_content, CONTENT_TYPE_JSON),
HTTPInternalServerError, msg="invalid content format expected as JSON serializable should raise"
)
def test_generate_response_http_format_invalid_usage(self):
utils.check_raises(
lambda: ax.generate_response_http_format(None, {}, {}, "", {}), # noqa
HTTPInternalServerError, msg="invalid arguments resulting in error during response generation should raise"
)
def test_guess_target_format_default(self):
request = utils.mock_request()
content_type, where = ag.guess_target_format(request)
utils.check_val_equal(content_type, CONTENT_TYPE_JSON)
utils.check_val_equal(where, True)
def test_get_magpie_url_defined_or_defaults(self):
# Disable constants globals() for every case, since it can pre-loaded from .env when running all tests.
# Always need to provide a settings container (even empty direct when nothing define in settings),
# otherwise 'get_constant' can find the current thread settings generated by any test app
with mock.patch.object(constants, "MAGPIE_URL", None):
with mock.patch.dict(os.environ, {"MAGPIE_URL": ""}):
url = utils.check_no_raise(lambda: get_magpie_url({}))
utils.check_val_equal(url, "http://localhost:2001")
url = utils.check_no_raise(lambda: get_magpie_url({"magpie.url": "https://test-server.com"}))
utils.check_val_equal(url, "https://test-server.com")
url = utils.check_no_raise(lambda: get_magpie_url({"magpie.host": "localhost"}))
utils.check_val_equal(url, "http://localhost:2001")
url = utils.check_no_raise(lambda: get_magpie_url({"magpie.host": "test-server.com"}))
utils.check_val_equal(url, "http://test-server.com:2001")
url = utils.check_no_raise(lambda: get_magpie_url({"magpie.host": "test.com", "magpie.port": "1234"}))
utils.check_val_equal(url, "http://test.com:1234")
url = utils.check_no_raise(lambda: get_magpie_url({"magpie.port": "1234"}))
utils.check_val_equal(url, "http://localhost:1234")
url = utils.check_no_raise(lambda: get_magpie_url({"magpie.port": "9000", "magpie.scheme": "https"}))
utils.check_val_equal(url, "https://localhost:9000")
with mock.patch.dict(os.environ, {"MAGPIE_URL": "localhost:9871"}):
url = utils.check_no_raise(lambda: get_magpie_url({"magpie.url": "https://test-server.com"}))
utils.check_val_equal(url, "https://test-server.com") # settings priority over envs
url = utils.check_no_raise(lambda: get_magpie_url({}))
utils.check_val_equal(url, "http://localhost:9871") # env URL found if not in settings
url = utils.check_no_raise(lambda: get_magpie_url({"magpie.host": "server"})) # ignored, URL priority
utils.check_val_equal(url, "http://localhost:9871") # URL fixed with missing scheme even if defined
with mock.patch.dict(os.environ, {"MAGPIE_URL": "", "MAGPIE_PORT": "1234"}):
url = utils.check_no_raise(lambda: get_magpie_url({"magpie.url": "https://test-server.com"}))
utils.check_val_equal(url, "https://test-server.com") # ignore port, URL has priority
url = utils.check_no_raise(lambda: get_magpie_url({"magpie.host": "server"}))
utils.check_val_equal(url, "http://server:1234")
url = utils.check_no_raise(lambda: get_magpie_url({"magpie.scheme": "https"}))
utils.check_val_equal(url, "https://localhost:1234")
|
[
"tests.utils.test_request",
"magpie.api.exception.generate_response_http_format",
"tests.utils.check_response_basic_info",
"magpie.api.requests.get_query_param",
"magpie.utils.get_header",
"magpie.api.exception.evaluate_call",
"mock.patch.object",
"pyramid.httpexceptions.HTTPInternalServerError",
"distutils.version.LooseVersion",
"magpie.api.generic.guess_target_format",
"mock.patch",
"tests.utils.mock_request",
"magpie.api.exception.verify_param",
"tests.utils.get_test_magpie_app",
"magpie.utils.get_magpie_url",
"tests.utils.check_val_not_equal",
"mock.patch.dict",
"tests.utils.check_val_equal",
"magpie.api.exception.format_content_json_str"
] |
[((4180, 4212), 'tests.utils.mock_request', 'utils.mock_request', (['"""/some/path"""'], {}), "('/some/path')\n", (4198, 4212), False, 'from tests import runner, utils\n'), ((4225, 4258), 'magpie.api.requests.get_query_param', 'ar.get_query_param', (['resp', '"""value"""'], {}), "(resp, 'value')\n", (4243, 4258), True, 'from magpie.api import requests as ar\n'), ((4267, 4297), 'tests.utils.check_val_equal', 'utils.check_val_equal', (['v', 'None'], {}), '(v, None)\n', (4288, 4297), False, 'from tests import runner, utils\n'), ((4314, 4357), 'tests.utils.mock_request', 'utils.mock_request', (['"""/some/path?other=test"""'], {}), "('/some/path?other=test')\n", (4332, 4357), False, 'from tests import runner, utils\n'), ((4370, 4403), 'magpie.api.requests.get_query_param', 'ar.get_query_param', (['resp', '"""value"""'], {}), "(resp, 'value')\n", (4388, 4403), True, 'from magpie.api import requests as ar\n'), ((4412, 4442), 'tests.utils.check_val_equal', 'utils.check_val_equal', (['v', 'None'], {}), '(v, None)\n', (4433, 4442), False, 'from tests import runner, utils\n'), ((4459, 4502), 'tests.utils.mock_request', 'utils.mock_request', (['"""/some/path?other=test"""'], {}), "('/some/path?other=test')\n", (4477, 4502), False, 'from tests import runner, utils\n'), ((4515, 4554), 'magpie.api.requests.get_query_param', 'ar.get_query_param', (['resp', '"""value"""', '(True)'], {}), "(resp, 'value', True)\n", (4533, 4554), True, 'from magpie.api import requests as ar\n'), ((4563, 4593), 'tests.utils.check_val_equal', 'utils.check_val_equal', (['v', '(True)'], {}), '(v, True)\n', (4584, 4593), False, 'from tests import runner, utils\n'), ((4610, 4653), 'tests.utils.mock_request', 'utils.mock_request', (['"""/some/path?value=test"""'], {}), "('/some/path?value=test')\n", (4628, 4653), False, 'from tests import runner, utils\n'), ((4666, 4705), 'magpie.api.requests.get_query_param', 'ar.get_query_param', (['resp', '"""value"""', '(True)'], {}), "(resp, 'value', True)\n", (4684, 4705), True, 'from magpie.api import requests as ar\n'), ((4714, 4746), 'tests.utils.check_val_equal', 'utils.check_val_equal', (['v', '"""test"""'], {}), "(v, 'test')\n", (4735, 4746), False, 'from tests import runner, utils\n'), ((4763, 4807), 'tests.utils.mock_request', 'utils.mock_request', (['"""/some/path?query=value"""'], {}), "('/some/path?query=value')\n", (4781, 4807), False, 'from tests import runner, utils\n'), ((4820, 4853), 'magpie.api.requests.get_query_param', 'ar.get_query_param', (['resp', '"""query"""'], {}), "(resp, 'query')\n", (4838, 4853), True, 'from magpie.api import requests as ar\n'), ((4862, 4895), 'tests.utils.check_val_equal', 'utils.check_val_equal', (['v', '"""value"""'], {}), "(v, 'value')\n", (4883, 4895), False, 'from tests import runner, utils\n'), ((4912, 4956), 'tests.utils.mock_request', 'utils.mock_request', (['"""/some/path?QUERY=VALUE"""'], {}), "('/some/path?QUERY=VALUE')\n", (4930, 4956), False, 'from tests import runner, utils\n'), ((4969, 5002), 'magpie.api.requests.get_query_param', 'ar.get_query_param', (['resp', '"""query"""'], {}), "(resp, 'query')\n", (4987, 5002), True, 'from magpie.api import requests as ar\n'), ((5011, 5044), 'tests.utils.check_val_equal', 'utils.check_val_equal', (['v', '"""VALUE"""'], {}), "(v, 'VALUE')\n", (5032, 5044), False, 'from tests import runner, utils\n'), ((5061, 5105), 'tests.utils.mock_request', 'utils.mock_request', (['"""/some/path?QUERY=VALUE"""'], {}), "('/some/path?QUERY=VALUE')\n", (5079, 5105), False, 'from tests import runner, utils\n'), 
((5168, 5199), 'tests.utils.check_val_equal', 'utils.check_val_equal', (['v', '(False)'], {}), '(v, False)\n', (5189, 5199), False, 'from tests import runner, utils\n'), ((5216, 5259), 'tests.utils.mock_request', 'utils.mock_request', (['"""/some/path?Query=TRUE"""'], {}), "('/some/path?Query=TRUE')\n", (5234, 5259), False, 'from tests import runner, utils\n'), ((5322, 5352), 'tests.utils.check_val_equal', 'utils.check_val_equal', (['v', '(True)'], {}), '(v, True)\n', (5343, 5352), False, 'from tests import runner, utils\n'), ((8557, 8616), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""x"""'], {'param_compare': "['a', 'b']", 'not_in': '(True)'}), "('x', param_compare=['a', 'b'], not_in=True)\n", (8572, 8616), True, 'from magpie.api import exception as ax\n'), ((8625, 8683), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""b"""'], {'param_compare': "['a', 'b']", 'is_in': '(True)'}), "('b', param_compare=['a', 'b'], is_in=True)\n", (8640, 8683), True, 'from magpie.api import exception as ax\n'), ((8692, 8743), 'magpie.api.exception.verify_param', 'ax.verify_param', (['(1)'], {'param_compare': 'int', 'is_type': '(True)'}), '(1, param_compare=int, is_type=True)\n', (8707, 8743), True, 'from magpie.api import exception as ax\n'), ((8752, 8818), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""x"""'], {'param_compare': 'six.string_types', 'is_type': '(True)'}), "('x', param_compare=six.string_types, is_type=True)\n", (8767, 8818), True, 'from magpie.api import exception as ax\n'), ((8827, 8880), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""x"""'], {'param_compare': 'str', 'is_type': '(True)'}), "('x', param_compare=str, is_type=True)\n", (8842, 8880), True, 'from magpie.api import exception as ax\n'), ((8889, 8944), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""x"""'], {'param_compare': '"""y"""', 'not_equal': '(True)'}), "('x', param_compare='y', not_equal=True)\n", (8904, 8944), True, 'from magpie.api import exception as ax\n'), ((8953, 9007), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""x"""'], {'param_compare': '"""x"""', 'is_equal': '(True)'}), "('x', param_compare='x', is_equal=True)\n", (8968, 9007), True, 'from magpie.api import exception as ax\n'), ((9016, 9051), 'magpie.api.exception.verify_param', 'ax.verify_param', (['(True)'], {'is_true': '(True)'}), '(True, is_true=True)\n', (9031, 9051), True, 'from magpie.api import exception as ax\n'), ((9060, 9097), 'magpie.api.exception.verify_param', 'ax.verify_param', (['(False)'], {'is_false': '(True)'}), '(False, is_false=True)\n', (9075, 9097), True, 'from magpie.api import exception as ax\n'), ((9106, 9139), 'magpie.api.exception.verify_param', 'ax.verify_param', (['(1)'], {'not_none': '(True)'}), '(1, not_none=True)\n', (9121, 9139), True, 'from magpie.api import exception as ax\n'), ((9148, 9183), 'magpie.api.exception.verify_param', 'ax.verify_param', (['None'], {'is_none': '(True)'}), '(None, is_none=True)\n', (9163, 9183), True, 'from magpie.api import exception as ax\n'), ((9192, 9230), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""abc"""'], {'not_empty': '(True)'}), "('abc', not_empty=True)\n", (9207, 9230), True, 'from magpie.api import exception as ax\n'), ((9239, 9273), 'magpie.api.exception.verify_param', 'ax.verify_param', (['""""""'], {'is_empty': '(True)'}), "('', is_empty=True)\n", (9254, 9273), True, 'from magpie.api import exception as ax\n'), ((9282, 9342), 'magpie.api.exception.verify_param', 'ax.verify_param', 
(['"""abc"""'], {'matches': '(True)', 'param_compare': '"""[a-z]+"""'}), "('abc', matches=True, param_compare='[a-z]+')\n", (9297, 9342), True, 'from magpie.api import exception as ax\n'), ((14903, 15019), 'tests.utils.check_val_not_equal', 'utils.check_val_not_equal', (['DummyEnum.VALUE1', 'OtherEnum.VALUE1'], {'msg': '"""concrete enum elements should be different"""'}), "(DummyEnum.VALUE1, OtherEnum.VALUE1, msg=\n 'concrete enum elements should be different')\n", (14928, 15019), False, 'from tests import runner, utils\n'), ((18849, 18869), 'tests.utils.mock_request', 'utils.mock_request', ([], {}), '()\n', (18867, 18869), False, 'from tests import runner, utils\n'), ((18900, 18931), 'magpie.api.generic.guess_target_format', 'ag.guess_target_format', (['request'], {}), '(request)\n', (18922, 18931), True, 'from magpie.api import generic as ag\n'), ((18940, 18994), 'tests.utils.check_val_equal', 'utils.check_val_equal', (['content_type', 'CONTENT_TYPE_JSON'], {}), '(content_type, CONTENT_TYPE_JSON)\n', (18961, 18994), False, 'from tests import runner, utils\n'), ((19003, 19037), 'tests.utils.check_val_equal', 'utils.check_val_equal', (['where', '(True)'], {}), '(where, True)\n', (19024, 19037), False, 'from tests import runner, utils\n'), ((1108, 1154), 'tests.utils.get_test_magpie_app', 'utils.get_test_magpie_app', (["{'magpie.url': url}"], {}), "({'magpie.url': url})\n", (1133, 1154), False, 'from tests import runner, utils\n'), ((1205, 1241), 'tests.utils.test_request', 'utils.test_request', (['app', '"""GET"""', 'path'], {}), "(app, 'GET', path)\n", (1223, 1241), False, 'from tests import runner, utils\n'), ((1254, 1291), 'tests.utils.check_response_basic_info', 'utils.check_response_basic_info', (['resp'], {}), '(resp)\n', (1285, 1291), False, 'from tests import runner, utils\n'), ((2802, 2848), 'tests.utils.get_test_magpie_app', 'utils.get_test_magpie_app', (["{'magpie.url': url}"], {}), "({'magpie.url': url})\n", (2827, 2848), False, 'from tests import runner, utils\n'), ((5125, 5158), 'magpie.api.requests.get_query_param', 'ar.get_query_param', (['resp', '"""query"""'], {}), "(resp, 'query')\n", (5143, 5158), True, 'from magpie.api import requests as ar\n'), ((5279, 5312), 'magpie.api.requests.get_query_param', 'ar.get_query_param', (['resp', '"""query"""'], {}), "(resp, 'query')\n", (5297, 5312), True, 'from magpie.api import requests as ar\n'), ((16382, 16409), 'tests.utils.get_test_magpie_app', 'utils.get_test_magpie_app', ([], {}), '()\n', (16407, 16409), False, 'from tests import runner, utils\n'), ((18194, 18219), 'pyramid.httpexceptions.HTTPInternalServerError', 'HTTPInternalServerError', ([], {}), '()\n', (18217, 18219), False, 'from pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPInternalServerError, HTTPOk\n'), ((19424, 19472), 'mock.patch.object', 'mock.patch.object', (['constants', '"""MAGPIE_URL"""', 'None'], {}), "(constants, 'MAGPIE_URL', None)\n", (19441, 19472), False, 'import mock\n'), ((3232, 3326), 'tests.utils.test_request', 'utils.test_request', (['app', '"""POST"""', '_paths[0]'], {'json': 'data', 'headers': 'headers', 'expect_errors': '(True)'}), "(app, 'POST', _paths[0], json=data, headers=headers,\n expect_errors=True)\n", (3250, 3326), False, 'from tests import runner, utils\n'), ((5479, 5538), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""b"""'], {'param_compare': "['a', 'b']", 'not_in': '(True)'}), "('b', param_compare=['a', 'b'], not_in=True)\n", (5494, 5538), True, 'from magpie.api import exception as ax\n'), ((5591, 
5649), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""x"""'], {'param_compare': "['a', 'b']", 'is_in': '(True)'}), "('x', param_compare=['a', 'b'], is_in=True)\n", (5606, 5649), True, 'from magpie.api import exception as ax\n'), ((5702, 5755), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""1"""'], {'param_compare': 'int', 'is_type': '(True)'}), "('1', param_compare=int, is_type=True)\n", (5717, 5755), True, 'from magpie.api import exception as ax\n'), ((5808, 5874), 'magpie.api.exception.verify_param', 'ax.verify_param', (['(1.0)'], {'param_compare': 'six.string_types', 'is_type': '(True)'}), '(1.0, param_compare=six.string_types, is_type=True)\n', (5823, 5874), True, 'from magpie.api import exception as ax\n'), ((5927, 5982), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""x"""'], {'param_compare': '"""x"""', 'not_equal': '(True)'}), "('x', param_compare='x', not_equal=True)\n", (5942, 5982), True, 'from magpie.api import exception as ax\n'), ((6035, 6089), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""x"""'], {'param_compare': '"""y"""', 'is_equal': '(True)'}), "('x', param_compare='y', is_equal=True)\n", (6050, 6089), True, 'from magpie.api import exception as ax\n'), ((6142, 6178), 'magpie.api.exception.verify_param', 'ax.verify_param', (['(False)'], {'is_true': '(True)'}), '(False, is_true=True)\n', (6157, 6178), True, 'from magpie.api import exception as ax\n'), ((6231, 6267), 'magpie.api.exception.verify_param', 'ax.verify_param', (['(True)'], {'is_false': '(True)'}), '(True, is_false=True)\n', (6246, 6267), True, 'from magpie.api import exception as ax\n'), ((6320, 6356), 'magpie.api.exception.verify_param', 'ax.verify_param', (['None'], {'not_none': '(True)'}), '(None, not_none=True)\n', (6335, 6356), True, 'from magpie.api import exception as ax\n'), ((6409, 6441), 'magpie.api.exception.verify_param', 'ax.verify_param', (['(1)'], {'is_none': '(True)'}), '(1, is_none=True)\n', (6424, 6441), True, 'from magpie.api import exception as ax\n'), ((6494, 6529), 'magpie.api.exception.verify_param', 'ax.verify_param', (['""""""'], {'not_empty': '(True)'}), "('', not_empty=True)\n", (6509, 6529), True, 'from magpie.api import exception as ax\n'), ((6582, 6619), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""abc"""'], {'is_empty': '(True)'}), "('abc', is_empty=True)\n", (6597, 6619), True, 'from magpie.api import exception as ax\n'), ((6672, 6732), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""abc"""'], {'matches': '(True)', 'param_compare': '"""[A-Z]+"""'}), "('abc', matches=True, param_compare='[A-Z]+')\n", (6687, 6732), True, 'from magpie.api import exception as ax\n'), ((6845, 6935), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""b"""'], {'param_compare': "['a', 'b']", 'not_in': '(True)', 'http_error': 'HTTPForbidden'}), "('b', param_compare=['a', 'b'], not_in=True, http_error=\n HTTPForbidden)\n", (6860, 6935), True, 'from magpie.api import exception as ax\n'), ((7009, 7098), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""x"""'], {'param_compare': "['a', 'b']", 'is_in': '(True)', 'http_error': 'HTTPForbidden'}), "('x', param_compare=['a', 'b'], is_in=True, http_error=\n HTTPForbidden)\n", (7024, 7098), True, 'from magpie.api import exception as ax\n'), ((7172, 7251), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""1"""'], {'param_compare': 'int', 'is_type': '(True)', 'http_error': 'HTTPForbidden'}), "('1', param_compare=int, is_type=True, 
http_error=HTTPForbidden)\n", (7187, 7251), True, 'from magpie.api import exception as ax\n'), ((7330, 7416), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""x"""'], {'param_compare': '"""x"""', 'not_equal': '(True)', 'http_error': 'HTTPForbidden'}), "('x', param_compare='x', not_equal=True, http_error=\n HTTPForbidden)\n", (7345, 7416), True, 'from magpie.api import exception as ax\n'), ((7490, 7575), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""x"""'], {'param_compare': '"""y"""', 'is_equal': '(True)', 'http_error': 'HTTPForbidden'}), "('x', param_compare='y', is_equal=True, http_error=HTTPForbidden\n )\n", (7505, 7575), True, 'from magpie.api import exception as ax\n'), ((7649, 7711), 'magpie.api.exception.verify_param', 'ax.verify_param', (['(False)'], {'is_true': '(True)', 'http_error': 'HTTPForbidden'}), '(False, is_true=True, http_error=HTTPForbidden)\n', (7664, 7711), True, 'from magpie.api import exception as ax\n'), ((7763, 7825), 'magpie.api.exception.verify_param', 'ax.verify_param', (['(True)'], {'is_false': '(True)', 'http_error': 'HTTPForbidden'}), '(True, is_false=True, http_error=HTTPForbidden)\n', (7778, 7825), True, 'from magpie.api import exception as ax\n'), ((7877, 7939), 'magpie.api.exception.verify_param', 'ax.verify_param', (['None'], {'not_none': '(True)', 'http_error': 'HTTPForbidden'}), '(None, not_none=True, http_error=HTTPForbidden)\n', (7892, 7939), True, 'from magpie.api import exception as ax\n'), ((7991, 8049), 'magpie.api.exception.verify_param', 'ax.verify_param', (['(1)'], {'is_none': '(True)', 'http_error': 'HTTPForbidden'}), '(1, is_none=True, http_error=HTTPForbidden)\n', (8006, 8049), True, 'from magpie.api import exception as ax\n'), ((8101, 8162), 'magpie.api.exception.verify_param', 'ax.verify_param', (['""""""'], {'not_empty': '(True)', 'http_error': 'HTTPForbidden'}), "('', not_empty=True, http_error=HTTPForbidden)\n", (8116, 8162), True, 'from magpie.api import exception as ax\n'), ((8214, 8277), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""abc"""'], {'is_empty': '(True)', 'http_error': 'HTTPForbidden'}), "('abc', is_empty=True, http_error=HTTPForbidden)\n", (8229, 8277), True, 'from magpie.api import exception as ax\n'), ((8356, 8447), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""abc"""'], {'matches': '(True)', 'param_compare': '"""[A-Z]+"""', 'http_error': 'HTTPForbidden'}), "('abc', matches=True, param_compare='[A-Z]+', http_error=\n HTTPForbidden)\n", (8371, 8447), True, 'from magpie.api import exception as ax\n'), ((9553, 9599), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""b"""'], {'param_compare': "['a', 'b']"}), "('b', param_compare=['a', 'b'])\n", (9568, 9599), True, 'from magpie.api import exception as ax\n'), ((9743, 9802), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""b"""'], {'param_compare': "['a', 'b']", 'not_in': 'None'}), "('b', param_compare=['a', 'b'], not_in=None)\n", (9758, 9802), True, 'from magpie.api import exception as ax\n'), ((9958, 9991), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""b"""'], {'not_in': '(True)'}), "('b', not_in=True)\n", (9973, 9991), True, 'from magpie.api import exception as ax\n'), ((10148, 10221), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""b"""'], {'param_compare': "['b']", 'not_in': '(True)', 'http_error': 'HTTPOk'}), "('b', param_compare=['b'], not_in=True, http_error=HTTPOk)\n", (10163, 10221), True, 'from magpie.api import exception as ax\n'), ((10378, 10427), 
'magpie.api.exception.verify_param', 'ax.verify_param', (['[1]'], {'param_compare': '(1)', 'is_in': '(True)'}), '([1], param_compare=1, is_in=True)\n', (10393, 10427), True, 'from magpie.api import exception as ax\n'), ((12202, 12254), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""1"""'], {'param_compare': '(1)', 'is_equal': '(True)'}), "('1', param_compare=1, is_equal=True)\n", (12217, 12254), True, 'from magpie.api import exception as ax\n'), ((12307, 12362), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""1"""'], {'param_compare': '(True)', 'is_equal': '(True)'}), "('1', param_compare=True, is_equal=True)\n", (12322, 12362), True, 'from magpie.api import exception as ax\n'), ((12415, 12467), 'magpie.api.exception.verify_param', 'ax.verify_param', (['(1)'], {'param_compare': '"""1"""', 'is_equal': '(True)'}), "(1, param_compare='1', is_equal=True)\n", (12430, 12467), True, 'from magpie.api import exception as ax\n'), ((12520, 12573), 'magpie.api.exception.verify_param', 'ax.verify_param', (['(1)'], {'param_compare': '(True)', 'is_equal': '(True)'}), '(1, param_compare=True, is_equal=True)\n', (12535, 12573), True, 'from magpie.api import exception as ax\n'), ((12728, 12780), 'magpie.api.exception.verify_param', 'ax.verify_param', (['(1)'], {'param_compare': 'int', 'is_equal': '(True)'}), '(1, param_compare=int, is_equal=True)\n', (12743, 12780), True, 'from magpie.api import exception as ax\n'), ((12842, 12896), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""1"""'], {'param_compare': 'str', 'is_equal': '(True)'}), "('1', param_compare=str, is_equal=True)\n", (12857, 12896), True, 'from magpie.api import exception as ax\n'), ((13048, 13099), 'magpie.api.exception.verify_param', 'ax.verify_param', (['(1)'], {'param_compare': '"""x"""', 'is_type': '(True)'}), "(1, param_compare='x', is_type=True)\n", (13063, 13099), True, 'from magpie.api import exception as ax\n'), ((13161, 13213), 'magpie.api.exception.verify_param', 'ax.verify_param', (['(1)'], {'param_compare': '(True)', 'is_type': '(True)'}), '(1, param_compare=True, is_type=True)\n', (13176, 13213), True, 'from magpie.api import exception as ax\n'), ((13275, 13329), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""1"""'], {'param_compare': 'None', 'is_type': '(True)'}), "('1', param_compare=None, is_type=True)\n", (13290, 13329), True, 'from magpie.api import exception as ax\n'), ((13498, 13545), 'magpie.api.exception.verify_param', 'ax.verify_param', (['(1)'], {'param_compare': '(1)', 'is_in': '(True)'}), '(1, param_compare=1, is_in=True)\n', (13513, 13545), True, 'from magpie.api import exception as ax\n'), ((13607, 13657), 'magpie.api.exception.verify_param', 'ax.verify_param', (['(1)'], {'param_compare': 'list', 'is_in': '(True)'}), '(1, param_compare=list, is_in=True)\n', (13622, 13657), True, 'from magpie.api import exception as ax\n'), ((13719, 13770), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""1"""'], {'param_compare': 'str', 'is_in': '(True)'}), "('1', param_compare=str, is_in=True)\n", (13734, 13770), True, 'from magpie.api import exception as ax\n'), ((13832, 13880), 'magpie.api.exception.verify_param', 'ax.verify_param', (['(1)'], {'param_compare': '(1)', 'not_in': '(True)'}), '(1, param_compare=1, not_in=True)\n', (13847, 13880), True, 'from magpie.api import exception as ax\n'), ((13942, 13993), 'magpie.api.exception.verify_param', 'ax.verify_param', (['(1)'], {'param_compare': 'list', 'not_in': '(True)'}), '(1, param_compare=list, 
not_in=True)\n', (13957, 13993), True, 'from magpie.api import exception as ax\n'), ((14055, 14107), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""1"""'], {'param_compare': 'str', 'not_in': '(True)'}), "('1', param_compare=str, not_in=True)\n", (14070, 14107), True, 'from magpie.api import exception as ax\n'), ((14225, 14279), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""1"""'], {'param_compare': '"""1"""', 'is_equal': '(True)'}), "('1', param_compare='1', is_equal=True)\n", (14240, 14279), True, 'from magpie.api import exception as ax\n'), ((15206, 15227), 'magpie.api.exception.evaluate_call', 'ax.evaluate_call', (['int'], {}), '(int)\n', (15222, 15227), True, 'from magpie.api import exception as ax\n'), ((15371, 15415), 'magpie.api.exception.evaluate_call', 'ax.evaluate_call', (['(lambda : int)'], {'fallback': 'int'}), '(lambda : int, fallback=int)\n', (15387, 15415), True, 'from magpie.api import exception as ax\n'), ((16427, 16519), 'mock.patch', 'mock.patch', (['"""magpie.api.exception.generate_response_http_format"""'], {'side_effect': 'mock_raise'}), "('magpie.api.exception.generate_response_http_format',\n side_effect=mock_raise)\n", (16437, 16519), False, 'import mock\n'), ((18269, 18358), 'magpie.api.exception.format_content_json_str', 'ax.format_content_json_str', (['(200)', '""""""', 'non_json_serializable_content', 'CONTENT_TYPE_JSON'], {}), "(200, '', non_json_serializable_content,\n CONTENT_TYPE_JSON)\n", (18295, 18358), True, 'from magpie.api import exception as ax\n'), ((18588, 18642), 'magpie.api.exception.generate_response_http_format', 'ax.generate_response_http_format', (['None', '{}', '{}', '""""""', '{}'], {}), "(None, {}, {}, '', {})\n", (18620, 18642), True, 'from magpie.api import exception as ax\n'), ((19492, 19539), 'mock.patch.dict', 'mock.patch.dict', (['os.environ', "{'MAGPIE_URL': ''}"], {}), "(os.environ, {'MAGPIE_URL': ''})\n", (19507, 19539), False, 'import mock\n'), ((19628, 19679), 'tests.utils.check_val_equal', 'utils.check_val_equal', (['url', '"""http://localhost:2001"""'], {}), "(url, 'http://localhost:2001')\n", (19649, 19679), False, 'from tests import runner, utils\n'), ((19807, 19860), 'tests.utils.check_val_equal', 'utils.check_val_equal', (['url', '"""https://test-server.com"""'], {}), "(url, 'https://test-server.com')\n", (19828, 19860), False, 'from tests import runner, utils\n'), ((19975, 20026), 'tests.utils.check_val_equal', 'utils.check_val_equal', (['url', '"""http://localhost:2001"""'], {}), "(url, 'http://localhost:2001')\n", (19996, 20026), False, 'from tests import runner, utils\n'), ((20147, 20204), 'tests.utils.check_val_equal', 'utils.check_val_equal', (['url', '"""http://test-server.com:2001"""'], {}), "(url, 'http://test-server.com:2001')\n", (20168, 20204), False, 'from tests import runner, utils\n'), ((20341, 20391), 'tests.utils.check_val_equal', 'utils.check_val_equal', (['url', '"""http://test.com:1234"""'], {}), "(url, 'http://test.com:1234')\n", (20362, 20391), False, 'from tests import runner, utils\n'), ((20501, 20552), 'tests.utils.check_val_equal', 'utils.check_val_equal', (['url', '"""http://localhost:1234"""'], {}), "(url, 'http://localhost:1234')\n", (20522, 20552), False, 'from tests import runner, utils\n'), ((20688, 20740), 'tests.utils.check_val_equal', 'utils.check_val_equal', (['url', '"""https://localhost:9000"""'], {}), "(url, 'https://localhost:9000')\n", (20709, 20740), False, 'from tests import runner, utils\n'), ((20759, 20820), 'mock.patch.dict', 'mock.patch.dict', 
(['os.environ', "{'MAGPIE_URL': 'localhost:9871'}"], {}), "(os.environ, {'MAGPIE_URL': 'localhost:9871'})\n", (20774, 20820), False, 'import mock\n'), ((20948, 21001), 'tests.utils.check_val_equal', 'utils.check_val_equal', (['url', '"""https://test-server.com"""'], {}), "(url, 'https://test-server.com')\n", (20969, 21001), False, 'from tests import runner, utils\n'), ((21121, 21172), 'tests.utils.check_val_equal', 'utils.check_val_equal', (['url', '"""http://localhost:9871"""'], {}), "(url, 'http://localhost:9871')\n", (21142, 21172), False, 'from tests import runner, utils\n'), ((21345, 21396), 'tests.utils.check_val_equal', 'utils.check_val_equal', (['url', '"""http://localhost:9871"""'], {}), "(url, 'http://localhost:9871')\n", (21366, 21396), False, 'from tests import runner, utils\n'), ((21464, 21534), 'mock.patch.dict', 'mock.patch.dict', (['os.environ', "{'MAGPIE_URL': '', 'MAGPIE_PORT': '1234'}"], {}), "(os.environ, {'MAGPIE_URL': '', 'MAGPIE_PORT': '1234'})\n", (21479, 21534), False, 'import mock\n'), ((21662, 21715), 'tests.utils.check_val_equal', 'utils.check_val_equal', (['url', '"""https://test-server.com"""'], {}), "(url, 'https://test-server.com')\n", (21683, 21715), False, 'from tests import runner, utils\n'), ((21860, 21908), 'tests.utils.check_val_equal', 'utils.check_val_equal', (['url', '"""http://server:1234"""'], {}), "(url, 'http://server:1234')\n", (21881, 21908), False, 'from tests import runner, utils\n'), ((22021, 22073), 'tests.utils.check_val_equal', 'utils.check_val_equal', (['url', '"""https://localhost:1234"""'], {}), "(url, 'https://localhost:1234')\n", (22042, 22073), False, 'from tests import runner, utils\n'), ((3342, 3368), 'distutils.version.LooseVersion', 'LooseVersion', (['self.version'], {}), '(self.version)\n', (3354, 3368), False, 'from distutils.version import LooseVersion\n'), ((3371, 3393), 'distutils.version.LooseVersion', 'LooseVersion', (['"""0.10.0"""'], {}), "('0.10.0')\n", (3383, 3393), False, 'from distutils.version import LooseVersion\n'), ((3461, 3546), 'tests.utils.check_response_basic_info', 'utils.check_response_basic_info', (['resp'], {'expected_code': '(406)', 'expected_method': '"""POST"""'}), "(resp, expected_code=406, expected_method='POST'\n )\n", (3492, 3546), False, 'from tests import runner, utils\n'), ((3644, 3729), 'tests.utils.check_response_basic_info', 'utils.check_response_basic_info', (['resp'], {'expected_code': '(401)', 'expected_method': '"""POST"""'}), "(resp, expected_code=401, expected_method='POST'\n )\n", (3675, 3729), False, 'from tests import runner, utils\n'), ((4069, 4107), 'magpie.utils.get_header', 'get_header', (['name', 'headers'], {'split': 'split'}), '(name, headers, split=split)\n', (4079, 4107), False, 'from magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url\n'), ((10764, 10797), 'magpie.api.exception.verify_param', 'ax.verify_param', (['"""x"""'], {}), "('x', **{flag: 1})\n", (10779, 10797), True, 'from magpie.api import exception as ax\n'), ((16538, 16616), 'mock.patch', 'mock.patch', (['"""magpie.api.login.login.get_session"""'], {'side_effect': 'mock_lambda_call'}), "('magpie.api.login.login.get_session', side_effect=mock_lambda_call)\n", (16548, 16616), False, 'import mock\n'), ((17289, 17351), 'tests.utils.test_request', 'utils.test_request', (['app', '"""GET"""', '"""/session"""'], {'expect_errors': '(True)'}), "(app, 'GET', '/session', expect_errors=True)\n", (17307, 17351), False, 'from tests import runner, utils\n'), ((19592, 19610), 
'magpie.utils.get_magpie_url', 'get_magpie_url', (['{}'], {}), '({})\n', (19606, 19610), False, 'from magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url\n'), ((19732, 19789), 'magpie.utils.get_magpie_url', 'get_magpie_url', (["{'magpie.url': 'https://test-server.com'}"], {}), "({'magpie.url': 'https://test-server.com'})\n", (19746, 19789), False, 'from magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url\n'), ((19913, 19957), 'magpie.utils.get_magpie_url', 'get_magpie_url', (["{'magpie.host': 'localhost'}"], {}), "({'magpie.host': 'localhost'})\n", (19927, 19957), False, 'from magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url\n'), ((20079, 20129), 'magpie.utils.get_magpie_url', 'get_magpie_url', (["{'magpie.host': 'test-server.com'}"], {}), "({'magpie.host': 'test-server.com'})\n", (20093, 20129), False, 'from magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url\n'), ((20257, 20323), 'magpie.utils.get_magpie_url', 'get_magpie_url', (["{'magpie.host': 'test.com', 'magpie.port': '1234'}"], {}), "({'magpie.host': 'test.com', 'magpie.port': '1234'})\n", (20271, 20323), False, 'from magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url\n'), ((20444, 20483), 'magpie.utils.get_magpie_url', 'get_magpie_url', (["{'magpie.port': '1234'}"], {}), "({'magpie.port': '1234'})\n", (20458, 20483), False, 'from magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url\n'), ((20605, 20670), 'magpie.utils.get_magpie_url', 'get_magpie_url', (["{'magpie.port': '9000', 'magpie.scheme': 'https'}"], {}), "({'magpie.port': '9000', 'magpie.scheme': 'https'})\n", (20619, 20670), False, 'from magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url\n'), ((20873, 20930), 'magpie.utils.get_magpie_url', 'get_magpie_url', (["{'magpie.url': 'https://test-server.com'}"], {}), "({'magpie.url': 'https://test-server.com'})\n", (20887, 20930), False, 'from magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url\n'), ((21085, 21103), 'magpie.utils.get_magpie_url', 'get_magpie_url', (['{}'], {}), '({})\n', (21099, 21103), False, 'from magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url\n'), ((21261, 21302), 'magpie.utils.get_magpie_url', 'get_magpie_url', (["{'magpie.host': 'server'}"], {}), "({'magpie.host': 'server'})\n", (21275, 21302), False, 'from magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url\n'), ((21587, 21644), 'magpie.utils.get_magpie_url', 'get_magpie_url', (["{'magpie.url': 'https://test-server.com'}"], {}), "({'magpie.url': 'https://test-server.com'})\n", (21601, 21644), False, 'from magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url\n'), ((21801, 21842), 'magpie.utils.get_magpie_url', 'get_magpie_url', (["{'magpie.host': 'server'}"], {}), "({'magpie.host': 'server'})\n", (21815, 21842), False, 'from magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url\n'), ((21961, 22003), 'magpie.utils.get_magpie_url', 'get_magpie_url', (["{'magpie.scheme': 'https'}"], {}), "({'magpie.scheme': 'https'})\n", (21975, 22003), False, 'from magpie.utils import CONTENT_TYPE_JSON, ExtendedEnum, get_header, get_magpie_url\n')]
|
import os
import csv
from sqlalchemy import Table,literal_column,select
def importVolumes(connection, metadata, source_path):
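    """Populate invVolumes from CSV data: invVolumes1.csv holds (volume, groupID) rows
    that are expanded to one row per typeID of the group via invTypes, while
    invVolumes2.csv holds (volume, typeID) rows that are inserted directly."""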
invVolumes = Table('invVolumes', metadata)
invTypes = Table('invTypes', metadata)
    with open(os.path.join(source_path, 'invVolumes1.csv'), 'r') as groupVolumes:
volumereader = csv.reader(groupVolumes, delimiter=',')
for group in volumereader:
connection.execute(
invVolumes.insert().from_select(['typeID','volume'], select([invTypes.c.typeID,literal_column(group[0])]).where(invTypes.c.groupID == literal_column(group[1])))
)
with open(os.path.join(source_path, 'invVolumes2.csv'), 'r') as groupVolumes:
volumereader = csv.reader(groupVolumes, delimiter=',')
for group in volumereader:
connection.execute(
invVolumes.insert(),
typeID=group[1],
volume=group[0]
)
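# --- Hedged usage sketch (not part of the original loader) ---
# One plausible way to call importVolumes(): reflect the already-created
# invVolumes/invTypes tables into a MetaData, then run the import inside a
# transaction. The database URL and CSV directory are caller-supplied placeholders.
from sqlalchemy import MetaData, create_engine

def run_volume_import(db_url, source_path):
    engine = create_engine(db_url)
    metadata = MetaData()
    # importVolumes() looks both tables up by name, so they must already be in `metadata`
    metadata.reflect(bind=engine, only=['invVolumes', 'invTypes'])
    with engine.begin() as connection:
        importVolumes(connection, metadata, source_path)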
|
[
"sqlalchemy.literal_column",
"sqlalchemy.Table",
"csv.reader",
"os.path.join"
] |
[((145, 174), 'sqlalchemy.Table', 'Table', (['"""invVolumes"""', 'metadata'], {}), "('invVolumes', metadata)\n", (150, 174), False, 'from sqlalchemy import Table, literal_column, select\n'), ((190, 217), 'sqlalchemy.Table', 'Table', (['"""invTypes"""', 'metadata'], {}), "('invTypes', metadata)\n", (195, 217), False, 'from sqlalchemy import Table, literal_column, select\n'), ((338, 377), 'csv.reader', 'csv.reader', (['groupVolumes'], {'delimiter': '""","""'}), "(groupVolumes, delimiter=',')\n", (348, 377), False, 'import csv\n'), ((742, 781), 'csv.reader', 'csv.reader', (['groupVolumes'], {'delimiter': '""","""'}), "(groupVolumes, delimiter=',')\n", (752, 781), False, 'import csv\n'), ((242, 286), 'os.path.join', 'os.path.join', (['source_path', '"""invVolumes1.csv"""'], {}), "(source_path, 'invVolumes1.csv')\n", (254, 286), False, 'import os\n'), ((651, 695), 'os.path.join', 'os.path.join', (['source_path', '"""invVolumes2.csv"""'], {}), "(source_path, 'invVolumes2.csv')\n", (663, 695), False, 'import os\n'), ((595, 619), 'sqlalchemy.literal_column', 'literal_column', (['group[1]'], {}), '(group[1])\n', (609, 619), False, 'from sqlalchemy import Table, literal_column, select\n'), ((540, 564), 'sqlalchemy.literal_column', 'literal_column', (['group[0]'], {}), '(group[0])\n', (554, 564), False, 'from sqlalchemy import Table, literal_column, select\n')]
|
# python standard library
from collections import OrderedDict
# third party
from configobj import ConfigObj
# this package
from theape import BasePlugin
from theape.parts.sleep.sleep import TheBigSleep
from theape.infrastructure.timemap import time_validator
SLEEP_SECTION = 'SLEEP'
END_OPTION = 'end'
TOTAL_OPTION = 'total'
INTERVAL_OPTION = 'interval'
VERBOSE_OPTION = 'verbose'
configuration = """
[[SLEEP]]
# to allow the section names to be arbitrary
# the plugin names are required
plugin = Sleep
# 'end' should be a timestamp for the end-time (11-12-2013 8:45 pm)
# 'total' should be a timestamp for the run-time (1 hr 23 minutes)
# 'interval' should be <amount> <units> (1 minute)
# if verbose is False, screen output will be off except at startup
# only one of absolute or relative time is required, although both can be used
end = <absolute time>
total = <relative time>
interval = 1 second
verbose = True
"""
sleep_configspec = """
end = absolute_time(default=None)
total = relative_time(default=None)
interval = relative_time(default=1)
verbose = boolean(default=True)
"""
sections = OrderedDict()
sections['name'] = '{bold}sleep{reset} -- a countdown timer that blocks until time is over'
sections['description'] = '{bold}sleep{reset} is a verbose no-op (by default) meant to allow the insertion of a pause in the execution of the APE. At this point all calls to sleep will get the same configuration.'
sections['configuration'] = configuration
sections['see also'] = 'EventTimer, RelativeTime, AbsoluteTime'
sections['options'] = """
The configuration options --
{bold}end{reset} : an absolute time given as a time-stamp that can be interpreted by `dateutil.parser.parse`. This is for the cases where you have a specific time that you want the sleep to end.
{bold}total{reset} : a relative time given as pairs of '<amount> <units>' -- e.g. '3.4 hours'. Most units only use the first letter, but since `months` and `minutes` both start with `m`, you have to use two letters to specify them. The sleep will stop at the start of the sleep + the total time given.
{bold}interval{reset} : The amount of time between reports of the time remaining (default = 1 second). Use the same formatting as the `total` option.
{bold}verbose{reset} : If True (the default) then report time remaining at specified intervals while the sleep runs.
One of {bold}end{reset} or {bold}total{reset} needs to be specified. Everything else is optional.
"""
sections['author'] = 'ape'
class Sleep(BasePlugin):
"""
A plugin for TheBigSleep
"""
def __init__(self, *args, **kwargs):
"""
Constructor for Sleep
"""
super(Sleep, self).__init__(*args, **kwargs)
self._subsection = None
return
@property
def subsection(self):
"""
the plugin sub-section
"""
if self._subsection is None:
configspec = ConfigObj(sleep_configspec.splitlines(),
list_values=False,
_inspec=True)
section = ConfigObj(self.configuration[self.section_header],
configspec=configspec)
section.validate(time_validator)
self._subsection = section
return self._subsection
def fetch_config(self):
"""
prints a config-file sample
"""
print(configuration)
@property
def sections(self):
"""
Help dictionary
"""
if self._sections is None:
self._sections = sections
return self._sections
@property
def product(self):
"""
A built TheBigSleep object
:return: TheBigSleep
"""
if self._product is None:
end = self.subsection[END_OPTION]
total = self.subsection[TOTAL_OPTION]
interval = self.subsection[INTERVAL_OPTION]
if interval != 1:
interval = interval.total_seconds()
verbose = self.subsection[VERBOSE_OPTION]
self._product = TheBigSleep(end=end,
total=total,
interval=interval,
verbose=verbose)
return self._product
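
# A minimal usage sketch (illustrative only): shows how the validated SLEEP
# options are assumed to map onto TheBigSleep.  The values are placeholders;
# after validation by `time_validator`, `total` is assumed to arrive as a
# datetime.timedelta, while `interval` is either 1 or a timedelta converted
# to seconds in the `product` property above.
if __name__ == '__main__':
    import datetime
    example = TheBigSleep(end=None,
                        total=datetime.timedelta(seconds=5),
                        interval=1,
                        verbose=True)
    # How the built product is invoked is framework-specific; the APE is
    # assumed to call it, blocking for roughly five seconds with one
    # progress report per second.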
|
[
"collections.OrderedDict",
"theape.parts.sleep.sleep.TheBigSleep",
"configobj.ConfigObj"
] |
[((1128, 1141), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1139, 1141), False, 'from collections import OrderedDict\n'), ((3112, 3185), 'configobj.ConfigObj', 'ConfigObj', (['self.configuration[self.section_header]'], {'configspec': 'configspec'}), '(self.configuration[self.section_header], configspec=configspec)\n', (3121, 3185), False, 'from configobj import ConfigObj\n'), ((4133, 4202), 'theape.parts.sleep.sleep.TheBigSleep', 'TheBigSleep', ([], {'end': 'end', 'total': 'total', 'interval': 'interval', 'verbose': 'verbose'}), '(end=end, total=total, interval=interval, verbose=verbose)\n', (4144, 4202), False, 'from theape.parts.sleep.sleep import TheBigSleep\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 4 20:13:37 2020
@author: jolsten
"""
import sys, logging
import socket
import time
from abc import ABCMeta, abstractmethod
from .exceptions import *
from .utils import STK_DATEFMT, inherit_docstrings
class _AbstractConnect(metaclass=ABCMeta):
'''An STK Connect connection class.
Attributes:
host : str
The host on which the desired instance of STK is running.
port : int
The port on which the desired instance is accepting connections.
address : tuple
The address as a tuple (host, port)
ack : bool
A boolean representing whether the instance is using ACK/NACK.
Changing this after .connect() is called will not change the mode.
connect_attempts : int
The maximum number of attempts at connecting to the socket.
send_attempts : int
Sets the default maximum number of attempts to make while calling
.send() before raising STKNackError.
timeout : float
Sets the default timeout period for calls to .read() before
assuming all data was received.
'''
def __init__(self, **kwargs):
'''Inits an STK connection object (Connect or AsyncConnect)
Args:
host : str (default: 'localhost')
port : int (default: 5001)
ack : bool (default: True)
Specifies whether or not to use ACK/NACK responses with STK
Connect. Highly recommended to leave this to True.
connect_attempts : int (default: 5)
The maximum number of attempts at connecting to the socket.
Several attempts should be made, in case the instance of STK
hasn't finished initializing by the time this is called.
send_attempts : int (default: 1)
Sets the default maximum number of attempts to make while
calling .send() before raising STKNackError.
timeout : int or float (default: 1.0)
Sets the default timeout period for calls to .read() before
assuming all data was received.
Because network traffic is unpredictable, increasing the
timeout will increase the likelihood that you receive all the
data.
However, this also adds a mandatory minimum delay before the
read() function returns.
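        Examples:
            A hypothetical construction with a longer read timeout (values
            are illustrative):
                stk = Connect(host='localhost', port=5001, timeout=2.0)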
'''
self._kwargs = kwargs
self.host = str( kwargs.get('host', 'localhost') )
self.port = int( kwargs.get('port', 5001) )
self.ack = bool( kwargs.get('ack', True) )
self.connect_attempts = int( kwargs.get('connect_attempts', 5) )
self.send_attempts = int( kwargs.get('send_attempts', 1) )
self.timeout = float( kwargs.get('timeout', 1 ) )
self.socket = None
@property
def address(self):
'''The socket address tuple.
Args:
None
Returns:
tuple : (host, port)
'''
return (self.host, self.port)
def connect(self):
'''Connect to the STK Connect socket specified.
Args:
None
Returns:
None
Raises:
STKConnectError : If, after .connect_attempts attempts, a
                connection couldn't be made successfully.
'''
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
time.sleep(3) # give STK a moment to start
self._connect()
if type(self) == AsyncConnect:
self.send(f'ConControl / AsyncOn')
else:
self.send(f'ConControl / AsyncOff')
if self.ack is not True:
self.send(f'ConControl / AckOff')
def _connect(self):
attempt = 0
while True:
attempt += 1
try:
self.socket.connect(self.address)
except ConnectionRefusedError as e:
logging.debug(f'ConnectionRefusedError: {e}')
else: # exit loop if no exceptions caught
logging.info(f'Connected to STK on {self.host}:{self.port}')
return True
finally: # continue loop if any exception caught
if attempt >= self.connect_attempts:
raise STKConnectError(f'Failed to connect to STK via socket on {self.host}:{self.port}')
time.sleep( 3 )
def send(self, message, attempts=None):
'''Sends a Connect command via socket.
Args:
message: A string containing the STK Connect command
attempts: Optional; The maximum number of times to send the
command if a NACK is received.
Returns:
None
Raises:
STKNackError : If too many NACK responses were received from STK.
Examples:
s.send("Unload / *")
'''
if attempts is None: attempts = self.send_attempts
attempt = 0
while True:
attempt += 1
try:
self._send(message)
if self.ack: self.get_ack(message)
return
except STKNackError as e:
if attempt >= attempts:
logging.error(f'send() failed, received NACK too many times')
raise STKNackError(e)
def _send(self, message: str):
logging.debug(f'stk.send("{message}")')
self.socket.send( (message+'\n').encode() )
def read(self, timeout=None):
'''Read all available data from the TCP/IP socket.
Args:
timeout : int or None (default: None)
Sets the timeout period for this specific call to .read()
before assuming all data was received.
Because network traffic is unpredictable, increasing the
timeout will increase the likelihood that you receive all the
data.
However, this also adds a mandatory minimum delay before the
read() function returns.
Returns:
bytes : a bytes object containing the data received from the socket
'''
        if timeout is None: timeout = self.timeout
self.socket.setblocking(False)
self.socket.settimeout(timeout)
logging.debug('Reading until no data is left in the socket...')
buffer = b''
while True:
try:
buffer += self.socket.recv(4096)
except socket.timeout:
logging.debug('Timeout reached, returning buffer')
self.socket.settimeout(None)
return buffer
def disconnect(self):
'''Alias of .close()'''
self.close()
def close(self):
'''Closes the STK Connect socket.
Args:
None
Returns:
None
'''
try:
self.socket.close()
except:
pass
def __repr__(self):
return f'{type(self).__name__}({self.host}:{self.port})'
def __del__(self):
self.close()
@abstractmethod
def get_ack(self, message):
'''Block until an ACK is received from STK Connect.
Users should not typically need to use this method directly, as it is
called from .send() if the class attribute ack=True
Args:
None
Returns:
None
'''
pass
@abstractmethod
def get_single_message(self):
pass
@abstractmethod
def get_multi_message(self):
pass
@abstractmethod
def report(self, **kwargs):
'''Create a report in STK and save it to a file.
Args:
ObjPath : str (required)
The STK Object Path for the desired report.
e.g.
Facility/A_Facility_Name
Satellite/A_Satellite_Name
Style : str or path-like object (required)
The Style name, if it is already loaded into STK (or is a
default report style).
Otherwise, pass a path to the desired .RST file.
FilePath : str or path-like object (required)
The path to the file to which the report should be written.
TimePeriod : str or None (default: None)
The time period to use for the report. If None, then use the
default (typically the parent object's time period).
Valid values:
UseAccessTimes
{TimeInterval}
Intervals {"<FilePath>" | "<IntervalOrListSpec>"}
Enter {TimeInterval} to define the start time and stop
time for the report span. For valid {TimeInterval} values
see Time Options.
Or specify UseAccessTimes to only report data during
access times between the <ObjectPath> and an AccessObject,
but you must also specify at least one AccessObject.
Or use the Intervals option to specify an STK interval
file for the time period or an Interval or Interval List
component specification.
For help on creating the STK interval file,
see Create & Import External Files - Interval List
in STK Help.
For information about "<IntervalOrListSpec>" see
Component Specification.
See STK Help for more details on these options.
TimeStep : float or str (default: None)
The timestep to use for the report. If None, then use the
default (typically the parent object's timestep).
Valid values:
<Value>
Bound <Value>
Array "<TimeArraySpec>"
Enter the time step <Value> to be used in creating the
report. This value is entered in seconds and must be
between 0.000001 and 1000000000.0 seconds.
Or enter Bound <Value> to have the report steps calculated
on a specific time boundary. This value is entered in
seconds and must be between 0 and 3600 seconds. If 0 is
entered then the default time step (usually 60 seconds) is
used.
Or enter the Array keyword with a Time Array component
specification to use the array times as time steps. For
information about "<TimeArraySpec>"
see Component Specification.
AdditionalData : str or None (default: None)
Some Report Styles require additional or pre-data, such as a
comparison object for the RIC report for a Satellite. For these
types of reports you must include this option. More information
on styles that require AdditionalData can be found at "Report
Additional Data" in the STK Help.
Summary : str or None (default: None)
Summary data is not generally included. Use this option, to
have the summary data included in the exported report file.
Valid values:
Include
Only
Specify the Include value to have the summary included with the
rest of the report; use the Only value to have only the summary
data reported.
Returns:
None
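        Examples:
            A hypothetical call (object, style and path names are illustrative
            placeholders):
                s.report(ObjPath='Satellite/ExampleSat',
                         Style='MyReportStyle',
                         FilePath='C:/tmp/example_report.txt',
                         TimeStep=60)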
'''
pass
@abstractmethod
def report_rm(self, **kwargs):
'''Create a report in STK and return them via socket.
Args:
ObjPath : str (required)
The STK Object Path for the desired report.
e.g.
Facility/A_Facility_Name
Satellite/A_Satellite_Name
Style : str or path-like object (required)
The Style name, if it is already loaded into STK (or is a
default report style).
Otherwise, pass a path to the desired .RST file.
TimePeriod : str or None (default: None)
The time period to use for the report. If None, then use the
default (typically the parent object's time period).
Valid values:
UseAccessTimes
{TimeInterval}
Intervals {"<FilePath>" | "<IntervalOrListSpec>"}
Enter {TimeInterval} to define the start time and stop time
for the report span. For valid {TimeInterval} values see
Time Options.
Or specify UseAccessTimes to only report data during access
times between the <ObjectPath> and an AccessObject, but you
must also specify at least one AccessObject.
Or use the Intervals option to specify an STK interval file
for the time period or an Interval or Interval List
component specification.
For help on creating the STK interval file, see Create &
Import External Files - Interval List in STK Help.
For information about "<IntervalOrListSpec>"
see Component Specification.
See STK Help for more details on these options.
TimeStep : float or str
The timestep to use for the report. If None, then use the
default (typically the parent object's timestep).
Valid values:
<Value>
Bound <Value>
Array "<TimeArraySpec>"
Enter the time step <Value> to be used in creating the
report. This value is entered in seconds and must be
between 0.000001 and 1000000000.0 seconds.
Or enter Bound <Value> to have the report steps calculated
on a specific time boundary. This value is entered in
seconds and must be between 0 and 3600 seconds. If 0 is
entered then the default time step (usually 60 seconds) is
used.
Or enter the Array keyword with a Time Array component
specification to use the array times as time steps. For
information about "<TimeArraySpec>"
see Component Specification.
AdditionalData :
Some Report Styles require additional or pre-data, such as a
comparison object for the RIC report for a Satellite. For these
types of reports you must include this option. More information
on styles that require AdditionalData can be found at
"Report Additional Data" in the STK Help.
Summary : str
Valid values:
Include
Only
Summary data is not generally included. Use this option, to
have the summary data included in the exported report file.
Specify the Include value to have the summary included with
the rest of the report; use the Only value to have only the
summary data reported.
Returns:
            list : the report contents returned over the socket, one
                string per Connect message received
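        Examples:
            A hypothetical call (object and style names are illustrative
            placeholders); the report lines are returned over the socket:
                lines = s.report_rm(ObjPath='Satellite/ExampleSat',
                                    Style='MyReportStyle',
                                    TimeStep=60)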
'''
pass
class Connect(_AbstractConnect):
@inherit_docstrings
def get_ack(self, message):
msg = self.socket.recv(3).decode()
if msg == 'ACK':
# logging.debug('ACK Received')
return
elif msg == 'NAC':
k = self.socket.recv(1).decode()
msg = msg + k
raise STKNackError(f'NACK Received: stk.send("{message.rstrip()}")')
else:
logging.error(f'Expecting ACK or NACK, got: {msg}{self.socket.recv(2048)}')
sys.exit(1)
def get_single_message(self):
header = self.socket.recv(40).decode()
cmd_name, length = header.rstrip().split()
length = int(length)
data = self.socket.recv(length).decode()
return header, data
def get_multi_message(self):
hdr, data = self.get_single_message()
messages = []
for i in range(int(data)):
sm = self.get_single_message()
if len(sm) > 0:
messages.append(sm)
return messages
@inherit_docstrings
def report(self, ObjPath, Style, FilePath, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None):
message = f'ReportCreate */{ObjPath} Style "{Style}" Type "Export" File "{FilePath}"'
if AccessObjectPath is not None: message += f' AccessObject {AccessObjectPath}'
if TimePeriod is not None: message += f' TimePeriod {TimePeriod}'
if TimeStep is not None: message += f' TimeStep {TimeStep}'
if AdditionalData is not None: message += f' AdditionalData "{AdditionalData}"'
if Summary is not None: message += f' Summary {Summary}'
if AllLines is not None: message += f' AllLines {AllLines}'
self.send(message)
@inherit_docstrings
def report_rm(self, ObjPath, Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None, **kwargs):
message = f'Report_RM */{ObjPath} Style "{Style}"'
if AccessObjectPath is not None: message += f' AccessObject {AccessObjectPath}'
if TimePeriod is not None: message += f' TimePeriod {TimePeriod}'
if TimeStep is not None: message += f' TimeStep {TimeStep}'
if AdditionalData is not None: message += f' AdditionalData "{AdditionalData}"'
if Summary is not None: message += f' Summary {Summary}'
if AllLines is not None: message += f' AllLines {AllLines}'
self.send(message)
messages = self.get_multi_message()
return [x[1] for x in messages]
class AsyncConnect(_AbstractConnect):
@inherit_docstrings
def get_ack(self, message):
hdr, data = self.get_single_message()
if hdr.async_type == 'ACK':
return True
elif hdr.async_type == 'NACK':
raise STKNackError(f'NACK Received: stk.send("{message}")')
def get_single_message(self):
msg = self.socket.recv(42).decode()
hdr = AsyncHeader(msg)
pdl = hdr.data_length
data = self.socket.recv( pdl ).decode()
while len(data) < hdr.data_length:
data += self.socket.recv( pdl - len(data) ).decode()
return hdr, data
def get_multi_message(self):
logging.debug('Getting Message Block:')
hdr, data = self.get_single_message()
logging.debug(f'GotMessage: {hdr}{data}')
msg_grp = [None] * hdr.total_packets
msg_grp[hdr.packet_number-1] = data
for i in range(1,hdr.total_packets):
            hdr, data = self.get_single_message()
logging.debug(f'GotMessage: {hdr}{data}')
msg_grp[hdr.packet_number-1] = data
if msg_grp[-1] == '': del msg_grp[-1]
return msg_grp
@inherit_docstrings
def report(self, ObjPath, Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None):
message = f'ReportCreate */{ObjPath} Style "{Style}"'
if AccessObjectPath is not None: message += f' AccessObject {AccessObjectPath}'
if TimePeriod is not None: message += f' TimePeriod {TimePeriod}'
if TimeStep is not None: message += f' TimeStep {TimeStep}'
if AdditionalData is not None: message += f' AdditionalData "{AdditionalData}"'
if Summary is not None: message += f' Summary {Summary}'
if AllLines is not None: message += f' AllLines {AllLines}'
self.send(message)
@inherit_docstrings
def report_rm(self, ObjPath, Style, TimePeriod=None, TimeStep=None, AccessObjectPath=None, AdditionalData=None, Summary=None, AllLines=None, **kwargs):
message = f'Report_RM */{ObjPath} Style "{Style}"'
if AccessObjectPath is not None: message += f' AccessObject {AccessObjectPath}'
if TimePeriod is not None: message += f' TimePeriod {TimePeriod}'
if TimeStep is not None: message += f' TimeStep {TimeStep}'
if AdditionalData is not None: message += f' AdditionalData "{AdditionalData}"'
if Summary is not None: message += f' Summary {Summary}'
if AllLines is not None: message += f' AllLines {AllLines}'
self.send(message)
buffer = self.read(**kwargs).decode()
if len(buffer) == 0: return []
return [ x[18:] for x in buffer.split('AGI421009REPORT_RM ')[1:] ]
class AsyncHeader():
'''A helper class to read the STK Connect Asynchronous Message Format headers.'''
def __init__(self, bytestring):
'''Inits a new object using the raw values, passed as bytes or str.'''
if isinstance(bytestring, bytes): bytestring = bytestring.decode()
self.raw = bytestring
def __repr__(self):
return f'<{self.raw}>'
@property
def sync(self):
'''str : The sync word, should always be "AGI"'''
        return self.raw[0:3]
@property
def header_length(self):
'''int : The header_length, should always be 42.'''
        return int(self.raw[3:5])
@property
def version(self):
'''str : The version in major.minor format.'''
return f'{self.major_version}.{self.minor_version}'
@property
def major_version(self):
'''int : The major version number.'''
        return int(self.raw[5])
@property
def minor_version(self):
'''int : The minor version number.'''
        return int(self.raw[6])
@property
def type_length(self):
'''int : The length of the command type string.'''
return int(self.raw[7:9])
@property
def async_type(self):
'''str : The value of the command type string.'''
return (self.raw[9:24])[0:self.type_length]
@property
def identifier(self):
'''int : The value of the response ID.
This should be used to associate the correct responses with each
other if commands are being processed asynchronously.
'''
return int(self.raw[24:30])
@property
def total_packets(self):
'''int : The total number of packets in the current identifier.'''
return int(self.raw[30:34])
@property
def packet_number(self):
'''int : The sequence number of the current packet for this identifier.'''
return int(self.raw[34:38])
@property
def data_length(self):
'''int : The length of the data field for the current packet.'''
return int(self.raw[38:42])
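
# A minimal usage sketch (illustrative only, never called).  This module uses
# relative imports and is normally imported as part of its package, so the
# helper below simply documents the intended workflow.  The Connect command,
# object and style names are placeholders, and a running STK instance on
# localhost:5001 is assumed.
def _example_usage():
    # Synchronous workflow: connect, issue a command, pull a report back.
    stk = Connect(host='localhost', port=5001, timeout=2.0)
    stk.connect()
    stk.send('New / */Satellite ExampleSat')  # placeholder Connect command
    lines = stk.report_rm(ObjPath='Satellite/ExampleSat', Style='MyReportStyle')
    print('Report_RM returned %d message(s)' % len(lines))
    stk.close()

    # AsyncHeader parses the fixed-width 42-character asynchronous header:
    # sync(3) + header length(2) + version(1+1) + type length(2) + type(15)
    # + identifier(6) + total packets(4) + packet number(4) + data length(4).
    raw = ('AGI' + '42' + '1' + '0' + '03' + 'ACK'.ljust(15)
           + '000001' + '0001' + '0001' + '0000')
    hdr = AsyncHeader(raw)
    print(hdr.sync, hdr.version, hdr.async_type, hdr.data_length)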
|
[
"logging.error",
"logging.debug",
"socket.socket",
"time.sleep",
"logging.info",
"sys.exit"
] |
[((3980, 4029), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (3993, 4029), False, 'import socket\n'), ((4049, 4062), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (4059, 4062), False, 'import time\n'), ((6146, 6185), 'logging.debug', 'logging.debug', (['f"""stk.send("{message}")"""'], {}), '(f\'stk.send("{message}")\')\n', (6159, 6185), False, 'import sys, logging\n'), ((7188, 7251), 'logging.debug', 'logging.debug', (['"""Reading until no data is left in the socket..."""'], {}), "('Reading until no data is left in the socket...')\n", (7201, 7251), False, 'import sys, logging\n'), ((21538, 21577), 'logging.debug', 'logging.debug', (['"""Getting Message Block:"""'], {}), "('Getting Message Block:')\n", (21551, 21577), False, 'import sys, logging\n'), ((21644, 21685), 'logging.debug', 'logging.debug', (['f"""GotMessage: {hdr}{data}"""'], {}), "(f'GotMessage: {hdr}{data}')\n", (21657, 21685), False, 'import sys, logging\n'), ((5051, 5064), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (5061, 5064), False, 'import time\n'), ((21890, 21931), 'logging.debug', 'logging.debug', (['f"""GotMessage: {hdr}{data}"""'], {}), "(f'GotMessage: {hdr}{data}')\n", (21903, 21931), False, 'import sys, logging\n'), ((4722, 4782), 'logging.info', 'logging.info', (['f"""Connected to STK on {self.host}:{self.port}"""'], {}), "(f'Connected to STK on {self.host}:{self.port}')\n", (4734, 4782), False, 'import sys, logging\n'), ((18272, 18283), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (18280, 18283), False, 'import sys, logging\n'), ((4604, 4649), 'logging.debug', 'logging.debug', (['f"""ConnectionRefusedError: {e}"""'], {}), "(f'ConnectionRefusedError: {e}')\n", (4617, 4649), False, 'import sys, logging\n'), ((7426, 7476), 'logging.debug', 'logging.debug', (['"""Timeout reached, returning buffer"""'], {}), "('Timeout reached, returning buffer')\n", (7439, 7476), False, 'import sys, logging\n'), ((5990, 6051), 'logging.error', 'logging.error', (['f"""send() failed, received NACK too many times"""'], {}), "(f'send() failed, received NACK too many times')\n", (6003, 6051), False, 'import sys, logging\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import generators
from __future__ import division
import pipes
from ansible.module_utils.basic import *
class Firebrew(object):
STATUS_SUCCESS = 0
STATUS_FAILURE = 1
STATUS_NOT_CHANGED = 2
def __init__(self, AnsibleModule = AnsibleModule):
self.module = AnsibleModule(
argument_spec = dict(
state = dict(type='str', default='present', choices=['present', 'absent']),
name = dict(type='str', required=True),
base_dir = dict(type='str'),
profile = dict(type='str'),
firefox = dict(type='str')
)
)
def build_command(self):
params = self.module.params
command = [
self.module.get_bin_path('firebrew'),
{'present': 'install', 'absent': 'uninstall'}[params['state']],
pipes.quote(params['name'])
]
for opt in ['base_dir','profile','firefox']:
if opt in params and params[opt] != None and params[opt].strip() != '':
command.append('--%s=%s' % (opt.replace('_','-'), pipes.quote(params[opt])))
return ' '.join(command)
def execute(self):
(rc,out,err) = self.module.run_command(self.build_command())
if rc == self.STATUS_SUCCESS:
self.module.exit_json(changed=True)
elif rc == self.STATUS_NOT_CHANGED:
self.module.exit_json(changed=False)
else:
self.module.fail_json(msg = err)
if __name__ == '__main__':
Firebrew().execute()
|
[
"pipes.quote"
] |
[((1008, 1035), 'pipes.quote', 'pipes.quote', (["params['name']"], {}), "(params['name'])\n", (1019, 1035), False, 'import pipes\n'), ((1249, 1273), 'pipes.quote', 'pipes.quote', (['params[opt]'], {}), '(params[opt])\n', (1260, 1273), False, 'import pipes\n')]
|
from __future__ import division, print_function, absolute_import
# lookup()
##########
import petl as etl
table1 = [['foo', 'bar'],
['a', 1],
['b', 2],
['b', 3]]
lkp = etl.lookup(table1, 'foo', 'bar')
lkp['a']
lkp['b']
# if no valuespec argument is given, defaults to the whole
# row (as a tuple)
lkp = etl.lookup(table1, 'foo')
lkp['a']
lkp['b']
# compound keys are supported
table2 = [['foo', 'bar', 'baz'],
['a', 1, True],
['b', 2, False],
['b', 3, True],
['b', 3, False]]
lkp = etl.lookup(table2, ('foo', 'bar'), 'baz')
lkp[('a', 1)]
lkp[('b', 2)]
lkp[('b', 3)]
# data can be loaded into an existing dictionary-like
# object, including persistent dictionaries created via the
# shelve module
import shelve
lkp = shelve.open('example.dat', flag='n')
lkp = etl.lookup(table1, 'foo', 'bar', lkp)
lkp.close()
lkp = shelve.open('example.dat', flag='r')
lkp['a']
lkp['b']
# lookupone()
#############
import petl as etl
table1 = [['foo', 'bar'],
['a', 1],
['b', 2],
['b', 3]]
# if the specified key is not unique and strict=False (default),
# the first value wins
lkp = etl.lookupone(table1, 'foo', 'bar')
lkp['a']
lkp['b']
# if the specified key is not unique and strict=True, will raise
# DuplicateKeyError
try:
lkp = etl.lookupone(table1, 'foo', strict=True)
except etl.errors.DuplicateKeyError as e:
print(e)
# compound keys are supported
table2 = [['foo', 'bar', 'baz'],
['a', 1, True],
['b', 2, False],
['b', 3, True],
['b', 3, False]]
lkp = etl.lookupone(table2, ('foo', 'bar'), 'baz')
lkp[('a', 1)]
lkp[('b', 2)]
lkp[('b', 3)]
# data can be loaded into an existing dictionary-like
# object, including persistent dictionaries created via the
# shelve module
import shelve
lkp = shelve.open('example.dat', flag='n')
lkp = etl.lookupone(table1, 'foo', 'bar', lkp)
lkp.close()
lkp = shelve.open('example.dat', flag='r')
lkp['a']
lkp['b']
# dictlookup()
##############
import petl as etl
table1 = [['foo', 'bar'],
['a', 1],
['b', 2],
['b', 3]]
lkp = etl.dictlookup(table1, 'foo')
lkp['a']
lkp['b']
# compound keys are supported
table2 = [['foo', 'bar', 'baz'],
['a', 1, True],
['b', 2, False],
['b', 3, True],
['b', 3, False]]
lkp = etl.dictlookup(table2, ('foo', 'bar'))
lkp[('a', 1)]
lkp[('b', 2)]
lkp[('b', 3)]
# data can be loaded into an existing dictionary-like
# object, including persistent dictionaries created via the
# shelve module
import shelve
lkp = shelve.open('example.dat', flag='n')
lkp = etl.dictlookup(table1, 'foo', lkp)
lkp.close()
lkp = shelve.open('example.dat', flag='r')
lkp['a']
lkp['b']
# dictlookupone()
#################
import petl as etl
table1 = [['foo', 'bar'],
['a', 1],
['b', 2],
['b', 3]]
# if the specified key is not unique and strict=False (default),
# the first value wins
lkp = etl.dictlookupone(table1, 'foo')
lkp['a']
lkp['b']
# if the specified key is not unique and strict=True, will raise
# DuplicateKeyError
try:
lkp = etl.dictlookupone(table1, 'foo', strict=True)
except etl.errors.DuplicateKeyError as e:
print(e)
# compound keys are supported
table2 = [['foo', 'bar', 'baz'],
['a', 1, True],
['b', 2, False],
['b', 3, True],
['b', 3, False]]
lkp = etl.dictlookupone(table2, ('foo', 'bar'))
lkp[('a', 1)]
lkp[('b', 2)]
lkp[('b', 3)]
# data can be loaded into an existing dictionary-like
# object, including persistent dictionaries created via the
# shelve module
import shelve
lkp = shelve.open('example.dat', flag='n')
lkp = etl.dictlookupone(table1, 'foo', lkp)
lkp.close()
lkp = shelve.open('example.dat', flag='r')
lkp['a']
lkp['b']
|
[
"petl.dictlookup",
"petl.dictlookupone",
"shelve.open",
"petl.lookup",
"petl.lookupone"
] |
[((204, 236), 'petl.lookup', 'etl.lookup', (['table1', '"""foo"""', '"""bar"""'], {}), "(table1, 'foo', 'bar')\n", (214, 236), True, 'import petl as etl\n'), ((339, 364), 'petl.lookup', 'etl.lookup', (['table1', '"""foo"""'], {}), "(table1, 'foo')\n", (349, 364), True, 'import petl as etl\n'), ((558, 599), 'petl.lookup', 'etl.lookup', (['table2', "('foo', 'bar')", '"""baz"""'], {}), "(table2, ('foo', 'bar'), 'baz')\n", (568, 599), True, 'import petl as etl\n'), ((794, 830), 'shelve.open', 'shelve.open', (['"""example.dat"""'], {'flag': '"""n"""'}), "('example.dat', flag='n')\n", (805, 830), False, 'import shelve\n'), ((837, 874), 'petl.lookup', 'etl.lookup', (['table1', '"""foo"""', '"""bar"""', 'lkp'], {}), "(table1, 'foo', 'bar', lkp)\n", (847, 874), True, 'import petl as etl\n'), ((893, 929), 'shelve.open', 'shelve.open', (['"""example.dat"""'], {'flag': '"""r"""'}), "('example.dat', flag='r')\n", (904, 929), False, 'import shelve\n'), ((1181, 1216), 'petl.lookupone', 'etl.lookupone', (['table1', '"""foo"""', '"""bar"""'], {}), "(table1, 'foo', 'bar')\n", (1194, 1216), True, 'import petl as etl\n'), ((1608, 1652), 'petl.lookupone', 'etl.lookupone', (['table2', "('foo', 'bar')", '"""baz"""'], {}), "(table2, ('foo', 'bar'), 'baz')\n", (1621, 1652), True, 'import petl as etl\n'), ((1847, 1883), 'shelve.open', 'shelve.open', (['"""example.dat"""'], {'flag': '"""n"""'}), "('example.dat', flag='n')\n", (1858, 1883), False, 'import shelve\n'), ((1890, 1930), 'petl.lookupone', 'etl.lookupone', (['table1', '"""foo"""', '"""bar"""', 'lkp'], {}), "(table1, 'foo', 'bar', lkp)\n", (1903, 1930), True, 'import petl as etl\n'), ((1949, 1985), 'shelve.open', 'shelve.open', (['"""example.dat"""'], {'flag': '"""r"""'}), "('example.dat', flag='r')\n", (1960, 1985), False, 'import shelve\n'), ((2151, 2180), 'petl.dictlookup', 'etl.dictlookup', (['table1', '"""foo"""'], {}), "(table1, 'foo')\n", (2165, 2180), True, 'import petl as etl\n'), ((2374, 2412), 'petl.dictlookup', 'etl.dictlookup', (['table2', "('foo', 'bar')"], {}), "(table2, ('foo', 'bar'))\n", (2388, 2412), True, 'import petl as etl\n'), ((2607, 2643), 'shelve.open', 'shelve.open', (['"""example.dat"""'], {'flag': '"""n"""'}), "('example.dat', flag='n')\n", (2618, 2643), False, 'import shelve\n'), ((2650, 2684), 'petl.dictlookup', 'etl.dictlookup', (['table1', '"""foo"""', 'lkp'], {}), "(table1, 'foo', lkp)\n", (2664, 2684), True, 'import petl as etl\n'), ((2703, 2739), 'shelve.open', 'shelve.open', (['"""example.dat"""'], {'flag': '"""r"""'}), "('example.dat', flag='r')\n", (2714, 2739), False, 'import shelve\n'), ((2996, 3028), 'petl.dictlookupone', 'etl.dictlookupone', (['table1', '"""foo"""'], {}), "(table1, 'foo')\n", (3013, 3028), True, 'import petl as etl\n'), ((3424, 3465), 'petl.dictlookupone', 'etl.dictlookupone', (['table2', "('foo', 'bar')"], {}), "(table2, ('foo', 'bar'))\n", (3441, 3465), True, 'import petl as etl\n'), ((3658, 3694), 'shelve.open', 'shelve.open', (['"""example.dat"""'], {'flag': '"""n"""'}), "('example.dat', flag='n')\n", (3669, 3694), False, 'import shelve\n'), ((3701, 3738), 'petl.dictlookupone', 'etl.dictlookupone', (['table1', '"""foo"""', 'lkp'], {}), "(table1, 'foo', lkp)\n", (3718, 3738), True, 'import petl as etl\n'), ((3757, 3793), 'shelve.open', 'shelve.open', (['"""example.dat"""'], {'flag': '"""r"""'}), "('example.dat', flag='r')\n", (3768, 3793), False, 'import shelve\n'), ((1335, 1376), 'petl.lookupone', 'etl.lookupone', (['table1', '"""foo"""'], {'strict': '(True)'}), "(table1, 'foo', strict=True)\n", 
(1348, 1376), True, 'import petl as etl\n'), ((3147, 3192), 'petl.dictlookupone', 'etl.dictlookupone', (['table1', '"""foo"""'], {'strict': '(True)'}), "(table1, 'foo', strict=True)\n", (3164, 3192), True, 'import petl as etl\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-10-14 19:45:05
# @Author : jingray (<EMAIL>)
# @Link : http://www.jianshu.com/u/01fb0364467d
# @Version : $Id$
import os
# STRINGS
print("STRINGS")
my_string_1 = "hello"
my_string_2 = 'world'
my_multiline_string = """
Dear World,
Hello. I am a multiline python string.
I'm enclosed in triple quotes. I'd write
them here, but that would end the string!
I know! I'll use a slash as an escape character.
Triple quotes look like this: \"\"\"
Sincerely,
Python
"""
newline_character = "\n"
print(my_string_1, my_string_2)
print(my_multiline_string)
print(newline_character)
print("-----------")
print(newline_character)
# NUMBERS AND BOOLEANS
print("NUMBERS")
my_float = 0.5
my_integer = 7
my_negative = -3.5
my_fraction = 1/2
# what do you think THIS line of code will assign to the variable
# does_half_equal_point_five?
does_half_equal_point_five = (my_fraction == my_float)
print("The absolute value of", my_negative, "is", abs(my_negative))
print(my_integer, "squared is equal to", my_integer ** 2)
print("Does", my_fraction, "equal", my_float, "?", does_half_equal_point_five)
for left_num in range(10):
for right_num in range(10):
product = left_num * right_num
print(left_num, "x", right_num, "=", product)
print ("\n")
#List
my_list = [1, 2, 3, "a", "b", "c"]
print("my_list is:", my_list)
print("Enumerating a list...")
for i, item in enumerate(my_list):
print("item number", i, "is", item)
print("Another way to enumerate using a list 'method'...")
for item in my_list:
index = my_list.index(item)
print("item", item, "has index", index)
#List Comprehensions
numbers_0_to_9 = [x for x in range(10)]
print("Numbers 0 to 9", numbers_0_to_9)
squares = [x * x for x in range(10)]
print("Squares ", squares)
odds = [x for x in range(10) if x % 2 == 1]
print("Odds ", odds)
# This example uses a data type called a namedtuple which is similar to a struct data type in other languages.
from collections import namedtuple
Person = namedtuple("Person", ["name", "age", "gender"])
people = [
Person("Andy", 30, "m"),
Person("Ping", 1, "m"),
Person("Tina", 32, "f"),
Person("Abby", 14, "f"),
Person("Adah", 13, "f"),
Person("Sebastian", 42, "m"),
Person("Carol" , 68, "f"),
]
# first, let's show how this namedtuple works.
andy = people[0]
print("name: ", andy.name)
print("age: ", andy.age)
print("gender:", andy.gender)
# now let's show what we can do with a list comprehension
#
male_names = [person.name for person in people if person.gender=="m"]
print("Male names:", male_names)
teen_names = [p.name for p in people if 13 <= p.age <= 18 ]
print("Teen names:", teen_names)
# random
import random as rd
a = rd.random()
b = rd.random()
c = rd.random()
print("a is", a)
print("b is", b)
print("c is", c)
|
[
"random.random",
"collections.namedtuple"
] |
[((2069, 2116), 'collections.namedtuple', 'namedtuple', (['"""Person"""', "['name', 'age', 'gender']"], {}), "('Person', ['name', 'age', 'gender'])\n", (2079, 2116), False, 'from collections import namedtuple\n'), ((2788, 2799), 'random.random', 'rd.random', ([], {}), '()\n', (2797, 2799), True, 'import random as rd\n'), ((2804, 2815), 'random.random', 'rd.random', ([], {}), '()\n', (2813, 2815), True, 'import random as rd\n'), ((2820, 2831), 'random.random', 'rd.random', ([], {}), '()\n', (2829, 2831), True, 'import random as rd\n')]
|
import pytest
from src.lib.time_util import TimeUtil
import datetime
@pytest.fixture(scope="module", autouse=True)
def tmu_object():
tmu = TimeUtil()
yield tmu
class TestTimeUtil:
    @pytest.mark.parametrize("test_input, expected_wareki, expected_other", [
        ('令和2年10月31日', '令和', '2年10月31日'),
        ('平成30年1月31日', '平成', '30年1月31日'),
        ('大正1年1月31日', None, None)])
def test_get_wareki(self, tmu_object, test_input, expected_wareki, expected_other):
wareki, other = tmu_object.get_wareki(test_input)
assert wareki == expected_wareki
assert other == expected_other
def test_get_ymd_int_each(self, tmu_object):
result = tmu_object.get_ymd_int_each('2年3月9日')
assert result == [2, 3, 9]
def test_get_ymd_int_each_2020(self, tmu_object):
result = tmu_object.get_ymd_int_each('3月1日', need_year=False)
assert result == [3, 1]
def test_parse_date_span(self, tmu_object):
target_char = "test1~ \ntest2"
result = tmu_object.parse_date_span(target_char)
assert result == ["test1", "test2"]
def test_get_ad_dt_fmt(self, tmu_object):
iso_format = tmu_object.get_ad_dt_fmt('令和', 2, 4, 29)
assert iso_format == "2020-04-29T00:00:00+09:00"
def test_get_ad_date_iso_fmt(self, tmu_object):
iso_format = tmu_object.get_ad_date_iso_fmt(4, 3)
assert iso_format == "2020-04-03T00:00:00+09:00"
def test_get_ad_default_year_dt_fmt(self, tmu_object):
datetime_format = tmu_object.get_ad_default_year_dt_fmt(4, 3)
assert datetime_format == datetime.datetime(
2020, 4, 3, 0, 0, tzinfo=datetime.timezone(datetime.timedelta(0, 32400), 'JST'))
def test_convert_wareki_to_ad(self, tmu_object):
result = tmu_object.convert_wareki_to_ad('令和2年10月23日')
assert result == "2020-10-23T00:00:00+09:00"
def test_convert_wareki_to_ad_error(self, tmu_object):
with pytest.raises(ValueError):
tmu_object.convert_wareki_to_ad('大正2年10月23日')
@pytest.mark.parametrize(
"pattern, end, start, need_day, expected", [
("No_start_No_needDay", datetime.datetime(2020, 3, 2), None, False, [{"日付": "2020-03-01T00:00:00+09:00",
"小計": 0}, {"日付": "2020-03-02T00:00:00+09:00", "小計": 0}]),
("start_No_needDay", datetime.datetime(
2020, 3, 2), datetime.datetime(2020, 3, 1, 0, 0, 0, 0, tzinfo=datetime.timezone(
datetime.timedelta(hours=9), name='JST')), False, [{"日付": "2020-03-01T00:00:00+09:00",
"小計": 0}, {"日付": "2020-03-02T00:00:00+09:00", "小計": 0}]),
("start_needDay", datetime.datetime(
2020, 3, 2), datetime.datetime(2020, 3, 1, 0, 0, 0, 0, tzinfo=datetime.timezone(
datetime.timedelta(hours=9), name='JST')), True, [{"日付": "2020-03-01T00:00:00+09:00", "day": 1,
"小計": 0}, {"日付": "2020-03-02T00:00:00+09:00", "小計": 0, "day": 2}]),
("NO_start_needDay", datetime.datetime(
2020, 3, 2), None, True, [{"日付": "2020-03-01T00:00:00+09:00", "day": 1,
"小計": 0}, {"日付": "2020-03-02T00:00:00+09:00", "小計": 0, "day": 2}])
]
)
def test_create_dt_dict(self, tmu_object, pattern, end, start, need_day, expected):
print(pattern)
result = tmu_object.create_dt_dict(
end, start=start, need_day=need_day)
assert result == expected
def test_get_dt_dict_from_text(self, tmu_object):
target_char = "3月1日~ \n3月2日"
result = tmu_object.get_dt_dict_from_text(target_char)
assert result == [{"日付": "2020-03-01T00:00:00+09:00", "day": 1,
"小計": 0}, {"日付": "2020-03-02T00:00:00+09:00", "小計": 0, "day": 2}]
if __name__ == '__main__':
pytest.main(['-v', __file__])
|
[
"src.lib.time_util.TimeUtil",
"pytest.fixture",
"pytest.main",
"datetime.datetime",
"pytest.raises",
"datetime.timedelta",
"pytest.mark.parametrize"
] |
[((72, 116), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""', 'autouse': '(True)'}), "(scope='module', autouse=True)\n", (86, 116), False, 'import pytest\n'), ((145, 155), 'src.lib.time_util.TimeUtil', 'TimeUtil', ([], {}), '()\n', (153, 155), False, 'from src.lib.time_util import TimeUtil\n'), ((197, 374), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_input, expected_wareki, expected_other"""', "[('令和2年10月31日', '令和', '2年10月31日'), ('平成30年1月31日', '平成', '30年1月31日'), (\n '大正1年1月31日', None, None)]"], {}), "('test_input, expected_wareki, expected_other', [(\n '令和2年10月31日', '令和', '2年10月31日'), ('平成30年1月31日', '平成', '30年1月31日'), (\n '大正1年1月31日', None, None)])\n", (220, 374), False, 'import pytest\n'), ((4045, 4074), 'pytest.main', 'pytest.main', (["['-v', __file__]"], {}), "(['-v', __file__])\n", (4056, 4074), False, 'import pytest\n'), ((1968, 1993), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1981, 1993), False, 'import pytest\n'), ((2173, 2202), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(3)', '(2)'], {}), '(2020, 3, 2)\n', (2190, 2202), False, 'import datetime\n'), ((2429, 2458), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(3)', '(2)'], {}), '(2020, 3, 2)\n', (2446, 2458), False, 'import datetime\n'), ((2814, 2843), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(3)', '(2)'], {}), '(2020, 3, 2)\n', (2831, 2843), False, 'import datetime\n'), ((3218, 3247), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(3)', '(2)'], {}), '(2020, 3, 2)\n', (3235, 3247), False, 'import datetime\n'), ((1687, 1715), 'datetime.timedelta', 'datetime.timedelta', (['(0)', '(32400)'], {}), '(0, 32400)\n', (1705, 1715), False, 'import datetime\n'), ((2565, 2592), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(9)'}), '(hours=9)\n', (2583, 2592), False, 'import datetime\n'), ((2950, 2977), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(9)'}), '(hours=9)\n', (2968, 2977), False, 'import datetime\n')]
|
#!/usr/bin/env python3
"""
sudo apt-get install libqpdf-dev
"""
import zlib
import argparse
import pikepdf
from pikepdf import Pdf, PdfImage, Name
from reportlab.pdfgen import canvas
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.lib import units
class PdfWatermark:
def __init__(self, pdf_in: str, pdf_out: str, text: str):
self.pdf_in = pdf_in
self.pdf_out = pdf_out
self.pdf_watermark = "wm.pdf"
self.wm_font_size = 20
self.wm_text = text
self.wm_alpha = 0.2
def apply(self):
self._create_watermark_pdf()
with pikepdf.open(self.pdf_in) as pdf_main:
with pikepdf.open(self.pdf_watermark) as pdf_wm:
for page in pdf_main.pages:
page.add_underlay(pdf_wm.pages[0])
pdf_main.save(self.pdf_out)
def _create_watermark_pdf(self):
c = canvas.Canvas(self.pdf_watermark)
pdfmetrics.registerFont(
TTFont('times new roman', 'Times New Roman.ttf'))
c.setFont('times new roman', self.wm_font_size)
pw, ph = c._pagesize
c.setFillGray(0.5, self.wm_alpha)
c.saveState()
c.translate(500, 100)
c.rotate(45)
c.drawCentredString(pw / 2 - 50, ph - 400, self.wm_text)
c.restoreState()
c.save()
def main_cli():
parser = argparse.ArgumentParser()
parser.add_argument(
"--input",
type=str,
required=True,
help="The PDF file in which will be inserted watermark"
)
parser.add_argument(
"--out",
type=str,
required=True,
help="The PDF file in which will be saved result"
)
parser.add_argument(
"--text",
type=str,
required=True,
help="The text of watermark"
)
args = parser.parse_args()
srv = PdfWatermark(args.input, args.out, args.text)
srv.apply()
if __name__ == "__main__":
main_cli()
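
# Usage notes (illustrative only; the script and file names are placeholders):
#   python pdf_watermark.py --input report.pdf --out report_wm.pdf --text "DRAFT"
# Programmatic equivalent, assuming reportlab can locate "Times New Roman.ttf"
# in the working directory:
#   PdfWatermark("report.pdf", "report_wm.pdf", "DRAFT").apply()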
|
[
"reportlab.pdfgen.canvas.Canvas",
"reportlab.pdfbase.ttfonts.TTFont",
"pikepdf.open",
"argparse.ArgumentParser"
] |
[((1405, 1430), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1428, 1430), False, 'import argparse\n'), ((936, 969), 'reportlab.pdfgen.canvas.Canvas', 'canvas.Canvas', (['self.pdf_watermark'], {}), '(self.pdf_watermark)\n', (949, 969), False, 'from reportlab.pdfgen import canvas\n'), ((647, 672), 'pikepdf.open', 'pikepdf.open', (['self.pdf_in'], {}), '(self.pdf_in)\n', (659, 672), False, 'import pikepdf\n'), ((1015, 1063), 'reportlab.pdfbase.ttfonts.TTFont', 'TTFont', (['"""times new roman"""', '"""Times New Roman.ttf"""'], {}), "('times new roman', 'Times New Roman.ttf')\n", (1021, 1063), False, 'from reportlab.pdfbase.ttfonts import TTFont\n'), ((703, 735), 'pikepdf.open', 'pikepdf.open', (['self.pdf_watermark'], {}), '(self.pdf_watermark)\n', (715, 735), False, 'import pikepdf\n')]
|
"""Profiles for Scholarship App"""
__author__ = "<NAME>"
import base64
import bibcat
import datetime
import hashlib
import io
import os
import pprint
import smtplib
import subprocess
import threading
import uuid
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import mimetypes
import click
import rdflib
import requests
from bs4 import BeautifulSoup
from flask import abort, current_app
from github import Github, GithubException
import utilities
from .sparql import EMAIL_LOOKUP, SUBJECTS_IRI, RESEARCH_STMT_IRI
from .sparql import add_qualified_generation, add_qualified_revision
BF = rdflib.Namespace("http://id.loc.gov/ontologies/bibframe/")
CITE = rdflib.Namespace("https://www.coloradocollege.edu/library/ns/citation/")
PROV = rdflib.Namespace("http://www.w3.org/ns/prov#")
SCHEMA = rdflib.Namespace("http://schema.org/")
class GitProfile(object):
def __init__(self, config):
self.graph_hashes = {}
cc_github = Github(config.get("GITHUB_USER"),
config.get("GITHUB_PWD"))
self.triplestore_url = config.get("TRIPLESTORE_URL")
self.tutt_github = cc_github.get_organization("Tutt-Library")
# Start retrieving and parsing latest RDF for current academic year
# and CC people
now = datetime.datetime.utcnow()
if now.month < 7:
start_year = now.year - 1
end_year = now.year
else:
start_year = now.year
end_year = now.year + 1
self.current_year_path = "/KnowledgeGraph/cc-{0}-{1}.ttl".format(
start_year, end_year)
self.current_year = rdflib.Graph()
self.cc_people = rdflib.Graph()
self.tiger_repo = self.tutt_github.get_repo("tiger-catalog")
for content in self.tiger_repo.get_dir_contents("/KnowledgeGraph/"):
raw_turtle = self.__get_content__("tiger_repo",
content)
if content.name.startswith(self.current_year_path.split("/")[-1]):
self.current_year_git = content
self.current_year.parse(data=raw_turtle,
format='turtle')
if content.name.startswith("cc-people"):
self.cc_people_git = content
self.cc_people.parse(data=raw_turtle,
format='turtle')
self.graph_hashes["cc_people"] = hashlib.sha1(
self.cc_people.serialize(format='n3')).hexdigest()
self.graph_hashes["current_year"] = hashlib.sha1(
self.current_year.serialize(format='n3')).hexdigest()
# Start retrieving and parsing latest RDF for creative works,
# research statements, and FAST subjects
self.creative_works = rdflib.Graph()
self.research_statements = rdflib.Graph()
self.fast_subjects = rdflib.Graph()
self.scholarship_repo = self.tutt_github.get_repo("cc-scholarship-graph")
for content in self.scholarship_repo.get_dir_contents("/data/"):
raw_turtle = self.__get_content__("scholarship_repo",
content)
if content.name.startswith("cc-research-statements"):
self.research_statements_git = content
self.research_statements.parse(
data=raw_turtle,
format='turtle')
if content.name.startswith("cc-fast-subjects"):
self.fast_subjects_git = content
self.fast_subjects.parse(
data=raw_turtle,
format='turtle')
if content.name.startswith("creative-works"):
self.creative_works_git = content
self.creative_works.parse(
data=raw_turtle,
format='turtle')
self.graph_hashes["creative_works"] = hashlib.sha1(
self.creative_works.serialize(format='n3')).hexdigest()
self.graph_hashes["research_statements"] = hashlib.sha1(
self.research_statements.serialize(format='n3')).hexdigest()
self.graph_hashes["fast_subjects"] = hashlib.sha1(
self.fast_subjects.serialize(format='n3')).hexdigest()
def __get_content__(self, repo_name, content):
raw_turtle = None
try:
raw_turtle = content.decoded_content
except GithubException:
repo = getattr(self, repo_name)
blob = repo.get_git_blob(content.sha)
raw_turtle = base64.b64decode(blob.content)
return raw_turtle
def __save_graph__(self, **kwargs):
git_repo = kwargs.get("git_repo")
file_path = kwargs.get("file_path")
graph_name = kwargs.get("graph_name")
branch = kwargs.get("branch")
message = kwargs.get("message", "Updating {}".format(graph_name))
graph = getattr(self, graph_name)
graph_sha1 = hashlib.sha1(graph.serialize(format='n3')).hexdigest()
if graph_sha1 == self.graph_hashes[graph_name]:
return
git_graph = getattr(self, "{}_git".format(graph_name))
if branch:
git_repo.update_file(file_path,
message,
graph.serialize(format='turtle'),
git_graph.sha,
branch=branch)
else:
git_repo.update_file(file_path,
message,
graph.serialize(format='turtle'),
git_graph.sha)
def update_all(self, person_label, action="Add", connection=None):
self.__save_graph__(
git_repo=self.tiger_repo,
file_path="/KnowledgeGraph/cc-people.ttl",
graph_name="cc_people",
message="{} {} to CC People".format(action, person_label))
self.__save_graph__(
git_repo=self.tiger_repo,
file_path=self.current_year_path,
graph_name="current_year",
message="{} person to Department for school year".format(action))
self.__save_graph__(
git_repo=self.scholarship_repo,
file_path="/data/cc-research-statements.ttl",
graph_name="research_statements",
message="{} Research Statement for {}".format(
action, person_label))
self.__save_graph__(
git_repo=self.scholarship_repo,
file_path ="/data/cc-fast-subjects.ttl",
graph_name="fast_subjects",
message="Fast subject added")
self.__save_graph__(
git_repo=self.scholarship_repo,
file_path ="/data/creative-works.ttl",
graph_name="creative_works",
message="Creative Works added")
if connection:
self.__reload_triplestore__(connection)
def __reload_triplestore__(self, config_mgr):
data_upload = []
for row in config_mgr.get("CONNECTIONS"):
if row.get("name").startswith("datastore"):
for directory_row in row.get("data_upload"):
data_upload.append(directory_row[1])
# Pull in the latest changes in each repository
for directory in data_upload:
os.chdir(directory)
result = subprocess.run(['git', 'pull', 'origin', 'master'])
click.echo(result.returncode, result.stdout)
config_mgr.conns.datastore.mgr.reset()
class ProfileUpdateThread(threading.Thread):
def __init__(self, **kwargs):
threading.Thread.__init__(self)
config = kwargs.get("config")
cc_github = Github(config.get("GITHUB_USER"),
config.get("GITHUB_PWD"))
self.tutt_github = cc_github.get_organization("Tutt-Library")
self.statement_msg = kwargs.get("msg")
self.person_iri = kwargs.get("person")
self.research_statements = rdflib.Graph()
self.fast_subjects = rdflib.Graph()
self.profile = kwargs.get("profile")
self.scholarship_repo = self.tutt_github.get_repo("cc-scholarship-graph")
for content in self.scholarship_repo.get_dir_contents("/data/"):
try:
raw_turtle = content.decoded_content
except GithubException:
blob = self.scholarship_repo.get_git_blob(content.sha)
raw_turtle = base64.b64decode(blob.content)
if content.name.startswith("cc-research-statements"):
self.research_statements_git = content
self.research_statements.parse(
data=raw_turtle,
format='turtle')
if content.name.startswith("cc-fast-subjects"):
self.fast_subjects_git = content
self.fast_subjects.parse(
data=raw_turtle,
format='turtle')
def __save_graph__(self, **kwargs):
file_path = kwargs.get("file_path")
branch = kwargs.get("branch")
graph_name = kwargs.get("graph_name")
graph = getattr(self, graph_name)
message = kwargs.get("message", "Updating {}".format(graph_name))
git_graph = getattr(self, "{}_git".format(graph_name))
if branch:
self.scholarship_repo.update_file(file_path,
message,
graph.serialize(format='turtle'),
git_graph.sha,
branch=branch)
else:
self.scholarship_repo.update_file(file_path,
message,
graph.serialize(format='turtle'),
git_graph.sha)
def __update_fast_subjects__(self):
existing_subjects, new_subjects = set(), set()
existing_stmt = self.research_statements.value(
predicate=SCHEMA.accountablePerson,
object=self.person_iri)
for row in self.research_statements.objects(
subject=existing_stmt,
predicate=SCHEMA.about):
existing_subjects.add(row)
for fast_heading in self.profile.graph.objects(
subject=existing_stmt,
predicate=SCHEMA.about):
new_subjects.add(fast_heading)
for subject in list(existing_subjects.difference(new_subjects)):
self.research_statements.remove((existing_stmt,
SCHEMA.about,
subject))
for subject in list(new_subjects.difference(existing_subjects)):
# Add new subject to research statements and fast subjects
self.research_statements.add((existing_stmt,
SCHEMA.about,
subject))
self.fast_subjects.add((subject,
rdflib.RDF.type,
BF.Topic))
subject_label = self.profile.graph.value(subject=subject,
predicate=rdflib.RDFS.label)
if subject_label is not None:
self.fast_subjects.add((subject,
rdflib.RDFS.label,
subject_label))
def __update_research_statements__(self):
existing_stmt = self.research_statements.value(
predicate=SCHEMA.accountablePerson,
object=self.person_iri)
current_description = self.research_statements.value(
subject=existing_stmt,
predicate=SCHEMA.description)
new_description = self.profile.graph.value(
subject=existing_stmt,
predicate=SCHEMA.description)
if new_description is not None \
and str(current_description) != str(new_description):
self.research_statements.remove((existing_stmt,
SCHEMA.description,
current_description))
            # rdflib escapes embedded quotes when serializing literals, so
            # the new description can be stored without manual escaping.
self.research_statements.add((existing_stmt,
SCHEMA.description,
new_description))
def run(self):
# Function iterates and commits any changes to
self.__update_fast_subjects__()
self.__update_research_statements__()
self.__save_graph__(
file_path="/data/cc-research-statements.ttl",
graph_name="research_statements",
message=self.statement_msg)
self.__save_graph__(
file_path ="/data/cc-fast-subjects.ttl",
graph_name="fast_subjects",
message="Fast subject added")
class EmailProfile(object):
"""Simple Email Profile class that creates a local RDF graph for new
    or edited profiles that are sent via email to the administrators
    for review."""
def __init__(self, config, person_iri):
self.config = config
self.triplestore_url = self.config.get("TRIPLESTORE_URL")
self.graph = rdflib.Graph()
self.graph.namespace_manager.bind("bf", BF)
self.graph.namespace_manager.bind("cite", CITE)
self.graph.namespace_manager.bind("schema", SCHEMA)
self.graph.namespace_manager.bind("prov", PROV)
self.email = config.get("EMAIL")
self.recipients = config.get("ADMINS")
self.person_iri = person_iri
def __send_email__(self, subject, body):
"""Sends email to administrators with attached profile graph"""
message = MIMEMultipart()
message["From"] = self.email.get("user")
message["To"] = ",".join(["<{0}>".format(r) for r in self.recipients])
message["Subject"] = subject
email_server = smtplib.SMTP(
self.email.get("host"),
self.email.get("port"))
email_server.ehlo()
if self.email.get("tls"):
email_server.starttls()
body = MIMEText(body, _charset="UTF-8")
message.attach(body)
graph_turtle = io.StringIO(
self.graph.serialize(format='turtle').decode())
attachment = MIMEText(graph_turtle.read())
attachment.add_header('Content-Disposition',
'attachment',
filename='profile.ttl')
message.attach(attachment)
email_server.login(
self.email.get("user"),
self.email.get("password"))
recipients = list(set(self.recipients)) # Quick dedup
email_server.sendmail(self.email.get("user"),
recipients,
message.as_string())
email_server.close()
def __add_article__(self, work_iri, work_form):
"""Article specific data added to creative work
Args:
work_iri(rdflib.URIRef): Creative Work IRI for Article
work_form(Flask.request.form): Dict of form values
"""
self.graph.add((work_iri,
rdflib.RDF.type,
SCHEMA.ScholarlyArticle))
self.graph.add((work_iri,
SCHEMA.name,
rdflib.Literal(work_form.article_title.data)))
if work_form.page_start.data !=None:
self.graph.add((work_iri,
SCHEMA.pageStart,
rdflib.Literal(work_form.page_start.data)))
if work_form.page_end.data !=None:
self.graph.add((work_iri,
SCHEMA.pageEnd,
rdflib.Literal(work_form.page_end.data)))
journal = rdflib.BNode()
self.graph.add((journal, rdflib.RDF.type, SCHEMA.Periodical))
self.graph.add((journal,
SCHEMA.name,
rdflib.Literal(work_form.journal_title.data)))
issue, volume = None, None
if work_form.volume_number.data != None:
volume = rdflib.BNode()
self.graph.add((volume, rdflib.RDF.type, SCHEMA.PublicationVolume))
self.graph.add((volume,
SCHEMA.volumeNumber,
rdflib.Literal(work_form.volume_number.data)))
self.graph.add((volume, SCHEMA.partOf, journal))
if work_form.issue_number.data != None:
issue = rdflib.BNode()
self.graph.add((issue, rdflib.RDF.type, SCHEMA.PublicationIssue))
self.graph.add((issue,
SCHEMA.issueNumber,
rdflib.Literal(work_form.issue_number.data)))
if volume is not None:
self.graph.add((issue,
SCHEMA.partOf,
volume))
else:
self.graph.add((issue,
SCHEMA.partOf,
journal))
self.graph.add((work_iri, SCHEMA.partOf, issue))
elif volume is not None:
self.graph.add((work_iri, SCHEMA.partOf, volume))
else:
# Add work_iri to Journal as last resort
self.graph.add((work_iri, SCHEMA.partOf, journal))
if work_form.month.data != None:
self.graph.add((work_iri,
CITE.month,
rdflib.Literal(work_form.month.data)))
def __add_book__(self, work, work_form):
self.graph.add((work, rdflib.RDF.type, SCHEMA.Book))
self.graph.add((work,
SCHEMA.title,
rdflib.Literal(work_form.book_title.data)))
if work_form.isbn.data is not None:
self.graph.add((work,
SCHEMA.isbn,
rdflib.Literal(work_form.isbn.data)))
if work_form.editionStatement.data is not None:
self.graph.add(
(work,
SCHEMA.editionStatement,
rdflib.Literal(work_form.editionStatement.data)))
if work_form.editor.data is not None:
self.graph.add((work,
SCHEMA.editor,
rdflib.Literal(work_form.editor.data)))
if work_form.provisionActivityStatement.data is not None:
self.graph.add(
(work,
SCHEMA.provisionActivityStatement,
rdflib.Literal(work_form.provisionActivityStatement.data)))
if work_form.notes.data is not None:
self.graph.add(
(work,
SCHEMA.description,
rdflib.Literal(work_form.notes.data)))
def __populate_work__(self, work_form, generated_by=None):
"""Populates graph with new work
Args:
form(Flask.request.form): Dict of form values
"""
if len(work_form.iri.data) > 0:
work_iri = rdflib.URIRef(work_form.iri.data)
else: # Mint IRI for new work
if "doi" in work_form and len(work_form.doi.data) > 0:
work_iri = rdflib.URIRef(work_form.doi.data)
else:
work_iri = rdflib.URIRef(
"http://catalog.coloradocollege.edu/{}".format(
uuid.uuid1()))
self.graph.add((work_iri,
                        SCHEMA.datePublished,
rdflib.Literal(work_form.datePublished.data)))
self.graph.add((work_iri,
CITE.authorString,
rdflib.Literal(work_form.author_string.data)))
if generated_by:
add_qualified_generation(self.graph,
work_iri,
generated_by)
citation_type = work_form.citation_type.data
self.graph.add((work_iri,
CITE.citationType,
rdflib.Literal(citation_type)))
if "author" in work_form and len(work_form.author.data) > 0:
self.person_iri = rdflib.URIRef(work_form.author.data)
self.graph.add((work_iri,
SCHEMA.author,
self.person_iri))
elif generated_by:
self.person_iri = generated_by
self.graph.add((work_iri,
SCHEMA.author,
generated_by))
if "url" in work_form and len(work_form.url.data) > 0:
self.graph.add((work_iri,
SCHEMA.url,
rdflib.URIRef(work_form.url.data)))
        if work_form.abstract.data is not None:
self.graph.add((work_iri,
SCHEMA.about,
rdflib.Literal(work_form.abstract.data)))
if citation_type.startswith("article"):
self.__add_article__(work_iri, work_form)
elif citation_type.startswith("book chapter"):
self.graph.add((work_iri, rdflib.RDF.type, SCHEMA.Chapter))
book_bnode = rdflib.BNode()
self.graph.add((work_iri, SCHEMA.partOf, book_bnode))
self.__add_book__(book_bnode, work_form)
elif citation_type.startswith("book"):
self.__add_book__(work_iri, work_form)
else:
abort(500)
return work_iri
def add(self, work_form, generated_by=None):
work_iri = self.__populate_work__(work_form, generated_by)
email_body = "Properties and Values for Creative Work {}".format(work_iri)
for row in work_form._fields:
if row.startswith("csrf_token"):
continue
field = getattr(work_form, row)
email_body += "\n{}:\t{}".format(row, field.data)
self.__send_email__("Added New Work", email_body)
return work_iri
def new(self, message):
"""Adds a new profile"""
self.__send_email__("Add new profile", message)
def update(self, message):
"""Edits existing profile"""
global BACKGROUND_THREAD
BACKGROUND_THREAD = ProfileUpdateThread(
config=self.config,
msg=message,
person=self.person_iri,
profile=self)
BACKGROUND_THREAD.start()
self.__send_email__("Updating Profile", message)
def __email_work__(**kwargs):
"""Function takes a work graph and configuration and emails the graph in
turtle format to the administrators for review before adding to production.
Keyword args:
work_graph(rdflib.Graph): RDF Graph of Citation
config: Configuration includes logins and administor
"""
work_graph = kwargs.get("graph")
config = kwargs.get("config")
sender = config.get('EMAIL')['user']
recipients = config.get("ADMINS")
subject = kwargs.get('subject')
text = kwargs.get('text')
carbon_copy = kwargs.get("carbon_copy", [])
message = MIMEMultipart()
message["From"] = sender
message["Subject"] = subject
message["To"] = ",".join(["<{0}>".format(r) for r in recipients])
if len(carbon_copy) > 0:
message["Cc"] = ','.join(carbon_copy)
recipients.extend(carbon_copy)
body = MIMEText(text, _charset="UTF-8")
message.attach(body)
if work_graph:
work_turtle = io.StringIO(
work_graph.serialize(format='turtle').decode())
attachment = MIMEText(work_turtle.read())
attachment.add_header('Content-Disposition',
'attachment',
filename='work.ttl')
message.attach(attachment)
server = smtplib.SMTP(config.get('EMAIL')['host'],
config.get('EMAIL')['port'])
server.ehlo()
if config.get('EMAIL')['tls']:
server.starttls()
server.ehlo()
server.login(sender,
config.get("EMAIL")["password"])
recipients = list(set(recipients)) # Quick dedup
server.sendmail(sender, recipients, message.as_string())
server.close()
def generate_citation_html(citation):
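    """Builds the Bootstrap HTML row for a single citation dict and returns it
    as prettified markup."""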
soup = BeautifulSoup("", 'lxml')
div = soup.new_tag("div", **{"class": "row"})
col_1 = soup.new_tag("div", **{"class": "col-1"})
citation_type = citation.get("ENTRYTYPE")
if citation_type.startswith("article"):
col_1.append(soup.new_tag("i", **{"class": "fas fa-file-alt"}))
elif citation_type.endswith("book"):
col_1.append(soup.new_tag("i", **{"class": "fas fa-book"}))
under_review = soup.new_tag("em")
under_review.string = "In Review"
col_1.append(under_review)
div.append(col_1)
col_2 = soup.new_tag("div", **{"class": "col-7"})
if "article_title" in citation:
name = citation.get("article_title")
elif "title" in citation:
name = citation.get("title")
if "url" in citation:
work_link = soup.new_tag("a", href=citation.get("url"))
work_link.string = name
col_2.append(work_link)
else:
span = soup.new_tag("span")
span.string = name
col_2.append(span)
if "journal_title" in citation:
em = soup.new_tag("em")
em.string = citation.get("journal_title")
col_2.append(em)
if "year" in citation:
span = soup.new_tag("span")
span.string = "({0})".format(citation.get("year"))
col_2.append(span)
vol_number = citation.get("volume_number")
if vol_number and len(vol_number) > 0:
span = soup.new_tag("span")
span.string = "v. {}".format(vol_number)
col_2.append(span)
issue_number = citation.get("issue_number")
if issue_number and len(issue_number ) > 0:
span = soup.new_tag("span")
span.string = " no. {}".format(issue_number)
col_2.append(span)
page_start = citation.get("page_start")
if page_start and len(page_start) > 0:
span = soup.new_tag("span")
span.string = "p. {}".format(page_start)
col_2.append(span)
page_end = citation.get("page_end")
if page_end and len(page_end) > 0:
span = soup.new_tag("span")
if "page_start" in citation:
page_string = "- {}."
else:
page_string = "{}."
span.string = page_string.format(page_end)
col_2.append(span)
div.append(col_2)
col_3 = soup.new_tag("div", **{"class": "col-4"})
iri = citation.get("iri")
if iri:
edit_click = "editCitation('{}');".format(iri)
delete_click = "deleteCitation('{}');".format(iri)
        edit_a = soup.new_tag("a", **{"class": "btn btn-warning disabled",
                                      "onclick": edit_click,
                                      "type": "input"})
edit_a.append(soup.new_tag("i", **{"class": "fas fa-edit"}))
col_3.append(edit_a)
        delete_a = soup.new_tag("a", **{"class": "btn btn-danger",
                                        "onclick": delete_click,
                                        "type": "input"})
delete_a.append(soup.new_tag("i", **{"class": "fas fa-trash-alt"}))
col_3.append(delete_a)
div.append(col_3)
return div.prettify()
def __reconcile_article__(work_graph, connection):
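    """Matches the article's periodical, volume, and issue against existing
    entities in the datastore and swaps in their IRIs when found."""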
SCHEMA = rdflib.Namespace("http://schema.org/")
for row in work_graph.query(
"""SELECT ?entity ?label WHERE { ?entity rdf:type schema:Periodical ;
schema:name ?label . } """):
entity, label = row
break
volume, issue = None, None
volume_or_issue = work_graph.value(predicate=SCHEMA.partOf,
object=entity)
schema_class = work_graph.value(subject=volume_or_issue,
predicate=rdflib.RDF.type)
    if schema_class == SCHEMA.volumeNumber:
volume = volume_or_issue
issue = work_graph.value(predicate=SCHEMA.partOf,
object=volume)
    elif schema_class == SCHEMA.issueNumber:
issue = volume_or_issue
result = connection.datastore.query("""SELECT ?periodical
WHERE {{
?periodical schema:name ?name .
FILTER(CONTAINS(?name, "{0}"))
}}""".format(label))
if result and len(result) > 0:
periodical = result[0].get("periodical").get("value")
if periodical != str(entity):
new_work = rdflib.URIRef(periodical)
bibcat.replace_iri(work_graph, entity, new_work)
entity = new_work
if volume is not None:
vol_num = work_graph.value(subject=volume,
predicate=SCHEMA.volumeNumber)
result = connection.datastore.query("""SELECT ?volume
WHERE {{
?volume schema:partOf ?work ;
schema:volumeNumber ?volumeNumber .
BIND(<{0}> as ?work)
BIND("{1}" as ?volumeNumber)
}}""".format(entity, vol_num))
if result and len(result) > 0:
new_volume = rdflib.URIRef(result[0].get("volume").get("value"))
bibcat.replace_iri(work_graph, volume, new_volume)
if issue is not None:
issue_number = work_graph.value(subject=issue,
predicate=SCHEMA.issueNumber)
result = connection.datastore.query("""SELECT ?issue
WHERE {{
?issue rdf:type schema:issueNumber ;
schema:issueNumber ?issue_number .
OPTIONAL {{ ?issue schema:partOf ?volume . }}
OPTIONAL {{ ?issue schema:partOf ?periodical . }}
BIND(<{0}> as ?volume)
BIND(<{1}> as ?periodical)
BIND("{2}" as ?issue_number)
}}""".format(volume, periodical, issue_number) )
if result and len(result) > 0:
new_issue = rdflib.URIRef(result[0].get("issue").get("value"))
bibcat.replace_iri(work_graph, issue, new_issue)
def add_creative_work(**kwargs):
"""Calls utilities to populate and save to datastore"""
config = kwargs.get("config")
profile = EmailProfile(config)
current_user = kwargs.get("current_user")
config_manager = kwargs.get('config_manager')
connection = config_manager.conns
generated_by = kwargs.get("generated_by")
work_form = kwargs.get("work_form")
BF = config_manager.nsm.bf
SCHEMA = config_manager.nsm.schema
sparql = EMAIL_LOOKUP.format(
current_user.data.get('mail').lower())
email_results = connection.datastore.query(sparql)
if len(email_results) > 0:
generated_by = rdflib.URIRef(
email_results[0].get("person").get('value'))
work_iri = rdflib.URIRef(profile.add(work_form, generated_by))
#profile.update("Added or Updated Creative Work")
return {"message": "New work has been submitted for review",
"status": True,
"iri": work_iri}
def add_profile(**kwargs):
"""Adds a profile stub to scholarship graph"""
config = kwargs.get("config")
current_user = kwargs.get("current_user")
config_manager = kwargs.get('config_manager')
profile = EmailProfile(config)
connection = config_manager.conns
BF = config_manager.nsm.bf
SCHEMA = config_manager.nsm.schema
results = connection.datastore.query(
EMAIL_LOOKUP.format(
current_user.data.get('mail').lower()))
if len(results) > 0:
generated_by = rdflib.URIRef(results[0].get("person").get('value'))
else:
generated_by = None
form = kwargs.get("form")
if form.get("orcid"):
person_uri = form.get("orcid")
else:
person_uri = "http://catalog.coloradocollege.edu/{}".format(
uuid.uuid1())
person_iri = rdflib.URIRef(person_uri)
if generated_by is None:
generated_by = person_iri
profile.graph.add(
(person_iri,
rdflib.RDF.type,
BF.Person.rdflib))
given_name = form.get("given_name")
if given_name is not None:
profile.graph.add(
(person_iri,
SCHEMA.givenName.rdflib,
rdflib.Literal(given_name, lang="en")))
family_name = form.get("family_name")
if family_name is not None:
profile.graph.add((person_iri,
SCHEMA.familyName.rdflib,
rdflib.Literal(family_name, lang="en")))
label = "{} {}".format(given_name, family_name)
profile.graph.add((person_iri,
rdflib.RDFS.label,
rdflib.Literal(label, lang="en")))
email = form.get("email")
profile.graph.add((person_iri,
SCHEMA.email.rdflib,
rdflib.Literal(email)))
add_qualified_generation(profile.graph,
person_iri,
generated_by)
dept_year = kwargs.get("year-iri")
if dept_year is not None:
        dept_year_iri = rdflib.URIRef(dept_year)
title = kwargs.get("title-iri")
profile.graph.add(
(dept_year_iri,
rdflib.URIRef(title),
person_iri))
statement = kwargs.get("statement", form.get("research_stmt"))
if statement is not None:
statement_iri = rdflib.URIRef("http://catalog.coloradocollege.edu/{}".format(
uuid.uuid1()))
profile.graph.add(
(statement_iri,
rdflib.RDF.type,
SCHEMA.DigitalDocument.rdflib))
profile.graph.add(
(statement_iri,
rdflib.RDFS.label,
rdflib.Literal("Research Statement for {}".format(label),
lang="en")))
profile.graph.add(
(statement_iri,
SCHEMA.accountablePerson.rdflib,
person_iri))
profile.graph.add(
(statement_iri,
SCHEMA.description.rdflib,
rdflib.Literal(statement, lang="en")))
add_qualified_generation(profile.graph,
statement_iri,
generated_by)
form_subjects = form.getlist("subjects")
new_subjects = {}
for row in form_subjects:
fast_id, fast_label = row.split("==")
if fast_id.startswith("http"):
fast_uri = fast_id
else:
fast_uri = "http://id.worldcat.org/fast/{}".format(fast_id[3:])
new_subjects[fast_uri] = fast_label
for fast_subject, fast_label in new_subjects.items():
iri_subject = rdflib.URIRef(fast_subject)
        if statement is not None:
            # statement_iri only exists when a research statement was provided
            profile.graph.add(
                (statement_iri,
                 SCHEMA.about.rdflib,
                 iri_subject))
existing_label = profile.fast_subjects.value(
subject=iri_subject,
predicate=rdflib.RDFS.label)
if existing_label is None:
profile.graph.add(
(iri_subject,
rdflib.RDF.type,
BF.Topic.rdflib))
profile.graph.add(
(iri_subject,
rdflib.RDFS.label,
rdflib.Literal(fast_label, lang="en")))
message = "New {} as {} to Colorado College's Scholarship Graph".format(
label,
person_iri)
profile.new(message)
def delete_creative_work(**kwargs):
config = kwargs.get("config")
git_profile = GitProfile(config)
current_user = kwargs.get("current_user")
config_manager = kwargs.get('config_manager')
author = kwargs.get("author")
connection = config_manager.conns
iri = kwargs.get("iri")
__email_work__(
config=config,
carbon_copy=[current_user.data.get('mail'),],
subject="Delete Request",
text="Delete citation {} for {}\nrequested by {} on {}".format(
iri,
author,
current_user.data.get('mail'),
datetime.datetime.utcnow().isoformat())
)
return {"message": "Deletion of {} for {} under review".format(
iri, author),
"status": True}
def edit_creative_work(**kwargs):
config = kwargs.get("config")
git_profile = GitProfile(config)
current_user_email = kwargs.get("current_user_email")
config_manager = kwargs.get('config_manager')
connection = config_manager.conns
revised_by = kwargs.get("revised_by")
raw_citation = kwargs.get("citation")
work_type = kwargs.get("work_type", "article")
if revised_by is None and current_user_email:
sparql = EMAIL_LOOKUP.format(
current_user_email.lower())
email_results = connection.datastore.query(sparql)
if len(email_results) > 0:
revised_by = rdflib.URIRef(
email_results[0].get("person").get('value'))
temp_work = rdflib.Graph()
temp_work.namespace_manager.bind("cite",
rdflib.Namespace("https://www.coloradocollege.edu/library/ns/citation/"))
for prefix, namespace in git_profile.cc_people.namespaces():
temp_work.namespace_manager.bind(prefix, namespace)
if work_type.startswith("article"):
citation = utilities.Article_Citation(raw_citation,
temp_work,
git_profile.cc_people,
False)
citation.populate()
citation.populate_article()
citation.add_article()
elif work_type.startswith("book"):
citation = utilities.Book_Citation(raw_citation,
temp_work,
git_profile.cc_people,
False)
citation.populate()
citation.populate_book()
citation.add_book()
if revised_by:
add_qualified_revision(temp_work,
rdflib.URIRef(citation.iri),
revised_by)
email_subject = 'Edited Creative Work {}'.format(citation.iri)
__email_work__(graph=temp_work,
config=config,
carbon_copy=[current_user_email,],
subject=email_subject,
text="Edited {} revised by {} on {}, see attached RDF turtle file".format(
citation.citation_type,
revised_by,
datetime.datetime.utcnow().isoformat())
)
return {"message": "Changes to work has been submitted for review",
"status": True}
def update_profile(**kwargs):
"""Updates existing triples based on form values"""
config_manager = kwargs.get('config_manager')
connection = config_manager.conns
BF = config_manager.nsm.bf
SCHEMA = config_manager.nsm.schema
form = kwargs.get('form')
current_user = kwargs.get("current_user")
output = ''
person_iri = rdflib.URIRef(form.get("iri"))
profile = EmailProfile(config_manager, person_iri)
msg = ""
results = connection.datastore.query(
EMAIL_LOOKUP.format(
current_user.data.get('mail').lower()))
if len(results) > 0:
generated_by = rdflib.URIRef(results[0].get("person").get('value'))
else:
generated_by = person_iri
msg = "{} made the following changes to {}'s academic profile:\n".format(
generated_by,
form['label'])
statement_iri_results = connection.datastore.query(
RESEARCH_STMT_IRI.format(
person_iri))
if len(statement_iri_results) > 0:
statement_iri = rdflib.URIRef(
statement_iri_results[0].get("iri").get("value"))
add_qualified_revision(profile.graph,
statement_iri,
generated_by)
else:
statement_iri = rdflib.URIRef(
"http://catalog.coloradocollege.edu/{}".format(uuid.uuid1()))
profile.graph.add(
(statement_iri,
rdflib.RDF.type,
SCHEMA.DigitalDocument.rdflib))
profile.graph.add(
(statement_iri,
SCHEMA.accountablePerson.rdflib,
person_iri))
profile.graph.add(
(statement_iri,
rdflib.RDFS.label,
rdflib.Literal("Research Statement for {} {}".format(
form.get('given_name'),
form.get('family_name')), lang="en")))
add_qualified_generation(
profile.graph,
statement_iri,
generated_by)
citations = form.getlist("citations")
for uri in citations:
profile.graph.add(
(rdflib.URIRef(uri),
SCHEMA.author.rdflib,
person_iri))
statement = form.get("research_stmt")
if len(statement) > 0:
profile.graph.add(
(statement_iri,
SCHEMA.description.rdflib,
rdflib.Literal(statement, lang="en")))
form_subjects = form.getlist("subjects")
new_subjects = {}
for row in form_subjects:
fast_id, fast_label = row.split("==")
if fast_id.startswith("http"):
fast_uri = fast_id
else:
fast_uri = "http://id.worldcat.org/fast/{}".format(fast_id[3:])
iri_subject = rdflib.URIRef(fast_uri)
profile.graph.add(
(statement_iri,
SCHEMA.about.rdflib,
iri_subject))
profile.graph.add(
(iri_subject,
rdflib.RDF.type,
BF.Topic.rdflib))
profile.graph.add(
(iri_subject,
rdflib.RDFS.label,
rdflib.Literal(fast_label, lang="en")))
profile.update(msg)
return {"message": msg,
"status": True}
|
[
"subprocess.run",
"threading.Thread.__init__",
"rdflib.Graph",
"rdflib.Literal",
"utilities.Book_Citation",
"email.mime.text.MIMEText",
"click.echo",
"base64.b64decode",
"rdflib.URIRef",
"rdflib.Namespace",
"email.mime.multipart.MIMEMultipart",
"datetime.datetime.utcnow",
"rdflib.BNode",
"uuid.uuid1",
"utilities.Article_Citation",
"bs4.BeautifulSoup",
"bibcat.replace_iri",
"os.chdir"
] |
[((622, 680), 'rdflib.Namespace', 'rdflib.Namespace', (['"""http://id.loc.gov/ontologies/bibframe/"""'], {}), "('http://id.loc.gov/ontologies/bibframe/')\n", (638, 680), False, 'import rdflib\n'), ((688, 760), 'rdflib.Namespace', 'rdflib.Namespace', (['"""https://www.coloradocollege.edu/library/ns/citation/"""'], {}), "('https://www.coloradocollege.edu/library/ns/citation/')\n", (704, 760), False, 'import rdflib\n'), ((768, 814), 'rdflib.Namespace', 'rdflib.Namespace', (['"""http://www.w3.org/ns/prov#"""'], {}), "('http://www.w3.org/ns/prov#')\n", (784, 814), False, 'import rdflib\n'), ((824, 862), 'rdflib.Namespace', 'rdflib.Namespace', (['"""http://schema.org/"""'], {}), "('http://schema.org/')\n", (840, 862), False, 'import rdflib\n'), ((22889, 22904), 'email.mime.multipart.MIMEMultipart', 'MIMEMultipart', ([], {}), '()\n', (22902, 22904), False, 'from email.mime.multipart import MIMEMultipart\n'), ((23162, 23194), 'email.mime.text.MIMEText', 'MIMEText', (['text'], {'_charset': '"""UTF-8"""'}), "(text, _charset='UTF-8')\n", (23170, 23194), False, 'from email.mime.text import MIMEText\n'), ((24018, 24043), 'bs4.BeautifulSoup', 'BeautifulSoup', (['""""""', '"""lxml"""'], {}), "('', 'lxml')\n", (24031, 24043), False, 'from bs4 import BeautifulSoup\n'), ((27112, 27150), 'rdflib.Namespace', 'rdflib.Namespace', (['"""http://schema.org/"""'], {}), "('http://schema.org/')\n", (27128, 27150), False, 'import rdflib\n'), ((31407, 31432), 'rdflib.URIRef', 'rdflib.URIRef', (['person_uri'], {}), '(person_uri)\n', (31420, 31432), False, 'import rdflib\n'), ((36241, 36255), 'rdflib.Graph', 'rdflib.Graph', ([], {}), '()\n', (36253, 36255), False, 'import rdflib\n'), ((1306, 1332), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (1330, 1332), False, 'import datetime\n'), ((1653, 1667), 'rdflib.Graph', 'rdflib.Graph', ([], {}), '()\n', (1665, 1667), False, 'import rdflib\n'), ((1693, 1707), 'rdflib.Graph', 'rdflib.Graph', ([], {}), '()\n', (1705, 1707), False, 'import rdflib\n'), ((2772, 2786), 'rdflib.Graph', 'rdflib.Graph', ([], {}), '()\n', (2784, 2786), False, 'import rdflib\n'), ((2822, 2836), 'rdflib.Graph', 'rdflib.Graph', ([], {}), '()\n', (2834, 2836), False, 'import rdflib\n'), ((2866, 2880), 'rdflib.Graph', 'rdflib.Graph', ([], {}), '()\n', (2878, 2880), False, 'import rdflib\n'), ((7507, 7538), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (7532, 7538), False, 'import threading\n'), ((7883, 7897), 'rdflib.Graph', 'rdflib.Graph', ([], {}), '()\n', (7895, 7897), False, 'import rdflib\n'), ((7927, 7941), 'rdflib.Graph', 'rdflib.Graph', ([], {}), '()\n', (7939, 7941), False, 'import rdflib\n'), ((13084, 13098), 'rdflib.Graph', 'rdflib.Graph', ([], {}), '()\n', (13096, 13098), False, 'import rdflib\n'), ((13585, 13600), 'email.mime.multipart.MIMEMultipart', 'MIMEMultipart', ([], {}), '()\n', (13598, 13600), False, 'from email.mime.multipart import MIMEMultipart\n'), ((13988, 14020), 'email.mime.text.MIMEText', 'MIMEText', (['body'], {'_charset': '"""UTF-8"""'}), "(body, _charset='UTF-8')\n", (13996, 14020), False, 'from email.mime.text import MIMEText\n'), ((15571, 15585), 'rdflib.BNode', 'rdflib.BNode', ([], {}), '()\n', (15583, 15585), False, 'import rdflib\n'), ((32483, 32511), 'rdflib.URIRef', 'rdflib.URIRef', (['dept_year_iri'], {}), '(dept_year_iri)\n', (32496, 32511), False, 'import rdflib\n'), ((34002, 34029), 'rdflib.URIRef', 'rdflib.URIRef', (['fast_subject'], {}), '(fast_subject)\n', (34015, 34029), False, 'import 
rdflib\n'), ((36309, 36381), 'rdflib.Namespace', 'rdflib.Namespace', (['"""https://www.coloradocollege.edu/library/ns/citation/"""'], {}), "('https://www.coloradocollege.edu/library/ns/citation/')\n", (36325, 36381), False, 'import rdflib\n'), ((36571, 36657), 'utilities.Article_Citation', 'utilities.Article_Citation', (['raw_citation', 'temp_work', 'git_profile.cc_people', '(False)'], {}), '(raw_citation, temp_work, git_profile.cc_people, \n False)\n', (36597, 36657), False, 'import utilities\n'), ((40366, 40389), 'rdflib.URIRef', 'rdflib.URIRef', (['fast_uri'], {}), '(fast_uri)\n', (40379, 40389), False, 'import rdflib\n'), ((7221, 7240), 'os.chdir', 'os.chdir', (['directory'], {}), '(directory)\n', (7229, 7240), False, 'import os\n'), ((7262, 7313), 'subprocess.run', 'subprocess.run', (["['git', 'pull', 'origin', 'master']"], {}), "(['git', 'pull', 'origin', 'master'])\n", (7276, 7313), False, 'import subprocess\n'), ((7326, 7370), 'click.echo', 'click.echo', (['result.returncode', 'result.stdout'], {}), '(result.returncode, result.stdout)\n', (7336, 7370), False, 'import click\n'), ((15904, 15918), 'rdflib.BNode', 'rdflib.BNode', ([], {}), '()\n', (15916, 15918), False, 'import rdflib\n'), ((16288, 16302), 'rdflib.BNode', 'rdflib.BNode', ([], {}), '()\n', (16300, 16302), False, 'import rdflib\n'), ((18808, 18841), 'rdflib.URIRef', 'rdflib.URIRef', (['work_form.iri.data'], {}), '(work_form.iri.data)\n', (18821, 18841), False, 'import rdflib\n'), ((19893, 19929), 'rdflib.URIRef', 'rdflib.URIRef', (['work_form.author.data'], {}), '(work_form.author.data)\n', (19906, 19929), False, 'import rdflib\n'), ((28184, 28209), 'rdflib.URIRef', 'rdflib.URIRef', (['periodical'], {}), '(periodical)\n', (28197, 28209), False, 'import rdflib\n'), ((28223, 28271), 'bibcat.replace_iri', 'bibcat.replace_iri', (['work_graph', 'entity', 'new_work'], {}), '(work_graph, entity, new_work)\n', (28241, 28271), False, 'import bibcat\n'), ((28797, 28847), 'bibcat.replace_iri', 'bibcat.replace_iri', (['work_graph', 'volume', 'new_volume'], {}), '(work_graph, volume, new_volume)\n', (28815, 28847), False, 'import bibcat\n'), ((29505, 29553), 'bibcat.replace_iri', 'bibcat.replace_iri', (['work_graph', 'issue', 'new_issue'], {}), '(work_graph, issue, new_issue)\n', (29523, 29553), False, 'import bibcat\n'), ((31376, 31388), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (31386, 31388), False, 'import uuid\n'), ((32141, 32173), 'rdflib.Literal', 'rdflib.Literal', (['label'], {'lang': '"""en"""'}), "(label, lang='en')\n", (32155, 32173), False, 'import rdflib\n'), ((32278, 32299), 'rdflib.Literal', 'rdflib.Literal', (['email'], {}), '(email)\n', (32292, 32299), False, 'import rdflib\n'), ((36839, 36917), 'utilities.Book_Citation', 'utilities.Book_Citation', (['raw_citation', 'temp_work', 'git_profile.cc_people', '(False)'], {}), '(raw_citation, temp_work, git_profile.cc_people, False)\n', (36862, 36917), False, 'import utilities\n'), ((37112, 37139), 'rdflib.URIRef', 'rdflib.URIRef', (['citation.iri'], {}), '(citation.iri)\n', (37125, 37139), False, 'import rdflib\n'), ((4533, 4563), 'base64.b64decode', 'base64.b64decode', (['blob.content'], {}), '(blob.content)\n', (4549, 4563), False, 'import base64\n'), ((15110, 15154), 'rdflib.Literal', 'rdflib.Literal', (['work_form.article_title.data'], {}), '(work_form.article_title.data)\n', (15124, 15154), False, 'import rdflib\n'), ((15752, 15796), 'rdflib.Literal', 'rdflib.Literal', (['work_form.journal_title.data'], {}), '(work_form.journal_title.data)\n', (15766, 15796), 
False, 'import rdflib\n'), ((17505, 17546), 'rdflib.Literal', 'rdflib.Literal', (['work_form.book_title.data'], {}), '(work_form.book_title.data)\n', (17519, 17546), False, 'import rdflib\n'), ((18974, 19007), 'rdflib.URIRef', 'rdflib.URIRef', (['work_form.doi.data'], {}), '(work_form.doi.data)\n', (18987, 19007), False, 'import rdflib\n'), ((19281, 19325), 'rdflib.Literal', 'rdflib.Literal', (['work_form.datePublished.data'], {}), '(work_form.datePublished.data)\n', (19295, 19325), False, 'import rdflib\n'), ((19429, 19473), 'rdflib.Literal', 'rdflib.Literal', (['work_form.author_string.data'], {}), '(work_form.author_string.data)\n', (19443, 19473), False, 'import rdflib\n'), ((19762, 19791), 'rdflib.Literal', 'rdflib.Literal', (['citation_type'], {}), '(citation_type)\n', (19776, 19791), False, 'import rdflib\n'), ((20808, 20822), 'rdflib.BNode', 'rdflib.BNode', ([], {}), '()\n', (20820, 20822), False, 'import rdflib\n'), ((31775, 31812), 'rdflib.Literal', 'rdflib.Literal', (['given_name'], {'lang': '"""en"""'}), "(given_name, lang='en')\n", (31789, 31812), False, 'import rdflib\n'), ((31978, 32016), 'rdflib.Literal', 'rdflib.Literal', (['family_name'], {'lang': '"""en"""'}), "(family_name, lang='en')\n", (31992, 32016), False, 'import rdflib\n'), ((32621, 32641), 'rdflib.URIRef', 'rdflib.URIRef', (['title'], {}), '(title)\n', (32634, 32641), False, 'import rdflib\n'), ((32864, 32876), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (32874, 32876), False, 'import uuid\n'), ((33431, 33467), 'rdflib.Literal', 'rdflib.Literal', (['statement'], {'lang': '"""en"""'}), "(statement, lang='en')\n", (33445, 33467), False, 'import rdflib\n'), ((38989, 39001), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (38999, 39001), False, 'import uuid\n'), ((39740, 39758), 'rdflib.URIRef', 'rdflib.URIRef', (['uri'], {}), '(uri)\n', (39753, 39758), False, 'import rdflib\n'), ((40001, 40037), 'rdflib.Literal', 'rdflib.Literal', (['statement'], {'lang': '"""en"""'}), "(statement, lang='en')\n", (40015, 40037), False, 'import rdflib\n'), ((40720, 40757), 'rdflib.Literal', 'rdflib.Literal', (['fast_label'], {'lang': '"""en"""'}), "(fast_label, lang='en')\n", (40734, 40757), False, 'import rdflib\n'), ((8348, 8378), 'base64.b64decode', 'base64.b64decode', (['blob.content'], {}), '(blob.content)\n', (8364, 8378), False, 'import base64\n'), ((15314, 15355), 'rdflib.Literal', 'rdflib.Literal', (['work_form.page_start.data'], {}), '(work_form.page_start.data)\n', (15328, 15355), False, 'import rdflib\n'), ((15511, 15550), 'rdflib.Literal', 'rdflib.Literal', (['work_form.page_end.data'], {}), '(work_form.page_end.data)\n', (15525, 15550), False, 'import rdflib\n'), ((16112, 16156), 'rdflib.Literal', 'rdflib.Literal', (['work_form.volume_number.data'], {}), '(work_form.volume_number.data)\n', (16126, 16156), False, 'import rdflib\n'), ((16492, 16535), 'rdflib.Literal', 'rdflib.Literal', (['work_form.issue_number.data'], {}), '(work_form.issue_number.data)\n', (16506, 16535), False, 'import rdflib\n'), ((17281, 17317), 'rdflib.Literal', 'rdflib.Literal', (['work_form.month.data'], {}), '(work_form.month.data)\n', (17295, 17317), False, 'import rdflib\n'), ((17674, 17709), 'rdflib.Literal', 'rdflib.Literal', (['work_form.isbn.data'], {}), '(work_form.isbn.data)\n', (17688, 17709), False, 'import rdflib\n'), ((17878, 17925), 'rdflib.Literal', 'rdflib.Literal', (['work_form.editionStatement.data'], {}), '(work_form.editionStatement.data)\n', (17892, 17925), False, 'import rdflib\n'), ((18079, 18116), 'rdflib.Literal', 
'rdflib.Literal', (['work_form.editor.data'], {}), '(work_form.editor.data)\n', (18093, 18116), False, 'import rdflib\n'), ((18305, 18362), 'rdflib.Literal', 'rdflib.Literal', (['work_form.provisionActivityStatement.data'], {}), '(work_form.provisionActivityStatement.data)\n', (18319, 18362), False, 'import rdflib\n'), ((18515, 18551), 'rdflib.Literal', 'rdflib.Literal', (['work_form.notes.data'], {}), '(work_form.notes.data)\n', (18529, 18551), False, 'import rdflib\n'), ((20348, 20381), 'rdflib.URIRef', 'rdflib.URIRef', (['work_form.url.data'], {}), '(work_form.url.data)\n', (20361, 20381), False, 'import rdflib\n'), ((20512, 20551), 'rdflib.Literal', 'rdflib.Literal', (['work_form.abstract.data'], {}), '(work_form.abstract.data)\n', (20526, 20551), False, 'import rdflib\n'), ((21231, 21270), 'rdflib.Literal', 'rdflib.Literal', (['work_form.abstract.data'], {}), '(work_form.abstract.data)\n', (21245, 21270), False, 'import rdflib\n'), ((34555, 34592), 'rdflib.Literal', 'rdflib.Literal', (['fast_label'], {'lang': '"""en"""'}), "(fast_label, lang='en')\n", (34569, 34592), False, 'import rdflib\n'), ((19160, 19172), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (19170, 19172), False, 'import uuid\n'), ((35334, 35360), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (35358, 35360), False, 'import datetime\n'), ((37519, 37545), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (37543, 37545), False, 'import datetime\n')]
|
'''
Created on Dec 23, 2019
@author: mohammedmostafa
'''
import tensorflow as tf
modelPath = "../model/CNDetector_5.h5"
converter = tf.lite.TFLiteConverter.from_keras_model_file(modelPath)
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
lite_model = converter.convert()
open("../model/CNDetector_Lite_5.tflite", "wb").write(lite_model)
|
[
"tensorflow.lite.TFLiteConverter.from_keras_model_file"
] |
[((135, 191), 'tensorflow.lite.TFLiteConverter.from_keras_model_file', 'tf.lite.TFLiteConverter.from_keras_model_file', (['modelPath'], {}), '(modelPath)\n', (180, 191), True, 'import tensorflow as tf\n')]
|
"""USERS - Autoincrement set to PK
Revision ID: d385c3eb6937
Revises: ee2cbe4166fb
Create Date: 2018-02-16 11:23:29.705565
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd385c3eb6937'
down_revision = 'ee2cbe4166fb'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('name', sa.String(length=20), nullable=True))
op.add_column('user', sa.Column('surname', sa.String(length=20), nullable=True))
op.create_index(op.f('ix_user_surname'), 'user', ['surname'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_user_surname'), table_name='user')
op.drop_column('user', 'surname')
op.drop_column('user', 'name')
# ### end Alembic commands ###
|
[
"sqlalchemy.String",
"alembic.op.drop_column",
"alembic.op.f"
] |
[((831, 864), 'alembic.op.drop_column', 'op.drop_column', (['"""user"""', '"""surname"""'], {}), "('user', 'surname')\n", (845, 864), False, 'from alembic import op\n'), ((869, 899), 'alembic.op.drop_column', 'op.drop_column', (['"""user"""', '"""name"""'], {}), "('user', 'name')\n", (883, 899), False, 'from alembic import op\n'), ((585, 608), 'alembic.op.f', 'op.f', (['"""ix_user_surname"""'], {}), "('ix_user_surname')\n", (589, 608), False, 'from alembic import op\n'), ((783, 806), 'alembic.op.f', 'op.f', (['"""ix_user_surname"""'], {}), "('ix_user_surname')\n", (787, 806), False, 'from alembic import op\n'), ((442, 462), 'sqlalchemy.String', 'sa.String', ([], {'length': '(20)'}), '(length=20)\n', (451, 462), True, 'import sqlalchemy as sa\n'), ((527, 547), 'sqlalchemy.String', 'sa.String', ([], {'length': '(20)'}), '(length=20)\n', (536, 547), True, 'import sqlalchemy as sa\n')]
|
# A large portion of the code came from the COVID-19 Dataset project by Our World in Data
# https://github.com/owid/covid-19-data/tree/master/scripts/scripts/vaccinations/src/vax/manual/twitter
# Mainly contributed by <NAME> https://github.com/lucasrodes
# The code is under completely open access under the Creative Commons BY license
# https://creativecommons.org/licenses/by/4.0/
import os
import pandas as pd
import re
import tweepy
try:
from config import TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET
except ImportError:
TWITTER_CONSUMER_KEY = os.getenv('TWITTER_CONSUMER_KEY')
TWITTER_CONSUMER_SECRET = os.getenv('TWITTER_CONSUMER_SECRET')
class TwitterAPI:
def __init__(self, consumer_key: str, consumer_secret: str):
self._api = self._get_api(consumer_key, consumer_secret)
def _get_api(self, consumer_key, consumer_secret):
auth = tweepy.AppAuthHandler(consumer_key, consumer_secret)
return tweepy.API(auth)
def get_tweets(self, username, num_tweets=30):
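        """Returns an iterator over the user's most recent tweets (excluding retweets)."""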
tweets = tweepy.Cursor(self._api.user_timeline,
screen_name=username,
include_rts=False,
tweet_mode='extended',
exclude_replies=False,
).items(num_tweets)
return tweets
class TwitterCollectorBase:
def __init__(self, api, username: str, location: str, num_tweets=100):
self.username = username
self.location = location
self.tweets = api.get_tweets(self.username, num_tweets)
self.tweets_relevant = []
self.output_path = "./algeria-covid19-icu-data.csv"
self._data_old = self._get_current_data()
def _set_output_path(self, paths, output_path):
if output_path is None:
if paths is not None:
return paths.tmp_vax_out_proposal(self.location)
else:
raise AttributeError(
"Either specify attribute `paths` or method argument `output_path`")
def _get_current_data(self):
if os.path.isfile(self.output_path):
return pd.read_csv(self.output_path)
else:
            return None
@property
def last_update(self):
if self._data_old is not None:
return self._data_old.date.max()
else:
return None
def _propose_df(self):
raise NotImplementedError
def propose_df(self):
df = (
self._propose_df()
.pipe(self.merge_with_current_data)
.sort_values("date")
)
return df
def build_post_url(self, tweet_id: str):
return f"https://twitter.com/{self.username}/status/{tweet_id}"
def merge_with_current_data(self, df: pd.DataFrame) -> pd.DataFrame:
if df.empty:
return self._data_old
if self._data_old is not None:
df_current = self._data_old[~self._data_old.date.isin(df.date)]
df = pd.concat([df, df_current]).sort_values(by="date")
return df
def stop_search(self, dt):
if self._data_old is None:
return False
elif dt >= self.last_update:
return False
elif dt < self.last_update:
return True
def to_csv(self):
df = self.propose_df()
df.to_csv(self.output_path, index=False)
class Algeria(TwitterCollectorBase):
def __init__(self, api, **kwargs):
super().__init__(
api=api,
username="Sante_Gouv_dz",
location="Algeria",
**kwargs
)
def _propose_df(self):
data = []
for tweet in self.tweets:
match = re.search(r"مؤشرات الترصد لوباء كوفيد-19", tweet.full_text) or re.search(
r"حصيلة وباء كورونا كوفيد-19 ليوم", tweet.full_text) or re.search(
r"نوافيكم بالحصيلة الكاملة", tweet.full_text)
match2 = re.search(r"العناية المركز", tweet.full_text)
if match and match2:
dt_match = re.search(
r"(\d{1,2})\s*([ء-ي]+)\s*[ء-ي]*(202\d)", tweet.full_text)
dt = dt_match.group(
3)+"-"+arabicMonthToNum(dt_match.group(2))+"-"+dt_match.group(1).zfill(2)
if self.stop_search(dt):
break
new_cases_line = re.findall(
"^.*جديدة.*$", tweet.full_text, re.MULTILINE)[0]
new_cases = int(re.search(r'\d+', new_cases_line).group(0))
recoveries_line = re.findall(
"^.*للشفاء.*$", tweet.full_text, re.MULTILINE)[0]
recoveries = int(re.search(r'\d+', recoveries_line).group(0))
in_icu_line = re.findall(
"^.*العناية المركز.*$", tweet.full_text, re.MULTILINE)[0]
in_icu = int(re.search(r'\d+', in_icu_line).group(0))
new_deaths_line = re.findall(
"^.*وفيات.*$", tweet.full_text, re.MULTILINE)
                if new_deaths_line:
                    new_deaths = int(
                        re.search(r'\d+', new_deaths_line[0]).group(0))
                elif re.findall(
                        "^.*وفاة واحدة.*$", tweet.full_text, re.MULTILINE):
                    # "one death" phrasing carries no digit in the tweet
                    new_deaths = 1
                else:
                    new_deaths = 0
data.append({
"date": dt,
"new_cases": new_cases,
"recoveries": recoveries,
"in_icu": in_icu,
"death": new_deaths,
"text": tweet.full_text,
"source_url": self.build_post_url(tweet.id),
})
df = pd.DataFrame(data)
return df
def arabicMonthToNum(month):
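    """Maps an Arabic month name to its zero-padded two-digit month number."""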
return {
'جانفي': "01",
'فيفري': "02",
'مارس': "03",
'أفريل': "04",
'ماي': "05",
'جوان': "06",
'جويلية': "07",
'اوت': "08",
'أوت': "08",
'سبتمبر': "09",
'أكتوبر': "10",
'اكتوبر': "10",
'كتوبر': "10",
'نوفمبر': "11",
'ديسمبر': "12"
}[month]
def main():
api = TwitterAPI(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET)
Algeria(api).to_csv()
if __name__ == "__main__":
main()
|
[
"pandas.DataFrame",
"tweepy.API",
"pandas.concat",
"pandas.read_csv",
"os.path.isfile",
"tweepy.Cursor",
"re.findall",
"tweepy.AppAuthHandler",
"re.search",
"os.getenv"
] |
[((560, 593), 'os.getenv', 'os.getenv', (['"""TWITTER_CONSUMER_KEY"""'], {}), "('TWITTER_CONSUMER_KEY')\n", (569, 593), False, 'import os\n'), ((624, 660), 'os.getenv', 'os.getenv', (['"""TWITTER_CONSUMER_SECRET"""'], {}), "('TWITTER_CONSUMER_SECRET')\n", (633, 660), False, 'import os\n'), ((883, 935), 'tweepy.AppAuthHandler', 'tweepy.AppAuthHandler', (['consumer_key', 'consumer_secret'], {}), '(consumer_key, consumer_secret)\n', (904, 935), False, 'import tweepy\n'), ((951, 967), 'tweepy.API', 'tweepy.API', (['auth'], {}), '(auth)\n', (961, 967), False, 'import tweepy\n'), ((2114, 2146), 'os.path.isfile', 'os.path.isfile', (['self.output_path'], {}), '(self.output_path)\n', (2128, 2146), False, 'import os\n'), ((5747, 5765), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (5759, 5765), True, 'import pandas as pd\n'), ((2167, 2196), 'pandas.read_csv', 'pd.read_csv', (['self.output_path'], {}), '(self.output_path)\n', (2178, 2196), True, 'import pandas as pd\n'), ((3967, 4011), 're.search', 're.search', (['"""العناية المركز"""', 'tweet.full_text'], {}), "('العناية المركز', tweet.full_text)\n", (3976, 4011), False, 'import re\n'), ((1037, 1167), 'tweepy.Cursor', 'tweepy.Cursor', (['self._api.user_timeline'], {'screen_name': 'username', 'include_rts': '(False)', 'tweet_mode': '"""extended"""', 'exclude_replies': '(False)'}), "(self._api.user_timeline, screen_name=username, include_rts=\n False, tweet_mode='extended', exclude_replies=False)\n", (1050, 1167), False, 'import tweepy\n'), ((3727, 3785), 're.search', 're.search', (['"""مؤشرات الترصد لوباء كوفيد-19"""', 'tweet.full_text'], {}), "('مؤشرات الترصد لوباء كوفيد-19', tweet.full_text)\n", (3736, 3785), False, 'import re\n'), ((3800, 3861), 're.search', 're.search', (['"""حصيلة وباء كورونا كوفيد-19 ليوم"""', 'tweet.full_text'], {}), "('حصيلة وباء كورونا كوفيد-19 ليوم', tweet.full_text)\n", (3809, 3861), False, 'import re\n'), ((3883, 3937), 're.search', 're.search', (['"""نوافيكم بالحصيلة الكاملة"""', 'tweet.full_text'], {}), "('نوافيكم بالحصيلة الكاملة', tweet.full_text)\n", (3892, 3937), False, 'import re\n'), ((4073, 4143), 're.search', 're.search', (['"""(\\\\d{1,2})\\\\s*([ء-ي]+)\\\\s*[ء-ي]*(202\\\\d)"""', 'tweet.full_text'], {}), "('(\\\\d{1,2})\\\\s*([ء-ي]+)\\\\s*[ء-ي]*(202\\\\d)', tweet.full_text)\n", (4082, 4143), False, 'import re\n'), ((4968, 5024), 're.findall', 're.findall', (['"""^.*وفيات.*$"""', 'tweet.full_text', 're.MULTILINE'], {}), "('^.*وفيات.*$', tweet.full_text, re.MULTILINE)\n", (4978, 5024), False, 'import re\n'), ((3015, 3042), 'pandas.concat', 'pd.concat', (['[df, df_current]'], {}), '([df, df_current])\n', (3024, 3042), True, 'import pandas as pd\n'), ((4393, 4449), 're.findall', 're.findall', (['"""^.*جديدة.*$"""', 'tweet.full_text', 're.MULTILINE'], {}), "('^.*جديدة.*$', tweet.full_text, re.MULTILINE)\n", (4403, 4449), False, 'import re\n'), ((4584, 4641), 're.findall', 're.findall', (['"""^.*للشفاء.*$"""', 'tweet.full_text', 're.MULTILINE'], {}), "('^.*للشفاء.*$', tweet.full_text, re.MULTILINE)\n", (4594, 4641), False, 'import re\n'), ((4774, 4839), 're.findall', 're.findall', (['"""^.*العناية المركز.*$"""', 'tweet.full_text', 're.MULTILINE'], {}), "('^.*العناية المركز.*$', tweet.full_text, re.MULTILINE)\n", (4784, 4839), False, 'import re\n'), ((5238, 5299), 're.findall', 're.findall', (['"""^.*وفاة واحدة.*$"""', 'tweet.full_text', 're.MULTILINE'], {}), "('^.*وفاة واحدة.*$', tweet.full_text, re.MULTILINE)\n", (5248, 5299), False, 'import re\n'), ((4506, 4539), 're.search', 're.search', 
(['"""\\\\d+"""', 'new_cases_line'], {}), "('\\\\d+', new_cases_line)\n", (4515, 4539), False, 'import re\n'), ((4699, 4733), 're.search', 're.search', (['"""\\\\d+"""', 'recoveries_line'], {}), "('\\\\d+', recoveries_line)\n", (4708, 4733), False, 'import re\n'), ((4893, 4923), 're.search', 're.search', (['"""\\\\d+"""', 'in_icu_line'], {}), "('\\\\d+', in_icu_line)\n", (4902, 4923), False, 'import re\n'), ((5145, 5182), 're.search', 're.search', (['"""\\\\d+"""', 'new_deaths_line[0]'], {}), "('\\\\d+', new_deaths_line[0])\n", (5154, 5182), False, 'import re\n')]
|
from math import pi
import numpy as np
from aleph.consts import *
from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import svOsuMeasureLineMD, SvOsuMeasureLineEvent
from reamber.osu.OsuMap import OsuMap
# notes: 01:37:742 (97742|2,125993|2) -
SHAKES = np.array(
[100560, 100790, 101018, 101245,
104124, 104340, 104556, 104770,
107487, 107692, 107896, 108099,
110674, 110867, 111059, 111156, 111252, 111348,
113698, 113882, 114065, 114248,
116577, 116753, 116928, 117103,
119326, 119494, 119661, 119827,
121953, 122114, 122275, 122434,
122594, 122673, 122752, 122831, 123068,
123147, 123226, 123304, 123383, 123539,
123618, 123696, 123773, 123851, 124007,
124084, 124162, 124239, 124316, 124471,
124547, 124624, 124701, 124778, 124932,
125008, 125084, 125160, 125236, 125388,
125464, 125540, 125616, 125692, 125767, 125842, 125918, 125993])
def f247(m: OsuMap):
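    """Builds shaking measure-line SV events for the notes between 97742 and 125993
    and appends the generated SVs and BPMs to the map."""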
notes = sorted([n for n in m.notes.hits() if 97742 < n.offset <= 125993])
BASE_SHAKE_AMP = 0.010
INC_SHAKE_AMP = 0.0010
SHAKE_WINDOW = 250
NOTE_DURATION = 2000
# noinspection PyTypeChecker
events = [
*[SvOsuMeasureLineEvent(
firstOffset=n.offset - NOTE_DURATION - t, lastOffset=n.offset - t,
startX=n.offset - NOTE_DURATION - t, endX=n.offset - t,
startY=-1 + en / 500 , endY=1 - en / 500,
funcs=[
lambda x, n=n, t=t:
# This flips the board if it's < 2
(-1 if n.column < 2 else 1) *
(
np.piecewise(x,
[(i <= x) & (x < i + SHAKE_WINDOW) for i in SHAKES],
[*[lambda x, i=i, es=es:
(BASE_SHAKE_AMP + es * INC_SHAKE_AMP)
* np.sin((x - i) * pi / (SHAKE_WINDOW - es * 3))
for es, i in enumerate(SHAKES)],
lambda x: 0])
+ (x - (n.offset - t)) / NOTE_DURATION
)
]) for en, n in enumerate(notes) for t in np.linspace(0, 24, NOTE_THICKNESS)]
]
svs, bpms = svOsuMeasureLineMD(events,
scalingFactor=SCALE,
firstOffset=97742,
lastOffset=125993,
paddingSize=PADDING,
endBpm=250)
m.svs.extend(svs)
m.bpms.extend(bpms)
|
[
"numpy.sin",
"numpy.array",
"numpy.linspace",
"reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD.svOsuMeasureLineMD"
] |
[((277, 896), 'numpy.array', 'np.array', (['[100560, 100790, 101018, 101245, 104124, 104340, 104556, 104770, 107487, \n 107692, 107896, 108099, 110674, 110867, 111059, 111156, 111252, 111348,\n 113698, 113882, 114065, 114248, 116577, 116753, 116928, 117103, 119326,\n 119494, 119661, 119827, 121953, 122114, 122275, 122434, 122594, 122673,\n 122752, 122831, 123068, 123147, 123226, 123304, 123383, 123539, 123618,\n 123696, 123773, 123851, 124007, 124084, 124162, 124239, 124316, 124471,\n 124547, 124624, 124701, 124778, 124932, 125008, 125084, 125160, 125236,\n 125388, 125464, 125540, 125616, 125692, 125767, 125842, 125918, 125993]'], {}), '([100560, 100790, 101018, 101245, 104124, 104340, 104556, 104770, \n 107487, 107692, 107896, 108099, 110674, 110867, 111059, 111156, 111252,\n 111348, 113698, 113882, 114065, 114248, 116577, 116753, 116928, 117103,\n 119326, 119494, 119661, 119827, 121953, 122114, 122275, 122434, 122594,\n 122673, 122752, 122831, 123068, 123147, 123226, 123304, 123383, 123539,\n 123618, 123696, 123773, 123851, 124007, 124084, 124162, 124239, 124316,\n 124471, 124547, 124624, 124701, 124778, 124932, 125008, 125084, 125160,\n 125236, 125388, 125464, 125540, 125616, 125692, 125767, 125842, 125918,\n 125993])\n', (285, 896), True, 'import numpy as np\n'), ((2302, 2424), 'reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD.svOsuMeasureLineMD', 'svOsuMeasureLineMD', (['events'], {'scalingFactor': 'SCALE', 'firstOffset': '(97742)', 'lastOffset': '(125993)', 'paddingSize': 'PADDING', 'endBpm': '(250)'}), '(events, scalingFactor=SCALE, firstOffset=97742,\n lastOffset=125993, paddingSize=PADDING, endBpm=250)\n', (2320, 2424), False, 'from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import svOsuMeasureLineMD, SvOsuMeasureLineEvent\n'), ((2243, 2277), 'numpy.linspace', 'np.linspace', (['(0)', '(24)', 'NOTE_THICKNESS'], {}), '(0, 24, NOTE_THICKNESS)\n', (2254, 2277), True, 'import numpy as np\n'), ((1948, 1994), 'numpy.sin', 'np.sin', (['((x - i) * pi / (SHAKE_WINDOW - es * 3))'], {}), '((x - i) * pi / (SHAKE_WINDOW - es * 3))\n', (1954, 1994), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# This filter is identical to the include filter, except
# that while building a document, it outputs a document
# map on stderr so that a script can figure out where each part of
# the document came from. E.g.
#
# ```include
# includethisfile.md
# ```
# This filter is recursive, so you markdown can include
# other markdown to any level.
#
import os
import sys
import json
import re
from subprocess import Popen, PIPE
from pandocfilters import toJSONFilter, walk, get_value, Str, Header, Para, Space
REFFILE=""
def md_to_json(s):
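    """Converts a markdown string to pandoc's JSON AST by piping it through pandoc."""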
p = Popen(["pandoc", "-f", "markdown", "-t", "json"], stdin=PIPE, stdout=PIPE)
p.stdin.write(s.encode())
(stdout, stderr) = p.communicate()
if str(stderr) != "None":
sys.stderr.write("WARNING: Conversion to json had results in stderr: " + str(stderr))
return stdout.decode()
def get_contents_of_file(f, levels=u"0"):
numLevels = int(levels)
# Return the contents of file unchanged if no change in level is needed
if numLevels == 0:
if os.path.isfile(f):
with open(f, "r") as myFile:
return myFile.read()
else:
sys.stderr.write("WARNING: cannot read " + f)
return "FILE NOT FOUND: " + f
# Alter the level
alterLevelBy = abs(numLevels)
pre = "in" if numLevels > 0 else "de"
if alterLevelBy > 5:
sys.stderr.write("WARNING: Header change out of bounds. Will stick at a maximum of 6 or minimum of 0\n")
alterLevelBy = 5
p = Popen(["pandoc", "-f", "markdown", "-t", "markdown", "-F", "flt-" + pre + "crement-header-" + str(alterLevelBy) + ".py", f], stdout=PIPE)
(stdout, stderr) = p.communicate()
stdout = stdout.decode()
    if stderr is not None:
        stderr = stderr.decode()
        if stderr:
            sys.stderr.write("WARNING: Conversion to markdown had results in stderr: " + stderr)
return stdout
def docmap(key, value, format, meta):
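    """Pandoc filter action: prefixes header ids with their source file (writing a
    document map to stderr) and recursively expands 'include' code blocks."""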
global REFFILE
if key == 'Header':
[level, attr, inline] = value
[ids, classes, keyvals] = attr
# Change the reference file if we see a new level-1 header
if level == 1 and 'fromfile' in meta:
            reffile = re.sub(r"\.md", ".html", meta['fromfile']['c'])
REFFILE="~~" + reffile + "~~"
sys.stderr.write(reffile + "\n")
return Header(level, [REFFILE + str(ids), [], []], inline)
elif key == 'CodeBlock':
[[ident, classes, keyvals], code] = value
if "include" in classes:
rv = []
for l in code.splitlines():
l = l.strip()
if os.path.isfile(l):
(headingLevel, dummy) = get_value(keyvals, "heading-level")
if not headingLevel:
headingLevel = 0
contents = get_contents_of_file(l, headingLevel)
doc = json.loads(md_to_json(contents))
if 'meta' in doc:
meta = doc['meta']
elif doc[0]: # old API
meta = doc[0]['unMeta']
else:
meta = {}
# Add a file to the meta info
meta['fromfile']= {u'c':l, u't':'MetaString'}
altered = walk(doc, docmap, format, meta)
rv.append(altered['blocks'])
else:
sys.stderr.write("WARNING: Can't read file '" + l + "'. Skipping.")
# Return a flattened list using nested list comprehension
#
# The following is equivalent to:
#
# flattened = []
# for sublist in rv:
# for item in sublist:
# flattened.append(item)
# return flattened
return [item for sublist in rv for item in sublist]
if __name__ == "__main__":
toJSONFilter(docmap)
|
[
"subprocess.Popen",
"pandocfilters.walk",
"os.path.isfile",
"pandocfilters.get_value",
"sys.stderr.write",
"pandocfilters.toJSONFilter",
"re.sub"
] |
[((1213, 1287), 'subprocess.Popen', 'Popen', (["['pandoc', '-f', 'markdown', '-t', 'json']"], {'stdin': 'PIPE', 'stdout': 'PIPE'}), "(['pandoc', '-f', 'markdown', '-t', 'json'], stdin=PIPE, stdout=PIPE)\n", (1218, 1287), False, 'from subprocess import Popen, PIPE\n'), ((4618, 4638), 'pandocfilters.toJSONFilter', 'toJSONFilter', (['docmap'], {}), '(docmap)\n', (4630, 4638), False, 'from pandocfilters import toJSONFilter, walk, get_value, Str, Header, Para, Space\n'), ((1695, 1712), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (1709, 1712), False, 'import os\n'), ((2039, 2156), 'sys.stderr.write', 'sys.stderr.write', (['"""WARNING: Header change out of bounds. Will stick at a maximum of 6 or minimum of 0\n"""'], {}), '(\n """WARNING: Header change out of bounds. Will stick at a maximum of 6 or minimum of 0\n"""\n )\n', (2055, 2156), False, 'import sys\n'), ((1818, 1863), 'sys.stderr.write', 'sys.stderr.write', (["('WARNING: cannot read ' + f)"], {}), "('WARNING: cannot read ' + f)\n", (1834, 1863), False, 'import sys\n'), ((2485, 2570), 'sys.stderr.write', 'sys.stderr.write', (["('WARNING: Conversion to json had results in stderr: ' + stderr)"], {}), "('WARNING: Conversion to json had results in stderr: ' + stderr\n )\n", (2501, 2570), False, 'import sys\n'), ((2885, 2932), 're.sub', 're.sub', (['"""\\\\.md"""', '""".html"""', "meta['fromfile']['c']"], {}), "('\\\\.md', '.html', meta['fromfile']['c'])\n", (2891, 2932), False, 'import re\n'), ((2987, 3019), 'sys.stderr.write', 'sys.stderr.write', (["(reffile + '\\n')"], {}), "(reffile + '\\n')\n", (3003, 3019), False, 'import sys\n'), ((3312, 3329), 'os.path.isfile', 'os.path.isfile', (['l'], {}), '(l)\n', (3326, 3329), False, 'import os\n'), ((3375, 3410), 'pandocfilters.get_value', 'get_value', (['keyvals', '"""heading-level"""'], {}), "(keyvals, 'heading-level')\n", (3384, 3410), False, 'from pandocfilters import toJSONFilter, walk, get_value, Str, Header, Para, Space\n'), ((4009, 4040), 'pandocfilters.walk', 'walk', (['doc', 'docmap', 'format', 'meta'], {}), '(doc, docmap, format, meta)\n', (4013, 4040), False, 'from pandocfilters import toJSONFilter, walk, get_value, Str, Header, Para, Space\n'), ((4133, 4200), 'sys.stderr.write', 'sys.stderr.write', (['("WARNING: Can\'t read file \'" + l + "\'. Skipping.")'], {}), '("WARNING: Can\'t read file \'" + l + "\'. Skipping.")\n', (4149, 4200), False, 'import sys\n')]
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Copyright (c) 2021
#
# See the LICENSE file for details
# see the AUTHORS file for authors
# ----------------------------------------------------------------------
#--------------------
# System wide imports
# -------------------
import os
import os.path
import logging
#--------------
# local imports
# -------------
from streetool.utils import get_image, paging
# -----------------------
# Module global variables
# -----------------------
log = logging.getLogger("streetoool")
# ----------------
# Module constants
# ----------------
def dynamic_sql(options):
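    """Builds the grouped SELECT statement and matching table headers from the CLI flags."""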
columns = list()
headers = list()
where = ""
if options.workflow:
columns.append("workflow_id")
headers.append("Workflow Id")
if options.user:
columns.append("user_id")
headers.append("User Id")
elif options.anon_user:
columns.append("user_ip")
headers.append("User IP")
where = "WHERE user_id IS NULL"
if options.subject:
columns.append("subject_id")
headers.append("Subject Id")
if options.classification:
columns.append("classification_id")
headers.append("Classification Id")
if options.source:
columns.append("cluster_id")
headers.append("Source Id")
if len(columns) == 0:
raise ValueError("At least one --<flag> must be specified")
headers.append("# Classif")
sql = f"SELECT {','.join(columns)}, COUNT(*) FROM spectra_classification_v {where} GROUP BY {','.join(columns)} ORDER BY {','.join(columns)}"
return sql, headers
# ========
# COMMANDS
# ========
def view(connection, options):
sql, headers = dynamic_sql(options)
cursor = connection.cursor()
cursor.execute(sql)
paging(
iterable = cursor,
headers = headers,
)
|
[
"streetool.utils.paging",
"logging.getLogger"
] |
[((553, 584), 'logging.getLogger', 'logging.getLogger', (['"""streetoool"""'], {}), "('streetoool')\n", (570, 584), False, 'import logging\n'), ((1831, 1871), 'streetool.utils.paging', 'paging', ([], {'iterable': 'cursor', 'headers': 'headers'}), '(iterable=cursor, headers=headers)\n', (1837, 1871), False, 'from streetool.utils import get_image, paging\n')]
|
import argparse
import math
import pysam
import shap
import tensorflow
from deeplift.dinuc_shuffle import dinuc_shuffle
from scipy.spatial.distance import jensenshannon
from scipy.special import logit, softmax
tensorflow.compat.v1.disable_v2_behavior()
import kerasAC
import matplotlib
import numpy as np
import pandas as pd
from kerasAC.interpret.deepshap import *
from kerasAC.interpret.profile_shap import *
from kerasAC.util import *
from tensorflow.keras.models import load_model
from tensorflow.keras.utils import get_custom_objects
from kerasAC.custom_losses import *
from kerasAC.metrics import *
def parse_args():
parser = argparse.ArgumentParser(description="Argument Parser for SNP scoring")
parser.add_argument("--model_hdf5")
parser.add_argument("--peak_file")
parser.add_argument("--npeaks_to_sample",type=int,default=30000)
parser.add_argument("--out_prefix")
parser.add_argument(
"--ref_fasta", default="/data/GRCh38_no_alt_analysis_set_GCA_000001405.15.fasta"
)
parser.add_argument("--dinuc_shuffle_input",action='store_true',default=False)
parser.add_argument("--chrom_sizes", default="/data/hg38.chrom.sizes")
parser.add_argument("--flank_size", type=int, default=1057)
parser.add_argument("--batch_size",type=int,default=100)
return parser.parse_args()
def load_model_wrapper(model_hdf5):
# load the model!
custom_objects = {
"recall": recall,
"sensitivity": recall,
"specificity": specificity,
"fpr": fpr,
"fnr": fnr,
"precision": precision,
"f1": f1,
"ambig_binary_crossentropy": ambig_binary_crossentropy,
"ambig_mean_absolute_error": ambig_mean_absolute_error,
"ambig_mean_squared_error": ambig_mean_squared_error,
"MultichannelMultinomialNLL": MultichannelMultinomialNLL,
}
get_custom_objects().update(custom_objects)
return load_model(model_hdf5)
def combine_mult_and_diffref(mult, orig_inp, bg_data):
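    """Projects SHAP multipliers onto each possible base to build hypothetical
    contribution scores, averaged over the shuffled background sequences."""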
to_return = []
for l in [0]:
projected_hypothetical_contribs = np.zeros_like(bg_data[l]).astype("float")
assert len(orig_inp[l].shape) == 2
for i in range(orig_inp[l].shape[-1]):
hypothetical_input = np.zeros_like(orig_inp[l]).astype("float")
hypothetical_input[:, i] = 1.0
hypothetical_difference_from_reference = (
hypothetical_input[None, :, :] - bg_data[l]
)
hypothetical_contribs = hypothetical_difference_from_reference * mult[l]
projected_hypothetical_contribs[:, :, i] = np.sum(
hypothetical_contribs, axis=-1
)
to_return.append(np.mean(projected_hypothetical_contribs, axis=0))
to_return.append(np.zeros_like(orig_inp[1]))
return to_return
def shuffle_several_times(s):
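    """Generates 20 dinucleotide-shuffled backgrounds of the sequence input
    (the second model input is passed through unchanged)."""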
numshuffles = 20
return [
np.array([dinuc_shuffle(s[0]) for i in range(numshuffles)]),
np.array([s[1] for i in range(numshuffles)]),
]
def main():
args = parse_args()
chrom_sizes=open(args.chrom_sizes,'r').read().strip().split('\n')
chrom_size_dict={}
for line in chrom_sizes:
tokens=line.split('\t')
chrom_size_dict[tokens[0]]=int(tokens[1])
ref=pysam.FastaFile(args.ref_fasta)
# load the model
model = load_model_wrapper(args.model_hdf5)
print("loaded model")
# create the count & profile explainers
model_wrapper = (model.input, model.outputs[1][:, 0:1])
count_explainer = shap.DeepExplainer(
model_wrapper,
data=create_background_atac,
combine_mult_and_diffref=combine_mult_and_diffref_atac
)
prof_explainer = create_explainer(model, ischip=False, task_index=0)
print("made explainers")
#read in the peaks
peaks=pd.read_csv(args.peak_file,header=None,sep='\t')
nrow=peaks.shape[0]
tosample=round(int(args.npeaks_to_sample)/nrow,2)
peaks = peaks.sample(frac=tosample).reset_index(drop=True)
nrow=peaks.shape[0]
print("sampled peaks:"+str(nrow))
#allocate space for numpy arrays for modisco
hypothetical_profile_scores=np.empty((nrow,2*args.flank_size,4))
hypothetical_count_scores=np.empty((nrow,2*args.flank_size,4))
observed_profile_scores=np.empty((nrow,2*args.flank_size,4))
observed_count_scores=np.empty((nrow,2*args.flank_size,4))
seq=np.empty((nrow,2*args.flank_size,4))
print("pre-allocted output arrays")
#generate one-hot-encoded inputs
start_index=0
while start_index < nrow:
cur_batch_size=min(args.batch_size,nrow-start_index)
print(str(start_index)+":"+str(start_index+cur_batch_size))
batch_chroms=peaks[0][start_index:start_index+cur_batch_size].tolist()
batch_start_pos=peaks[1]+peaks[9]-args.flank_size
batch_start_pos=batch_start_pos.tolist()
batch_start_pos=[max(0,i) for i in batch_start_pos]
batch_start_pos=[min(batch_start_pos[i],chrom_size_dict[batch_chroms[i]]-2*args.flank_size) for i in range(cur_batch_size)]
seq_batch=[ref.fetch(batch_chroms[i],batch_start_pos[i],batch_start_pos[i]+2*args.flank_size) for i in range(cur_batch_size)]
if args.dinuc_shuffle_input is True:
seq_batch=[dinuc_shuffle(i) for i in seq_batch]
seq_batch=one_hot_encode(seq_batch)
seq[start_index:start_index+cur_batch_size,:,:]=seq_batch
#get the hypothetical scores for the batch
hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]= prof_explainer(seq_batch, None)
observed_profile_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_profile_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch
hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]= np.squeeze(count_explainer.shap_values(seq_batch)[0])
observed_count_scores[start_index:start_index+cur_batch_size,:,:]=hypothetical_count_scores[start_index:start_index+cur_batch_size,:,:]*seq_batch
start_index+=args.batch_size
#save
print("saving outputs")
np.save(args.out_prefix+'.hyp.profile.npy',hypothetical_profile_scores)
np.save(args.out_prefix+'.observed.profile.npy',observed_profile_scores)
np.save(args.out_prefix+'.hyp.count.npy',hypothetical_count_scores)
np.save(args.out_prefix+'.observed.count.npy',observed_count_scores)
np.save(args.out_prefix+'.seq.npy',seq)
if __name__ == "__main__":
main()
|
[
"deeplift.dinuc_shuffle.dinuc_shuffle",
"tensorflow.keras.models.load_model",
"pysam.FastaFile",
"argparse.ArgumentParser",
"pandas.read_csv",
"tensorflow.keras.utils.get_custom_objects",
"shap.DeepExplainer",
"tensorflow.compat.v1.disable_v2_behavior"
] |
[((214, 256), 'tensorflow.compat.v1.disable_v2_behavior', 'tensorflow.compat.v1.disable_v2_behavior', ([], {}), '()\n', (254, 256), False, 'import tensorflow\n'), ((623, 693), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Argument Parser for SNP scoring"""'}), "(description='Argument Parser for SNP scoring')\n", (646, 693), False, 'import argparse\n'), ((1906, 1928), 'tensorflow.keras.models.load_model', 'load_model', (['model_hdf5'], {}), '(model_hdf5)\n', (1916, 1928), False, 'from tensorflow.keras.models import load_model\n'), ((3255, 3286), 'pysam.FastaFile', 'pysam.FastaFile', (['args.ref_fasta'], {}), '(args.ref_fasta)\n', (3270, 3286), False, 'import pysam\n'), ((3509, 3631), 'shap.DeepExplainer', 'shap.DeepExplainer', (['model_wrapper'], {'data': 'create_background_atac', 'combine_mult_and_diffref': 'combine_mult_and_diffref_atac'}), '(model_wrapper, data=create_background_atac,\n combine_mult_and_diffref=combine_mult_and_diffref_atac)\n', (3527, 3631), False, 'import shap\n'), ((3794, 3844), 'pandas.read_csv', 'pd.read_csv', (['args.peak_file'], {'header': 'None', 'sep': '"""\t"""'}), "(args.peak_file, header=None, sep='\\t')\n", (3805, 3844), True, 'import pandas as pd\n'), ((1851, 1871), 'tensorflow.keras.utils.get_custom_objects', 'get_custom_objects', ([], {}), '()\n', (1869, 1871), False, 'from tensorflow.keras.utils import get_custom_objects\n'), ((2886, 2905), 'deeplift.dinuc_shuffle.dinuc_shuffle', 'dinuc_shuffle', (['s[0]'], {}), '(s[0])\n', (2899, 2905), False, 'from deeplift.dinuc_shuffle import dinuc_shuffle\n'), ((5255, 5271), 'deeplift.dinuc_shuffle.dinuc_shuffle', 'dinuc_shuffle', (['i'], {}), '(i)\n', (5268, 5271), False, 'from deeplift.dinuc_shuffle import dinuc_shuffle\n')]
|
# Generated by Django 1.11.13 on 2019-04-04 01:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("advicer", "0016_auto_20190404_0320")]
operations = [
migrations.AlterField(
model_name="advice",
name="subject",
field=models.CharField(
blank=True, max_length=100, null=True, verbose_name="Subject"
),
)
]
|
[
"django.db.models.CharField"
] |
[((323, 402), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)', 'null': '(True)', 'verbose_name': '"""Subject"""'}), "(blank=True, max_length=100, null=True, verbose_name='Subject')\n", (339, 402), False, 'from django.db import migrations, models\n')]
|
from __future__ import division, print_function, unicode_literals
import streamlit as st
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
st.title('Mô hình dự đoán giá nhà đất tại hồ gươm ')
# x1 is the land plot area (m2)
# x2 is the street frontage length (m)
# x3 is the number of floors
# x4 is the distance to Hoan Kiem Lake (m)
X = np.array([[40, 8, 2, 1800],
[36, 3.5, 6, 450],
[35, 4.5, 6, 450],
[39, 9, 2, 1800],
[40, 9, 1, 1800],
[36, 4.5, 5, 450],
[36, 4.5, 6, 450],
[40, 9, 2, 1800],
[36, 4.5, 7, 450],
[40, 9, 3, 1800],
[44, 4, 5, 350],
[41, 9, 2, 1800],
[37, 4.5, 6, 450],
[36, 5.5, 6, 450],
[40, 10, 2, 1800],
[45, 3, 4, 350],
[45, 4, 3, 350],
[45, 4, 4, 350],
[45, 4, 5, 350],
[45, 5, 4, 350],
[45, 3, 4, 350],
[60, 2.3, 5, 450],
[59, 3.3, 5, 450],
[60, 3.3, 4, 450],
[85, 4, 4, 950],
[85, 4, 5, 950],
[60, 3.3, 5, 450],
[61, 6, 1, 800],
[62, 5, 1, 800],
[85, 4, 6, 950],
[84, 6, 5, 950],
[86, 2.5, 3, 900],
[60, 3.3, 6, 450],
[85, 5, 5, 950],
[85, 3.5, 3, 900],
[86, 3.5, 2, 900],
[31.2, 3, 4, 450],
[61, 3.3, 5, 450],
[62, 6, 1, 800],
[85, 6, 5, 950],
[86, 3.5, 3, 900],
[62, 6, 2, 800],
[86, 3.5, 4, 900],
[87, 3.5, 3, 900],
[30.2, 4, 4, 450],
[62, 6, 3, 800],
[86, 4.5, 3, 900],
[86, 6, 5, 950],
[60, 4.3, 5, 450],
[62, 7, 1, 800],
[63, 6, 1, 800],
[31.2, 4, 4, 450],
[31.2, 4, 3, 450],
[62, 4, 5, 550],
[31.2, 4, 5, 450],
[63, 5, 3, 550],
[63, 4, 5, 550],
[32.2, 4 , 4, 450],
[31.2, 5, 4, 450],
[63, 5, 5, 550],
[64, 4, 5, 550],
[63, 5, 6 , 550],
[63, 6, 4, 550],
[80, 5.8, 7, 1100],
[80, 4.8, 8, 1100],
[80, 5.8, 8, 1100],
[79, 5.8, 8, 1100],
[80, 5.8, 9, 1100],
[81, 5.8, 8, 1100],
[80, 6.8, 8, 1100],
[80, 3.5, 6, 300],
[80, 4.5, 5, 300],
[80, 4.5, 6, 300],
[79, 4.5, 6, 300],
[81, 4.5, 6, 300],
[88, 3.5, 4, 850],
[88, 4.5, 3, 850],
[88, 4.5, 4, 850],
[87, 4.5, 4, 850],
[88, 4.5, 5, 850],
[89, 4.5, 4, 850],
[88, 5.5, 4, 850],
[80, 5.5, 7, 300],
[63, 6, 4, 250],
[62, 7, 4, 250],
[63, 7, 3, 250],
[63, 7, 4, 250],
[63, 7, 5, 250],
[64, 7, 4, 250],
[63, 8, 4, 250],
[140, 4.5, 5, 500],
[139, 5.5, 5, 500],
[140, 5.5, 4, 500],
[140, 5.5, 5, 500],
[140, 5.5, 6, 500],
[141, 5.5, 5, 500],
[140, 6.5, 5, 500]])
Y = np.array([[
19, 19.3, 19.45, 19.48, 19.5, 19.7, 20, 20, 20.3, 20.5,
20.5, 20.52, 20.55, 20.7, 21, 21, 21.3, 21.5, 21.7, 22,
22.5, 29, 30, 30.5, 30.5, 30.8, 31, 31, 31, 31, 31.3, 31.35,
31.5, 31.5, 31.63, 31.7, 32, 32, 32, 32, 32, 32.3, 32.3, 32.37,
32.4, 32.5, 32.65, 32.7, 33, 33, 33, 33.5, 33.5, 33.6, 34, 34, 34.3,
34.6, 35, 35, 35, 35.5, 35.7, 42.5, 42.9, 43, 43.463, 43.5, 43.537,
44.1, 50, 52.3, 53, 53.38, 53.62, 54, 54.5, 55, 55.46, 55.5, 55.54, 56,
56.7, 60, 62.3, 62.5, 63, 63.5, 63.7, 66, 96.5, 97.3, 97.5, 98, 98.5, 98.7, 99.5
]]).T
def duel_plot(X1, X2, Y):
fig = plt.figure(figsize=(15, 5))
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)
ax1.plot(Y, X[:, 0])
ax1.set_title('xét diện tích với giá tiền')
ax1.set_xlabel('giá tiền')
ax1.set_ylabel('Diện tích m2')
ax2.plot(Y, X[:, 1])
ax2.set_title('xét số mét mặt tiền với giá tiền')
ax2.set_xlabel('giá tiền')
ax2.set_ylabel('số mét mặt tiền')
return fig
def duel_plot2(X4, X5, Y):
fig = plt.figure(figsize=(15, 5))
ax3 = fig.add_subplot(1, 2, 1)
ax4 = fig.add_subplot(1, 2, 2)
ax3.plot(Y, X[:, 2])
ax3.set_title('xét số tầng nhà với giá tiền')
ax3.set_xlabel('giá tiền')
ax3.set_ylabel('số tầng nhà')
ax4.plot(Y, X[:, 3])
ax4.set_title('xét khoảng cách với giá tiền')
ax4.set_xlabel('giá tiền')
ax4.set_ylabel('khoảng cách tới hồ gươm')
return fig
st.set_option('deprecation.showPyplotGlobalUse', False)
st.pyplot(duel_plot(X[:, 0], X[:, 1], Y))
st.pyplot(duel_plot2(X[:, 2], X[:, 3], Y))
st.sidebar.title('Dự đoán giá các mẫu nhà')
dt_name = st.sidebar.text_input('Nhập diện tích đất(m2) ')
cd_name = st.sidebar.text_input('Nhập chiều dài mặt tiền(m) ')
tn_name = st.sidebar.text_input('Nhập số tầng nhà(tầng) ')
kc_name = st.sidebar.text_input('Nhập khoảng cách nhà tới hồ gươm(m) ')
one = np.ones((X.shape[0], 1))
Xbar = np.concatenate((one, X), axis=1)
x_train, x_test, y_train, y_test = train_test_split(Xbar, Y, test_size=0.2)
A = np.dot(Xbar.T, Xbar)
b = np.dot(Xbar.T, Y)
w = np.dot(np.linalg.pinv(A), b)
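# Note: w above is the closed-form least-squares solution of the normal equations
# (Xbar^T Xbar) w = Xbar^T Y. A numerically more stable equivalent (assuming plain
# ordinary least squares is the intent) would be:
#   w, *_ = np.linalg.lstsq(Xbar, Y, rcond=None)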
w_0 = w[0][0]
w_1 = w[1][0]
w_2 = w[2][0]
w_3 = w[3][0]
w_4 = w[4][0]
st.write("Độ chính xác (R2 square) : ", r2_score(y_test, np.dot(x_test, w)))
vd = np.array([dt_name, cd_name, tn_name, kc_name, 1])
if st.sidebar.button('Dự đoán'):
y1 = w_1*float(dt_name)+w_2*float(cd_name)+w_3 * \
float(tn_name)+w_4*float(kc_name) + w_0
st.sidebar.write('Giá của ngôi nhà là : ', y1, 'tỷ đồng')
|
[
"streamlit.set_option",
"streamlit.sidebar.write",
"sklearn.model_selection.train_test_split",
"numpy.ones",
"streamlit.title",
"streamlit.sidebar.title",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.dot",
"streamlit.sidebar.text_input",
"streamlit.sidebar.button",
"numpy.linalg.pinv",
"numpy.concatenate"
] |
[((271, 323), 'streamlit.title', 'st.title', (['"""Mô hình dự đoán giá nhà đất tại hồ gươm """'], {}), "('Mô hình dự đoán giá nhà đất tại hồ gươm ')\n", (279, 323), True, 'import streamlit as st\n'), ((449, 2354), 'numpy.array', 'np.array', (['[[40, 8, 2, 1800], [36, 3.5, 6, 450], [35, 4.5, 6, 450], [39, 9, 2, 1800],\n [40, 9, 1, 1800], [36, 4.5, 5, 450], [36, 4.5, 6, 450], [40, 9, 2, 1800\n ], [36, 4.5, 7, 450], [40, 9, 3, 1800], [44, 4, 5, 350], [41, 9, 2, \n 1800], [37, 4.5, 6, 450], [36, 5.5, 6, 450], [40, 10, 2, 1800], [45, 3,\n 4, 350], [45, 4, 3, 350], [45, 4, 4, 350], [45, 4, 5, 350], [45, 5, 4, \n 350], [45, 3, 4, 350], [60, 2.3, 5, 450], [59, 3.3, 5, 450], [60, 3.3, \n 4, 450], [85, 4, 4, 950], [85, 4, 5, 950], [60, 3.3, 5, 450], [61, 6, 1,\n 800], [62, 5, 1, 800], [85, 4, 6, 950], [84, 6, 5, 950], [86, 2.5, 3, \n 900], [60, 3.3, 6, 450], [85, 5, 5, 950], [85, 3.5, 3, 900], [86, 3.5, \n 2, 900], [31.2, 3, 4, 450], [61, 3.3, 5, 450], [62, 6, 1, 800], [85, 6,\n 5, 950], [86, 3.5, 3, 900], [62, 6, 2, 800], [86, 3.5, 4, 900], [87, \n 3.5, 3, 900], [30.2, 4, 4, 450], [62, 6, 3, 800], [86, 4.5, 3, 900], [\n 86, 6, 5, 950], [60, 4.3, 5, 450], [62, 7, 1, 800], [63, 6, 1, 800], [\n 31.2, 4, 4, 450], [31.2, 4, 3, 450], [62, 4, 5, 550], [31.2, 4, 5, 450],\n [63, 5, 3, 550], [63, 4, 5, 550], [32.2, 4, 4, 450], [31.2, 5, 4, 450],\n [63, 5, 5, 550], [64, 4, 5, 550], [63, 5, 6, 550], [63, 6, 4, 550], [80,\n 5.8, 7, 1100], [80, 4.8, 8, 1100], [80, 5.8, 8, 1100], [79, 5.8, 8, \n 1100], [80, 5.8, 9, 1100], [81, 5.8, 8, 1100], [80, 6.8, 8, 1100], [80,\n 3.5, 6, 300], [80, 4.5, 5, 300], [80, 4.5, 6, 300], [79, 4.5, 6, 300],\n [81, 4.5, 6, 300], [88, 3.5, 4, 850], [88, 4.5, 3, 850], [88, 4.5, 4, \n 850], [87, 4.5, 4, 850], [88, 4.5, 5, 850], [89, 4.5, 4, 850], [88, 5.5,\n 4, 850], [80, 5.5, 7, 300], [63, 6, 4, 250], [62, 7, 4, 250], [63, 7, 3,\n 250], [63, 7, 4, 250], [63, 7, 5, 250], [64, 7, 4, 250], [63, 8, 4, 250\n ], [140, 4.5, 5, 500], [139, 5.5, 5, 500], [140, 5.5, 4, 500], [140, \n 5.5, 5, 500], [140, 5.5, 6, 500], [141, 5.5, 5, 500], [140, 6.5, 5, 500]]'], {}), '([[40, 8, 2, 1800], [36, 3.5, 6, 450], [35, 4.5, 6, 450], [39, 9, 2,\n 1800], [40, 9, 1, 1800], [36, 4.5, 5, 450], [36, 4.5, 6, 450], [40, 9, \n 2, 1800], [36, 4.5, 7, 450], [40, 9, 3, 1800], [44, 4, 5, 350], [41, 9,\n 2, 1800], [37, 4.5, 6, 450], [36, 5.5, 6, 450], [40, 10, 2, 1800], [45,\n 3, 4, 350], [45, 4, 3, 350], [45, 4, 4, 350], [45, 4, 5, 350], [45, 5, \n 4, 350], [45, 3, 4, 350], [60, 2.3, 5, 450], [59, 3.3, 5, 450], [60, \n 3.3, 4, 450], [85, 4, 4, 950], [85, 4, 5, 950], [60, 3.3, 5, 450], [61,\n 6, 1, 800], [62, 5, 1, 800], [85, 4, 6, 950], [84, 6, 5, 950], [86, 2.5,\n 3, 900], [60, 3.3, 6, 450], [85, 5, 5, 950], [85, 3.5, 3, 900], [86, \n 3.5, 2, 900], [31.2, 3, 4, 450], [61, 3.3, 5, 450], [62, 6, 1, 800], [\n 85, 6, 5, 950], [86, 3.5, 3, 900], [62, 6, 2, 800], [86, 3.5, 4, 900],\n [87, 3.5, 3, 900], [30.2, 4, 4, 450], [62, 6, 3, 800], [86, 4.5, 3, 900\n ], [86, 6, 5, 950], [60, 4.3, 5, 450], [62, 7, 1, 800], [63, 6, 1, 800],\n [31.2, 4, 4, 450], [31.2, 4, 3, 450], [62, 4, 5, 550], [31.2, 4, 5, 450\n ], [63, 5, 3, 550], [63, 4, 5, 550], [32.2, 4, 4, 450], [31.2, 5, 4, \n 450], [63, 5, 5, 550], [64, 4, 5, 550], [63, 5, 6, 550], [63, 6, 4, 550\n ], [80, 5.8, 7, 1100], [80, 4.8, 8, 1100], [80, 5.8, 8, 1100], [79, 5.8,\n 8, 1100], [80, 5.8, 9, 1100], [81, 5.8, 8, 1100], [80, 6.8, 8, 1100], [\n 80, 3.5, 6, 300], [80, 4.5, 5, 300], [80, 4.5, 6, 300], [79, 4.5, 6, \n 300], [81, 4.5, 6, 300], [88, 3.5, 4, 850], [88, 4.5, 3, 850], 
[88, 4.5,\n 4, 850], [87, 4.5, 4, 850], [88, 4.5, 5, 850], [89, 4.5, 4, 850], [88, \n 5.5, 4, 850], [80, 5.5, 7, 300], [63, 6, 4, 250], [62, 7, 4, 250], [63,\n 7, 3, 250], [63, 7, 4, 250], [63, 7, 5, 250], [64, 7, 4, 250], [63, 8, \n 4, 250], [140, 4.5, 5, 500], [139, 5.5, 5, 500], [140, 5.5, 4, 500], [\n 140, 5.5, 5, 500], [140, 5.5, 6, 500], [141, 5.5, 5, 500], [140, 6.5, 5,\n 500]])\n', (457, 2354), True, 'import numpy as np\n'), ((5056, 5111), 'streamlit.set_option', 'st.set_option', (['"""deprecation.showPyplotGlobalUse"""', '(False)'], {}), "('deprecation.showPyplotGlobalUse', False)\n", (5069, 5111), True, 'import streamlit as st\n'), ((5199, 5242), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Dự đoán giá các mẫu nhà"""'], {}), "('Dự đoán giá các mẫu nhà')\n", (5215, 5242), True, 'import streamlit as st\n'), ((5253, 5301), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Nhập diện tích đất(m2) """'], {}), "('Nhập diện tích đất(m2) ')\n", (5274, 5301), True, 'import streamlit as st\n'), ((5312, 5364), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Nhập chiều dài mặt tiền(m) """'], {}), "('Nhập chiều dài mặt tiền(m) ')\n", (5333, 5364), True, 'import streamlit as st\n'), ((5375, 5423), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Nhập số tầng nhà(tầng) """'], {}), "('Nhập số tầng nhà(tầng) ')\n", (5396, 5423), True, 'import streamlit as st\n'), ((5434, 5495), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Nhập khoảng cách nhà tới hồ gươm(m) """'], {}), "('Nhập khoảng cách nhà tới hồ gươm(m) ')\n", (5455, 5495), True, 'import streamlit as st\n'), ((5502, 5526), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (5509, 5526), True, 'import numpy as np\n'), ((5534, 5566), 'numpy.concatenate', 'np.concatenate', (['(one, X)'], {'axis': '(1)'}), '((one, X), axis=1)\n', (5548, 5566), True, 'import numpy as np\n'), ((5603, 5643), 'sklearn.model_selection.train_test_split', 'train_test_split', (['Xbar', 'Y'], {'test_size': '(0.2)'}), '(Xbar, Y, test_size=0.2)\n', (5619, 5643), False, 'from sklearn.model_selection import train_test_split\n'), ((5649, 5669), 'numpy.dot', 'np.dot', (['Xbar.T', 'Xbar'], {}), '(Xbar.T, Xbar)\n', (5655, 5669), True, 'import numpy as np\n'), ((5674, 5691), 'numpy.dot', 'np.dot', (['Xbar.T', 'Y'], {}), '(Xbar.T, Y)\n', (5680, 5691), True, 'import numpy as np\n'), ((5881, 5930), 'numpy.array', 'np.array', (['[dt_name, cd_name, tn_name, kc_name, 1]'], {}), '([dt_name, cd_name, tn_name, kc_name, 1])\n', (5889, 5930), True, 'import numpy as np\n'), ((5934, 5962), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Dự đoán"""'], {}), "('Dự đoán')\n", (5951, 5962), True, 'import streamlit as st\n'), ((3591, 4164), 'numpy.array', 'np.array', (['[[19, 19.3, 19.45, 19.48, 19.5, 19.7, 20, 20, 20.3, 20.5, 20.5, 20.52, \n 20.55, 20.7, 21, 21, 21.3, 21.5, 21.7, 22, 22.5, 29, 30, 30.5, 30.5, \n 30.8, 31, 31, 31, 31, 31.3, 31.35, 31.5, 31.5, 31.63, 31.7, 32, 32, 32,\n 32, 32, 32.3, 32.3, 32.37, 32.4, 32.5, 32.65, 32.7, 33, 33, 33, 33.5, \n 33.5, 33.6, 34, 34, 34.3, 34.6, 35, 35, 35, 35.5, 35.7, 42.5, 42.9, 43,\n 43.463, 43.5, 43.537, 44.1, 50, 52.3, 53, 53.38, 53.62, 54, 54.5, 55, \n 55.46, 55.5, 55.54, 56, 56.7, 60, 62.3, 62.5, 63, 63.5, 63.7, 66, 96.5,\n 97.3, 97.5, 98, 98.5, 98.7, 99.5]]'], {}), '([[19, 19.3, 19.45, 19.48, 19.5, 19.7, 20, 20, 20.3, 20.5, 20.5, \n 20.52, 20.55, 20.7, 21, 21, 21.3, 21.5, 21.7, 22, 22.5, 29, 30, 30.5, \n 30.5, 30.8, 31, 31, 31, 31, 31.3, 31.35, 31.5, 
31.5, 31.63, 31.7, 32, \n 32, 32, 32, 32, 32.3, 32.3, 32.37, 32.4, 32.5, 32.65, 32.7, 33, 33, 33,\n 33.5, 33.5, 33.6, 34, 34, 34.3, 34.6, 35, 35, 35, 35.5, 35.7, 42.5, \n 42.9, 43, 43.463, 43.5, 43.537, 44.1, 50, 52.3, 53, 53.38, 53.62, 54, \n 54.5, 55, 55.46, 55.5, 55.54, 56, 56.7, 60, 62.3, 62.5, 63, 63.5, 63.7,\n 66, 96.5, 97.3, 97.5, 98, 98.5, 98.7, 99.5]])\n', (3599, 4164), True, 'import numpy as np\n'), ((4206, 4233), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (4216, 4233), True, 'import matplotlib.pyplot as plt\n'), ((4648, 4675), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (4658, 4675), True, 'import matplotlib.pyplot as plt\n'), ((5703, 5720), 'numpy.linalg.pinv', 'np.linalg.pinv', (['A'], {}), '(A)\n', (5717, 5720), True, 'import numpy as np\n'), ((6071, 6128), 'streamlit.sidebar.write', 'st.sidebar.write', (['"""Giá của ngôi nhà là : """', 'y1', '"""tỷ đồng"""'], {}), "('Giá của ngôi nhà là : ', y1, 'tỷ đồng')\n", (6087, 6128), True, 'import streamlit as st\n'), ((5860, 5877), 'numpy.dot', 'np.dot', (['x_test', 'w'], {}), '(x_test, w)\n', (5866, 5877), True, 'import numpy as np\n')]
|
#################################################################
# Libraries
#################################################################
import sys, os
import pytest
import bitarray
from granite.toBig import (
main as main_toBig
)
from granite.lib.shared_functions import *
#################################################################
# Tests
#################################################################
def test_run_toBig_rdthr_2_all():
''' '''
# Variables
args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz',
'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out',
'fithr': '3', 'rdthr': '2', 'ncores': '2', 'abthr': None,
'regionfile': 'tests/files/input_toBig.regions',
'chromfile': 'tests/files/input_toBig.chrom.size'}
# Run
main_toBig(args)
# Expected
snv_expect = [11001, 11007, 11010]
ins_expect = [11005, 11022]
del_expect = [11017, 11030]
# Tests
bit = bitarray.bitarray(11030 + 1)
big_dict = load_big('tests/files/main_test.out')
# Check snv
bit.setall(False)
for i in snv_expect:
bit[i] = True
#end for
assert big_dict['13_snv'][:11031] == bit
# Check ins
bit.setall(False)
for i in ins_expect:
bit[i] = True
#end for
assert big_dict['13_ins'][:11031] == bit
# Check del
bit.setall(False)
for i in del_expect:
bit[i] = True
#end for
assert big_dict['13_del'][:11031] == bit
# Clean
os.remove('tests/files/main_test.out')
#end def
def test_run_toBig_rdthr_2_2():
''' '''
# Variables
args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz',
'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out',
'fithr': '2', 'rdthr': '2', 'ncores': '2', 'abthr': None,
'regionfile': 'tests/files/input_toBig.regions',
'chromfile': 'tests/files/input_toBig.chrom.size'}
# Run
main_toBig(args)
# Expected
snv_expect = [11001, 11002, 11007, 11010, 11013, 11023]
ins_expect = [11005, 11022]
del_expect = [11017, 11030]
# Tests
bit = bitarray.bitarray(11030 + 1)
big_dict = load_big('tests/files/main_test.out')
# Check snv
bit.setall(False)
for i in snv_expect:
bit[i] = True
#end for
assert big_dict['13_snv'][:11031] == bit
# Check ins
bit.setall(False)
for i in ins_expect:
bit[i] = True
#end for
assert big_dict['13_ins'][:11031] == bit
# Check del
bit.setall(False)
for i in del_expect:
bit[i] = True
#end for
assert big_dict['13_del'][:11031] == bit
# Clean
os.remove('tests/files/main_test.out')
#end def
def test_run_toBig_rdthr_17_all():
''' '''
# Variables
args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz',
'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out',
'fithr': '3', 'rdthr': '17', 'ncores': None, 'abthr': None,
'regionfile': 'tests/files/input_toBig.regions',
'chromfile': 'tests/files/input_toBig.chrom.size'}
# Run
main_toBig(args)
# Expected
snv_expect = [11007]
ins_expect = []
del_expect = []
# Tests
bit = bitarray.bitarray(11030 + 1)
big_dict = load_big('tests/files/main_test.out')
# Check snv
bit.setall(False)
for i in snv_expect:
bit[i] = True
#end for
assert big_dict['13_snv'][:11031] == bit
# Check ins
bit.setall(False)
for i in ins_expect:
bit[i] = True
#end for
assert big_dict['13_ins'][:11031] == bit
# Check del
bit.setall(False)
for i in del_expect:
bit[i] = True
#end for
assert big_dict['13_del'][:11031] == bit
# Clean
os.remove('tests/files/main_test.out')
#end def
def test_run_toBig_rdthr_17_2():
''' '''
# Variables
args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz',
'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out',
'fithr': '2', 'rdthr': '17', 'ncores': '1', 'abthr': None,
'regionfile': 'tests/files/input_toBig.regions',
'chromfile': 'tests/files/input_toBig.chrom.size'}
# Run
main_toBig(args)
# Expected
snv_expect = [11007, 11010]
ins_expect = []
del_expect = []
# Tests
bit = bitarray.bitarray(11030 + 1)
big_dict = load_big('tests/files/main_test.out')
# Check snv
bit.setall(False)
for i in snv_expect:
bit[i] = True
#end for
assert big_dict['13_snv'][:11031] == bit
# Check ins
bit.setall(False)
for i in ins_expect:
bit[i] = True
#end for
assert big_dict['13_ins'][:11031] == bit
# Check del
bit.setall(False)
for i in del_expect:
bit[i] = True
#end for
assert big_dict['13_del'][:11031] == bit
# Clean
os.remove('tests/files/main_test.out')
#end def
def test_run_toBig_abthr_15_all():
''' '''
# Variables
args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz',
'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out',
'fithr': '3', 'rdthr': None, 'ncores': '2', 'abthr': None,
'regionfile': 'tests/files/input_toBig.regions',
'chromfile': 'tests/files/input_toBig.chrom.size'}
# Run
main_toBig(args)
# Expected
snv_expect = [11007, 11010, 11013]
ins_expect = [11022]
del_expect = [11030]
# Tests
bit = bitarray.bitarray(11030 + 1)
big_dict = load_big('tests/files/main_test.out')
# Check snv
bit.setall(False)
for i in snv_expect:
bit[i] = True
#end for
assert big_dict['13_snv'][:11031] == bit
# Check ins
bit.setall(False)
for i in ins_expect:
bit[i] = True
#end for
assert big_dict['13_ins'][:11031] == bit
# Check del
bit.setall(False)
for i in del_expect:
bit[i] = True
#end for
assert big_dict['13_del'][:11031] == bit
# Clean
os.remove('tests/files/main_test.out')
#end def
def test_run_toBig_abthr_25_all():
''' '''
# Variables
args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz',
'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out',
'fithr': '3', 'rdthr': None, 'ncores': '2', 'abthr': '25',
'regionfile': 'tests/files/input_toBig.regions',
'chromfile': 'tests/files/input_toBig.chrom.size'}
# Run
main_toBig(args)
# Expected
snv_expect = [11010]
ins_expect = []
del_expect = []
# Tests
bit = bitarray.bitarray(11030 + 1)
big_dict = load_big('tests/files/main_test.out')
# Check snv
bit.setall(False)
for i in snv_expect:
bit[i] = True
#end for
assert big_dict['13_snv'][:11031] == bit
# Check ins
bit.setall(False)
for i in ins_expect:
bit[i] = True
#end for
assert big_dict['13_ins'][:11031] == bit
# Check del
bit.setall(False)
for i in del_expect:
bit[i] = True
#end for
assert big_dict['13_del'][:11031] == bit
# Clean
os.remove('tests/files/main_test.out')
#end def
def test_run_toBig_abthr_25_2():
''' '''
# Variables
args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz',
'tests/files/input_toBig_3.rck.gz'], 'outputfile': 'tests/files/main_test.out',
'fithr': '2', 'rdthr': None, 'ncores': '1', 'abthr': '25',
'regionfile': 'tests/files/input_toBig.regions',
'chromfile': 'tests/files/input_toBig.chrom.size'}
# Run
main_toBig(args)
# Expected
snv_expect = [11007, 11010]
ins_expect = []
del_expect = []
# Tests
bit = bitarray.bitarray(11030 + 1)
big_dict = load_big('tests/files/main_test.out')
# Check snv
bit.setall(False)
for i in snv_expect:
bit[i] = True
#end for
assert big_dict['13_snv'][:11031] == bit
# Check ins
bit.setall(False)
for i in ins_expect:
bit[i] = True
#end for
assert big_dict['13_ins'][:11031] == bit
# Check del
bit.setall(False)
for i in del_expect:
bit[i] = True
#end for
assert big_dict['13_del'][:11031] == bit
# Clean
os.remove('tests/files/main_test.out')
#end def
def test_run_toBig_rdthr_2_1_single():
''' '''
# Variables
args = {'file': ['tests/files/input_toBig_1.rck.gz'],
'outputfile': 'tests/files/main_test.out',
'fithr': '1', 'rdthr': '2', 'ncores': '2', 'abthr': None,
'regionfile': 'tests/files/input_toBig.regions',
'chromfile': 'tests/files/input_toBig.chrom.size'}
# Run
main_toBig(args)
# Expected
snv_expect = [11001, 11002, 11007, 11010, 11013, 11023]
ins_expect = [11005, 11022]
del_expect = [11017, 11030]
# Tests
bit = bitarray.bitarray(11030 + 1)
big_dict = load_big('tests/files/main_test.out')
# Check snv
bit.setall(False)
for i in snv_expect:
bit[i] = True
#end for
assert big_dict['13_snv'][:11031] == bit
# Check ins
bit.setall(False)
for i in ins_expect:
bit[i] = True
#end for
assert big_dict['13_ins'][:11031] == bit
# Check del
bit.setall(False)
for i in del_expect:
bit[i] = True
#end for
assert big_dict['13_del'][:11031] == bit
# Clean
os.remove('tests/files/main_test.out')
#end def
def test_run_toBig_rdthr_2_2_single():
''' '''
# Variables
args = {'file': ['tests/files/input_toBig_1.rck.gz'],
'outputfile': 'tests/files/main_test.out',
'fithr': '2', 'rdthr': '2', 'ncores': '2', 'abthr': None,
'regionfile': 'tests/files/input_toBig.regions',
'chromfile': 'tests/files/input_toBig.chrom.size'}
# Run
main_toBig(args)
# Expected
snv_expect = []
ins_expect = []
del_expect = []
# Tests
bit = bitarray.bitarray(11030 + 1)
big_dict = load_big('tests/files/main_test.out')
# Check snv
bit.setall(False)
for i in snv_expect:
bit[i] = True
#end for
assert big_dict['13_snv'][:11031] == bit
# Check ins
bit.setall(False)
for i in ins_expect:
bit[i] = True
#end for
assert big_dict['13_ins'][:11031] == bit
# Check del
bit.setall(False)
for i in del_expect:
bit[i] = True
#end for
assert big_dict['13_del'][:11031] == bit
# Clean
os.remove('tests/files/main_test.out')
#end def
#################################################################
# Errors
#################################################################
def test_run_toBig_rdthr_2_all_miss_pos():
''' '''
# Variables
args = {'file': ['tests/files/input_toBig_1.rck.gz', 'tests/files/input_toBig_2.rck.gz',
'tests/files/input_toBig_3.rck.gz', 'tests/files/input_toBig_miss_pos.rck.gz'], 'outputfile': 'tests/files/main_test.out',
'fithr': '4', 'rdthr': '2', 'ncores': '2', 'abthr': None,
'regionfile': 'tests/files/input_toBig.regions',
'chromfile': 'tests/files/input_toBig.chrom.size'}
# Run and Tests
with pytest.raises(Exception) as e:
assert main_toBig(args)
assert str(e.value) == '\nERROR in file: position 13:11006 in file tests/files/input_toBig_miss_pos.rck.gz is not consistent with other files\n'
#end def
|
[
"pytest.raises",
"os.remove",
"bitarray.bitarray",
"granite.toBig.main"
] |
[((938, 954), 'granite.toBig.main', 'main_toBig', (['args'], {}), '(args)\n', (948, 954), True, 'from granite.toBig import main as main_toBig\n'), ((1095, 1123), 'bitarray.bitarray', 'bitarray.bitarray', (['(11030 + 1)'], {}), '(11030 + 1)\n', (1112, 1123), False, 'import bitarray\n'), ((1622, 1660), 'os.remove', 'os.remove', (['"""tests/files/main_test.out"""'], {}), "('tests/files/main_test.out')\n", (1631, 1660), False, 'import sys, os\n'), ((2124, 2140), 'granite.toBig.main', 'main_toBig', (['args'], {}), '(args)\n', (2134, 2140), True, 'from granite.toBig import main as main_toBig\n'), ((2302, 2330), 'bitarray.bitarray', 'bitarray.bitarray', (['(11030 + 1)'], {}), '(11030 + 1)\n', (2319, 2330), False, 'import bitarray\n'), ((2829, 2867), 'os.remove', 'os.remove', (['"""tests/files/main_test.out"""'], {}), "('tests/files/main_test.out')\n", (2838, 2867), False, 'import sys, os\n'), ((3336, 3352), 'granite.toBig.main', 'main_toBig', (['args'], {}), '(args)\n', (3346, 3352), True, 'from granite.toBig import main as main_toBig\n'), ((3455, 3483), 'bitarray.bitarray', 'bitarray.bitarray', (['(11030 + 1)'], {}), '(11030 + 1)\n', (3472, 3483), False, 'import bitarray\n'), ((3982, 4020), 'os.remove', 'os.remove', (['"""tests/files/main_test.out"""'], {}), "('tests/files/main_test.out')\n", (3991, 4020), False, 'import sys, os\n'), ((4486, 4502), 'granite.toBig.main', 'main_toBig', (['args'], {}), '(args)\n', (4496, 4502), True, 'from granite.toBig import main as main_toBig\n'), ((4612, 4640), 'bitarray.bitarray', 'bitarray.bitarray', (['(11030 + 1)'], {}), '(11030 + 1)\n', (4629, 4640), False, 'import bitarray\n'), ((5139, 5177), 'os.remove', 'os.remove', (['"""tests/files/main_test.out"""'], {}), "('tests/files/main_test.out')\n", (5148, 5177), False, 'import sys, os\n'), ((5646, 5662), 'granite.toBig.main', 'main_toBig', (['args'], {}), '(args)\n', (5656, 5662), True, 'from granite.toBig import main as main_toBig\n'), ((5789, 5817), 'bitarray.bitarray', 'bitarray.bitarray', (['(11030 + 1)'], {}), '(11030 + 1)\n', (5806, 5817), False, 'import bitarray\n'), ((6316, 6354), 'os.remove', 'os.remove', (['"""tests/files/main_test.out"""'], {}), "('tests/files/main_test.out')\n", (6325, 6354), False, 'import sys, os\n'), ((6822, 6838), 'granite.toBig.main', 'main_toBig', (['args'], {}), '(args)\n', (6832, 6838), True, 'from granite.toBig import main as main_toBig\n'), ((6941, 6969), 'bitarray.bitarray', 'bitarray.bitarray', (['(11030 + 1)'], {}), '(11030 + 1)\n', (6958, 6969), False, 'import bitarray\n'), ((7468, 7506), 'os.remove', 'os.remove', (['"""tests/files/main_test.out"""'], {}), "('tests/files/main_test.out')\n", (7477, 7506), False, 'import sys, os\n'), ((7972, 7988), 'granite.toBig.main', 'main_toBig', (['args'], {}), '(args)\n', (7982, 7988), True, 'from granite.toBig import main as main_toBig\n'), ((8098, 8126), 'bitarray.bitarray', 'bitarray.bitarray', (['(11030 + 1)'], {}), '(11030 + 1)\n', (8115, 8126), False, 'import bitarray\n'), ((8625, 8663), 'os.remove', 'os.remove', (['"""tests/files/main_test.out"""'], {}), "('tests/files/main_test.out')\n", (8634, 8663), False, 'import sys, os\n'), ((9062, 9078), 'granite.toBig.main', 'main_toBig', (['args'], {}), '(args)\n', (9072, 9078), True, 'from granite.toBig import main as main_toBig\n'), ((9240, 9268), 'bitarray.bitarray', 'bitarray.bitarray', (['(11030 + 1)'], {}), '(11030 + 1)\n', (9257, 9268), False, 'import bitarray\n'), ((9767, 9805), 'os.remove', 'os.remove', (['"""tests/files/main_test.out"""'], {}), 
"('tests/files/main_test.out')\n", (9776, 9805), False, 'import sys, os\n'), ((10204, 10220), 'granite.toBig.main', 'main_toBig', (['args'], {}), '(args)\n', (10214, 10220), True, 'from granite.toBig import main as main_toBig\n'), ((10318, 10346), 'bitarray.bitarray', 'bitarray.bitarray', (['(11030 + 1)'], {}), '(11030 + 1)\n', (10335, 10346), False, 'import bitarray\n'), ((10845, 10883), 'os.remove', 'os.remove', (['"""tests/files/main_test.out"""'], {}), "('tests/files/main_test.out')\n", (10854, 10883), False, 'import sys, os\n'), ((11560, 11584), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (11573, 11584), False, 'import pytest\n'), ((11606, 11622), 'granite.toBig.main', 'main_toBig', (['args'], {}), '(args)\n', (11616, 11622), True, 'from granite.toBig import main as main_toBig\n')]
|
from django.conf.urls import url
from django.views.generic import ListView, DetailView
from models import Notas
from .views import *
urlpatterns = [
url(r'^$', list_notas,name='notas_list'),
# url(r'^$', 'lista_notas', name="notas_list"),
url(r'^pais/(?P<id>\d+)/$', lista_notas_pais, name="notas_list_pais"),
url(r'^coparte/(?P<id>\d+)/$', lista_notas_copartes, name="notas_list_copartes"),
# url(r'^ver/(?P<id>\d+)/$', 'comentar_nota', name='comentar-nota'),
url(r'^(?P<id>\d+)/$', nota_detail, name='notas-detail'),
url(r'^crear/$', crear_nota, name="crear-nota"),
# url(r'^editar/(?P<id>\d+)/$', 'editar_nota', name='editar-nota'),
# url(r'^borrar/(?P<id>\d+)/$', 'borrar_nota', name='borrar-nota'),
url(r'^imagenes/$', ver_imagenes, name="imagenes-nota"),
url(r'^videos/$', ver_videos, name="videos-nota"),
]
|
[
"django.conf.urls.url"
] |
[((154, 194), 'django.conf.urls.url', 'url', (['"""^$"""', 'list_notas'], {'name': '"""notas_list"""'}), "('^$', list_notas, name='notas_list')\n", (157, 194), False, 'from django.conf.urls import url\n'), ((252, 321), 'django.conf.urls.url', 'url', (['"""^pais/(?P<id>\\\\d+)/$"""', 'lista_notas_pais'], {'name': '"""notas_list_pais"""'}), "('^pais/(?P<id>\\\\d+)/$', lista_notas_pais, name='notas_list_pais')\n", (255, 321), False, 'from django.conf.urls import url\n'), ((327, 412), 'django.conf.urls.url', 'url', (['"""^coparte/(?P<id>\\\\d+)/$"""', 'lista_notas_copartes'], {'name': '"""notas_list_copartes"""'}), "('^coparte/(?P<id>\\\\d+)/$', lista_notas_copartes, name='notas_list_copartes'\n )\n", (330, 412), False, 'from django.conf.urls import url\n'), ((486, 542), 'django.conf.urls.url', 'url', (['"""^(?P<id>\\\\d+)/$"""', 'nota_detail'], {'name': '"""notas-detail"""'}), "('^(?P<id>\\\\d+)/$', nota_detail, name='notas-detail')\n", (489, 542), False, 'from django.conf.urls import url\n'), ((548, 594), 'django.conf.urls.url', 'url', (['"""^crear/$"""', 'crear_nota'], {'name': '"""crear-nota"""'}), "('^crear/$', crear_nota, name='crear-nota')\n", (551, 594), False, 'from django.conf.urls import url\n'), ((745, 799), 'django.conf.urls.url', 'url', (['"""^imagenes/$"""', 'ver_imagenes'], {'name': '"""imagenes-nota"""'}), "('^imagenes/$', ver_imagenes, name='imagenes-nota')\n", (748, 799), False, 'from django.conf.urls import url\n'), ((806, 854), 'django.conf.urls.url', 'url', (['"""^videos/$"""', 'ver_videos'], {'name': '"""videos-nota"""'}), "('^videos/$', ver_videos, name='videos-nota')\n", (809, 854), False, 'from django.conf.urls import url\n')]
|
# -*- coding: utf-8 -*-
#/usr/bin/python2
'''
June 2017 by <NAME>.
<EMAIL>.
https://www.github.com/kyubyong/transformer
'''
from __future__ import print_function
import codecs
import os
import tensorflow as tf
import numpy as np
from hyperparams import Hyperparams as hp
from data_load import load_test_data, load_de_vocab, load_en_vocab
from train import Graph
#from nltk.translate.bleu_score import corpus_bleu
def eval():
# Load graph
g = Graph(is_training=False)
print("Graph loaded")
# Load data
# X, Sources, Targets = load_test_data()
"""
x_list, y_list, Sources, Targets = [], [], [], []
for source_sent, target_sent in zip(source_sents, target_sents):
x = [de2idx.get(word, 1) for word in (source_sent + u" </S>").split()] # 1: OOV, </S>: End of Text
y = [en2idx.get(word, 1) for word in (target_sent + u" </S>").split()]
if max(len(x), len(y)) <=hp.maxlen:
x_list.append(np.array(x))
y_list.append(np.array(y))
Sources.append(source_sent)
Targets.append(target_sent)
# Pad
X = np.zeros([len(x_list), hp.maxlen], np.int32)
Y = np.zeros([len(y_list), hp.maxlen], np.int32)
for i, (x, y) in enumerate(zip(x_list, y_list)):
X[i] = np.lib.pad(x, [0, hp.maxlen-len(x)], 'constant', constant_values=(0, 0))
"""
en2idx, idx2en = load_en_vocab()
# Start session
with g.graph.as_default():
sv = tf.train.Supervisor()
with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
## Restore parameters
sv.saver.restore(sess, tf.train.latest_checkpoint(hp.logdir))
while(True):
prompt = raw_input()
xlist = []
                xval = [en2idx.get(word, 1) for word in (prompt + u" </S>").split()]
if (len(xval) <= hp.maxlen):
xlist.append(np.array(xval))
X = np.zeros([len(xlist), hp.maxlen], np.int32)
for i, xi in enumerate(xlist):
                    X[i] = np.lib.pad(xi, [0, hp.maxlen - len(xi)], 'constant', constant_values=(0, 0))
list_of_refs, hypotheses = [], []
for i in range(len(X) // hp.batch_size):
### Get mini-batches
x = X[i*hp.batch_size: (i+1)*hp.batch_size]
prompt = raw_input()
### Autoregressive inference
preds = np.zeros((hp.batch_size, hp.maxlen), np.int32)
for j in range(hp.maxlen):
#print("j: " + str(j))
_preds = sess.run(g.preds, {g.x: x, g.y: preds})
preds[:, j] = _preds[:, j]
#print(pred) # pred should be length 1 each time due to the cycling of the while loop in main
for pred in preds:
got = " ".join(idx2en[idx] for idx in pred).split("</S>")[0].strip()
#return got
print(got)
if __name__ == '__main__':
eval()
|
[
"train.Graph",
"data_load.load_en_vocab",
"numpy.zeros",
"tensorflow.train.Supervisor",
"tensorflow.ConfigProto",
"tensorflow.train.latest_checkpoint",
"numpy.array"
] |
[((456, 480), 'train.Graph', 'Graph', ([], {'is_training': '(False)'}), '(is_training=False)\n', (461, 480), False, 'from train import Graph\n'), ((1395, 1410), 'data_load.load_en_vocab', 'load_en_vocab', ([], {}), '()\n', (1408, 1410), False, 'from data_load import load_test_data, load_de_vocab, load_en_vocab\n'), ((1494, 1515), 'tensorflow.train.Supervisor', 'tf.train.Supervisor', ([], {}), '()\n', (1513, 1515), True, 'import tensorflow as tf\n'), ((1676, 1713), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['hp.logdir'], {}), '(hp.logdir)\n', (1702, 1713), True, 'import tensorflow as tf\n'), ((1555, 1596), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (1569, 1596), True, 'import tensorflow as tf\n'), ((2533, 2579), 'numpy.zeros', 'np.zeros', (['(hp.batch_size, hp.maxlen)', 'np.int32'], {}), '((hp.batch_size, hp.maxlen), np.int32)\n', (2541, 2579), True, 'import numpy as np\n'), ((1973, 1987), 'numpy.array', 'np.array', (['xval'], {}), '(xval)\n', (1981, 1987), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "forecastsite.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
try:
import django
except ImportError:
raise ImportError(
"Nie można zaimportować Django"
)
raise
execute_from_command_line(sys.argv)
|
[
"os.environ.setdefault",
"django.core.management.execute_from_command_line"
] |
[((75, 147), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""forecastsite.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'forecastsite.settings')\n", (96, 147), False, 'import os\n'), ((428, 463), 'django.core.management.execute_from_command_line', 'execute_from_command_line', (['sys.argv'], {}), '(sys.argv)\n', (453, 463), False, 'from django.core.management import execute_from_command_line\n')]
|
import torch.nn as nn
import torch
class LabelSmoothing(nn.Module):
def __init__(self, size, smoothing=0.0):
super(LabelSmoothing, self).__init__()
self.criterion = nn.KLDivLoss(size_average=False)
#self.padding_idx = padding_idx
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
self.size = size
self.true_dist = None
def forward(self, x, target):
"""
x表示输入 (M,N)N个样本,M表示总类数,每一个类的概率log P
target表示label(M,)
"""
assert x.size(1) == self.size
x = x.log()
        true_dist = x.data.clone()  # deep copy to get a same-shaped buffer
        true_dist.fill_(self.smoothing / (self.size - 1))  # the "otherwise" term of the smoothing formula
        # Build the smoothed one-hot target: scatter along dim=1, using
        # target.data.unsqueeze(1) as indices and self.confidence as the fill value.
true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)
self.true_dist = true_dist
# print(x.shape,true_dist.shape)
return self.criterion(x, true_dist)
class LabelSmoothingLoss(nn.Module):
def __init__(self, classes, smoothing=0.0, dim=-1):
super(LabelSmoothingLoss, self).__init__()
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
self.cls = classes
self.dim = dim
def forward(self, pred, target):
pred = pred.log_softmax(dim=self.dim)
with torch.no_grad():
true_dist = pred.data.clone()
true_dist = torch.zeros_like(pred)
true_dist.fill_(self.smoothing / (self.cls - 1))
true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)
return torch.mean(torch.sum(-true_dist * pred, dim=self.dim))
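if __name__ == "__main__":
    # Minimal usage sketch (illustrative example, not taken from the original source):
    # 3 samples over 5 classes, smoothing factor 0.1.
    logits = torch.randn(3, 5)
    targets = torch.tensor([0, 2, 4])
    criterion = LabelSmoothingLoss(classes=5, smoothing=0.1)
    print(criterion(logits, targets))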
|
[
"torch.nn.KLDivLoss",
"torch.no_grad",
"torch.sum",
"torch.zeros_like"
] |
[((185, 217), 'torch.nn.KLDivLoss', 'nn.KLDivLoss', ([], {'size_average': '(False)'}), '(size_average=False)\n', (197, 217), True, 'import torch.nn as nn\n'), ((1400, 1415), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1413, 1415), False, 'import torch\n'), ((1483, 1505), 'torch.zeros_like', 'torch.zeros_like', (['pred'], {}), '(pred)\n', (1499, 1505), False, 'import torch\n'), ((1670, 1712), 'torch.sum', 'torch.sum', (['(-true_dist * pred)'], {'dim': 'self.dim'}), '(-true_dist * pred, dim=self.dim)\n', (1679, 1712), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
from __future__ import division
import unittest
import odelab
from odelab.scheme.stochastic import *
from odelab.system import *
from odelab.solver import *
import numpy as np
class Test_OU(unittest.TestCase):
def test_run(self):
sys = OrnsteinUhlenbeck()
scheme = EulerMaruyama()
scheme.h = .01
self.s = SingleStepSolver(scheme, sys)
self.s.initialize(u0=np.array([1.]))
self.s.run(time=1.)
class Test_Differentiator(unittest.TestCase):
t0 = 5e-9
V0 = .01
def test_run(self):
sys = Differentiator(LinBumpSignal(self.V0,self.t0))
## sys.kT = 0. # no noise
scheme = EulerMaruyama()
## scheme.h = 2.5e-11
scheme.h = self.t0
self.s = SingleStepSolver(scheme, sys)
self.s.initialize(u0 = np.array([0,0,0,0,0.]))
self.s.run(time=5*self.t0)
|
[
"numpy.array"
] |
[((397, 412), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (405, 412), True, 'import numpy as np\n'), ((745, 772), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0.0]'], {}), '([0, 0, 0, 0, 0.0])\n', (753, 772), True, 'import numpy as np\n')]
|
'''
Listing 5.1: Operator usage (and vector usage)
'''
import pyopencl as cl
import pyopencl.array
import utility
kernel_src = '''
__kernel void op_test(__global int4 *output) {
int4 vec = (int4)(1, 2, 3, 4);
/* Adds 4 to every element of vec */
vec += 4;
/* Sets the third element to 0
Doesn't change the other elements
(-1 in hexadecimal = 0xFFFFFFFF */
if(vec.s2 == 7){
vec &= (int4)(-1, -1, 0, -1);
}
/* Sets the first element to -1, the second to 0 */
vec.s01 = vec.s23 < 7;
/* Divides the last element by 2 until it is less than or equal to 7 */
while(vec.s3 > 7 && (vec.s0 < 16 || vec.s1 < 16)){
vec.s3 >>= 1;
}
*output = vec;
}
'''
# Get device and context, create command queue and program
dev = utility.get_default_device()
context = cl.Context(devices=[dev])
queue = cl.CommandQueue(context, dev)
# Build program in the specified context using the kernel source code
prog = cl.Program(context, kernel_src)
try:
prog.build(options=['-Werror'], devices=[dev])
except:
print('Build log:')
print(prog.get_build_info(dev, cl.program_build_info.LOG))
raise
# Create output buffer
out = cl.array.vec.zeros_int4()
buffer_out = cl.Buffer(context, cl.mem_flags.WRITE_ONLY, size=out.itemsize)
# Enqueue kernel (with argument specified directly)
n_globals = (1,)
n_locals = None
prog.op_test(queue, n_globals, n_locals, buffer_out)
# Enqueue command to copy from buffer_out to host memory
cl.enqueue_copy(queue, dest=out, src=buffer_out, is_blocking=True)
print('Output: ' + str(out))
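# For reference, tracing the kernel by hand (assuming signed int4 semantics, where a
# vector comparison yields -1 for true): vec goes (1,2,3,4) -> (5,6,7,8) -> (5,6,0,8)
# -> (-1,0,0,8) -> (-1,0,0,4), so the printed output should be (-1, 0, 0, 4).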
|
[
"utility.get_default_device",
"pyopencl.array.vec.zeros_int4",
"pyopencl.enqueue_copy",
"pyopencl.Context",
"pyopencl.CommandQueue",
"pyopencl.Buffer",
"pyopencl.Program"
] |
[((775, 803), 'utility.get_default_device', 'utility.get_default_device', ([], {}), '()\n', (801, 803), False, 'import utility\n'), ((814, 839), 'pyopencl.Context', 'cl.Context', ([], {'devices': '[dev]'}), '(devices=[dev])\n', (824, 839), True, 'import pyopencl as cl\n'), ((848, 877), 'pyopencl.CommandQueue', 'cl.CommandQueue', (['context', 'dev'], {}), '(context, dev)\n', (863, 877), True, 'import pyopencl as cl\n'), ((957, 988), 'pyopencl.Program', 'cl.Program', (['context', 'kernel_src'], {}), '(context, kernel_src)\n', (967, 988), True, 'import pyopencl as cl\n'), ((1181, 1206), 'pyopencl.array.vec.zeros_int4', 'cl.array.vec.zeros_int4', ([], {}), '()\n', (1204, 1206), True, 'import pyopencl as cl\n'), ((1220, 1282), 'pyopencl.Buffer', 'cl.Buffer', (['context', 'cl.mem_flags.WRITE_ONLY'], {'size': 'out.itemsize'}), '(context, cl.mem_flags.WRITE_ONLY, size=out.itemsize)\n', (1229, 1282), True, 'import pyopencl as cl\n'), ((1480, 1546), 'pyopencl.enqueue_copy', 'cl.enqueue_copy', (['queue'], {'dest': 'out', 'src': 'buffer_out', 'is_blocking': '(True)'}), '(queue, dest=out, src=buffer_out, is_blocking=True)\n', (1495, 1546), True, 'import pyopencl as cl\n')]
|
import time
from surgeon import *
ori_time=int()
cur_time=int()
pre_time=int()
waves=int()
double_water=False
AUTO=False
def fight_start():
global ori_time,started,double_water,pre_time
ori_time=time.time()
pre_time=0
started=True
double_water=False
def fight_end():
global started
print("Fight Finished.")
if AUTO:
next_fight()
fight_start()
else:
exit()
def time_past():
global cur_time
cur_time=time.time()
return int(cur_time-ori_time)
def wait():
global double_water,cur_time
if double_water:
time.sleep(3)
else:
cur_time=time.time()
if cur_time-ori_time>=120:
double_water=True
time.sleep(3)
else:
time.sleep(5)
def Fight():
global waves,cur_time,pre_time
cur_time=time.time()
if cur_time-pre_time>=30:
waves+=1
# pre_time=cur_time
return True
else:
return False
def set_pre_time():
global pre_time
pre_time=time.time()
if __name__=='__main__':
print("Regulator Here")
|
[
"time.sleep",
"time.time"
] |
[((207, 218), 'time.time', 'time.time', ([], {}), '()\n', (216, 218), False, 'import time\n'), ((472, 483), 'time.time', 'time.time', ([], {}), '()\n', (481, 483), False, 'import time\n'), ((839, 850), 'time.time', 'time.time', ([], {}), '()\n', (848, 850), False, 'import time\n'), ((1031, 1042), 'time.time', 'time.time', ([], {}), '()\n', (1040, 1042), False, 'import time\n'), ((593, 606), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (603, 606), False, 'import time\n'), ((634, 645), 'time.time', 'time.time', ([], {}), '()\n', (643, 645), False, 'import time\n'), ((723, 736), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (733, 736), False, 'import time\n'), ((763, 776), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (773, 776), False, 'import time\n')]
|
""" https://adventofcode.com/2021/day/17 """
import re
import math
from typing import Tuple
class Rect:
"""A 2D rectangle defined by top-left and bottom-right positions"""
def __init__(self, left, right, bottom, top):
self.left = left
self.right = right
self.bottom = bottom
self.top = top
def inside(self, x, y):
"""Checks if a given x, y point is inside the rect"""
return (self.left <= x <= self.right) and (self.bottom <= y <= self.top)
@staticmethod
def from_input(string):
match = re.search(r"target area: x=(-?\d*)..(-?\d*), y=(-?\d*)..(-?\d*)", string)
if match:
left = int(match.group(1))
right = int(match.group(2))
bottom = int(match.group(3))
top = int(match.group(4))
return Rect(left, right, bottom, top)
assert False # Shouldn't reach
return None
def sign(_n):
if _n > 0:
return 1
if _n < 0:
return -1
return 0
def hit_target(vx0, vy0, target:Rect) -> Tuple[bool, int]:
"""Simulate the probe shooting and check if the probe reaches the target area.
Returns wether probe reaches the target area in a discrete t and, in that case,
the maximum height it reaches on the trajectory."""
velocity_x = vx0
velocity_y = vy0
probe_x = 0
probe_y = 0
_t = 0
max_height = 0
while probe_x < target.right and probe_y > target.bottom:
probe_x += velocity_x
probe_y += velocity_y
max_height = max(max_height, probe_y)
velocity_x -= sign(velocity_x)
velocity_y -= 1
_t += 1
if target.inside(probe_x, probe_y):
return True, max_height
return False, 0
puzzle = Rect.from_input("target area: x=209..238, y=-86..-59")
example = Rect.from_input("target area: x=20..30, y=-10..-5")
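# Sanity check with velocities quoted in the puzzle's worked example:
# (7, 2) lands inside the example target area, while (17, -4) overshoots it.
assert hit_target(7, 2, example)[0]
assert not hit_target(17, -4, example)[0]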
def both_parts_bruteforce(target):
global_maxima = 0
hit_count = 0
# do a smart brute-force over sensible ranges
min_vx = 0
max_vx = target.right # max speed is hitting the right of the area in t=1
min_vy = min(target.bottom, target.top) # use the same reasoning as for maxvy
max_vy = -min_vy # not much thinkin here (explore the same range in positive than in negative)
for velocity_x in range(min_vx, max_vx+1):
min_vx = math.floor((1 + math.sqrt(1 + target.left * 8)) / 2)
max_vx = target.right
for velocity_y in range(min_vy, max_vy+1):
hit, maxy = hit_target(velocity_x, velocity_y, target)
if hit:
global_maxima = max(global_maxima, maxy)
hit_count += 1
print(f"What is the highest y position it reaches on this trajectory? {global_maxima}")
print(f"How many distinct initial velocity values cause the probe to be within the target area after any step?: {hit_count}")
both_parts_bruteforce(example)
both_parts_bruteforce(puzzle)
|
[
"re.search",
"math.sqrt"
] |
[((567, 643), 're.search', 're.search', (['"""target area: x=(-?\\\\d*)..(-?\\\\d*), y=(-?\\\\d*)..(-?\\\\d*)"""', 'string'], {}), "('target area: x=(-?\\\\d*)..(-?\\\\d*), y=(-?\\\\d*)..(-?\\\\d*)', string)\n", (576, 643), False, 'import re\n'), ((2363, 2393), 'math.sqrt', 'math.sqrt', (['(1 + target.left * 8)'], {}), '(1 + target.left * 8)\n', (2372, 2393), False, 'import math\n')]
|
import numpy as np
from collections import namedtuple
import skimage.measure
#import matplotlib.pyplot as plt
#import ipdb
# could maybe turn this into a generic mutable namedtuple
class Point2D(object):
__slots__ = "x", "y"
def __init__(self, x, y):
self.x = x
self.y = y
def __iter__(self):
'''iterate over fields tuple/list style'''
for field_name in self.__slots__:
yield getattr(self, field_name)
def __getitem__(self, index):
'''tuple/list style getitem'''
return getattr(self, self.__slots__[index])
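# Small illustration (not used elsewhere): Point2D supports tuple-style access,
# so Point2D(3, 4)[0] == 3 and tuple(Point2D(3, 4)) == (3, 4).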
# NOTE IterateOverWindows and IterateOverSuperpixels must share the same iter() interface
# TODO create IterateOverOverlappingWindows(IterateOverWindows), which enforces
# pixel_stride <= pixels_per_cell
#
# NOTE if pixel_stride > pixels_per_cell/2, it is possible to leave data unseen on the
# right/bottom boarder of an image
#
# this is similar to matlab's im2col
class IterateOverWindows(object):
def __init__(self, pixels_per_cell, pixel_stride=None, image=None,
mode='constant', cval=0,
start_pt=(0, 0), stop_pt=(None, None)):
''' Sliding window iterator.
Parameters
----------
pixels_per_cell : array_like
x,y - let x,y be odd so the window can be easily centered
pixel_stride : array_like, optional
x,y
image : array_like, optional
like numpy.array (ndim == 2 or 3)
mode : str, optional
Points outside the boundaries of the input are filled according to the
given mode. Only ``mode='constant'``, ``mode='discard'`` and
``mode='reflect'`` are currently supported, although others could be
added (e.g., 'nearest' and 'wrap')
cval : float, optional
Value used for points outside the boundaries of the input if
``mode='constant'``. Default is 0.0
start_pt : array_like, optional
(x,y)
stop_pt : array_like, optional
(x,y)
>>> tot = 0; im = np.arange(100).reshape((10,10))
>>> for i,ret in enumerate(IterateOverWindows((5,5),(2,2),cval=1).iter(im)):
... tot += ret[0].sum()
... #print(i, ':\n', ret[0])
>>> print(tot) # weak test
22647
>>> tot = 0; im = np.arange(81).reshape((9,9)).T
>>> for i,ret in enumerate(IterateOverWindows((5,5),(2,2),mode='reflect').iter(im)):
... tot += ret[0].sum()
... #print(i, ':\n', ret[0])
>>> print(tot) # weak test
25000
'''
assert pixels_per_cell[0] % 2 == 1 and pixels_per_cell[1] % 2 == 1, \
'provide an odd number for pixels_per_cell to easily center the window'
self.pixels_per_cell = tuple(pixels_per_cell)
self.pixel_stride = self.pixels_per_cell if pixel_stride is None else pixel_stride
self.image = image
self.mode = mode
self.cval = cval
self.start_pt = Point2D(*(int(s) for s in start_pt))
self.stop_pt = Point2D(*(stop_pt))
def setImage(self, image):
'''
Parameters
----------
image : array_like
like numpy.array (ndim == 2 or 3)
'''
self.image = image
return self
def shape(self):
if self.image is None: raise TypeError("self.image cannot be of type NoneType")
nrows, ncols = self.image.shape[0:2]
stop_x = ncols if self.stop_pt.x is None else int(self.stop_pt.x)
stop_y = nrows if self.stop_pt.y is None else int(self.stop_pt.y)
roi_height = stop_y-self.start_pt.y
roi_width = stop_x-self.start_pt.x
#print(roi_width, roi_height, self.pixel_stride)
nrows = np.ceil(float(roi_height)/self.pixel_stride[1]).astype(int)
ncols = np.ceil(float(roi_width)/self.pixel_stride[0]).astype(int)
return (nrows, ncols)
def iter(self,image=None):
'''Next window generator
Parameters
----------
image : array_like
like numpy.array (ndim == 2 or 3)
Returns
-------
numpy.array, optional
chip : pixels within the current window. Points outside the
boundaries of the input are filled according to the given mode.
numpy.array
mask : the binary mask of the window within the chip
BoundingBox
bbox : the inclusive extents of the chip (which may exceed the bounds
of the image)
MODIFICATIONS
sgr : turned into a class
sgr : added mode='reflect'
'''
if image is not None: self.image = image
elif self.image is None: raise TypeError("self.image cannot be of type NoneType")
nrows, ncols = self.image.shape[0:2]
# NOTE could iterate over the interior of the image without bounds checking
# for additional speedup
BoundingBox = namedtuple("BoundingBox", "min_x max_x min_y max_y")
pixels_per_half_cell = self.pixels_per_cell[0]//2, self.pixels_per_cell[1]//2
ystrides_per_image, xstrides_per_image = self.shape()
# iterate around the boarder of the image
for r in xrange(ystrides_per_image):
for c in xrange(xstrides_per_image):
# chip out pixels in this sliding window
min_x = self.start_pt.x + self.pixel_stride[0]*c - pixels_per_half_cell[0]
max_x = min_x+self.pixels_per_cell[0]
min_y = self.start_pt.y + self.pixel_stride[1]*r - pixels_per_half_cell[1]
max_y = min_y+self.pixels_per_cell[1]
bbox = BoundingBox(min_x,max_x,min_y,max_y)
min_x, max_x = max(0, bbox.min_x), min(ncols, bbox.max_x)
min_y, max_y = max(0, bbox.min_y), min(nrows, bbox.max_y)
#print('c=%d'%c, 'r=%d'%r, min_x, max_x, min_y, max_y)
chip = self.image[min_y:max_y, min_x:max_x, ...]
# couch chip in a fixed-size window
# REVIEW I could refactor handling the boarder into pad_image(). then mode wouldn't
# be necessary here and I could simply loop over the image.
# RE this is more efficient though
if self.mode == 'constant' or self.mode == 'reflect':
chunk = np.empty(
self.pixels_per_cell + ((self.image.shape[2],) if self.image.ndim == 3 else ()),
dtype=self.image.dtype.type)
chunk[:] = self.cval
mask = np.zeros(self.pixels_per_cell)
min_x = self.start_pt.x + self.pixel_stride[0]*c - pixels_per_half_cell[0]
max_x = min(self.pixels_per_cell[0], ncols - min_x)
min_x = max(0, -min_x)
min_y = self.start_pt.y + self.pixel_stride[1]*r - pixels_per_half_cell[1]
max_y = min(self.pixels_per_cell[1], nrows - min_y)
min_y = max(0, -min_y)
#print('c=%d'%c, 'r=%d'%r, min_x, max_x, min_y, max_y)
#print()
chunk[min_y:max_y, min_x:max_x, ...] = chip
mask[min_y:max_y, min_x:max_x] = 1
if self.mode == 'reflect':
nrows_chunk, ncols_chunk = chunk.shape[0:2]
# NOTE assume the points outside the boundaries of input can be filled from chip.
# this seems harder than it should be...
chunk[:min_y, min_x:max_x, ...] = np.flipud(np.atleast_2d( # top border
chip[:min_y, :, ...]))
chunk[min_y:max_y, :min_x, ...] = np.fliplr(np.atleast_2d( # left border
chip[:, :min_x, ...]))
# NOTE neg indice trikery (flipping first simplifies indexing)
chunk[max_y:, min_x:max_x, ...] = np.atleast_2d( # bottom border
np.flipud(chip)[:nrows_chunk-max_y, :, ...])
chunk[min_y:max_y, max_x:, ...] = np.atleast_2d( # right border
np.fliplr(chip)[:, :ncols_chunk-max_x, ...])
chunk[:min_y, :min_x, ...] = np.fliplr(np.flipud(np.atleast_2d( # top-left corner
chip[:min_y, :min_x, ...])))
chunk[:min_y, max_x:, ...] = np.flipud(np.atleast_2d( # top-right corner
np.fliplr(chip)[:min_y, :ncols_chunk-max_x, ...]))
chunk[max_y:, max_x:, ...] = np.atleast_2d( # bottom-right corner
np.flipud(np.fliplr(chip))[:nrows_chunk-max_y, :ncols_chunk-max_x, ...])
chunk[max_y:, :min_x, ...] = np.fliplr(np.atleast_2d( # bottom-left corner
np.flipud(chip)[:nrows_chunk-max_y, :min_x, ...]))
elif self.mode == 'discard':
mask = np.ones_like(chip)
chunk = chip
else:
assert False, 'unrecognized mode'
# FIXME should bbox be max-1 like in the superpixel version
yield chunk, mask, bbox
class IterateOverSuperpixels(object):
    def __init__(self, segmented, image=None):
        '''
        Parameters
        ----------
        segmented : array_like
            Superpixel labeled segmentation (like numpy.array)
            NOTE regionprops expects labels to be sequential and start
            at 1: {1,2,...}. label 0 is treated as unlabeled.
        image : array_like, optional
            like numpy.array (ndim == 2 or 3)
        '''
        self.segmented = segmented
        self.image = image
def setImage(self, image):
'''
Parameters
----------
image : array_like
like numpy.array (ndim == 2 or 3)
'''
self.image = image
return self
def iter(self, image=None):
'''Next superpixel generator
Parameters
----------
image : array_like, optional
like numpy.array (ndim == 2 or 3)
Returns
-------
        numpy.array
            chip : pixels within the bounding box of the current superpixel
        numpy.array
            mask : the binary mask of the superpixel within the chip
        BoundingBox
            bbox : the inclusive extents of the chip (always within the bounds
            of the image)
MODIFICATIONS
sgr : optimized
sgr : turned into a class
'''
if image is not None: self.image = image
elif self.image is None: raise TypeError("self.image cannot be of type NoneType")
# regionprops() treats label zero (0) as unlabeled and ignores it
# TODO remove small, unconnected components
properties = skimage.measure.regionprops(self.segmented)
BoundingBox = namedtuple("BoundingBox", "min_x max_x min_y max_y")
for rp in properties:
            # regions absent from the label image have no slice; skip them
            if rp._slice is None: continue
            (min_y, min_x, max_y, max_x) = rp.bbox
            chip = self.image[min_y:max_y, min_x:max_x, ...]
mask = rp.filled_image
bbox = BoundingBox(min_x,max_x-1,min_y,max_y-1)
yield (chip, mask, bbox)
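
# Minimal usage sketch for the superpixel iterator above (a hedged illustration,
# not part of the original module): it assumes `numpy` and `skimage.measure` are
# importable, as both are referenced earlier in this module, and that the installed
# skimage version exposes the private `_slice` attribute used by iter(). The label
# image and demo image below are fabricated purely for illustration.
if __name__ == '__main__':
    import numpy as np
    demo_image = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
    # two hand-made "superpixels"; labels start at 1 because label 0 is ignored
    labels = np.zeros((64, 64), dtype=int)
    labels[:32, :] = 1
    labels[32:, :] = 2
    for chip, mask, bbox in IterateOverSuperpixels(labels).iter(demo_image):
        print(chip.shape, mask.shape, bbox)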
|
[
"numpy.ones_like",
"numpy.empty",
"numpy.zeros",
"numpy.flipud",
"numpy.fliplr",
"collections.namedtuple",
"numpy.atleast_2d"
] |
[((4577, 4629), 'collections.namedtuple', 'namedtuple', (['"""BoundingBox"""', '"""min_x max_x min_y max_y"""'], {}), "('BoundingBox', 'min_x max_x min_y max_y')\n", (4587, 4629), False, 'from collections import namedtuple\n'), ((9981, 10033), 'collections.namedtuple', 'namedtuple', (['"""BoundingBox"""', '"""min_x max_x min_y max_y"""'], {}), "('BoundingBox', 'min_x max_x min_y max_y')\n", (9991, 10033), False, 'from collections import namedtuple\n'), ((5839, 5961), 'numpy.empty', 'np.empty', (['(self.pixels_per_cell + ((self.image.shape[2],) if self.image.ndim == 3 else\n ()))'], {'dtype': 'self.image.dtype.type'}), '(self.pixels_per_cell + ((self.image.shape[2],) if self.image.ndim ==\n 3 else ()), dtype=self.image.dtype.type)\n', (5847, 5961), True, 'import numpy as np\n'), ((6035, 6065), 'numpy.zeros', 'np.zeros', (['self.pixels_per_cell'], {}), '(self.pixels_per_cell)\n', (6043, 6065), True, 'import numpy as np\n'), ((8168, 8186), 'numpy.ones_like', 'np.ones_like', (['chip'], {}), '(chip)\n', (8180, 8186), True, 'import numpy as np\n'), ((6908, 6943), 'numpy.atleast_2d', 'np.atleast_2d', (['chip[:min_y, :, ...]'], {}), '(chip[:min_y, :, ...])\n', (6921, 6943), True, 'import numpy as np\n'), ((7032, 7067), 'numpy.atleast_2d', 'np.atleast_2d', (['chip[:, :min_x, ...]'], {}), '(chip[:, :min_x, ...])\n', (7045, 7067), True, 'import numpy as np\n'), ((7280, 7295), 'numpy.flipud', 'np.flipud', (['chip'], {}), '(chip)\n', (7289, 7295), True, 'import numpy as np\n'), ((7428, 7443), 'numpy.fliplr', 'np.fliplr', (['chip'], {}), '(chip)\n', (7437, 7443), True, 'import numpy as np\n'), ((7534, 7574), 'numpy.atleast_2d', 'np.atleast_2d', (['chip[:min_y, :min_x, ...]'], {}), '(chip[:min_y, :min_x, ...])\n', (7547, 7574), True, 'import numpy as np\n'), ((7719, 7734), 'numpy.fliplr', 'np.fliplr', (['chip'], {}), '(chip)\n', (7728, 7734), True, 'import numpy as np\n'), ((7890, 7905), 'numpy.fliplr', 'np.fliplr', (['chip'], {}), '(chip)\n', (7899, 7905), True, 'import numpy as np\n'), ((8062, 8077), 'numpy.flipud', 'np.flipud', (['chip'], {}), '(chip)\n', (8071, 8077), True, 'import numpy as np\n')]
|
from common import *
from os.path import join as join_path, isdir
from shutil import rmtree
from os import mkdir
import feedparser
from bs4 import BeautifulSoup as bs
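# NOTE: `languages`, `VALIDATION_SET_DIR` and `sanitize_text` (all used below) are
# expected to be provided by the wildcard import from `common`.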
languages_names = [x['name'] for x in languages]
rss_sources = {
'da': [
'https://politiken.dk/rss/senestenyt.rss',
'https://borsen.dk/rss/'
],
'de': [
'http://www.spiegel.de/index.rss',
'https://www.faz.net/rss/aktuell/'
],
'en': [
'http://feeds.washingtonpost.com/rss/rss_powerpost',
'http://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml'
],
'es': [
'http://ep00.epimg.net/rss/elpais/portada.xml',
'https://e00-elmundo.uecdn.es/elmundo/rss/espana.xml'
],
'fi': [
'https://www.iltalehti.fi/rss/uutiset.xml',
'https://www.uusisuomi.fi/raha/feed'
],
'fr': [
'https://www.lemonde.fr/rss/une.xml',
'http://www.lefigaro.fr/rss/figaro_flash-actu.xml'
],
'hu': [
'https://nepszava.hu/feed',
'https://www.vg.hu/feed/'
],
'it': [
'https://www.fanpage.it/feed/',
'http://www.ansa.it/campania/notizie/campania_rss.xml'
],
'nl': [
'https://www.telegraaf.nl/rss',
'https://www.ad.nl/nieuws/rss.xml'
],
'no': [
'https://www.vg.no/rss/feed/forsiden/?format=rss',
'https://www.aftenposten.no/rss'
],
'pl': [
'http://rss.gazeta.pl/pub/rss/najnowsze_wyborcza.xml',
'https://www.rp.pl/rss/1019'
],
'pt': [
'https://feeds.folha.uol.com.br/emcimadahora/rss091.xml',
'http://feeds.jn.pt/JN-Nacional'
],
'ro': [
'https://evz.ro/rss.xml',
'https://adevarul.ro/rss/'
],
'ru': [
'https://www.mk.ru/rss/index.xml',
'https://iz.ru/xml/rss/all.xml'
],
'sv': [
'https://www.di.se/rss',
'https://www.arbetarbladet.se/feed'
],
'uk': [
'https://ukurier.gov.ua/uk/feed/',
'http://day.kyiv.ua/uk/news-rss.xml'
],
'vi': [
'https://vnexpress.net/rss/tin-moi-nhat.rss',
'https://www.tienphong.vn/rss/ho-chi-minh-288.rss'
]
}
def text_from_html(html):
return bs(html, "lxml").text
if __name__ == '__main__':
if isdir(VALIDATION_SET_DIR):
user_input = input("Validation set directory already exists, should delete it and re-fetch the data? Y/N\n")
if user_input.lower() != 'y':
print("Nothing to do.")
exit(0)
else:
print("Deleting old validate set dir", VALIDATION_SET_DIR)
rmtree(VALIDATION_SET_DIR)
print("Creating new directory", VALIDATION_SET_DIR)
mkdir(VALIDATION_SET_DIR)
# for lang in ['vi']:
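    # For each configured language, write one UTF-8 file of validation samples;
    # each line is a sanitized "<title> <summary>" string, kept only when longer
    # than 200 characters and truncated to exactly 200.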
for lang in languages_names:
print(lang)
if lang not in rss_sources:
print("\tSkipping", lang, "as there are no sources.")
continue
with open(join_path(VALIDATION_SET_DIR, lang), 'wb') as f:
for source in rss_sources[lang]:
feed = feedparser.parse(source)
items = feed.entries
for item in items:
title = text_from_html(item['title'])
summary = text_from_html(item['summary'])
validation_text = sanitize_text(title) + ' ' + sanitize_text(summary)
if len(validation_text) > 200:
validation_text = validation_text[:200]
f.write(validation_text.encode("UTF-8"))
f.write('\n'.encode("UTF-8"))
# print('\t', title, ' -> ', summary, ' -> ', validation_text)
print("\tfound", len(items), "feeds in", source)
|
[
"feedparser.parse",
"os.mkdir",
"os.path.isdir",
"bs4.BeautifulSoup",
"shutil.rmtree",
"os.path.join"
] |
[((2266, 2291), 'os.path.isdir', 'isdir', (['VALIDATION_SET_DIR'], {}), '(VALIDATION_SET_DIR)\n', (2271, 2291), False, 'from os.path import join as join_path, isdir\n'), ((2689, 2714), 'os.mkdir', 'mkdir', (['VALIDATION_SET_DIR'], {}), '(VALIDATION_SET_DIR)\n', (2694, 2714), False, 'from os import mkdir\n'), ((2208, 2224), 'bs4.BeautifulSoup', 'bs', (['html', '"""lxml"""'], {}), "(html, 'lxml')\n", (2210, 2224), True, 'from bs4 import BeautifulSoup as bs\n'), ((2601, 2627), 'shutil.rmtree', 'rmtree', (['VALIDATION_SET_DIR'], {}), '(VALIDATION_SET_DIR)\n', (2607, 2627), False, 'from shutil import rmtree\n'), ((2937, 2972), 'os.path.join', 'join_path', (['VALIDATION_SET_DIR', 'lang'], {}), '(VALIDATION_SET_DIR, lang)\n', (2946, 2972), True, 'from os.path import join as join_path, isdir\n'), ((3054, 3078), 'feedparser.parse', 'feedparser.parse', (['source'], {}), '(source)\n', (3070, 3078), False, 'import feedparser\n')]
|