max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5)
---|---|---|---|---|---|---
CHT/update_het_probs.py | aryam7/WASP | 72 | 12787651 |
import gzip
import argparse
import math
import sys
from argparse import ArgumentParser
import tables
import util
def parse_options():
parser = ArgumentParser(description="""This script adjusts
heterozygote probabilities in CHT files to account for
possible genotyping errors. Total counts of reference
and alternative alleles are used to adjust the
probability. The read counts that are provided can be
from the same experiment, combined across many different
experiments, or (perhaps ideally) from DNA sequencing
of the same individual.""")
parser.add_argument("infile", action='store',
help="Input file for Combined Haplotype "
"Test (CHT) that needs het probabilities "
"adjusted",
default=None)
parser.add_argument("outfile", action='store',
help="Output CHT file with heterozygote "
"probabilities adjusted",
default=None)
parser.add_argument("--ref_as_counts",
action='store',
help="Path to HDF5 file containing counts "
"of reads that match reference allele",
metavar="REF_AS_COUNT_H5_FILE",
required=True)
parser.add_argument("--alt_as_counts",
help="Path to HDF5 file containing counts "
"of reads that match alternate allele",
metavar="ALT_AS_COUNT_H5_FILE",
action='store', required=True)
return parser.parse_args()
def main():
error = 0.01
args = parse_options()
if util.is_gzipped(args.infile):
infile = gzip.open(args.infile, "rt")
else:
infile = open(args.infile, "rt")
if args.outfile.endswith(".gz"):
outfile = gzip.open(args.outfile, "wt")
else:
outfile = open(args.outfile, "wt")
ref_count_h5 = tables.open_file(args.ref_as_counts)
alt_count_h5 = tables.open_file(args.alt_as_counts)
snp_line = infile.readline()
if snp_line:
outfile.write(snp_line)
else:
sys.stderr.write("The input file was empty.\n")
exit(-1)
snp_line = infile.readline()
while snp_line:
snpinfo = snp_line.strip().split()
if snpinfo[9] == "NA":
outfile.write(snp_line)
else:
new_hetps = process_one_snp(snpinfo, ref_count_h5,
alt_count_h5, error)
outfile.write("\t".join(snpinfo[:10] +
[";".join(new_hetps)] +
snpinfo[11:]) + "\n")
snp_line = infile.readline()
ref_count_h5.close()
alt_count_h5.close()
def process_one_snp(snpinfo, ref_count_h5, alt_count_h5, error):
chrm = snpinfo[0]
# positions of target SNPs
snplocs = [int(y.strip()) for y in snpinfo[9].split(';')]
# heterozygote probabilities of target SNPs
hetps = [float(y.strip()) for y in snpinfo[10].split(';')]
update_hetps = []
ref_node = ref_count_h5.get_node("/%s" % chrm)
alt_node = alt_count_h5.get_node("/%s" % chrm)
for i in range(len(snplocs)):
pos = snplocs[i]
adr = ref_node[pos-1]
ada = alt_node[pos-1]
update_hetps.append(str(get_posterior_hetp(hetps[i], adr,
ada, error)))
return update_hetps
def get_posterior_hetp(hetp_prior, adr, ada, error):
prior = min(0.99, hetp_prior)
badlike = addlogs(math.log(error)*adr +
math.log(1-error)*ada,
math.log(1-error)*adr +
math.log(error)*ada)
goodlike = math.log(0.5)*adr + math.log(0.5)*ada
if goodlike-badlike > 40:
# avoid overflow (very close to 1.0)
return 1.0
else:
return prior*math.exp(goodlike - badlike) / (prior*math.exp(goodlike - badlike) + (1.0 - prior))
def addlogs(loga, logb):
return max(loga, logb) + math.log(1+math.exp(-abs(loga-logb)))
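# Added illustration (not part of the original WASP script): addlogs() is a
# numerically stable log(exp(loga) + exp(logb)); for example
#   addlogs(math.log(0.5), math.log(0.5)) == math.log(1.0) == 0.0 (up to rounding).
# get_posterior_hetp() uses it to weigh a "genotyping error" likelihood against
# the heterozygote likelihood, shrinking the prior het probability when the
# reference/alternate read counts are strongly unbalanced.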
main()
| 2.84375 | 3 |
server.py | mudo121/vizbox | 0 | 12787652 |
#! /usr/bin/env python
import json
import signal
import sys
from socket import error
import time
from tornado.ioloop import IOLoop
from tornado.web import Application, RequestHandler, StaticFileHandler
from tornado.websocket import WebSocketHandler
from backendbase import BackendBase
from rosbackend import RosBackend
CMD_BTN1 = "Next Step"
CMD_BTN2 = "Stop"
class ChallengeHandler(RequestHandler):
def initialize(self, backend):
self.backend = backend
def get(self):
print "Update ROS Params"
self.backend.updateRosParams()
print "Rendering..."
self.render("templates/challenge.html",
visualization="Robot camera image",
title=self.backend.title,
storyline=self.backend.storyline
)
def post(self):
if self.get_argument("btn") == "1":
print('btn1 pushed')
self.backend.btn_pushed(CMD_BTN1)
if self.get_argument("btn") == "2":
print('btn2 pushed')
self.backend.btn_pushed(CMD_BTN2)
class CommandReceiver(RequestHandler):
def initialize(self, backend):
self.backend = backend
def post(self, *args, **kwargs):
command = self.get_argument("command")
self.backend.accept_command(command)
print(command)
class MessageForwarder(WebSocketHandler):
def __init__(self, *args, **kwargs):
self.backend = kwargs.pop('backend')
super(MessageForwarder, self).__init__(*args, **kwargs)
def check_origin(self, origin):
return True
def open(self):
print("opening WebSocket")
self.backend.attach_operator_text(self.handle_operator_text)
self.backend.attach_robot_text(self.handle_robot_text)
self.backend.attach_challenge_step(self.handle_challenge_step)
self.backend.attach_image(self.handle_image)
self.backend.attach_story(self.handle_story)
print("WebSocket opened")
def on_message(self, message):
self.write_message(u"You said: " + message)
def on_close(self):
print("WebSocket closed")
self.backend.detach_operator_text(self.handle_operator_text)
self.backend.detach_robot_text(self.handle_robot_text)
self.backend.detach_challenge_step(self.handle_challenge_step)
self.backend.detach_image(self.handle_image)
self.backend.detach_story(self.handle_story)
def handle_operator_text(self, text):
print "handle_operator_text({})".format(text)
data = {"label": "operator_text", "text": "Operator : "+text}
data = json.dumps(data)
self.write_message(data)
def handle_robot_text(self, text):
print "handle_robot_text({})".format(text)
data = {"label": "robot_text", "text": "Robot : "+text}
data = json.dumps(data)
self.write_message(data)
def handle_challenge_step(self, step):
print "handle_challenge_step({})".format(step)
data = {"label": "challenge_step", "index": step}
data = json.dumps(data)
self.write_message(data)
def handle_image(self, image):
print "handle_image({})".format(len(image))
data = {"label": "image", "image": image}
data = json.dumps(data)
self.write_message(data)
def handle_story(self, title_storyline):
print "handle_story({})".format(title_storyline)
title, storyline = title_storyline
data = {"label": "story", "title": title, "storyline": storyline}
data = json.dumps(data)
self.write_message(data)
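# Each handle_* callback above forwards a JSON object whose "label" field
# ("operator_text", "robot_text", "challenge_step", "image", "story")
# identifies the message type for the page listening on the /ws WebSocket.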
def handle_shutdown(*arg, **kwargs):
IOLoop.instance().stop()
def main(hostIp = "localhost"):
backend = RosBackend.get_instance(shutdown_hook=handle_shutdown)
signal.signal(signal.SIGINT, handle_shutdown)
signal.signal(signal.SIGQUIT, handle_shutdown) # SIGQUIT is sent by our supervisord to stop this server.
signal.signal(signal.SIGTERM, handle_shutdown) # SIGTERM is sent by Ctrl+C or supervisord's default.
print "Shutdown handler connected"
app = Application([
(r"/ws", MessageForwarder, {'backend': backend}),
(r'/', ChallengeHandler, {'backend': backend}),
(r'/command', CommandReceiver, {'backend': backend}),
(r'/static/(.*)', StaticFileHandler, {'path': 'static/'}),
(r'/(favicon\.ico)', StaticFileHandler, {'path': 'static/favicon.ico'})],
debug=True,
template_path="templates")
# address, port = "localhost", 8888
address, port = hostIp, 8888
print "Application instantiated"
connected = False
while not connected:
try:
print "Listening..."
app.listen(port, address)
print "Listening on http://{addr}:{port}".format(addr=address, port=port)
connected = True
except error as ex:
print "{ex}. Cannot start, trying in a bit".format(ex=ex)
time.sleep(1)
print "Starting IOLoop"
IOLoop.instance().start()
if __name__ == "__main__":
try:
hostIp = sys.argv[1]
main(hostIp)
except IndexError:
print("Please specifiy the host ip of the vizbox Server as the first argument")
| 2.34375 | 2 |
python/decorator_test.py | walterfan/snippets | 1 | 12787653 | registry = []
def register(func):
print("register function {}".format(func))
registry.append(func)
return func
@register
def hello():
print("hello")
@register
def world():
print("world")
if __name__ == '__main__':
hello()
world()
for func in registry:
print("there is a registered {}".format(func)) | 3.1875 | 3 |
xbot/xsql.py | xinthral/streamlabs | 0 | 12787654 |
"""
SLCB runs on IronPython, those cheeky whores:
https://stackoverflow.com/a/66794423/13825434
"""
try:
import sqlite3
except:
import clr
clr.AddReference("IronPython.SQLite.dll")
clr.AddReference("IronPython.Modules.dll")
finally:
import re
import sqlite3
class Database:
""" Static Class Scope Variables """
_delim = ';::;'
# _library = 'library.db'
_library = 'Services/Scripts/xbot/library.db'
_tables = ['facts', 'jokes', 'phrases', 'rather']
@staticmethod
def create_connection(db_file=_library):
""" Establish Connection to database object and return connection object """
conn = None
try:
conn = sqlite3.connect(db_file)
except Exception as e:
raise(e)
return(conn)
@staticmethod
def queryTableAll(tbl_name=_tables[0]):
""" Query all items from a specific database in the database object """
con = Database.create_connection()
c = con.cursor()
c.execute("SELECT * FROM {}".format(tbl_name))
rows = c.fetchall()
con.close()
return(rows)
@staticmethod
def queryTableID(item_id=0, tbl_name=_tables[0]):
""" Query a specific items ID from a specific database in the database object """
con = Database.create_connection()
c = con.cursor()
c.execute("SELECT * FROM {} WHERE pid={}".format(tbl_name, item_id))
item = c.fetchone()
con.close()
return(item)
@staticmethod
def getTableHeaders(tbl_name=_tables[0]):
""" Query Headers for specific table in database object """
return([ele[1] for ele in Database.showTableSchema(tbl_name)])
@staticmethod
def insert(payload, tbl_name=_tables[0]):
if len(payload[0].split(Database._delim)) < 1:
print('Empty Payload')
return(False)
con = Database.create_connection()
c = con.cursor()
columns = Database.getTableHeaders(tbl_name)
columns = ', '.join(columns)
elements = [str(Database.getTableCount(tbl_name))] + list(payload)
# Yeah, this next line is gross
elements = ', '.join([("\"{}\"".format(str(ele).encode('UTF-8').replace('"', r"\'"))) for ele in elements])
sqlQuery = "INSERT INTO {} ({}) VALUES ({})".format(tbl_name, columns, elements)
c.execute(sqlQuery)
con.commit()
con.close()
return(True)
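# Note: the formatted-string INSERT above is injection-prone; with the same
# schema, sqlite3 parameter substitution would be a safer sketch (hypothetical
# `values` list holding the row's fields):
#   placeholders = ', '.join('?' * len(values))
#   c.execute("INSERT INTO {} ({}) VALUES ({})".format(tbl_name, columns, placeholders), values)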
@staticmethod
def insertFact(payload):
""" Insert fact payload into database object (wrapper) """
#payload: ['input text', 'category', blocked: 0/1]
return(Database.insert(payload, 'facts'))
@staticmethod
def insertJoke(payload):
""" Inserts joke payload into database object (wrapper) """
#payload: ['input text', 'category', blocked: 0/1]
return(Database.insert(payload, 'jokes'))
@staticmethod
def insertPhrase(payload):
""" Inserts phrase payload into database object (wrapper) """
#payload: ['input text', 'category', blocked: 0/1]
return(Database.insert(payload, 'phrases'))
@staticmethod
def insertRather(payload):
""" Inserts would you rather payload into database object (wrapper) """
#payload: ['input text', 'category', blocked: 0/1]
return(Database.insert(payload, 'rather'))
@staticmethod
def updateVisibility(payload):
""" Update visibility value for a record """
if len(payload[0].split(Database._delim)) < 1:
print('Empty Payload')
return(False)
# table, column, value, pid
table, column, value, pid = payload
table.replace('"', '""')
column.replace('"', '""')
con = Database.create_connection()
c = con.cursor()
c.execute('UPDATE "{}" SET "{}" = {} WHERE pid = {}'.format(table, column, value, pid))
con.commit()
con.close()
return(True)
@staticmethod
def showTables():
""" Query table names from database object """
con = Database.create_connection()
c = con.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type=\'table\'")
rows = c.fetchall()
con.close()
return(rows)
@staticmethod
def showTableSchema(tbl_name=_tables[0]):
""" Query Schema for specific table in database object """
con = Database.create_connection()
c = con.cursor()
c.execute("PRAGMA table_info({})".format(tbl_name))
rows = c.fetchall()
con.close()
return(rows)
@staticmethod
def getTableCount(tbl_name=_tables[0]):
""" Query item count from a specific table in database object """
con = Database.create_connection()
c = con.cursor()
c.execute("SELECT COUNT(*) FROM {}".format(tbl_name))
count = c.fetchone()[0]
con.close()
return(count)
@staticmethod
def queryTableCategory(tbl_name=_tables[0], cat='dad'):
"""
Query all elements for a specific category from a table in the database object
"""
con = Database.create_connection()
c = con.cursor()
c.execute("SELECT * FROM {} WHERE category=\'{}\'".format(tbl_name, cat))
rows = c.fetchall()
con.close()
return(rows)
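# Hedged usage sketch (assumes library.db exists at Database._library with the
# tables listed in Database._tables):
#   print(Database.showTables())
#   print(Database.getTableCount('jokes'))
#   Database.insertJoke(['Example joke text', 'dad', 0])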
| 2.53125 | 3 |
CURSO UDEMY/EXERCICIOS/EX008.py | CamilliCerutti/Exercicios-de-Python-curso-em-video | 0 | 12787655 | """
CREATE A FUNCTION THAT RECEIVES 2 NUMBERS. THE FIRST IS A VALUE AND THE SECOND IS A PERCENTAGE. RETURN THE FIRST NUMBER PLUS THE PERCENTAGE INCREASE APPLIED TO IT.
"""
n1 = int(input('Digite um numero: '))
porcentagem = int(input('Digite o percentual: '))
def soma(num = n1, porcento = porcentagem):
valor_descontado = num * (porcento / 100)
return num + valor_descontado
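# Worked example (added for clarity): with a value of 100 and a percentage of 10,
# the expected result is 100 + 100 * (10 / 100) = 110.0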
print(soma()) | 3.71875 | 4 |
parser.py | hrw/fosdem-videos | 4 | 12787656 |
#!/usr/bin/python3
from datetime import date, datetime
from jinja2 import Environment, FileSystemLoader
import xml.etree.ElementTree as ET
import sys
xml_file = 'xml'
if len(sys.argv) > 1:
xml_file = sys.argv[1]
tree = ET.parse(xml_file)
root = tree.getroot()
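# The XML is expected to be a FOSDEM schedule export (pentabarf-style):
# <schedule><day><room><event> elements with <persons>, <links> and
# <attachments> children, which the loop below flattens into `talks`.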
talks = []
show_webm = False
slides = 0
videos = 0
for day in root.iter('day'):
year = date.fromisoformat(day.attrib['date']).year
if day.attrib['index'] == '1':
day_name = 'Sat'
elif day.attrib['index'] == '2':
day_name = 'Sun'
for room in day:
room_name = room.attrib['name']
for talk in room.iter('event'):
new_talk = {
'room': room_name,
'start': '%s %s' % (day_name, talk.find('start').text),
'title': talk.find('title').text,
'track': talk.find('track').text,
'slug': talk.find('slug').text,
}
persons = []
for person in talk.find('persons').iter('person'):
persons.append(person.text)
new_talk['persons'] = ', '.join(persons)
if talk.find('attachments'):
for link in talk.find('attachments'):
new_talk['slides'] = link.attrib['href']
slides += 1
for link in talk.find('links'):
if 'WebM' in link.text:
new_talk['webm'] = link.attrib['href']
videos += 1
show_webm = True
elif 'mp4' in link.text or 'mp4' in link.attrib['href']:
new_talk['mp4'] = link.attrib['href']
videos += 1
elif 'Slides' in link.text or 'Presentation' in link.text:
new_talk['slides'] = link.attrib['href']
slides += 1
talks.append(new_talk)
file_loader = FileSystemLoader('templates')
env = Environment(loader=file_loader)
template = env.get_template('index.html.j2')
output = template.render(generate_time=datetime.strftime(datetime.utcnow(),
"%d %B %Y %H:%M"),
talks=talks, year=year, show_webm=show_webm,
years=range(2015, 2022))
print(output)
print(f"Talks: {len(talks)} Slides: {slides} Videos: {videos}",
file=sys.stderr)
| 2.40625 | 2 |
acq4/util/Canvas/items/ImageCanvasItem.py | tropp/ACQ4 | 0 | 12787657 | # -*- coding: utf-8 -*-
from acq4.pyqtgraph.Qt import QtCore, QtGui
from CanvasItem import CanvasItem
import numpy as np
import scipy.ndimage as ndimage
import acq4.pyqtgraph as pg
import acq4.util.DataManager as DataManager
import acq4.util.debug as debug
class ImageCanvasItem(CanvasItem):
def __init__(self, image=None, **opts):
"""
CanvasItem displaying an image.
The image may be 2 or 3-dimensional.
Options:
image: May be a fileHandle, ndarray, or GraphicsItem.
handle: May optionally be specified in place of image
"""
## If no image was specified, check for a file handle..
if image is None:
image = opts.get('handle', None)
item = None
self.data = None
self.currentT = None
if isinstance(image, QtGui.QGraphicsItem):
item = image
elif isinstance(image, np.ndarray):
self.data = image
elif isinstance(image, DataManager.FileHandle):
opts['handle'] = image
self.handle = image
self.data = self.handle.read()
if 'name' not in opts:
opts['name'] = self.handle.shortName()
try:
if 'transform' in self.handle.info():
tr = pg.SRTTransform3D(self.handle.info()['transform'])
tr = pg.SRTTransform(tr) ## convert to 2D
opts['pos'] = tr.getTranslation()
opts['scale'] = tr.getScale()
opts['angle'] = tr.getRotation()
else: ## check for older info formats
if 'imagePosition' in self.handle.info():
opts['scale'] = self.handle.info()['pixelSize']
opts['pos'] = self.handle.info()['imagePosition']
elif 'Downsample' in self.handle.info():
### Needed to support an older format stored by 2p imager
if 'pixelSize' in self.handle.info():
opts['scale'] = self.handle.info()['pixelSize']
if 'microscope' in self.handle.info():
m = self.handle.info()['microscope']
print 'm: ',m
print 'mpos: ', m['position']
opts['pos'] = m['position'][0:2]
else:
info = self.data._info[-1]
opts['pos'] = info.get('imagePosition', None)
elif hasattr(self.data, '_info'):
info = self.data._info[-1]
opts['scale'] = info.get('pixelSize', None)
opts['pos'] = info.get('imagePosition', None)
else:
opts['defaultUserTransform'] = {'scale': (1e-5, 1e-5)}
opts['scalable'] = True
except:
debug.printExc('Error reading transformation for image file %s:' % image.name())
if item is None:
item = pg.ImageItem()
CanvasItem.__init__(self, item, **opts)
self.histogram = pg.PlotWidget()
self.blockHistogram = False
self.histogram.setMaximumHeight(100)
self.levelRgn = pg.LinearRegionItem()
self.histogram.addItem(self.levelRgn)
self.updateHistogram(autoLevels=True)
# addWidget arguments: row, column, rowspan, colspan
self.layout.addWidget(self.histogram, self.layout.rowCount(), 0, 1, 3)
self.timeSlider = QtGui.QSlider(QtCore.Qt.Horizontal)
#self.timeSlider.setMinimum(0)
#self.timeSlider.setMaximum(self.data.shape[0]-1)
self.layout.addWidget(self.timeSlider, self.layout.rowCount(), 0, 1, 3)
self.timeSlider.valueChanged.connect(self.timeChanged)
self.timeSlider.sliderPressed.connect(self.timeSliderPressed)
self.timeSlider.sliderReleased.connect(self.timeSliderReleased)
thisRow = self.layout.rowCount()
self.edgeBtn = QtGui.QPushButton('Edge')
self.edgeBtn.clicked.connect(self.edgeClicked)
self.layout.addWidget(self.edgeBtn, thisRow, 0, 1, 1)
self.meanBtn = QtGui.QPushButton('Mean')
self.meanBtn.clicked.connect(self.meanClicked)
self.layout.addWidget(self.meanBtn, thisRow+1, 0, 1, 1)
self.tvBtn = QtGui.QPushButton('tv denoise')
self.tvBtn.clicked.connect(self.tvClicked)
self.layout.addWidget(self.tvBtn, thisRow+2, 0, 1, 1)
self.maxBtn = QtGui.QPushButton('Max no Filter')
self.maxBtn.clicked.connect(self.maxClicked)
self.layout.addWidget(self.maxBtn, thisRow, 1, 1, 1)
self.maxBtn2 = QtGui.QPushButton('Max w/Gaussian')
self.maxBtn2.clicked.connect(self.max2Clicked)
self.layout.addWidget(self.maxBtn2, thisRow+1, 1, 1, 1)
self.maxMedianBtn = QtGui.QPushButton('Max w/Median')
self.maxMedianBtn.clicked.connect(self.maxMedianClicked)
self.layout.addWidget(self.maxMedianBtn, thisRow+2, 1, 1, 1)
self.filterOrder = QtGui.QComboBox()
self.filterLabel = QtGui.QLabel('Order')
for n in range(1,11):
self.filterOrder.addItem("%d" % n)
self.layout.addWidget(self.filterLabel, thisRow+3, 2, 1, 1)
self.layout.addWidget(self.filterOrder, thisRow+3, 3, 1, 1)
self.zPlanes = QtGui.QComboBox()
self.zPlanesLabel = QtGui.QLabel('# planes')
for s in ['All', '1', '2', '3', '4', '5']:
self.zPlanes.addItem("%s" % s)
self.layout.addWidget(self.zPlanesLabel, thisRow+3, 0, 1, 1)
self.layout.addWidget(self.zPlanes, thisRow + 3, 1, 1, 1)
## controls that only appear if there is a time axis
self.timeControls = [self.timeSlider, self.edgeBtn, self.maxBtn, self.meanBtn, self.maxBtn2,
self.maxMedianBtn, self.filterOrder, self.zPlanes]
if self.data is not None:
self.updateImage(self.data)
self.graphicsItem().sigImageChanged.connect(self.updateHistogram)
self.levelRgn.sigRegionChanged.connect(self.levelsChanged)
self.levelRgn.sigRegionChangeFinished.connect(self.levelsChangeFinished)
@classmethod
def checkFile(cls, fh):
if not fh.isFile():
return 0
ext = fh.ext().lower()
if ext == '.ma':
return 10
elif ext in ['.ma', '.png', '.jpg', '.tif']:
return 100
return 0
def timeChanged(self, t):
self.graphicsItem().updateImage(self.data[t])
self.currentT = t
def tRange(self):
"""
for a window around the current image, define a range for
averaging or whatever
"""
sh = self.data.shape
if self.currentT is None:
tsel = range(0, sh[0])
else:
sel = self.zPlanes.currentText()
if sel == 'All':
tsel = range(0, sh[0])
else:
ir = int(sel)
llim = self.currentT - ir
if llim < 0:
llim = 0
rlim = self.currentT + ir
if rlim > sh[0]:
rlim = sh[0]
tsel = range(llim, rlim)
return tsel
def timeSliderPressed(self):
self.blockHistogram = True
def edgeClicked(self):
## unsharp mask to enhance fine details
fd = self.data.asarray().astype(float)
blur = ndimage.gaussian_filter(fd, (0, 1, 1))
blur2 = ndimage.gaussian_filter(fd, (0, 2, 2))
dif = blur - blur2
#dif[dif < 0.] = 0
self.graphicsItem().updateImage(dif.max(axis=0))
self.updateHistogram(autoLevels=True)
def maxClicked(self):
## just the max of a stack
tsel = self.tRange()
fd = self.data[tsel,:,:].asarray().astype(float)
self.graphicsItem().updateImage(fd.max(axis=0))
print 'max stack image update done'
self.updateHistogram(autoLevels=True)
#print 'histogram updated'
def max2Clicked(self):
## just the max of a stack, after a little 3d bluring
tsel = self.tRange()
fd = self.data[tsel,:,:].asarray().astype(float)
filt = self.filterOrder.currentText()
n = int(filt)
blur = ndimage.gaussian_filter(fd, (n,n,n))
print 'image blurred'
self.graphicsItem().updateImage(blur.max(axis=0))
print 'image update done'
self.updateHistogram(autoLevels=True)
#print 'histogram updated'
def maxMedianClicked(self):
## just the max of a stack, after a little 3d bluring
tsel = self.tRange()
fd = self.data[tsel,:,:].asarray().astype(float)
filt = self.filterOrder.currentText()
n = int(filt) + 1 # value of 1 is no filter so start with 2
blur = ndimage.median_filter(fd, size=n)
self.graphicsItem().updateImage(blur.max(axis=0))
self.updateHistogram(autoLevels=True)
def meanClicked(self):
## just the max of a stack
tsel = self.tRange()
fd = self.data[tsel,:,:].asarray().astype(float)
self.graphicsItem().updateImage(fd.mean(axis=0))
self.updateHistogram(autoLevels=True)
def tvClicked(self):
tsel = self.tRange()
fd = self.data[tsel,:,:].asarray().astype(float)
filt = self.filterOrder.currentText()
n = (int(filt) + 1) # value of 1 is no filter so start with 2
blur = self.tv_denoise(fd, weight=n, n_iter_max=5)
self.graphicsItem().updateImage(blur.max(axis=0))
self.updateHistogram(autoLevels=True)
def timeSliderReleased(self):
self.blockHistogram = False
self.updateHistogram()
def updateHistogram(self, autoLevels=False):
if self.blockHistogram:
return
x, y = self.graphicsItem().getHistogram()
if x is None: ## image has no data
return
self.histogram.clearPlots()
self.histogram.plot(x, y)
if autoLevels:
self.graphicsItem().updateImage(autoLevels=True)
w, b = self.graphicsItem().getLevels()
self.levelRgn.blockSignals(True)
self.levelRgn.setRegion([w, b])
self.levelRgn.blockSignals(False)
def updateImage(self, data, autoLevels=True):
self.data = data
if data.ndim == 4:
showTime = True
elif data.ndim == 3:
if data.shape[2] <= 4: ## assume last axis is color
showTime = False
else:
showTime = True
else:
showTime = False
if showTime:
self.timeSlider.setMinimum(0)
self.timeSlider.setMaximum(self.data.shape[0]-1)
self.timeSlider.valueChanged.connect(self.timeChanged)
self.timeSlider.sliderPressed.connect(self.timeSliderPressed)
self.timeSlider.sliderReleased.connect(self.timeSliderReleased)
#self.timeSlider.show()
#self.maxBtn.show()
self.graphicsItem().updateImage(data[self.timeSlider.value()])
else:
#self.timeSlider.hide()
#self.maxBtn.hide()
self.graphicsItem().updateImage(data, autoLevels=autoLevels)
for widget in self.timeControls:
widget.setVisible(showTime)
tr = self.saveTransform()
self.resetUserTransform()
self.restoreTransform(tr)
self.updateHistogram(autoLevels=autoLevels)
def levelsChanged(self):
rgn = self.levelRgn.getRegion()
self.graphicsItem().setLevels(rgn)
self.hideSelectBox()
def levelsChangeFinished(self):
self.showSelectBox()
def _tv_denoise_3d(self, im, weight=100, eps=2.e-4, n_iter_max=200):
"""
Perform total-variation denoising on 3-D arrays
Parameters
----------
im: ndarray
3-D input data to be denoised
weight: float, optional
denoising weight. The greater ``weight``, the more denoising (at
the expense of fidelity to ``input``)
eps: float, optional
relative difference of the value of the cost function that determines
the stop criterion. The algorithm stops when:
(E_(n-1) - E_n) < eps * E_0
n_iter_max: int, optional
maximal number of iterations used for the optimization.
Returns
-------
out: ndarray
denoised array
Notes
-----
Rudin, Osher and Fatemi algorithm
Examples
---------
First build synthetic noisy data
>>> x, y, z = np.ogrid[0:40, 0:40, 0:40]
>>> mask = (x -22)**2 + (y - 20)**2 + (z - 17)**2 < 8**2
>>> mask = mask.astype(np.float)
>>> mask += 0.2*np.random.randn(*mask.shape)
>>> res = tv_denoise_3d(mask, weight=100)
"""
px = np.zeros_like(im)
py = np.zeros_like(im)
pz = np.zeros_like(im)
gx = np.zeros_like(im)
gy = np.zeros_like(im)
gz = np.zeros_like(im)
d = np.zeros_like(im)
i = 0
while i < n_iter_max:
d = - px - py - pz
d[1:] += px[:-1]
d[:, 1:] += py[:, :-1]
d[:, :, 1:] += pz[:, :, :-1]
out = im + d
E = (d**2).sum()
gx[:-1] = np.diff(out, axis=0)
gy[:, :-1] = np.diff(out, axis=1)
gz[:, :, :-1] = np.diff(out, axis=2)
norm = np.sqrt(gx**2 + gy**2 + gz**2)
E += weight * norm.sum()
norm *= 0.5 / weight
norm += 1.
px -= 1./6.*gx
px /= norm
py -= 1./6.*gy
py /= norm
pz -= 1/6.*gz
pz /= norm
E /= float(im.size)
if i == 0:
E_init = E
E_previous = E
else:
if np.abs(E_previous - E) < eps * E_init:
break
else:
E_previous = E
i += 1
return out
def _tv_denoise_2d(self, im, weight=50, eps=2.e-4, n_iter_max=200):
"""
Perform total-variation denoising
Parameters
----------
im: ndarray
input data to be denoised
weight: float, optional
denoising weight. The greater ``weight``, the more denoising (at
the expense of fidelity to ``input``)
eps: float, optional
relative difference of the value of the cost function that determines
the stop criterion. The algorithm stops when:
(E_(n-1) - E_n) < eps * E_0
n_iter_max: int, optional
maximal number of iterations used for the optimization.
Returns
-------
out: ndarray
denoised array
Notes
-----
The principle of total variation denoising is explained in
http://en.wikipedia.org/wiki/Total_variation_denoising
This code is an implementation of the algorithm of Rudin, Fatemi and Osher
that was proposed by Chambolle in [1]_.
References
----------
.. [1] <NAME>, An algorithm for total variation minimization and
applications, Journal of Mathematical Imaging and Vision,
Springer, 2004, 20, 89-97.
Examples
---------
>>> import scipy
>>> lena = scipy.lena()
>>> import scipy
>>> lena = scipy.lena().astype(np.float)
>>> lena += 0.5 * lena.std()*np.random.randn(*lena.shape)
>>> denoised_lena = tv_denoise(lena, weight=60.0)
"""
px = np.zeros_like(im)
py = np.zeros_like(im)
gx = np.zeros_like(im)
gy = np.zeros_like(im)
d = np.zeros_like(im)
i = 0
while i < n_iter_max:
d = -px -py
d[1:] += px[:-1]
d[:, 1:] += py[:, :-1]
out = im + d
E = (d**2).sum()
gx[:-1] = np.diff(out, axis=0)
gy[:, :-1] = np.diff(out, axis=1)
norm = np.sqrt(gx**2 + gy**2)
E += weight * norm.sum()
norm *= 0.5 / weight
norm += 1
px -= 0.25*gx
px /= norm
py -= 0.25*gy
py /= norm
E /= float(im.size)
if i == 0:
E_init = E
E_previous = E
else:
if np.abs(E_previous - E) < eps * E_init:
break
else:
E_previous = E
i += 1
return out
def tv_denoise(self, im, weight=50, eps=2.e-4, keep_type=False, n_iter_max=200):
"""
Perform total-variation denoising on 2-d and 3-d images
Parameters
----------
im: ndarray (2d or 3d) of ints, uints or floats
input data to be denoised. `im` can be of any numeric type,
but it is cast into an ndarray of floats for the computation
of the denoised image.
weight: float, optional
denoising weight. The greater ``weight``, the more denoising (at
the expense of fidelity to ``input``)
eps: float, optional
relative difference of the value of the cost function that
determines the stop criterion. The algorithm stops when:
(E_(n-1) - E_n) < eps * E_0
keep_type: bool, optional (False)
whether the output has the same dtype as the input array.
keep_type is False by default, and the dtype of the output
is np.float
n_iter_max: int, optional
maximal number of iterations used for the optimization.
Returns
-------
out: ndarray
denoised array
Notes
-----
The principle of total variation denoising is explained in
http://en.wikipedia.org/wiki/Total_variation_denoising
The principle of total variation denoising is to minimize the
total variation of the image, which can be roughly described as
the integral of the norm of the image gradient. Total variation
denoising tends to produce "cartoon-like" images, that is,
piecewise-constant images.
This code is an implementation of the algorithm of Rudin, Fatemi and Osher
that was proposed by Chambolle in [1]_.
References
----------
.. [1] <NAME>, An algorithm for total variation minimization and
applications, Journal of Mathematical Imaging and Vision,
Springer, 2004, 20, 89-97.
Examples
---------
>>> import scipy
>>> # 2D example using lena
>>> lena = scipy.lena()
>>> import scipy
>>> lena = scipy.lena().astype(np.float)
>>> lena += 0.5 * lena.std()*np.random.randn(*lena.shape)
>>> denoised_lena = tv_denoise(lena, weight=60)
>>> # 3D example on synthetic data
>>> x, y, z = np.ogrid[0:40, 0:40, 0:40]
>>> mask = (x -22)**2 + (y - 20)**2 + (z - 17)**2 < 8**2
>>> mask = mask.astype(np.float)
>>> mask += 0.2*np.random.randn(*mask.shape)
>>> res = tv_denoise_3d(mask, weight=100)
"""
im_type = im.dtype
if not im_type.kind == 'f':
im = im.astype(np.float)
if im.ndim == 2:
out = self._tv_denoise_2d(im, weight, eps, n_iter_max)
elif im.ndim == 3:
out = self._tv_denoise_3d(im, weight, eps, n_iter_max)
else:
raise ValueError('only 2-d and 3-d images may be denoised with this function')
if keep_type:
return out.astype(im_type)
else:
return out
| 2.40625 | 2 |
App.py | goph-R/NodeEditor | 0 | 12787658 |
import os
from PySide2.QtCore import QObject, QCoreApplication, QSettings
from PySide2.QtWidgets import QApplication
from MainWindow import MainWindow
from Style import Style
class App(QObject):
def __init__(self, args):
super(App, self).__init__()
QCoreApplication.setOrganizationName('Dynart')
QCoreApplication.setApplicationName('NodeEditor')
self._qApp = QApplication(args)
self._qApp.setStyle(Style())
self._mainWindow = MainWindow(self)
self._mainWindow.show()
def run(self):
settings = QSettings()
self._mainWindow.restoreGeometry(settings.value('mainWindowGeometry'))
self._mainWindow.restoreState(settings.value('mainWindowState'))
return self._qApp.exec_()
def exit(self, returnCode=0):
settings = QSettings()
settings.setValue('mainWindowGeometry', self._mainWindow.saveGeometry())
settings.setValue('mainWindowState', self._mainWindow.saveState())
self._qApp.exit(returnCode)
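# Window geometry and dock/toolbar state are persisted via QSettings under the
# Dynart/NodeEditor organization/application names set in __init__, so the
# layout saved in exit() is restored by run() on the next launch.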
| 2.265625 | 2 |
app/room/games/fibbing_it.py | hmajid2301/banter-bus-core-api | 0 | 12787659 | from app.game_state.game_state_models import (
FibbingItQuestion,
FibbingItState,
GameState,
NextQuestion,
UpdateQuestionRoundState,
)
from app.player.player_models import Player
from app.room.games.abstract_game import AbstractGame
from app.room.games.exceptions import UnexpectedGameStateType
from app.room.room_events_models import GotNextQuestion, GotQuestionFibbingIt
class FibbingIt(AbstractGame):
def got_next_question(self, player: Player, game_state: GameState, next_question: NextQuestion) -> GotNextQuestion:
if not isinstance(game_state.state, FibbingItState):
raise UnexpectedGameStateType("expected `game_state.state` to be of type `FibbingItState`")
is_player_fibber = player.player_id == game_state.state.current_fibber_id
got_next_question = self._get_got_next_question(is_player_fibber, next_question)
return got_next_question
@staticmethod
def _get_got_next_question(is_player_fibber: bool, next_question: NextQuestion) -> GotNextQuestion:
if not isinstance(next_question.next_question, FibbingItQuestion):
raise UnexpectedGameStateType("expected `next_question.next_question` to be of type `FibbingItQuestion`")
question = next_question.next_question.question
if is_player_fibber:
question = next_question.next_question.fibber_question
got_next_question = GotNextQuestion(
question=GotQuestionFibbingIt(
is_fibber=is_player_fibber,
question=question,
answers=next_question.next_question.answers,
),
updated_round=UpdateQuestionRoundState(**next_question.updated_round.dict()),
timer_in_seconds=next_question.timer_in_seconds,
)
return got_next_question
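# The fibber is shown `fibber_question` while every other player receives the
# real `question`; both variants share the same answers list, round state and
# timer, so only the prompt text differs per player.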
| 2.265625 | 2 |
sem_seg/train-apollo.py | ahmed-anas/thesis-pointnet | 1 | 12787660 |
import os
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import math
import argparse
import math
import h5py
import numpy as np
import tensorflow as tf
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# tf.logging.set_verbosity(tf.logging.ERROR)
import socket
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(ROOT_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import provider
import tf_util
from model import *
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=4096, help='Point number [default: 4096]')
parser.add_argument('--max_epoch', type=int, default=50, help='Epoch to run [default: 50]')
parser.add_argument('--batch_size', type=int, default=12, help='Batch Size during training [default: 12]')
parser.add_argument('--learning_rate', type=float, default=0.000001, help='Initial learning rate [default: 0.000001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]')
parser.add_argument('--optimizer', default='momentum', help='adam or momentum [default: momentum]')
parser.add_argument('--decay_step', type=int, default=300000, help='Decay step for lr decay [default: 300000]')
parser.add_argument('--decay_rate', type=float, default=0.5, help='Decay rate for lr decay [default: 0.5]')
parser.add_argument('--test_recordings', type=str, default='11', help='Which recording numbers to use for test, i.e "1,2", "1", "3", "3,4,5" [default: 11]')
parser.add_argument('--dir_path_h5', type=str, default='data/apollo_sem_seg_hdf5_data', help='directory containing the h5 files')
parser.add_argument('--use_saved_model', type=str, default='no', help='yes or no')
FLAGS = parser.parse_args()
LOAD_FULL_DATA = False
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR):
os.mkdir(LOG_DIR)
USE_SAVED_MODEL = False
if FLAGS.use_saved_model == 'yes':
USE_SAVED_MODEL = True
print('using saved model')
elif FLAGS.use_saved_model != 'no':
raise ValueError('use_saved_model param must be either yes or no')
os.system('cp model.py %s' % (LOG_DIR)) # bkp of model def
os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
MAX_NUM_POINT = 4096
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
#BN_DECAY_DECAY_STEP = float(DECAY_STEP * 2)
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
HOSTNAME = socket.gethostname()
# DIR_PATH_H5 = os.path.join(ROOT_DIR, 'data/apollo_sem_seg_hdf5_data_test')
DIR_PATH_H5 = FLAGS.dir_path_h5
if not os.path.exists(DIR_PATH_H5):
raise ValueError('the given h5 directory is invalid')
H5_FILES = [os.path.join(DIR_PATH_H5, file_h5) for file_h5 in os.listdir(DIR_PATH_H5) if file_h5[-2:] == 'h5']
#ALL_FILES = provider.getDataFiles('data/apollo_sem_seg_hdf5_data')
room_filelist = [line.rstrip() for line in open(os.path.join(DIR_PATH_H5, 'room_filelist.txt'))]
classMappings = [line.rstrip() for line in open(os.path.join(DIR_PATH_H5, 'class_mappings.txt'))]
NUM_CLASSES = len(classMappings)
BATCH_SIZE_H5 = provider.loadDataFile(H5_FILES[0])[0].shape[0]
# Load ALL data
# if LOAD_FULL_DATA:
# data_batch_list = []
# label_batch_list = []
# for i,h5_filename in enumerate(H5_FILES):
# if i%10 == 0:
# print("loading h5 file: " , i, h5_filename)
# data_batch, label_batch = provider.loadDataFile(h5_filename)
# data_batch_list.append(data_batch)
# label_batch_list.append(label_batch)
# if LOAD_FULL_DATA:
# print('---all loaded---')
# data_batches = np.concatenate(data_batch_list, 0)
# data_batch_list = None
# label_batches = np.concatenate(label_batch_list, 0)
# label_batch_list = None
# print(data_batches.shape)
# print(label_batches.shape)
data_for_training = np.empty(len(room_filelist), dtype=bool)
test_recordings = [str(int(recording_number)).zfill(3) for recording_number in FLAGS.test_recordings.split(',')]
#test_recordings = 'Area_'+str(FLAGS.test_area)
# if LOAD_FULL_DATA:
# train_idxs = []
# test_idxs = []
total_training_data = 0
total_testing_data = 0
for i,room_name in enumerate(room_filelist):
#remove this
if i%4==0:
total_testing_data += 1
data_for_training[i] = False
#if room_name[6:9] in test_recordings:
# if LOAD_FULL_DATA:
# test_idxs.append(i)
else:
total_training_data += 1
data_for_training[i] = True
# if LOAD_FULL_DATA:
# train_idxs.append(i)
# if LOAD_FULL_DATA:
# train_data = data_batches[train_idxs,...]
# train_label = label_batches[train_idxs]
# test_data = data_batches[test_idxs,...]
# test_label = label_batches[test_idxs]
# data_batches = None
# label_batches = None
# print(train_data.shape, train_label.shape)
# print(test_data.shape, test_label.shape)
current_train_idx = 0
current_test_idx = 0
last_loaded_file_index = None
last_loaded_file_data = None
last_loaded_file_label = None
def reset_train_data():
global current_train_idx
current_train_idx = 0
def reset_test_data():
global current_test_idx
current_test_idx = 0
def can_get_test_data():
global current_test_idx
return current_test_idx < data_for_training.shape[0]
def can_get_train_data():
global current_train_idx
global last_loaded_file_index
global last_loaded_file_data
global last_loaded_file_label
return current_train_idx < data_for_training.shape[0]
# h5_fileindex = int(math.floor( current_train_idx / float(BATCH_SIZE_H5) ))
# if h5_fileindex + 1 < len(H5_FILES):
# return True
# if last_loaded_file_index != h5_fileindex:
# h5_filename = H5_FILES[h5_fileindex]
# last_loaded_file_data, last_loaded_file_label = provider.loadDataFile(h5_filename)
# last_loaded_file_index = h5_fileindex
# start_idx_batch = current_train_idx - (h5_fileindex * BATCH_SIZE_H5)
# h5_remaining_batch_size = BATCH_SIZE_H5 - start_idx_batch
# return h5_remaining_batch_size > 0
def get_train_or_test_data(amount, for_training):
global current_train_idx
global current_test_idx
global last_loaded_file_index
global last_loaded_file_data
global last_loaded_file_label
local_data_batch_list = []
local_label_batch_list = []
total_retrieved = 0
if for_training:
index_for_run = current_train_idx
else:
index_for_run = current_test_idx
while total_retrieved < amount and index_for_run < data_for_training.shape[0]:
#total_retrieved += 1
h5_fileindex = int(math.floor( index_for_run / float(BATCH_SIZE_H5) ))
if last_loaded_file_index != h5_fileindex:
h5_filename = H5_FILES[h5_fileindex]
last_loaded_file_data, last_loaded_file_label = provider.loadDataFile(h5_filename)
last_loaded_file_index = h5_fileindex
amount_to_retrieve = amount - total_retrieved
start_idx_batch = index_for_run - (h5_fileindex * BATCH_SIZE_H5)
h5_remaining_batch_size = BATCH_SIZE_H5 - start_idx_batch
total_remaining_size = data_for_training.shape[0] - start_idx_batch
amount_to_fetch_from_batch = min(amount_to_retrieve, h5_remaining_batch_size, total_remaining_size)
start_idx_total = index_for_run
end_idx_total = start_idx_total + amount_to_fetch_from_batch
end_idx_batch = start_idx_batch + amount_to_fetch_from_batch
if for_training:
data_batch = (last_loaded_file_data[start_idx_batch:end_idx_batch]) [data_for_training[start_idx_total:end_idx_total],:,:]
label_batch = (last_loaded_file_label[start_idx_batch:end_idx_batch]) [data_for_training[start_idx_total:end_idx_total],:]
else:
arr = data_for_training[start_idx_total:end_idx_total] == False
data_batch = (last_loaded_file_data[start_idx_batch:end_idx_batch]) [arr,:,:]
label_batch = (last_loaded_file_label[start_idx_batch:end_idx_batch]) [arr,:]
total_retrieved += data_batch.shape[0]
index_for_run += amount_to_fetch_from_batch
local_data_batch_list.append(data_batch)
local_label_batch_list.append(label_batch)
local_data_batches = np.concatenate(local_data_batch_list, 0)
local_label_batches = np.concatenate(local_label_batch_list, 0)
if for_training:
current_train_idx = index_for_run
else:
current_test_idx = index_for_run
return local_data_batches, local_label_batches
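# get_train_or_test_data streams `amount` blocks starting at the global
# train/test cursor, loading one HDF5 file at a time (BATCH_SIZE_H5 blocks per
# file) and selecting rows through the data_for_training mask, so the whole
# dataset never has to be held in memory at once.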
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def get_learning_rate(batch):
learning_rate = tf.train.exponential_decay(
BASE_LEARNING_RATE, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
DECAY_STEP, # Decay step.
DECAY_RATE, # Decay rate.
staircase=True)
learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!!
return learning_rate
def get_bn_decay(batch):
bn_momentum = tf.train.exponential_decay(
BN_INIT_DECAY,
batch*BATCH_SIZE,
BN_DECAY_DECAY_STEP,
BN_DECAY_DECAY_RATE,
staircase=True)
bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
return bn_decay
def train(use_saved_model ):
with tf.Graph().as_default():
with tf.device('/gpu:'+str(GPU_INDEX)):
pointclouds_pl, labels_pl = placeholder_inputs(BATCH_SIZE, NUM_POINT)
is_training_pl = tf.placeholder(tf.bool, shape=())
# Note the global_step=batch parameter to minimize.
# That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
batch = tf.Variable(0)
bn_decay = get_bn_decay(batch)
tf.summary.scalar('bn_decay', bn_decay)
# Get model and loss
pred = get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay, num_classes=NUM_CLASSES)
loss = get_loss(pred, labels_pl)
tf.summary.scalar('loss', loss)
correct = tf.equal(tf.argmax(pred, 2), tf.to_int64(labels_pl))
accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE*NUM_POINT)
tf.summary.scalar('accuracy', accuracy)
# Get training operator
learning_rate = get_learning_rate(batch)
tf.summary.scalar('learning_rate', learning_rate)
if OPTIMIZER == 'momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
elif OPTIMIZER == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(loss, global_step=batch)
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
# Init variables
init = tf.global_variables_initializer()
sess.run(init, {is_training_pl:True})
if use_saved_model:
saver.restore(sess, os.path.join(LOG_DIR,'model.ckpt'))
# Add summary writers
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
sess.graph)
test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'))
ops = {'pointclouds_pl': pointclouds_pl,
'labels_pl': labels_pl,
'is_training_pl': is_training_pl,
'pred': pred,
'loss': loss,
'train_op': train_op,
'merged': merged,
'step': batch}
if use_saved_model:
eval_one_epoch(sess, ops, test_writer)
for epoch in range(MAX_EPOCH):
log_string('**** EPOCH %03d ****' % (epoch))
sys.stdout.flush()
train_one_epoch(sess, ops, train_writer)
save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
log_string("Model saved in file: %s" % save_path)
eval_one_epoch(sess, ops, test_writer)
# # Save the variables to disk.
# if epoch % 1 == 0:
def train_one_epoch(sess, ops, train_writer):
reset_train_data()
""" ops: dict mapping from string to tf ops """
is_training = True
log_string('----')
#checking to confirm get_train_data is functioning correctly
# if LOAD_FULL_DATA:
# current_data = train_data
# current_label = train_label
# file_size = current_data.shape[0]
# num_batches = file_size // BATCH_SIZE
# num_batches = total_training_data / BATCH_SIZE
total_correct = 0
total_seen = 0
loss_sum = 0
batch_idx = -1
# for batch_idx in range(num_batches):
while can_get_train_data():
batch_idx += 1
if batch_idx % 10 == 0:
print('Current batch: %d'%(batch_idx))
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
data_for_loop, label_for_loop = get_train_or_test_data(BATCH_SIZE, True)
#this is in case the last batch has insufficient blocks, so we simply bail
if not can_get_train_data():
break;
#checking to confirm get_train_data is functioning correctly
# check_data_for_loop = current_data[start_idx:end_idx, :, :]
# check_label_for_loop = current_label[start_idx:end_idx]
# if sum(sum(sum(data_for_loop == check_data_for_loop))) != 442368:
# z = 32131
# log_string('check data for loop not match what it should be')
# raise ValueError('check data for loop not match what it should be')
#remove below comments
feed_dict = {ops['pointclouds_pl']: data_for_loop,
ops['labels_pl']: label_for_loop,
ops['is_training_pl']: is_training,}
summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred']],
feed_dict=feed_dict)
train_writer.add_summary(summary, step)
pred_val = np.argmax(pred_val, 2)
correct = np.sum(pred_val == label_for_loop)
total_correct += correct
total_seen += (BATCH_SIZE*NUM_POINT)
loss_sum += loss_val
#remove below comments
# log_string('mean loss: %f' % (loss_sum / float(num_batches)))
# log_string('accuracy: %f' % (total_correct / float(total_seen)))
def eval_one_epoch(sess, ops, test_writer):
reset_test_data()
""" ops: dict mapping from string to tf ops """
is_training = False
total_correct = 0
total_seen = 0
loss_sum = 0
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
log_string('----')
# current_data = test_data[:,0:NUM_POINT,:]
# current_label = np.squeeze(test_label)
# file_size = current_data.shape[0]
# num_batches = file_size // BATCH_SIZE
batch_idx = -1
# for batch_idx in range(num_batches):
while can_get_test_data():
batch_idx += 1
data_for_loop, label_for_loop = get_train_or_test_data(BATCH_SIZE, False)
#this is in case the last batch has insufficient blocks
if not can_get_test_data():
break
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
feed_dict = {ops['pointclouds_pl']: data_for_loop,
ops['labels_pl']: label_for_loop,
ops['is_training_pl']: is_training}
summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'], ops['loss'], ops['pred']],
feed_dict=feed_dict)
test_writer.add_summary(summary, step)
pred_val = np.argmax(pred_val, 2)
correct = np.sum(pred_val == label_for_loop)
total_correct += correct
total_seen += (BATCH_SIZE*NUM_POINT)
loss_sum += (loss_val*BATCH_SIZE)
for i in range(start_idx, end_idx):
for j in range(NUM_POINT):
try:
l = label_for_loop[i - start_idx, j - start_idx]
total_seen_class[l] += 1
total_correct_class[l] += (pred_val[i-start_idx, j] == l)
except:
l = label_for_loop[i - start_idx, j - start_idx]
total_seen_class[l] += 1
total_correct_class[l] += (pred_val[i-start_idx, j] == l)
log_string('eval mean loss: %f' % (loss_sum / float(total_seen/NUM_POINT)))
log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))
print('total correct class')
print(total_correct_class)
print('total seen class')
print(total_seen_class)
if __name__ == "__main__":
train(USE_SAVED_MODEL)
LOG_FOUT.close()
| 1.96875 | 2 |
utils/img_visual.py | Memoristor/LightWeight-HRRSI | 2 | 12787661 |
# coding=utf-8
# import matplotlib
# matplotlib.use('Agg')
from matplotlib import pyplot as plt, patches
from PIL import Image
import numpy as np
import cv2
__all__ = ['VisualByPlt', 'VisualByCV2']
class VisualByPlt(object):
"""
Use matplotlib.pyplot to draw boxes, dots or mask on a image.
Allow users to define the labels and colors of boxes and the radius and colors of dots.
Params:
image_data: 2-D or 3-D numpy array. The original background image to be drawn
kwargs: dict. The key params, and the following params are supported:
>> title: str. Title of the whole image.
>> title_size: int. The size of the title. Default value is 20
>> boxes: 2-D numpy array. 2-D coordinates preserved in the format of (y1, x1, y2, x2) for each row.
>> boxes_label: str or 1-D list. The labels of each boxes.
>> boxes_color: str or 1-D list. The colors of each boxes.
>> dots: 2-D numpy array. A 2-D numpy array in the form of (y1, x1,..., yn, xn) for each row.
>> dots_radius. int or 1-D list. The radius of each dots.
>> dots_color. str or 1-D list. The color of each dots.
>> mask. 2-D numpy array. The mask of segmentation objects or classes, note that the mask should
be the P mode of PIL.Image. The size of mask should be the same with image_data
Return:
plt(matplotlib.pyplot)
"""
def __init__(self, image_data, save_path=None, **kwargs):
self.image_data = image_data
self.save_path = save_path
self.kwargs = kwargs
self.draw()
def draw(self):
# plt.style.use('seaborn-white')
plt.figure()
# 1, Set title
if 'title' in self.kwargs.keys():
# plt.title(self.kwargs['title'],
# {'family': 'Times New Roman', 'weight': 'normal',
# 'size': self.kwargs['title_size'] if 'title_size' in self.kwargs.keys() else 20})
plt.title(self.kwargs['title'], {
'weight': 'normal',
'size': self.kwargs['title_size'] if 'title_size' in self.kwargs.keys() else 20
})
# 2, Draw boxes
if 'boxes' in self.kwargs.keys():
# 2.1 Init boxes' parameters.
boxes = np.array(self.kwargs['boxes'], ndmin=2, dtype=np.int)
boxes_color = np.tile('b', [boxes.shape[0]])
if 'boxes_color' in self.kwargs.keys():
boxes_color = self.kwargs['boxes_color']
if isinstance(boxes_color, str):
boxes_color = np.tile(boxes_color, [boxes.shape[0]])
boxes_label = np.tile('', [boxes.shape[0]])
if 'boxes_label' in self.kwargs.keys():
boxes_label = self.kwargs['boxes_label']
if isinstance(boxes_label, str):
boxes_label = np.tile(boxes_label, [boxes.shape[0]])
# 2.2 Draw every boxes.
for i in range(boxes.shape[0]):
y1, x1, y2, x2 = boxes[i]
rec = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, facecolor="none", edgecolor=boxes_color[i])
plt.gca().add_patch(rec)
if boxes_label[i]:
plt.text(x1 + 5, y1 - 8, boxes_label[i], style='italic',
bbox={'facecolor': 'grey', 'alpha': 0.5, 'pad': 5})
# 3, Draw dots
if 'dots' in self.kwargs.keys():
# 3.1 Init dots' parameters
dots = np.array(self.kwargs['dots'], ndmin=2, dtype=np.int)
dots_color = np.tile('r', [dots.shape[0]])
if 'dots_color' in self.kwargs.keys():
dots_color = self.kwargs['dots_color']
if isinstance(dots_color, str):
dots_color = np.tile(dots_color, [dots.shape[0]])
dots_radius = np.tile(2, [dots.shape[0]])
if 'dots_radius' in self.kwargs.keys():
dots_radius = self.kwargs['dots_radius']
if isinstance(dots_radius, int):
dots_radius = np.tile(dots_radius, [dots.shape[0]])
# 3.2 Draw each dots.
for i in range(dots.shape[0]):
for j in range(dots.shape[1] // 2):
circle = patches.CirclePolygon((dots[i, j * 2 + 1], dots[i, j * 2]),
color=dots_color[i], radius=dots_radius[i])
plt.gca().add_patch(circle)
# 4, Draw mask
if 'mask' in self.kwargs.keys():
img = Image.fromarray(self.image_data)
msk = Image.fromarray(self.kwargs['mask'])
bkg = Image.new('L', img.size, 128)
self.image_data = np.array(Image.composite(msk, img, bkg))
# Show image figure.
plt.imshow(self.image_data)
plt.tick_params(labelsize=15)
# Save image
if isinstance(self.save_path, str):
plt.savefig(self.save_path)
def show(self):
plt.show()
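# Hedged usage sketch (not part of the original module; argument names follow
# the docstring above):
#   img = (np.random.rand(128, 128, 3) * 255).astype(np.uint8)
#   VisualByPlt(img, title='demo', boxes=[[10, 10, 60, 60]], boxes_color='r',
#               dots=[[32, 32]], dots_radius=3).show()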
class VisualByCV2(object):
"""
Use CV2 to draw boxes, dots, polygons or mask on a image.
Allow users to define the labels and colors of boxes and the radius and colors of dots.
Params:
image_data: 2-D or 3-D numpy array. The original background image to be drawn
kwargs: dict. The key params, and the following params are supported:
>> boxes: 2-D numpy array. 2-D coordinates preserved in the format of (y1, x1, y2, x2) for each row.
>> boxes_label: str or 1-D list. The labels of each boxes.
>> boxes_color: str or 1-D list. The colors of each boxes.
>> dots: 2-D numpy array. A 2-D numpy array in the form of (y1, x1,..., yn, xn) for each row.
>> dots_radius. int or 1-D list. The radius of each dots.
>> dots_color. str or 1-D list. The color of each dots.
>> mask. 2-D numpy array. The mask of segmentation objects or classes, note that the mask should
be the P mode of PIL.Image. The size of mask should be the same with image_data
Return:
None
"""
def __init__(self, image_data, save_path=None, **kwargs):
self.image_data = image_data
self.save_path = save_path
self.kwargs = kwargs
self.draw()
def draw(self):
pass
def show(self):
pass
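# Hedged sketch (the original class is an unimplemented stub): a cv2-based
# draw() would typically mirror the 'boxes' and 'dots' options above with
#   cv2.rectangle(self.image_data, (x1, y1), (x2, y2), color, thickness)
#   cv2.circle(self.image_data, (x, y), radius, color, -1)
# and cv2.imwrite(self.save_path, self.image_data) for saving.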
| 2.9375 | 3 |
scripts/mut_floydwarshall.py | dew-uff/versioned-prov | 1 | 12787662 | from datetime import datetime
from pprint import pprint
import extensible_provn.view.mutable_prov
import annotations as prov
HIDE = prov.HIDE
SPECIFIC = prov.SPECIFIC
prov.reset_prov("../generated/mutable_prov/")
prov.STATS_VIEW = 1
def time():
return datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f")
def cond(ents):
return ents
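# The script below re-runs the Floyd-Warshall triple loop over the 3x3 distance
# matrix defined at "Line 2", emitting mutable-PROV statements
# (entity/value/defined/accessed/accessedPart/derivedByInsertion) for every
# definition and access, so the generated provenance mirrors the data flow.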
# Line 1
m = 10000 # max value
with prov.desc("L1 - assign", line=1) as line:
e_n10000 = prov.entity("10000", None, prov.SCRIPT + "literal", "10000", line, attrs=HIDE)
v_10000 = prov.value("v10000", "10000", attrs=SPECIFIC)
prov.defined(e_n10000, v_10000, time(), attrs=SPECIFIC)
e_m = prov.entity("m", None, prov.SCRIPT + "name", "m", line, attrs=HIDE)
prov.activity("assign", [(e_m, e_n10000)], attrs=HIDE)
prov.accessed(e_m, v_10000, time(), attrs=SPECIFIC)
# Line 2
result = dist = [
[0, 1, 4],
[m, 0, 2],
[2, m, 0]
]
with prov.desc("L2 - list definition / assign", line=2) as line:
with prov.desc("L2 - list definition"):
e_n0 = prov.entity("0", None, prov.SCRIPT + "literal", "0", line + 1, attrs=HIDE)
v_0 = prov.value("v0", "0", attrs=SPECIFIC)
prov.defined(e_n0, v_0, time(), attrs=SPECIFIC)
e_n1 = prov.entity("1", None, prov.SCRIPT + "literal", "1", line + 1, attrs=HIDE)
v_1 = prov.value("v1", "1", attrs=SPECIFIC)
prov.defined(e_n1, v_1, time(), attrs=SPECIFIC)
e_n4 = prov.entity("4", None, prov.SCRIPT + "literal", "4", line + 1, attrs=HIDE)
v_4 = prov.value("v4", "4", attrs=SPECIFIC)
prov.defined(e_n4, v_4, time(), attrs=SPECIFIC)
e_n2 = prov.entity("2", None, prov.SCRIPT + "literal", "2", line + 2, attrs=HIDE)
v_2 = prov.value("v2", "2", attrs=SPECIFIC)
prov.defined(e_n2, v_2, time(), attrs=SPECIFIC)
prov_dist = [
[e_n0, e_n1, e_n4],
[e_m, e_n0, e_n2],
[e_n2, e_m, e_n0]
]
prov_label = [
["0", "1", "4"],
["m", "0", "2"],
["2", "m", "0"]
]
e_list = prov.entity("matrix", None, prov.SCRIPT + "list", prov.calc_label(prov_label), line)
rows = []
for i, row in enumerate(prov_dist):
v_row = prov.value("row{}".format(i), repr(dist[i]), attrs=SPECIFIC)
prov.derivedByInsertion(
e_list, v_row,
[(str(j), prov.VALUES[v]) for j, v in enumerate(row)],
time(), attrs=SPECIFIC
)
rows.append((str(i), v_row))
ti = time()
v_list = prov.value("vmatrix", repr(dist), attrs=SPECIFIC)
prov.derivedByInsertion(
e_list, v_list, rows, ti, attrs=SPECIFIC
)
prov.defined(e_list, v_list, ti, attrs=SPECIFIC)
with prov.desc("L2 - assign"):
e_dist = prov.entity("dist", None, prov.SCRIPT + "name", "dist", line)
prov.accessed(e_dist, v_list, time(), attrs=SPECIFIC)
prov.activity("assign", [(e_dist, e_list)], attrs=HIDE)
e_result = prov.entity("result", None, prov.SCRIPT + "name", "result", line)
prov.accessed(e_result, v_list, time(), attrs=SPECIFIC)
prov.activity("assign", [(e_result, e_list)], attrs=HIDE)
# Line 6
nodes = len(dist)
with prov.desc("L6 - func call / assign", line=6) as line:
e_ret = prov.entity("len_dist", None, prov.SCRIPT + "eval", "len(dist)", line)
v_3 = prov.value("v3", "3", attrs=SPECIFIC)
prov.defined(e_ret, v_3, time(), attrs=SPECIFIC)
prov.activity("call", [], [e_dist], [e_ret], label="len", attrs=HIDE)
e_nodes = prov.entity("nodes", None, prov.SCRIPT + "name", "nodes", line)
prov.accessed(e_nodes, v_3, time(), attrs=SPECIFIC)
prov.activity("assign", [(e_nodes, e_ret)], attrs=HIDE)
# Line 7
indexes = range(nodes)
with prov.desc("L7 - func call / list assign", line=7) as line:
e_ret = prov.entity("range_nodes", None, prov.SCRIPT + "eval", "range(nodes)", line)
vs = [(str(i), prov.value("v{}".format(x), repr(x), attrs=SPECIFIC)) for i, x in enumerate(indexes)]
v_range = prov.value("v_range", repr(list(indexes)), attrs=SPECIFIC)
ti = time()
prov.derivedByInsertion(
e_ret, v_range, vs, ti, attrs=SPECIFIC
)
prov.defined(e_ret, v_range, ti, attrs=SPECIFIC)
prov.activity("call", [], [e_nodes], [e_ret], label="range", attrs=HIDE)
e_indexes = prov.entity("indexes", None, prov.SCRIPT + "name", "indexes", line)
prov.accessed(e_indexes, v_range, time(), attrs=SPECIFIC)
prov.activity("assign", [(e_indexes, e_ret)], attrs=HIDE)
# Line 8
for k in indexes:
with prov.desc("L8 - loop access", line=8) as line:
e_k = prov.entity("k", None, prov.SCRIPT + "name", "k", line, show1=True, attrs=HIDE)
v_k = prov.DICTS[v_range][repr(k)]
prov.accessedPart(e_k, v_range, repr(k), v_k, time(), attrs=SPECIFIC)
prov.activity("access", used=[e_indexes], generated=[e_k], attrs=HIDE)
# Line 9
distk = dist[k]
with prov.desc("L9 - access / assign", line=9) as line:
e_dist_ak = prov.entity("dist@k", None, prov.SCRIPT + "access", "dist[k]", line, show1=True)
v_dist_ak = prov.DICTS[v_list][repr(k)]
prov.accessedPart(e_dist_ak, v_list, repr(k), v_dist_ak, time(), attrs=SPECIFIC)
prov.activity("access", used=[e_dist, e_k], generated=[e_dist_ak], attrs=HIDE)
e_distk = prov.entity("distk", None, prov.SCRIPT + "name", "distk", line, show1=True)
prov.accessed(e_distk, v_dist_ak, time(), attrs=SPECIFIC)
prov.activity("assign", [(e_distk, e_dist_ak)], attrs=HIDE)
# Line 10
for i in indexes:
with prov.desc("L10 - loop access", line=10) as line:
e_i = prov.entity("i", None, prov.SCRIPT + "name", "i", line, show1=True, attrs=HIDE)
v_i = prov.DICTS[v_range][repr(i)]
prov.accessedPart(e_i, v_range, repr(i), v_i, time(), attrs=SPECIFIC)
prov.activity("access", used=[e_indexes], generated=[e_i], attrs=HIDE)
# Line 11
with prov.desc("L11 - condition", line=11) as line:
cond([e_i, e_k])
if i == k: continue
# Line 12
disti = dist[i]
with prov.desc("L12 - access / assign", line=12) as line:
e_dist_ai = prov.entity("dist@i", None, prov.SCRIPT + "access", "dist[i]", line, show1=True)
v_dist_ai = prov.DICTS[v_list][repr(i)]
prov.accessedPart(e_dist_ai, v_list, repr(i), v_dist_ai, time(), attrs=SPECIFIC)
prov.activity("access", used=[e_dist, e_i], generated=[e_dist_ai], attrs=HIDE)
e_disti = prov.entity("disti", None, prov.SCRIPT + "name", "disti", line, show1=True)
prov.accessed(e_disti, v_dist_ai, time(), attrs=SPECIFIC)
prov.activity("assign", [(e_disti, e_dist_ai)], attrs=HIDE)
# Line 13
for j in indexes:
with prov.desc("L13 - loop access", line=13) as line:
e_j = prov.entity("j", None, prov.SCRIPT + "name", "j", line, show1=True, attrs=HIDE)
v_j = prov.DICTS[v_range][repr(j)]
prov.accessedPart(e_j, v_range, repr(j), v_j, time(), attrs=SPECIFIC)
prov.activity("access", used=[e_indexes], generated=[e_j], attrs=HIDE)
# Line 14
with prov.desc("L14 - condition", line=14) as line:
cond([e_j, e_k, e_i])
if j == k or j == i: continue
# Line 15
ikj = disti[k] + distk[j]
with prov.desc("L15 - access / access / operation / assign", line=15) as line:
e_disti_ak = prov.entity("disti@k", None, prov.SCRIPT + "access", "disti[k]", line, show1=True, attrs=HIDE)
v_disti_ak = prov.DICTS[v_dist_ai][repr(k)]
prov.accessedPart(e_disti_ak, v_dist_ai, repr(k), v_disti_ak, time(), attrs=SPECIFIC)
prov.activity("access", used=[e_disti, e_k], generated=[e_disti_ak], attrs=HIDE)
e_distk_aj = prov.entity("distk@j", None, prov.SCRIPT + "access", "distk[j]", line, show1=True, attrs=HIDE)
v_distk_aj = prov.DICTS[v_dist_ak][repr(j)]
prov.accessedPart(e_distk_aj, v_dist_ak, repr(j), v_distk_aj, time(), attrs=SPECIFIC)
prov.activity("access", used=[e_distk, e_j], generated=[e_distk_aj], attrs=HIDE)
e_sum = prov.entity("sum", None, prov.SCRIPT + "operation", "disti[k] + distk[j]", line, show1=True, attrs=HIDE)
vikj = prov.value("vsum", repr(ikj), attrs=SPECIFIC)
prov.defined(e_sum, vikj, time(), attrs=SPECIFIC)
prov.activity("+", [(e_sum, e_disti_ak, e_distk_aj)], attrs=HIDE)
e_ikj = prov.entity("ikj", None, prov.SCRIPT + "name", "ikj", line, show1=True, attrs=HIDE)
prov.accessed(e_ikj, vikj, time(), attrs=SPECIFIC)
prov.activity("assign", [(e_ikj, e_sum)], attrs=HIDE)
# Line 16
with prov.desc("L16 - access", line=16) as line:
e_disti_aj = prov.entity("disti@j", None, prov.SCRIPT + "access", "disti[j]", line, show1=True, attrs=HIDE)
v_disti_aj = prov.DICTS[v_dist_ai][repr(j)]
prov.accessedPart(e_disti_aj, v_dist_ai, repr(j), v_disti_aj, time(), attrs=SPECIFIC)
prov.activity("access", used=[e_disti, e_j], generated=[e_disti_aj], attrs=HIDE)
ucond = cond([e_disti_aj, e_ikj])
if disti[j] > ikj:
# Line 17
disti[j] = ikj
with prov.desc("L17 - part assign with propagation", line=17) as line:
used = [e_j]
used += ucond # from if
generated = []
e_disti_aj = prov.entity("disti@j", None, prov.SCRIPT + "access", "disti[j]", line, show1=True)
ti = time()
prov.derivedByInsertion(
e_disti_aj, v_dist_ai,
[(str(j), vikj)],
ti, attrs=SPECIFIC
)
prov.accessed(e_disti_aj, vikj, ti, attrs=SPECIFIC)
prov.activity("assign", [(e_disti_aj, e_ikj)], used=[e_disti], shared=True)
# Line 18
print(result[0][2])
with prov.desc("L18 - access / access / call", line=18) as line:
e_result_a0 = prov.entity("result@0", None, prov.SCRIPT + "access", "result[0]", line, attrs=HIDE)
v_result_a0 = prov.DICTS[v_list]["0"]
prov.accessedPart(e_result_a0, v_list, "0", v_result_a0, time(), attrs=SPECIFIC)
prov.activity("access", used=[e_result, e_n0], generated=[e_result_a0], attrs=HIDE)
e_result_a02 = prov.entity("result@0@2", None, prov.SCRIPT + "access", "result[0][2]", line, attrs=HIDE)
v_result_a02 = prov.DICTS[v_result_a0]["2"]
prov.accessedPart(e_result_a02, v_result_a0, "2", v_result_a02, time(), attrs=SPECIFIC)
prov.activity("access", used=[e_result_a0, e_n2], generated=[e_result_a02], attrs=HIDE)
prov.activity("print", [], [e_result_a02], attrs=HIDE)
prov.finish(show_count=False)
| 2.28125 | 2 |
qlcp21a/JZ_plotting.py | RapidLzj/QLCP21A | 0 | 12787663 | # -*- coding: utf-8 -*-
"""
201901, Dr. <NAME>, Beijing & Xinglong, NAOC
202101-? Dr. <NAME> & Dr./Prof. <NAME>
Light_Curve_Pipeline
v3 (2021A) Upgrade from former version, remove unused code
"""
import numpy as np
import matplotlib
#matplotlib.use('Agg')
from matplotlib import pyplot as plt
from .JZ_utils import meanclip
def plot_im_star(ini, img, x, y, mag, err, title, filename):
"""
Plot observed image and overplot stars
:param ini:
:param img:
:param x:
:param y:
:param mag:
:param err:
:param title:
:param filename: file to save
:return:
"""
ny, nx = img.shape
fig = plt.figure(figsize=(nx/50.0, ny/50.0))
ax = fig.add_subplot(111)
d_m, d_s = meanclip(img)
ax.imshow(img, cmap="gray",
vmin=d_m - d_s * ini["plot_img_lowsigma"],
vmax=d_m + d_s * ini["plot_img_highsigma"])
ax.set_xlim(0, nx)
ax.set_ylim(0, ny)
ix_g = np.where(err < 0.1)
ix_b = np.where(err >= 0.1)
ms = (25.0 - mag) * 5
ms[mag > 25] = 1.0
# ms[mag < 10] = 15.0
ax.scatter(x[ix_g], y[ix_g], marker="o", s=ms[ix_g], c="", edgecolors="r")
ax.scatter(x[ix_b], y[ix_b], marker="o", s=ms[ix_b], c="", edgecolors="c")
ax.set_title(title)
fig.savefig(filename, bbox_inches='tight')
plt.close()
def plot_magerr(ini, mag, err, title, filename):
"""
Plot mag-err figure
:param ini:
:param mag:
:param err:
:param title:
:param filename: file to save
:return:
"""
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111)
ax.plot(mag, err, '.')
ax.set_xlim(10, 25)
ax.set_ylim(-0.001, 1.0)
ax.set_xlabel("Mag (Inst)")
ax.set_ylabel("Error")
ax.set_title(title)
fig.savefig(filename)
plt.close()
def plot_im_target(ini, img,
target_x, target_y,
ref_x, ref_y,
chk_x, chk_y,
title, filename,
target_marker=("s", "r"),
ref_marker=("s", "y"),
chk_marker=("o", "y"),
noplot=False,
):
"""
    Plot image and mark target, reference, and check stars
:param ini:
:param img:
:param target_x:
:param target_y:
:param ref_x:
:param ref_y:
:param chk_x:
:param chk_y:
:param title:
:param filename:
:param target_marker: 2-tuple for marker, marker type and border color
:param ref_marker:
:param chk_marker:
:param noplot:
:return:
"""
ny, nx = img.shape
fig = plt.figure(figsize=(nx / 100.0, ny / 100.0))
ax = fig.add_subplot(111)
fsize = nx / 100 # font size
msize = fsize * 5 # marker size
d_m, d_s = meanclip(img)
ax.imshow(img, cmap="gray",
vmin=d_m - d_s * ini["plot_img_lowsigma"],
vmax=d_m + d_s * ini["plot_img_highsigma"])
ax.set_xlim(0, nx)
ax.set_ylim(0, ny)
if target_x is not None:
ax.scatter(target_x, target_y, marker=target_marker[0], s=msize, c=None, edgecolors=target_marker[1])
if np.isscalar(target_x): target_x = (target_x, )
if np.isscalar(target_y): target_y = (target_y, )
for i in range(len(target_x)):
ax.text(target_x[i]+fsize/2, target_y[i]+fsize/2, "T-{}".format(i),
color=target_marker[1], fontsize=fsize)
if ref_x is not None:
ax.scatter(ref_x, ref_y, marker=ref_marker[0], s=msize, c=None, edgecolors=ref_marker[1])
if np.isscalar(ref_x): ref_x = (ref_x, )
if np.isscalar(ref_y): ref_y = (ref_y, )
for i in range(len(ref_x)):
ax.text(ref_x[i]+fsize/2, ref_y[i]+fsize/2, "R-{}".format(i),
color=ref_marker[1], fontsize=fsize)
if chk_x is not None:
ax.scatter(chk_x, chk_y, marker=chk_marker[0], s=msize, c=None, edgecolors=chk_marker[1])
if np.isscalar(chk_x): chk_x = (chk_x, )
if np.isscalar(chk_y): chk_y = (chk_y, )
for i in range(len(chk_x)):
ax.text(chk_x[i]+fsize/2, chk_y[i]+fsize/2, "C-{}".format(i),
color=chk_marker[1], fontsize=fsize)
ax.set_title(title)
fig.savefig(filename, bbox_inches='tight')
if noplot:
plt.close(fig)
def plot_im_obj(ini, img,
obj_x, obj_y,
title, filename,
target_marker=("s", "r"),
noplot=False,
):
"""
Plot only objects, without using ref or check
:param ini:
:param img:
:param obj_x:
:param obj_y:
:param title:
:param filename:
:param target_marker:
:param noplot:
:return:
"""
plot_im_target(ini, img, obj_x, obj_y,
None, None, None, None,
title, filename,
target_marker,
noplot=noplot,
)
| 1.984375 | 2 |
egg/zoo/visA/callbacks.py | chan0park/EGG | 0 | 12787664 | <reponame>chan0park/EGG<filename>egg/zoo/visA/callbacks.py<gh_stars>0
import math
import warnings
import json
import logging
from typing import Dict, Any, Callable, Optional, List, Union, cast
import torch
from torch.utils.data import DataLoader
import numpy as np
from scipy import stats
from egg.core import Callback, move_to, Trainer
def cosine_dist(vecs: torch.Tensor, reduce_dims=-1) -> torch.Tensor:
vecs /= vecs.norm(dim=-1, keepdim=True)
return -(vecs.unsqueeze(0) * vecs.unsqueeze(1)).sum(reduce_dims)
def levenshtein_dist(msgs: torch.Tensor) -> torch.Tensor:
return (msgs.unsqueeze(0) != msgs.unsqueeze(1)).sum(-1)
def get_upper_triangle(x: np.ndarray) -> np.ndarray:
return x[np.triu_indices(x.shape[0])].reshape(-1)
def calculate_toposim(
inputs: torch.Tensor,
messages: torch.Tensor,
input_dist: Callable,
message_dist: Callable,
) -> float:
in_dists = get_upper_triangle(input_dist(inputs).cpu().numpy())
msg_dists = get_upper_triangle(message_dist(messages).cpu().numpy())
# spearmanr complains about dividing by a 0 stddev sometimes; just let it nan
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RuntimeWarning)
corr = stats.spearmanr(in_dists, msg_dists)[0]
if math.isnan(corr):
corr = 0
return corr
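# Illustrative sketch (toy values, not taken from the project): calculate_toposim
# correlates pairwise input distances with pairwise message distances, e.g.
#
#     toy_inputs = torch.eye(4)                                      # 4 one-hot "meanings"
#     toy_messages = torch.tensor([[0, 1], [0, 2], [1, 0], [1, 1]])  # 4 two-symbol messages
#     calculate_toposim(toy_inputs, toy_messages, cosine_dist, levenshtein_dist)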
class ToposimCallback(Callback):
trainer: "Trainer"
def __init__(
self, valid_dl: DataLoader, train_dl: DataLoader, sender, use_embeddings=True
) -> None:
super(ToposimCallback, self).__init__()
self.use_embeddings = use_embeddings
self.valid_dl = valid_dl
self.train_dl = train_dl
self.sender = sender
    def on_test_end(self, *args, **kwargs) -> None:
        return self._calculate_toposim(self.valid_dl, *args, **kwargs)
    def on_epoch_end(self, *args, **kwargs) -> None:
        return self._calculate_toposim(self.train_dl, *args, **kwargs)
    def _calculate_toposim(
self, dataloader: DataLoader, loss: float, logs: Dict[str, Any] = None
) -> None:
sender_mode = self.sender.training
self.sender.eval()
messages: List[torch.Tensor] = []
inputs = []
# Ignore repeats for toposim calculation
n_repeats = dataloader.dataset.n_repeats
dataloader.dataset.n_repeats = 1
with torch.no_grad():
for batch in dataloader:
# batch = move_to(batch, self.sender.device)
inputs.append(batch[0])
output = self.sender(batch[0])
# TODO Determine a better way to do this
# If the method RF, the output is a tuple
if type(output) == tuple:
messages.append(output[0])
else:
messages.append(output.argmax(-1))
dataloader.dataset.n_repeats = n_repeats
self.sender.train(sender_mode)
if self.use_embeddings:
embeddings = self.sender.embedding.weight.transpose(0, 1).detach()
toposim = calculate_toposim(
torch.cat(inputs, 0),
embeddings[torch.cat(messages, 0)],
cosine_dist,
lambda x: cosine_dist(x, reduce_dims=(-2, -1)),
)
else:
toposim = calculate_toposim(
torch.cat(inputs, 0),
torch.cat(messages, 0).argmax(-1),
cosine_dist,
levenshtein_dist,
)
if logs is not None:
logs["toposim"] = toposim
class MetricLogger(Callback):
def __init__(self) -> None:
super(MetricLogger, self).__init__()
self._finalized_logs: Optional[Dict[str, Any]] = None
self._train_logs: List[Dict[str, Any]] = []
self._valid_logs: List[Dict[str, Any]] = []
@staticmethod
def _detach_tensors(d: Dict[str, Any]) -> Dict[str, Any]:
return {
k: v.detach().numpy() if torch.is_tensor(v) else v for k, v in d.items()
}
def on_test_end(self, loss: float, logs: Dict[str, Any]) -> None:
log_dict = MetricLogger._detach_tensors({"loss": loss, **logs})
self._valid_logs.append(log_dict)
def on_epoch_end(self, loss: float, logs: Dict[str, Any]) -> None:
log_dict = MetricLogger._detach_tensors({"loss": loss, **logs})
self._train_logs.append(log_dict)
@staticmethod
def _dicts_to_arrays(dict_list: List[Dict[str, Any]]) -> Dict[str, np.ndarray]:
train_lists: Dict[str, List] = {}
for d in dict_list:
for field, value in d.items():
if field not in train_lists:
train_lists[field] = []
if torch.is_tensor(value):
value = value.detach().numpy()
train_lists[field].append(value)
return {k: np.array(v) for k, v in train_lists.items()}
def on_train_end(self) -> None:
assert len(self._train_logs) > 0
assert len(self._valid_logs) > 0
train_logs = MetricLogger._dicts_to_arrays(self._train_logs)
valid_logs = MetricLogger._dicts_to_arrays(self._valid_logs)
# TODO Add other post-processing of metrics
self._finalized_logs = {"train": train_logs, "valid": valid_logs}
def get_finalized_logs(self) -> Dict[str, Any]:
if self._finalized_logs is None:
raise ValueError("Logs are not yet finalized.")
else:
return self._finalized_logs
class ConsoleLogger(Callback):
def __init__(self, print_train_loss=False, as_json=False, print_test_loss=True):
self.print_train_loss = print_train_loss
self.as_json = as_json
self.epoch_counter = 0
self.print_test_loss = print_test_loss
def on_test_end(self, loss: float, logs: Dict[str, Any] = None):
if logs is None:
logs = {}
if self.print_test_loss:
if self.as_json:
dump = dict(
mode="test", epoch=self.epoch_counter, loss=self._get_metric(loss)
)
for k, v in logs.items():
dump[k] = self._get_metric(v)
output_message = json.dumps(dump)
else:
output_message = (
f"test: epoch {self.epoch_counter}, loss {loss:.4f}, {logs}"
)
logging.info(output_message)
def on_epoch_end(self, loss: float, logs: Dict[str, Any] = None):
if logs is None:
logs = {}
self.epoch_counter += 1
if self.print_train_loss:
if self.as_json:
dump = dict(
mode="train", epoch=self.epoch_counter, loss=self._get_metric(loss)
)
for k, v in logs.items():
dump[k] = self._get_metric(v)
output_message = json.dumps(dump)
else:
output_message = (
f"train: epoch {self.epoch_counter}, loss {loss:.4f}, {logs}"
)
logging.info(output_message)
def _get_metric(self, metric: Union[torch.Tensor, float]) -> float:
if torch.is_tensor(metric) and cast(torch.Tensor, metric).dim() > 1:
return cast(torch.Tensor, metric).mean().item()
elif torch.is_tensor(metric):
return cast(torch.Tensor, metric).item()
elif type(metric) == float:
return metric
else:
raise TypeError("Metric must be either float or torch.Tensor")
| 1.851563 | 2 |
web/migrations/0008_auto_20160901_1912.py | acuestap/smarttools_test | 0 | 12787665 | <reponame>acuestap/smarttools_test
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-02 00:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('web', '0007_auto_20160901_0641'),
]
operations = [
migrations.AlterModelOptions(
name='video',
options={'ordering': ['uploadDate']},
),
migrations.AlterField(
model_name='video',
name='converted_video',
field=models.FileField(blank=True, null=True, upload_to='upload_files/competitions/videos'),
),
migrations.AlterField(
model_name='video',
name='original_video',
field=models.FileField(null=True, upload_to='upload_files/competitions/videos'),
),
]
| 1.671875 | 2 |
integration_tests/cfg/ows_test_cfg_bad.py | LiamOSullivan/datacube-ows | 4 | 12787666 | <reponame>LiamOSullivan/datacube-ows<filename>integration_tests/cfg/ows_test_cfg_bad.py
# pylint: skip-file
# This file is part of datacube-ows, part of the Open Data Cube project.
# See https://opendatacube.org for more information.
#
# Copyright (c) 2017-2021 OWS Contributors
# SPDX-License-Identifier: Apache-2.0
# THIS IS A TESTING FILE FOR TESTING ERROR HANDLING.
# Do not use it as an example, it is deliberately invalid.
#
# Please refer to datacube_ows/ows_cfg_example.py for EXAMPLE CONFIG
ows_cfg = {
"glerbal": {
"turtle": "An invalid configuration",
},
"liars": []
}
| 1.921875 | 2 |
tests/unit-tests/test_config_titlefix.py | hazemelraffiee/confluencebuilder | 90 | 12787667 | <filename>tests/unit-tests/test_config_titlefix.py
# -*- coding: utf-8 -*-
"""
:copyright: Copyright 2020-2022 Sphinx Confluence Builder Contributors (AUTHORS)
:license: BSD-2-Clause (LICENSE)
"""
from tests.lib.testcase import ConfluenceTestCase
from tests.lib.testcase import setup_builder
from tests.lib import parse
import os
class TestConfluenceConfigTitlefix(ConfluenceTestCase):
@classmethod
def setUpClass(cls):
super(TestConfluenceConfigTitlefix, cls).setUpClass()
cls.config['root_doc'] = 'titlefix'
cls.dataset = os.path.join(cls.datasets, 'common')
cls.filenames = [
'titlefix',
'titlefix-child',
]
@setup_builder('confluence')
def test_storage_config_titlefix_none(self):
out_dir = self.build(self.dataset, filenames=self.filenames)
with parse('titlefix', out_dir) as data:
page_ref = data.find('ri:page')
self.assertIsNotNone(page_ref)
self.assertEqual(page_ref['ri:content-title'],
'titlefix-child')
with parse('titlefix-child', out_dir) as data:
page_ref = data.find('ri:page')
self.assertIsNotNone(page_ref)
self.assertEqual(page_ref['ri:content-title'], 'titlefix')
@setup_builder('confluence')
def test_storage_config_titlefix_postfix(self):
config = dict(self.config)
config['confluence_publish_postfix'] = '-mypostfix'
out_dir = self.build(self.dataset, config=config,
filenames=self.filenames)
with parse('titlefix', out_dir) as data:
page_ref = data.find('ri:page')
self.assertIsNotNone(page_ref)
self.assertEqual(page_ref['ri:content-title'],
'titlefix-child-mypostfix')
with parse('titlefix-child', out_dir) as data:
page_ref = data.find('ri:page')
self.assertIsNotNone(page_ref)
self.assertEqual(page_ref['ri:content-title'], 'titlefix-mypostfix')
@setup_builder('confluence')
def test_storage_config_titlefix_prefix(self):
config = dict(self.config)
config['confluence_publish_prefix'] = 'myprefix-'
out_dir = self.build(self.dataset, config=config,
filenames=self.filenames)
with parse('titlefix', out_dir) as data:
page_ref = data.find('ri:page')
self.assertIsNotNone(page_ref)
self.assertEqual(page_ref['ri:content-title'],
'myprefix-titlefix-child')
with parse('titlefix-child', out_dir) as data:
page_ref = data.find('ri:page')
self.assertIsNotNone(page_ref)
self.assertEqual(page_ref['ri:content-title'], 'myprefix-titlefix')
@setup_builder('confluence')
def test_storage_config_titlefix_prefix_and_postfix(self):
config = dict(self.config)
config['confluence_publish_prefix'] = 'myprefix-'
config['confluence_publish_postfix'] = '-mypostfix'
out_dir = self.build(self.dataset, config=config,
filenames=self.filenames)
with parse('titlefix', out_dir) as data:
page_ref = data.find('ri:page')
self.assertIsNotNone(page_ref)
self.assertEqual(page_ref['ri:content-title'],
'myprefix-titlefix-child-mypostfix')
with parse('titlefix-child', out_dir) as data:
page_ref = data.find('ri:page')
self.assertIsNotNone(page_ref)
self.assertEqual(page_ref['ri:content-title'],
'myprefix-titlefix-mypostfix')
@setup_builder('confluence')
def test_storage_config_titlefix_ignore_root(self):
config = dict(self.config)
config['confluence_ignore_titlefix_on_index'] = True
config['confluence_publish_postfix'] = '-mypostfix'
config['confluence_publish_prefix'] = 'myprefix-'
out_dir = self.build(self.dataset, config=config,
filenames=self.filenames)
with parse('titlefix', out_dir) as data:
page_ref = data.find('ri:page')
self.assertIsNotNone(page_ref)
self.assertEqual(page_ref['ri:content-title'],
'myprefix-titlefix-child-mypostfix')
with parse('titlefix-child', out_dir) as data:
page_ref = data.find('ri:page')
self.assertIsNotNone(page_ref)
self.assertEqual(page_ref['ri:content-title'], 'titlefix')
| 2.09375 | 2 |
ConverString.py | shatheesh171/divide_and_conquer_algos | 0 | 12787668 | def findMinOperation(s1,s2,index1,index2):
if index1==len(s1):
return len(s2)-index2
if index2==len(s2):
return len(s1)-index1
if s1[index1]==s2[index2]:
return findMinOperation(s1,s2,index1+1,index2+1)
else:
insertOp=1+findMinOperation(s1,s2,index1+1,index2)
deleteOp=1+findMinOperation(s1,s2,index1,index2+1)
replaceOp=1+findMinOperation(s1,s2,index1+1,index2+1)
return min(insertOp,deleteOp,replaceOp)
print(findMinOperation("table","tbrles",0,0))
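# A memoized variant, added as an illustrative sketch: the recursion above re-solves
# the same (index1, index2) subproblems exponentially many times; caching them gives
# the classic O(len(s1) * len(s2)) edit-distance computation.
from functools import lru_cache
def findMinOperationMemo(s1, s2):
    @lru_cache(maxsize=None)
    def solve(index1, index2):
        if index1 == len(s1):          # s1 exhausted: insert the rest of s2
            return len(s2) - index2
        if index2 == len(s2):          # s2 exhausted: delete the rest of s1
            return len(s1) - index1
        if s1[index1] == s2[index2]:   # characters match: no operation needed
            return solve(index1 + 1, index2 + 1)
        return 1 + min(solve(index1, index2 + 1),      # insert
                       solve(index1 + 1, index2),      # delete
                       solve(index1 + 1, index2 + 1))  # replace
    return solve(0, 0)
print(findMinOperationMemo("table", "tbrles"))  # same result as the call above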
| 3.515625 | 4 |
spylls/hunspell/algo/trie.py | strepon/spylls | 0 | 12787669 | from collections import defaultdict
class Leaf: # pylint: disable=too-few-public-methods,missing-class-docstring
def __init__(self):
self.payloads = []
self.children = defaultdict(Leaf)
class Trie:
"""
`Trie <https://en.wikipedia.org/wiki/Trie>`_ is a data structure for effective prefix search. It
is used in Spylls to store prefixes and suffixes. For example, if we have suffixes "s", "ions",
"ications", they are stored (reversed) this way:
.. code-block:: text
root
+-s ... metadata for suffix "s"
+-noi ... metadata for suffix "ions"
+-taci ... metadata for suffix "ications"
So, for the word "complications", we can receive all its possible suffixes (all three) in one
pass through trie.
**Important:** Profiling shows that search through Trie of suffixes/prefixes is the center of
Spylls performance, that's why it is very primitive and fast implementation instead of some
library like `pygtrie <https://github.com/google/pygtrie>`_. Probably, by choosing fast (C)
implementation of trie, the whole spylls can be make much faster.
"""
def __init__(self, data=None):
self.root = Leaf()
if data:
for key, val in data.items():
self.set(key, val)
def put(self, path, payload):
cur = self.root
for p in path:
cur = cur.children[p]
cur.payloads.append(payload)
def set(self, path, payloads):
cur = self.root
for p in path:
cur = cur.children[p]
cur.payloads = payloads
def lookup(self, path):
for _, leaf in self.traverse(self.root, path):
yield from leaf.payloads
def traverse(self, cur, path, traversed=[]):
yield (traversed, cur)
if not path or path[0] not in cur.children:
return
yield from self.traverse(cur.children[path[0]], path[1:], [*traversed, path[0]])
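# Usage sketch mirroring the suffix example in the class docstring (illustrative,
# with made-up payloads): keys are stored reversed, so looking up a reversed word
# yields the payloads of every suffix it ends with in a single pass.
if __name__ == "__main__":
    suffixes = Trie({
        "s"[::-1]: ["suffix -s"],
        "ions"[::-1]: ["suffix -ions"],
        "ications"[::-1]: ["suffix -ications"],
    })
    print(list(suffixes.lookup("complications"[::-1])))
    # -> ['suffix -s', 'suffix -ions', 'suffix -ications']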
| 3.1875 | 3 |
vmprofile/migrations/0007_auto_20160915_1542.py | mattip/vmprof-server | 0 | 12787670 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-09-15 15:42
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import vmprofile.models
import uuid
def forward_func(apps, schema_editor):
RuntimeData = apps.get_model("vmprofile", "RuntimeData")
CPUProfile = apps.get_model("vmprofile", "CPUProfile")
for prof in CPUProfile.objects.all():
rd = RuntimeData.objects.create()
rd.created = prof.created
rd.user = prof.user
rd.name = prof.name
rd.vm = prof.vm
rd.completed = True
rd.save()
prof.runtime_data = rd
prof.save()
def backward_func(apps, schema_editor):
RuntimeData = apps.get_model("vmprofile", "RuntimeData")
CPUProfile = apps.get_model("vmprofile", "CPUProfile")
for rd in RuntimeData.objects.all():
cpup = rd.cpu_profile
cpup.created = rd.created
cpup.user = rd.user
cpup.name = rd.name
cpup.vm = rd.vm
cpup.save()
    RuntimeData.objects.all().delete()
class Migration(migrations.Migration):
atomic = False
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('vmprofile', '0006_auto_20160915_1531'),
]
operations = [
migrations.CreateModel(
name='RuntimeData',
fields=[
('runtime_id', models.CharField(default=uuid.uuid4, max_length=64, primary_key=True, unique=True, serialize=False)),
('created', models.DateTimeField(auto_now_add=True)),
('vm', models.CharField(blank=True, max_length=32)),
('name', models.CharField(blank=True, max_length=256)),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-created'],
},
),
migrations.RenameField(
model_name='cpuprofile',
old_name='checksum',
new_name='cpuprofile_id',
),
migrations.AlterField(
model_name='cpuprofile',
name='cpuprofile_id',
field=models.CharField(default=uuid.uuid4, max_length=64, primary_key=True, serialize=False),
),
migrations.AddField(
model_name='cpuprofile',
name='runtime_data',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
related_name='cpu_profile', to='vmprofile.RuntimeData'),
),
migrations.RunPython(forward_func, backward_func),
migrations.AlterModelOptions(
name='cpuprofile',
options={},
),
migrations.RemoveField(
model_name='cpuprofile',
name='created',
),
migrations.RemoveField(
model_name='cpuprofile',
name='name',
),
migrations.RemoveField(
model_name='cpuprofile',
name='user',
),
migrations.RemoveField(
model_name='cpuprofile',
name='vm',
),
migrations.AddField(
model_name='cpuprofile',
name='file',
field=models.FileField(null=True, upload_to=vmprofile.models.get_profile_storage_directory),
),
migrations.AlterField(
model_name='cpuprofile',
name='data',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='runtimedata',
name='completed',
field=models.BooleanField(default=False),
),
]
| 1.882813 | 2 |
scikit/sk_diag_under_and_over_fitting.py | abondar24/deepLearnPython | 1 | 12787671 | <filename>scikit/sk_diag_under_and_over_fitting.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.learning_curve import validation_curve
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data', header=None)
x = df.loc[:, 2:].values
y = df.loc[:, 1].values
le = LabelEncoder()
y = le.fit_transform(y)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.20, random_state=1)
pipe_lr = Pipeline([('sc1', StandardScaler()),
('clf', LogisticRegression(penalty='l2', random_state=0))])
param_range = [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]
train_scores, test_scores = validation_curve(estimator=pipe_lr, X=x_train,
y=y_train,
param_name='clf__C',
param_range=param_range,
cv=10)
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
plt.plot(param_range, train_mean, color='blue', marker='o', markersize=5, label='training accuracy')
plt.fill_between(param_range, train_mean + train_std, train_mean - train_std, alpha=0.15, color='blue')
plt.plot(param_range, test_mean, color='green', linestyle='--', marker='s', markersize=5, label='validation accuracy')
plt.fill_between(param_range, test_mean + test_std, test_mean - test_std, alpha=0.15, color='green')
plt.grid()
plt.xlabel('Parameter C')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.ylim([0.8, 1.0])
plt.show()
| 2.796875 | 3 |
tests/unit/conftest.py | ckornacker/aws-gate | 369 | 12787672 | <reponame>ckornacker/aws-gate
import os
import boto3
import placebo
import pytest
@pytest.fixture(name="session")
def placebo_session(request):
session_kwargs = {"region_name": os.environ.get("AWS_DEFAULT_REGION", "eu-west-1")}
profile_name = os.environ.get("PLACEBO_PROFILE", None)
if profile_name:
session_kwargs["profile_name"] = profile_name
session = boto3.Session(**session_kwargs)
prefix = request.function.__name__
base_dir = os.environ.get("PLACEBO_DIR", os.path.join(os.getcwd(), "placebo"))
record_dir = os.path.join(base_dir, prefix)
if not os.path.exists(record_dir):
os.makedirs(record_dir)
pill = placebo.attach(session, data_path=record_dir)
if os.environ.get("PLACEBO_MODE") == "record":
pill.record()
else:
pill.playback()
return session
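# Usage sketch (inferred from the environment variables read above, not documented
# in the original file): record fresh AWS responses with
#   PLACEBO_MODE=record [PLACEBO_PROFILE=<profile>] [PLACEBO_DIR=<dir>] pytest ...
# and run without PLACEBO_MODE set to play the recorded responses back instead of
# calling AWS.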
@pytest.fixture
def ec2(session):
return session.resource("ec2", region_name="eu-west-1")
@pytest.fixture
def ec2_ic(session):
return session.resource("ec2-instance-connect", region_name="eu-west-1")
@pytest.fixture
def ssm(session):
return session.client("ssm", region_name="eu-west-1")
@pytest.fixture
def ec2_mock(mocker):
return mocker.MagicMock()
@pytest.fixture
def ec2_ic_mock(mocker):
return mocker.MagicMock()
@pytest.fixture
def ssm_mock(mocker):
mock = mocker.MagicMock()
response = {
"SessionId": "session-020bf6cd31f912b53",
"TokenValue": "randomtokenvalue",
}
mock.configure_mock(
**{
"start_session.return_value": response,
"terminate_session.return_value": response,
}
)
type(mock.meta).endpoint_url = mocker.PropertyMock(return_value="ssm")
return mock
@pytest.fixture
def instance_id():
return "i-0c32153096cd68a6d"
@pytest.fixture
def ssh_key(mocker):
mock = mocker.MagicMock()
mock.configure_mock(
**{
"public_key.return_value": "ssh-rsa ranodombase64string",
"key_path.return_value": "/home/user/.aws-gate/key",
}
)
return mock
@pytest.fixture
def config(mocker):
mock = mocker.MagicMock()
mock.configure_mock(
**{
"get_host.return_value": {
"alias": "test",
"name": "SSM-test",
"profile": "default",
"region": "eu-west-1",
}
}
)
return mock
@pytest.fixture
def empty_config(mocker):
mock = mocker.MagicMock()
mock.configure_mock(**{"get_host.return_value": {}})
return mock
@pytest.fixture
def get_instance_details_response():
return {"availability_zone": "eu-west-1a"}
| 1.953125 | 2 |
Classes/Wrappers/BattleLogPlayerEntry.py | Enjoyop/BSDS-V42 | 16 | 12787673 | from Classes.Wrappers.PlayerDisplayData import PlayerDisplayData
class BattleLogPlayerEntry:
def encode(calling_instance, fields):
pass
def decode(calling_instance, fields):
fields["BattleLogEntry"] = {}
fields["BattleLogEntry"]["Unkown1"] = calling_instance.readVInt()
fields["BattleLogEntry"]["Unkown2"] = calling_instance.readLong()
fields["BattleLogEntry"]["Unkown3"] = calling_instance.readVInt()
fields["BattleLogEntry"]["Unkown4"] = calling_instance.readBoolean()
countVal = calling_instance.readVInt()
fields["BattleLogEntry"]["Unkown5"] = countVal
fields["BattleLogEntry"]["Entries"] = {}
for i in range(countVal):
fields["BattleLogEntry"]["Entries"][str(i)] = {}
fields["BattleLogEntry"]["Entries"][str(i)]["Unknown1"] = calling_instance.readDataReference()
fields["BattleLogEntry"]["Entries"][str(i)]["Unknown2"] = calling_instance.readVInt()
fields["BattleLogEntry"]["Entries"][str(i)]["Unknown3"] = calling_instance.readVInt()
fields["BattleLogEntry"]["Entries"][str(i)]["Unknown4"] = calling_instance.readVInt()
fields["BattleLogEntry"]["Unkown6"] = calling_instance.readVInt()
PlayerDisplayData.decode(calling_instance, fields) | 2.8125 | 3 |
source/modules/beeline.py | telegrambotdev/telegram.email.notify | 0 | 12787674 | # -*- coding: utf-8 -*-
"""
Beeline.ru
"""
from html2text import convert
from . import by_subj, NBSP, BUTTONS
# The markers below must stay in Russian: they match literal fragments of the
# Beeline notification e-mails ("To your mailbox ..." and "The message can be
# listened to in the service's web management interface").
MARK_INBOX = 'В Ваш почтовый ящик '
MARK_CLOUD_GO = 'Прослушать сообщение можно в web-интерфейсе управления услугой'
def voice_mail(_subj, text):
"""
voice mail
"""
pos_start = text.index(MARK_INBOX)
pos_end = text.index(MARK_CLOUD_GO)
result = text[pos_start:pos_end]
if 'отабонента' in result:
result = result.replace('отабонента', 'от абонента')
return [
result,
'\n' + BUTTONS + '\n' + "[Прослушать](https://cloudpbx.beeline.ru/)",
]
SUBJ_HANDLERS = [
(('Облачная АТС - У вас новое сообщение голосовой почты', ), voice_mail),
]
def start(subj, body):
"""
parse Beeline
"""
return by_subj(
subj,
body,
convert(body).replace(NBSP, ' '),
'beeline',
'Beeline Облачная АТС\n',
SUBJ_HANDLERS
)
| 2.875 | 3 |
pyramid_scaffold/models/mymodel.py | regenalgrant/pyramid_scaffold | 0 | 12787675 | import bcrypt
from sqlalchemy import (
Column,
Index,
Integer,
Unicode,
Date,
)
from .meta import Base
class Entry(Base):
__tablename__ = 'entries'
id = Column(Integer, primary_key=True)
title = Column(Unicode)
body = Column(Unicode)
category = Column(Unicode)
tags = Column(Unicode)
creation_date = Column(Date)
| 2.28125 | 2 |
p4/pox_module/cs640/vnethandler.py | zhaoyi3264/cs640 | 3 | 12787676 | <gh_stars>1-10
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.revent import *
from pox.lib.util import dpidToStr
from pox.lib.util import str_to_bool
from pox.lib.recoco import Timer
from pox.lib.packet import ethernet
import time
import threading
import asyncore
import collections
import logging
import socket
# Required for VNS
import sys
import os
from twisted.python import threadable
from threading import Thread
from twisted.internet import reactor
from VNSProtocol import VNS_DEFAULT_PORT, create_vns_server
from VNSProtocol import VNSOpen, VNSClose, VNSPacket
from VNSProtocol import VNSInterface, VNSSwitchInterface, VNSHardwareInfo
log = core.getLogger()
def pack_mac(macaddr):
octets = macaddr.split(':')
ret = ''
for byte in octets:
ret += chr(int(byte, 16))
return ret
def pack_ip(ipaddr):
octets = ipaddr.split('.')
ret = ''
for byte in octets:
ret += chr(int(byte))
return ret
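# For illustration: pack_ip('10.0.0.1') -> '\n\x00\x00\x01' (one byte per octet,
# analogous to pack_mac above, which packs one byte per colon-separated hex pair).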
class VNetDevice:
def __init__ (self, swid, ifaces):
self.swid = swid
self.conn = None
self.intfname_to_port = {}
self.port_to_intfname = {}
self.interfaces = []
for intf in ifaces.keys():
ip, mask, mac, port = ifaces[intf]
if (ip is None or mask is None or mac is None):
self.interfaces.append(VNSSwitchInterface(intf))
else:
ip = pack_ip(ip)
mask = pack_ip(mask)
mac = pack_mac(mac)
self.interfaces.append(VNSInterface(intf, mac, ip, mask))
# Mapping between of-port and intf-name
self.intfname_to_port[intf] = port
self.port_to_intfname[port] = intf
def handle_packet_msg(self, vns_msg):
out_intf = vns_msg.intf_name
pkt = vns_msg.ethernet_frame
try:
out_port = self.intfname_to_port[out_intf]
except KeyError:
log.debug('packet-out through wrong port number %s' % out_port)
return
log.debug("Packet out %s.%s: %r" % (self.swid, out_intf, ethernet(pkt)))
log.debug('VNetServerHandler raise packet out event')
core.VNetHandler.raiseEvent(VNetPacketOut(pkt, out_port, self.swid))
def handle_VNetPacketIn(self, event):
try:
intfname = self.port_to_intfname[event.port]
except KeyError:
log.debug("Couldn't find interface for portnumber %s" % event.port)
return
log.debug("Packet in %s.%s: %s" % (self.swid, intfname,
ethernet(event.pkt)))
if (self.conn is None):
log.debug("VNet device %s is not connected" % (self.swid))
return
self.conn.send(VNSPacket(intfname, event.pkt))
class VNetServerListener(EventMixin):
''' TCP Server to handle connection to VNet '''
def __init__ (self, address=('127.0.0.1', 8888)):
port = address[1]
self.listenTo(core.VNetOFNetHandler)
self.devsByConn = {}
self.devsByName = {}
self.server = create_vns_server(port, self.recv_msg,
self.handle_new_client, self.handle_client_disconnect)
log.info("VNet server listening on %s:%d" % (address[0],address[1]))
return
def _handle_VNetPacketIn(self, event):
dev = self.devsByName[event.swid]
if (dev is None):
return
dev.handle_VNetPacketIn(event)
def recv_msg(self, conn, vns_msg):
        # demux sr-client messages and take appropriate actions
if vns_msg is None:
log.debug("invalid message")
self.handle_close_msg(conn)
return
log.debug('Received VNS msg: %s' % vns_msg)
if vns_msg.get_type() == VNSOpen.get_type():
self.handle_open_msg(conn, vns_msg)
elif vns_msg.get_type() == VNSClose.get_type():
self.handle_close_msg(conn)
elif vns_msg.get_type() == VNSPacket.get_type():
self.handle_packet_msg(conn, vns_msg)
else:
log.debug('Unexpected VNS message received: %s' % vns_msg)
def handle_open_msg(self, conn, vns_msg):
dev = self.devsByName[vns_msg.vhost]
if (dev is None):
log.debug('interfaces for %s not populated yet' % (vns_msg.vhost))
return
self.devsByConn[conn] = dev
dev.conn = conn
conn.send(VNSHardwareInfo(dev.interfaces))
return
def handle_close_msg(self, conn):
#conn.send("Goodbyte!") # spelling mistake intended...
conn.transport.loseConnection()
return
def handle_packet_msg(self, conn, vns_msg):
dev = self.devsByConn[conn]
dev.handle_packet_msg(vns_msg)
def handle_new_client(self, conn):
log.debug('Accepted client at %s' % conn.transport.getPeer().host)
return
def handle_client_disconnect(self, conn):
log.info("Client disconnected")
del self.devsByConn[conn]
conn.transport.loseConnection()
return
class VNetPacketOut(Event):
    '''Event to raise upon receiving a packet back from VNet device'''
def __init__(self, packet, port, swid):
Event.__init__(self)
self.pkt = packet
self.port = port
self.swid = swid
class VNetHandler(EventMixin):
_eventMixin_events = set([VNetPacketOut])
def __init__(self):
EventMixin.__init__(self)
self.listenTo(core)
self.listenTo(core.VNetOFNetHandler)
# self.server_thread = threading.Thread(target=asyncore.loop)
# use twisted as VNS also used Twisted.
# its messages are already nicely defined in VNSProtocol.py
self.server_thread = threading.Thread(target=lambda: reactor.run(installSignalHandlers=False))
self.server_thread.daemon = True
self.server_thread.start()
self.server = VNetServerListener()
def _handle_VNetDevInfo(self, event):
log.info("VNetHandler catch VNetDevInfo(ifaces=%s,swid=%s,dpid=%d)",
event.ifaces, event.swid, event.dpid)
self.server.devsByName[event.swid] = VNetDevice(event.swid, event.ifaces)
def _handle_GoingDownEvent (self, event):
log.debug("Shutting down VNetServer")
def launch():
"""
Starts the VNet handler application.
"""
core.registerNew(VNetHandler)
| 1.921875 | 2 |
frontends/python3/ProgInfo.py | Chengifei/SySSFEs | 1 | 12787677 | <gh_stars>1-10
################################################################################
# Copyright 2017-2018 by <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file contains structures to store data from source code, and useful
# processing routines for code generation.
#
# WARNING ## DATA CORRUPTION MAY OCCUR ## CAUTION
# DO NOT RUN COMPILER IN PARALLEL IN CURRENT RELEASE
# THE FILE CONTAINS BAD USAGE PATTERN OF GLOBAL VARIABLE USED FOR TESTING.
################################################################################
from collections import OrderedDict as odict
from . import warning
from . import templating
from .support import cSrcLocation as src_tracker, cTypes, cVariable as Var
from . import resolver
from itertools import chain
from contextlib import contextmanager
from .expr import expr
class Flag:
def __init__(self, val):
self.flag = val
def __bool__(self):
return self.flag
class CondRuleType:
# This has the same interface as RuleType, should have an abstract base
def __init__(self, src, dom, *condrulepair):
self.src = src
self.dom = dom
self.crpack = condrulepair
        # collect every derivative variable mentioned on either side of each rule pair
        self.diffs = {d for _, rule in condrulepair for d in rule[0].findDerivatives()}
        self.diffs.update(d for _, rule in condrulepair for d in rule[1].findDerivatives())
def list_vars(self):
return {i.name for i in self.diffs}
class ObjType:
def __init__(self, src, vals):
self.src = src
self.vals = vals
class DeclType:
def __init__(self, src, types):
self.src = src
self.types = types
class Op:
def __init__(self, name, argc=1, writer=None):
self.name = name
self.argc = argc
self.writer = writer if writer \
else lambda args, writer: f'{self.name}({", ".join(writer.write_expr(i) for i in args)})'
def __repr__(self):
return 'Op ' + self.name
def diff_writer(args, writer: 'StepWriter'):
expr, *wrt = args
if wrt:
raise NotImplementedError
all_vars = set(resolver.cNVar(i.name, i.order[0]+1) for i in expr.findDerivatives())
solving = {var for var in writer.solving_for if var in all_vars}
if not solving:
varlist = list(expr.listVars())
with writer.tmp_push(writer.VarWriter.lambda_arg,
{i: writer.write_arg(i) for i in varlist}) as token:
return 'math::calculus::fdiff(' \
+ writer.write_lambda("&", token, "return " + writer.write_expr(expr) + ";") \
+ ", step, " + ", ".join(writer.write_getter(i) for i in varlist) + ')'
else:
# When using chain-rule we have to adjust writing scheme for base
# variables as well
var = solving.pop()
if var.order[0] > 1:
var_base = resolver.cNVar(var.name, var.order[0] - 1)
else:
var_base = var.name
original_write = writer.VarWriter.write_value(var_base)
with writer.tmp_push(writer.VarWriter.lambda_arg,
{var_base: writer.write_arg(var_base)}) as token:
text = f'math::calculus::fdiff({writer.write_lambda("&", token, "return " + writer.write_expr(expr) + ";")}, {original_write})'
return text + f' * {writer.VarWriter.write(var)}'
OP = {
'+': Op('math::op::add', 2),
'-': Op('math::op::sub', 2),
'*': Op('math::op::mul', 2),
'/': Op('math::op::div', 2),
'pow': Op('std::pow', 2),
'sin': Op('std::sin', 1),
'cos': Op('std::cos', 1),
'tan': Op('std::tan', 1),
'sqrt': Op('math::sqrt', 1),
'diff': Op('diff', 1, diff_writer),
}
class Expr:
def __init__(self, expr):
if isinstance(expr, tuple):
self.content = (OP[expr[0]], [Expr(i) for i in expr[1]])
else:
self.content = expr
def listVars(self):
if isinstance(self.content, tuple):
for i in self.content[1]:
yield from i.listVars()
elif isinstance(self.content, str):
yield self.content
def findDerivatives(self, DIFF=0):
if isinstance(self.content, tuple):
if self.content[0] is OP['diff']:
DIFF += 1
for i in self.content[1]:
yield from i.findDerivatives(DIFF)
if self.content[0] is OP['diff']:
DIFF -= 1
elif isinstance(self.content, str):
yield resolver.cNVar(self.content, DIFF)
def __repr__(self):
return repr(self.content)
class StepWriter:
def __init__(self, space: 'Space'):
self.space = space
self.solving_for = []
self.VarWriter = StepWriter.VarWriter(self.space.locate_var)
self.noBind = False
@contextmanager
def tmp_nobind(self):
old = self.noBind
self.noBind = True
yield
self.noBind = old
@staticmethod
@contextmanager
def tmp_push(stack, L):
stack.append(L)
yield L
stack.pop()
def write(self, step):
use, update = step
if isinstance(use, int):
if self.noBind:
bind, end = '', ''
else:
bind = f'{self.VarWriter.write(update)} = '
end = ';'
if update.order[0] == 0:
update = update.name
with self.tmp_push(self.VarWriter.lambda_arg,
{update: self.write_arg(update)}) as token, \
self.tmp_push(self.solving_for, update):
rule = self.write_lambda('&', token, f'return {self.write_rule(self.space.rules[use])};')
seed = self.VarWriter.write_value(update)
return bind + self.write_alg(rule, seed) + end
elif isinstance(use, tuple):
if self.noBind:
bind, end = '', ''
else:
bind = f'{self.VarWriter.write(update)} = '
end = ';'
with self.tmp_nobind(), \
self.tmp_push(self.VarWriter.lambda_arg, # requires Python 3.6
{'t': 't', # where default dict is ordered (calling convention)
update: self.write_arg(update)}) as token, \
self.tmp_push(self.solving_for, update):
gen = self.write_lambda('&', token, f'return {self.write(use)};')
wrt = 't'
seed = self.VarWriter.write_value(update)
step = 'step'
return bind + self.write_diff(gen, wrt, seed, step) + '.first' + end
else:
bind = ''
end = '' if self.noBind else ';'
return bind + self.write_alg_sys(', '.join(self.VarWriter.write(i) for i in update),
', '.join(self.space.rules[i] for i in use)
) + end
@staticmethod
def write_alg(rule, seed):
return f'math::solver::algebraic({rule}, {seed})'
@staticmethod
def write_alg_sys(vars, rules):
return f'math::solver::algebraic_sys({{{vars}}}, {rules})'
@staticmethod
def write_diff(gen, wrt, seed, step):
return f'math::solver::differential({gen}, {wrt}, {seed}, {step})'
def write_rule(self, rule):
return f'math::op::sub({self.write_expr(rule.lhs)}, {self.write_expr(rule.rhs)})'
def write_expr(self, expr):
if isinstance(expr, Expr):
expr = expr.content
if isinstance(expr, tuple):
op = expr[0]
return op.writer(expr[1], self)
elif isinstance(expr, str) or isinstance(expr, resolver.cNVar):
return self.VarWriter.write_value(expr)
else:
return str(expr)
class VarWriter:
def __init__(self, parent):
self.look_up = parent
self.lambda_arg = []
def write(self, var):
for i in self.lambda_arg:
try:
return i[var]
except KeyError:
pass
if isinstance(var, resolver.cNVar):
if sum(var.order) != 0:
return f'{self.write(var.name)}_{var.order[0]}'
else:
return self.write(var.name)
else:
lookup_result = self.look_up(var)
if isinstance(lookup_result, tuple):
return self.write_watch_new(lookup_result[0], lookup_result[1])
else:
return var
def write_value(self, var):
for i in self.lambda_arg:
try:
return i[var]
except KeyError:
pass
if isinstance(var, resolver.cNVar):
if sum(var.order) != 0:
return 0 # TODO: Check for step by-product of differential solver
else:
return self.write_value(var.name)
else:
lookup_result = self.look_up(var)
if isinstance(lookup_result, tuple):
return self.write_watch_last(lookup_result[0], lookup_result[1])
else:
return var
@staticmethod
def write_watch_new(num, var):
return f'srd_.get({num}).{var}'
@staticmethod
def write_watch_last(num, var):
return f'last_data_.get({num}).{var}'
def write_lambda(self, capture, args, body):
return f'[{capture}]({", ".join(self._write_arg(i) for i in args)}){{{body}}}'
def _write_arg(self, var):
if isinstance(var, resolver.cNVar):
if sum(var.order) != 0:
# FIXME: Duplicate code
lookup_result = self.space.locate_var(var.name)
assert isinstance(lookup_result, tuple) # must be a watch to be differentiated
return f'{lookup_result[2]} {var.name.split(".", 1)[1]}_{var.order[0]}'
else:
var = var.name
lookup_result = self.space.locate_var(var)
if isinstance(lookup_result, tuple):
return f'{lookup_result[2]} {lookup_result[1]}'
else:
return f'{lookup_result.type.base} {var}'
def write_arg(self, var):
return self._write_arg(var).split(' ', 1)[1]
def write_getter(self, var):
"""write a functor that can get all history values of watch of a specific object"""
lookup_result = self.space.locate_var(var)
assert isinstance(lookup_result, tuple)
offset = f'offsetof(object_, {lookup_result[1]})'
return f'getter_<{lookup_result[0]}, {offset}, {lookup_result[2]}>(history)';
def __iter__(self):
for step in self.space.steps:
yield self.write(step)
class Space:
def __init__(self):
self.objs = odict()
self.watches = odict()
self.globals = odict()
self.globals['step'] = Var(cTypes(cTypes.BaseEnum.REAL, True, 0), 0.01)
self.tmps = odict()
self.rules = []
self.loopctl = odict()
self.addLoopctl('t', cTypes.BaseEnum.REAL, 0, 1)
self.steps = None
def addRule(self, rule, src=None):
self.rules.append(
resolver.cRule(src if src else src_tracker(0, 0),
Expr(expr.stringToExpr(rule[0])), Expr(expr.stringToExpr(rule[1]))))
def addObj(self, obj, vals, src=None):
if obj in self.objs:
if not warning.warn_ask(f'Object named {obj} exists, continue?'):
return
self.objs[obj] = ObjType(src if src else src_tracker(0, 0), vals)
def addTmp(self, var, types, src=None):
self.tmps[var] = DeclType(src, types)
def addLoopctl(self, var, types, begin, end, src=None):
tmp = Var(cTypes(types, False, 0))
tmp.begin = begin
tmp.end = end
self.loopctl[var] = tmp
def addWatch(self, var, types, src=None):
if var in self.watches:
if not warning.warn_ask(f'Watch {var} exists, continue?'):
return
self.watches[var] = cTypes(types, False, 0)
def var_is_known(self, NVar):
return all(i == 0 for i in
NVar.order) # FIXME: Refer to actually boundary conditions
def var_need_update(self, NVar):
v = self.locate_var(NVar.name)
return isinstance(v, tuple) # FIXME
def processRules(self):
#TODO: Check consistency of boundary values first
# prep for C++ routines, these are awkward in C++
NVars = []
eqns = []
for eqn in self.rules:
eqns.append([])
for i in chain(eqn.lhs.findDerivatives(), eqn.rhs.findDerivatives()):
try:
idx = NVars.index(i)
except ValueError:
i.can_start = self.var_is_known(i)
i.need_update = self.var_need_update(i)
idx = len(NVars)
NVars.append(i)
eqns[-1].append(idx)
self.steps = resolver.resolve(eqns, NVars)
# propagate updates here
updated = {}
for i, (_, var) in enumerate(self.steps):
if var.order[0] < updated.get(var.name, (None, [256]))[1][0]: # FIXME: Magic value, shall be inf
updated[var.name] = i, var.order
for var, (i, order) in updated.items():
if sum(order) != 0:
self.steps[i] = self.steps[i], resolver.cNVar(var, 0)
def write(self):
gen = templating.template('template')
consume = templating.consume
yield from consume(gen, (f'{type} {name};' for name, type in self.watches.items()))
yield from consume(gen, self.objs)
yield from consume(gen, (f'obj.{var}' for var in self.watches))
yield from consume(gen,
(', '.join(str(i) for i in obj.vals) for obj in self.objs.values()))
yield from consume(gen, (f'{tv.type} {name} = {tv.val};' for name, tv in self.globals.items()))
yield from consume(gen, [f'<1, 0, {len(self.objs)}>']) # FIXME: insert true value when tested
yield from consume(gen, (f'{val.type} {name} = {val.begin};' for name, val in self.loopctl.items()))
yield from consume(gen, (str(v) for v in self.loopctl))
yield from consume(gen, (str(v) for v in self.loopctl))
yield from consume(gen, (f'{n} < {i.end}' for n, i in self.loopctl.items()))
yield from consume(gen, (str(v) for v in self.loopctl))
writer = StepWriter(self)
yield from consume(gen, writer)
yield from consume(gen, (str(v) for v in self.loopctl))
yield from consume(gen, (str(v) for v in self.loopctl))
yield from gen
def locate_var(self, var):
if var.startswith('$'):
obj, watch = var[1:].split('.', 1)
base_watch = self.watches[watch]
return int(obj) if obj.isdigit() else obj, watch, base_watch
elif var in self.tmps:
return self.tmps[var]
elif var in self.globals:
return self.globals[var]
elif var in self.loopctl:
return self.loopctl[var]
else:
return self.objs[var]
| 1.757813 | 2 |
tests/test_async.py | fruch/rpyc | 0 | 12787678 | import time
from nose.tools import raises
import rpyc
class Test_Async(object):
def __init__(self):
pass
def setup(self):
self.conn = rpyc.classic.connect_thread()
self.a_sleep = rpyc.async(self.conn.modules.time.sleep)
self.a_int = rpyc.async(self.conn.modules.__builtin__.int)
def teardown(self):
self.conn.close()
def test_asyncresult_api(self):
res = self.a_sleep(2)
assert not res.ready
res.wait()
assert res.ready
assert not res.expired
assert not res.error
assert res.value is None
def test_asyncresult_expiry(self):
res = self.a_sleep(5)
res.set_expiry(4)
t0 = time.time()
try:
res.wait()
except rpyc.AsyncResultTimeout:
dt = time.time() - t0
else:
assert False, "expected AsyncResultTimeout"
print( "timed out after %s" % (dt,) )
assert 3.9 <= dt <= 4.1
def test_asyncresult_callbacks(self):
res = self.a_sleep(2)
visited = []
def f(res):
assert res.ready
assert not res.error
visited.append("f")
def g(res):
visited.append("g")
res.add_callback(f)
res.add_callback(g)
res.wait()
assert set(visited) == set(["f", "g"])
@raises(rpyc.AsyncResultTimeout)
def test_timed(self):
timed_sleep = rpyc.timed(self.a_sleep, 5)
print( timed_sleep )
res = timed_sleep(3)
print( res.value )
res = timed_sleep(7)
print( res.value )
@raises(ValueError)
def test_exceptions(self):
res = self.a_int("foo")
res.wait()
assert res.error
res.value
| 2.453125 | 2 |
django/bot/users/migrations/0001_initial.py | AngelOnFira/megagame-controller | 0 | 12787679 | # Generated by Django 3.2.8 on 2021-11-20 23:06
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
("players", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="Member",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("discord_id", models.CharField(max_length=50, unique=True)),
("name", models.CharField(max_length=255, verbose_name="name")),
("last_seen", models.DateTimeField(blank=True, null=True)),
("is_bot", models.BooleanField(default=False)),
("can_admin_bot", models.BooleanField(default=False)),
(
"player",
models.OneToOneField(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="discord_member",
to="players.player",
),
),
],
),
]
| 2 | 2 |
setup.py | gsevla/pyYDL | 0 | 12787680 | <reponame>gsevla/pyYDL<gh_stars>0
import os
from setuptools import setup
with open('README.md') as readme_file:
readme = readme_file.read()
this = os.path.dirname(os.path.realpath(__file__))
def read(name):
with open(os.path.join(this, name)) as f:
return f.read()
setup(
name='pyydl',
version='1.0',
description='description',
long_description=readme,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/gsevla/pyYDL',
packages=['my_src'],
install_requires=read('requirements.txt'),
include_package_data=True,
zip_safe=True,
license='MIT',
keywords='example app snap linux ubuntu',
classifiers=[
        'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English'
],
scripts=['bin/pyydl']
) | 1.757813 | 2 |
app/tests.py | nevooronni/RideAlong | 1 | 12787681 | from django.test import TestCase
from .models import Driver,Rider,DriverProfile,RiderProfile,RiderReview,DriverReview
class DriverTestClass(TestCase):
'''
test for driver class
'''
def setUp(self):
'''
setup method
'''
self.driver = Driver(first_name = "Neville",last_name = "Oronni",email = "<EMAIL>",phone = "0799244265",city = "Nairobi")
def test_drivers_list(self):
'''
test to see if you can get drivers list
'''
all_drivers = Driver.drivers_list()
drivers = Driver.objects.all()
self.assertTrue(len(all_drivers) == len(drivers))
class DriverProfileTestClass(TestCase):
'''
test for driver profile class
'''
def setUp(self):
'''
setup method
'''
self.profile = DriverProfile(gender='male',car_plate='KAV4718',car_color='black',car_capacity=4)
def test_instance(self):
'''
check if profile is an instance of the driver profile class
'''
self.assertTrue(isinstance(self.profile,DriverProfile))
def test_drivers_profile_list(self):
'''
get list of drivers profiles from the db and return a list
'''
list_of_drivers_profiles = DriverProfile.driver_profile_list()
driver_profiles = DriverProfile.objects.all()
self.assertTrue(len(list_of_drivers_profiles) == len(driver_profiles))
class RiderTestClass(TestCase):
'''
test for rider class
'''
def setUp(self):
'''
set up method
'''
self.rider = Rider(first_name = "Neville",last_name = "Oronni",email = "<EMAIL>",phone = "0704569516",city = "Nairobi")
def test_riders_list(self):
'''
test to see if you can get the riders list
'''
all_riders = Rider.riders_list()
riders = Rider.objects.all()
self.assertTrue(len(all_riders) == len(riders))
class RiderProfileTestClass(TestCase):
'''
test for rider profile class
'''
def setUp(self):
'''
        set up method to create an instance of the rider profile class
'''
self.profile = RiderProfile(gender='male',home_address='pangani')
def test_instance(self):
'''
        test to check if the profile is an instance of the RiderProfile class
'''
self.assertTrue(isinstance(self.profile,RiderProfile))
def test_rider_profile_list(self):
'''
test to see if you can get rider profile from the db
'''
list_of_riders_profiles = RiderProfile.riders_profile_list()
rider_profiles = RiderProfile.objects.all()
self.assertTrue(len(list_of_riders_profiles) == len(rider_profiles))
class RiderReviewTestClass(TestCase):
'''
test for the rider review class
'''
def setUp(self):
'''
set up method to create an instance of the RiderReview class
'''
self.review = RiderReview(review='rider was very cooperative')
def test_instance(self):
'''
        test to check if the rider review is an instance of the RiderReview class
'''
self.assertTrue(isinstance(self.review,RiderReview))
def test_rider_reviews(self):
'''
test to get list of rider reviews from the db
'''
self.driver = Driver(first_name = "Neville",last_name = "Oronni",email = "<EMAIL>",phone = "0799244265",city = "Nairobi")
self.driver.save()
self.rider = Rider(first_name = "chelsea",last_name = "obrien",email = "<EMAIL>",phone = "0725459516",city = "Nairobi")
self.rider.save()
self.rider_profile = RiderProfile(rider=self.rider,gender="female",home_address="Nairobi")
self.rider_review = RiderReview(driver=self.driver,rider_profile=self.rider_profile,review="this was a good ride")
all_reviews = RiderReview.all_rider_reviews(self.rider_profile.id)
rider_reviews = RiderReview.objects.all()
self.assertTrue(len(all_reviews) == len(rider_reviews))
class DriverReviewTestClass(TestCase):
'''
test for the driver review class
'''
def setUp(self):
'''
set up method
'''
        self.review = DriverReview(review='not bad for the first ride')
def test_instance(self):
'''
        test to check if the driver review is an instance of the DriverReview class
'''
self.assertTrue(isinstance(self.review,DriverReview))
def test_driver_reviews(self):
'''
test to get list of driver reviews from the db
'''
self.rider = Rider(first_name = "chelsea",last_name = "obrien",email = "<EMAIL>",phone = "0725459516",city = "Nairobi")
self.rider.save()
self.driver = Driver(first_name = "Neville",last_name = "Oronni",email = "<EMAIL>",phone = "0799244265",city = "Nairobi")
self.driver.save()
self.driver_profile = DriverProfile(driver=self.driver,gender='male',car_plate='KAV4718',car_color='black',car_capacity=4)
        self.driver_review = DriverReview(rider=self.rider,driver_profile=self.driver_profile,review='not bad for the first ride')
all_reviews = DriverReview.all_driver_reviews(self.driver_profile.id)
driver_reviews = DriverReview.objects.all()
self.assertTrue(len(all_reviews) == len(driver_reviews)) | 2.8125 | 3 |
app/resources/drug.py | thehyve/rest_api | 15 | 12787682 | <reponame>thehyve/rest_api
import time
import json
from flask import current_app, request
from flask_restful import abort, Resource
from app.common.response_templates import CTTVResponse
from app.common.results import RawResult
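# Usage note (assumption, not shown in this file): the resource is normally
# registered on a Flask-RESTful Api object elsewhere in the app, e.g.
#   api.add_resource(Drug, '/drug/<string:drug_id>')
# The URL pattern above is illustrative only.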
class Drug(Resource):
def get(self, drug_id):
start_time = time.time()
es = current_app.extensions['esquery']
res = es.get_drug_info_from_id(drug_id)
if res:
return CTTVResponse.OK(res,
took=time.time() - start_time)
else:
abort(404, message="drug code %s cannot be found"%drug_id) | 2.484375 | 2 |
general-practice/Exercises solved/w3resource/challenges/challenge2.py | lugabrielbueno/Projeto | 0 | 12787683 | <filename>general-practice/Exercises solved/w3resource/challenges/challenge2.py
# Write a Python program to check if a given positive integer is a power of three
def ispower3(num):
count = 0
for x in range(1000):
if 3**x == num:
count += 1
return count > 0
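# A possible alternative (not part of the original exercise): repeatedly divide
# by 3 instead of computing powers, which works for any positive integer.
def ispower3_by_division(num):
    if num < 1:
        return False
    while num % 3 == 0:
        num //= 3
    return num == 1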
print(ispower3(9))
print(ispower3(15))
print(ispower3(20)) | 4.4375 | 4 |
config.py | lok-i/AntennaEvolutionStrategy | 2 | 12787684 | <reponame>lok-i/AntennaEvolutionStrategy
from src.AntennaArray import PatchAntennaArray
from src.PatchTopology import *
import time
# EXPERIMENT PARAMETERS
PATCH_TOPOLOGY = 'Spiral2'# None
NO_OF_GENERATIONS = 25
NO_OF_PATCHES = 25 # give a perfect square for grid
# -----------------------------------------------------------------
delta_angle_for_integration = 2 #keep it 1 for a better surface plot
if 'None' in str(PATCH_TOPOLOGY):
# only XYZ
if 'XYZ' in str(PATCH_TOPOLOGY):
param_opt_range = {'x':{'greater_than':-0.25,'lesser_than':0.25},
'y':{'greater_than':-0.25,'lesser_than':0.25},
'z':{'equal_to':0.},
'A':{'greater_than':0.,'lesser_than':5.},
'beta':{'equal_to':0.},
'W':{'equal_to':10.7e-3},
'L':{'equal_to':10.47e-3},
'h':{'equal_to':3e-3},
}
# XYZ + WLH
else:
param_opt_range = {'x':{'greater_than':-0.1,'lesser_than':0.1},
'y':{'greater_than':-0.1,'lesser_than':0.1},
'z':{'equal_to':0.},
'A':{'greater_than':0.,'lesser_than':5.},
'beta':{'equal_to':0.},
'W':{'greater_than':1.0e-3,'lesser_than':10.7e-3},
'L':{'greater_than':1.0e-3,'lesser_than':10.47e-3},
'h':{'greater_than':1.0e-3,'lesser_than':3e-3},}
else:
if PATCH_TOPOLOGY == 'Grid':
PatchDist = Grid(
n_patches=NO_OF_PATCHES,
Wmax=20.47e-3,
Lmax=10.47e-3,
clearence= 10.47e-3
)
elif PATCH_TOPOLOGY == 'Spiral':
PatchDist = Spiral(
n_patches=NO_OF_PATCHES,
Wmax=20.47e-3,
Lmax=10.47e-3,
clearence= 10.47e-3
)
elif PATCH_TOPOLOGY == 'Spiral2':
PatchDist = Spiral2(
n_patches=NO_OF_PATCHES,
Wmax=20.47e-3,
Lmax=10.47e-3,
clearence= 10.47e-3
)
x_pos,y_pos = PatchDist.get_path_pos()
param_opt_range = { 'x':{'equal_to':x_pos},
'y':{'equal_to':y_pos},
'z':{'equal_to':0},
'A':{'greater_than':0.,'lesser_than':5.},
'beta':{'equal_to':0.},
'W':{'greater_than':1.0e-3,'lesser_than':PatchDist.Wmax},
'L':{'greater_than':1.0e-3,'lesser_than':PatchDist.Lmax},
'h':{'greater_than':1.0e-3,'lesser_than':3e-3}}
PatchArray = PatchAntennaArray(
n_patches=NO_OF_PATCHES,
Freq=14e9,
Er=2.5,
param_range=param_opt_range
)
# print('initial_elements:\n',PatchArray.element_array)
# update_to = [0.,0.,1.,0.,0.,1.]
# PatchArray.update_array_params(update_to)
# print('updates_elements:\n',PatchArray.element_array)
steps_per_gen = 0
no_of_generations_done = 0
def fitness_func(solution, solution_idx):
global steps_per_gen
global no_of_generations_done
steps_per_gen +=1
# print(steps_per_gen)
if steps_per_gen%sol_per_pop == 0:
steps_per_gen = 0
no_of_generations_done += 1
print("Generation:",no_of_generations_done)
PatchArray.CalculateFieldSumPatch(dAngleInDeg=delta_angle_for_integration)
PatchArray.update_array_params(solution)
fitness = PatchArray.get_gain(dAngleInDeg=delta_angle_for_integration)
return fitness
fitness_function = fitness_func
num_generations = NO_OF_GENERATIONS # Number of generations.
num_parents_mating = 15 # Number of solutions to be selected as parents in the mating pool.
# To prepare the initial population, there are 2 ways:
# 1) Prepare it yourself and pass it to the initial_population parameter. This way is useful when the user wants to start the genetic algorithm with a custom initial population.
# 2) Assign valid integer values to the sol_per_pop and num_genes parameters. If the initial_population parameter exists, then the sol_per_pop and num_genes parameters are useless.
sol_per_pop = 25 # Number of solutions in the population.
num_genes = len(PatchArray.params_to_opt_range)
gene_ranges = [{'low':p_2_opt_range[0],'high':p_2_opt_range[1]} for p_2_opt_range in PatchArray.params_to_opt_range]
parent_selection_type = "sss" # Type of parent selection.
keep_parents = 10 # Number of parents to keep in the next population. -1 means keep all parents and 0 means keep nothing.
crossover_type = "single_point" # Type of the crossover operator.
# Parameters of the mutation operation.
mutation_type = "random" # Type of the mutation operator.
mutation_percent_genes = 10 # Percentage of genes to mutate. This parameter has no action if the parameter mutation_num_genes exists or when mutation_type is None.
print("Number of Params to Optimize:",num_genes)
print("Number of Patches to Optimize:",NO_OF_PATCHES)
print("TOPOLOGY:",PATCH_TOPOLOGY,'\n')
| 2.3125 | 2 |
authlib/deprecate.py | YPCrumble/authlib | 3,172 | 12787685 | import warnings
class AuthlibDeprecationWarning(DeprecationWarning):
pass
warnings.simplefilter('always', AuthlibDeprecationWarning)
def deprecate(message, version=None, link_uid=None, link_file=None):
if version:
message += '\nIt will be compatible before version {}.'.format(version)
if link_uid and link_file:
message += '\nRead more <https://git.io/{}#file-{}-md>'.format(link_uid, link_file)
warnings.warn(AuthlibDeprecationWarning(message), stacklevel=2)
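# Example usage (illustrative only; the message and link values are made up):
#
#   deprecate('Use "create_authorization_url" instead.', version='1.0',
#             link_uid='authlib-deprecate', link_file='client')
#
# This emits an AuthlibDeprecationWarning with the version note and a
# "Read more" link appended to the message.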
| 2.390625 | 2 |
ansible/roles/cumulus/files/amis.py | Kitware/HPCCloud-deploy | 18 | 12787686 | import boto.ec2
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('aws_access_key_id')
parser.add_argument('aws_secret_access_key')
parser.add_argument('region')
config = parser.parse_args()
conn = boto.ec2.connect_to_region(config.region,
aws_access_key_id=config.aws_access_key_id,
aws_secret_access_key=config.aws_secret_access_key)
images = conn.get_all_images(owners=['self'])
values = []
for image in images:
values.append('"%s": "%s"' % (image.name, image.id))
print( ','.join(values))
| 2.46875 | 2 |
myblog/blog_api/models.py | EUGINELETHAL/BLOG-API | 0 | 12787687 | <gh_stars>0
# Create your models here.
# -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
class Category(models.Model):
"""
Defines a blog category
"""
name = models.CharField(max_length=150)
def __str__(self):
return self.name
class Article(models.Model):
"""
Defines a blog article
"""
writer = models.ForeignKey(User, related_name="articles", on_delete=models.CASCADE )
title = models.CharField(max_length=150)
content = models.TextField()
category = models.ForeignKey(Category, related_name="articles", on_delete=models.CASCADE)
image = models.CharField(max_length=250)
def __str__(self):
return self.content
| 2.53125 | 3 |
python/mypython3.py | Georgakopoulos-Sp/Adlr | 1 | 12787688 |
import numpy as np
#import matplotlib.pyplot as plt
# Make sure that caffe is on the python path:
caffe_root = '/home/legolas/CNN_libs/caffe_mike/caffe/python/caffe' # this file is expected to be in {caffe_root}/examples
import sys
sys.path.insert(0, caffe_root + 'python')
import caffe
#net = caffe.Net( net_def_proto , weights )
net = caffe.Net( '/home/legolas/CNN_libs/mycaffe_test/examples/cifar10/cifar10_quick_train_test.prototxt' , '/home/legolas/CNN_libs/mycaffe_test/examples/cifar10/cifar10_quick_iter_4000.caffemodel', caffe.TEST )
#net = caffe.Net( '/home/legolas/CNN_libs/mycaffe_test/examples/cifar10/cifar10_quick_train_test.prototxt' , caffe.TEST )
caffe.set_device(0)
caffe.set_mode_gpu()
#net.set_phase_test()
accuracy = 0
myrange = 100
for i in range(myrange):
    out = net.forward()
    accuracy += out['accuracy']
    # print(net.blobs['accuracy'].data)
    print(out['accuracy'])
print(accuracy)
| 2.1875 | 2 |
tools/update-dependencies.py | dfjxs/l2tdevtools | 0 | 12787689 | <filename>tools/update-dependencies.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
"""Script to update the dependencies in various configuration files."""
from __future__ import unicode_literals
import io
import os
import sys
from l2tdevtools import dependencies
from l2tdevtools.helpers import project
from l2tdevtools.dependency_writers import appveyor_yml
from l2tdevtools.dependency_writers import check_dependencies
from l2tdevtools.dependency_writers import dependencies_py
from l2tdevtools.dependency_writers import dpkg
from l2tdevtools.dependency_writers import gift_copr
from l2tdevtools.dependency_writers import gift_ppa
from l2tdevtools.dependency_writers import jenkins_scripts
from l2tdevtools.dependency_writers import linux_scripts
from l2tdevtools.dependency_writers import macos
from l2tdevtools.dependency_writers import pylint_rc
from l2tdevtools.dependency_writers import requirements
from l2tdevtools.dependency_writers import setup
from l2tdevtools.dependency_writers import tox_ini
from l2tdevtools.dependency_writers import travis
from l2tdevtools.dependency_writers import travis_yml
def Main():
"""The main program function.
Returns:
bool: True if successful or False if not.
"""
l2tdevtools_path = os.path.abspath(__file__)
l2tdevtools_path = os.path.dirname(l2tdevtools_path)
l2tdevtools_path = os.path.dirname(l2tdevtools_path)
project_path = os.getcwd()
projects_helper = project.ProjectHelper(project_path)
project_definition = projects_helper.ReadDefinitionFile()
dependencies_helper = dependencies.DependencyHelper()
test_dependencies_helper = None
if os.path.exists('test_dependencies.ini'):
test_dependencies_helper = dependencies.DependencyHelper(
'test_dependencies.ini')
for writer_class in (
pylint_rc.PylintRcWriter, travis.TravisRunWithTimeoutScriptWriter,
requirements.RequirementsWriter, requirements.TestRequirementsWriter,
setup.SetupCfgWriter, setup.SetupPyWriter,
travis.TravisInstallScriptWriter, travis.TravisRunPylintScriptWriter,
travis.TravisRunPython3ScriptWriter, travis.TravisRunTestsScriptWriter,
travis.TravisRunWithTimeoutScriptWriter, travis_yml.TravisYMLWriter):
writer = writer_class(
l2tdevtools_path, project_definition, dependencies_helper,
test_dependencies_helper)
writer.Write()
for writer_class in (
appveyor_yml.AppveyorYmlWriter,
check_dependencies.CheckDependenciesWriter,
dependencies_py.DependenciesPyWriter, dpkg.DPKGCompatWriter,
dpkg.DPKGControlWriter, dpkg.DPKGRulesWriter,
gift_copr.GIFTCOPRInstallScriptWriter,
gift_ppa.GIFTPPAInstallScriptPY3Writer,
jenkins_scripts.LinuxRunEndToEndTestsScriptWriter,
jenkins_scripts.RunPython3EndToEndTestsScriptWriter,
linux_scripts.UbuntuInstallationScriptWriter,
macos.MacOSInstallScriptWriter, macos.MacOSMakeDistScriptWriter,
macos.MacOSUninstallScriptWriter, tox_ini.ToxIniWriter):
if not os.path.exists(writer_class.PATH):
continue
writer = writer_class(
l2tdevtools_path, project_definition, dependencies_helper,
test_dependencies_helper)
writer.Write()
output_path = os.path.join('utils', 'dependencies.py')
if os.path.exists(output_path):
input_path = os.path.join(
l2tdevtools_path, 'l2tdevtools', 'dependencies.py')
file_data = []
with io.open(input_path, 'r', encoding='utf-8') as file_object:
for line in file_object.readlines():
if 'GetDPKGDepends' in line:
break
file_data.append(line)
file_data.pop()
file_data = ''.join(file_data)
with io.open(output_path, 'w', encoding='utf-8') as file_object:
file_object.write(file_data)
# Remove old scripts.
script_path = os.path.join('config', 'linux', 'gift_ppa_install.sh')
if os.path.isfile(script_path):
os.remove(script_path)
script_path = os.path.join('config', 'travis', 'run_coverage.sh')
if os.path.isfile(script_path):
os.remove(script_path)
return True
if __name__ == '__main__':
if not Main():
sys.exit(1)
else:
sys.exit(0)
| 2.03125 | 2 |
vagrancy/cli/__init__.py | seeraven/vagrancyCtrl | 0 | 12787690 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2020 by <NAME> <<EMAIL>>
# All rights reserved.
# This file is part of vagrancyCtrl (https://github.com/seeraven/vagrancyCtrl)
# and is released under the "BSD 3-Clause License". Please see the LICENSE file
# that is included as part of this package.
#
"""Command line interface used by vagrancyCtrl."""
# -----------------------------------------------------------------------------
# Module Import
# -----------------------------------------------------------------------------
import sys
import argcomplete
from .parser_cmd_delete import get_subparser_delete
from .parser_cmd_download import get_subparser_download
from .parser_cmd_print import get_subparser_print
from .parser_cmd_upload import get_subparser_upload
from .parser_main import get_main_parser
# -----------------------------------------------------------------------------
# Exported Functions
# -----------------------------------------------------------------------------
def get_parser():
"""Get the command line argument parser for vagrancyCtrl.
Returns:
argparse.ArgumentParser: A new ArgumentParser object of the parser.
"""
parser = get_main_parser()
subparsers = parser.add_subparsers()
get_subparser_delete(subparsers)
get_subparser_download(subparsers)
get_subparser_print(subparsers)
get_subparser_upload(subparsers)
return parser
def vagrancy_ctrl_main():
"""Handle the vagrancyCtrl actions."""
parser = get_parser()
argcomplete.autocomplete(parser)
args = parser.parse_args()
if hasattr(args, 'func'):
args.func(args)
else:
parser.print_help(sys.stderr)
sys.exit(1)
# -----------------------------------------------------------------------------
# EOF
# -----------------------------------------------------------------------------
| 1.859375 | 2 |
daisy_server.py | J-Pai/408DaisyJetson | 1 | 12787691 | <gh_stars>1-10
#!/usr/bin/env python3
from flask import Flask, render_template, Response
from flask_httpauth import HTTPBasicAuth
import argparse
from multiprocessing.managers import SyncManager
from queue import Empty
import io
import base64
import matplotlib.pyplot as plt
from pymongo import MongoClient
MONGODB_URI = "mongodb://Teddy:password@<PASSWORD> <EMAIL>.mlab.com:53889/records"
client = MongoClient(MONGODB_URI, connectTimeoutMS=30000)
db = client.get_default_database()
memory_records = db.memory_records
exercise_records = db.exercise_records
class NeuronManager(SyncManager):
pass
NeuronManager.register('get_web_neuron')
NeuronManager.register('get_alexa_neuron')
manager = NeuronManager(address=('', 4081), authkey=b'daisy')
connected = True
try:
manager.connect()
print("Eye connected to neuron manager.")
except ConnectionRefusedError:
print("Eye not connected to neuron manager.")
connected = False
app = Flask(__name__)
auth = HTTPBasicAuth()
users = {
"daisy_login": "iknowthisisinsecure"
}
@auth.get_password
def get_pw(username):
if username in users:
return users.get(username)
return None
@app.route('/')
@auth.login_required
def index():
user = None
if connected:
alexa_neuron = manager.get_alexa_neuron()
user = alexa_neuron.get('user')
return render_template('index.html',
mem_graph=mem_game_graph(),
ex_graph=exercise_graph(),
currUser=user)
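# gen() below produces a multipart MJPEG stream: each iteration pulls the latest
# JPEG bytes from the shared web neuron and yields them as a single '--frame'
# part; /video_feed serves this generator with the 'multipart/x-mixed-replace'
# mimetype so the browser keeps replacing the displayed frame.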
def gen():
while True:
img = None
if connected:
web_neuron = manager.get_web_neuron()
img = web_neuron.get('image')
if img is None:
img = b'\0'
yield( b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + img + b'\r\n' )
@app.route('/video_feed')
def video_feed():
return Response(gen(), mimetype='multipart/x-mixed-replace; boundary=frame')
def get_MEMORY_RECORD(name):
record = memory_records.find_one({"user":name})
return record
def get_EXERCISE_RECORD(name):
record = exercise_records.find_one({"user":name})
return record
def mem_game_graph():
if not connected:
        return '<p>Manager is not connected</p>'
alexa_neuron = manager.get_alexa_neuron()
record = get_MEMORY_RECORD(alexa_neuron.get('user'))
if record is None:
        return '<p>No memory game data recorded for user</p>'
count = record['count'] + 1
data = record['data']
xaxis = list(range(1, count))
yaxis = data
y_mean = [record['overall_performance']]*len(xaxis)
fig, ax = plt.subplots()
data_line = ax.plot(xaxis,yaxis, label='Data', marker='o')
mean_line = ax.plot(xaxis,y_mean, label='Mean', linestyle='--')
ax.set(xlabel='Number of times played (#)', ylabel='Percentage Score (%)',
title='Memory Game Performance Analytics')
legend = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
img = io.BytesIO()
plt.savefig(img, format='png', bbox_extra_artists=(legend,), bbox_inches='tight')
plt.close(fig)
img.seek(0)
imgData = base64.b64encode(img.getvalue()).decode()
img.close()
return '<img class="pure-img" src="data:image/png;base64, {}">'.format(imgData)
def exercise_graph():
if not connected:
        return '<p>Manager is not connected</p>'
alexa_neuron = manager.get_alexa_neuron()
record = get_EXERCISE_RECORD(alexa_neuron.get('user'))
if record is None:
        return '<p>No exercise data recorded for user</p>'
count = record['count'] + 1
data = record['data']
xaxis = list(range(1, count))
yaxis = data
y_mean = [record['overall_performance']]*len(xaxis)
fig, ax = plt.subplots()
data_line = ax.plot(xaxis,yaxis, label='Data', marker='o')
mean_line = ax.plot(xaxis,y_mean, label='Mean', linestyle='--')
ax.set(xlabel='Number of times exercised (#)', ylabel='Repetitions (#)',
title='Exercise Performance Analytics')
legend = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
img = io.BytesIO()
plt.savefig(img, format='png', bbox_extra_artists=(legend,), bbox_inches='tight')
img.seek(0)
plt.close(fig)
imgData = base64.b64encode(img.getvalue()).decode()
img.close()
return '<img class="pure-img" src="data:image/png;base64, {}">'.format(imgData)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Start Daisy's Internet Connection")
parser.add_argument("--set-ip",
dest="ip",
default="localhost",
help="Specify the IP address to use for initialization")
parser.add_argument("--set-port",
dest="port",
default="8080",
help="Specify the port to use for initialization")
args = parser.parse_args()
app.run(args.ip, int(args.port))
| 2.453125 | 2 |
stubs/micropython-esp32-1_13-103/btree.py | RonaldHiemstra/micropython-stubs | 38 | 12787692 | """
Module: 'btree' on esp32 1.13.0-103
"""
# MCU: (sysname='esp32', nodename='esp32', release='1.13.0', version='v1.13-103-gb137d064e on 2020-10-09', machine='ESP32 module (spiram) with ESP32')
# Stubber: 1.3.4
DESC = 2
INCL = 1
def open():
pass
| 1.515625 | 2 |
venv/lib/python2.7/site-packages/jinja/parser.py | mallika2011/Recreating-VLabs-CSO-Experiment | 7 | 12787693 | <reponame>mallika2011/Recreating-VLabs-CSO-Experiment
# -*- coding: utf-8 -*-
"""
jinja.parser
~~~~~~~~~~~~
Implements the template parser.
The Jinja template parser is not a real parser but a combination of the
python compiler package and some postprocessing. The tokens yielded by
the lexer are used to separate template data and expressions. The
expression tokens are then converted into strings again and processed
by the python parser.
:copyright: 2007 by <NAME>.
:license: BSD, see LICENSE for more details.
"""
from jinja import nodes
from jinja.datastructure import StateTest
from jinja.exceptions import TemplateSyntaxError
from jinja.utils import set
__all__ = ['Parser']
# general callback functions for the parser
end_of_block = StateTest.expect_token('block_end',
msg='expected end of block tag')
end_of_variable = StateTest.expect_token('variable_end',
msg='expected end of variable')
end_of_comment = StateTest.expect_token('comment_end',
msg='expected end of comment')
# internal tag callbacks
switch_for = StateTest.expect_token('else', 'endfor')
end_of_for = StateTest.expect_token('endfor')
switch_if = StateTest.expect_token('else', 'elif', 'endif')
end_of_if = StateTest.expect_token('endif')
end_of_filter = StateTest.expect_token('endfilter')
end_of_macro = StateTest.expect_token('endmacro')
end_of_call = StateTest.expect_token('endcall')
end_of_block_tag = StateTest.expect_token('endblock')
end_of_trans = StateTest.expect_token('endtrans')
# this ends a tuple
tuple_edge_tokens = set(['rparen', 'block_end', 'variable_end', 'in',
'recursive'])
class Parser(object):
"""
The template parser class.
Transforms sourcecode into an abstract syntax tree.
"""
def __init__(self, environment, source, filename=None):
self.environment = environment
if isinstance(source, str):
source = source.decode(environment.template_charset, 'ignore')
if isinstance(filename, unicode):
filename = filename.encode('utf-8')
self.source = source
self.filename = filename
self.closed = False
#: set for blocks in order to keep them unique
self.blocks = set()
#: mapping of directives that require special treatment
self.directives = {
# "fake" directives that just trigger errors
'raw': self.parse_raw_directive,
'extends': self.parse_extends_directive,
# real directives
'for': self.parse_for_loop,
'if': self.parse_if_condition,
'cycle': self.parse_cycle_directive,
'call': self.parse_call_directive,
'set': self.parse_set_directive,
'filter': self.parse_filter_directive,
'print': self.parse_print_directive,
'macro': self.parse_macro_directive,
'block': self.parse_block_directive,
'include': self.parse_include_directive,
'trans': self.parse_trans_directive
}
#: set of directives that are only available in a certain
#: context.
self.context_directives = set([
'elif', 'else', 'endblock', 'endfilter', 'endfor', 'endif',
'endmacro', 'endraw', 'endtrans', 'pluralize'
])
#: get the `no_variable_block` flag
self.no_variable_block = self.environment.lexer.no_variable_block
self.stream = environment.lexer.tokenize(source, filename)
def parse_raw_directive(self):
"""
Handle fake raw directive. (real raw directives are handled by
the lexer. But if there are arguments to raw or the end tag
is missing the parser tries to resolve this directive. In that
case present the user a useful error message.
"""
if self.stream:
raise TemplateSyntaxError('raw directive does not support '
'any arguments.', self.stream.lineno,
self.filename)
raise TemplateSyntaxError('missing end tag for raw directive.',
self.stream.lineno, self.filename)
def parse_extends_directive(self):
"""
Handle the extends directive used for inheritance.
"""
raise TemplateSyntaxError('mispositioned extends tag. extends must '
'be the first tag of a template.',
self.stream.lineno, self.filename)
def parse_for_loop(self):
"""
Handle a for directive and return a ForLoop node
"""
token = self.stream.expect('for')
item = self.parse_tuple_expression(simplified=True)
if not item.allows_assignments():
raise TemplateSyntaxError('cannot assign to expression',
token.lineno, self.filename)
self.stream.expect('in')
seq = self.parse_tuple_expression()
if self.stream.current.type == 'recursive':
self.stream.next()
recursive = True
else:
recursive = False
self.stream.expect('block_end')
body = self.subparse(switch_for)
# do we have an else section?
if self.stream.current.type == 'else':
self.stream.next()
self.stream.expect('block_end')
else_ = self.subparse(end_of_for, True)
else:
self.stream.next()
else_ = None
self.stream.expect('block_end')
return nodes.ForLoop(item, seq, body, else_, recursive,
token.lineno, self.filename)
def parse_if_condition(self):
"""
Handle if/else blocks.
"""
token = self.stream.expect('if')
expr = self.parse_expression()
self.stream.expect('block_end')
tests = [(expr, self.subparse(switch_if))]
else_ = None
# do we have an else section?
while True:
if self.stream.current.type == 'else':
self.stream.next()
self.stream.expect('block_end')
else_ = self.subparse(end_of_if, True)
elif self.stream.current.type == 'elif':
self.stream.next()
expr = self.parse_expression()
self.stream.expect('block_end')
tests.append((expr, self.subparse(switch_if)))
continue
else:
self.stream.next()
break
self.stream.expect('block_end')
return nodes.IfCondition(tests, else_, token.lineno, self.filename)
def parse_cycle_directive(self):
"""
Handle {% cycle foo, bar, baz %}.
"""
token = self.stream.expect('cycle')
expr = self.parse_tuple_expression()
self.stream.expect('block_end')
return nodes.Cycle(expr, token.lineno, self.filename)
def parse_set_directive(self):
"""
Handle {% set foo = 'value of foo' %}.
"""
token = self.stream.expect('set')
name = self.stream.expect('name')
self.test_name(name.value)
self.stream.expect('assign')
value = self.parse_expression()
if self.stream.current.type == 'bang':
self.stream.next()
scope_local = False
else:
scope_local = True
self.stream.expect('block_end')
return nodes.Set(name.value, value, scope_local,
token.lineno, self.filename)
def parse_filter_directive(self):
"""
Handle {% filter foo|bar %} directives.
"""
token = self.stream.expect('filter')
filters = []
while self.stream.current.type != 'block_end':
if filters:
self.stream.expect('pipe')
token = self.stream.expect('name')
args = []
if self.stream.current.type == 'lparen':
self.stream.next()
while self.stream.current.type != 'rparen':
if args:
self.stream.expect('comma')
args.append(self.parse_expression())
self.stream.expect('rparen')
filters.append((token.value, args))
self.stream.expect('block_end')
body = self.subparse(end_of_filter, True)
self.stream.expect('block_end')
return nodes.Filter(body, filters, token.lineno, self.filename)
def parse_print_directive(self):
"""
Handle {% print foo %}.
"""
token = self.stream.expect('print')
expr = self.parse_tuple_expression()
node = nodes.Print(expr, token.lineno, self.filename)
self.stream.expect('block_end')
return node
def parse_macro_directive(self):
"""
Handle {% macro foo bar, baz %} as well as
{% macro foo(bar, baz) %}.
"""
token = self.stream.expect('macro')
macro_name = self.stream.expect('name')
self.test_name(macro_name.value)
if self.stream.current.type == 'lparen':
self.stream.next()
needle_token = 'rparen'
else:
needle_token = 'block_end'
args = []
while self.stream.current.type != needle_token:
if args:
self.stream.expect('comma')
name = self.stream.expect('name').value
self.test_name(name)
if self.stream.current.type == 'assign':
self.stream.next()
default = self.parse_expression()
else:
default = None
args.append((name, default))
self.stream.next()
if needle_token == 'rparen':
self.stream.expect('block_end')
body = self.subparse(end_of_macro, True)
self.stream.expect('block_end')
return nodes.Macro(macro_name.value, args, body, token.lineno,
self.filename)
def parse_call_directive(self):
"""
Handle {% call foo() %}...{% endcall %}
"""
token = self.stream.expect('call')
expr = self.parse_call_expression()
self.stream.expect('block_end')
body = self.subparse(end_of_call, True)
self.stream.expect('block_end')
return nodes.Call(expr, body, token.lineno, self.filename)
def parse_block_directive(self):
"""
Handle block directives used for inheritance.
"""
token = self.stream.expect('block')
name = self.stream.expect('name').value
# check if this block does not exist by now.
if name in self.blocks:
raise TemplateSyntaxError('block %r defined twice' %
name, token.lineno,
self.filename)
self.blocks.add(name)
if self.stream.current.type != 'block_end':
lineno = self.stream.lineno
expr = self.parse_tuple_expression()
node = nodes.Print(expr, lineno, self.filename)
body = nodes.NodeList([node], lineno, self.filename)
self.stream.expect('block_end')
else:
# otherwise parse the body and attach it to the block
self.stream.expect('block_end')
body = self.subparse(end_of_block_tag, True)
self.stream.expect('block_end')
return nodes.Block(name, body, token.lineno, self.filename)
def parse_include_directive(self):
"""
Handle the include directive used for template inclusion.
"""
token = self.stream.expect('include')
template = self.stream.expect('string').value
self.stream.expect('block_end')
return nodes.Include(template, token.lineno, self.filename)
def parse_trans_directive(self):
"""
Handle translatable sections.
"""
trans_token = self.stream.expect('trans')
# string based translations {% trans "foo" %}
if self.stream.current.type == 'string':
text = self.stream.expect('string')
self.stream.expect('block_end')
return nodes.Trans(text.value, None, None, None,
trans_token.lineno, self.filename)
# block based translations
replacements = {}
plural_var = None
while self.stream.current.type != 'block_end':
if replacements:
self.stream.expect('comma')
name = self.stream.expect('name')
if self.stream.current.type == 'assign':
self.stream.next()
value = self.parse_expression()
else:
value = nodes.NameExpression(name.value, name.lineno,
self.filename)
if name.value in replacements:
raise TemplateSyntaxError('translation variable %r '
'is defined twice' % name.value,
name.lineno, self.filename)
replacements[name.value] = value
if plural_var is None:
plural_var = name.value
self.stream.expect('block_end')
def process_variable():
var_name = self.stream.expect('name')
if var_name.value not in replacements:
raise TemplateSyntaxError('unregistered translation variable'
" '%s'." % var_name.value,
var_name.lineno, self.filename)
buf.append('%%(%s)s' % var_name.value)
buf = singular = []
plural = None
while True:
token = self.stream.current
if token.type == 'data':
buf.append(token.value.replace('%', '%%'))
self.stream.next()
elif token.type == 'variable_begin':
self.stream.next()
process_variable()
self.stream.expect('variable_end')
elif token.type == 'block_begin':
self.stream.next()
if plural is None and self.stream.current.type == 'pluralize':
self.stream.next()
if self.stream.current.type == 'name':
plural_var = self.stream.expect('name').value
plural = buf = []
elif self.stream.current.type == 'endtrans':
self.stream.next()
self.stream.expect('block_end')
break
else:
if self.no_variable_block:
process_variable()
else:
raise TemplateSyntaxError('blocks are not allowed '
'in trans tags',
self.stream.lineno,
self.filename)
self.stream.expect('block_end')
else:
assert False, 'something very strange happened'
singular = u''.join(singular)
if plural is not None:
plural = u''.join(plural)
return nodes.Trans(singular, plural, plural_var, replacements,
trans_token.lineno, self.filename)
def parse_expression(self):
"""
Parse one expression from the stream.
"""
return self.parse_conditional_expression()
def parse_subscribed_expression(self):
"""
Like parse_expression but parses slices too. Because this
parsing function requires a border the two tokens rbracket
and comma mark the end of the expression in some situations.
"""
lineno = self.stream.lineno
if self.stream.current.type == 'colon':
self.stream.next()
args = [None]
else:
node = self.parse_expression()
if self.stream.current.type != 'colon':
return node
self.stream.next()
args = [node]
if self.stream.current.type == 'colon':
args.append(None)
elif self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
if self.stream.current.type == 'colon':
self.stream.next()
if self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
else:
args.append(None)
return nodes.SliceExpression(*(args + [lineno, self.filename]))
def parse_conditional_expression(self):
"""
Parse a conditional expression (foo if bar else baz)
"""
lineno = self.stream.lineno
expr1 = self.parse_or_expression()
while self.stream.current.type == 'if':
self.stream.next()
expr2 = self.parse_or_expression()
self.stream.expect('else')
expr3 = self.parse_conditional_expression()
expr1 = nodes.ConditionalExpression(expr2, expr1, expr3,
lineno, self.filename)
lineno = self.stream.lineno
return expr1
def parse_or_expression(self):
"""
Parse something like {{ foo or bar }}.
"""
lineno = self.stream.lineno
left = self.parse_and_expression()
while self.stream.current.type == 'or':
self.stream.next()
right = self.parse_and_expression()
left = nodes.OrExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_and_expression(self):
"""
Parse something like {{ foo and bar }}.
"""
lineno = self.stream.lineno
left = self.parse_compare_expression()
while self.stream.current.type == 'and':
self.stream.next()
right = self.parse_compare_expression()
left = nodes.AndExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_compare_expression(self):
"""
Parse something like {{ foo == bar }}.
"""
known_operators = set(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq', 'in'])
lineno = self.stream.lineno
expr = self.parse_add_expression()
ops = []
while True:
if self.stream.current.type in known_operators:
op = self.stream.current.type
self.stream.next()
ops.append([op, self.parse_add_expression()])
elif self.stream.current.type == 'not' and \
self.stream.look().type == 'in':
self.stream.skip(2)
ops.append(['not in', self.parse_add_expression()])
else:
break
if not ops:
return expr
return nodes.CompareExpression(expr, ops, lineno, self.filename)
def parse_add_expression(self):
"""
Parse something like {{ foo + bar }}.
"""
lineno = self.stream.lineno
left = self.parse_sub_expression()
while self.stream.current.type == 'add':
self.stream.next()
right = self.parse_sub_expression()
left = nodes.AddExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_sub_expression(self):
"""
Parse something like {{ foo - bar }}.
"""
lineno = self.stream.lineno
left = self.parse_concat_expression()
while self.stream.current.type == 'sub':
self.stream.next()
right = self.parse_concat_expression()
left = nodes.SubExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_concat_expression(self):
"""
Parse something like {{ foo ~ bar }}.
"""
lineno = self.stream.lineno
args = [self.parse_mul_expression()]
while self.stream.current.type == 'tilde':
self.stream.next()
args.append(self.parse_mul_expression())
if len(args) == 1:
return args[0]
return nodes.ConcatExpression(args, lineno, self.filename)
def parse_mul_expression(self):
"""
Parse something like {{ foo * bar }}.
"""
lineno = self.stream.lineno
left = self.parse_div_expression()
while self.stream.current.type == 'mul':
self.stream.next()
right = self.parse_div_expression()
left = nodes.MulExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_div_expression(self):
"""
Parse something like {{ foo / bar }}.
"""
lineno = self.stream.lineno
left = self.parse_floor_div_expression()
while self.stream.current.type == 'div':
self.stream.next()
right = self.parse_floor_div_expression()
left = nodes.DivExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_floor_div_expression(self):
"""
Parse something like {{ foo // bar }}.
"""
lineno = self.stream.lineno
left = self.parse_mod_expression()
while self.stream.current.type == 'floordiv':
self.stream.next()
right = self.parse_mod_expression()
left = nodes.FloorDivExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_mod_expression(self):
"""
Parse something like {{ foo % bar }}.
"""
lineno = self.stream.lineno
left = self.parse_pow_expression()
while self.stream.current.type == 'mod':
self.stream.next()
right = self.parse_pow_expression()
left = nodes.ModExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_pow_expression(self):
"""
Parse something like {{ foo ** bar }}.
"""
lineno = self.stream.lineno
left = self.parse_unary_expression()
while self.stream.current.type == 'pow':
self.stream.next()
right = self.parse_unary_expression()
left = nodes.PowExpression(left, right, lineno, self.filename)
lineno = self.stream.lineno
return left
def parse_unary_expression(self):
"""
Parse all kinds of unary expressions.
"""
if self.stream.current.type == 'not':
return self.parse_not_expression()
elif self.stream.current.type == 'sub':
return self.parse_neg_expression()
elif self.stream.current.type == 'add':
return self.parse_pos_expression()
return self.parse_primary_expression()
def parse_not_expression(self):
"""
Parse something like {{ not foo }}.
"""
token = self.stream.expect('not')
node = self.parse_unary_expression()
return nodes.NotExpression(node, token.lineno, self.filename)
def parse_neg_expression(self):
"""
Parse something like {{ -foo }}.
"""
token = self.stream.expect('sub')
node = self.parse_unary_expression()
return nodes.NegExpression(node, token.lineno, self.filename)
def parse_pos_expression(self):
"""
Parse something like {{ +foo }}.
"""
token = self.stream.expect('add')
node = self.parse_unary_expression()
return nodes.PosExpression(node, token.lineno, self.filename)
def parse_primary_expression(self, parse_postfix=True):
"""
Parse a primary expression such as a name or literal.
"""
current = self.stream.current
if current.type == 'name':
if current.value in ('true', 'false'):
node = self.parse_bool_expression()
elif current.value == 'none':
node = self.parse_none_expression()
elif current.value == 'undefined':
node = self.parse_undefined_expression()
elif current.value == '_':
node = self.parse_gettext_call()
else:
node = self.parse_name_expression()
elif current.type in ('integer', 'float'):
node = self.parse_number_expression()
elif current.type == 'string':
node = self.parse_string_expression()
elif current.type == 'regex':
node = self.parse_regex_expression()
elif current.type == 'lparen':
node = self.parse_paren_expression()
elif current.type == 'lbracket':
node = self.parse_list_expression()
elif current.type == 'lbrace':
node = self.parse_dict_expression()
elif current.type == 'at':
node = self.parse_set_expression()
else:
raise TemplateSyntaxError("unexpected token '%s'" %
self.stream.current,
self.stream.current.lineno,
self.filename)
if parse_postfix:
node = self.parse_postfix_expression(node)
return node
def parse_tuple_expression(self, enforce=False, simplified=False):
"""
Parse multiple expressions into a tuple. This can also return
just one expression which is not a tuple. If you want to enforce
a tuple, pass it enforce=True.
"""
lineno = self.stream.lineno
if simplified:
parse = self.parse_primary_expression
else:
parse = self.parse_expression
args = []
is_tuple = False
while True:
if args:
self.stream.expect('comma')
if self.stream.current.type in tuple_edge_tokens:
break
args.append(parse())
if self.stream.current.type == 'comma':
is_tuple = True
else:
break
if not is_tuple and args:
if enforce:
raise TemplateSyntaxError('tuple expected', lineno,
self.filename)
return args[0]
return nodes.TupleExpression(args, lineno, self.filename)
def parse_bool_expression(self):
"""
Parse a boolean literal.
"""
token = self.stream.expect('name')
if token.value == 'true':
value = True
elif token.value == 'false':
value = False
else:
raise TemplateSyntaxError("expected boolean literal",
token.lineno, self.filename)
return nodes.ConstantExpression(value, token.lineno, self.filename)
def parse_none_expression(self):
"""
Parse a none literal.
"""
token = self.stream.expect('name', 'none')
return nodes.ConstantExpression(None, token.lineno, self.filename)
def parse_undefined_expression(self):
"""
Parse an undefined literal.
"""
token = self.stream.expect('name', 'undefined')
return nodes.UndefinedExpression(token.lineno, self.filename)
def parse_gettext_call(self):
"""
parse {{ _('foo') }}.
"""
# XXX: check if only one argument was passed and if
# it is a string literal. Maybe that should become a special
# expression anyway.
token = self.stream.expect('name', '_')
node = nodes.NameExpression(token.value, token.lineno, self.filename)
return self.parse_call_expression(node)
def parse_name_expression(self):
"""
Parse any name.
"""
token = self.stream.expect('name')
self.test_name(token.value)
return nodes.NameExpression(token.value, token.lineno, self.filename)
def parse_number_expression(self):
"""
Parse a number literal.
"""
token = self.stream.current
if token.type not in ('integer', 'float'):
raise TemplateSyntaxError('integer or float literal expected',
token.lineno, self.filename)
self.stream.next()
return nodes.ConstantExpression(token.value, token.lineno, self.filename)
def parse_string_expression(self):
"""
Parse a string literal.
"""
token = self.stream.expect('string')
return nodes.ConstantExpression(token.value, token.lineno, self.filename)
def parse_regex_expression(self):
"""
Parse a regex literal.
"""
token = self.stream.expect('regex')
return nodes.RegexExpression(token.value, token.lineno, self.filename)
def parse_paren_expression(self):
"""
        Parse a parenthesized expression.
"""
self.stream.expect('lparen')
try:
return self.parse_tuple_expression()
finally:
self.stream.expect('rparen')
def parse_list_expression(self):
"""
Parse something like {{ [1, 2, "three"] }}
"""
token = self.stream.expect('lbracket')
items = []
while self.stream.current.type != 'rbracket':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbracket':
break
items.append(self.parse_expression())
self.stream.expect('rbracket')
return nodes.ListExpression(items, token.lineno, self.filename)
def parse_dict_expression(self):
"""
Parse something like {{ {1: 2, 3: 4} }}
"""
token = self.stream.expect('lbrace')
items = []
while self.stream.current.type != 'rbrace':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbrace':
break
key = self.parse_expression()
self.stream.expect('colon')
value = self.parse_expression()
items.append((key, value))
self.stream.expect('rbrace')
return nodes.DictExpression(items, token.lineno, self.filename)
def parse_set_expression(self):
"""
Parse something like {{ @(1, 2, 3) }}.
"""
token = self.stream.expect('at')
self.stream.expect('lparen')
items = []
while self.stream.current.type != 'rparen':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rparen':
break
items.append(self.parse_expression())
self.stream.expect('rparen')
return nodes.SetExpression(items, token.lineno, self.filename)
def parse_postfix_expression(self, node):
"""
Parse a postfix expression such as a filter statement or a
function call.
"""
while True:
current = self.stream.current.type
if current == 'dot' or current == 'lbracket':
node = self.parse_subscript_expression(node)
elif current == 'lparen':
node = self.parse_call_expression(node)
elif current == 'pipe':
node = self.parse_filter_expression(node)
elif current == 'is':
node = self.parse_test_expression(node)
else:
break
return node
def parse_subscript_expression(self, node):
"""
Parse a subscript statement. Gets attributes and items from an
object.
"""
lineno = self.stream.lineno
if self.stream.current.type == 'dot':
self.stream.next()
token = self.stream.current
if token.type in ('name', 'integer'):
arg = nodes.ConstantExpression(token.value, token.lineno,
self.filename)
else:
raise TemplateSyntaxError('expected name or number',
token.lineno, self.filename)
self.stream.next()
elif self.stream.current.type == 'lbracket':
self.stream.next()
args = []
while self.stream.current.type != 'rbracket':
if args:
self.stream.expect('comma')
args.append(self.parse_subscribed_expression())
self.stream.expect('rbracket')
if len(args) == 1:
arg = args[0]
else:
arg = nodes.TupleExpression(args, lineno, self.filename)
else:
raise TemplateSyntaxError('expected subscript expression',
                                      self.stream.lineno, self.filename)
return nodes.SubscriptExpression(node, arg, lineno, self.filename)
def parse_call_expression(self, node=None):
"""
Parse a call.
"""
if node is None:
node = self.parse_primary_expression(parse_postfix=False)
token = self.stream.expect('lparen')
args = []
kwargs = []
dyn_args = None
dyn_kwargs = None
require_comma = False
def ensure(expr):
if not expr:
raise TemplateSyntaxError('invalid syntax for function '
'call expression', token.lineno,
self.filename)
while self.stream.current.type != 'rparen':
if require_comma:
self.stream.expect('comma')
# support for trailing comma
if self.stream.current.type == 'rparen':
break
if self.stream.current.type == 'mul':
ensure(dyn_args is None and dyn_kwargs is None)
self.stream.next()
dyn_args = self.parse_expression()
elif self.stream.current.type == 'pow':
ensure(dyn_kwargs is None)
self.stream.next()
dyn_kwargs = self.parse_expression()
else:
ensure(dyn_args is None and dyn_kwargs is None)
if self.stream.current.type == 'name' and \
self.stream.look().type == 'assign':
key = self.stream.current.value
self.stream.skip(2)
kwargs.append((key, self.parse_expression()))
else:
ensure(not kwargs)
args.append(self.parse_expression())
require_comma = True
self.stream.expect('rparen')
return nodes.CallExpression(node, args, kwargs, dyn_args,
dyn_kwargs, token.lineno,
self.filename)
def parse_filter_expression(self, node):
"""
Parse filter calls.
"""
lineno = self.stream.lineno
filters = []
while self.stream.current.type == 'pipe':
self.stream.next()
token = self.stream.expect('name')
args = []
if self.stream.current.type == 'lparen':
self.stream.next()
while self.stream.current.type != 'rparen':
if args:
self.stream.expect('comma')
args.append(self.parse_expression())
self.stream.expect('rparen')
filters.append((token.value, args))
return nodes.FilterExpression(node, filters, lineno, self.filename)
def parse_test_expression(self, node):
"""
Parse test calls.
"""
token = self.stream.expect('is')
if self.stream.current.type == 'not':
self.stream.next()
negated = True
else:
negated = False
name = self.stream.expect('name').value
args = []
if self.stream.current.type == 'lparen':
self.stream.next()
while self.stream.current.type != 'rparen':
if args:
self.stream.expect('comma')
args.append(self.parse_expression())
self.stream.expect('rparen')
elif self.stream.current.type in ('name', 'string', 'integer',
'float', 'lparen', 'lbracket',
'lbrace', 'regex'):
args.append(self.parse_expression())
node = nodes.TestExpression(node, name, args, token.lineno,
self.filename)
if negated:
node = nodes.NotExpression(node, token.lineno, self.filename)
return node
def test_name(self, name):
"""
Test if a name is not a special constant
"""
if name in ('true', 'false', 'none', 'undefined', '_'):
raise TemplateSyntaxError('expected name not special constant',
self.stream.lineno, self.filename)
def subparse(self, test, drop_needle=False):
"""
Helper function used to parse the sourcecode until the test
function which is passed a tuple in the form (lineno, token, data)
returns True. In that case the current token is pushed back to
the stream and the generator ends.
The test function is only called for the first token after a
block tag. Variable tags are *not* aliases for {% print %} in
that case.
If drop_needle is True the needle_token is removed from the
stream.
"""
if self.closed:
raise RuntimeError('parser is closed')
result = []
buffer = []
next = self.stream.next
lineno = self.stream.lineno
def assemble_list():
push_buffer()
return nodes.NodeList(result, lineno, self.filename)
def push_variable():
buffer.append((True, self.parse_tuple_expression()))
def push_data():
buffer.append((False, self.stream.expect('data')))
def push_buffer():
if not buffer:
return
template = []
variables = []
for is_var, data in buffer:
if is_var:
template.append('%s')
variables.append(data)
else:
template.append(data.value.replace('%', '%%'))
result.append(nodes.Text(u''.join(template), variables,
buffer[0][1].lineno, self.filename))
del buffer[:]
def push_node(node):
push_buffer()
result.append(node)
while self.stream:
token_type = self.stream.current.type
if token_type == 'variable_begin':
next()
push_variable()
self.stream.expect('variable_end')
elif token_type == 'raw_begin':
next()
push_data()
self.stream.expect('raw_end')
elif token_type == 'block_begin':
next()
if test is not None and test(self.stream.current):
if drop_needle:
next()
return assemble_list()
handler = self.directives.get(self.stream.current.type)
if handler is None:
if self.no_variable_block:
push_variable()
self.stream.expect('block_end')
elif self.stream.current.type in self.context_directives:
raise TemplateSyntaxError('unexpected directive %r.' %
self.stream.current.type,
lineno, self.filename)
else:
name = self.stream.current.value
raise TemplateSyntaxError('unknown directive %r.' %
name, lineno, self.filename)
else:
node = handler()
if node is not None:
push_node(node)
elif token_type == 'data':
push_data()
# this should be unreachable code
else:
assert False, "unexpected token %r" % self.stream.current
if test is not None:
msg = isinstance(test, StateTest) and ': ' + test.msg or ''
raise TemplateSyntaxError('unexpected end of stream' + msg,
self.stream.lineno, self.filename)
return assemble_list()
def sanitize_tree(self, body, extends):
self._sanitize_tree([body], [body], extends, body)
return body
def _sanitize_tree(self, nodelist, stack, extends, body):
"""
This is not a closure because python leaks memory if it is. It's used
by `parse()` to make sure blocks do not trigger unexpected behavior.
"""
for node in nodelist:
if extends is not None and \
node.__class__ is nodes.Block and \
stack[-1] is not body:
for n in stack:
if n.__class__ is nodes.Block:
break
else:
raise TemplateSyntaxError('misplaced block %r, '
'blocks in child '
'templates must be '
'either top level or '
'located in a block '
'tag.' % node.name,
node.lineno,
self.filename)
stack.append(node)
self._sanitize_tree(node.get_child_nodes(), stack, extends, body)
stack.pop()
def parse(self):
"""
Parse the template and return a Template node. This also does some
post processing sanitizing and parses for an extends tag.
"""
if self.closed:
raise RuntimeError('parser is closed')
try:
# get the leading whitespace, if we are not in a child
# template we push that back to the stream later.
leading_whitespace = self.stream.read_whitespace()
# parse an optional extends which *must* be the first node
# of a template.
if self.stream.current.type == 'block_begin' and \
self.stream.look().type == 'extends':
self.stream.skip(2)
extends = self.stream.expect('string').value
self.stream.expect('block_end')
else:
extends = None
if leading_whitespace:
self.stream.shift(leading_whitespace)
body = self.sanitize_tree(self.subparse(None), extends)
return nodes.Template(extends, body, 1, self.filename)
finally:
self.close()
def close(self):
"""Clean up soon."""
self.closed = True
        self.stream = self.directives = self.blocks = \
self.environment = None
| 2.375 | 2 |
Project 16 -- Skin-Cancer-Detection-CNN-Keras/get_dataset.py | Vauke/Deep-Neural-Networks-HealthCare | 2 | 12787694 | <gh_stars>1-10
from download_dataset_subset import download_dataset_subset, validate_image
import re
import os
import shutil
import requests
from os.path import join
from threading import Thread
from PIL import Image
# Required Parameters:
# Specify the current dataset size
size = 13786
# Optional parameters:
# Specify the path to the dir the images will be saved in
images_dir = join(os.pardir, 'Data', 'Images')
# Specify the path to the dir the descriptions will be saved in
descs_dir = join(os.pardir, 'Data', 'Descriptions')
# Choose the number of images each thread will download
thread_subset_size = 300
def main():
# If any of the images dir, descs dir or ids file exists - remove them so we won't override data
# and perhaps create corrupted data
create_or_recreate_dir(images_dir)
create_or_recreate_dir(descs_dir)
# 1. Get the ids of all the images
ids = get_ids()
# 2. Download all the images using their ids
download_dataset(ids)
print('Finished downloading the dataset')
def get_ids():
print('Collecting all images ids')
# Specify the url that lists the meta data about the images (id, name, etc..)
url = 'https://isic-archive.com/api/v1/image?limit={0}&offset=0&sort=name&sortdir=1'.format(size)
# Get the images metadata
response = requests.get(url, stream=True)
# Parse as json
meta_data = response.json()
# Extract all the ids
ids = [str(meta_data[index]['_id']) for index in range(size)]
return ids
def download_dataset(ids):
# Determine the dataset subsets which multiple threads will download
bins = range(0, size, thread_subset_size)
# Create the threads to download subsets of the dataset
# and determine the edges for the current thread
threads = []
for idx, left_edge in enumerate(bins):
        # Determine the right edge
right_edge = left_edge + thread_subset_size
if right_edge >= size:
right_edge = size
# Define the thread on the current subset
thread = Thread(target=download_dataset_subset, kwargs={'start': left_edge, 'end': right_edge, 'ids': ids[left_edge: right_edge],
'images_dir': images_dir, 'descs_dir': descs_dir, 'thread_id': idx})
# Start it and add it to the list of threads
thread.start()
threads.append(thread)
# Wait for all the threads to finish
for thread in threads:
thread.join()
print('All threads have finished')
def validate_images():
# We would like to check that all the images are valid
try:
for index, image in enumerate(os.listdir(images_dir)):
image_path = join(images_dir, image)
validate_image(image_path)
if (index + 1) % 100 == 0:
print('Validated {0} out of {1} images'.format(index + 1, size))
print('Finished validating the images')
except IOError as e:
print(e.message)
print("The image {0} wasn't downloaded successfully. "
"Please Open an issue in the github repository together with the error".format(image))
def create_or_recreate_dir(dir_path):
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
os.makedirs(dir_path)
if __name__ == '__main__':
main() | 2.78125 | 3 |
DailyProgrammer/DP20130111C.py | DayGitH/Python-Challenges | 2 | 12787695 | """
[01/11/13] Challenge #116 [Hard] Maximum Random Walk
https://www.reddit.com/r/dailyprogrammer/comments/16dbyh/011113_challenge_116_hard_maximum_random_walk/
# [](#HardIcon) *(Hard)*: Maximum Random Walk
Consider the classic random walk: at each step, you have a 1/2 chance of taking a step to the left and a 1/2 chance of
taking a step to the right. Your expected position after a period of time is zero; that is the average over many such
random walks is that you end up where you started. A more interesting question is what is the expected rightmost
position you will attain during the walk.
*Author: thePersonCSC*
# Formal Inputs & Outputs
## Input Description
The input consists of an integer n, which is the number of steps to take (1 <= n <= 1000). The final two are double
precision floating-point values L and R which are the probabilities of taking a step left or right respectively at each
step (0 <= L <= 1, 0 <= R <= 1, 0 <= L + R <= 1). Note: the probability of not taking a step would be 1-L-R.
## Output Description
A single double precision floating-point value which is the expected rightmost position you will obtain during the walk
(to, at least, four decimal places).
# Sample Inputs & Outputs
## Sample Input
walk(1,.5,.5)
walk(4,.5,.5)
walk(10,.5,.4)
## Sample Output
walk(1,.5,.5) returns 0.5000
walk(4,.5,.5) returns 1.1875
walk(10,.5,.4) returns 1.4965
# Challenge Input
What is walk(1000,.5,.4)?
## Challenge Input Solution
(No solution provided by author)
# Note
* Have your code execute in less that 2 minutes with any input where n <= 1000
* I took this problem from the regional ACM ICPC of Greater New York.
"""
def main():
pass
if __name__ == "__main__":
main()
| 3.6875 | 4 |
common/sync.py | sefcom/dbling | 9 | 12787696 | # *-* coding: utf-8 *-*
"""Context manager for easily using a pymemcache mutex.
The `acquire_lock` context manager makes it easy to use :mod:`pymemcache` (which
uses memcached) to create a mutex for a certain portion of code. Of course,
this requires the :mod:`pymemcache` library to be installed, which in turn
requires `memcached <https://memcached.org>`_ to be installed.
"""
import json
import logging
from contextlib import contextmanager
from time import sleep
from pymemcache.client.base import Client
__all__ = ['acquire_lock', 'LockUnavailable']
class LockUnavailable(Exception):
"""Raised when a cached lock is already in use."""
def json_serializer(key, value):
# Borrowed from the pymemcache docs: https://pymemcache.readthedocs.io/en/latest/getting_started.html#serialization
if type(value) == str:
return value, 1
return json.dumps(value), 2
def json_deserializer(key, value, flags):
# Borrowed from the pymemcache docs: https://pymemcache.readthedocs.io/en/latest/getting_started.html#serialization
if flags == 1:
return value
if flags == 2:
return json.loads(value)
raise Exception("Unknown serialization format")
cache_client = Client(('localhost', 11211), serializer=json_serializer, deserializer=json_deserializer)
@contextmanager
def acquire_lock(lock_id, wait=0, max_retries=0):
"""Acquire a lock on the given lock ID, or raise an exception.
This context manager can be used as a mutex by doing something like the
following:
>>> from time import sleep
>>> job_done = False
>>> while not job_done:
... try:
... with acquire_lock('some id'):
... sensitive_function()
... job_done = True
... except LockUnavailable:
... # Sleep for a couple seconds while the other code runs and
... # hopefully completes
... sleep(2)
In the above example, ``sensitive_function()`` should only be run if no
other code is also running it. A more concise way of writing the above
example would be to use the other parameters, like this:
>>> with acquire_lock('some id', wait=2):
... sensitive_function()
:param lock_id: The ID for this lock. See :mod:`pymemcache`'s documentation
on `key constraints
<https://pymemcache.readthedocs.io/en/latest/getting_started.html#key-constraints>`_
for more info.
:type lock_id: str or bytes
:param int wait: Indicates how many seconds after failing to acquire the
lock to wait (sleep) before retrying. When set to 0 (default), will
immediately raise a `LockUnavailable` exception.
:param int max_retries: Maximum number of times to retry to acquire the
lock before raising a `LockUnavailable` exception. When set to 0
(default), will always retry. Has essentially no effect if ``wait`` is
0.
:raises LockUnavailable: when a lock with the same ID already exists and
``wait`` is set to 0.
"""
assert isinstance(lock_id, str) or isinstance(lock_id, bytes)
if (not isinstance(wait, int)) or wait < 0:
wait = 0
if (not isinstance(max_retries, int)) or max_retries < 0:
max_retries = 0
# Get lock
retries = 0
while retries <= max_retries:
if cache_client.add(lock_id, str('Locked by dbling')): # We got the lock
break
if wait == 0:
raise LockUnavailable
if max_retries > 0:
retries += 1
logging.info('Unable to acquire lock "{}". Will retry in {} seconds.'.format(lock_id, wait))
sleep(wait)
# Tell the `with` statement to execute
yield
# Release lock, don't wait for the reply
cache_client.delete(lock_id, noreply=True)
| 2.984375 | 3 |
Dynamic_Programming/coin_1.py | Mayner0220/Programmers | 1 | 12787697 | # https://www.acmicpc.net/problem/2293
# Count the number of ways to make the amount k using the n given coin denominations.
n, k = map(int, input().split(" "))
coin = [int(input()) for _ in range(n)]
# dp[j] = number of ways to form the amount j with the coins processed so far
dp = [0] * (k+1)
dp[0] = 1
for i in coin:
    # iterate amounts in increasing order so each coin may be used any number of times
    for j in range(i, k+1):
        dp[j] += dp[j-i]
print(dp[k]) | 2.9375 | 3 |
fidelity.py | syedraza2/Variational_Quantum_Embedding | 1 | 12787698 | <reponame>syedraza2/Variational_Quantum_Embedding
"""
Fidelity classifier
===================
Implements the fidelity classifier.
``predict()`` returns the predicted label or continuous output for a new input
``accuracy()`` returns the accuracy on a test set
The 'exact' implementation computes overlap of ket vectors numerically.
The 'circuit' implementation performs a swap test on all data pairs.
"""
import pennylane as qml
from pennylane import numpy as np
import dill as pickle # to load featuremap
def negate(item):
if isinstance(item, list):
return [-i for i in item]
else:
return -item
def cphase_inv(k):
gate = [[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
            [0, 0, 0, np.exp(complex(0, -2*np.pi/2**k))]]
return np.array(gate)
def _fast(x_new, A_samples, B_samples, featmap, pars, n_inp):
"""
Implements the fidelity measurement circuit using the "overlap with 0" trick.
"""
# Allocate registers
dev = qml.device('default.qubit', wires=n_inp)
# Identify input register wires
wires = list(range(n_inp))
Proj0 = np.zeros((2**n_inp, 2**n_inp))
Proj0[0, 0] = 1
@qml.qnode(dev)
def circuit(weights, x1=None, x2=None):
# Apply embedding
featmap(weights, x1, wires)
# Apply inverse embedding
featmap(negate(weights), negate(x2), wires)
# Measure overlap with |0..0>
return qml.expval(qml.Hermitian(Proj0, wires=wires))
# Compute mean overlap with A
overlap_A = 0
for a in A_samples:
overlap_A += circuit(pars, x1=a, x2=x_new)
overlap_A = overlap_A/len(A_samples)
# Compute mean overlap with B
overlap_B = 0
for b in B_samples:
overlap_B += circuit(pars, x1=b, x2=x_new)
overlap_B = overlap_B/len(B_samples)
return overlap_A, overlap_B
def _circuit(x_new, A_samples, B_samples, featmap, pars, n_inp):
"""
Implements the fidelity measurement circuit using samples of class A and B.
"""
# Allocate registers
n_qubits = 2*n_inp + 1 # Total number of qubits
dev = qml.device('default.qubit', wires=n_qubits)
# Identify input register wires
wires_x1 = list(range(1, n_inp+1))
wires_x2 = list(range(n_inp+1, 2*n_inp+1))
@qml.qnode(dev)
def circuit(weights, x1=None, x2=None):
# Load the two inputs into two different registers
featmap(weights, x1, wires_x1)
featmap(weights, x2, wires_x2)
# Do a SWAP test
qml.Hadamard(wires=0)
for k in range(n_inp):
qml.CSWAP(wires=[0, k + 1, n_inp + k + 1])
qml.Hadamard(wires=0)
# Measure overlap by checking ancilla
return qml.expval(qml.PauliZ(0))
# Compute mean overlap with A
overlap_A = 0
for a in A_samples:
overlap_A += circuit(pars, x1=a, x2=x_new)
overlap_A = overlap_A/len(A_samples)
# Compute mean overlap with B
overlap_B = 0
for b in B_samples:
overlap_B += circuit(pars, x1=b, x2=x_new)
overlap_B = overlap_B/len(B_samples)
return overlap_A, overlap_B
def _exact(x_new, A_samples, B_samples, featmap, n_inp, pars):
"""Calculates the analytical result of the fidelity measurement,
overlap_A = \sum_i p_A |<\phi(x_new)|\phi(a_i)>|^2,
overlap_B = \sum_i p_B |<\phi(x_new)|\phi(b_i)>|^2,
using numpy as well as pennylane to simulate the feature map.
"""
dev = qml.device('default.qubit', wires=n_inp)
@qml.qnode(dev)
def fm(weights, x=None):
"""Circuit to get the state after feature map"""
featmap(weights, x, range(n_inp))
return qml.expval(qml.PauliZ(0))
# Compute feature states for A
A_states = []
for a in A_samples:
fm(pars, x=a)
phi_a = dev._state
A_states.append(phi_a)
# Compute feature states for B
B_states = []
for b in B_samples:
fm(pars, x=b)
phi_b = dev._state
B_states.append(phi_b)
# Get feature state for new input
fm(pars, x=x_new)
phi_x = dev._state
# Put together
overlap_A = sum([np.abs(np.vdot(phi_x, phi_a)) ** 2 for phi_a in A_states])
overlap_A = overlap_A/len(A_states)
overlap_B = sum([np.abs(np.vdot(phi_x, phi_b)) ** 2 for phi_b in B_states])
overlap_B = overlap_B/len(B_states)
return overlap_A, overlap_B
def predict(x_new, path_to_featmap, n_samples=None,
probs_A=None, probs_B=None, binary=True, implementation=None, seed=None):
"""
Predicts which class the new input is from, using either exact numerical simulation
or a simulated quantum circuit.
As a convention, the class labeled by +1 is 'A', the class labeled by -1 is 'B'.
:param x_new: new input to predict label for
:param path_to_featmap: Where to load featmap from.
:param n_samples: How many samples to use, if None, use full class (simulating perfect measurement)
:param probs_A: Probabilities with which to draw each samples from A. If None, use uniform.
:param probs_B: Probabilities with which to draw each samples from B. If None, use uniform.
:param binary: If True, return probability, else return value {-1, 1}
:param implementation: String that chooses the background implementation. Can be 'exact',
'fast' or 'circuit'
:return: probability or prediction of class for x_new
"""
if seed is not None:
np.random.seed(seed)
# Load settings from result of featmap learning function
settings = np.load(path_to_featmap, allow_pickle=True).item()
featmap = pickle.loads(settings['featmap'])
pars = settings['pars']
n_inp = settings['n_wires']
X = settings['X']
Y = settings['Y']
A = X[Y == 1]
B = X[Y == -1]
if probs_A is not None and len(probs_A) != len(A):
raise ValueError("Length of probs_A and A have to be the same, got {} and {}."
.format(len(probs_A), len(A)))
if probs_B is not None and len(probs_B) != len(B):
raise ValueError("Length of probs_B and B have to be the same, got {} and {}."
.format(len(probs_B), len(B)))
# Sample subsets from A and B
if n_samples is None:
# Consider all samples from A, B
A_samples = A
B_samples = B
else:
selectA = np.random.choice(range(len(A)), size=(n_samples,), replace=True, p=probs_A)
A_samples = A[selectA]
selectB = np.random.choice(range(len(B)), size=(n_samples,), replace=True, p=probs_B)
B_samples = B[selectB]
if implementation == "exact":
overlap_A, overlap_B = _exact(x_new=x_new, A_samples=A_samples, B_samples=B_samples,
featmap=featmap, n_inp=n_inp, pars=pars)
elif implementation == "circuit":
overlap_A, overlap_B = _circuit(x_new=x_new, A_samples=A_samples, B_samples=B_samples,
featmap=featmap, pars=pars, n_inp=n_inp)
elif implementation == "fast":
overlap_A, overlap_B = _fast(x_new=x_new, A_samples=A_samples, B_samples=B_samples,
featmap=featmap, pars=pars, n_inp=n_inp)
else:
raise ValueError("Implementation not recognized.")
if binary:
if overlap_A > overlap_B:
return 1
elif overlap_A < overlap_B:
return -1
else:
return 0
else:
return overlap_A - overlap_B
def accuracy(X, Y, path_to_featmap, n_samples=None, probs_A=None, probs_B=None,
implementation=None, seed=None):
"""
Computes the ratio of correctly classified samples to all samples.
:param X: Array of test inputs
:param Y: 1-d array of test labels
:param path_to_featmap: Where to load featmap from.
:param n_samples: How many samples to use, if None, use full class (simulating perfect measurement)
:param probs_A: Probabilities with which to draw each samples from A. If None, use uniform.
:param probs_B: Probabilities with which to draw each samples from B. If None, use uniform.
:param implementation: String that chooses the background implementation.
:return: accuracy of predictions on test set
"""
acc = []
for x_test, y_test in zip(X, Y):
y_pred = predict(x_new=x_test,
path_to_featmap=path_to_featmap,
n_samples=n_samples,
probs_A=probs_A,
probs_B=probs_B,
binary=True,
implementation=implementation,
seed=seed)
if y_test == y_pred:
acc.append(1)
else:
acc.append(0)
return sum(acc)/len(acc)
| 2.578125 | 3 |
engine/src/hopeit/dataobjects/payload.py | pcanto-hopeit/hopeit.engine | 15 | 12787699 | <reponame>pcanto-hopeit/hopeit.engine
"""
Payload tools to serialize and deserialze event payloads and responses, including dataobjects
"""
import json
from typing import Type, Generic, Optional, Union
from dataclasses_jsonschema import ValidationError
from hopeit.dataobjects import EventPayloadType
_ATOMIC_TYPES = (str, int, float, bool)
_COLLECTION_TYPES = (dict, list, set)
_MAPPING_TYPES = (dict, )
_LIST_TYPES = (list, set)
_UNORDERED_LIST_TYPES = (set, )
class Payload(Generic[EventPayloadType]):
"""
Convenience ser/deser functions for @dataobject decorated object (@see DataObject)
"""
@staticmethod
def from_json(json_str: Union[str, bytes],
datatype: Type[EventPayloadType],
key: str = 'value') -> EventPayloadType:
"""
Converts json_str to desired datatype
:param json_str: str containing valid json,
or string representation for atomic values
:param datatype: supported types defined in EventPayload
:param key: key to extract atomic types from
:return: instance of datatype
"""
if datatype in _ATOMIC_TYPES:
return datatype(json.loads(json_str).get(key)) # type: ignore
if datatype in _COLLECTION_TYPES:
return datatype(json.loads(json_str)) # type: ignore
assert getattr(datatype, 'from_json'), \
f"{datatype} should be annotated with @dataobject"
try:
return datatype.from_json(json_str, validate=datatype.__data_object__['validate']) # type: ignore
except ValidationError as e:
raise ValueError(f"Cannot read JSON: type={datatype} validation_error={str(e)}") from e
@staticmethod
def from_obj(data: Union[dict, list],
datatype: Type[EventPayloadType],
key: str = 'value',
item_datatype: Optional[Type[EventPayloadType]] = None) -> EventPayloadType:
"""
Converts dictionary to desired datatype
:param data: dictionary containing fields expected on datatype
:param datatype: supported types defined in EventPayload
:param key: key to extract atomic types from
:param item_datatype: optional datatype to parse items in collections
:return: instance of datatype
"""
if datatype in _ATOMIC_TYPES:
return datatype(data.get(key)) # type: ignore
if datatype in _MAPPING_TYPES:
if item_datatype and isinstance(data, _MAPPING_TYPES):
return { # type: ignore
k: Payload.from_obj(v, item_datatype, key) for k, v in data.items()
}
return datatype(data) # type: ignore
if datatype in _LIST_TYPES:
if item_datatype and isinstance(data, _LIST_TYPES):
return datatype([ # type: ignore
Payload.from_obj(v, item_datatype, key) for v in data
])
return datatype(data) # type: ignore
assert getattr(datatype, 'from_dict'), \
f"{datatype} should be annotated with @dataobject"
try:
return datatype.from_dict(data, validate=datatype.__data_object__['validate']) # type: ignore
except ValidationError as e:
raise ValueError(f"Cannot read object: type={datatype} validation_error={str(e)}") from e
@staticmethod
def to_json(payload: EventPayloadType, key: Optional[str] = 'value') -> str:
"""
Converts event payload to json string
:param payload: EventPayload, instance of supported object type
:param key: key name used in generated json when serializing atomic values
:return: str containing json representation of data. In case of simple datatypes,
a json str of key:value form will be generated using key parameter if it's not None.
"""
if isinstance(payload, _ATOMIC_TYPES): # immutable supported types
if key is None:
return json.dumps(payload)
return json.dumps({key: payload})
if isinstance(payload, _LIST_TYPES):
return "[" + ', '.join(Payload.to_json(item, key=None) for item in payload) + "]"
if isinstance(payload, _MAPPING_TYPES):
return "{" + ', '.join(
f'"{str(k)}": {Payload.to_json(item, key=None)}' for k, item in payload.items()
) + "}"
assert getattr(payload, 'to_json'), \
f"{type(payload)} should be annotated with @dataobject"
try:
return payload.to_json(validate=payload.__data_object__['validate']) # type: ignore
except (ValidationError, AttributeError) as e:
raise ValueError(f"Cannot convert to JSON: type={type(payload)} validation_error={str(e)}") from e
@staticmethod
def to_obj(payload: EventPayloadType, key: Optional[str] = 'value') -> Union[dict, list]:
"""
Converts event payload to dictionary or list
:param payload: EventPayload, instance of supported object type
:param key: key name used in generated json when serializing atomic values
:return: dict or list containing mapped representation of data. In case of simple datatypes,
a key:value form will be generated using key parameter. All objects mappable to dict will
be converted. Flat collections will be converted to list.
"""
if isinstance(payload, _ATOMIC_TYPES): # immutable supported types
if key is None:
return payload # type: ignore # only for recursive use
return {key: payload}
if isinstance(payload, _UNORDERED_LIST_TYPES):
return [Payload.to_obj(v, key=None) for v in sorted(payload)]
if isinstance(payload, _LIST_TYPES):
return [Payload.to_obj(v, key=None) for v in payload]
if isinstance(payload, _MAPPING_TYPES):
return {k: Payload.to_obj(v, key=None) for k, v in payload.items()}
assert getattr(payload, 'to_dict'), \
f"{type(payload)} should be annotated with @dataobject"
try:
return payload.to_dict(validate=payload.__data_object__['validate']) # type: ignore
except (ValidationError, AttributeError) as e:
raise ValueError(f"Cannot convert to dict: type={type(payload)} validation_error={str(e)}") from e
@staticmethod
def parse_form_field(field_data: Union[str, dict], datatype: Type[EventPayloadType],
key: str = 'value') -> EventPayloadType:
"""Helper to parse dataobjects from form-fields where encoding type is not correctly set to json"""
if isinstance(field_data, str):
return Payload.from_json(field_data, datatype, key)
return datatype.from_dict(field_data) # type: ignore
| 1.929688 | 2 |
Sorting/Gnome-Sort.py | harshil2004/Data-Structures-and-Algorithms | 14 | 12787700 | # Python implementation of gnome sort
# This simple sorting technique is also known as stupid sort
# Contributed by <NAME>
# github link :https://github.com/amitsat27
# userid : amitsat27
# Gnome sort function which helps to sort the array
def gnomesort(array,size):
# initialising index to 0
index = 0
while(index < size):
        # if at the start of the array, move to index 1
        if(index == 0):
            index = index + 1
        # if the current element is not smaller than the previous one,
        # the pair is in order, so move one step to the right
        if(array[index] >= array[index - 1]):
            index = index + 1
        else:
            # otherwise swap the two elements and step back to the left
            array[index], array[index-1] = array[index-1], array[index]
            index = index - 1
return array
# declaring array of numbers
arrayofNumbers = [8,1,6,9,2,-1]
# taking length of array of numbers
size = len(arrayofNumbers)
# function call for gnomeSort
arrayofNumbers = gnomesort(arrayofNumbers, size)
# printing the sorted array
print("Sorted Numbers after applying Gnome sort : ")
for i in arrayofNumbers:
print(i,end=" ")
| 4.46875 | 4 |
changes/backends/jenkins/generic_builder.py | vault-the/changes | 443 | 12787701 | <reponame>vault-the/changes<gh_stars>100-1000
from __future__ import absolute_import
from flask import current_app
from changes.config import db
from changes.models.snapshot import SnapshotImage
from changes.models.command import FutureCommand
from changes.utils.http import build_internal_uri
from changes.buildsteps.base import LXCConfig
from .builder import JenkinsBuilder
class JenkinsGenericBuilder(JenkinsBuilder):
def __init__(self, master_urls=None, setup_script='', teardown_script='',
artifacts=(), reset_script='', path='', workspace='',
snapshot_script=None, clean=True, cluster=None, *args, **kwargs):
"""Builder for JenkinsGenericBuildStep. See JenkinsGenericBuildStep
for information on most of these arguments.
"""
self.setup_script = setup_script
self.script = kwargs.pop('script')
self.teardown_script = teardown_script
self.snapshot_script = snapshot_script
self.reset_script = reset_script
self.path = path
self.workspace = workspace
self.artifacts = artifacts
self.clean = clean
# See configuration for more details; by default, the default build type is
# legacy which sets up no additional configuration.
self.build_type = kwargs.pop('build_type',
current_app.config['CHANGES_CLIENT_DEFAULT_BUILD_TYPE'])
if self.build_type is None:
self.build_type = current_app.config['CHANGES_CLIENT_DEFAULT_BUILD_TYPE']
# If a server url is not provided (default: None), set it to a blank string
self.artifact_server_base_url = current_app.config['ARTIFACTS_SERVER'] or ''
# we do this as early as possible in order to propagate the
# error faster. The build description is simply the configuration
# key'd by the build_type, documented in config.py
self.build_desc = self.load_build_desc(self.build_type)
super(JenkinsGenericBuilder, self).__init__(master_urls, cluster=cluster, *args, **kwargs)
def load_build_desc(self, build_type):
build_desc = current_app.config['CHANGES_CLIENT_BUILD_TYPES'][build_type]
self.validate_build_desc(build_type, build_desc)
return build_desc
# TODO validate configuration at start of application or use a linter to validate
# configuration before pushing/deploying
def validate_build_desc(self, build_type, build_desc):
if build_desc.get('uses_client', False):
if 'jenkins-command' not in build_desc:
raise ValueError('[CHANGES_CLIENT_BUILD_TYPES INVALID] build type %s missing required key: jenkins-command' % build_type)
if 'adapter' not in build_desc:
raise ValueError('[CHANGES_CLIENT_BUILD_TYPES INVALID] build type %s missing required key: adapter' % build_type)
# These three methods all describe which build specification,
# setup, and teardown should be used to create a snapshot
# build. In the generic builder, this is the same as a normal build,
# but sharded builds need to override these with the shard equivalents
# in order to create the correct snapshot.
def get_snapshot_build_desc(self):
return self.build_desc
def get_snapshot_setup_script(self):
return self.setup_script
def get_snapshot_teardown_script(self):
return self.teardown_script
def get_expected_image(self, job_id):
"""
Get the snapshot-image (filesystem tarball for this jobstep).
If this returns None, it is a normal build (the more common case),
otherwise it returns the id of the snapshot image, which indicates
to where the build agent should upload the snapshot onto s3.
"""
return db.session.query(
SnapshotImage.id,
).filter(
SnapshotImage.job_id == job_id,
).scalar()
def _get_build_desc(self, jobstep):
if self.get_expected_image(jobstep.job_id):
return self.get_snapshot_build_desc()
return self.build_desc
def get_lxc_config(self, jobstep):
"""
Get the LXC configuration, if the LXC adapter should be used.
Args:
jobstep (JobStep): The JobStep to get the LXC config for.
Returns:
LXCConfig: The config to use for this jobstep, or None.
"""
build_desc = self._get_build_desc(jobstep)
if build_desc.get('uses_client') and build_desc.get('adapter') == 'lxc':
app_cfg = current_app.config
snapshot_bucket = app_cfg.get('SNAPSHOT_S3_BUCKET', '')
default_pre = self.debug_config.get('prelaunch_script') or app_cfg.get('LXC_PRE_LAUNCH', '')
default_post = app_cfg.get('LXC_POST_LAUNCH', '')
default_release = app_cfg.get('LXC_RELEASE', 'trusty')
return LXCConfig(s3_bucket=snapshot_bucket,
compression='lz4',
prelaunch=build_desc.get('pre-launch', default_pre),
postlaunch=build_desc.get('post-launch', default_post),
release=build_desc.get('release', default_release),
template=None,
mirror=None,
security_mirror=None)
return None
def get_job_parameters(self, job, changes_bid, setup_script=None,
script=None, teardown_script=None, path=None):
"""
Gets a list containing dictionaries, each with two keys - name and value.
These key,value pairs correspond to the input variables in Jenkins.
changes_bid is actually the jobstep id, and job is the current job.
*_script and path override the corresponding fields of the current
builder.
"""
params = super(JenkinsGenericBuilder, self).get_job_parameters(
job, changes_bid=changes_bid)
if path is None:
path = self.path
if setup_script is None:
setup_script = self.setup_script
if script is None:
script = self.script
if teardown_script is None:
teardown_script = self.teardown_script
project = job.project
repository = project.repository
vcs = repository.get_vcs()
if vcs:
repo_url = vcs.remote_url
else:
repo_url = repository.url
snapshot_bucket = current_app.config.get('SNAPSHOT_S3_BUCKET', '')
default_pre = self.debug_config.get('prelaunch_script') or current_app.config.get('LXC_PRE_LAUNCH', '')
default_post = current_app.config.get('LXC_POST_LAUNCH', '')
default_release = current_app.config.get('LXC_RELEASE', 'trusty')
build_desc = self.build_desc
# This is the image we are expected to produce or None
# if this is not a snapshot build.
expected_image = self.get_expected_image(job.id)
# Setting script to be empty essentially forces nothing
# but setup/teardown to be run, making a clean snapshot
snapshot_id = ''
if expected_image:
snapshot_id = expected_image.hex
# this is a no-op command in sh, essentially equivalent
# to '' except it tells changes-client that we are
# deliberately doing absolutely nothing. However,
# if snapshot script is not None, then we just use
# that in place of script (so the normal script is
# never used).
script = self.snapshot_script or ':'
# sharded builds will have different setup/teardown/build_desc
# scripts between shards and collector so we need to
# use the shard ones
build_desc = self.get_snapshot_build_desc()
setup_script = self.get_snapshot_setup_script()
teardown_script = self.get_snapshot_teardown_script()
# CHANGES_BID, the jobstep id, is provided by superclass
params.update({
'CHANGES_PID': project.slug,
'PROJECT_CONFIG': project.get_config_path(),
'REPO_URL': repo_url,
'SETUP_SCRIPT': setup_script,
'SCRIPT': script,
'TEARDOWN_SCRIPT': teardown_script,
'RESET_SCRIPT': self.reset_script,
'REPO_VCS': repository.backend.name,
'WORK_PATH': path,
'C_WORKSPACE': self.workspace,
'ARTIFACTS_SERVER_BASE_URL': self.artifact_server_base_url})
if 'bind_mounts' in self.debug_config:
params['bind-mounts'] = self.debug_config['bind_mounts']
if build_desc.get('uses_client', False):
params.update({
'JENKINS_COMMAND': build_desc['jenkins-command'],
'CHANGES_CLIENT_ADAPTER': build_desc['adapter'],
'CHANGES_CLIENT_SERVER': build_internal_uri('/api/0'),
'CHANGES_CLIENT_SNAPSHOT_BUCKET': snapshot_bucket,
'CHANGES_CLIENT_SNAPSHOT_ID': snapshot_id,
'CHANGES_CLIENT_LXC_PRE_LAUNCH': build_desc.get('pre-launch',
default_pre),
'CHANGES_CLIENT_LXC_POST_LAUNCH': build_desc.get('post-launch',
default_post),
'CHANGES_CLIENT_LXC_RELEASE': build_desc.get('release',
default_release)
})
return params
def get_future_commands(self, env, commands, artifacts):
"""Create future commands which are later created as comands.
See models/command.py.
"""
return map(lambda command: FutureCommand(command['script'],
artifacts=artifacts,
env=env),
commands)
def create_commands(self, jobstep, env):
"""
This seems slightly redundant, but in fact is necessary for
changes-client to work. The issue is mainly that the client is
designed for the exact flow of information that mesos uses,
in which the commands are taken from changes through an api request.
We need to tell changes to run what would normally be ran through
the Jenkins configuration - so we move this from the Jenkins
configuration into the commands of the build type.
Arguments:
jobstep (JobStep): jobstep to create commands under
env (dict): Env variables to supply to all commands.
"""
commands = self.build_desc.get('commands', [])
artifacts = self.artifacts_for_jobstep(jobstep)
env = env.copy()
if not self.clean:
env['SKIP_GIT_CLEAN'] = "1"
index = 0
for future_command in self.get_future_commands(env, commands, artifacts):
db.session.add(future_command.as_command(jobstep, index))
index += 1
def can_snapshot(self):
"""
Whether or not this build can snapshot is purely a function of the
build type. Right now the only adapter supporting this is the lxc
adapter, but in the scenario that another adapter is added (e.g.
docker?) then we would need for multiple adapters to support snapshots,
so we just encode whether it can or not as a field, defaulting to
false as most types don't support this operation.
"""
return self.build_desc.get('can_snapshot', False)
def artifacts_for_jobstep(self, jobstep):
"""
The artifact names/patterns we want to collect for a given jobstep.
For example, we may want to collect different artifacts for a
collection phase jobstep.
Arguments:
jobstep (JobStep): jobstep in question
"""
return self.artifacts
| 2.125 | 2 |
src/researchhub_case/models.py | ResearchHub/ResearchHub-Backend-Open | 18 | 12787702 | <filename>src/researchhub_case/models.py
# flake8: noqa
from .related_models.researchhub_case_abstract_model import (
AbstractResearchhubCase
)
from .related_models.author_claim_case_model import AuthorClaimCase
| 1.242188 | 1 |
emgineer/__init__.py | mechAneko12/emgineer | 0 | 12787703 | <filename>emgineer/__init__.py
from .emg_decomposition.main import EmgDecomposition
__version__ = '0.1.2'
| 0.96875 | 1 |
tests/settings.py | jarmovanlenthe/django-internationalflavor | 22 | 12787704 | <reponame>jarmovanlenthe/django-internationalflavor<gh_stars>10-100
# -*- coding: utf-8 -*-
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
INSTALLED_APPS = [
'internationalflavor',
]
import django
if django.VERSION[:2] < (1, 6):
TEST_RUNNER = 'discover_runner.DiscoverRunner'
SECRET_KEY = 'spam-spam-spam-spam'
MIDDLEWARE_CLASSES = [] | 1.515625 | 2 |
adjustments.py | TheWolfA2/cppstyle | 1 | 12787705 | <reponame>TheWolfA2/cppstyle<gh_stars>1-10
def adjust_rme_in_header(self):
# adjust missing RME error if RME is in a #included header file
for filename in self.missing_rme.iterkeys():
extension = filename.split('.')[-1]
if extension == 'cpp':
name = filename.split('.')[0]
full_header_filename = name + '.h'
# dealing with file paths
short_header_filename = full_header_filename.split('/')[-1]
# check if header is #included
if short_header_filename in self.local_includes[filename]:
for missing_rme in self.missing_rme[filename]:
# remove error if RME is present or missing (to avoid double-counting) in the header file
if missing_rme in self.all_rme.get(full_header_filename) or \
missing_rme in self.missing_rme.get(full_header_filename):
for error in self.error_tracker[filename]:
if error.message == error.get_error_message('MISSING_RME') and \
error.get_data().get('function_signature') == missing_rme:
self.error_types['MISSING_RME'] -= 1
self.total_errors -= 1
self.error_tracker[filename].remove(error)
def adjust_definitions_above_main(self):
for filename in self.error_tracker.iterkeys():
if not self.file_has_a_main[filename]:
# remove error
errors_to_keep = list()
for error in self.error_tracker[filename]:
if error.message == error.get_error_message('DEFINITION_ABOVE_MAIN'):
self.error_types['DEFINITION_ABOVE_MAIN'] -= 1
self.total_errors -= 1
else:
errors_to_keep.append(error)
self.error_tracker[filename] = errors_to_keep
| 2.375 | 2 |
useless_ui/useless_ui.py | laurasiviero/UselessApp | 1 | 12787706 | '''
#*************************************************************************
Useless App:
#*************************************************************************
Description: - useless but hopefully beautiful;
- app that changes its color and themes;
- UI Modules.
#*************************************************************************
Author: <NAME>
<EMAIL>
License: MIT https://github.com/laurasiviero/UselessApp/blob/main/LICENSE
Date 2021.04.17
#*************************************************************************
'''
import sys
import maya.cmds as cmds
import useless_theme_functions as uth
import useless_functions as ufx
# *************************************************************************
# UI:
# *************************************************************************
def useless_app(USERPATH):
PATH_ICONS = USERPATH + r"\icon"
sys.path.append(USERPATH)
sys.path.append(PATH_ICONS)
print("directories have been updated")
ui_title = "Useless App"
theme_color = [0.286, 0.286, 0.286]
analogue_color = [0.2, 0.2, 0.2]
complementary_color = [0.792, 0.195, 0.203]
# DELETE if it already exists:
if cmds.window(ui_title, exists=True):
cmds.deleteUI(ui_title)
window = cmds.window(ui_title, title="USELESS APP",
backgroundColor=theme_color,
resizeToFitChildren=True)
# ************************************************************************
# LAYOUT
# ************************************************************************
cmds.formLayout("useless_form_layout", backgroundColor=theme_color,
numberOfDivisions=100)
theme_column = cmds.columnLayout("theme_column", adjustableColumn=True,
rowSpacing=5)
# THEME PANEL:
# ************************************************************************
theme_title = cmds.text("Change Theme:",
font="boldLabelFont", align="left")
cmds.separator("theme_separator", backgroundColor=complementary_color,
style="none", height=3)
cmds.iconTextButton("button_day", style='iconOnly',
image=PATH_ICONS + r'\day_icon.png',
backgroundColor=analogue_color,
command="useless_ui.uth.change_theme('day', USERPATH)")
cmds.iconTextButton("button_night", style='iconOnly',
image=PATH_ICONS + r'\night_icon.png',
backgroundColor=analogue_color,
command="useless_ui.uth.change_theme('night', USERPATH)")
cmds.iconTextButton("button_user", style='iconOnly',
image=PATH_ICONS + r'\user_icon.png',
backgroundColor=analogue_color,
command="useless_ui.uth.change_theme('user', USERPATH)")
cmds.iconTextButton("button_default", style='iconOnly',
image=PATH_ICONS + r'\default_icon.png',
backgroundColor=analogue_color,
command="useless_ui.uth.change_theme('default', USERPATH)")
cmds.setParent("..")
# APP COLUMN:
#************************************************************************
app_column = cmds.columnLayout(adjustableColumn=True, rowSpacing=5)
cmds.text("This is the space for the title:",
font="boldLabelFont", align="center")
cmds.separator("title_separator", backgroundColor=complementary_color,
style="none", height=3)
cmds.text("This is the place where it should be the most powerful tool ever made",
font="boldLabelFont", align="center")
cmds.text("Sorry, I don't have to create it for this contest",
font="boldLabelFont", align="center")
cmds.separator("stuff_separator", backgroundColor=complementary_color,
style="none", height=3)
# BUTTONS:
cmds.rowLayout(numberOfColumns=2, adjustableColumn1=True)
cmds.iconTextButton("useless_stuff_button", label="Set Idle",
width=190, style="textOnly",
backgroundColor=analogue_color,
command="useless_ui.ufx.set_idle()")
cmds.iconTextButton("useless_random_button", label="I feel lucky",
style="textOnly",
width=190,
backgroundColor=analogue_color,
command='useless_ui.ufx.get_random_quotes()')
cmds.setParent("..")
cmds.columnLayout(adjustableColumn=True, rowSpacing=5)
cmds.iconTextButton("useless_credits", label="CREDITS!",
style="textOnly",
width=190,
backgroundColor=analogue_color,
command='useless_ui.ufx.show_credits()')
cmds.separator("buttons_separator", backgroundColor=complementary_color,
style="none", height=3)
cmds.setParent("..")
# SLIDERS:
cmds.rowLayout(numberOfColumns=2, adjustableColumn1=True)
cmds.intSliderGrp("useless_number_slider", field=True, label='Numbers',
value=0, min=0, max=10,
columnWidth=(1, 50),
columnAlign=[(1, "left"), (2, "left")])
cmds.iconTextButton("useless_number", label="Pick it out!",
style="textOnly",
width=80,
backgroundColor=analogue_color,
command='useless_ui.ufx.pick_numbers()')
cmds.setParent("..")
cmds.separator("end_separator", backgroundColor=complementary_color,
style="none", height=3)
# MAIN LAYOUT:
# *********************************************************************
cmds.formLayout("useless_form_layout", edit=True,
attachForm=[(theme_column, 'left', 5),
(app_column, 'right', 10)],
attachControl=[(app_column, 'left', 10, theme_column)])
cmds.showWindow(window)
| 1.5625 | 2 |
examples/deeplearning/reinforcement/handson/chapter3-pyTorch/custommodule.py | sszffz/study | 0 | 12787707 | import torch
import torch.nn as nn
class OurModule(nn.Module):
def __init__(self, num_inputs, num_classes, dropout_prob=0.3):
super().__init__()
self.pipe = nn.Sequential(nn.Linear(num_inputs, 5),
nn.ReLU(),
nn.Linear(5, 20),
nn.ReLU(),
nn.Linear(20, num_classes),
nn.Dropout(p=dropout_prob),
nn.Softmax(dim=1))
def forward(self, x):
return self.pipe(x)
if __name__ == "__main__":
net = OurModule(num_inputs=2, num_classes=3)
v = torch.FloatTensor([[2, 3]])
out = net(v)
print(net)
print("*"*20)
print(out) | 3.203125 | 3 |
tf_unet/scripts/radio_util.py | abhineet123/river_ice_segmentation | 10 | 12787708 | <filename>tf_unet/scripts/radio_util.py<gh_stars>1-10
# tf_unet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tf_unet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tf_unet. If not, see <http://www.gnu.org/licenses/>.
'''
Created on Aug 18, 2016
author: jakeret
'''
from __future__ import print_function, division, absolute_import, unicode_literals
import h5py
import numpy as np
from tf_unet.image_util import BaseDataProvider
class DataProvider(BaseDataProvider):
"""
Extends the BaseDataProvider to randomly select the next
data chunk
"""
channels = 1
n_class = 2
def __init__(self, nx, files, a_min=30, a_max=210):
super(DataProvider, self).__init__(a_min, a_max)
self.nx = nx
self.files = files
assert len(files) > 0, "No training files"
print("Number of files used: %s"%len(files))
self._cylce_file()
def _read_chunck(self):
with h5py.File(self.files[self.file_idx], "r") as fp:
nx = fp["data"].shape[1]
idx = np.random.randint(0, nx - self.nx)
sl = slice(idx, (idx+self.nx))
data = fp["data"][:, sl]
rfi = fp["mask"][:, sl]
return data, rfi
def _next_data(self):
data, rfi = self._read_chunck()
nx = data.shape[1]
while nx < self.nx:
self._cylce_file()
data, rfi = self._read_chunck()
nx = data.shape[1]
return data, rfi
def _cylce_file(self):
self.file_idx = np.random.choice(len(self.files))
| 2.125 | 2 |
src/graphPlot.py | KUTuaNithid/connect4Nithid | 41 | 12787709 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 15 15:16:06 2018
@author: Arpit
"""
import numpy as np
import matplotlib.pyplot as plt
import threading
from settings import charts_folder
class GraphPlot:
lock = threading.Lock()
def __init__(self, name, xCnt=1, yCnt=1, labels=None):
self.name = name
self.xCnt = xCnt
self.yCnt = yCnt
self.labels = labels
self.X = []
self.Ys = np.empty((yCnt,), dtype=object)
for i,v in enumerate(self.Ys): self.Ys[i] = list()
def add(self, X, Y):
self.X.append(X)
for i in range(self.yCnt):
self.Ys[i].append(Y[i])
def save(self):
try:
with self.lock:
fig = plt.figure()
for i in range(self.yCnt):
plt.plot(self.X, self.Ys[i], label=self.labels[i] if self.labels is not None else i)
plt.legend(loc = "best")
plt.savefig(charts_folder + str(self.name) + '.png')
plt.close(fig)
except Exception as e:
print("error: " + str(e))
plt.close()
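# Illustrative usage (names are made up for the example):
#     plot = GraphPlot("loss_curve", yCnt=2, labels=["train", "valid"])
#     plot.add(epoch, [train_loss, valid_loss])
#     plot.save()   # writes <charts_folder>/loss_curve.png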
| 2.453125 | 2 |
test_memory_pytorch_gpu.py | AdidasSuperstar/detr | 0 | 12787710 | <filename>test_memory_pytorch_gpu.py
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
parser = argparse.ArgumentParser(description='PyTorch Example')
parser.add_argument('--disable-cuda', action='store_true',
help='Disable CUDA')
args = parser.parse_args()
args.device = None
if not args.disable_cuda and torch.cuda.is_available():
args.device = torch.device('cuda')
else:
args.device = torch.device('cpu')
x = torch.empty((8, 42), device=args.device)
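# The original snippet assumes a `Network` module defined elsewhere; a minimal
# stand-in is sketched here so the example runs end to end.
class Network(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(42, 10)   # 42 matches the feature size of `x` above

    def forward(self, x):
        return F.relu(self.fc(x))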
net = Network().to(device=args.device)
| 2.65625 | 3 |
servicenowpy/servicenowpy/exceptions.py | henriquencmt/servicenowpy | 0 | 12787711 | <gh_stars>0
class StatusCodeError(Exception):
"""Exception used when the status code of a http response is not as expected."""
def __init__(self, message, detail, status):
self.message = message
self.detail = detail
self.status = status
def __str__(self):
return f"\n Message: {self.message}\n Detail: {self.detail}\n Status: {self.status}" | 3 | 3 |
scanner_window.py | UnicycleDumpTruck/VetRFID | 0 | 12787712 | <gh_stars>0
#!/usr/bin/env python3
"""Subclassing pyglet Window to add behavior."""
from __future__ import annotations
from typing import List, Any
from datetime import datetime
from enum import Enum, auto
from loguru import logger
import pyglet # type: ignore
import files
import epc
import log
RET_SIDE = 200 # Length of side of reticle box
RET_BOX_WT = 10 # Line weight of reticle box lines
LABEL_FONT = 'Montserrat-SemiBold'
class State(Enum):
"""Window state, whether a tag is being displayed."""
IMG_SHOWING = auto()
VID_SHOWING = auto()
IDLE = auto()
class ScannerWindow(pyglet.window.Window): # pylint: disable=abstract-method
"""Subclassing pyglet Window to add logic and display."""
def __init__(self, *args, window_number, idle_seconds, ** kwargs):
"""Set up backgroud and periphery labels."""
super().__init__(*args, **kwargs)
self.state = State.IDLE
# self.background_graphics = []
# self.graphics = []
# self.graphics_batch = pyglet.graphics.Batch()
# self.background_graphics_batch = pyglet.graphics.Batch()
self.window_number = window_number
self.idle_seconds = idle_seconds
self.media_dir = 'xray'
self.media_type = 'img'
self.serial = None
self.label_controller = LabelController(self)
self.video_player = pyglet.media.Player()
source = pyglet.media.StreamingSource() # TODO In pyglet examples, but unused?
self.video_player.window = self
@self.video_player.event # TODO reassess after video fix
def on_eos(): # Attempting to stop error on video end
logger.debug("Video player telling window to idle, but line is commented.")
# self.idle(0) # Commented out to allow looping
self.label_bg = None
self.idle_image = pyglet.resource.image("graphics/instructions.png")
self.idle_image.anchor_x = self.idle_image.width // 2
self.idle_image.anchor_y = self.idle_image.height // 2
self.image = None
self.orig_image = None
self.video = None
self.clock = pyglet.clock.get_default()
self.idle(0) # idle needs delta_time argument
self.set_mouse_visible(False)
# self.setup_magnifer()
def setup_magnifer(self):
# Magnifier
self.mag_pos = [0, 0]
self.mag_x = 0
self.mag_y = 0
self.reticle_batch = pyglet.graphics.Batch()
self.ret_left = pyglet.shapes.Line(self.mag_x, self.mag_y, self.mag_x, self.mag_y + RET_SIDE, width=10,
color=(200, 20, 20), batch=self.reticle_batch)
self.ret_right = pyglet.shapes.Line(self.mag_x + RET_SIDE, self.mag_y, self.mag_x + RET_SIDE, self.mag_y + RET_SIDE, width=10,
color=(200, 20, 20), batch=self.reticle_batch)
self.ret_top = pyglet.shapes.Line(self.mag_x - RET_BOX_WT // 2, self.mag_y + RET_SIDE, self.mag_x + RET_SIDE + RET_BOX_WT // 2, self.mag_y + RET_SIDE, width=10,
color=(200, 20, 20), batch=self.reticle_batch)
self.ret_bot = pyglet.shapes.Line(self.mag_x - RET_BOX_WT // 2, self.mag_y, self.mag_x + RET_SIDE + RET_BOX_WT // 2, self.mag_y, width=10,
color=(200, 20, 20), batch=self.reticle_batch)
def idle(self, delta_time):
"""Clear medical imagery, return to idle screen."""
self.clock.unschedule(self.idle)
self.state = State.IDLE
logger.info(f"{self.window_number} Going idle, ",
delta_time, " seconds since scan.")
self.clear()
self.image = None
self.orig_image = None
self.label_bg = None
self.video = None
self.video_player.next_source()
self.serial = None
self.label_controller.idle_labels.draw()
# Not used at all?
# def on_player_eos(self): # TODO reassess after video fix
# """When video player runs out of queued files."""
# logger.debug("Player EOS received by ScannerWindow!")
# # self.idle(0)
# def on_eos(self): # TODO reassess after video fix
# """When current video file ends."""
# logger.debug("EOS received by ScannerWindow")
# # self.idle(0)
def on_tag_read(self, tag: epc.Tag):
"""New tag scanned, display imagery."""
logger.debug(
f"{tag.epc.species_string} {tag.epc.serial} rx by ScannerWindow {self.window_number}")
self.clock.unschedule(self.idle)
serial = tag.epc.serial
if serial != self.serial:
logger.info(f"New tag: {tag.epc.species_string} {tag.epc.serial} on window {self.window_number}")
tag.last_seen = log.log_tag(tag)
self.clear()
self.serial = serial
logger.debug(f"Seeking imagery for {tag.epc.species_string}")
file, file_type, overlay = files.random_of_species(
tag.epc.species_string)
if file_type == "img":
self.show_image(file)
# self.state = State.IMG_SHOWING
# self.video = None
# self.video_player.next_source()
# self.video_player.delete()
# self.image, self.orig_image = file, file
elif file_type == "vid":
self.show_video(file)
# self.state = State.VID_SHOWING
# self.image = None
# self.orig_image = None
# self.video = file
# self.video_player.next_source()
# self.video_player.delete()
# self.video_player.queue(self.video)
# self.video_player.play()
self.label_controller.make_tag_labels(tag).draw()
self.label_bg = overlay
else:
if self.state == State.VID_SHOWING:
self.video_player.loop = True
self.clock.schedule_once(self.idle, self.idle_seconds)
return pyglet.event.EVENT_HANDLED
def show_image(self, file):
self.state = State.IMG_SHOWING
self.video = None
self.video_player.next_source()
self.video_player.delete()
self.image = file
# self.label_controller.make_tag_labels(tag).draw()
def show_video(self, vid):
self.state = State.VID_SHOWING
self.image = None
self.orig_image = None
self.video = vid
self.video_player.next_source()
self.video_player.delete()
self.video_player.queue(self.video)
self.video_player.play()
def on_key_press(self, symbol, modifiers):
"""Pressing any key exits app."""
if symbol == pyglet.window.key.P:
print("Sending self same pig tag.")
self.on_tag_read(epc.same_pig())
# elif symbol == pyglet.window.key.D:
# print("Sending self random dog tag.")
# self.on_tag_read(epc.random_dog())
elif symbol == pyglet.window.key.G:
print("Sending self same goat.")
self.on_tag_read(epc.same_goat())
elif symbol == pyglet.window.key.D:
print("d pressed")
self.mag_x += 10
elif symbol == pyglet.window.key.A:
print("a pressed")
self.mag_x -= 10
elif symbol == pyglet.window.key.W:
print("w pressed")
self.mag_y += 10
elif symbol == pyglet.window.key.S:
print("s pressed")
self.mag_y -= 10
elif symbol == pyglet.window.key.LEFT:
self.show_image(files.prev_png())
elif symbol == pyglet.window.key.RIGHT:
self.show_image(files.next_png())
elif symbol == pyglet.window.key.UP:
self.show_video(files.next_mp4())
elif symbol == pyglet.window.key.DOWN:
self.show_video(files.prev_mp4())
else:
logger.warning(f"{symbol} key pressed, exiting application!")
pyglet.app.exit()
# def on_mouse_motion(self, x, y, button, modifiers):
# self.mag_x = x
# self.mag_y = y
# # TODO: ? Not tested, passing dt. Schedule this?
# self.update_magnifier(0)
def draw_magnifier(self):
mag_image = self.orig_image.get_region(
# Subtract half of RET_SIDE to center magnified image on cursor
x=self.mag_x // self.image.scale, # - RET_SIDE // 2,
y=self.mag_y // self.image.scale, # - RET_SIDE // 2,
width=RET_SIDE,
height=RET_SIDE)
mag_image.blit(self.mag_x, self.mag_y, 0)
self.reticle_batch.draw()
def draw_mag_image(self):
# Magnifier
mag_image = self.orig_image.get_region(
# Subtract half of RET_SIDE to center magnified image on cursor
x=self.mag_x // self.image.scale, # - RET_SIDE // 2,
y=self.mag_y // self.image.scale, # - RET_SIDE // 2,
width=RET_SIDE,
height=RET_SIDE)
mag_image.blit(self.mag_x, self.mag_y, 0)
self.reticle_batch.draw()
def on_draw(self):
"""Draw what should be on the screen, set by other methods."""
self.clear()
pyglet.gl.glEnable(pyglet.gl.GL_BLEND)
if self.image:
self.image.anchor_x = self.image.width // 2
self.image.anchor_y = self.image.height // 2
pyglet.gl.glBlendFunc(pyglet.gl.GL_SRC_ALPHA,
pyglet.gl.GL_ONE_MINUS_SRC_ALPHA)
self.image.blit(self.width // 2, self.height // 2)
# Magnifier
# self.draw_magnifier()
if self.video:
if self.video_player.source and self.video_player.source.video_format:
self.video_player.texture.anchor_x = self.video_player.texture.width // 2
self.video_player.texture.anchor_y = self.video_player.texture.height // 2
self.video_player.texture.blit(
self.width // 2, self.height // 2)
else:
self.idle(0) # TODO Figure out other return method
# This will idle at the video end even if the animal remains.
pyglet.gl.glBlendFunc(pyglet.gl.GL_SRC_ALPHA,
pyglet.gl.GL_ONE_MINUS_SRC_ALPHA)
# if self.state != State.VID_SHOWING:
# self.label_bg.blit(self.width // 2, self.height // 2)
if self.state != State.IDLE: #== State.IMG_SHOWING:
# Draw species detected and last-seen labels
#self.label_controller.tag_labels.draw()
# Draw species illustration and label graphic overlay
if self.label_bg:
self.label_bg.blit(40, 40)
if self.state == State.IDLE:
# self.label_controller.idle_labels.draw()
self.idle_image.blit(self.width // 2, self.height // 2)
# if self.state != State.VID_SHOWING:
# self.label_controller.always_labels.draw()
# Commented out for now, as one station will show many imagery types
def __repr__(self):
return f'ScannerWindow #{self.window_number}'
def update_magnifier(self, dt):
"""Move position of magnifying image, and lines making rect."""
# Move position used to get magnified region of image.
# TODO: If randomly moving, keep within bounds of memory.
# self.mag_x += 50 * dt # Move 50px per second
# self.mag_y += 50 * dt
# Move lines making up reticle rectangle
self.ret_left.x = self.mag_x
self.ret_left.y = self.mag_y
self.ret_left.x2 = self.mag_x
self.ret_left.y2 = self.mag_y + RET_SIDE
self.ret_right.x = self.mag_x + RET_SIDE
self.ret_right.y = self.mag_y
self.ret_right.x2 = self.mag_x + RET_SIDE
self.ret_right.y2 = self.mag_y + RET_SIDE
self.ret_top.x = self.mag_x - RET_BOX_WT // 2
self.ret_top.y = self.mag_y + RET_SIDE
self.ret_top.x2 = self.mag_x + RET_SIDE + RET_BOX_WT // 2
self.ret_top.y2 = self.mag_y + RET_SIDE
self.ret_bot.x = self.mag_x - RET_BOX_WT // 2
self.ret_bot.y = self.mag_y
self.ret_bot.x2 = self.mag_x + RET_SIDE + RET_BOX_WT // 2
self.ret_bot.y2 = self.mag_y
ScannerWindow.register_event_type('on_tag_read')
ScannerWindow.register_event_type('on_player_eos')
ScannerWindow.register_event_type('on_eos')
pyglet.media.Player.register_event_type('on_eos')
def get_video_size(width, height, sample_aspect):
"""Calculate new size based on current size and scale factor."""
if sample_aspect > 1.:
return width * sample_aspect, height
if sample_aspect < 1.:
return width, height / sample_aspect
return width, height
X_LABEL_OFFSET = 185
Y_LABEL_OFFSET = 95
class LabelController():
"""Manage labels for a ScannerWindow"""
def __init__(self, window: ScannerWindow):
"""Initialize and make idle labels."""
self.tag_graphics: List[Any] = []
self.idle_graphics: List[Any] = []
self.always_graphics: List[Any] = []
self.tag_labels = pyglet.graphics.Batch()
self.idle_labels = pyglet.graphics.Batch()
self.always_labels = pyglet.graphics.Batch()
self.window = window
self.make_idle_labels()
self.make_always_labels()
def idle_label_batch(self):
"""Return self.idle_label_batch"""
return self.idle_label_batch
def make_tag_labels(self, tag):
"""Delete old labels, generate new ones."""
for item in self.tag_graphics:
item.delete()
self.tag_graphics = []
# Create labels for tag:
species_label_1 = pyglet.text.Label(
text="Species detected:",
color=(255, 255, 255, 255),
font_size=28, font_name=LABEL_FONT,
x=self.window.width - X_LABEL_OFFSET, y=Y_LABEL_OFFSET,
anchor_x='center', anchor_y='bottom',
batch=self.tag_labels)
self.tag_graphics.append(species_label_1)
species_label_2 = pyglet.text.Label(
text=tag.epc.species_string.capitalize(),
color=(255, 255, 255, 255),
font_size=48, font_name=LABEL_FONT,
x=self.window.width - X_LABEL_OFFSET, y=Y_LABEL_OFFSET,
anchor_x='center', anchor_y='top',
batch=self.tag_labels)
self.tag_graphics.append(species_label_2)
last_seen_date = datetime.strftime(
tag.last_seen, "%m/%d/%Y")
last_seen_time = datetime.strftime(
tag.last_seen, "%H:%M:%S")
last_seen_label_2 = pyglet.text.Label(
text=last_seen_date,
color=(255, 255, 255, 255),
font_size=28, font_name=LABEL_FONT,
x=self.window.width - X_LABEL_OFFSET, y=self.window.height - Y_LABEL_OFFSET,
anchor_x='center', anchor_y='center',
batch=self.tag_labels)
last_seen_label_1 = pyglet.text.Label(
text='Patient last seen:',
color=(255, 255, 255, 255),
font_size=28, font_name=LABEL_FONT,
x=self.window.width - X_LABEL_OFFSET,
y=self.window.height - Y_LABEL_OFFSET + 28,
anchor_x='center', anchor_y='bottom',
batch=self.tag_labels)
last_seen_label_3 = pyglet.text.Label(
text=last_seen_time,
color=(255, 255, 255, 255),
font_size=28, font_name=LABEL_FONT,
x=self.window.width - X_LABEL_OFFSET, y=self.window.height - Y_LABEL_OFFSET - 28,
anchor_x='center', anchor_y='top',
batch=self.tag_labels)
self.tag_graphics.append(last_seen_label_1)
self.tag_graphics.append(last_seen_label_2)
self.tag_graphics.append(last_seen_label_3)
return self.tag_labels
def make_idle_labels(self):
"""Generate new idle labels."""
label = pyglet.text.Label(
'Please place the patient in the scanning area.',
color=(255, 255, 255, 255),
font_size=36, font_name=LABEL_FONT,
x=self.window.width // 2, y=self.window.height // 2,
anchor_x='center', anchor_y='center',
batch=self.idle_labels)
self.idle_graphics.append(label)
def make_always_labels(self):
"""Create labels that will remain on screen always."""
station_label_1 = pyglet.text.Label(
f"Station #{str(self.window.window_number)}",
color=(255, 255, 255, 255),
font_size=48, font_name=LABEL_FONT,
x=X_LABEL_OFFSET, y=self.window.height - Y_LABEL_OFFSET,
anchor_x='center', anchor_y='center',
batch=self.always_labels)
self.always_graphics.append(station_label_1)
station_label_2 = pyglet.text.Label(
"X-Ray",
color=(255, 255, 255, 255),
font_size=48, font_name=LABEL_FONT,
x=X_LABEL_OFFSET, y=Y_LABEL_OFFSET,
anchor_x='center', anchor_y='center',
batch=self.always_labels)
self.always_graphics.append(station_label_2)
return self.always_labels
| 2.609375 | 3 |
week9/api/urls.py | yestemir/web | 0 | 12787713 | <reponame>yestemir/web<filename>week9/api/urls.py
from django.urls import path
from api.views import product_list, getproduct, category_list, getcategory, getproductsbycategory
urlpatterns = [
path('products/', product_list),
path('products/<int:product_id>/', getproduct),
path('categories/', category_list),
path('categories/<int:category_id>', getcategory),
path('categories/<int:category_id>/products', getproductsbycategory),
] | 1.945313 | 2 |
adopt/views.py | LogstonEducation/TFA-Project-Adopt-A-Pet | 4 | 12787714 | <reponame>LogstonEducation/TFA-Project-Adopt-A-Pet<gh_stars>1-10
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from .models import Pet
from .forms import AdoptRequestForm
def index(request):
pets = Pet.objects.all()
context = {
'pets': pets,
}
return render(request, 'adopt/all.html', context)
def pet_details(request, pet_id):
pet = get_object_or_404(Pet, pk=pet_id)
context = {
'pet': pet,
}
return render(request, 'adopt/detail.html', context)
def request_a_pet(request):
if request.method == 'POST':
form = AdoptRequestForm(request.POST)
if form.is_valid():
form.save()
return JsonResponse({})
else:
return JsonResponse({'errors': form.errors}, status=400)
return JsonResponse({})
| 2.203125 | 2 |
autoscale_cloudroast/test_repo/autoscale/functional/launch_config/test_launch_config_personality.py | codebyravi/otter | 20 | 12787715 | """
Test for launch config's personality validation.
"""
import base64
from test_repo.autoscale.fixtures import AutoscaleFixture
class LaunchConfigPersonalityTest(AutoscaleFixture):
"""
Verify launch config.
"""
def setUp(self):
"""
Create a scaling group.
"""
super(LaunchConfigPersonalityTest, self).setUp()
self.path = '/root/test.txt'
def test_launch_config_personality_without_encoding(self):
"""
Create a scaling group such that the server's personality in the
launch config is not base64 encoded.
"""
file_contents = 'This is a test file.'
personality = [{'path': '/root/.csivh',
'contents': file_contents}]
self._assert_create_group(personality)
def test_launch_config_personality_with_invalid_personality(self):
"""
Create a scaling group with invalid personality and verify the creation
fails with an error 400.
"""
personalities = ['abc', 0, {'path': '/abc'}, {'contents': 'test'},
[{'path': self.path}], [{'content': 'test'}]]
for personality in personalities:
self._assert_create_group(personality)
def test_launch_config_personality_with_max_path_size(self):
"""
Create a scaling group with path over 255 characters and verify the
creation fails with an error 400.
"""
long_path = 'z' * (self.personality_maxlength + 1)
personality = [{'path': '/root/{0}.txt'.format(long_path),
'contents': base64.b64encode('tests')}]
self._assert_create_group(personality)
def test_launch_config_personality_with_max_file_content_size(self):
"""
Create a scaling group with file contents over 1000 characters and
verify the creation fails with an error 400.
"""
file_content = 'z' * (self.personality_max_file_size + 1)
personality = [{'path': self.path,
'contents': base64.b64encode(file_content)}]
self._assert_create_group(personality)
def test_launch_config_personality_with_max_personalities(self):
"""
Create a scaling group with over max personalities allowed and
verify the creation fails with an error 400.
"""
personality_content = {'path': self.path,
'contents': base64.b64encode('tests')}
personality = [personality_content
for _ in range(self.max_personalities + 1)]
self._assert_create_group(personality)
def _assert_create_group(self, personality, response=400):
"""
Creates a group with the given server personality.
"""
group_response = self.autoscale_behaviors.create_scaling_group_given(
lc_personality=personality)
self.assertEquals(group_response.status_code, response, msg='Create group '
'with invalid lc_personality returned {0} as against '
'{1}'.format(group_response.status_code, response))
        if response == 200:
group = group_response.entity
self.resources.add(group, self.empty_scaling_group)
return group
| 2.75 | 3 |
funcs.py | egehanyorulmaz/bookish-engine | 1 | 12787716 | <reponame>egehanyorulmaz/bookish-engine
from utils import *
from collections import defaultdict
def data_importer(order_number, tao_number, r_number, instance_number):
dat_filename = f'Dataslack_{order_number}orders_Tao{tao_number}R{r_number}_{instance_number}_without_setup.dat'
dat_filepath = f'project_data/{dat_filename}'
datContent = [i.strip().split() for i in open(dat_filepath).readlines()]
release_times = list_separator(datContent[1][0])
processing_times = list_separator(datContent[4][0])
revenues = list_separator(datContent[7][0])
due_dates = list_separator(datContent[10][0])
deadlines = list_separator(datContent[13][0])
tardiness_penalty_costs = list_separator(datContent[16][0])
JOB_ATTRIBUTES = defaultdict()
for job_number in range(len(release_times)):
JOB_ATTRIBUTES[job_number] = {'release_time': release_times[job_number],
'processing_times': processing_times[job_number],
'revenues': revenues[job_number],
'due_dates': due_dates[job_number],
'deadlines': deadlines[job_number],
'tardiness_penalty_costs': tardiness_penalty_costs[job_number],
'slack_time': due_dates[job_number] - release_times[job_number] - processing_times[job_number]
}
return JOB_ATTRIBUTES
def calculate_weighted_tardiness(completion_times, due_dates, deadlines, weights):
weighted_tardiness = 0
for completion_time, due_date, deadline, weight in zip(completion_times, due_dates, deadlines, weights):
if due_date < completion_time:
weighted_tardiness += weight * (completion_time - due_date)
return weighted_tardiness
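# Illustrative sketch (hypothetical values, not from the project data): for jobs with
# completion_times = [5, 12], due_dates = [6, 8] and weights = [2, 3], only the second job
# is tardy (12 > 8), so calculate_weighted_tardiness returns 3 * (12 - 8) = 12.
# Note that the deadlines argument is accepted but not used in the calculation itself.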
if __name__ == '__main__':
data_dictionary = data_importer(10,1,5,6)
print('a') | 2.453125 | 2 |
src/main_cmt.py | Jiangtong-Li/ZHSIR | 8 | 12787717 | import os
import random
import numpy as np
from scipy.spatial.distance import cdist
import cv2
import time
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
# import torch.multiprocessing as mp
from torch.utils.data import DataLoader
from torch.optim import Adam, SGD
# from torch.utils.tensorboard import SummaryWriter
from scipy.spatial.distance import cdist
from package.model.cmt import CMT
from package.loss.cmt_loss import _CMT_loss
from package.dataset.data_cmt import *
from package.args.cmt_args import parse_config
from package.dataset.utils import make_logger
from package.model.utils import *
from package.loss.regularization import _Regularization
import numpy as np
from sklearn.neighbors import NearestNeighbors as NN
DEBUG = False
def dr_dec(optimizer, args):
args.lr *= 0.5
args.lr = max(args.lr, 5e-5)
optimizer.param_groups[0]['lr'] = args.lr
def _get_pre_from_matches(matches):
"""
:param matches: A n-by-m matrix. n is number of test samples, m is the top m elements used for evaluation
:return: precision
"""
return np.mean(matches)
def _map_change(inputArr):
dup = np.copy(inputArr)
for idx in range(inputArr.shape[1]):
if idx != 0:
# dup cannot be bool type
dup[:,idx] = dup[:,idx-1] + dup[:,idx]
return np.multiply(dup, inputArr)
def _get_map_from_matches(matches):
"""
mAP's calculation refers to https://github.com/ShivaKrishnaM/ZS-SBIR/blob/master/trainCVAE_pre.py.
:param matches: A n-by-m matrix. n is number of test samples, m is the top m elements used for evaluation
matches[i][j] == 1 indicates the j-th retrieved test image j belongs to the same class as test sketch i,
otherwise, matches[i][j] = 0.
:return: mAP
"""
temp = [np.arange(matches.shape[1]) for _ in range(matches.shape[0])]
mAP_term = 1.0 / (np.stack(temp, axis=0) + 1.0)
precisions = np.multiply(_map_change(matches), mAP_term)
mAP = np.mean(precisions, axis=1)
return np.mean(mAP)
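# Worked example (hypothetical values, for illustration only): for a single query whose
# top-4 retrievals match as [1, 0, 1, 1], _map_change yields [1, 0, 2, 3], mAP_term is
# [1, 1/2, 1/3, 1/4], so the per-position precisions are [1, 0, 2/3, 3/4] and the returned
# mAP is their mean, roughly 0.604.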
def _eval(feats_labels_sk, feats_labels_im, n=200):
"""
:param feats_labels_sk: a two-element tuple [features_of_sketches, labels_of_sketches]
labels_of_sketches and labels_of_images are scalars(class id).
:param feats_labels_im: a two-element tuple [features_of_images, labels_of_images]
features_of_images and features_of_sketches are used for distance calculation.
:param n: the top n elements used for evaluation
    :return: precision@n, mAP@n
"""
nn = NN(n_neighbors=feats_labels_im[0].shape[0], metric='cosine', algorithm='brute').fit(feats_labels_im[0])
_, indices = nn.kneighbors(feats_labels_sk[0])
retrieved_classes = np.array(feats_labels_im[1])[indices]
matches = np.vstack([(retrieved_classes[i] == feats_labels_sk[1][i])
for i in range(retrieved_classes.shape[0])]).astype(np.uint16)
return _get_pre_from_matches(matches[:, :n]), _get_map_from_matches(matches[:, :n])
def _test_and_save(epochs, optimizer, data_test, model, logger, args, loss_sum):
if not hasattr(_test_and_save, 'best_acc'):
_test_and_save.best_acc = 0
n = 200
start_cpu_t = time.time()
feats_labels_sk, feats_labels_im = _extract_feats_sk_im(data=data_test, model=model,
batch_size=args.batch_size)
pre, mAPn = _eval(feats_labels_sk, feats_labels_im, n)
logger.info("Precision@{}: {}, mAP@{}: {}, bestPrecsion: {}".format(n, pre, n, mAPn, max(pre, _test_and_save.best_acc)) +
" " + 'epochs: {}, loss_sk: {}, loss_im: {}, (eval cpu time: {}s)'.
format(epochs, np.mean(loss_sum[SK]), np.mean(loss_sum[IM]), time.time() - start_cpu_t))
if pre > _test_and_save.best_acc:
_test_and_save.best_acc = pre
torch.save({'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'epochs': epochs,
'args': args},
save_fn(args.save_dir, epochs, pre, mAPn))
torch.cuda.empty_cache()
def save_fn(save_dir, it, pre=0, mAP=0):
return join(mkdir(join(save_dir, 'models')), 'Iter__{}__{}_{}.pkl'.format(it, int(pre * 1000), int(mAP * 1000)))
def _try_load(args, logger, model, optimizer):
if args.start_from is None:
# try to find the latest checkpoint
files = os.listdir(mkdir(join(mkdir(args.save_dir), 'models')))
if len(files) == 0:
logger.info("Cannot find any checkpoint. Start new training.")
return 0
latest = max(files, key=lambda name: int(name.split('\\')[-1].split('/')[-1].split('.')[0].split('__')[1]))
checkpoint = join(args.save_dir, 'models', latest)
else:
try: checkpoint = save_fn(args.save_dir, str(int(args.start_from)))
except: checkpoint = args.start_from
logger.info("Load model from {}".format(checkpoint))
ckpt = torch.load(checkpoint, map_location='cpu')
model.load_state_dict(ckpt['model'])
optimizer.load_state_dict(ckpt['optimizer'])
return ckpt['epochs']
def _extract_feats_sk_im(data, model, batch_size=64):
skip = 1
model.eval()
feats_labels_sk = _extract_feats(data, lambda x: model(sk=x), SK, skip=skip,
batch_size=batch_size)
feats_labels_im = _extract_feats(data, lambda x: model(im=x), IM, skip=skip,
batch_size=batch_size)
model.train()
return feats_labels_sk, feats_labels_im
def _extract_feats(data_test, model, what, skip=1, batch_size=16):
"""
:param data_test: test Dataset
:param model: network model
:param what: SK or IM
:param skip: skip a certain number of image/sketches to reduce computation
:return: a two-element list [extracted_features, extracted_labels]
"""
labels = []
feats = []
for batch_idx, (xs, id) in \
enumerate(data_test.traverse(what, skip=skip, batch_size=batch_size)):
feats.append(model(xs.cuda()).data.cpu().numpy())
# print(type(labels[0]), labels[0].shape)# <class 'numpy.ndarray'> (16, 256)
# print(type(id), id) # <class 'torch.Tensor'> tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
labels.append(id.numpy())
# print(feats[-1][-1][:4])
return np.concatenate(feats), np.concatenate(labels)
def _parse_args_paths(args):
if args.dataset == 'sketchy':
sketch_folder = SKETCH_FOLDER_SKETCHY
im_folder = IMAGE_FOLDER_SKETCHY
path_semantic = PATH_SEMANTIC_SKETCHY
train_class = TRAIN_CLASS_SKETCHY
test_class = TEST_CLASS_SKETCHY
npy_folder = NPY_FOLDER_SKETCHY
elif args.dataset == 'tuberlin':
sketch_folder = SKETCH_FOLDER_TUBERLIN
im_folder = IMAGE_FOLDER_TUBERLIN
path_semantic = PATH_SEMANTIC_TUBERLIN
train_class = TRAIN_CLASS_TUBERLIN
test_class = TEST_CLASS_TUBERLIN
npy_folder = NPY_FOLDER_TUBERLIN
else: raise Exception("dataset args error!")
if args.sketch_dir != '': sketch_folder = args.sketch_dir
if args.image_dir != '': im_folder = args.image_dir
    if args.path_semantic != '': path_semantic = args.path_semantic
if args.npy_dir == '0': args.npy_dir = npy_folder
elif args.npy_dir == '': args.npy_dir = None
if args.ni_path == '0': args.ni_path = PATH_NAMES
return sketch_folder, im_folder, path_semantic, train_class, test_class
def train(args):
# srun -p gpu --gres=gpu:1 --exclusive --output=san10.out python main_san.py --epochs 50000 --print_every 500 --save_every 2000 --batch_size 96 --dataset sketchy --margin 10 --npy_dir 0 --save_dir san_sketchy10
# srun -p gpu --gres=gpu:1 --exclusive --output=san1.out python main_san.py --epochs 50000 --print_every 500 --save_every 2000 --batch_size 96 --dataset sketchy --margin 1 --npy_dir 0 --save_dir san_sketchy1
# srun -p gpu --gres=gpu:1 --output=san_sketchy03.out python main_san.py --epochs 30000 --print_every 200 --save_every 3000 --batch_size 96 --dataset sketchy --margin 0.3 --npy_dir 0 --save_dir san_sketchy03 --lr 0.0001
sketch_folder, image_folder, path_semantic, train_class, test_class = _parse_args_paths(args)
if DEBUG:
args.back_bone = 'default'
args.npy_dir = NPY_FOLDER_SKETCHY
args.ni_path = PATH_NAMES
args.print_every = 1
args.save_every = 5
args.paired = True
args.epochs = 20000
# args.lr = 0.001
args.sz = 32
# args.l2_reg = 0.0001
args.back_bone = 'default'
args.batch_size = 32
args.h = 500
test_class = train_class[5:7]
train_class = train_class[:5]
logger = make_logger(join(mkdir(args.save_dir), curr_time_str() + '.log'))
data_train = CMT_dataloader(folder_sk=sketch_folder, clss=train_class, folder_nps=args.npy_dir,
path_semantic=path_semantic, paired=args.paired, names=args.ni_path,
folder_im=image_folder, normalize01=False, doaug=False, logger=logger,
sz=None if args.back_bone=='vgg' else args.sz)
dataloader_train = DataLoader(dataset=data_train, batch_size=args.batch_size, shuffle=True)
data_test = CMT_dataloader(folder_sk=sketch_folder, clss=test_class, folder_nps=args.npy_dir,
path_semantic=path_semantic, folder_im=image_folder, normalize01=False, doaug=False,
logger=logger, sz=None if args.back_bone=='vgg' else args.sz)
model = CMT(d=data_train.d(), h=args.h, back_bone=args.back_bone, batch_normalization=args.bn, sz=args.sz)
model.cuda()
if not args.ft:
model.fix_vgg()
optimizer = SGD(params=model.parameters(), lr=args.lr, momentum=0.6)
epochs = _try_load(args, logger, model, optimizer)
logger.info(str(args))
args.epochs += epochs
cmt_loss = _CMT_loss()
model.train()
l2_regularization = _Regularization(model, args.l2_reg, p=2, logger=None)
loss_sum = [[0], [0]]
logger.info("Start training:\n train_classes: {}\n test_classes: {}".format(train_class, test_class))
_test_and_save(epochs=epochs, optimizer=optimizer, data_test=data_test,
model=model, logger=logger, args=args, loss_sum=loss_sum)
while True:
for mode, get_feat in [[IM, lambda data: model(im=data)],
[SK, lambda data: model(sk=data)]]:
data_train.mode = mode
for _, (data, semantics) in enumerate(dataloader_train):
# Skip one-element batch in consideration of batch normalization
if data.shape[0] == 1:
continue
# print(data.shape)
optimizer.zero_grad()
loss = cmt_loss(get_feat(data.cuda()),
semantics.cuda()) \
+ l2_regularization()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
loss_sum[mode].append(float(loss.item()))
epochs += 1
dr_dec(optimizer=optimizer, args=args)
if (epochs + 1) % args.save_every == 0:
_test_and_save(epochs=epochs, optimizer=optimizer, data_test=data_test,
model=model, logger=logger, args=args, loss_sum=loss_sum)
if (epochs + 1) % args.print_every == 0:
logger.info('epochs: {}, loss_sk: {}, loss_im: {},'.
format(epochs, np.mean(loss_sum[SK]), np.mean(loss_sum[IM])))
loss_sum = [[], []]
if epochs >= args.epochs: break
def gen_args(h=500, dataset='sketchy', back_bone='vgg', sz=32, ft=True, paired=False):
ft = int(ft)
paired = int(paired)
return \
"""
###
#!/bin/bash
#SBATCH --job-name=ZXLing
#SBATCH --partition=gpu
#SBATCH --gres=gpu:1
#SBATCH --output=cmt_%j.out
#SBATCH --time=7-00:00:00
module load gcc/7.3.0 anaconda/3 cuda/9.2 cudnn/7.1.4
source activate lzxtc2
python main_cmt.py --npy_dir 0 --dataset {} --save_dir cmts/cmt{}{}_{}_{}_{}_{} --h {} --back_bone {} --sz {} --ft {} --paired {} --ni_path 0
""".format(dataset, int(ft), int(paired) , dataset, h, back_bone, sz if back_bone=='default' else "", h, back_bone, sz, ft, paired)
if __name__ == '__main__':
if False:
print(gen_args(back_bone='vgg', ft=False, paired=True))
print(gen_args(back_bone='vgg', ft=True, paired=False))
print(gen_args(back_bone='vgg', ft=True, paired=True))
print(gen_args(back_bone='vgg', ft=False, paired=False))
print(gen_args(back_bone='default'))
exit()
args = parse_config()
print(str(args))
# train(args)
# srun --gres=gpu:1 --output=cmt_%j.out python main_cmt.py
'''
#!/bin/bash
#SBATCH --job-name=ZXLing
#SBATCH --partition=gpu
#SBATCH --gres=gpu:1
#SBATCH --output=cmt_%j.out
#SBATCH --time=7-00:00:00
module load gcc/7.3.0 anaconda/3 cuda/9.2 cudnn/7.1.4
source activate lzxtc2
python main_cmt.py --npy_dir 0 --dataset sketchy --save_dir cmts/cmt11_sketchy_500_default_32 --h 500 --back_bone default --sz 32 --ft 1 --paired 1 --ni_path 0
python main_cmt.py --npy_dir 0 --dataset sketchy --save_dir cmts/cmt01_sketchy_500_vgg_ --h 500 --back_bone vgg --sz 32 --ft 0 --paired 1 --ni_path 0
python main_cmt.py --npy_dir 0 --dataset sketchy --save_dir cmts/cmt10_sketchy_500_vgg_ --h 500 --back_bone vgg --sz 32 --ft 1 --paired 0 --ni_path 0
python main_cmt.py --npy_dir 0 --dataset sketchy --save_dir cmts/cmt11_sketchy_500_vgg_ --h 500 --back_bone vgg --sz 32 --ft 1 --paired 1 --ni_path 0
python main_cmt.py --npy_dir 0 --dataset sketchy --save_dir cmts/cmt00_sketchy_500_vgg_ --h 500 --back_bone vgg --sz 32 --ft 0 --paired 0 --ni_path 0
python main_cmt.py --npy_dir 0 --dataset sketchy --save_dir cmts/cmt10_sketchy_500_default_32 --h 500 --back_bone default --sz 32 --ft 1 --paired 0 --ni_path 0
''' | 1.8125 | 2 |
processor/symptoms.py | ohteedee/COVID-19_diagnosing_app | 0 | 12787718 | import pandas as pd
from utils import new_RF_model
# Since processing of the symptoms data has several related elements, I decided to wrap it into a class.
# This makes it clear to someone reading the code that all these functions operate on the symptoms data and have nothing to do with the image data.
class ProcessSymptomsData:
def __init__(self, cough, temperature, sore_throat, shortness_of_breath, head_ache,
age, test_indication):
        '''
        Store the attributes of the user's reported symptoms and demographic information.
        '''
self.cough = cough
self.temperature = temperature
self.sore_throat= sore_throat
self.shortness_of_breath = shortness_of_breath
self.head_ache = head_ache
self.age_60_and_above = age
self.test_indication = test_indication
self.fever = None
self.new_test_indication = None
def convert_temperature_to_categories(self):
'''
        This function takes the user's temperature and generates a categorical indicator of fever presence
'''
if self.temperature >= 38.0:
self.fever = 'yes'
else:
self.fever = 'no'
return self.fever
def convert_age_to_category(self):
'''
        This function takes the user's age and converts it into a category: younger than 60, or 60 and older.
        This helps to discourage the user from feeling discriminated against.
'''
if self.age_60_and_above < 60:
age_60_and_above = 'no'
else:
age_60_and_above = 'yes'
return age_60_and_above
def convert_test_indication_to_category(self):
'''
        This function takes the user's test indication (four possibilities) and converts it to the three categories needed by the model
'''
if self.test_indication == 'I had contact with someone that tested positive for COVID-19':
self.new_test_indication = 'Contact with confirmed'
elif self.test_indication == 'I traveled abroad to a region with high COVID incidence':
self.new_test_indication = 'Abroad'
elif self.test_indication == 'both of the above':
self.new_test_indication = 'Contact with confirmed'
else:
self.new_test_indication = 'Other'
return self.new_test_indication
def convert_symptoms_to_dataframe(self):
'''
        Function to convert the user's input data into a dataframe that can be used to predict the outcome
'''
user_input = {
'cough': self.cough,
'fever': self.fever,
'sore_throat': self.sore_throat,
'shortness_of_breath': self.shortness_of_breath,
'head_ache': self.head_ache,
'age_60_and_above': self.age_60_and_above,
'test_indication': self.new_test_indication,
}
self.dataframe = pd.DataFrame([user_input])
return self.dataframe
def predict_probability(self):
'''
        This function imports the Random Forest model from utils and predicts the probability of each COVID-19 infection outcome using the user's symptoms.
        It uses the dataframe built by convert_symptoms_to_dataframe as input.
'''
predicted_probability = new_RF_model.predict_proba(self.dataframe)
return predicted_probability
def predict_symptoms_outcome(self):
'''
        This function imports the Random Forest model from utils and predicts the class with the highest probability using the user's symptoms.
        It uses the dataframe built by convert_symptoms_to_dataframe as input.
'''
predicted_class = new_RF_model.predict(self.dataframe)
return predicted_class
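    # Hedged usage sketch (hypothetical values; the exact call order used by the app is an
    # assumption, and new_RF_model is expected to expose the scikit-learn predict/predict_proba API):
    #   user = ProcessSymptomsData('yes', 38.5, 'no', 'no', 'yes', 65,
    #                              'I had contact with someone that tested positive for COVID-19')
    #   user.fever = user.convert_temperature_to_categories()
    #   user.age_60_and_above = user.convert_age_to_category()
    #   user.new_test_indication = user.convert_test_indication_to_category()
    #   user.convert_symptoms_to_dataframe()
    #   probability = user.predict_probability()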
# def search_conditions(fuzzy_condition):
# '''
# does a fuzzy search of the underlying conditions and returns best matched conditions in a list of defined conditions
# '''
# extracted = []
# defined_conditions = ['hypertension', 'diabetes', 'Immunocompromised', 'hiv', 'pregnant', 'overweight', 'cardiovascular', 'lung', 'heart', 'kidney', 'liver','stroke', 'cancer']
# for condition in defined_conditions:
# ratio1 = fuzz.ratio(fuzzy_condition, condition)
# if ratio1 > 40:
# extracted.append(condition)
# else:
# pass
# return extracted | 3.015625 | 3 |
scripts/dec2020_compositional.py | yanzv/indra_world | 3 | 12787719 | import os
import sys
import glob
import tqdm
import pickle
import logging
from indra_world.corpus import Corpus
from indra_world.assembly.operations import *
from indra_world.sources.dart import process_reader_outputs
from indra.pipeline import AssemblyPipeline
logger = logging.getLogger('dec2020_compositional')
HERE = os.path.dirname(os.path.abspath(__file__))
# December experiment
reader_versions = {'flat':
{'cwms': '2020.10.22',
'hume': 'r2020_10_26_2.flat',
# Note that this just matches the version on the
# bioexp machine dart drive and was manually renamed
# On DART, these entries appear as 1.1 and can only
# be differentiated by date.
'sofia': '1.1_old',
'eidos': '1.0.3'},
'compositional':
{'cwms': '2020.10.22',
'hume': 'r2020_10_28.compositional',
'sofia': '1.1',
'eidos': '1.0.3'}}
DART_STORAGE = '/dart'
def load_reader_outputs(reader_versions):
logger.info('Loading outputs based on %s' % str(reader_versions))
reader_outputs = {}
for reader, version in reader_versions.items():
logger.info('Loading %s/%s' % (reader, version))
reader_outputs[reader] = {}
reader_folder = os.path.join(DART_STORAGE, reader, version)
fnames = glob.glob('%s/*' % reader_folder)
logger.info('Found %d files' % len(fnames))
for fname in tqdm.tqdm(fnames):
doc_id = os.path.basename(fname)
with open(fname, 'r') as fh:
doc_str = fh.read()
reader_outputs[reader][doc_id] = doc_str
return reader_outputs
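# The mapping returned above has the shape {reader_name: {document_id: raw_output_string}},
# which is what gets handed to process_reader_outputs below (that this is the schema the
# DART processor expects is an assumption based on how it is called here).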
if __name__ == '__main__':
corpus_id = 'compositional_dec2020'
logger.info('Processing reader output...')
reader_outputs = load_reader_outputs(reader_versions['compositional'])
stmts = process_reader_outputs(reader_outputs, corpus_id)
'''
stmts = []
for reader in reader_versions['compositional']:
logger.info('Loading %s' % reader)
if os.path.exists('compositional_dec2020_%s_raw.pkl' % reader):
with open('compositional_dec2020_%s_raw.pkl' % reader, 'rb') as fh:
stmts += pickle.load(fh)
'''
logger.info('Got a total of %s statements' % len(stmts))
assembly_config_file = os.path.join(
HERE, os.pardir, 'indra_wm_service', 'resources',
'assembly_compositional_december2020.json')
pipeline = AssemblyPipeline.from_json_file(assembly_config_file)
assembled_stmts = pipeline.run(stmts)
num_docs = 44591
meta_data = {
'corpus_id': corpus_id,
'description': 'Compositional grounding assembly for the December '
'2020 documents.',
'display_name': 'Compositional grounding assembly December 2020',
'readers': list(reader_versions['compositional'].keys()),
'assembly': {
'level': 'grounding_location',
'grounding_threshold': 0.6,
},
'num_statements': len(assembled_stmts),
'num_documents': num_docs
}
corpus = Corpus(corpus_id=corpus_id,
statements=assembled_stmts,
raw_statements=stmts,
meta_data=meta_data)
corpus.s3_put()
| 2.125 | 2 |
cam/button/cam.py | davidegaspar/raspi | 1 | 12787720 | #!/usr/bin/python
import os
import time
import RPi.GPIO as GPIO
import subprocess
#import sys
import signal
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
buttonPin = 17
buzzPin = 22
GPIO.setup(buttonPin, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
GPIO.setup(buzzPin, GPIO.OUT)
GPIO.output(buzzPin, GPIO.LOW)
# Morse
def dot():
GPIO.output(buzzPin, GPIO.HIGH)
time.sleep(0.1)
GPIO.output(buzzPin, GPIO.LOW)
time.sleep(0.1)
def dash():
GPIO.output(buzzPin, GPIO.HIGH)
time.sleep(0.3)
GPIO.output(buzzPin, GPIO.LOW)
time.sleep(0.1)
def letterSpace():
time.sleep(0.2)
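# Buzzer feedback uses Morse code: up() spells the letter U (dot dot dash) when the motion
# service is started, and down() spells D (dash dot dot) when it is stopped. The timings
# roughly follow Morse convention, with a dash three times the length of a dot.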
def up():
dot()
dot()
dash()
def down():
dash()
dot()
dot()
def log(c):
global motion
if (motion):
print("stop")
subprocess.call(['service','motion','stop'])
motion = False
down()
else:
print("start")
subprocess.call(['service','motion','start'])
motion = True
up()
def main():
GPIO.add_event_detect(buttonPin,GPIO.RISING,callback=log,bouncetime=300)
signal.pause()
motion = False
main()
GPIO.remove_event_detect(buttonPin)
GPIO.cleanup()
| 2.671875 | 3 |
arguments.py | mjlbach/Object-Goal-Navigation | 106 | 12787721 | import argparse
import torch
def get_args():
parser = argparse.ArgumentParser(
description='Goal-Oriented-Semantic-Exploration')
# General Arguments
parser.add_argument('--seed', type=int, default=1,
help='random seed (default: 1)')
parser.add_argument('--auto_gpu_config', type=int, default=1)
parser.add_argument('--total_num_scenes', type=str, default="auto")
parser.add_argument('-n', '--num_processes', type=int, default=5,
help="""how many training processes to use (default:5)
Overridden when auto_gpu_config=1
and training on gpus""")
parser.add_argument('--num_processes_per_gpu', type=int, default=6)
parser.add_argument('--num_processes_on_first_gpu', type=int, default=1)
parser.add_argument('--eval', type=int, default=0,
help='0: Train, 1: Evaluate (default: 0)')
parser.add_argument('--num_training_frames', type=int, default=10000000,
help='total number of training frames')
parser.add_argument('--num_eval_episodes', type=int, default=200,
help="number of test episodes per scene")
parser.add_argument('--num_train_episodes', type=int, default=10000,
help="""number of train episodes per scene
before loading the next scene""")
parser.add_argument('--no_cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument("--sim_gpu_id", type=int, default=0,
help="gpu id on which scenes are loaded")
parser.add_argument("--sem_gpu_id", type=int, default=-1,
help="""gpu id for semantic model,
-1: same as sim gpu, -2: cpu""")
# Logging, loading models, visualization
parser.add_argument('--log_interval', type=int, default=10,
help="""log interval, one log per n updates
(default: 10) """)
parser.add_argument('--save_interval', type=int, default=1,
help="""save interval""")
parser.add_argument('-d', '--dump_location', type=str, default="./tmp/",
help='path to dump models and log (default: ./tmp/)')
parser.add_argument('--exp_name', type=str, default="exp1",
help='experiment name (default: exp1)')
parser.add_argument('--save_periodic', type=int, default=500000,
help='Model save frequency in number of updates')
parser.add_argument('--load', type=str, default="0",
help="""model path to load,
0 to not reload (default: 0)""")
parser.add_argument('-v', '--visualize', type=int, default=0,
help="""1: Render the observation and
the predicted semantic map,
2: Render the observation with semantic
predictions and the predicted semantic map
(default: 0)""")
parser.add_argument('--print_images', type=int, default=0,
help='1: save visualization as images')
# Environment, dataset and episode specifications
parser.add_argument('-efw', '--env_frame_width', type=int, default=640,
help='Frame width (default:640)')
parser.add_argument('-efh', '--env_frame_height', type=int, default=480,
help='Frame height (default:480)')
parser.add_argument('-fw', '--frame_width', type=int, default=160,
help='Frame width (default:160)')
parser.add_argument('-fh', '--frame_height', type=int, default=120,
help='Frame height (default:120)')
parser.add_argument('-el', '--max_episode_length', type=int, default=500,
help="""Maximum episode length""")
parser.add_argument("--task_config", type=str,
default="tasks/objectnav_gibson.yaml",
help="path to config yaml containing task information")
parser.add_argument("--split", type=str, default="train",
help="dataset split (train | val | val_mini) ")
parser.add_argument('--camera_height', type=float, default=0.88,
help="agent camera height in metres")
parser.add_argument('--hfov', type=float, default=79.0,
help="horizontal field of view in degrees")
parser.add_argument('--turn_angle', type=float, default=30,
help="Agent turn angle in degrees")
parser.add_argument('--min_depth', type=float, default=0.5,
help="Minimum depth for depth sensor in meters")
parser.add_argument('--max_depth', type=float, default=5.0,
help="Maximum depth for depth sensor in meters")
parser.add_argument('--success_dist', type=float, default=1.0,
help="success distance threshold in meters")
parser.add_argument('--floor_thr', type=int, default=50,
help="floor threshold in cm")
parser.add_argument('--min_d', type=float, default=1.5,
help="min distance to goal during training in meters")
parser.add_argument('--max_d', type=float, default=100.0,
help="max distance to goal during training in meters")
parser.add_argument('--version', type=str, default="v1.1",
help="dataset version")
# Model Hyperparameters
parser.add_argument('--agent', type=str, default="sem_exp")
parser.add_argument('--lr', type=float, default=2.5e-5,
help='learning rate (default: 2.5e-5)')
parser.add_argument('--global_hidden_size', type=int, default=256,
help='global_hidden_size')
parser.add_argument('--eps', type=float, default=1e-5,
help='RL Optimizer epsilon (default: 1e-5)')
parser.add_argument('--alpha', type=float, default=0.99,
help='RL Optimizer alpha (default: 0.99)')
parser.add_argument('--gamma', type=float, default=0.99,
help='discount factor for rewards (default: 0.99)')
parser.add_argument('--use_gae', action='store_true', default=False,
help='use generalized advantage estimation')
parser.add_argument('--tau', type=float, default=0.95,
help='gae parameter (default: 0.95)')
parser.add_argument('--entropy_coef', type=float, default=0.001,
help='entropy term coefficient (default: 0.01)')
parser.add_argument('--value_loss_coef', type=float, default=0.5,
help='value loss coefficient (default: 0.5)')
parser.add_argument('--max_grad_norm', type=float, default=0.5,
help='max norm of gradients (default: 0.5)')
parser.add_argument('--num_global_steps', type=int, default=20,
                        help='number of forward steps in A2C (default: 20)')
parser.add_argument('--ppo_epoch', type=int, default=4,
help='number of ppo epochs (default: 4)')
parser.add_argument('--num_mini_batch', type=str, default="auto",
                        help='number of batches for ppo (default: auto)')
parser.add_argument('--clip_param', type=float, default=0.2,
help='ppo clip parameter (default: 0.2)')
parser.add_argument('--use_recurrent_global', type=int, default=0,
help='use a recurrent global policy')
parser.add_argument('--num_local_steps', type=int, default=25,
help="""Number of steps the local policy
between each global step""")
parser.add_argument('--reward_coeff', type=float, default=0.1,
help="Object goal reward coefficient")
parser.add_argument('--intrinsic_rew_coeff', type=float, default=0.02,
help="intrinsic exploration reward coefficient")
parser.add_argument('--num_sem_categories', type=float, default=16)
parser.add_argument('--sem_pred_prob_thr', type=float, default=0.9,
help="Semantic prediction confidence threshold")
# Mapping
parser.add_argument('--global_downscaling', type=int, default=2)
parser.add_argument('--vision_range', type=int, default=100)
parser.add_argument('--map_resolution', type=int, default=5)
parser.add_argument('--du_scale', type=int, default=1)
parser.add_argument('--map_size_cm', type=int, default=2400)
parser.add_argument('--cat_pred_threshold', type=float, default=5.0)
parser.add_argument('--map_pred_threshold', type=float, default=1.0)
parser.add_argument('--exp_pred_threshold', type=float, default=1.0)
parser.add_argument('--collision_threshold', type=float, default=0.20)
# parse arguments
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
if args.auto_gpu_config:
num_gpus = torch.cuda.device_count()
if args.total_num_scenes != "auto":
args.total_num_scenes = int(args.total_num_scenes)
elif "objectnav_gibson" in args.task_config and \
"train" in args.split:
args.total_num_scenes = 25
elif "objectnav_gibson" in args.task_config and \
"val" in args.split:
args.total_num_scenes = 5
else:
assert False, "Unknown task config, please specify" + \
" total_num_scenes"
# GPU Memory required for the SemExp model:
# 0.8 + 0.4 * args.total_num_scenes (GB)
# GPU Memory required per thread: 2.6 (GB)
min_memory_required = max(0.8 + 0.4 * args.total_num_scenes, 2.6)
# Automatically configure number of training threads based on
# number of GPUs available and GPU memory size
gpu_memory = 1000
for i in range(num_gpus):
gpu_memory = min(gpu_memory,
torch.cuda.get_device_properties(
i).total_memory
/ 1024 / 1024 / 1024)
assert gpu_memory > min_memory_required, \
"""Insufficient GPU memory for GPU {}, gpu memory ({}GB)
needs to be greater than {}GB""".format(
i, gpu_memory, min_memory_required)
num_processes_per_gpu = int(gpu_memory / 2.6)
num_processes_on_first_gpu = \
int((gpu_memory - min_memory_required) / 2.6)
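            # Worked example (hypothetical hardware): with total_num_scenes = 25 and a
            # 16GB GPU, min_memory_required = 0.8 + 0.4 * 25 = 10.8GB, so
            # num_processes_per_gpu = int(16 / 2.6) = 6 and
            # num_processes_on_first_gpu = int((16 - 10.8) / 2.6) = 2.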
if args.eval:
max_threads = num_processes_per_gpu * (num_gpus - 1) \
+ num_processes_on_first_gpu
assert max_threads >= args.total_num_scenes, \
"""Insufficient GPU memory for evaluation"""
if num_gpus == 1:
args.num_processes_on_first_gpu = num_processes_on_first_gpu
args.num_processes_per_gpu = 0
args.num_processes = num_processes_on_first_gpu
assert args.num_processes > 0, "Insufficient GPU memory"
else:
num_threads = num_processes_per_gpu * (num_gpus - 1) \
+ num_processes_on_first_gpu
num_threads = min(num_threads, args.total_num_scenes)
args.num_processes_per_gpu = num_processes_per_gpu
args.num_processes_on_first_gpu = max(
0,
num_threads - args.num_processes_per_gpu * (num_gpus - 1))
args.num_processes = num_threads
args.sim_gpu_id = 1
print("Auto GPU config:")
print("Number of processes: {}".format(args.num_processes))
print("Number of processes on GPU 0: {}".format(
args.num_processes_on_first_gpu))
print("Number of processes per GPU: {}".format(
args.num_processes_per_gpu))
else:
args.sem_gpu_id = -2
if args.num_mini_batch == "auto":
args.num_mini_batch = max(args.num_processes // 2, 1)
else:
args.num_mini_batch = int(args.num_mini_batch)
return args
| 2.671875 | 3 |
web/actions/forktools.py | bdeprez/machinaris | 0 | 12787722 | #
# Control of the Forktools configuration and services
#
from flask import Flask, jsonify, abort, request, flash, g
from common.models import alerts as a
from web import app, db, utils
from . import worker as wk
def load_config(farmer, blockchain):
return utils.send_get(farmer, "/configs/tools/"+ blockchain, debug=False).content
def save_config(farmer, blockchain, config):
try:
utils.send_put(farmer, "/configs/tools/" + blockchain, config, debug=False)
except Exception as ex:
flash('Failed to save config to farmer. Please check log files.', 'danger')
flash(str(ex), 'warning')
else:
flash('Nice! Tools config validated and saved successfully. Worker services now restarting. Please allow 10-15 minutes to take effect.', 'success')
| 1.921875 | 2 |
medios/diarios/infobae.py | miglesias91/dicenlosmedios | 1 | 12787723 | <filename>medios/diarios/infobae.py<gh_stars>1-10
import dateutil
import datetime
import yaml
import feedparser as fp
import newspaper as np
import re
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup as bs
from medios.medio import Medio
from medios.diarios.noticia import Noticia
from medios.diarios.diario import Diario
from bd.entidades import Kiosco
class Infobae(Diario):
def __init__(self):
Diario.__init__(self, "infobae")
def leer(self):
kiosco = Kiosco()
print("leyendo '" + self.etiqueta + "'...")
tag_regexp = re.compile(r'<[^>]+>')
for entrada in fp.parse(self.feed_noticias).entries:
url = entrada.link
            if kiosco.contar_noticias(diario=self.etiqueta, url=url): # if the news item (url) already exists, do not download it again
continue
titulo = entrada.title
texto = re.sub(tag_regexp,' ',entrada.content[0].value)
fecha = dateutil.parser.parse(entrada.published) - datetime.timedelta(hours=3)
categoria = url.split('/')[3]
if categoria == "america":
categoria = "internacional"
if categoria == "teleshow":
categoria = "espectaculos"
if categoria == "deportes-2":
categoria = "deportes"
if categoria not in self.categorias:
continue
self.noticias.append(Noticia(fecha=fecha, url=url, diario=self.etiqueta, categoria=categoria, titulo=titulo, texto=self.limpiar_texto(texto)))
# for tag, url_feed in self.feeds.items():
# for entrada in fp.parse(url_feed).entries:
# titulo = entrada.title
# texto = re.sub(tag_regexp,' ',entrada.content[0].value)
# fecha = dateutil.parser.parse(entrada.published) - datetime.timedelta(hours=3)
# url = entrada.link
# if kiosco.contar_noticias(diario=self.etiqueta, url=url): # si existe ya la noticia (url), no la decargo
# continue
# self.noticias.append(Noticia(fecha=fecha, url=url, diario=self.etiqueta, categoria=tag, titulo=titulo, texto=self.limpiar_texto(texto)))
def limpiar_texto(self, texto):
regexp = re.compile(r'SEGUÍ LEYENDO[^$]+')
texto = re.sub(regexp,' ',texto)
regexp = re.compile(r'MÁS SOBRE ESTE TEMA[^$]+')
texto = re.sub(regexp,' ',texto)
regexp = re.compile(r'Seguí leyendo[^$]+')
texto = re.sub(regexp,' ',texto)
return texto | 2.765625 | 3 |
nova/tests/functional/integrated_helpers.py | bopopescu/nova-token | 0 | 12787724 | begin_unit
comment|'# Copyright 2011 <NAME>'
nl|'\n'
comment|'# All Rights Reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
string|'"""\nProvides common functionality for integrated unit tests\n"""'
newline|'\n'
nl|'\n'
name|'import'
name|'random'
newline|'\n'
name|'import'
name|'string'
newline|'\n'
name|'import'
name|'uuid'
newline|'\n'
nl|'\n'
name|'from'
name|'oslo_log'
name|'import'
name|'log'
name|'as'
name|'logging'
newline|'\n'
nl|'\n'
name|'import'
name|'nova'
op|'.'
name|'conf'
newline|'\n'
name|'import'
name|'nova'
op|'.'
name|'image'
op|'.'
name|'glance'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'test'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
name|'import'
name|'fixtures'
name|'as'
name|'nova_fixtures'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
name|'import'
name|'cast_as_call'
newline|'\n'
name|'import'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
op|'.'
name|'image'
op|'.'
name|'fake'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|CONF
name|'CONF'
op|'='
name|'nova'
op|'.'
name|'conf'
op|'.'
name|'CONF'
newline|'\n'
DECL|variable|LOG
name|'LOG'
op|'='
name|'logging'
op|'.'
name|'getLogger'
op|'('
name|'__name__'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|generate_random_alphanumeric
name|'def'
name|'generate_random_alphanumeric'
op|'('
name|'length'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Creates a random alphanumeric string of specified length."""'
newline|'\n'
name|'return'
string|"''"
op|'.'
name|'join'
op|'('
name|'random'
op|'.'
name|'choice'
op|'('
name|'string'
op|'.'
name|'ascii_uppercase'
op|'+'
name|'string'
op|'.'
name|'digits'
op|')'
nl|'\n'
name|'for'
name|'_x'
name|'in'
name|'range'
op|'('
name|'length'
op|')'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|generate_random_numeric
dedent|''
name|'def'
name|'generate_random_numeric'
op|'('
name|'length'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Creates a random numeric string of specified length."""'
newline|'\n'
name|'return'
string|"''"
op|'.'
name|'join'
op|'('
name|'random'
op|'.'
name|'choice'
op|'('
name|'string'
op|'.'
name|'digits'
op|')'
nl|'\n'
name|'for'
name|'_x'
name|'in'
name|'range'
op|'('
name|'length'
op|')'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|generate_new_element
dedent|''
name|'def'
name|'generate_new_element'
op|'('
name|'items'
op|','
name|'prefix'
op|','
name|'numeric'
op|'='
name|'False'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Creates a random string with prefix, that is not in \'items\' list."""'
newline|'\n'
name|'while'
name|'True'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'numeric'
op|':'
newline|'\n'
indent|' '
name|'candidate'
op|'='
name|'prefix'
op|'+'
name|'generate_random_numeric'
op|'('
number|'8'
op|')'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'candidate'
op|'='
name|'prefix'
op|'+'
name|'generate_random_alphanumeric'
op|'('
number|'8'
op|')'
newline|'\n'
dedent|''
name|'if'
name|'candidate'
name|'not'
name|'in'
name|'items'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'candidate'
newline|'\n'
dedent|''
name|'LOG'
op|'.'
name|'debug'
op|'('
string|'"Random collision on %s"'
op|'%'
name|'candidate'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|_IntegratedTestBase
dedent|''
dedent|''
name|'class'
name|'_IntegratedTestBase'
op|'('
name|'test'
op|'.'
name|'TestCase'
op|')'
op|':'
newline|'\n'
DECL|variable|REQUIRES_LOCKING
indent|' '
name|'REQUIRES_LOCKING'
op|'='
name|'True'
newline|'\n'
DECL|variable|ADMIN_API
name|'ADMIN_API'
op|'='
name|'False'
newline|'\n'
nl|'\n'
DECL|member|setUp
name|'def'
name|'setUp'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'super'
op|'('
name|'_IntegratedTestBase'
op|','
name|'self'
op|')'
op|'.'
name|'setUp'
op|'('
op|')'
newline|'\n'
nl|'\n'
name|'f'
op|'='
name|'self'
op|'.'
name|'_get_flags'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'flags'
op|'('
op|'**'
name|'f'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'flags'
op|'('
name|'verbose'
op|'='
name|'True'
op|')'
newline|'\n'
nl|'\n'
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
op|'.'
name|'image'
op|'.'
name|'fake'
op|'.'
name|'stub_out_image_service'
op|'('
name|'self'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'_setup_services'
op|'('
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'api_fixture'
op|'='
name|'self'
op|'.'
name|'useFixture'
op|'('
nl|'\n'
name|'nova_fixtures'
op|'.'
name|'OSAPIFixture'
op|'('
name|'self'
op|'.'
name|'api_major_version'
op|')'
op|')'
newline|'\n'
nl|'\n'
comment|'# if the class needs to run as admin, make the api endpoint'
nl|'\n'
comment|"# the admin, otherwise it's safer to run as non admin user."
nl|'\n'
name|'if'
name|'self'
op|'.'
name|'ADMIN_API'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'api'
op|'='
name|'self'
op|'.'
name|'api_fixture'
op|'.'
name|'admin_api'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'api'
op|'='
name|'self'
op|'.'
name|'api_fixture'
op|'.'
name|'api'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'useFixture'
op|'('
name|'cast_as_call'
op|'.'
name|'CastAsCall'
op|'('
name|'self'
op|'.'
name|'stubs'
op|')'
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'addCleanup'
op|'('
name|'nova'
op|'.'
name|'tests'
op|'.'
name|'unit'
op|'.'
name|'image'
op|'.'
name|'fake'
op|'.'
name|'FakeImageService_reset'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_setup_compute_service
dedent|''
name|'def'
name|'_setup_compute_service'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'self'
op|'.'
name|'start_service'
op|'('
string|"'compute'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|_setup_scheduler_service
dedent|''
name|'def'
name|'_setup_scheduler_service'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'flags'
op|'('
name|'scheduler_driver'
op|'='
string|"'chance_scheduler'"
op|')'
newline|'\n'
name|'return'
name|'self'
op|'.'
name|'start_service'
op|'('
string|"'scheduler'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|_setup_services
dedent|''
name|'def'
name|'_setup_services'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'conductor'
op|'='
name|'self'
op|'.'
name|'start_service'
op|'('
string|"'conductor'"
op|','
nl|'\n'
name|'manager'
op|'='
name|'CONF'
op|'.'
name|'conductor'
op|'.'
name|'manager'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'compute'
op|'='
name|'self'
op|'.'
name|'_setup_compute_service'
op|'('
op|')'
newline|'\n'
name|'self'
op|'.'
name|'consoleauth'
op|'='
name|'self'
op|'.'
name|'start_service'
op|'('
string|"'consoleauth'"
op|')'
newline|'\n'
nl|'\n'
name|'self'
op|'.'
name|'network'
op|'='
name|'self'
op|'.'
name|'start_service'
op|'('
string|"'network'"
op|')'
newline|'\n'
name|'self'
op|'.'
name|'scheduler'
op|'='
name|'self'
op|'.'
name|'_setup_scheduler_service'
op|'('
op|')'
newline|'\n'
nl|'\n'
DECL|member|_get_flags
dedent|''
name|'def'
name|'_get_flags'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Allow subclass to modify global config before we start services."""'
newline|'\n'
comment|'# NOTE(sdague): _get_flags is used by about 13 tests that'
nl|'\n'
comment|'# subclass this mostly to modify the extensions list. We'
nl|'\n'
comment|'# should instead make that declarative in the future, at which'
nl|'\n'
comment|'# point we can get rid of this.'
nl|'\n'
name|'return'
op|'{'
op|'}'
newline|'\n'
nl|'\n'
DECL|member|get_unused_server_name
dedent|''
name|'def'
name|'get_unused_server_name'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'servers'
op|'='
name|'self'
op|'.'
name|'api'
op|'.'
name|'get_servers'
op|'('
op|')'
newline|'\n'
name|'server_names'
op|'='
op|'['
name|'server'
op|'['
string|"'name'"
op|']'
name|'for'
name|'server'
name|'in'
name|'servers'
op|']'
newline|'\n'
name|'return'
name|'generate_new_element'
op|'('
name|'server_names'
op|','
string|"'server'"
op|')'
newline|'\n'
nl|'\n'
DECL|member|get_unused_flavor_name_id
dedent|''
name|'def'
name|'get_unused_flavor_name_id'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'flavors'
op|'='
name|'self'
op|'.'
name|'api'
op|'.'
name|'get_flavors'
op|'('
op|')'
newline|'\n'
name|'flavor_names'
op|'='
name|'list'
op|'('
op|')'
newline|'\n'
name|'flavor_ids'
op|'='
name|'list'
op|'('
op|')'
newline|'\n'
op|'['
op|'('
name|'flavor_names'
op|'.'
name|'append'
op|'('
name|'flavor'
op|'['
string|"'name'"
op|']'
op|')'
op|','
nl|'\n'
name|'flavor_ids'
op|'.'
name|'append'
op|'('
name|'flavor'
op|'['
string|"'id'"
op|']'
op|')'
op|')'
nl|'\n'
name|'for'
name|'flavor'
name|'in'
name|'flavors'
op|']'
newline|'\n'
name|'return'
op|'('
name|'generate_new_element'
op|'('
name|'flavor_names'
op|','
string|"'flavor'"
op|')'
op|','
nl|'\n'
name|'int'
op|'('
name|'generate_new_element'
op|'('
name|'flavor_ids'
op|','
string|"''"
op|','
name|'True'
op|')'
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|get_invalid_image
dedent|''
name|'def'
name|'get_invalid_image'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'str'
op|'('
name|'uuid'
op|'.'
name|'uuid4'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_get_any_image_href
dedent|''
name|'def'
name|'_get_any_image_href'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'image'
op|'='
name|'self'
op|'.'
name|'api'
op|'.'
name|'get_images'
op|'('
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
name|'LOG'
op|'.'
name|'debug'
op|'('
string|'"Image: %s"'
op|'%'
name|'image'
op|')'
newline|'\n'
nl|'\n'
name|'if'
name|'self'
op|'.'
name|'_image_ref_parameter'
name|'in'
name|'image'
op|':'
newline|'\n'
indent|' '
name|'image_href'
op|'='
name|'image'
op|'['
name|'self'
op|'.'
name|'_image_ref_parameter'
op|']'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'image_href'
op|'='
name|'image'
op|'['
string|"'id'"
op|']'
newline|'\n'
name|'image_href'
op|'='
string|"'http://fake.server/%s'"
op|'%'
name|'image_href'
newline|'\n'
dedent|''
name|'return'
name|'image_href'
newline|'\n'
nl|'\n'
DECL|member|_build_minimal_create_server_request
dedent|''
name|'def'
name|'_build_minimal_create_server_request'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'server'
op|'='
op|'{'
op|'}'
newline|'\n'
nl|'\n'
name|'image_href'
op|'='
name|'self'
op|'.'
name|'_get_any_image_href'
op|'('
op|')'
newline|'\n'
nl|'\n'
comment|'# We now have a valid imageId'
nl|'\n'
name|'server'
op|'['
name|'self'
op|'.'
name|'_image_ref_parameter'
op|']'
op|'='
name|'image_href'
newline|'\n'
nl|'\n'
comment|'# Set a valid flavorId'
nl|'\n'
name|'flavor'
op|'='
name|'self'
op|'.'
name|'api'
op|'.'
name|'get_flavors'
op|'('
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
name|'LOG'
op|'.'
name|'debug'
op|'('
string|'"Using flavor: %s"'
op|'%'
name|'flavor'
op|')'
newline|'\n'
name|'server'
op|'['
name|'self'
op|'.'
name|'_flavor_ref_parameter'
op|']'
op|'='
op|'('
string|"'http://fake.server/%s'"
nl|'\n'
op|'%'
name|'flavor'
op|'['
string|"'id'"
op|']'
op|')'
newline|'\n'
nl|'\n'
comment|'# Set a valid server name'
nl|'\n'
name|'server_name'
op|'='
name|'self'
op|'.'
name|'get_unused_server_name'
op|'('
op|')'
newline|'\n'
name|'server'
op|'['
string|"'name'"
op|']'
op|'='
name|'server_name'
newline|'\n'
name|'return'
name|'server'
newline|'\n'
nl|'\n'
DECL|member|_create_flavor_body
dedent|''
name|'def'
name|'_create_flavor_body'
op|'('
name|'self'
op|','
name|'name'
op|','
name|'ram'
op|','
name|'vcpus'
op|','
name|'disk'
op|','
name|'ephemeral'
op|','
name|'id'
op|','
name|'swap'
op|','
nl|'\n'
name|'rxtx_factor'
op|','
name|'is_public'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'{'
nl|'\n'
string|'"flavor"'
op|':'
op|'{'
nl|'\n'
string|'"name"'
op|':'
name|'name'
op|','
nl|'\n'
string|'"ram"'
op|':'
name|'ram'
op|','
nl|'\n'
string|'"vcpus"'
op|':'
name|'vcpus'
op|','
nl|'\n'
string|'"disk"'
op|':'
name|'disk'
op|','
nl|'\n'
string|'"OS-FLV-EXT-DATA:ephemeral"'
op|':'
name|'ephemeral'
op|','
nl|'\n'
string|'"id"'
op|':'
name|'id'
op|','
nl|'\n'
string|'"swap"'
op|':'
name|'swap'
op|','
nl|'\n'
string|'"rxtx_factor"'
op|':'
name|'rxtx_factor'
op|','
nl|'\n'
string|'"os-flavor-access:is_public"'
op|':'
name|'is_public'
op|','
nl|'\n'
op|'}'
nl|'\n'
op|'}'
newline|'\n'
nl|'\n'
DECL|member|_create_flavor
dedent|''
name|'def'
name|'_create_flavor'
op|'('
name|'self'
op|','
name|'memory_mb'
op|'='
number|'2048'
op|','
name|'vcpu'
op|'='
number|'2'
op|','
name|'disk'
op|'='
number|'10'
op|','
name|'ephemeral'
op|'='
number|'10'
op|','
nl|'\n'
name|'swap'
op|'='
number|'0'
op|','
name|'rxtx_factor'
op|'='
number|'1.0'
op|','
name|'is_public'
op|'='
name|'True'
op|','
nl|'\n'
name|'extra_spec'
op|'='
name|'None'
op|')'
op|':'
newline|'\n'
indent|' '
name|'flv_name'
op|','
name|'flv_id'
op|'='
name|'self'
op|'.'
name|'get_unused_flavor_name_id'
op|'('
op|')'
newline|'\n'
name|'body'
op|'='
name|'self'
op|'.'
name|'_create_flavor_body'
op|'('
name|'flv_name'
op|','
name|'memory_mb'
op|','
name|'vcpu'
op|','
name|'disk'
op|','
nl|'\n'
name|'ephemeral'
op|','
name|'flv_id'
op|','
name|'swap'
op|','
name|'rxtx_factor'
op|','
nl|'\n'
name|'is_public'
op|')'
newline|'\n'
name|'self'
op|'.'
name|'api_fixture'
op|'.'
name|'admin_api'
op|'.'
name|'post_flavor'
op|'('
name|'body'
op|')'
newline|'\n'
name|'if'
name|'extra_spec'
name|'is'
name|'not'
name|'None'
op|':'
newline|'\n'
indent|' '
name|'spec'
op|'='
op|'{'
string|'"extra_specs"'
op|':'
name|'extra_spec'
op|'}'
newline|'\n'
name|'self'
op|'.'
name|'api_fixture'
op|'.'
name|'admin_api'
op|'.'
name|'post_extra_spec'
op|'('
name|'flv_id'
op|','
name|'spec'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'flv_id'
newline|'\n'
nl|'\n'
DECL|member|_build_server
dedent|''
name|'def'
name|'_build_server'
op|'('
name|'self'
op|','
name|'flavor_id'
op|')'
op|':'
newline|'\n'
indent|' '
name|'server'
op|'='
op|'{'
op|'}'
newline|'\n'
nl|'\n'
name|'image_href'
op|'='
name|'self'
op|'.'
name|'_get_any_image_href'
op|'('
op|')'
newline|'\n'
name|'image'
op|'='
name|'self'
op|'.'
name|'api'
op|'.'
name|'get_images'
op|'('
op|')'
op|'['
number|'0'
op|']'
newline|'\n'
name|'LOG'
op|'.'
name|'debug'
op|'('
string|'"Image: %s"'
op|'%'
name|'image'
op|')'
newline|'\n'
nl|'\n'
name|'if'
name|'self'
op|'.'
name|'_image_ref_parameter'
name|'in'
name|'image'
op|':'
newline|'\n'
indent|' '
name|'image_href'
op|'='
name|'image'
op|'['
name|'self'
op|'.'
name|'_image_ref_parameter'
op|']'
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
indent|' '
name|'image_href'
op|'='
name|'image'
op|'['
string|"'id'"
op|']'
newline|'\n'
name|'image_href'
op|'='
string|"'http://fake.server/%s'"
op|'%'
name|'image_href'
newline|'\n'
nl|'\n'
comment|'# We now have a valid imageId'
nl|'\n'
dedent|''
name|'server'
op|'['
name|'self'
op|'.'
name|'_image_ref_parameter'
op|']'
op|'='
name|'image_href'
newline|'\n'
nl|'\n'
comment|'# Set a valid flavorId'
nl|'\n'
name|'flavor'
op|'='
name|'self'
op|'.'
name|'api'
op|'.'
name|'get_flavor'
op|'('
name|'flavor_id'
op|')'
newline|'\n'
name|'LOG'
op|'.'
name|'debug'
op|'('
string|'"Using flavor: %s"'
op|'%'
name|'flavor'
op|')'
newline|'\n'
name|'server'
op|'['
name|'self'
op|'.'
name|'_flavor_ref_parameter'
op|']'
op|'='
op|'('
string|"'http://fake.server/%s'"
nl|'\n'
op|'%'
name|'flavor'
op|'['
string|"'id'"
op|']'
op|')'
newline|'\n'
nl|'\n'
comment|'# Set a valid server name'
nl|'\n'
name|'server_name'
op|'='
name|'self'
op|'.'
name|'get_unused_server_name'
op|'('
op|')'
newline|'\n'
name|'server'
op|'['
string|"'name'"
op|']'
op|'='
name|'server_name'
newline|'\n'
name|'return'
name|'server'
newline|'\n'
nl|'\n'
DECL|member|_check_api_endpoint
dedent|''
name|'def'
name|'_check_api_endpoint'
op|'('
name|'self'
op|','
name|'endpoint'
op|','
name|'expected_middleware'
op|')'
op|':'
newline|'\n'
indent|' '
name|'app'
op|'='
name|'self'
op|'.'
name|'api_fixture'
op|'.'
name|'osapi'
op|'.'
name|'app'
op|'.'
name|'get'
op|'('
op|'('
name|'None'
op|','
string|"'/v2'"
op|')'
op|')'
newline|'\n'
nl|'\n'
name|'while'
name|'getattr'
op|'('
name|'app'
op|','
string|"'application'"
op|','
name|'False'
op|')'
op|':'
newline|'\n'
indent|' '
name|'for'
name|'middleware'
name|'in'
name|'expected_middleware'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'isinstance'
op|'('
name|'app'
op|'.'
name|'application'
op|','
name|'middleware'
op|')'
op|':'
newline|'\n'
indent|' '
name|'expected_middleware'
op|'.'
name|'remove'
op|'('
name|'middleware'
op|')'
newline|'\n'
name|'break'
newline|'\n'
dedent|''
dedent|''
name|'app'
op|'='
name|'app'
op|'.'
name|'application'
newline|'\n'
nl|'\n'
dedent|''
name|'self'
op|'.'
name|'assertEqual'
op|'('
op|'['
op|']'
op|','
nl|'\n'
name|'expected_middleware'
op|','
nl|'\n'
op|'('
string|'"The expected wsgi middlewares %s are not "'
nl|'\n'
string|'"existed"'
op|')'
op|'%'
name|'expected_middleware'
op|')'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| 1.335938 | 1 |
traffic_analysis.py | marwahmanbir/GitHubAPI | 0 | 12787725 | <filename>traffic_analysis.py
from github import Github
import os
from pprint import pprint
from operator import itemgetter
token = os.getenv('GITHUB_TOKEN')
g = Github(token)
repo = g.get_repo("marwahmanbir/OpenEDU")
clones = repo.get_clones_traffic(per="week")
views = repo.get_views_traffic(per="week")
print(f"Repository has {clones['count']} clones out of which {clones['uniques']} are unique.")
print(f"Repository has {views['count']} views out of which {views['uniques']} are unique.")
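# Each element of views["views"] is a weekly traffic bucket exposing .count and .timestamp;
# unpacking the (count, timestamp) tuples into max() with itemgetter(0) selects the week
# with the highest view count.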
best_week = max(*list((week.count, week.timestamp) for week in views["views"]), key=itemgetter(0))
pprint(views)
print(f"Repository had most views in {best_week[1]} with {best_week[0]} views") | 3.09375 | 3 |
covid_data_handler.py | JakeGT/covid_dashboard | 0 | 12787726 | '''
This module handles the covid API, covid data, key statistics calculations and
scheduling covid updates.
'''
import logging
import sched
import datetime
import time
from re import match
import requests
from uk_covid19 import Cov19API
import uk_covid19
covid_data = {}
national_covid_data = {}
scheduled_updates = {}
config_covid_location = {}
scheduler = sched.scheduler(time.time, time.sleep)
def parse_csv_data(filename:str) -> list[dict]:
'''
    Take a csv file and return the Covid data as a list of dictionaries, one per row of data
Parameters:
filename (str): The name of the Covid data CSV file
Returns:
        covid_data_local (list[dict]): The rows of the CSV converted to dictionaries mapping headers to values
'''
headers = {
"areaCode":"area_code",
"areaName":"area_name",
"areaType":"area_type",
"date": "date",
"cumDailyNsoDeathsByDeathDate":"cum_deaths",
"hospitalCases":"hospital_cases",
"newCasesBySpecimenDate":"new_cases"
}
try:
with open(str(filename), encoding="ascii") as file:
data_lines = file.read().splitlines() # file to list split by new lines (rows)
logging.info("CSV file opened successfully: %s", filename)
except IOError:
logging.warning("Cannot open CSV file")
else:
file_headers = data_lines[0].split(",")
new_headers = []
for header in file_headers:
if header in headers:
new_headers.append(headers[header])
else:
logging.warning("Unknown header in the CSV file: %s", header)
new_headers.append(header)
data_lines[0] = ",".join(new_headers)
# renaming headers - API does this automatically, but currently reading from CSV
covid_data_local = convert_covid_csv_data_to_list_dict(data_lines)
return covid_data_local
def convert_covid_csv_data_to_list_dict(covid_csv_data:list[str]) -> list[dict]:
'''
    Takes the parsed csv covid data and splits rows into values, appending each row to a new list as a dictionary
This function is only necessary when reading from a CSV. The function turns the CSV file into
the same data structure that is returned from the API.
Parameters:
covid_csv_data (list[str]): Covid data parsed through the function parse_csv_data
the data is each row of data as a string of the entire row
Returns:
        covid_data_local (list[dict]): Covid data separated in a list by row and
converted to a dictionary
'''
logging.info("""convert_covid_csv_data_to_list_dict called:
Converting CSV file to list of dictionaries for further data processing.""")
covid_data_headers = covid_csv_data[0].split(',') # save covid data headers for dict
covid_csv_data_local = covid_csv_data[1:] # store data excluding headers in another list
covid_data_local = []
for row in covid_csv_data_local:
row_data = row.split(',') # split row into individual pieces of data
data = {}
for header, data_entry in zip(covid_data_headers, row_data):
data[header] = data_entry
# take individual data and map header (data title) to data in dict
covid_data_local.append(data)
# add dict to list of Covid data
covid_data_local.sort(key = lambda x: x['date'], reverse=True)
# just in case data is not in order sort by date, most recent date as index 0.
return covid_data_local
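# Illustrative example (hypothetical row): with a header line renamed to
# "area_name,date,new_cases", the row "Exeter,2021-10-28,102" becomes
# {'area_name': 'Exeter', 'date': '2021-10-28', 'new_cases': '102'}; missing fields remain
# empty strings, and the sort leaves the most recent date at index 0.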
def process_covid_csv_data(covid_data_local:list[dict]) -> tuple[int|str, int|str, int|str]:
'''
    Takes the Covid data processed from parse_csv_data and returns the number of cases for the past
    7 days, the number of hospital cases and the number of cumulative deaths
Parameters:
covid_data (list): The Covid data from parse_csv_data - Covid data in a list containing
dictionaries in header:data form
Returns:
total_cases_last_7_days (int|str): The total number of cases in the past 7 days -
ignoring empty data entries and the first day or N/A if not applicable
hospital_cases (int|str): The number of hospital cases from most recent data
or N/A if not applicable
cum_deaths (int|str): The number of cumulative deaths from the most recent data
or N/A if not applicable
'''
logging.info("""process_covid_csv_data called:
Processing COVID data to generate 3 key statistics""")
first_date = next(
(index for index, item in enumerate(covid_data_local) if item['new_cases']), None
) # finding the index of the first non empty entry of data.
# if there is valid entry, return none.
    if first_date is not None: # test to make sure there is data
first_date += 1 # skip the first day
if len(covid_data_local) - first_date > 7:
days = 7 # if there are 7 days worth of data
else:
days = len(covid_data_local) - first_date
# if not, then just calculate the remaining amounts of data
total_cases_last_7_days = 0
        for day in range(days):
            total_cases_last_7_days += int(
                covid_data_local[first_date+day]['new_cases']
            ) # loop through each day in the window and add its cases to the total
else: # if there is no data
logging.info("There is no data to calculate the 7 day covid rate.")
total_cases_last_7_days = "N/A"
    # The following repeats the pattern above for the next statistic, without skipping the first day
first_date = next(
(i for i, item in enumerate(covid_data_local) if item['hospital_cases']), None
) # this is the same as the next() statement as above but for hospital cases
if first_date is not None: # makes sure data is there as some API calls don't have this data.
hospital_cases = int(covid_data_local[first_date]['hospital_cases'])
    else: # if API call doesn't have this data, simply display N/A to user.
logging.info("There is insufficient data to show hospital cases.")
hospital_cases = "N/A"
first_date = next(
(i for i, item in enumerate(covid_data_local) if item['cum_deaths']), None
) # this is the same as the next() statement as above but for cumulative deaths
if first_date is not None: # makes sure data is there as some API calls don't have this data.
cum_deaths = int(covid_data_local[first_date]["cum_deaths"])
else: # if API call doesn't have this data, simply display N/A to user.
logging.info("There is insufficient data to show cumulative deaths.")
cum_deaths = "N/A"
return total_cases_last_7_days, hospital_cases, cum_deaths
def covid_API_request(location:str = "Exeter", location_type:str = "ltla") -> dict:
'''
This requests information from the UK Covid API
Parameters:
location (str): The location for information to be request about, default=Exeter
location_type (str): The type of location, default=ltla (Lower-tier local
authority data)
Returns:
data (dict): The data the API returns based on the filter and structure provided
'''
logging.info("Beginning API request to update COVID data.")
if location_type != "overview":
location_data = [
"areaType="+location_type,
"areaName="+location
]
else: # if areaType is overview, there is no need for areaName in request
location_data = ["areaType=overview"]
# generate a filter as required by covid API
structure_data = {
"area_name": "areaName",
"date": "date",
"cum_deaths": "cumDailyNsoDeathsByDeathDate",
"hospital_cases": "hospitalCases",
"new_cases": "newCasesBySpecimenDate"
} # information needed from API and renaming as per API parameters
try:
api = Cov19API(filters=location_data, structure=structure_data)
data = api.get_json() # json data already processed by API.
logging.info("API call completed")
return data
except uk_covid19.exceptions.FailedRequestError as error:
# may occur if there is a connection error
logging.warning("COVID API call failed: %s", error)
print("COVID API call failed: Check internet connection")
print("Retrying in 30 seconds...")
schedule_covid_updates(30, "API Retry")
return {"data": None}
except requests.exceptions.ConnectionError as error:
# may occur if there is a connection error
logging.warning("COVID API call failed: %s", error)
print("COVID API call failed: Check internet connection")
print("Retrying in 30 seconds...")
schedule_covid_updates(30, "API Retry")
return {"data": None}
def sch_update_covid_data(update_time: datetime.datetime, update_name: str, repeat: bool) -> None:
'''
This procedure is called by the scheduler to run an update and determine whether to schedule
a new update depending on whether this was a repeating update
Parameters:
        update_time (datetime.datetime): the datetime object of the update time
update_name (str): the name of the scheduled update
repeat (bool): whether the update is repeating
'''
global covid_data
global national_covid_data
# no way around using global variables here. They needs to be assigned on update
logging.info("Running scheduled COVID update %s", update_name)
del scheduled_updates[update_name] # scheduled update called, delete from dict
if config_covid_location: # make sure that covid API requests use config data if it is there
location_type = config_covid_location["area_type"]
location = config_covid_location["area_name"]
api_response = covid_API_request(location, location_type)
else:
api_response = covid_API_request()
national_api_response = covid_API_request(location_type="overview")
if api_response:
covid_data = api_response
if national_api_response:
national_covid_data = national_api_response
if repeat: # this is for if the user requested a repeating update
update_time = update_time + datetime.timedelta(days=1)
logging.info("Covid update (%s) to be repeated. Scheduling next update", update_name)
schedule_covid_updates(update_time, update_name, repeat)
def cancel_scheduled_update(update_name:str) -> None:
'''
    This procedure simply cancels a scheduled update and removes it from the scheduled updates dict
Parameters:
update_name(str): The key of the scheduled update in dict
'''
logging.info("Cancelling schduled COVID update named: %s", update_name)
if update_name in scheduled_updates:
# if the update exists, then find the event and remove it from the scheduler and
# list of scheduled updates
event = scheduled_updates[update_name]["event"]
scheduler.cancel(event)
del scheduled_updates[update_name]
logging.info("%s successfully removed from scheduled COVID updates", update_name)
logging.debug("COVID scheduled_updates = %s", scheduled_updates)
logging.debug("COVID Scheduler queue = %s", scheduler.queue)
else:
logging.warning("""Attempted to remove scheduled update event from scheduler
but event does not exist: %s""", update_name)
def schedule_covid_updates(update_interval: int|str|datetime.datetime,
                            update_name: str, repeat=False) -> None:
'''
This procedure is called when the user requests to schedule an update. All scheduled events
are added to the scheduled_updates dictionary with the name as the key.
Parameters:
update_interval (int|str|datetime.datetime):
if int, time to update in seconds
if str, time of next update in the format HH:MM
if datetime.datetime, the datetime of next update
update_name (str): the name of the scheduled update
repeat (bool): whether the update is repeating
'''
logging.info("Scheduling covid update: %s", update_name)
if isinstance(update_interval, str):
logging.info("Recieved string. Attempting to parse...")
# if it's a string, test if its coming from the dashboard and therefore HH:MM format
if match("^([0-1]?[0-9]|2[0-3]):[0-5][0-9]$", update_interval):
time_to_update, update_time = time_to_update_interval(update_interval)
logging.debug("time_to_update = %s", str(time_to_update))
logging.debug("update_time = %s", str(update_time))
elif update_interval.isdigit():
update_interval = int(update_interval)
# this will trigger the if statement below for int types
else:
logging.warning("Can't parse update time. Cancelling update scheduling")
# If we can't parse the update time parameter, cancel and exit function
return None
if isinstance(update_interval, datetime.datetime):
        # if datetime object, calculate time to next update
        logging.info("Received datetime object.")
update_time = update_interval
if update_time < datetime.datetime.now():
update_time = datetime.datetime.now().replace(
hour=update_time.hour, minute=update_time.minute, second=0, microsecond=0
)
if update_time < datetime.datetime.now():
update_time += datetime.timedelta(days=1)
# if the datetime object is in the past, we assume the next point where that
# hour and minute occur
time_to_update = (update_time - datetime.datetime.now()).total_seconds()
if isinstance(update_interval, int):
# if int, calculate datetime object of update
logging.info("Recieved int. Parsing as seconds from now.")
time_to_update = abs(update_interval)
# if number is negative, assume absolute value anyways
update_time = datetime.datetime.now() + datetime.timedelta(seconds = update_interval)
logging.info("Covid update time has been parsed")
logging.debug("Update time parsed as %s", str(update_time))
if update_name not in scheduled_updates:
# make sure we are not trying to create an update with a duplicate name
event = scheduler.enter(
time_to_update,1,sch_update_covid_data,(update_time, update_name, repeat, )
)
scheduled_updates[update_name] = {
"event": event,
"update_time":update_time,
"repeat":repeat
}
logging.info("Scheduled COVID update: %s", update_name)
logging.debug("Scheduler Queue (covid): %s", str(scheduler.queue))
else:
# should modify HTML to tell user that the app cannot schedule update as the
# update name is already in use but outside bounds of CA
logging.warning("Tried to schedule update with same name as existing update")
logging.debug("Update Name: %s", update_name)
logging.debug("Scheduler Queue (covid): %s", str(scheduler.queue))
def time_to_update_interval(update_interval:str) -> tuple[int, datetime.datetime]:
'''
Function to convert the data taken from the website form into a datetime object and
a integer variable with the amount of time from now to the update time recieved.
Parameters:
update_interval (str): The time in "HH:MM" format.
Returns:
time_to_update (int): The amount of seconds from now to the update time
update_time (datetime.datetime): datetime object that corresponds to the update time
'''
logging.info("Converting string to datetime object and seconds to update")
logging.debug("update_interval = %s", str(update_interval))
hrs, mins = map(int, update_interval.split(":"))
update_time = datetime.datetime.now().replace(hour=hrs, minute=mins, second=0, microsecond=0)
if update_time < datetime.datetime.now():
update_time = update_time + datetime.timedelta(days=1)
time_to_update = (update_time - datetime.datetime.now()).total_seconds()
return time_to_update, update_time
if __name__ == "__main__":
# if file is run individually, run these tests.
print("Running self tests")
TEST_FILE = "nation_2021-10-28.csv"
data_covid = parse_csv_data(TEST_FILE)
last_7_days_cases, current_hospital_cases, total_deaths = (
process_covid_csv_data(data_covid)
)
print(f"""{last_7_days_cases = :,} (expected 240,299)\n
    {current_hospital_cases = :,} (expected 7,019)\n
{total_deaths = :,} (expected 141,544)""")
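    # --- Hedged extra demo (not part of the original self tests) ---
    # Exercise the scheduling helpers with a made-up update name and interval.
    seconds, at_time = time_to_update_interval("23:59")
    print(f"Seconds until 23:59 -> {seconds:.0f} (runs at {at_time})")
    schedule_covid_updates(10, "demo update")
    print(f"Pending updates: {list(scheduled_updates)}")
    cancel_scheduled_update("demo update")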
| 3.53125 | 4 |
main.py | LukaszLapaj/pi-hole-lists-backup | 0 | 12787727 | <reponame>LukaszLapaj/pi-hole-lists-backup<gh_stars>0
import json
import os
from urllib import request
from urllib.parse import urlparse
def main():
with open('adlist.json') as json_file:
adlist = json.load(json_file)
for item in adlist:
url_path = urlparse(item["address"]).path
filename = url_path.replace('/', '', 1).replace('/', '-')
        try:
            with request.urlopen(item["address"]) as response, open("backups/" + filename, 'wb') as out_file:
                blacklist = response.read()
                out_file.write(blacklist)
        except Exception as error:  # avoid a bare except so the failure reason is visible
            print(filename + " error: " + str(error))
os.system("./commit.sh")
exit(0)
if __name__ == "__main__":
main()
| 2.71875 | 3 |
mud/events/inspect.py | erwanaubry/alamud_IUT_Escape | 0 | 12787728 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 <NAME>, IUT d'Orléans
#==============================================================================
from .event import Event2
class InspectEvent(Event2):
NAME = "look"
def get_event_templates(self):
return self.object.get_event_templates()
def perform(self):
if not self.actor.can_see():
return self.failed_cannot_see()
self.buffer_clear()
self.buffer_inform("look.actor", object=self.object)
if not self.object.is_container() or self.object.has_prop("closed"):
return self.actor.send_result(self.buffer_get())
players = []
objects = []
for x in self.object.contents():
if x is self.actor:
pass
elif x.is_player():
players.append(x)
else:
objects.append(x)
if players or objects:
self.buffer_inform("look.inside.intro")
self.buffer_append("<ul>")
for x in players:
self.buffer_peek(x)
for x in objects:
self.buffer_peek(x)
self.buffer_append("</ul>")
else:
self.buffer_inform("look.inside.empty")
self.actor.send_result(self.buffer_get())
def failed_cannot_see(self):
self.fail()
self.buffer_clear()
self.buffer_inform("look.failed")
self.actor.send_result(self.buffer_get())
| 2.609375 | 3 |
SensorTile/STM32CubeFunctionPack_SENSING1_V4.0.2/Utilities/AI_Ressources/TFlite/ASC/asc_keras_to_tflite_full_int8.py | MahendraSondagar/STMicroelectronics | 0 | 12787729 | #!/usr/bin/env python
# coding: utf-8
# This software component is licensed by ST under BSD 3-Clause license,
# the "License"; You may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
# https://opensource.org/licenses/BSD-3-Clause
"""
Optimize Full int8 - with reference dataset
Fully quantized ASC tflite model - TF 1.14.0; ASC 3-class training script from pre-calculated features.
"""
import numpy as np
import tensorflow as tf
# load ASC training Set as representative quantization dataset (100 samples)
# reduced 'dummy' data set is provided , a full representative one should be provided instead
x_train_dataset = np.load('Asc_quant_representative_data_dummy.npz')
x_train = x_train_dataset['x_train']
ASC_SHAPE = (30, 32, 1)
N_CLASSES = 3
def representative_dataset_gen():
for i in range(len(x_train)):
# Get sample input data as a numpy array in a method of your choosing.
yield [x_train[i].reshape((-1, ) + ASC_SHAPE)]
converter = tf.lite.TFLiteConverter.from_keras_model_file("Session_keras_mod_93_Model.h5" )
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset_gen
converter.target_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
print("\nConverting the model...", flush=True)
tflite_model = converter.convert()
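# Optional sanity check (illustrative, not part of the original conversion flow): load the
# converted model in the TFLite interpreter and inspect its quantized input/output tensors.
interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
print("Input details:", interpreter.get_input_details())
print("Output details:", interpreter.get_output_details())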
open('asc_keras_mod_93_to_tflite_int8_xtrain.tflite','wb').write(tflite_model) | 2.171875 | 2 |
L1TriggerConfig/L1TConfigProducers/python/L1TMuonEndcapParamsOnline_cfi.py | pasmuss/cmssw | 0 | 12787730 | <reponame>pasmuss/cmssw<gh_stars>0
import FWCore.ParameterSet.Config as cms
#from L1Trigger.L1TMuonEndCap.fakeEmtfParams_cff import *
from CondCore.CondDB.CondDB_cfi import CondDB
CondDB.connect = cms.string('oracle://cms_orcon_prod/CMS_CONDITIONS')
l1emtfparProtodb = cms.ESSource("PoolDBESSource",
CondDB,
toGet = cms.VPSet(
cms.PSet(
record = cms.string('L1TMuonEndcapParamsRcd'),
tag = cms.string('L1TMuonEndCapParamsPrototype_Stage2v0_hlt')
)
)
)
L1TMuonEndcapParamsOnlineProd = cms.ESProducer("L1TMuonEndcapParamsOnlineProd",
onlineAuthentication = cms.string('.'),
forceGeneration = cms.bool(False),
onlineDB = cms.string('oracle://CMS_OMDS_LB/CMS_TRG_R')
)
| 1.390625 | 1 |
django_workflow_system/migrations/0005_auto_20210720_0834.py | eikonomega/django-workflow-system | 2 | 12787731 | # Generated by Django 3.1.8 on 2021-07-20 13:34
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('django_workflow_system', '0004_auto_20210701_0910'),
]
operations = [
migrations.CreateModel(
name='WorkflowCollectionDependency',
fields=[
('created_date', models.DateTimeField(auto_now_add=True)),
('modified_date', models.DateTimeField(auto_now=True)),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('source', models.ForeignKey(help_text='The collection for which we want to create a dependency.', on_delete=django.db.models.deletion.PROTECT, related_name='source_workflow_collection', to='django_workflow_system.workflowcollection')),
('target', models.ForeignKey(help_text="The collection which we want to require be completed before the user can create engagements for the 'source' collection.", on_delete=django.db.models.deletion.PROTECT, related_name='target_workflow_collection', to='django_workflow_system.workflowcollection')),
],
options={
'verbose_name_plural': 'Workflow Collection Dependencies',
'db_table': 'workflow_collection_dependency',
'unique_together': {('source', 'target')},
},
),
migrations.AddField(
model_name='workflowcollection',
name='collection_dependencies',
field=models.ManyToManyField(blank=True, help_text='Specify which collections a user must complete before accessing this Collection.', through='django_workflow_system.WorkflowCollectionDependency', to='django_workflow_system.WorkflowCollection'),
),
]
| 1.828125 | 2 |
dtamg_py/extract.py | transparencia-mg/dtamg-py | 0 | 12787732 | <reponame>transparencia-mg/dtamg-py
import click
from dtamg_py.utils import extract
@click.command(name='extract')
@click.option('--resource', '-r', required=True,
help="Recurso a ser extraído")
def extract_cli(resource):
"""
    Function responsible for extracting data from a specific table in the MySQL database.
"""
extract(resource)
| 2.046875 | 2 |
blog/models.py | Vardancer/django_blog | 0 | 12787733 | <reponame>Vardancer/django_blog
from django.contrib.auth.models import User
from django.db import models
from django.urls import reverse
# Create your models here.
class Article(models.Model):
author = models.ForeignKey(User, on_delete=models.SET(0))
title = models.CharField(max_length=100, help_text="Title")
text = models.TextField(help_text="A blog body")
active = models.BooleanField(verbose_name="is_published")
date_add = models.DateField(auto_now_add=True)
date_change = models.DateTimeField(auto_now=True)
def __str__(self):
return "{} -- {}".format(self.title, self.author.last_name)
class Comment(models.Model):
user = models.ForeignKey(User, on_delete=models.SET(0))
comment = models.CharField(max_length=200, default=None)
article = models.ForeignKey(Article, on_delete=models.CASCADE)
date_add = models.DateTimeField(auto_now_add=True)
def __str__(self):
return "{} -- {}".format(self.article.title, self.date_add)
# class Category(models.Model):
# name = models.CharField(max_length=200)
class Survey(models.Model):
description = models.CharField(max_length=150)
# is_published = models.BooleanField(default=False)
# need_logged_user = models.BooleanField(default=True)
# category = models.ForeignKey(Category, on_delete=models.SET_NULL, null=True, related_name='categories')
def __str__(self):
return self.description
def get_absolute_url(self):
return reverse("survey-detail", kwargs={"id": self.pk})
class Questions(models.Model):
survey = models.ForeignKey(Survey, on_delete=models.CASCADE, related_name='survey')
question = models.CharField(max_length=255)
choices = models.CharField(default="yes,no,maybe", max_length=200)
def get_choices(self):
choices = self.choices.split(',')
c_list = []
for c in choices:
c = c.strip()
c_list.append((c, c))
choices_tuple = tuple(c_list)
return choices_tuple
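        # e.g. choices = "yes,no,maybe" -> (("yes", "yes"), ("no", "no"), ("maybe", "maybe"))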
def __str__(self):
return self.question
class Answers(models.Model):
question = models.ForeignKey(Questions, on_delete=models.CASCADE)
survey = models.ForeignKey(Survey, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, related_name='users')
answer = models.CharField(max_length=20)
def __str__(self):
        return str(self.question)  # __str__ must return a string, not a model instance
| 2.375 | 2 |
src/pipeline/preprocessing/paths.py | guyfreund/data_drift_detection | 0 | 12787734 | <reponame>guyfreund/data_drift_detection
import os
# Encoding Mapping
BANK_MARKETING_LABEL_ENCODER_PATH_DEPLOYMENT = os.path.abspath(os.path.join(__file__, "..", "..", "preprocessing", "raw_files", "BanMarketingLabelEncoding.npy"))
GERMAN_CREDIT_LABEL_ENCODER_PATH_DEPLOYMENT = os.path.abspath(os.path.join(__file__, "..", "..", "preprocessing", "raw_files", "GermanCreditLabelEncoding.npy"))
# Encoding Mapping
BANK_MARKETING_LABEL_ENCODER_PATH_TRAINING = os.path.abspath(os.path.join(__file__, "..", "..", "preprocessing", "raw_files", "BanMarketingLabelEncoding.npy"))
GERMAN_CREDIT_LABEL_ENCODER_PATH_TRAINING = os.path.abspath(os.path.join(__file__, "..", "..", "preprocessing", "raw_files", "GermanCreditLabelEncoding.npy"))
| 2.015625 | 2 |
dag_gettsim/main.py | janosg/dag_gettsim | 3 | 12787735 | <gh_stars>1-10
import inspect
from functools import partial
from inspect import getfullargspec, getmembers
import networkx as nx
import pandas as pd
from dag_gettsim import aggregation, benefits, taxes
def tax_transfer(
baseline_date, data, functions=None, params=None, targets="all", return_dag=False
):
"""Simulate a tax and tranfers system specified in model_spec.
Args:
baseline_date (str): A date, e.g. '2019-01-01'. This is used to select a set of
baseline parameters (and in the future baseline functions).
data (dict): User provided dataset as dictionary of Series.
functions (dict): Dictionary with user provided functions. The keys are the
names of the function. The values are either callables or strings with
absolute or relative import paths to a function. If functions have the
same name as an existing gettsim function they override that function.
params (dict): A pandas Series or dictionary with user provided parameters.
Currently just mapping a parameter name to a parameter value, in the
future we will need more metadata. If parameters have the same name as
an existing parameter from the gettsim parameters database at the
specified date they override that parameter.
targets (list): List of strings with names of functions whose output is actually
needed by the user. By default, all results are returned.
Returns:
dict: Dictionary of Series containing the target quantities.
"""
user_params = {} if params is None else params
user_functions = {} if functions is None else functions
params = get_params(baseline_date, user_params)
func_dict = create_function_dict(user_functions=user_functions, params=params)
dag = create_dag(func_dict)
if targets != "all":
dag = prune_dag(dag, targets)
results = execute_dag(func_dict, dag, data, targets)
if return_dag:
results = (results, dag)
return results
def get_params(baseline_date, user_params):
"""Combine baseline parameters and user parameters.
Currently this just generates baseline independent parameters for the toy model.
In the long run it will load a database, query the baseline parameters and update
or extend it with user parameters.
Args:
baseline_date (str): A date, e.g. '2019-01-01'
user_params (dict or pd.Series): User provided parameters that override or
extend the baseline parameters.
Returns:
pd.Series
"""
params = {
"income_tax": 0.2,
"wealth_tax": 0.9, # 90 % wealth tax is just to make Max happy ;-)
"benefit_per_child": 2000,
"benefit_cutoff": 30000,
}
if isinstance(user_params, pd.Series):
user_params = user_params.to_dict()
params.update(user_params)
return pd.Series(params)
def create_function_dict(user_functions, params):
"""Create a dictionary of all functions that will appear in the DAG.
Args:
user_functions (dict): Dictionary with user provided functions. The keys are the
names of the function. The values are either callables or strings with
absolute or relative import paths to a function.
Returns:
dict: Dictionary mapping function names to callables.
"""
func_dict = {}
for module in taxes, benefits, aggregation:
func_dict.update(dict(getmembers(module, inspect.isfunction)))
func_dict.update(user_functions)
partialed = {name: partial(func, params=params) for name, func in func_dict.items()}
return partialed
def create_dag(func_dict):
"""Create a directed acyclic graph (DAG) capturing dependencies between functions.
Args:
func_dict (dict): Maps function names to functions.
Returns:
        nx.DiGraph: The DAG, with one node per function or input and an edge from each
            data dependency to the function that consumes it.
"""
dag_dict = {name: getfullargspec(func).args for name, func in func_dict.items()}
return nx.DiGraph(dag_dict).reverse()
def prune_dag(dag, targets):
"""Prune the dag.
Args:
dag (nx.DiGraph): The unpruned DAG.
targets (list): Variables of interest.
Returns:
dag (nx.DiGraph): Pruned DAG.
"""
# Go through the DAG from the targets to the bottom and collect all visited nodes.
visited_nodes = set(targets)
visited_nodes_changed = True
while visited_nodes_changed:
n_visited_nodes = len(visited_nodes)
for node in visited_nodes:
visited_nodes = visited_nodes.union(nx.ancestors(dag, node))
visited_nodes_changed = n_visited_nodes != len(visited_nodes)
# Redundant nodes are nodes not visited going from the targets through the graph.
all_nodes = set(dag.nodes)
redundant_nodes = all_nodes - visited_nodes
dag.remove_nodes_from(redundant_nodes)
return dag
def execute_dag(func_dict, dag, data, targets):
"""Naive serial scheduler for our tasks.
We will probably use some existing scheduler instead. Interesting sources are:
- https://ipython.org/ipython-doc/3/parallel/dag_dependencies.html
- https://docs.dask.org/en/latest/graphs.html
    The main reason for writing our own implementation is to explore how difficult it
    would be to avoid dask as a dependency.
Args:
func_dict (dict): Maps function names to functions.
dag (nx.DiGraph)
        data (dict): Maps variable names to user provided pd.Series.
        targets (list): Names of the required outputs, or "all".
Returns:
dict: Dictionary of pd.Series with the results.
"""
# Needed for garbage collection.
visited_nodes = set()
results = data.copy()
for task in nx.topological_sort(dag):
if task not in results:
if task in func_dict:
kwargs = _dict_subset(results, dag.predecessors(task))
results[task] = func_dict[task](**kwargs)
else:
raise KeyError(f"Missing variable or function: {task}")
visited_nodes.add(task)
if targets != "all":
results = collect_garbage(results, task, visited_nodes, targets, dag)
return results
def _dict_subset(dictionary, keys):
return {k: dictionary[k] for k in keys}
def collect_garbage(results, task, visited_nodes, targets, dag):
"""Remove data which is no longer necessary.
If all descendants of a node have been evaluated, the information in the node
becomes redundant and can be removed to save memory.
Args:
results (dict)
task (str)
visited_nodes (set)
dag (nx.DiGraph)
Returns:
results (dict)
"""
ancestors_of_task = nx.ancestors(dag, task)
for node in ancestors_of_task:
is_obsolete = all(
descendant in visited_nodes for descendant in nx.descendants(dag, node)
)
if is_obsolete and task not in targets:
del results[node]
return results
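
if __name__ == "__main__":
    # Hedged demo (not part of the original module): exercise the parameter merging
    # and DAG pruning helpers with made-up inputs.
    demo_params = get_params("2019-01-01", {"income_tax": 0.25})
    print(demo_params)

    demo_dag = nx.DiGraph({"a": ["b"], "b": ["c"], "x": ["y"]})
    pruned = prune_dag(demo_dag, targets=["c"])
    print(sorted(pruned.nodes))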
| 2.828125 | 3 |
main.py | OverLoardPro/agency-survey | 0 | 12787736 | import os
import time
breakout=False
crimeseverity=False
crimesevereaction=False
# variables =
# text
# gender
# name
# age
# height
# drunk
print ("Welcome to the test, Citizen.")
time.sleep(1)
print("Today you are applying for a job at the Agency.")
time.sleep(1)
print("By participating in this test, you agree to the terms and conditions.")
time.sleep(1)
print("Please type 'I agree.' if you agree to the terms above. ")
while True:
try:
text = (input("Your Answer: "))
if text == ("I agree."):
print("Then we may begin.")
time.sleep(1)
break;
elif text == ("I agree"):
print("Then let's begin.")
time.sleep(1)
break;
else:
print("Please follow instructions.")
except ValueError:
print("Answer properly!")
continue
os.system("clear")
print("You will fill out a survey for us.")
time.sleep(1)
print("It will be reviewed.")
time.sleep(1)
print("Please answer honestly.")
time.sleep(1)
name = (input('What is your name, Citizen? '))
print('Hello, %s.' % name)
time.sleep(1)
print ('How old are you?')
while True:
try:
age = int(input("Your answer: "))
if age <= 40 and age >=18:
break;
elif age > 40:
print ("You are too old to enlist!")
elif age < 18:
print("You are too young to enlist!")
except ValueError:
print("Please input your age.")
continue
print("Okay.")
time.sleep(0.9)
os.system("clear")
time.sleep(1)
print("Are you a male or female?")
while True:
try:
gender = (input("Answer honestly: "))
if gender == "Male":
print("So you are a man?")
time.sleep(1)
break;
elif gender == "Female":
print("So you are a woman?")
time.sleep(1)
break;
elif gender == "female":
print("So you are a woman?")
time.sleep(1)
break;
elif gender == "male":
print("So you are a man?")
time.sleep(1)
break;
else:
print("Please input your gender.")
except ValueError:
print("Please answer correctly.")
continue
print("I see.")
time.sleep(1)
os.system("clear")
print("Have you drunk before?")
while True:
try:
drunk = (input('Answer : '))
if drunk == "Y":
print ("So you have drunk before?")
break;
elif drunk == "N":
print ("So you have not drunk before?")
break;
else:
print("Answer with Y/N")
except ValueError:
print("Answer with Y/N")
continue
time.sleep(1)
os.system("clear")
print("Do you have any experience with firearms?")
while True:
try:
experience = (input("Answer : "))
if experience == ("Y"):
print("So you have shot a gun before?")
break;
elif experience == ("N"):
print("So you have not shot a gun before?")
break;
else:
print("Please answer with [Y/N]")
except ValueError:
print("Please answer with [Y/N]")
continue
time.sleep(1)
print("Very well.")
time.sleep(1)
os.system("clear")
print("How tall are you?")
while True:
try:
height = int(input('cm : '))
if height >= 165 and height <=195:
break;
elif height < 165:
print("You are too short!")
elif height > 195:
print("You are too tall!")
else:
print("Please enter your height in cm.")
except ValueError:
print ("Please enter your height in cm.")
continue
print ("You are %d cm tall?" % height)
time.sleep(1)
print ("Very well.")
time.sleep(1)
os.system("clear")
print ("Have you commited any crimes?")
while True:
try:
crime = (input("Answer : "))
if crime == "N":
print("So you have not comitted any crimes?")
time.sleep(1)
break;
elif crime == "Y":
print("Was it a severe, or minor one?")
while True:
try:
severity = (input("Answer : "))
if severity == "Minor":
crimeseverity=True
print("Very well then.")
time.sleep(1)
breakout=True
break;
elif severity == "Severe":
crimeseverity=True
crimesevereaction=True
print("So, you have comitted a severe crime?")
time.sleep(1)
print("Like what?")
time.sleep(1)
os.system("clear")
print("1. Homicide")
print("2. Extortion")
print("3. Blackmail")
print("4. Use of drugs")
print("5. Rape")
print("6. Other")
while True:
try:
actions = (input("Answer : "))
if actions == "1":
print ("So you have killed someone before?")
time.sleep(1)
print ("That's okay, we do that alot here.")
time.sleep(1)
breakout=True
break
elif actions == "2":
print("So you have extorted someone before?")
time.sleep(1)
print("Do not be ashamed, we do that alot here.")
time.sleep(1)
breakout=True
break
elif actions == "3":
print("So you have blackmailed people before?")
time.sleep(1)
print("We do that alot here, do not be ashamed.")
time.sleep(1)
breakout=True
break
elif actions == "4":
print("You have consumed illegal drugs?")
time.sleep(1)
print("I guess it is okay, as long as you do not do it here.")
time.sleep(1)
breakout=True
break
elif actions == "5":
print("Rape? uh. We will note that down.")
time.sleep(1)
print("Very well.")
time.sleep(1)
breakout=True
break
elif actions == "6":
print("Very well.")
time.sleep(1)
breakout=True
break
else:
print("Answer the question with (1,2,3,4,5,6)")
except ValueError:
print("Answer the question with (1,2,3,4,5,6)")
continue
if breakout:
break
else:
print("Answer with [Severe\Minor]")
except ValueError:
print("Answer with [Severe\Minor]")
continue
if breakout:
break
else:
print("Answer with Y/N")
except ValueError:
print("Answer with Y/N")
continue
if breakout:
break
os.system("clear")
time.sleep(1)
print ("Now.")
time.sleep(1)
print("%s" % name)
time.sleep(1)
print("Here is a summary of all your answers.")
time.sleep(1)
os.system("clear")
print("Name : %s" % name)
time.sleep(0.5)
print("Gender : %s" % gender)
time.sleep(0.5)
print("Age : %s" % age)
time.sleep(0.5)
print("Height : %s cm" % height)
time.sleep(0.5)
print("Experience with firearms? : %s" % experience)
time.sleep(0.5)
print("Alcohol before? : %s" % drunk)
time.sleep(0.5)
print("Crime before? : %s" % crime)
time.sleep(0.5)
if crimeseverity is True:
print("Crime severity : %s" % severity)
time.sleep(0.5)
if crimesevereaction is True:
if actions == "1":
print("Type : Homicide")
time.sleep(0.5)
elif actions == "2":
time.sleep(0.5)
print("Type : Extortion")
elif actions == "3":
print("Type : Blackmail")
time.sleep(0.5)
elif actions == "4":
print("Type : Illegal substances")
time.sleep(0.5)
elif actions == "5":
print("Type : Rape")
time.sleep(1)
elif actions == "6":
print("Type : Other")
time.sleep(0.5)
print("Summary Evaluation")
time.sleep(1.5)
os.system("clear")
loop = 0
while loop <=2:
loop = loop + 1
print("Evaluating results")
time.sleep(0.5)
os.system("clear")
print("Evaluating results.")
time.sleep(0.5)
os.system("clear")
print("Evaluating results..")
time.sleep(0.5)
os.system("clear")
print("Evaluating results...")
time.sleep(0.5)
os.system("clear")
score = 1
#EVALUATION
if height >170:
score = score + 1
elif height <170:
score = score - 1
if drunk == "Y":
score = score - 1
elif drunk == "N":
score = score + 1
if experience == "Y":
score = score + 1
elif experience == "N":
score = score + 0
if crime == "N":
score = score + 1
elif crime == "Y":
if severity == "Minor":
score = score + 0
elif severity == "Severe":
score = score - 1
if actions == "1":
score = score + 1
elif actions == "2":
score = score + 1
elif actions == "3":
score = score + 1
elif actions == "4":
score = score + 0
elif actions == "5":
score = score + 0
elif actions == "6":
score = score + 0
if score >=3: #pass
print ('We have come back to tell you.')
time.sleep(1)
print ('That you have passed the test!')
time.sleep(1)
print ('Your final score was %d' % score)
time.sleep(1)
print ('For more info regarding this application')
time.sleep(1)
print ('Please visit bit.ly/agencysummary')
elif score <=2: #nopass
print ("We regret to inform you.")
time.sleep(1)
print("That you have failed the test.")
time.sleep(1)
print("Please re-evaluate your ways and try again.")
| 4.0625 | 4 |
AspectSelection.py | ChenliangLi205/HeterogeneousEmbedding | 1 | 12787737 | # -*- coding: utf-8 -*-
import networkx as nx
import itertools
def is_subset(node_types):
"""Judge if the given aspect is a subset of the Selected ones"""
global Selected_Aspects
nt_set = set(node_types)
for sa in Selected_Aspects:
if nt_set.issubset(sa):
return True
return False
def is_rational(type_graph):
"""The rationality of the given aspect is determined by its connectivity"""
return nx.is_connected(type_graph)
def center_nodes(type_graph):
"""Return the center node types of an aspect"""
centers = []
for node in type_graph.nodes():
if len([n for n in type_graph[node]]) > 1:
centers.append(node)
return centers
def Incompatibility(graph, node_types, edge_types, center_types):
"""Calculate Incompatitance for the given aspect
Each bloody aspect is determined by its node types"""
center_nodes_dict = {}
for c_type in center_types:
center_nodes_dict[c_type] = []
for node in graph.nodes():
if node[0] in center_nodes_dict.keys():
center_nodes_dict[node[0]].append(node)
inc = 0.
num_nonzero = 0
for c_type, node_list in center_nodes_dict.items():
accessable_nodetypes = extract_accessable_edgetypes(c_type, node_types, edge_types)
count = 0
total = len(node_list)
for u in node_list:
if count % 1000 == 0:
print('{} / {}'.format(count, total))
inc_u, nonzero = Inc_score(graph, u, node_list, accessable_nodetypes)
inc += inc_u
num_nonzero += nonzero
count += 1
return inc / num_nonzero
def extract_accessable_edgetypes(c, node_types, edge_types):
a_types = []
for e_t in edge_types:
if c == e_t[0] and e_t[-1] in node_types:
a_types.append(e_t[-1])
continue
if c == e_t[-1] and e_t[0] in node_types:
a_types.append(e_t[0])
return a_types
def Inc_score(graph, u, node_list, accessable):
"""Calculate gamma(u) for a single node u"""
numerator = 0.
denominator = 0.
for v in node_list:
if u == v:
continue
# compute the reachability through all accessable edge types
reachability = Num_Cn(graph, u, v, accessable)
numerator += max(reachability)
denominator += min(reachability)
if -0.1 <= denominator <= 0.1:
return 0, 0
else:
return numerator / denominator - 1, 1
def Num_Cn(graph, u, v, accessable):
neighbors_u = set([n for n in graph[u]])
neighbors_v = set([n for n in graph[v]])
cn = neighbors_u & neighbors_v
count = [0] * len(accessable)
for n in cn:
assert n[0] in accessable
count[accessable.index(n[0])] += 1
return count
# node types : ['A', 'P', 'P', 'V'], P appears multiple times because the P-P edge type
# edge types : ['A-P', 'P-P', 'P-V', ...]
def Select_Aspect(graph, node_types, edge_types, threshold):
"""Se个粑粑"""
global Selected_Aspects
if is_subset(node_types):
return
type_graph = nx.Graph()
for et in edge_types:
if et[0] in node_types and et[-1] in node_types:
type_graph.add_edge(et[0], et[-1])
if is_rational(type_graph):
# whether it is a valid aspect
center_types = center_nodes(type_graph)
Inc = Incompatibility(graph, node_types, edge_types, center_types)
if Inc > threshold:
Selected_Aspects.append(node_types)
return
if len(node_types) <= 3:
# It takes at least 3 node types to make an aspect
return
else:
for c in itertools.combinations(node_types, len(node_types)-1):
Select_Aspect(graph, list(c), edge_types, threshold)
def show_Inc_aspects(graph, node_types, edge_types, aspects):
for a in aspects:
type_graph = nx.Graph()
for et in edge_types:
if et[0] in a and et[-1] in a:
type_graph.add_edge(et[0], et[-1])
center_types = center_nodes(type_graph)
print(Incompatibility(graph, node_types, edge_types, center_types))
if __name__ == '__main__':
datasets = ['dblp/']
using = datasets[0]
graph = nx.read_edgelist(
'data/' + using + 'graph.edgelist', delimiter=',',
create_using=nx.Graph(), nodetype=str, data=False
)
Select_Aspect(
graph=graph,
node_types=['A', 'P', 'P', 'V'],
edge_types=['A-P', 'P-V', 'P-P'],
threshold=1.
)
| 3.046875 | 3 |
ambari-server/src/main/resources/stacks/HDP/3.0/services/SMARTSENSE/package/scripts/hst_service.py | niuchp/ambari-2.7.3 | 0 | 12787738 | '''
Copyright (c) 2011-2018, Hortonworks Inc. All rights reserved.
Except as expressly permitted in a written agreement between you
or your company and Hortonworks, Inc, any use, reproduction,
modification,
redistribution, sharing, lending or other exploitation
of all or any part of the contents of this file is strictly prohibited.
'''
from resource_management import *
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def hst_service(action='start'):
import params
if action == 'start':
daemon_cmd = "hst start"
no_op_test = format("ls {hst_pid_file} >/dev/null 2>&1 && ps -p `cat {zk_pid_file}` >/dev/null 2>&1")
Execute(daemon_cmd, not_if=no_op_test)
elif action == 'stop':
daemon_cmd = "hst stop"
rm_pid = format("rm -f {hst_pid_file}")
Execute(daemon_cmd)
Execute(rm_pid)
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def hst_service(action='start'):
import params
if action == 'start':
Service(params.hst_win_service_name, action="start")
elif action == 'stop':
Service(params.hst_win_service_name, action="stop")
| 1.382813 | 1 |
job_executor/exceptions.py | seamless-io/seamless-web | 1 | 12787739 | <filename>job_executor/exceptions.py
class ExecutorBuildException(Exception):
PREFIX = "[SEAMLESS BUILD ERROR]"
def __init__(self, message, *args, **kwargs):
message = f"{self.PREFIX} {message}"
super().__init__(message, *args, **kwargs)
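
# Illustrative usage (not part of the original file):
#   raise ExecutorBuildException("requirements.txt not found")
# The raised message becomes "[SEAMLESS BUILD ERROR] requirements.txt not found".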
| 2.0625 | 2 |
test/test_importing.py | linyc74/qiime2_pipeline | 0 | 12787740 | <filename>test/test_importing.py
from .setup import TestCase
from qiime2_pipeline.importing import ImportFeatureTable, ImportFeatureSequence, ImportTaxonomy
from qiime2_pipeline.exporting import ExportFeatureTable, ExportFeatureSequence
class TestImportFeatureTable(TestCase):
def setUp(self):
self.set_up(py_path=__file__)
def tearDown(self):
self.tear_down()
def test_main(self):
actual = ImportFeatureTable(self.settings).main(
feature_table_tsv=f'{self.indir}/feature-table.tsv')
expected = f'{self.workdir}/feature-table.qza'
self.assertFileExists(expected, actual)
def test_data_integrity(self):
qza = ImportFeatureTable(self.settings).main(
feature_table_tsv=f'{self.indir}/feature-table.tsv')
actual = ExportFeatureTable(self.settings).main(
feature_table_qza=qza)
expected = f'{self.indir}/feature-table-from-qza.tsv'
self.assertFileEqual(expected, actual)
class TestImportFeatureSequence(TestCase):
def setUp(self):
self.set_up(py_path=__file__)
def tearDown(self):
self.tear_down()
def test_main(self):
actual = ImportFeatureSequence(self.settings).main(
feature_sequence_fa=f'{self.indir}/feature-sequence.fa')
expected = f'{self.workdir}/feature-sequence.qza'
self.assertFileExists(expected, actual)
def test_data_integrity(self):
input_fa = f'{self.indir}/feature-sequence.fa'
qza = ImportFeatureSequence(self.settings).main(feature_sequence_fa=input_fa)
output_fa = ExportFeatureSequence(self.settings).main(feature_sequence_qza=qza)
self.assertFileEqual(input_fa, output_fa)
class TestImportTaxonomy(TestCase):
def setUp(self):
self.set_up(py_path=__file__)
def tearDown(self):
self.tear_down()
def test_main(self):
actual = ImportTaxonomy(self.settings).main(
taxonomy_tsv=f'{self.indir}/taxonomy.tsv')
expected = f'{self.workdir}/taxonomy.qza'
self.assertFileExists(expected, actual)
| 2.296875 | 2 |
floret_qvec/scripts/export_comparable_vectors.py | adrianeboyd/fasttext-bloom-demo-projects | 0 | 12787741 | import spacy
import typer
from pathlib import Path
def main(
input_vectors: Path, input_model: Path, input_oracle: Path, output_vectors: Path
):
nlp = spacy.load(input_model)
vectors = {}
with open(input_vectors) as fileh:
for line in fileh.readlines():
parts = line.strip().split()
vectors[parts[0]] = " ".join(parts[1:])
with open(input_oracle) as fileh:
lines = fileh.readlines()
words = [line.split()[0] for line in lines]
for word in words:
if word not in vectors:
vectors[word] = " ".join(str(v) for v in nlp.vocab[word].vector)
with open(output_vectors, "w") as fileh:
for word in sorted(vectors.keys()):
fileh.write(word + " " + vectors[word] + "\n")
if __name__ == "__main__":
typer.run(main)
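# Illustrative invocation (file names are assumptions, not from the original repo):
#   python export_comparable_vectors.py input.vec model_dir oracle.txt output.vec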
| 2.9375 | 3 |
tests/test_flows.py | danielward27/flowjax | 0 | 12787742 | from flowjax.flows import Flow, RealNVPFlow, NeuralSplineFlow
from flowjax.bijections.utils import Permute
import jax.numpy as jnp
from jax import random
import pytest
def test_Flow():
key = random.PRNGKey(0)
bijection = Permute(jnp.array([2, 1, 0]))
dim = 3
flow = Flow(bijection, dim)
x = flow.sample(key, n=1)
assert x.shape == (1, dim)
x = flow.sample(random.PRNGKey(0), n=2)
assert x.shape == (2, dim)
# Note condition is ignored for transformation (but can be used to infer sample size)
x = flow.sample(key, condition=jnp.zeros((0,)), n=5)
assert x.shape == (5, dim)
x = flow.sample(key, condition=jnp.zeros((5, 0)))
assert x.shape == (5, dim)
with pytest.raises(AssertionError):
flow.sample(key, condition=jnp.zeros((5, 0)), n=3)
with pytest.raises(AssertionError):
flow.sample(key, condition=jnp.zeros((0,)))
# Test log prob work for vector and matrices input too
x1, x2 = x[0], x[None, 0]
lp1, lp2 = [flow.log_prob(x).item() for x in (x1, x2)]
assert lp1 == pytest.approx(lp2)
def test_broadcast():
# Matrices
size_pairs = [((5,2), (5,3)), ((1,2), (5,3)), ((5,2), (1,3)), ((2,), (5,3)), ((5,2), (3,))]
out_sizes = [((5,2), (5,3))] * len(size_pairs)
for in_s, out_s in zip(size_pairs, out_sizes):
a,b = Flow._broadcast(jnp.ones(in_s[0]), jnp.ones(in_s[1]))
assert (a.shape, b.shape) == out_s
def test_NeuralSplineFlow():
# Unconditional
n = 10
dim = 3
key = random.PRNGKey(2)
flow = NeuralSplineFlow(key, dim, num_layers=2)
x = flow.sample(key, n=n)
assert x.shape == (n, dim)
lp = flow.log_prob(x)
assert lp.shape == (n,)
# Conditional
cond_dim = 2
flow = NeuralSplineFlow(key, dim, condition_dim=cond_dim, num_layers=2)
cond = random.uniform(key, (n, cond_dim))
x = flow.sample(key, condition=cond)
lp = flow.log_prob(x, cond)
assert lp.shape == (n,)
lp = flow.log_prob(x, jnp.ones(cond_dim))
assert lp.shape == (n,)
lp = flow.log_prob(jnp.ones(dim), cond)
assert lp.shape == (n,)
x = flow.sample(key, condition=jnp.ones(2), n=n)
assert x.shape == (n, dim)
def test_RealNVPFlow():
key = random.PRNGKey(1)
flow = RealNVPFlow(key, 3)
x = flow.sample(key, n=10)
assert x.shape == (10, 3)
lp = flow.log_prob(x)
assert lp.shape == (10,) | 1.976563 | 2 |
src/1 mac_changer/macchanger-1.py | raminjafary/ethical-hacking | 0 | 12787743 | #!/usr/bin/python
import subprocess
subprocess.call("ifconfig enp2s0 down",shell=True)
subprocess.call("ifconfig enp2s0 hw ether 00:11:22:33:44:55",shell=True)
subprocess.call("ifconfig enp2s0 up",shell=True) | 2.375 | 2 |
preprocessing/get_input_pdb.py | sysu-yanglab/GRASP | 4 | 12787744 | <gh_stars>1-10
#!/usr/bin/python
'''
Usage:
    python3 get_input_pdb.py --input_fasta='./raw_data/NMR_X-Ray/new75.fa'
                             --input_ss='./raw_data/NMR_X-Ray/pdball.SS'
                             --window_size=37
                             --dataset='pdb'
input: sequences with structural scores
output: encoded fragments centred on paired and unpaired positions of each sequence
'''
import os
import argparse
import pandas as pd
import csv
def rd_normal(dat_dir):
with open(dat_dir) as file_obj:
dat_list = file_obj.readlines()
return dat_list
def rd_fasta(dat_dir):
with open(dat_dir) as file_obj:
dat_list = file_obj.readlines()
dat_list2 = []
for i in range(0,len(dat_list),2):
dat_list2.append([dat_list[i],dat_list[i+1]])
return dat_list2
def rd_score(dat_dir):
with open(dat_dir) as file_obj:
#dat_list = file_obj.readlines()
dat_list2 = []
for lines in file_obj:
dat_list2.append(lines.split())
return dat_list2
def rd_fasta_name(dat_dir):
with open(dat_dir) as file_obj:
dat_list = file_obj.readlines()
dat_list2 = []
for i in range(0,len(dat_list),2):
dat_list2.append(dat_list[i][1:-1])
return dat_list2
def get_fasta_name(list):
name_list =[]
for i in range(0,len(list)):
name_list.append(list[i][0][1:-2])
return name_list
def get_score_name(list):
name_list =[]
for i in range(len(list)):
name_list.append(list[i][0])
return name_list
def search_index(gene_name,list):
try:
gene_index = list.index(gene_name)
except ValueError:
return None
else:
return gene_index
def find_topK_index(your_list,k=5,top='max'):
if k > len(your_list)-1:
print("k is larger than length of list")
else:
import numpy as np
arr = np.array(your_list)
topK_index=[]
index_seq = np.argsort(arr)
if top=='max':
if abs(arr[index_seq[-1]])==0.0:
return None
else:
i=0
if abs(arr[index_seq[len(arr)-k]])== 0.0:
while abs(arr[index_seq[len(arr)-k+i]])== 0.0:
i +=1
topK_index = index_seq[len(arr)-k+i:]
else:
while(i>=0):
if arr[index_seq[len(arr)-k-(i+1)]] ==arr[index_seq[len(arr)-k-i]]:
i +=1
else:
topK_index = index_seq[len(arr)-k-i:]
break
if top=='min':
if abs(arr[index_seq[0]])==0.0:
return None
else:
i=0
if abs(arr[index_seq[k-1]])== 0.0:
while abs(arr[index_seq[k-1-i]])== 0.0:
i +=1
topK_index = index_seq[:(k-1-i+1)]
else:
while(i>=0):
if arr[index_seq[k+i]] ==arr[index_seq[(k-1)+i]]:
i +=1
else:
topK_index = index_seq[:(k+i)]
break
return list(topK_index)
def find_pair_index(your_list,top='pair'):
pair_index=list()
#index_seq = np.argsort(arr)
if top=='pair':
pair_index = [i for i in range(len(your_list)) if your_list[i] >0.0]
if top=='unpair':
pair_index = [i for i in range(len(your_list)) if your_list[i] == 0.0]
return list(pair_index)
def get_seq_lst(cenucle_idx_lst,frag_len,seq_lst):
side_len = int((frag_len-1)/2)
all_frag_seq = []
for idx in cenucle_idx_lst:
init_idx = idx-side_len
end_idx = idx+side_len
frag_seq = []
for i in range(frag_len):
frag_seq.append('N')
if init_idx<0 and end_idx<=(len(seq_lst)-1):
for i in range(end_idx+1):
frag_seq[abs(init_idx)+i]=seq_lst[i]
elif init_idx>=0 and end_idx>(len(seq_lst)-1):
for i in range(len(seq_lst)-init_idx):
frag_seq[i]=seq_lst[init_idx+i]
elif init_idx<0 and end_idx>(len(seq_lst)-1):
for i in range(len(seq_lst)):
frag_seq[abs(init_idx)+i]=seq_lst[i]
else:
for i in range(frag_len):
frag_seq[i]=seq_lst[init_idx+i]
all_frag_seq.append(frag_seq)
return all_frag_seq
dataset_type = {'py':'PARS_yeast', 'ph':'PARS_human', 'pdb':'NMR_X-Ray'}
def create_dir_if_not_exists(config):
dir_path = '../data/' + dataset_type[config.dataset]
if not os.path.exists(dir_path):
os.makedirs(dir_path)
return dir_path
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--input_fasta', type=str, default='./raw_data/NMR_X-Ray/new75.fa', help='FASTA format file, containing RNA sequences.')
parser.add_argument('--input_ss', type=str, default='./raw_data/NMR_X-Ray/pdball.SS', help='The RNA sequences structural profile.')
parser.add_argument('--window_size', type=int, default=37, help='The window size when truncating RNA sequences.')
parser.add_argument('--dataset', type=str, default='pdb', help='The type of dataset: PARS_human:ph, PARS_yeast:py or NMR_X-Ray:pdb')
config = parser.parse_args()
pdb_select_p=str(config.input_fasta) #new75.fa
pdb_all_p=str(config.input_ss) #pdball.SS
window_len=int(config.window_size) #37
output_dir = create_dir_if_not_exists(config)
save_nocode_p = output_dir+'/'+config.dataset+'_nocode_{}.csv'.format(window_len)#./py_nocode_{window_size}
save_encode_p = output_dir+'/'+config.dataset+'_encode_{}.csv'.format(window_len)#./py_encode_{window_size}
select_name = rd_fasta_name(pdb_select_p)
pdb_all_lst = rd_normal(pdb_all_p)
all_data_info =list()
all_data_info.append(['gene_name','whole_seq','whole_score','pos_frag','pos_ix','neg_frag','neg_ix'])
for i in range(0,len(pdb_all_lst),3):
tmp_name = pdb_all_lst[i][1:-1]
if tmp_name in select_name:
single_data_info = list()
single_data_info.append(pdb_all_lst[i][1:-1])
a_seq=pdb_all_lst[i+1]
a_score=pdb_all_lst[i+2]
single_data_info.append(a_seq)
single_data_info.append(a_score)
pos_seq_ix = find_pair_index(list(map(float, a_score.split())),top='pair')
pos_seq_lst = get_seq_lst(pos_seq_ix,window_len,seq_lst=list(a_seq[:-1]))
single_data_info.append(pos_seq_lst)
single_data_info.append(pos_seq_ix)
neg_seq_ix = find_pair_index(list(map(float, a_score.split())),top='unpair')
neg_seq_lst = get_seq_lst(neg_seq_ix,window_len,seq_lst=list(a_seq[:-1]))
single_data_info.append(neg_seq_lst)
single_data_info.append(neg_seq_ix)
all_data_info.append(single_data_info)
# print('get seq over')
input_data=[]
feature_lst=['gene_name']+['f'+str(i+1) for i in range(window_len)]+['label','position']
input_data.append(feature_lst)
for i in range(1,len(all_data_info)):
for j1 in range(len(all_data_info[i][3])):
single_input=[]
single_input.append(all_data_info[i][0])
for j1j in range(window_len):
single_input.append(all_data_info[i][3][j1][j1j])
single_input.append(1)
single_input.append(all_data_info[i][4][j1])
input_data.append(single_input)
for j2 in range(len(all_data_info[i][5])):
single_input=[]
single_input.append(all_data_info[i][0])
for j2j in range(window_len):
single_input.append(all_data_info[i][5][j2][j2j])
single_input.append(0)
single_input.append(all_data_info[i][6][j2])
input_data.append(single_input)
pd.DataFrame(input_data).to_csv(save_nocode_p,header=0,index=0)
#encode
PDB_input_data = pd.read_csv(save_nocode_p)
n_PDB_input_data=PDB_input_data.drop(['gene_name','position'],axis=1)
nucle_code = {'A':[1,0,0,0],'U':[0,1,0,0],'T':[0,1,0,0],'C':[0,0,1,0],'G':[0,0,0,1],'N':[0,0,0,0],'P':[0,0,0,0],'a':[1,0,0,0],'u':[0,1,0,0],'t':[0,1,0,0],'c':[0,0,1,0],'g':[0,0,0,1],'I':[0,0,0,0]}
frag_code_input =[]
code_title = []
for i in range(1,window_len+2):
if i<(window_len+1):
single_title = ['f'+str(i)+'_1','f'+str(i)+'_2','f'+str(i)+'_3','f'+str(i)+'_4']
code_title.extend(single_title)
else:
code_title.append('label')
for i in range(len(n_PDB_input_data)):
one_frag = n_PDB_input_data.iloc[i]
single_code=[]
for j in range(len(one_frag)-1):
single_code.extend(nucle_code[one_frag[j]])
single_code.append(one_frag[-1])
frag_code_input.append(single_code)
frag_code_input = pd.DataFrame(frag_code_input)
gene_name = PDB_input_data['gene_name']
full_sequences = (n_PDB_input_data.drop('label', axis=1)).apply(lambda x: x.sum(), axis=1)
frag_code_input.insert(0, 'sequences', full_sequences)
code_title.insert(0, 'sequences')
frag_code_input.columns = code_title
frag_code_input.index = gene_name.tolist()
frag_code_input.to_csv(save_encode_p,index=True) | 2.578125 | 3 |
djangoroku/djangoroku.py | maen08/djangoroku | 3 | 12787745 | #!/usr/bin/python3
import os
import sys
import subprocess
import logging
import time
from djangoroku.djangoroku.linux import DeployOnLinux
class DjangoHerokuDeploy():
#I: SELECTING OS
os_name = input('Which OS are you using?\n1.Linux\n2.Windows')
if os_name == '1':
DeployOnLinux()
# I:THE DJANGO PART-SETTING UP EVERYTHING
# ask the user to enter the project-name
project_name = input('Whats your project name:')
try:
os.system('pip install gunicorn psycopg2-binary django-heroku dj-database-url')
logger.debug('DONE: All packages are installed successfully')
except FileExistsError:
logger.debug('DONE: All packages are installed successfully')
time.sleep(4)
# create a requirements.txt file
try:
os.system('pip freeze > requirements.txt')
logger.debug('DONE: requirements.txt file created')
except FileExistsError:
logger.debug('DONE: requirements.txt file created')
time.sleep(4)
# create a Procfile
try:
with open('Procfile', 'x') as f:
# make sure the project name is correct
f.write('web: gunicorn ' + project_name + '.wsgi:application')
logger.debug('DONE: Procfile created')
except FileExistsError:
logger.debug('DONE: Procfile created')
time.sleep(3)
# a function to prepend the import statement
to_settings = os.chdir(project_name)
def line_prepender(filename, line):
with open(filename, 'r+') as f:
content = f.read()
f.seek(0, 0)
f.write(line.rstrip('\r\n') + '\n' + content)
try:
line_prepender('settings.py', 'import dj_database_url')
line_prepender('settings.py', 'import django_heroku')
except FileExistsError:
logger.debug('DONE: All packages are imported')
time.sleep(3)
logger.debug('Remember to push everything on Github')
# II: HEROKU PART-DEPLOYMENT
try:
logger.debug("INFO: Please login to heroku...")
# os.system('heroku login')
        except Exception:  # avoid a bare except
logger.debug('INFO: Please login to heroku')
time.sleep(2)
# creating a heroku domain-name
domain_name = input('Choose the app name: ')
os.system('heroku create' +' '+ domain_name)
reading_file = open('settings.py', 'r')
new_file_content = ""
ALLOWED_HOSTS = domain_name + '.herokuapp.com'
link = ALLOWED_HOSTS.split(' ')
for line in reading_file:
stripped_line = line.strip()
new_line = stripped_line.replace(
'ALLOWED_HOSTS = []', f'ALLOWED_HOSTS = {link}') # user should not rewrite ALLOWED_HOSTS
# before the script. Let it handle everything
new_file_content += new_line + "\n"
reading_file.close()
writing_file = open('settings.py', 'w')
writing_file.write(new_file_content)
writing_file.close()
# push to heroku
logger.debug('INFO: Deploying...')
time.sleep(4)
os.system('heroku config:set DISABLE_COLLECTSTATIC=1')
os.system('heroku git:remote -a' + ' ' + domain_name)
os.system('heroku config:set DISABLE_COLLECTSTATIC=1')
os.system('git push heroku master')
logger.debug('Setting up database...')
time.sleep(3)
os.system('heroku run python manage.py makemigrations')
os.system('heroku run python manage.py migrate')
time.sleep(2)
        logger.debug('DONE: SUCCESSFULLY DEPLOYED!')
elif os_name == '2':
print('two')
# condition
# windows
else:
# condition
print('last')
| 2.578125 | 3 |
liveplotter.py | icns-distributed-cloud/CACHING-efficient-data-crammer | 33 | 12787746 | <reponame>icns-distributed-cloud/CACHING-efficient-data-crammer
# importing libraries
import time
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
matplotlib.use('TkAgg')
plt.style.use('ggplot')
def live_plotter(x_vec, y1_data, line1, identifier='', pause_time=0.1):
if line1 == []:
# this is the call to matplotlib that allows dynamic plotting
plt.ion()
fig = plt.figure(figsize=(13, 6))
ax = fig.add_subplot(111)
# create a variable for the line so we can later update it
line1, = ax.plot(x_vec, y1_data, '-o', alpha=1)
# update plot label/title
plt.ylabel('PID output')
plt.title('Title: {}'.format(identifier))
plt.show()
# after the figure, axis, and line are created, we only need to update the y-data
line1.set_ydata(y1_data)
# print(x_vec)
# line1.axes.set_xticklabels([str(item) for item in x_vec])
# Remove lable of x-axis
line1.axes.set_xticks([])
# adjust limits if new data goes beyond bounds
if np.min(y1_data) <= line1.axes.get_ylim()[0] or np.max(y1_data) >= line1.axes.get_ylim()[1]:
plt.ylim([np.min(y1_data) - np.std(y1_data), np.max(y1_data) + np.std(y1_data)])
# # adjust limits if new data goes beyond bounds
# if np.min(x_vec) <= line1.axes.get_xlim()[0] or np.max(x_vec) >= line1.axes.get_xlim()[1]:
# plt.xlim([np.min(x_vec) - np.std(x_vec), np.max(x_vec) + np.std(x_vec)])
# this pauses the data so the figure/axis can catch up - the amount of pause can be altered above
plt.pause(pause_time)
# return line so we can update it again in the next iteration
return line1
| 2.890625 | 3 |
benchmarks/cholesky.py | dbkinghorn/silly-olympiad | 0 | 12787747 |
import numpy as np
from .utils import Timer
def run(size='large', repeats=3 ):
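    """Time np.linalg.cholesky on a random SPD matrix of the chosen size; return per-run timings and GFLOP/s."""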
sizes = {'huge': 28000, 'large': 15000, 'small': 6000, 'tiny': 2000, 'test': 2}
n = sizes[size]
A = np.array(np.random.rand(n,n))
A = [email protected]
num_runs = repeats
print('num_runs =', num_runs)
results = []
for i in range(num_runs):
print("run ", i)
with Timer() as t:
L = np.linalg.cholesky(A)
run_time=t.elapsed
print(f'Time {t.elapsed} seconds from Timer')
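        # Cholesky of an n x n matrix costs about n**3 / 3 flops; convert to GFLOP and divide by run time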
ops = 1E-9 * (n**3/3.0)
gflops = ops/run_time
results.append({'run_time': run_time, 'gflops': gflops})
return results
if __name__ == '__main__':
run()
| 3.25 | 3 |
swd/stlinkcom.py | gastonfeng/pyswd | 0 | 12787748 | """ST-Link/V2 USB communication"""
import logging as _logging
import usb.core as _usb
import pyswd.swd._log as _log
class StlinkComException(Exception):
"""Exception"""
class StlinkComNotFound(Exception):
"""Exception"""
class StlinkComV2Usb():
"""ST-Link/V2 USB communication class"""
ID_VENDOR = 0x0483
ID_PRODUCT = 0x3748
PIPE_OUT = 0x02
PIPE_IN = 0x81
DEV_NAME = "V2"
_LOGGER_LEVEL3 = _logging.DEBUG - 3
def __init__(self):
self._dev = _usb.find(idVendor=self.ID_VENDOR, idProduct=self.ID_PRODUCT)
if self._dev is None:
raise StlinkComNotFound()
@_log.log(_log.DEBUG4)
def write(self, data, tout=200):
"""Write data to USB pipe"""
_logging.log(_log.DEBUG4, "%s", ', '.join(['0x%02x' % i for i in data]))
try:
count = self._dev.write(self.PIPE_OUT, data, tout)
except _usb.USBError as err:
self._dev = None
raise StlinkComException("USB Error: %s" % err)
_logging.log(_log.DEBUG4, "count=%d", count)
if count != len(data):
raise StlinkComException("Error Sending data")
@_log.log(_log.DEBUG4)
def read(self, size, tout=200):
"""Read data from USB pipe"""
read_size = size
_logging.log(_log.DEBUG4, "size=%d, read_size=%d", size, read_size)
try:
data = self._dev.read(self.PIPE_IN, read_size, tout).tolist()[:size]
except _usb.USBError as err:
self._dev = None
raise StlinkComException("USB Error: %s" % err)
_logging.log(_log.DEBUG4, "%s", ', '.join(['0x%02x' % i for i in data]))
return data
def __del__(self):
if self._dev is not None:
self._dev.finalize()
class StlinkComV21Usb(StlinkComV2Usb):
"""ST-Link/V2-1 USB communication"""
ID_VENDOR = 0x0483
ID_PRODUCT = 0x374b
PIPE_OUT = 0x01
PIPE_IN = 0x81
DEV_NAME = "V2-1"
class StlinkCom():
"""ST-Link communication class"""
_STLINK_CMD_SIZE = 16
_COM_CLASSES = [StlinkComV2Usb, StlinkComV21Usb]
def __init__(self):
self._dev = None
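        # probe each known ST-Link USB variant and keep the first device that is present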
for com_cls in self._COM_CLASSES:
try:
self._dev = com_cls()
break
except StlinkComNotFound:
continue
else:
raise StlinkComNotFound()
@property
def version(self):
"""property with device version"""
return self._dev.DEV_NAME
@_log.log(_log.DEBUG3)
def xfer(self, command, data=None, rx_length=0, tout=200):
"""Transfer command between ST-Link
Arguments:
command: is an list of bytes with command (max 16 bytes)
data: data will be sent after command
rx_length: number of expected data to receive after command and data transfer
tout: maximum waiting time for received data
Return:
received data
Raises:
StlinkComException
"""
if len(command) > self._STLINK_CMD_SIZE:
raise StlinkComException(
"Error too many Bytes in command (maximum is %d Bytes)"
% self._STLINK_CMD_SIZE)
# pad to _STLINK_CMD_SIZE
command += [0] * (self._STLINK_CMD_SIZE - len(command))
self._dev.write(command, tout)
if data:
self._dev.write(data, tout)
if rx_length:
return self._dev.read(rx_length)
return None
| 2.421875 | 2 |
WebParser.py | Ted96/Di_Bot | 1 | 12787749 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from other import keys_and_strings
def convert_to_cap_greek( s : str ) -> str:
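    """Upper-case a Greek string and replace accented capital vowels so comparisons are accent-insensitive."""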
dict_accented_caps = { 'Ό' : 'Ο', 'Ά' : 'Α', 'Ί' : 'Ι', 'Έ' : 'Ε', 'Ύ' : 'Υ', 'Ή' : 'Η', 'Ώ' : 'Ω'}
res = s.upper()
for orig, new in dict_accented_caps.items():
res = res.replace(orig , new)
#print(s + ' -->\n' + res)
return res
class SeleniumWebParser:
def __init__(self):
chrome_options = webdriver.ChromeOptions()
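        # block image loading (content setting 2) to speed up page loads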
prefs = {"profile.managed_default_content_settings.images": 2}
chrome_options.add_experimental_option("prefs", prefs)
self.driver = webdriver.Chrome(keys_and_strings.PATH_TO_DRIVER, options=chrome_options)
        self.driver.set_window_size(width=1280, height=720)  # if the window is too narrow the dropdown doesn't appear
# todo: headless?? problem with width (left menu) ^^^ ?
def login_website(self, site : int):
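        """Open the requested site and log in: 1 = my-studies (secretariat), anything else = the e-class sandbox."""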
# mystudies
if site == 1 :
from other import mycredentials
url = 'https://my-studies.uoa.gr/secr3w/connect.aspx'
elem_usr = 'username'
elem_pass = 'password'
val_usr = mycredentials.hidden_u
val_pass = mycredentials.hidden_p
else : # site == 2:
url = 'https://eclass-sandbox.noc.uoa.gr/'
elem_usr = 'uname'
elem_pass = '<PASSWORD>'
val_usr = 'stud11'
val_pass = '<PASSWORD>'
# initiate
self.driver.get(url) # go to the url
# login
username_field = self.driver.find_element_by_name(elem_usr)
password_field = self.driver.find_element_by_name(elem_pass)
username_field.send_keys(val_usr)
password_field.send_keys(val_pass)
password_field.send_keys(Keys.RETURN)
def get_average_grades(self) -> str:
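        """Log in to my-studies, average all passing grades (>= 5) from the transcript and return it as a string."""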
# mystudies : get average grade
self.login_website(1)
sum_grades: float = 0
counter = 0
self.driver.get('https://my-studies.uoa.gr/Secr3w/app/accHistory/default.aspx')
self.driver.switch_to.frame('accmain')
all_tr_rows = self.driver.find_elements_by_xpath('//table/tbody/tr')
for row in all_tr_rows:
if not str(row.text).endswith('\n '):
continue # this row is not a course-grade
td_columns = row.text.split('\n')
course: str = td_columns[0]
course = course[course.find('- ') + 2: course.rfind('(')]
grade: str = td_columns[1]
grade = grade[grade.find('(') + 1: grade.find(')')]
if ',' in grade or '.' in grade:
grade: float = float(grade.replace(',', '.'))
else:
grade: int = int(grade)
if grade < 5:
continue
sum_grades = sum_grades + grade
counter = counter + 1
print("\t__WB__ //mystudies: ", course, '\t= ', grade)
self.driver.close()
        # this takes a lot of time :: self.driver.quit()
return str( (sum_grades / counter).__round__(2) if counter != 0 else 0)
def get_grade_of(self, param_target_course: str = '') -> str:
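        """Look up a single course on my-studies by (accent-insensitive) name and return its grade as a string."""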
self.login_website(1)
# mystudies : get grade
grade: str = ''
self.driver.get('https://my-studies.uoa.gr/Secr3w/app/accHistory/default.aspx')
self.driver.switch_to.frame('accmain')
all_tr_rows = self.driver.find_elements_by_xpath('//table/tbody/tr')
for row in all_tr_rows:
if not str(row.text).endswith('\n '):
continue # this row is not a course-grade
td_columns = row.text.split('\n')
course: str = td_columns[0]
course = course[course.find('- ') + 2: course.rfind('(')]
# string comparison: check if this course == {:param_target_course}
if param_target_course.upper() in convert_to_cap_greek(course):
grade = td_columns[1]
grade = grade[grade.find('(') + 1: grade.find(')')]
print("\t__WB__ //mystudies found : ", param_target_course, '\t= ', grade)
break
self.driver.close()
return grade
def get_eclass_element(self, type_element, param_target_course: str = '') -> str:
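        """Open the matching e-class course and return the requested side-menu element (0 = latest announcement)."""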
self.login_website(2)
        # eclass : get announcements + assignments + course information
# get list of courses from main page
webelem_courses = self.driver.find_elements_by_xpath('//table/tbody/tr/td/b/a')
# #webelem_courses = self.driver.find_elements_by_class_name('text-left')
# (string comparison) click on the course with name == [ most similar to the string parameter {:param_target_course} ]
# https://www.datacamp.com/community/tutorials/fuzzy-string-python
for c in webelem_courses:
if convert_to_cap_greek(param_target_course) in convert_to_cap_greek(c.text):
c.click()
w_side_categories = self.driver.find_elements_by_class_name('list-group-item')
if w_side_categories is None:
print("!course: |"+ param_target_course+"| no side category=", type_element)
self.driver.close()
return 'not-found'
                result: str = ''  # default so the final return cannot hit an unbound local
                # side-menu indexes ::: 0=announcements 1=assignments 2=assignments 5=course information
w_side_categories[type_element].click()
self.driver.implicitly_wait(0.7)
if type_element == 0:
                    # latest announcement
elem = self.driver.find_elements_by_xpath("//*[@id=\"ann_table3\"]/tbody/tr[1]/td[1]/div")
announcement : str = elem[0].text
elem = self.driver.find_elements_by_xpath("//*[@id=\"ann_table3\"]/tbody/tr[1]/td[2]")
date_of_announcement =elem[0].text
result = date_of_announcement + " :\n " + announcement.replace('\n' , ' ')
if type_element == 1:
                    # latest deadline (not implemented yet)
pass
self.driver.close()
return result
if __name__ == "__main__":
wb = SeleniumWebParser()
test = wb.get_eclass_element( 0 , 'Εισαγωγή στον Προγραμματισμό' )
print("=" + test)
#print("\n\n", wb.get_average_grades(), "/10") # ok
| 2.78125 | 3 |
apps/feedback/fields.py | uk-gov-mirror/ministryofjustice.manchester_traffic_offences_pleas | 3 | 12787750 | <filename>apps/feedback/fields.py
from django.utils.translation import ugettext_lazy as _
ERROR_MESSAGES = {
"USED_CALL_CENTRE_REQUIRED":
_("You must tell us if you used the call centre"),
"CALL_CENTRE_SATISFACTION_REQUIRED":
_("Select an option to tell us how satisfied you were with the call centre"),
"SERVICE_SATISFACTION_REQUIRED":
_("Select an option to tell us how satisfied you were with the service")
}
| 1.992188 | 2 |