def _dispatch_coroutine(self, event, listener, *args, **kwargs):
"""Schedule a coroutine for execution.
Args:
event (str): The name of the event that triggered this call.
listener (async def): The async def that needs to be executed.
*args: Any number of positional arguments.
**kwargs: Any number of keyword arguments.
The values of *args and **kwargs are passed, unaltered, to the async
def when generating the coro. If there is an exception generating the
coro, such as the wrong number of arguments, the emitter's error event
is triggered. If the triggering event _is_ the emitter's error event
then the exception is reraised. The reraised exception may show in
debug mode for the event loop but is otherwise silently dropped.
"""
try:
coro = listener(*args, **kwargs)
except Exception as exc:
if event == self.LISTENER_ERROR_EVENT:
raise
return self.emit(self.LISTENER_ERROR_EVENT, event, listener, exc)
asyncio.ensure_future(
_try_catch_coro(self, event, listener, coro),
loop=self._loop,
) |
def _dispatch_function(self, event, listener, *args, **kwargs):
"""Execute a sync function.
Args:
event (str): The name of the event that triggered this call.
listener (def): The def that needs to be executed.
*args: Any number of positional arguments.
**kwargs: Any number of keyword arguments.
The values of *args and **kwargs are passed, unaltered, to the def
when executing. If there is an exception executing the def, such as the
wrong number of arguments, the emitter's error event is triggered. If
the triggering event _is_ the emitter's error event then the exception
is reraised. The reraised exception may show in debug mode for the
event loop but is otherwise silently dropped.
"""
try:
return listener(*args, **kwargs)
except Exception as exc:
if event == self.LISTENER_ERROR_EVENT:
raise
return self.emit(self.LISTENER_ERROR_EVENT, event, listener, exc) |
def _dispatch(self, event, listener, *args, **kwargs):
"""Dispatch an event to a listener.
Args:
event (str): The name of the event that triggered this call.
listener (def or async def): The listener to trigger.
*args: Any number of positional arguments.
**kwargs: Any number of keyword arguments.
This method inspects the listener. If it is a def it dispatches the
listener to a method that will execute that def. If it is an async def
it dispatches it to a method that will schedule the resulting coro with
the event loop.
"""
if (
asyncio.iscoroutinefunction(listener) or
isinstance(listener, functools.partial) and
asyncio.iscoroutinefunction(listener.func)
):
return self._dispatch_coroutine(event, listener, *args, **kwargs)
return self._dispatch_function(event, listener, *args, **kwargs) |
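A minimal, self-contained sketch of the check _dispatch() performs; the listener names below are made up. The functools.partial branch exists because iscoroutinefunction() alone can miss a coroutine function wrapped in a partial on some Python versions.

import asyncio
import functools

async def greet(name):
    return "hello " + name

def is_coroutine_listener(listener):
    # mirrors the test used by _dispatch() above
    return (
        asyncio.iscoroutinefunction(listener) or
        (isinstance(listener, functools.partial) and
         asyncio.iscoroutinefunction(listener.func))
    )

print(is_coroutine_listener(greet))                          # True
print(is_coroutine_listener(functools.partial(greet, "x")))  # True
print(is_coroutine_listener(print))                          # False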
def emit(self, event, *args, **kwargs):
"""Call each listener for the event with the given arguments.
Args:
event (str): The event to trigger listeners on.
*args: Any number of positional arguments.
**kwargs: Any number of keyword arguments.
This method passes all arguments other than the event name directly
to the listeners. If a listener raises an exception for any reason the
'listener-error', or current value of LISTENER_ERROR_EVENT, is emitted.
Listeners to this event are given the event name, listener object, and
the exception raised. If an error listener fails it does so silently.
All event listeners are fired in a deferred way so this method returns
immediately. The calling coro must yield at some point for the event
to propagate to the listeners.
"""
listeners = self._listeners[event]
listeners = itertools.chain(listeners, self._once[event])
self._once[event] = []
for listener in listeners:
self._loop.call_soon(
functools.partial(
self._dispatch,
event,
listener,
*args,
**kwargs,
)
)
return self |
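A standalone sketch (the names are illustrative, not part of the emitter) of the deferral emit() relies on: loop.call_soon() only queues the callback, so nothing fires until the calling coroutine yields control.

import asyncio
import functools

def listener(event, value):
    print("handled", event, value)

async def main():
    loop = asyncio.get_running_loop()
    # emit() does this once per listener: queue the call and return immediately
    loop.call_soon(functools.partial(listener, "data", 42))
    print("emit() would have returned by now")
    await asyncio.sleep(0)  # yield control so the queued callback can run
    print("listener has fired")

asyncio.run(main())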
def count(self, event):
"""Get the number of listeners for the event.
Args:
event (str): The event for which to count all listeners.
The resulting count is a combination of listeners added using
'on'/'add_listener' and 'once'.
"""
return len(self._listeners[event]) + len(self._once[event]) |
def phasicTonic(self,m1=None,m2=None,chunkMs=50,quietPercentile=10,
histResolution=.5,plotToo=False):
"""
Keep chunkMs as high as reasonably possible; 50 ms works well, and things
get flaky at lower values like 10 ms.
IMPORTANT: for this to work, prevent 0s from averaging in, so keep
bin sizes well above the data resolution.
"""
# prepare sectioning values to be used later
m1=0 if m1 is None else m1*self.pointsPerSec
m2=len(self.sweepY) if m2 is None else m2*self.pointsPerSec
m1,m2=int(m1),int(m2)
# prepare histogram values to be used later
padding=200 # pA or mV of maximum expected deviation
chunkPoints=int(chunkMs*self.pointsPerMs)
histBins=int((padding*2)/histResolution)
# center the data at 0 using peak histogram, not the mean
Y=self.sweepY[m1:m2]
hist,bins=np.histogram(Y,bins=2*padding)
Yoffset=bins[np.where(hist==max(hist))[0][0]]
Y=Y-Yoffset # we don't have to, but PDF math is easier
# calculate all histogram
nChunks=int(len(Y)/chunkPoints)
hist,bins=np.histogram(Y,bins=histBins,range=(-padding,padding))
hist=hist/len(Y) # count as a fraction of total
Xs=bins[1:]
# get baseline data from chunks with smallest variance
chunks=np.reshape(Y[:nChunks*chunkPoints],(nChunks,chunkPoints))
variances=np.var(chunks,axis=1)
percentiles=np.empty(len(variances))
for i,variance in enumerate(variances):
percentiles[i]=sorted(variances).index(variance)/len(variances)*100
blData=chunks[np.where(percentiles<=quietPercentile)[0]].flatten()
# generate the standard curve and pull it to the histogram height
sigma=np.sqrt(np.var(blData))
center=np.average(blData)+histResolution/2
blCurve=mlab.normpdf(Xs,center,sigma)
blCurve=blCurve*max(hist)/max(blCurve)
# determine the phasic current by subtracting-out the baseline
#diff=hist-blCurve
diff=hist
IGNORE_DISTANCE=5 # KEEP THIS FIXED, NOT A FUNCTION OF VARIANCE
ignrCenter=len(Xs)/2
ignrPad=IGNORE_DISTANCE/histResolution
ignr1,ignr2=int(ignrCenter-ignrPad),int(ignrCenter+ignrPad)
diff[ignr1:ignr2]=0
# optionally graph all this
if plotToo:
plt.figure(figsize=(15,5))
plt.plot(Y)
plt.figure(figsize=(7,7))
ax1=plt.subplot(211)
plt.title(self.ID+" phasic analysis")
plt.ylabel("fraction")
plt.plot(Xs,hist,'-',alpha=.8,color='b',lw=3)
plt.plot(Xs,blCurve,lw=3,alpha=.5,color='r')
plt.margins(0,.1)
plt.subplot(212,sharex=ax1)
plt.title("baseline subtracted")
plt.ylabel("fraction")
plt.xlabel("data points (%s)"%abf.units)
plt.plot(Xs,diff,'-',alpha=.8,color='b',lw=3)
plt.axhline(0,lw=3,alpha=.5,color='r')
plt.axvline(0,lw=3,alpha=.5,color='k')
plt.margins(0,.1)
plt.axis([-50,50,None,None])
plt.tight_layout()
plt.show()
print(np.sum(np.split(diff,2),1))
return diff/len(Y)*self.pointsPerSec |
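A self-contained sketch, on synthetic data with an assumed 20 kHz sample rate, of the quiet-chunk baseline selection used above; np.percentile stands in for the explicit rank loop but keeps the same idea.

import numpy as np

pointsPerMs = 20                                       # assumed 20 kHz sampling
chunkMs, quietPercentile = 50, 10
Y = np.random.normal(0, 5, 20 * 1000 * pointsPerMs)    # 20 s of fake noise

chunkPoints = chunkMs * pointsPerMs
nChunks = len(Y) // chunkPoints
chunks = Y[:nChunks * chunkPoints].reshape(nChunks, chunkPoints)
variances = chunks.var(axis=1)

# keep only the chunks whose variance falls in the lowest decile
cutoff = np.percentile(variances, quietPercentile)
baseline = chunks[variances <= cutoff].ravel()
print(len(baseline), "baseline points, sigma =", baseline.std())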
def genPNGs(folder,files=None):
"""Convert each TIF to PNG. Return filenames of new PNGs."""
if files is None:
files=glob.glob(folder+"/*.*")
new=[]
for fname in files:
ext=os.path.basename(fname).split(".")[-1].lower()
if ext in ['tif','tiff']:
if not os.path.exists(fname+".png"):
print(" -- converting %s to PNG..."%os.path.basename(fname))
cm.image_convert(fname)
new.append(fname) #fancy burn-in of image data
else:
pass
#print(" -- already converted %s to PNG..."%os.path.basename(fname))
return new |
def htmlABFcontent(ID,group,d):
"""generate text to go inside <body> for single ABF page."""
html=""
files=[]
for abfID in group:
files.extend(d[abfID])
files=sorted(files)
#start with root images
html+="<hr>"
for fname in files:
if ".png" in fname.lower() and not "swhlab4" in fname:
fname="../"+os.path.basename(fname)
html+='<a href="%s"><img src="%s" width="348"></a> '%(fname,fname)
#progress to /swhlab4/ images
html+="<hr>"
#ABFinfo
lastID=''
for fname in sorted(files):
if not "swhlab4" in fname:
continue
ID=os.path.basename(fname).split("_")[0]
if not ID==lastID:
lastID=ID
html+="<h3>%s</h3>"%os.path.basename(fname).split("_")[0]
if ".png" in fname.lower():
fname=os.path.basename(fname)
html+='<a href="%s"><img src="%s" height="300"></a> '%(fname,fname)
continue
html+="<hr>"
for fname in files:
if not "swhlab4" in fname:
continue
if ".pkl" in fname:
callit=os.path.basename(fname)
thing=cm.getPkl(fname)
if "_APs.pkl" in fname:
callit+=" (first AP)"
thing=cm.dictFlat(thing)
if len(thing):
thing=thing[0]
elif "_MTs.pkl" in fname:
if type(thing) == dict:
callit+=" (from AVG of all sweeps)"
else:
callit+=" (first sweep)"
thing=thing[0]
elif "_SAP.pkl" in fname:
continue #don't plot those, too complicated
elif "_info.pkl" in fname or "_iv.pkl" in fname:
pass #no trouble, go for it
else:
print(" ?? not sure how to index [%s]"%os.path.basename(fname))
continue
if type(thing) is dict:
thing=cm.msgDict(thing)
if type(thing) is list:
out=''
for item in thing:
out+=str(item)+"\n"
thing=out
thing=str(thing) #lol stringthing
thing="### %s ###\n"%os.path.basename(fname)+thing
# putting it in a textbox is obnoxious. put it in the source instead.
#html+='<br><br><textarea rows="%d" cols="70">%s</textarea>'%(str(thing).count("\n")+5,thing)
html+="(view source for %s) <!--\n\n%s\n\n-->"%(os.path.basename(fname),thing)
return html |
def htmlABF(ID,group,d,folder,overwrite=False):
"""given an ID and the dict of files, generate a static html for that abf."""
fname=folder+"/swhlab4/%s_index.html"%ID
if overwrite is False and os.path.exists(fname):
return
html=TEMPLATES['abf']
html=html.replace("~ID~",ID)
html=html.replace("~CONTENT~",htmlABFcontent(ID,group,d))
print(" <- writing [%s]"%os.path.basename(fname))
with open(fname,'w') as f:
f.write(html)
return |
def expMenu(groups,folder):
"""read experiment.txt and return a dict with [firstOfNewExp, color, star, comments]."""
### GENERATE THE MENU DATA BASED ON EXPERIMENT FILE
orphans = sorted(list(groups.keys()))
menu=[]
if os.path.exists(folder+'/experiment.txt'):
with open(folder+'/experiment.txt') as f:
raw=f.read()
else:
raw=""
for line in raw.split("\n"):
item={}
if len(line)==0:
continue
if line.startswith("~"):
line=line[1:].split(" ",2)
item["ID"]=line[0]
item["symbol"]=''
if len(line)>1:
item["color"]=line[1]
else:
item["color"]="white"
if len(line)>2 and len(line[2]):
item["comment"]=line[2]
if item["comment"][0]=="*":
item["symbol"]='*'
else:
item["comment"]=''
if item["ID"] in orphans:
orphans.remove(item["ID"])
elif line.startswith("###"):
line=line[3:].strip().split(" ",1)
item["title"]=line[0]
item["comment"]=''
if len(line)>1:
if line[1].startswith("- "):
line[1]=line[1][2:]
item["comment"]=line[1]
else:
item["unknown"]=line
menu.append(item)
menu.append({"title":"orphans","comment":""})
for orphan in orphans:
menu.append({"orphan":orphan,"ID":orphan,"color":'',"symbol":'',"comment":''})
return menu |
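A hypothetical experiment.txt fragment illustrating the markup expMenu() understands: "###" lines start a section (title plus an optional "- comment"), and "~" lines describe an ABF (ID, color, optional comment whose leading "*" sets the star symbol). The IDs and colors below are made up.

### gain20 - cells recorded at 20x gain
~16711011 yellow *best cell of the day
~16711023 white

Run through the branches above, the "###" line becomes {'title': 'gain20', 'comment': 'cells recorded at 20x gain'} and the first "~" line becomes {'ID': '16711011', 'color': 'yellow', 'symbol': '*', 'comment': '*best cell of the day'}.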
def genIndex(folder,forceIDs=[]):
"""expects a folder of ABFs."""
if not os.path.exists(folder+"/swhlab4/"):
print(" !! cannot index if no /swhlab4/")
return
timestart=cm.timethis()
files=glob.glob(folder+"/*.*") #ABF folder
files.extend(glob.glob(folder+"/swhlab4/*.*"))
print(" -- indexing glob took %.02f ms"%(cm.timethis(timestart)*1000))
files.extend(genPNGs(folder,files))
files=sorted(files)
timestart=cm.timethis()
d=cm.getIDfileDict(files) #TODO: this is really slow
print(" -- filedict length:",len(d))
print(" -- generating ID dict took %.02f ms"%(cm.timethis(timestart)*1000))
groups=cm.getABFgroups(files)
print(" -- groups length:",len(groups))
for ID in sorted(list(groups.keys())):
overwrite=False
for abfID in groups[ID]:
if abfID in forceIDs:
overwrite=True
try:
htmlABF(ID,groups[ID],d,folder,overwrite)
except:
print("~~ HTML GENERATION FAILED!!!")
menu=expMenu(groups,folder)
makeSplash(menu,folder)
makeMenu(menu,folder)
htmlFrames(d,folder)
makeMenu(menu,folder)
makeSplash(menu,folder) |
def drawPhasePlot(abf,m1=0,m2=None):
"""
Given an ABF object (SWHLab), draw its phase plot of the current sweep.
m1 and m2 are optional marks (in seconds) for plotting only a range of data.
Assume a matplotlib figure is already open and just draw on top of it.
"""
if not m2:
m2 = abf.sweepLength
cm = plt.get_cmap('CMRmap')
#cm = plt.get_cmap('CMRmap_r')
#cm = plt.get_cmap('spectral')
#cm = plt.get_cmap('winter')
# prepare Xs, Ys, and dYs
Y = abf.sweepY
Y = Y[int(abf.pointsPerSec*m1):int(abf.pointsPerSec*m2)]
dY = (Y[1:]-Y[:-1])*abf.rate/1000.0 # mV/ms
dY = np.append(dY,dY[-1])
Xs = np.arange(len(dY))/abf.pointsPerSec
Xs = Xs + Xs[-1]*abf.sweep
# plot the voltage
plt.subplot(131)
plt.grid(alpha=.5)
plt.plot(Xs,Y,lw=.5,color=cm(abf.sweep/abf.sweeps))
plt.title("membrane voltage")
plt.ylabel("V (mV)")
plt.xlabel("time (sec)")
plt.margins(0,.1)
# plot the first derivative of the voltage
plt.subplot(132)
plt.grid(alpha=.5)
plt.plot(Xs,dY,lw=.5,color=cm(abf.sweep/abf.sweeps))
plt.title("voltage velocity")
plt.ylabel("dV (mV/ms)")
plt.xlabel("time (sec)")
plt.margins(0,.1)
# make the phase plot
plt.subplot(133)
plt.grid(alpha=.5)
plt.plot(Y,dY,alpha=.5,lw=.5,color=cm(abf.sweep/abf.sweeps))
plt.title("phase plot")
plt.ylabel("dV (mV/ms)")
plt.xlabel("V (mV)")
plt.margins(.1,.1)
# tighten up the figure
plt.tight_layout() |
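A small sketch (assumed 20 kHz rate, synthetic voltage) of the derivative used for the phase plot: the point-to-point difference scaled by rate/1000 gives dV/dt in mV/ms, padded to match the length of Y.

import numpy as np

rate = 20000                              # samples per second (assumed)
t = np.arange(0, 0.05, 1 / rate)
Y = 30 * np.sin(2 * np.pi * 20 * t)       # fake membrane voltage in mV

dY = np.diff(Y) * rate / 1000.0           # mV per ms
dY = np.append(dY, dY[-1])                # keep the same length as Y
print(Y.shape, dY.shape, round(dY.max(), 2))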
def plotAllSweeps(abfFile):
"""simple example how to load an ABF file and plot every sweep."""
r = io.AxonIO(filename=abfFile)
bl = r.read_block(lazy=False, cascade=True)
print(abfFile+"\nplotting %d sweeps..."%len(bl.segments))
plt.figure(figsize=(12,10))
plt.title(abfFile)
for sweep in range(len(bl.segments)):
trace = bl.segments[sweep].analogsignals[0]
plt.plot(trace.times-trace.times[0],trace.magnitude,alpha=.5)
plt.ylabel(trace.dimensionality)
plt.xlabel("seconds")
plt.show()
plt.close() |
def TIF_to_jpg(fnameTiff, overwrite=False, saveAs=""):
"""
given a TIF taken by our cameras, make it a pretty labeled JPG.
if the filename contains "f10" or "f20", add appropriate scale bars.
automatic contrast adjustment is different depending on whether it is a DIC
image or a fluorescent image (which is detected automatically).
"""
if saveAs == "":
saveAs=fnameTiff+".jpg"
if overwrite is False and os.path.exists(saveAs):
print("file exists, not overwriting...")
return
# load the image
img=pylab.imread(fnameTiff)
img=img/np.max(img) # now the data is from 0 to 1
# determine the old histogram
hist1,bins1=np.histogram(img.ravel(),bins=256, range=(0,1))
#pylab.plot(bins[:-1],hist)
# detect darkfield by average:
if np.average(img)<.2:
vmin=None
vmax=None
msg=" | FLU"
while np.average(img)<.5:
img=np.sqrt(img)
msg+="^(.5)"
else:
msg=" | DIC"
percentile=.005
vmin=np.percentile(img.ravel(),percentile)
vmax=np.percentile(img.ravel(),100-percentile)
# determine the new histogram
hist2,bins2=np.histogram(img.ravel(),bins=256, range=(0,1))
# plot it with resizing magic
fig=pylab.figure(facecolor='r')
fig.gca().imshow(img,cmap=pylab.gray(),vmin=vmin,vmax=vmax)
pylab.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
pylab.gca().xaxis.set_major_locator(pylab.NullLocator())
pylab.gca().yaxis.set_major_locator(pylab.NullLocator())
pylab.axis('off')
# resize it to the original size
fig.set_size_inches(img.shape[1]/100, img.shape[0]/100)
# add text
msg="%s | %s"%(os.path.basename(fnameTiff),
datetime.datetime.fromtimestamp(os.path.getmtime(fnameTiff)))+msg
center=10
pylab.text(center,center,"%s"%(msg),va="top",color='w',size='small',
family='monospace',weight='bold',
bbox=dict(facecolor='k', alpha=.5))
# add scale bar
scaleWidthPx=False
if "f10" in fnameTiff:
scaleWidthPx,scaleBarText=39,"25 um"
if "f20" in fnameTiff:
scaleWidthPx,scaleBarText=31,"10 um"
if scaleWidthPx:
scaleBarPadding=10
x2,y2=img.shape[1]-scaleBarPadding,img.shape[0]-scaleBarPadding
x1,y1=x2-scaleWidthPx,y2
for offset,color,alpha in [[2,'k',.5],[0,'w',1]]:
pylab.plot([x1+offset,x2+offset],[y1+offset,y2+offset],'-',
color=color,lw=4,alpha=alpha)
pylab.text((x1+x2)/2+offset,y1-5+offset,scaleBarText,color=color,
ha="center",weight="bold",alpha=alpha,
size="small",va="bottom",family="monospace")
# add histogram
#pylab.plot(img.shape[1]-bins1[:-1][::-1]*200,-hist1/max(hist1)*100+110,color='g')
#pylab.plot(img.shape[1]-bins2[:-1][::-1]*200,-hist2/max(hist2)*100+110,color='b')
#pylab.show()
# save it
pylab.savefig(saveAs,dpi=100)
# clean up
pylab.close() |
def TIF_to_jpg_all(path):
"""run TIF_to_jpg() on every TIF of a folder."""
for fname in sorted(glob.glob(path+"/*.tif")):
print(fname)
TIF_to_jpg(fname) |
def analyzeSweep(abf,sweep,m1=None,m2=None,plotToo=False):
"""
m1 and m2, if given, are in seconds.
returns peakPa, the deviation (in pA) at which the baseline-subtracted data's histogram peaks
"""
abf.setsweep(sweep)
if m1 is None: m1=0
else: m1=m1*abf.pointsPerSec
if m2 is None: m2=-1
else: m2=m2*abf.pointsPerSec
# obtain X and Y
Yorig=abf.sweepY[int(m1):int(m2)]
X=np.arange(len(Yorig))/abf.pointsPerSec
Ylpf=linear_gaussian(Yorig,sigmaSize=abf.pointsPerMs*300,forwardOnly=False)
Yflat=Yorig-Ylpf
EPSCs,IPSCs=[],[]
if plotToo:
plt.figure(figsize=(15,6))
ax1=plt.subplot(211)
plt.title("%s sweep %d"%(abf.ID,sweep))
plt.grid()
plt.plot(X,Yorig,alpha=.5)
plt.plot(X,Ylpf,'k',alpha=.5,lw=2)
plt.margins(0,.2)
plt.subplot(212,sharex=ax1)
plt.title("gaussian baseline subtraction")
plt.grid()
plt.plot(X,Yflat,alpha=.5)
plt.axhline(0,color='k',lw=2,alpha=.5)
plt.tight_layout()
plt.show()
# TEST GAUSS
hist, bin_edges = np.histogram(Yflat, density=True, bins=200)
peakPa=bin_edges[np.where(hist==max(hist))[0][0]+1]
if plotToo:
plt.figure()
plt.grid()
plt.plot(bin_edges[1:],hist,alpha=.5)
plt.axvline(0,color='k')
plt.axvline(peakPa,color='r',ls='--',lw=2,alpha=.5)
plt.semilogy()
plt.title("sweep data distribution")
plt.ylabel("power")
plt.xlabel("pA deviation")
plt.show()
return peakPa |
def convert(fname,saveAs=True,showToo=False):
"""
Convert weird TIF files into web-friendly versions.
Auto contrast is applied (saturating the lower and upper 0.01%).
make saveAs True to save as .TIF.png
make saveAs False and it won't save at all
make saveAs "someFile.jpg" to save it as a different path/format
"""
# load the image
#im = Image.open(fname) #PIL can't handle 12-bit TIFs well
im=ndimage.imread(fname) #scipy does better with it
im=np.array(im,dtype=float) # now it's a numpy array
# do all image enhancement here
cutoffLow=np.percentile(im,.01)
cutoffHigh=np.percentile(im,99.99)
im[np.where(im<cutoffLow)]=cutoffLow
im[np.where(im>cutoffHigh)]=cutoffHigh
# IMAGE FORMATTING
im-=np.min(im) #auto contrast
im/=np.max(im) #normalize
im*=255 #stretch contrast (8-bit)
im = Image.fromarray(im)
# IMAGE DRAWING
msg="%s\n"%os.path.basename(fname)
msg+="%s\n"%cm.epochToString(os.path.getmtime(fname))
d = ImageDraw.Draw(im)
fnt = ImageFont.truetype("arial.ttf", 20)
d.text((6,6),msg,font=fnt,fill=0)
d.text((4,4),msg,font=fnt,fill=255)
if showToo:
im.show()
if saveAs is False:
return
if saveAs is True:
saveAs=fname+".png"
im.convert('RGB').save(saveAs)
return saveAs |
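A standalone sketch of the contrast pipeline in convert(), on a fake frame: clip to the 0.01/99.99 percentiles, shift to zero, normalize, and stretch to 8-bit range.

import numpy as np

im = np.random.randint(0, 4096, (64, 64)).astype(float)   # fake 12-bit frame
lo, hi = np.percentile(im, .01), np.percentile(im, 99.99)
im = np.clip(im, lo, hi)
im -= im.min()    # auto contrast
im /= im.max()    # normalize to 0..1
im *= 255         # stretch to 8-bit
print(im.min(), im.max())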
def plot_shaded_data(X,Y,variances,varianceX):
"""plot X and Y data, then shade its background by variance."""
plt.plot(X,Y,color='k',lw=2)
nChunks=int(len(Y)/CHUNK_POINTS)
for i in range(0,100,PERCENT_STEP):
varLimitLow=np.percentile(variances,i)
varLimitHigh=np.percentile(variances,i+PERCENT_STEP)
varianceIsAboveMin=np.where(variances>=varLimitLow)[0]
varianceIsBelowMax=np.where(variances<=varLimitHigh)[0]
varianceIsRange=[chunkNumber for chunkNumber in range(nChunks) \
if chunkNumber in varianceIsAboveMin \
and chunkNumber in varianceIsBelowMax]
for chunkNumber in varianceIsRange:
t1=chunkNumber*CHUNK_POINTS/POINTS_PER_SEC
t2=t1+CHUNK_POINTS/POINTS_PER_SEC
plt.axvspan(t1,t2,alpha=.3,color=COLORMAP(i/100),lw=0) |
def show_variances(Y,variances,varianceX,logScale=False):
"""create some fancy graphs to show color-coded variances."""
plt.figure(1,figsize=(10,7))
plt.figure(2,figsize=(10,7))
varSorted=sorted(variances)
plt.figure(1)
plt.subplot(211)
plt.grid()
plt.title("chronological variance")
plt.ylabel("original data")
plot_shaded_data(X,Y,variances,varianceX)
plt.margins(0,.1)
plt.subplot(212)
plt.ylabel("variance (pA) (log%s)"%str(logScale))
plt.xlabel("time in sweep (sec)")
plt.plot(varianceX,variances,'k-',lw=2)
plt.figure(2)
plt.ylabel("variance (pA) (log%s)"%str(logScale))
plt.xlabel("chunk number")
plt.title("sorted variance")
plt.plot(varSorted,'k-',lw=2)
for i in range(0,100,PERCENT_STEP):
varLimitLow=np.percentile(variances,i)
varLimitHigh=np.percentile(variances,i+PERCENT_STEP)
label="%2d-%d percentile"%(i,i++PERCENT_STEP)
color=COLORMAP(i/100)
print("%s: variance = %.02f - %.02f"%(label,varLimitLow,varLimitHigh))
plt.figure(1)
plt.axhspan(varLimitLow,varLimitHigh,alpha=.5,lw=0,color=color,label=label)
plt.figure(2)
chunkLow=np.where(varSorted>=varLimitLow)[0][0]
chunkHigh=np.where(varSorted>=varLimitHigh)[0][0]
plt.axvspan(chunkLow,chunkHigh,alpha=.5,lw=0,color=color,label=label)
for fignum in [1,2]:
plt.figure(fignum)
if logScale:
plt.semilogy()
plt.margins(0,0)
plt.grid()
if fignum == 2:
plt.legend(fontsize=10,loc='upper left',shadow=True)
plt.tight_layout()
plt.savefig('2016-12-15-variance-%d-log%s.png'%(fignum,str(logScale)))
plt.show() |
def ensureDetection(self):
"""
run this before analysis. Checks if event detection occurred.
If not, runs AP detection on all sweeps.
"""
if self.APs is False:
self.log.debug("analysis attempted before event detection...")
self.detect() |
def detect(self):
"""runs AP detection on every sweep."""
self.log.info("initializing AP detection on all sweeps...")
t1=cm.timeit()
for sweep in range(self.abf.sweeps):
self.detectSweep(sweep)
self.log.info("AP analysis of %d sweeps found %d APs (completed in %s)",
self.abf.sweeps,len(self.APs),cm.timeit(t1)) |
def detectSweep(self,sweep=0):
"""perform AP detection on current sweep."""
if self.APs is False: # indicates detection never happened
self.APs=[] # now indicates detection occured
# delete every AP from this sweep from the existing array
for i,ap in enumerate(self.APs):
if ap["sweep"]==sweep:
self.APs[i]=None
if self.APs.count(None):
self.log.debug("deleting %d existing APs from memory",self.APs.count(None))
while None in self.APs:
self.APs.remove(None)
self.log.debug("initiating AP detection (%d already in memory)",len(self.APs))
self.abf.derivative=True
self.abf.setsweep(sweep)
# detect potential AP (Is) by a dV/dT threshold crossing
Is = cm.where_cross(self.abf.sweepD,self.detect_over)
self.log.debug("initial AP detection: %d APs"%len(Is))
# eliminate APs where dV/dT doesn't cross below -10 V/S within 2 ms
for i,I in enumerate(Is):
if np.min(self.abf.sweepD[I:I+2*self.abf.pointsPerMs])>-10:
Is[i]=0
Is=Is[np.nonzero(Is)]
self.log.debug("after lower threshold checking: %d APs"%len(Is))
# walk 1ms backwards and find point of +10 V/S threshold crossing
for i,I in enumerate(Is):
stepBack=0
while(self.abf.sweepD[I-stepBack])>10 and stepBack/self.abf.pointsPerMs<1: #1ms max
stepBack+=1
Is[i]-=stepBack
# analyze each AP
sweepAPs=[]
for i,I in enumerate(Is):
try:
timeInSweep=I/self.abf.pointsPerSec
if timeInSweep<self.detect_time1 or timeInSweep>self.detect_time2:
continue # skip because it's not within the marks
ap={} # create the AP entry
ap["sweep"]=sweep # number of the sweep containing this AP
ap["I"]=I # index sweep point of start of AP (10 mV/ms threshold crossing)
ap["Tsweep"]=I/self.abf.pointsPerSec # time in the sweep of index crossing (sec)
ap["T"]=ap["Tsweep"]+self.abf.sweepInterval*sweep # time in the experiment
ap["Vthreshold"]=self.abf.sweepY[I] # threshold at rate of -10mV/ms
# determine how many points from the start dV/dt goes below -10 (from a 5ms chunk)
chunk=self.abf.sweepD[I:I+5*self.abf.pointsPerMs] # give it 5ms to cross once
I_toNegTen=np.where(chunk<-10)[0][0]
chunk=self.abf.sweepD[I+I_toNegTen:I+I_toNegTen+10*self.abf.pointsPerMs] # give it 10ms to cross back
if not max(chunk)>-10:
self.log.debug("skipping unreal AP at T=%f"%ap["T"])
self.log.error("^^^ can you confirm this is legit?")
continue # probably a pre-AP "bump" to be ignored
I_recover=np.where(chunk>-10)[0][0]+I_toNegTen+I # point where trace returns to above -10 V/S
ap["dVfastIs"]=[I,I_recover] # span of the fast component of the dV/dt trace
ap["dVfastMS"]=(I_recover-I)/self.abf.pointsPerMs # time (in ms) of this fast AP component
# determine derivative min/max from a 2ms chunk which we expect to capture the fast AP
chunk=self.abf.sweepD[ap["dVfastIs"][0]:ap["dVfastIs"][1]]
ap["dVmax"]=np.max(chunk)
ap["dVmaxI"]=np.where(chunk==ap["dVmax"])[0][0]+I
ap["dVmin"]=np.min(chunk)
ap["dVminI"]=np.where(chunk==ap["dVmin"])[0][0]+I
if ap["dVmax"]<10 or ap["dVmin"]>-10:
self.log.debug("throwing out AP with low dV/dt to be an AP")
self.log.error("^^^ can you confirm this is legit?")
continue
# before determining AP shape stats, see where trace recovers to threshold
chunkSize=self.abf.pointsPerMs*10 #AP shape may be 10ms
if len(Is)-1>i and Is[i+1]<(I+chunkSize): # if slow AP runs into next AP
chunkSize=Is[i+1]-I # chop it down
if chunkSize<(self.abf.pointsPerMs*2):
continue # next AP is so soon, it's >500 Hz. Can't be real.
ap["VslowIs"]=[I,I+chunkSize] # time range of slow AP dynamics
chunk=self.abf.sweepY[I:I+chunkSize]
# determine AP peak and minimum
ap["Vmax"]=np.max(chunk)
ap["VmaxI"]=np.where(chunk==ap["Vmax"])[0][0]+I
chunkForMin=np.copy(chunk) # so we can destroy it
chunkForMin[:ap["VmaxI"]-I]=np.inf # minimum won't be before peak now
ap["Vmin"]=np.min(chunkForMin) # supposedly the minimum is the AHP
ap["VminI"]=np.where(chunkForMin==ap["Vmin"])[0][0]+I
if ap["VminI"]<ap["VmaxI"]:
self.log.error("-------------------------------")
self.log.error("how is the AHP before the peak?") #TODO: start chunk at the peak
self.log.error("-------------------------------")
#print((I+len(chunk))-ap["VminI"],len(chunk))
if (len(chunk))-((I+len(chunk))-ap["VminI"])<10:
self.log.error("-------------------------------")
self.log.error("HP too close for comfort!")
self.log.error("-------------------------------")
ap["msRiseTime"]=(ap["VmaxI"]-I)/self.abf.pointsPerMs # time from threshold to peak
ap["msFallTime"]=(ap["VminI"]-ap["VmaxI"])/self.abf.pointsPerMs # time from peak to nadir
# determine halfwidth
ap["Vhalf"]=np.average([ap["Vmax"],ap["Vthreshold"]]) # half way from threshold to peak
ap["VhalfI1"]=cm.where_cross(chunk,ap["Vhalf"])[0]+I # time it's first crossed
ap["VhalfI2"]=cm.where_cross(-chunk,-ap["Vhalf"])[1]+I # time it's second crossed
ap["msHalfwidth"]=(ap["VhalfI2"]-ap["VhalfI1"])/self.abf.pointsPerMs # time between crossings
# AP error checking goes here
# TODO:
# if we got this far, add the AP to the list
sweepAPs.extend([ap])
except Exception as e:
self.log.error("crashed analyzing AP %d of %d",i,len(Is))
self.log.error(cm.exceptionToString(e))
#cm.pause()
#cm.waitFor(30)
#self.log.error("EXCEPTION!:\n%s"%str(sys.exc_info()))
self.log.debug("finished analyzing sweep. Found %d APs",len(sweepAPs))
self.APs.extend(sweepAPs)
self.abf.derivative=False |
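cm.where_cross() is a project helper not shown here; a minimal equivalent is sketched below under the assumption that it returns the indices where a trace crosses up through a threshold.

import numpy as np

def where_cross(data, threshold):
    above = data >= threshold
    # an upward crossing is a False -> True transition
    return np.where(~above[:-1] & above[1:])[0] + 1

dVdt = np.array([0, 2, 8, 25, 60, 12, -30, -5, 3, 1])   # fake dV/dt trace (V/s)
print(where_cross(dVdt, 10))    # [3]: first point at or above +10 V/s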
def get_times(self):
"""return an array of times (in sec) of all APs."""
self.ensureDetection()
times=[]
for ap in self.APs:
times.append(ap["T"])
return np.array(sorted(times)) |
def get_bySweep(self,feature="freqs"):
"""
returns AP info by sweep arranged as a list (by sweep).
feature:
* "freqs" - list of instantaneous frequencies by sweep.
* "firsts" - list of first instantaneous frequency by sweep.
* "times" - list of times of each AP in the sweep.
* "count" - numer of APs per sweep.
* "average" - average instanteous frequency per sweep.
* "median" - median instanteous frequency per sweep.
"""
self.ensureDetection()
bySweepTimes=[[]]*self.abf.sweeps
# determine AP spike times by sweep
for sweep in range(self.abf.sweeps):
sweepTimes=[]
for ap in self.APs:
if ap["sweep"]==sweep:
sweepTimes.append(ap["Tsweep"])
bySweepTimes[sweep]=sweepTimes
# determine instantaneous frequencies by sweep
bySweepFreqs=[[]]*self.abf.sweeps
for i,times in enumerate(bySweepTimes):
if len(times)<2:
continue
diffs=np.array(times[1:])-np.array(times[:-1])
bySweepFreqs[i]=np.array(1/diffs).tolist()
# give the user what they want
if feature == "freqs":
return bySweepFreqs
elif feature == "firsts":
result=np.zeros(self.abf.sweeps) # initialize to this
for i,freqs in enumerate(bySweepFreqs):
if len(freqs):
result[i]=freqs[0]
return result
elif feature == "times":
return bySweepTimes
elif feature == "count":
result=np.zeros(self.abf.sweeps) # initialize to this
for i,times in enumerate(bySweepTimes):
result[i]=len(bySweepTimes[i])
return result
elif feature == "average":
result=np.zeros(self.abf.sweeps) # initialize to this
for i,freqs in enumerate(bySweepFreqs):
if len(freqs):
result[i]=np.nanmean(freqs)
return result
elif feature == "median":
result=np.zeros(self.abf.sweeps) # initialize to this
for i,freqs in enumerate(bySweepFreqs):
if len(freqs):
result[i]=np.nanmedian(freqs)
return result
else:
self.log.error("get_bySweep() can't handle [%s]",feature)
return None |
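A small numeric sketch (made-up spike times) of the instantaneous-frequency computation above: inverted differences of adjacent spike times give Hz, which then feed the "firsts", "average", and "median" features.

import numpy as np

spike_times = np.array([0.10, 0.15, 0.22, 0.40])   # seconds, fake data
freqs = 1.0 / np.diff(spike_times)
print(freqs)                                        # [20.0, ~14.3, ~5.6] Hz
print(freqs[0], np.nanmean(freqs), np.nanmedian(freqs))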
def get_author_and_version(package):
"""
Return package author and version as listed in `__init__.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
author = re.search("__author__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
version = re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
return author, version |
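A quick, self-contained check of the regexes above against a hypothetical __init__.py body.

import re

init_py = '__author__ = "Jane Doe"\n__version__ = "1.2.3"\n'
author = re.search("__author__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
version = re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
print(author, version)   # Jane Doe 1.2.3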
def api_subclass_factory(name, docstring, remove_methods, base=SlackApi):
"""Create an API subclass with fewer methods than its base class.
Arguments:
name (:py:class:`str`): The name of the new class.
docstring (:py:class:`str`): The docstring for the new class.
remove_methods (:py:class:`dict`): The methods to remove from
the base class's :py:attr:`API_METHODS` for the subclass. The
key is the name of the root method (e.g. ``'auth'`` for
``'auth.test'``), the value is either a tuple of child method
names (e.g. ``('test',)``) or, if all children should be
removed, the special value :py:const:`ALL`.
base (:py:class:`type`, optional): The base class (defaults to
:py:class:`SlackApi`).
Returns:
:py:class:`type`: The new subclass.
Raises:
:py:class:`KeyError`: If the method wasn't in the superclass.
"""
methods = deepcopy(base.API_METHODS)
for parent, to_remove in remove_methods.items():
if to_remove is ALL:
del methods[parent]
else:
for method in to_remove:
del methods[parent][method]
return type(name, (base,), dict(API_METHODS=methods, __doc__=docstring)) |
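A self-contained sketch of the factory in action, with a dummy base class standing in for SlackApi and a sentinel standing in for ALL (both are assumptions made for illustration).

from copy import deepcopy

ALL = object()

class DummyApi:
    API_METHODS = {'auth': {'test': 'docs'},
                   'chat': {'post': 'docs', 'delete': 'docs'}}

def subclass_factory(name, docstring, remove_methods, base=DummyApi):
    methods = deepcopy(base.API_METHODS)
    for parent, to_remove in remove_methods.items():
        if to_remove is ALL:
            del methods[parent]
        else:
            for method in to_remove:
                del methods[parent][method]
    return type(name, (base,), dict(API_METHODS=methods, __doc__=docstring))

Limited = subclass_factory('Limited', 'No auth, no delete.',
                           {'auth': ALL, 'chat': ('delete',)})
print(Limited.API_METHODS)   # {'chat': {'post': 'docs'}}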
async def execute_method(self, method, **params):
"""Execute a specified Slack Web API method.
Arguments:
method (:py:class:`str`): The name of the method.
**params (:py:class:`dict`): Any additional parameters
required.
Returns:
:py:class:`dict`: The JSON data from the response.
Raises:
:py:class:`aiohttp.web_exceptions.HTTPException`: If the HTTP
request returns a code other than 200 (OK).
SlackApiError: If the Slack API is reached but the response
contains an error message.
"""
url = self.url_builder(method, url_params=params)
logger.info('Executing method %r', method)
response = await aiohttp.get(url)
logger.info('Status: %r', response.status)
if response.status == 200:
json = await response.json()
logger.debug('...with JSON %r', json)
if json.get('ok'):
return json
raise SlackApiError(json['error'])
else:
raise_for_status(response) |
def method_exists(cls, method):
"""Whether a given method exists in the known API.
Arguments:
method (:py:class:`str`): The name of the method.
Returns:
:py:class:`bool`: Whether the method is in the known API.
"""
methods = cls.API_METHODS
for key in method.split('.'):
methods = methods.get(key)
if methods is None:
break
if isinstance(methods, str):
logger.debug('%r: %r', method, methods)
return True
return False |
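A simplified, self-contained restatement of the nested-dict walk above; the API mapping is made up.

API_METHODS = {'auth': {'test': 'Checks authentication.'},
               'chat': {'postMessage': 'Sends a message.'}}

def method_exists(method):
    methods = API_METHODS
    for key in method.split('.'):
        methods = methods.get(key)
        if methods is None:
            return False
    return isinstance(methods, str)

print(method_exists('auth.test'))     # True
print(method_exists('chat.delete'))   # False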
def _add_parsley_ns(cls, namespace_dict):
"""
Extend XPath evaluation with Parsley extensions' namespace
"""
namespace_dict.update({
'parslepy' : cls.LOCAL_NAMESPACE,
'parsley' : cls.LOCAL_NAMESPACE,
})
return namespace_dict |
def make(self, selection):
"""
XPath expression can also use EXSLT functions (as long as they are
understood by libxslt)
"""
cached = self._selector_cache.get(selection)
if cached:
return cached
try:
selector = lxml.etree.XPath(selection,
namespaces = self.namespaces,
extensions = self.extensions,
smart_strings=(self.SMART_STRINGS
or self._test_smart_strings_needed(selection)),
)
except lxml.etree.XPathSyntaxError as syntax_error:
syntax_error.msg += ": %s" % selection
raise syntax_error
except Exception as e:
if self.DEBUG:
print(repr(e), selection)
raise
# wrap it/cache it
self._selector_cache[selection] = Selector(selector)
return self._selector_cache[selection] |
def extract(self, document, selector, debug_offset=''):
"""
Try to convert matching Elements to unicode strings.
If this fails, the selector evaluation probably already
returned strings, a boolean value, or an int/float,
so return that instead.
"""
selected = self.select(document, selector)
if selected is not None:
if isinstance(selected, (list, tuple)):
# FIXME: return None or return empty list?
if not len(selected):
return
return [self._extract_single(m) for m in selected]
else:
return self._extract_single(selected)
# selector did not match anything
else:
if self.DEBUG:
print(debug_offset, "selector did not match anything; return None")
return None |
def make(self, selection):
"""
Scopes and selectors are tested in this order:
* is this a CSS selector with an appended @something attribute?
* is this a regular CSS selector?
* is this an XPath expression?
XPath expression can also use EXSLT functions (as long as they are
understood by libxslt)
"""
cached = self._selector_cache.get(selection)
if cached:
return cached
namespaces = self.EXSLT_NAMESPACES
self._add_parsley_ns(namespaces)
try:
# CSS with attribute? (non-standard but convenient)
# CSS selector cannot select attributes
# this "<css selector> @<attr>" syntax is a Parsley extension
# construct CSS selector and append attribute to XPath expression
m = self.REGEX_ENDING_ATTRIBUTE.match(selection)
if m:
# the selector should be a regular CSS selector
cssxpath = css_to_xpath(m.group("expr"))
# if "|" is used for namespace prefix reference,
# convert it to XPath prefix syntax
attribute = m.group("attr").replace('|', ':')
cssxpath = "%s/%s" % (cssxpath, attribute)
else:
cssxpath = css_to_xpath(selection)
selector = lxml.etree.XPath(
cssxpath,
namespaces = self.namespaces,
extensions = self.extensions,
smart_strings=(self.SMART_STRINGS
or self._test_smart_strings_needed(selection)),
)
except tuple(self.CSSSELECT_SYNTAXERROR_EXCEPTIONS) as syntax_error:
if self.DEBUG:
print(repr(syntax_error), selection)
print("Try interpreting as XPath selector")
try:
selector = lxml.etree.XPath(selection,
namespaces = self.namespaces,
extensions = self.extensions,
smart_strings=(self.SMART_STRINGS
or self._test_smart_strings_needed(selection)),
)
except lxml.etree.XPathSyntaxError as syntax_error:
syntax_error.msg += ": %s" % selection
raise syntax_error
except Exception as e:
if self.DEBUG:
print(repr(e), selection)
raise
# for exception when trying to convert <cssselector> @<attribute> syntax
except lxml.etree.XPathSyntaxError as syntax_error:
syntax_error.msg += ": %s" % selection
raise syntax_error
except Exception as e:
if self.DEBUG:
print(repr(e), selection)
raise
# wrap it/cache it
self._selector_cache[selection] = Selector(selector)
return self._selector_cache[selection] |
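A rough sketch of the "<css selector> @<attr>" convenience handled above, using cssselect's translator directly; the regex-based key parsing of the real code is replaced here by a naive rsplit.

from cssselect import HTMLTranslator

css_to_xpath = HTMLTranslator().css_to_xpath
selection = "li.newsitem a @href"
expr, attr = selection.rsplit(" @", 1)       # simplified split, illustration only
xpath = "%s/@%s" % (css_to_xpath(expr), attr)
print(xpath)   # e.g. descendant-or-self::li[...]/descendant-or-self::*/a/@href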
async def join_rtm(self, filters=None):
"""Join the real-time messaging service.
Arguments:
filters (:py:class:`dict`, optional): Dictionary mapping
message filters to the functions they should dispatch to.
Use a :py:class:`collections.OrderedDict` if precedence is
important; only one filter, the first match, will be
applied to each message.
"""
if filters is None:
filters = [cls(self) for cls in self.MESSAGE_FILTERS]
url = await self._get_socket_url()
logger.debug('Connecting to %r', url)
async with ws_connect(url) as socket:
first_msg = await socket.receive()
self._validate_first_message(first_msg)
self.socket = socket
async for message in socket:
if message.tp == MsgType.text:
await self.handle_message(message, filters)
elif message.tp in (MsgType.closed, MsgType.error):
if not socket.closed:
await socket.close()
self.socket = None
break
logger.info('Left real-time messaging.') |
async def handle_message(self, message, filters):
"""Handle an incoming message appropriately.
Arguments:
message (:py:class:`aiohttp.websocket.Message`): The incoming
message to handle.
filters (:py:class:`list`): The filters to apply to incoming
messages.
"""
data = self._unpack_message(message)
logger.debug(data)
if data.get('type') == 'error':
raise SlackApiError(
data.get('error', {}).get('msg', str(data))
)
elif self.message_is_to_me(data):
text = data['text'][len(self.address_as):].strip()
if text == 'help':
return self._respond(
channel=data['channel'],
text=self._instruction_list(filters),
)
elif text == 'version':
return self._respond(
channel=data['channel'],
text=self.VERSION,
)
for _filter in filters:
if _filter.matches(data):
logger.debug('Response triggered')
async for response in _filter:
self._respond(channel=data['channel'], text=response) |
def message_is_to_me(self, data):
"""If you send a message directly to me"""
return (data.get('type') == 'message' and
data.get('text', '').startswith(self.address_as)) |
async def from_api_token(cls, token=None, api_cls=SlackBotApi):
"""Create a new instance from the API token.
Arguments:
token (:py:class:`str`, optional): The bot's API token
(defaults to ``None``, which means looking in the
environment).
api_cls (:py:class:`type`, optional): The class to create
as the ``api`` argument for API access (defaults to
:py:class:`aslack.slack_api.SlackBotApi`).
Returns:
:py:class:`SlackBot`: The new instance.
"""
api = api_cls.from_env() if token is None else api_cls(api_token=token)
data = await api.execute_method(cls.API_AUTH_ENDPOINT)
return cls(data['user_id'], data['user'], api) |
def _format_message(self, channel, text):
"""Format an outgoing message for transmission.
Note:
Adds the message type (``'message'``) and incremental ID.
Arguments:
channel (:py:class:`str`): The channel to send to.
text (:py:class:`str`): The message text to send.
Returns:
:py:class:`str`: The JSON string of the message.
"""
payload = {'type': 'message', 'id': next(self._msg_ids)}
payload.update(channel=channel, text=text)
return json.dumps(payload) |
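A minimal sketch of the outgoing-message format: incremental ids (itertools.count() here is an assumption about how _msg_ids is built) plus the 'message' type, serialized as JSON.

import itertools
import json

_msg_ids = itertools.count(1)

def format_message(channel, text):
    payload = {'type': 'message', 'id': next(_msg_ids)}
    payload.update(channel=channel, text=text)
    return json.dumps(payload)

print(format_message('C123', 'hello'))
# {"type": "message", "id": 1, "channel": "C123", "text": "hello"}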
async def _get_socket_url(self):
"""Get the WebSocket URL for the RTM session.
Warning:
The URL expires if the session is not joined within 30
seconds of the API call to the start endpoint.
Returns:
:py:class:`str`: The socket URL.
"""
data = await self.api.execute_method(
self.RTM_START_ENDPOINT,
simple_latest=True,
no_unreads=True,
)
return data['url'] |
def _instruction_list(self, filters):
"""Generates the instructions for a bot and its filters.
Note:
The guidance for each filter is generated by combining the
docstrings of the predicate filter and resulting dispatch
function with a single space between. The class's
:py:attr:`INSTRUCTIONS` and the default help command are
added.
Arguments:
filters (:py:class:`list`): The filters to apply to incoming
messages.
Returns:
:py:class:`str`: The bot's instructions.
"""
return '\n\n'.join([
self.INSTRUCTIONS.strip(),
'*Supported methods:*',
'If you send "@{}: help" to me I reply with these '
'instructions.'.format(self.user),
'If you send "@{}: version" to me I reply with my current '
'version.'.format(self.user),
] + [filter.description() for filter in filters]) |
def _respond(self, channel, text):
"""Respond to a message on the current socket.
Args:
channel (:py:class:`str`): The channel to send to.
text (:py:class:`str`): The message text to send.
"""
result = self._format_message(channel, text)
if result is not None:
logger.info(
'Sending message: %r',
truncate(result, max_len=50),
)
self.socket.send_str(result) |
def _validate_first_message(cls, msg):
"""Check the first message matches the expected handshake.
Note:
The handshake is provided as :py:attr:`RTM_HANDSHAKE`.
Arguments:
msg (:py:class:`aiohttp.Message`): The message to validate.
Raises:
:py:class:`SlackApiError`: If the data doesn't match the
expected handshake.
"""
data = cls._unpack_message(msg)
logger.debug(data)
if data != cls.RTM_HANDSHAKE:
raise SlackApiError('Unexpected response: {!r}'.format(data))
logger.info('Joined real-time messaging.') |
def find_first_existing_executable(exe_list):
"""
Accepts a list of ('executable_file_path', 'options') tuples and
returns the first executable_file_path that runs successfully.
"""
for filepath, opts in exe_list:
try:
proc = subprocess.Popen([filepath, opts],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.communicate()
except OSError:
pass
else:
return filepath |
def get_app_locations():
"""
Returns list of paths to tested apps
"""
return [os.path.dirname(os.path.normpath(import_module(app_name).__file__))
for app_name in PROJECT_APPS] |
def get_tasks():
"""Get the imported task classes for each task that will be run"""
task_classes = []
for task_path in TASKS:
try:
module, classname = task_path.rsplit('.', 1)
except ValueError:
raise ImproperlyConfigured('%s isn\'t a task module' % task_path)
try:
mod = import_module(module)
except ImportError as e:
raise ImproperlyConfigured('Error importing task %s: "%s"'
% (module, e))
try:
task_class = getattr(mod, classname)
except AttributeError:
raise ImproperlyConfigured('Task module "%s" does not define a '
'"%s" class' % (module, classname))
task_classes.append(task_class)
return task_classes |
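A standalone sketch of the dotted-path resolution get_tasks() performs, resolving a stdlib name here instead of a project task module.

from importlib import import_module

task_path = 'collections.OrderedDict'
module, classname = task_path.rsplit('.', 1)
task_class = getattr(import_module(module), classname)
print(task_class)   # <class 'collections.OrderedDict'>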
def get_task_options():
"""Get the options for each task that will be run"""
options = ()
task_classes = get_tasks()
for cls in task_classes:
options += cls.option_list
return options |
def to_cldf(self, dest, mdname='cldf-metadata.json'):
"""
Write the data from the db to a CLDF dataset according to the metadata in `self.dataset`.
:param dest: Destination directory for the CLDF dataset (created if it does not exist).
:param mdname: Filename for the CLDF metadata file.
:return: path of the metadata file
"""
dest = Path(dest)
if not dest.exists():
dest.mkdir()
data = self.read()
if data[self.source_table_name]:
sources = Sources()
for src in data[self.source_table_name]:
sources.add(Source(
src['genre'],
src['id'],
**{k: v for k, v in src.items() if k not in ['id', 'genre']}))
sources.write(dest / self.dataset.properties.get('dc:source', 'sources.bib'))
for table_type, items in data.items():
try:
table = self.dataset[table_type]
table.common_props['dc:extent'] = table.write(
[self.retranslate(table, item) for item in items],
base=dest)
except KeyError:
assert table_type == self.source_table_name, table_type
return self.dataset.write_metadata(dest / mdname) |
def validate(args):
"""
cldf validate <DATASET>
Validate a dataset against the CLDF specification, i.e. check
- whether required tables and columns are present
- whether values for required columns are present
- the referential integrity of the dataset
"""
ds = _get_dataset(args)
ds.validate(log=args.log) |
def stats(args):
"""
cldf stats <DATASET>
Print basic stats for CLDF dataset <DATASET>, where <DATASET> may be the path to
- a CLDF metadata file
- a CLDF core data file
"""
ds = _get_dataset(args)
print(ds)
md = Table('key', 'value')
md.extend(ds.properties.items())
print(md.render(condensed=False, tablefmt=None))
print()
t = Table('Path', 'Type', 'Rows')
for p, type_, r in ds.stats():
t.append([p, type_, r])
print(t.render(condensed=False, tablefmt=None)) |
def createdb(args):
"""
cldf createdb <DATASET> <SQLITE_DB_PATH>
Load CLDF dataset <DATASET> into a SQLite DB, where <DATASET> may be the path to
- a CLDF metadata file
- a CLDF core data file
"""
if len(args.args) < 2:
raise ParserError('not enough arguments')
ds = _get_dataset(args)
db = Database(ds, fname=args.args[1])
db.write_from_tg()
args.log.info('{0} loaded in {1}'.format(ds, db.fname)) |
def dumpdb(args):
"""
cldf dumpdb <DATASET> <SQLITE_DB_PATH> [<METADATA_PATH>]
"""
if len(args.args) < 2:
raise ParserError('not enough arguments') # pragma: no cover
ds = _get_dataset(args)
db = Database(ds, fname=args.args[1])
mdpath = Path(args.args[2]) if len(args.args) > 2 else ds.tablegroup._fname
args.log.info('dumped db to {0}'.format(db.to_cldf(mdpath.parent, mdname=mdpath.name))) |
def description(self):
"""A user-friendly description of the handler.
Returns:
:py:class:`str`: The handler's description.
"""
if self._description is None:
text = '\n'.join(self.__doc__.splitlines()[1:]).strip()
lines = []
for line in map(str.strip, text.splitlines()):
if line and lines:
lines[-1] = ' '.join((lines[-1], line))
elif line:
lines.append(line)
else:
lines.append('')
self._description = '\n'.join(lines)
return self._description |
def from_jsonfile(cls, fp, selector_handler=None, strict=False, debug=False):
"""
Create a Parselet instance from a file containing
the Parsley script as a JSON object
>>> import parslepy
>>> with open('parselet.json') as fp:
... parslepy.Parselet.from_jsonfile(fp)
...
<parslepy.base.Parselet object at 0x2014e50>
:param file fp: an open file-like pointer containing the Parsley script
:rtype: :class:`.Parselet`
Other arguments: same as for :class:`.Parselet` constructor
"""
return cls._from_jsonlines(fp,
selector_handler=selector_handler, strict=strict, debug=debug) |
def from_yamlfile(cls, fp, selector_handler=None, strict=False, debug=False):
"""
Create a Parselet instance from a file containing
the Parsley script as a YAML object
>>> import parslepy
>>> with open('parselet.yml') as fp:
... parslepy.Parselet.from_yamlfile(fp)
...
<parslepy.base.Parselet object at 0x2014e50>
:param file fp: an open file-like pointer containing the Parsley script
:rtype: :class:`.Parselet`
Other arguments: same as for :class:`.Parselet` constructor
"""
return cls.from_yamlstring(fp.read(), selector_handler=selector_handler, strict=strict, debug=debug) |
def from_yamlstring(cls, s, selector_handler=None, strict=False, debug=False):
"""
Create a Parselet instance from s (str) containing
the Parsley script as YAML
>>> import parslepy
>>> parsley_string = '''---
title: h1
link: a @href
'''
>>> p = parslepy.Parselet.from_yamlstring(parsley_string)
>>> type(p)
<class 'parslepy.base.Parselet'>
>>>
:param string s: a Parsley script as a YAML string
:rtype: :class:`.Parselet`
Other arguments: same as for :class:`.Parselet` constructor
"""
import yaml
return cls(yaml.load(s), selector_handler=selector_handler, strict=strict, debug=debug) |
def from_jsonstring(cls, s, selector_handler=None, strict=False, debug=False):
"""
Create a Parselet instance from s (str) containing
the Parsley script as JSON
>>> import parslepy
>>> parsley_string = '{ "title": "h1", "link": "a @href"}'
>>> p = parslepy.Parselet.from_jsonstring(parsley_string)
>>> type(p)
<class 'parslepy.base.Parselet'>
>>>
:param string s: a Parsley script as a JSON string
:rtype: :class:`.Parselet`
Other arguments: same as for :class:`.Parselet` constructor
"""
return cls._from_jsonlines(s.split("\n"),
selector_handler=selector_handler, strict=strict, debug=debug) |
def _from_jsonlines(cls, lines, selector_handler=None, strict=False, debug=False):
"""
Interpret input lines as a JSON Parsley script.
Python-style comment lines are skipped.
"""
return cls(json.loads(
"\n".join([l for l in lines if not cls.REGEX_COMMENT_LINE.match(l)])
), selector_handler=selector_handler, strict=strict, debug=debug) |
def parse(self, fp, parser=None, context=None):
"""
Parse an HTML or XML document and
return the extracted object following the Parsley rules given at instantiation.
:param fp: file-like object containing an HTML or XML document, or URL or filename
:param parser: *lxml.etree._FeedParser* instance (optional); defaults to lxml.etree.HTMLParser()
:param context: user-supplied context that will be passed to custom XPath extensions (as first argument)
:rtype: Python :class:`dict` object with mapped extracted content
:raises: :class:`.NonMatchingNonOptionalKey`
To parse from a string, use the :meth:`~base.Parselet.parse_fromstring` method instead.
Note that the fp parameter is passed directly
to `lxml.etree.parse <http://lxml.de/api/lxml.etree-module.html#parse>`_,
so you can also give it an URL, and lxml will download it for you.
(Also see `<http://lxml.de/tutorial.html#the-parse-function>`_.)
"""
if parser is None:
parser = lxml.etree.HTMLParser()
doc = lxml.etree.parse(fp, parser=parser).getroot()
return self.extract(doc, context=context) |
def parse_fromstring(self, s, parser=None, context=None):
"""
Parse an HTML or XML document and
return the extracted object following the Parsley rules given at instantiation.
:param string s: an HTML or XML document as a string
:param parser: *lxml.etree._FeedParser* instance (optional); defaults to lxml.etree.HTMLParser()
:param context: user-supplied context that will be passed to custom XPath extensions (as first argument)
:rtype: Python :class:`dict` object with mapped extracted content
:raises: :class:`.NonMatchingNonOptionalKey`
"""
if parser is None:
parser = lxml.etree.HTMLParser()
doc = lxml.etree.fromstring(s, parser=parser)
return self.extract(doc, context=context) |
def compile(self):
"""
Build the abstract Parsley tree starting from the root node
(recursive)
"""
if not isinstance(self.parselet, dict):
raise ValueError("Parselet must be a dict of some sort. Or use .from_jsonstring(), " \
".from_jsonfile(), .from_yamlstring(), or .from_yamlfile()")
self.parselet_tree = self._compile(self.parselet) |
def _compile(self, parselet_node, level=0):
"""
Build part of the abstract Parsley extraction tree
Arguments:
parselet_node (dict) -- part of the Parsley tree to compile
(can be the root dict/node)
level (int) -- current recursion depth (used for debug)
"""
if self.DEBUG:
debug_offset = "".join([" " for x in range(level)])
if self.DEBUG:
print(debug_offset, "%s::compile(%s)" % (
self.__class__.__name__, parselet_node))
if isinstance(parselet_node, dict):
parselet_tree = ParsleyNode()
for k, v in list(parselet_node.items()):
# we parse the key raw elements but without much
# interpretation (which is done by the SelectorHandler)
try:
m = self.REGEX_PARSELET_KEY.match(k)
if not m:
if self.DEBUG:
print(debug_offset, "could not parse key", k)
raise InvalidKeySyntax(k)
except:
raise InvalidKeySyntax("Key %s is not valid" % k)
key = m.group('key')
# by default, fields are required
key_required = True
operator = m.group('operator')
if operator == '?':
key_required = False
# FIXME: "!" operator not supported (complete array)
scope = m.group('scope')
# example: get list of H3 tags
# { "titles": ["h3"] }
# FIXME: should we support multiple selectors in list?
# e.g. { "titles": ["h1", "h2", "h3", "h4"] }
if isinstance(v, (list, tuple)):
v = v[0]
iterate = True
else:
iterate = False
# keys in the abstract Parsley trees are of type `ParsleyContext`
try:
parsley_context = ParsleyContext(
key,
operator=operator,
required=key_required,
scope=self.selector_handler.make(scope) if scope else None,
iterate=iterate)
except SyntaxError:
if self.DEBUG:
print("Invalid scope:", k, scope)
raise
if self.DEBUG:
print(debug_offset, "current context:", parsley_context)
# go deeper in the Parsley tree...
try:
child_tree = self._compile(v, level=level+1)
except SyntaxError:
if self.DEBUG:
print("Invalid value: ", v)
raise
except:
raise
if self.DEBUG:
print(debug_offset, "child tree:", child_tree)
parselet_tree[parsley_context] = child_tree
return parselet_tree
# a string leaf should match some kind of selector,
# let the selector handler deal with it
elif isstr(parselet_node):
return self.selector_handler.make(parselet_node)
else:
raise ValueError(
"Unsupported type(%s) for Parselet node <%s>" % (
type(parselet_node), parselet_node)) |
def extract(self, document, context=None):
"""
Extract values as a dict object following the structure
of the Parsley script (recursive)
:param document: lxml-parsed document
:param context: user-supplied context that will be passed to custom XPath extensions (as first argument)
:rtype: Python *dict* object with mapped extracted content
:raises: :class:`.NonMatchingNonOptionalKey`
>>> import lxml.etree
>>> import parslepy
>>> html = '''
... <!DOCTYPE html>
... <html>
... <head>
... <title>Sample document to test parslepy</title>
... <meta http-equiv="content-type" content="text/html;charset=utf-8" />
... </head>
... <body>
... <h1 id="main">What’s new</h1>
... <ul>
... <li class="newsitem"><a href="/article-001.html">This is the first article</a></li>
... <li class="newsitem"><a href="/article-002.html">A second report on something</a></li>
... <li class="newsitem"><a href="/article-003.html">Python is great!</a> <span class="fresh">New!</span></li>
... </ul>
... </body>
... </html>
... '''
>>> html_parser = lxml.etree.HTMLParser()
>>> doc = lxml.etree.fromstring(html, parser=html_parser)
>>> doc
<Element html at 0x7f5fb1fce9b0>
>>> rules = {
... "headingcss": "#main",
... "headingxpath": "//h1[@id='main']"
... }
>>> p = parslepy.Parselet(rules)
>>> p.extract(doc)
{'headingcss': u'What\u2019s new', 'headingxpath': u'What\u2019s new'}
"""
if context:
self.selector_handler.context = context
return self._extract(self.parselet_tree, document) |
def _extract(self, parselet_node, document, level=0):
"""
Extract values at this document node level
using the parselet_node instructions:
- go deeper in tree
- or call selector handler in case of a terminal selector leaf
"""
if self.DEBUG:
debug_offset = "".join([" " for x in range(level)])
# we must go deeper in the Parsley tree
if isinstance(parselet_node, ParsleyNode):
# default output
output = {}
# process all children
for ctx, v in list(parselet_node.items()):
if self.DEBUG:
print(debug_offset, "context:", ctx, v)
extracted=None
try:
# scoped-extraction:
# extraction should be done deeper in the document tree
if ctx.scope:
extracted = []
selected = self.selector_handler.select(document, ctx.scope)
if selected:
for i, elem in enumerate(selected, start=1):
parse_result = self._extract(v, elem, level=level+1)
if isinstance(parse_result, (list, tuple)):
extracted.extend(parse_result)
else:
extracted.append(parse_result)
# if we're not in an array,
# we only care about the first iteration
if not ctx.iterate:
break
if self.DEBUG:
print(debug_offset,
"parsed %d elements in scope (%s)" % (i, ctx.scope))
# local extraction
else:
extracted = self._extract(v, document, level=level+1)
except NonMatchingNonOptionalKey as e:
if self.DEBUG:
print(debug_offset, str(e))
if not ctx.required or not self.STRICT_MODE:
output[ctx.key] = {}
else:
raise
except Exception as e:
if self.DEBUG:
print(str(e))
raise
# replace empty-list result when not looping by empty dict
if ( isinstance(extracted, list)
and not extracted
and not ctx.iterate):
extracted = {}
# keep only the first element if we're not in an array
if self.KEEP_ONLY_FIRST_ELEMENT_IF_LIST:
try:
if ( isinstance(extracted, list)
and extracted
and not ctx.iterate):
if self.DEBUG:
print(debug_offset, "keep only 1st element")
extracted = extracted[0]
except Exception as e:
if self.DEBUG:
print(str(e))
print(debug_offset, "error getting first element")
# extraction for a required key gave nothing
if ( self.STRICT_MODE
and ctx.required
and extracted is None):
raise NonMatchingNonOptionalKey(
'key "%s" is required but yield nothing\nCurrent path: %s/(%s)\n' % (
ctx.key,
document.getroottree().getpath(document),v
)
)
# special key to extract a selector-defined level deeper
# but still output at same level
# this can be useful for breaking up long selectors
# or when you need to mix XPath and CSS selectors
# e.g.
# {
# "something(#content div.main)": {
# "--(.//div[re:test(@class, 'style\d{3,6}')])": {
# "title": "h1",
# "subtitle": "h2"
# }
# }
# }
#
if ctx.key == self.SPECIAL_LEVEL_KEY:
if isinstance(extracted, dict):
output.update(extracted)
elif isinstance(extracted, list):
if extracted:
raise RuntimeError(
"could not merge non-empty list at higher level")
else:
#empty list, dont bother?
pass
else:
# required keys are handled above
if extracted is not None:
output[ctx.key] = extracted
else:
# do not add this optional key/value pair in the output
pass
return output
# a leaf/Selector node
elif isinstance(parselet_node, Selector):
return self.selector_handler.extract(document, parselet_node)
else:
# FIXME: can this happen?
# if selector handler returned None at compile time,
# probably yes
pass |
def auto_constraints(self, component=None):
"""
Use CLDF reference properties to implicitly create foreign key constraints.
:param component: A Table object or `None`.
"""
if not component:
for table in self.tables:
self.auto_constraints(table)
return
if not component.tableSchema.primaryKey:
idcol = component.get_column(term_uri('id'))
if idcol:
component.tableSchema.primaryKey = [idcol.name]
self._auto_foreign_keys(component)
try:
table_type = self.get_tabletype(component)
except ValueError:
# New component is not a known CLDF term, so cannot add components
# automatically. TODO: We might be able to infer some based on
# `xxxReference` column properties?
return
# auto-add foreign keys targeting the new component:
for table in self.tables:
self._auto_foreign_keys(table, component=component, table_type=table_type) |
def url_builder(self, endpoint, *, root=None, params=None, url_params=None):
"""Create a URL for the specified endpoint.
Arguments:
endpoint (:py:class:`str`): The API endpoint to access.
root: (:py:class:`str`, optional): The root URL for the
service API.
params: (:py:class:`dict`, optional): The values to format
into the created URL (defaults to ``None``).
url_params: (:py:class:`dict`, optional): Parameters to add
to the end of the URL (defaults to ``None``).
Returns:
:py:class:`str`: The resulting URL.
"""
if root is None:
root = self.ROOT
scheme, netloc, path, _, _ = urlsplit(root)
return urlunsplit((
scheme,
netloc,
urljoin(path, endpoint),
urlencode(url_params or {}),
'',
)).format(**params or {}) |
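# Hedged usage sketch for the url_builder pattern above: build_url() is a standalone
# stand-in (self.ROOT is not available here) showing how urlsplit, urljoin and
# urlencode compose the root URL, the endpoint template and the query parameters.
from urllib.parse import urlencode, urljoin, urlsplit, urlunsplit

def build_url(root, endpoint, params=None, url_params=None):
    scheme, netloc, path, _, _ = urlsplit(root)
    return urlunsplit((
        scheme,
        netloc,
        urljoin(path, endpoint),
        urlencode(url_params or {}),
        '',
    )).format(**params or {})

assert build_url('https://api.example.com/v1/', 'users/{user_id}',
                 params={'user_id': 42}, url_params={'limit': 10}) == \
    'https://api.example.com/v1/users/42?limit=10'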
def raise_for_status(response):
"""Raise an appropriate error for a given response.
Arguments:
response (:py:class:`aiohttp.ClientResponse`): The API response.
Raises:
:py:class:`aiohttp.web_exceptions.HTTPException`: The appropriate
error for the response's status.
"""
for err_name in web_exceptions.__all__:
err = getattr(web_exceptions, err_name)
if err.status_code == response.status:
payload = dict(
headers=response.headers,
reason=response.reason,
)
if issubclass(err, web_exceptions._HTTPMove): # pylint: disable=protected-access
raise err(response.headers['Location'], **payload)
raise err(**payload) |
def truncate(text, max_len=350, end='...'):
"""Truncate the supplied text for display.
Arguments:
text (:py:class:`str`): The text to truncate.
max_len (:py:class:`int`, optional): The maximum length of the
text before truncation (defaults to 350 characters).
end (:py:class:`str`, optional): The ending to use to show that
the text was truncated (defaults to ``'...'``).
Returns:
:py:class:`str`: The truncated text.
"""
if len(text) <= max_len:
return text
return text[:max_len].rsplit(' ', maxsplit=1)[0] + end |
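# Quick hedged check of truncate() above (assumed to be in scope): the cut lands on a
# word boundary and the configurable ending is appended; the sample text is made up.
sample = 'The quick brown fox jumps over the lazy dog near the riverbank'
assert truncate(sample, max_len=18) == 'The quick brown...'
assert truncate('short', max_len=18) == 'short'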
def add(self, *entries):
"""
Add a source, either specified by glottolog reference id, or as bibtex record.
"""
for entry in entries:
if isinstance(entry, string_types):
self._add_entries(database.parse_string(entry, bib_format='bibtex'))
else:
self._add_entries(entry) |
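# Hedged example of the bibtex-string branch above. It assumes `database` refers to
# pybtex.database (whose parse_string(value, bib_format=...) this call matches); the
# entry key and fields below are made up for illustration.
from pybtex.database import parse_string

bib = parse_string('@book{meier2005, title = {A Grammar}, author = {Meier, Hans}}',
                   bib_format='bibtex')
assert 'meier2005' in bib.entries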
def primary_avatar(user, size=AVATAR_DEFAULT_SIZE):
"""
This tag tries to get the default avatar for a user without doing any db
    requests. It achieves this by linking to a special view that will do all the
work for us. If that special view is then cached by a CDN for instance,
we will avoid many db calls.
"""
alt = unicode(user)
url = reverse('avatar_render_primary', kwargs={'user' : user, 'size' : size})
    return """<img src="%s" alt="%s" />""" % (url, alt) |
def get_cache_key(user_or_username, size, prefix):
"""
    Returns a cache key consisting of a username and image size.
"""
if isinstance(user_or_username, get_user_model()):
user_or_username = user_or_username.username
return '%s_%s_%s' % (prefix, user_or_username, size) |
def cache_result(func):
"""
Decorator to cache the result of functions that take a ``user`` and a
``size`` value.
"""
def cache_set(key, value):
cache.set(key, value, AVATAR_CACHE_TIMEOUT)
return value
def cached_func(user, size):
prefix = func.__name__
cached_funcs.add(prefix)
key = get_cache_key(user, size, prefix=prefix)
return cache.get(key) or cache_set(key, func(user, size))
return cached_func |
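# Standalone sketch of the cache_result idea using a plain dict instead of Django's
# cache framework; _DEMO_CACHE, cache_result_demo and avatar_url are illustrative
# names, not part of the original module.
_DEMO_CACHE = {}

def cache_result_demo(func):
    def cached_func(user, size):
        key = '%s_%s_%s' % (func.__name__, user, size)
        if key not in _DEMO_CACHE:
            _DEMO_CACHE[key] = func(user, size)
        return _DEMO_CACHE[key]
    return cached_func

@cache_result_demo
def avatar_url(user, size):
    # stand-in for an expensive lookup or image render
    return '/avatars/%s/%s.png' % (user, size)

assert avatar_url('alice', 80) == '/avatars/alice/80.png'  # computed and cached
assert 'avatar_url_alice_80' in _DEMO_CACHE                # later calls hit the cache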
def invalidate_cache(user, size=None):
"""
    Function to be called when saving or changing a user's avatars.
"""
sizes = set(AUTO_GENERATE_AVATAR_SIZES)
if size is not None:
sizes.add(size)
for prefix in cached_funcs:
for size in sizes:
cache.delete(get_cache_key(user, size, prefix)) |
def get_field_for_proxy(pref_proxy):
"""Returns a field object instance for a given PrefProxy object.
:param PrefProxy pref_proxy:
:rtype: models.Field
"""
field = {
bool: models.BooleanField,
int: models.IntegerField,
float: models.FloatField,
datetime: models.DateTimeField,
}.get(type(pref_proxy.default), models.TextField)()
update_field_from_proxy(field, pref_proxy)
return field |
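# Pure-Python illustration of the type-to-field dispatch used above; the Django field
# classes are replaced by plain strings so the snippet runs without a configured
# Django project, and field_name_for_default is an illustrative helper.
from datetime import datetime

def field_name_for_default(default):
    # type() rather than isinstance() keeps bool defaults from matching int
    return {
        bool: 'BooleanField',
        int: 'IntegerField',
        float: 'FloatField',
        datetime: 'DateTimeField',
    }.get(type(default), 'TextField')

assert field_name_for_default(True) == 'BooleanField'
assert field_name_for_default(3.14) == 'FloatField'
assert field_name_for_default('hello') == 'TextField'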
def update_field_from_proxy(field_obj, pref_proxy):
"""Updates field object with data from a PrefProxy object.
:param models.Field field_obj:
:param PrefProxy pref_proxy:
"""
attr_names = ('verbose_name', 'help_text', 'default')
for attr_name in attr_names:
setattr(field_obj, attr_name, getattr(pref_proxy, attr_name)) |
def get_pref_model_class(app, prefs, get_prefs_func):
    """Returns a preferences model class dynamically created for a given app, or None on conflict."""
module = '%s.%s' % (app, PREFS_MODULE_NAME)
model_dict = {
'_prefs_app': app,
'_get_prefs': staticmethod(get_prefs_func),
'__module__': module,
'Meta': type('Meta', (models.options.Options,), {
'verbose_name': _('Preference'),
'verbose_name_plural': _('Preferences'),
'app_label': app,
'managed': False,
})
}
for field_name, val_proxy in prefs.items():
model_dict[field_name] = val_proxy.field
model = type('Preferences', (models.Model,), model_dict)
def fake_save_base(self, *args, **kwargs):
updated_prefs = {
f.name: getattr(self, f.name) for f in self._meta.fields if not isinstance(f, models.fields.AutoField)
}
app_prefs = self._get_prefs(self._prefs_app)
for pref in app_prefs.keys():
if pref in updated_prefs:
app_prefs[pref].db_value = updated_prefs[pref]
self.pk = self._prefs_app # Make Django 1.7 happy.
prefs_save.send(sender=self, app=self._prefs_app, updated_prefs=updated_prefs)
return True
model.save_base = fake_save_base
return model |
def get_frame_locals(stepback=0):
"""Returns locals dictionary from a given frame.
:param int stepback:
:rtype: dict
"""
with Frame(stepback=stepback) as frame:
locals_dict = frame.f_locals
return locals_dict |
def traverse_local_prefs(stepback=0):
    """Generator to walk through variables considered preferences
    in the locals dict of a given frame.
:param int stepback:
:rtype: tuple
"""
locals_dict = get_frame_locals(stepback+1)
for k in locals_dict:
if not k.startswith('_') and k.upper() == k:
yield k, locals_dict |
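# Small standalone illustration of the naming convention traverse_local_prefs relies
# on: uppercase names without a leading underscore are treated as preferences.
# pref_names() and the sample dict are illustrative only.
def pref_names(namespace):
    return [k for k in namespace if not k.startswith('_') and k.upper() == k]

sample_locals = {'MY_OPTION': 1, '_INTERNAL': 2, 'regular_var': 3, 'TIMEOUT': 30}
assert sorted(pref_names(sample_locals)) == ['MY_OPTION', 'TIMEOUT']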
def import_prefs():
"""Imports preferences modules from packages (apps) and project root."""
# settings.py locals if autodiscover_siteprefs() is in urls.py
settings_locals = get_frame_locals(3)
if 'self' not in settings_locals: # If not SiteprefsConfig.ready()
# Try to import project-wide prefs.
project_package = settings_locals['__package__'] # Expected project layout introduced in Django 1.4
if not project_package:
# Fallback to old layout.
project_package = os.path.split(os.path.dirname(settings_locals['__file__']))[-1]
import_module(project_package, PREFS_MODULE_NAME)
import_project_modules(PREFS_MODULE_NAME) |
def print_file_info():
"""Prints file details in the current directory"""
tpl = TableLogger(columns='file,created,modified,size')
for f in os.listdir('.'):
size = os.stat(f).st_size
date_created = datetime.fromtimestamp(os.path.getctime(f))
date_modified = datetime.fromtimestamp(os.path.getmtime(f))
tpl(f, date_created, date_modified, size) |
def _bind_args(sig, param_matchers, args, kwargs):
'''
Attempt to bind the args to the type signature. First try to just bind
to the signature, then ensure that all arguments match the parameter
types.
'''
#Bind to signature. May throw its own TypeError
bound = sig.bind(*args, **kwargs)
if not all(param_matcher(bound.arguments[param_name])
for param_name, param_matcher in param_matchers):
raise TypeError
return bound |
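# Standalone sketch of the bind-then-check idea behind _bind_args, using only the
# standard inspect module; check_types() and greet() are illustrative, not the
# original API.
from inspect import signature

def check_types(func, args, kwargs):
    sig = signature(func)
    bound = sig.bind(*args, **kwargs)   # raises TypeError if the arity is wrong
    for name, value in bound.arguments.items():
        annotation = sig.parameters[name].annotation
        if annotation is sig.empty or not isinstance(annotation, type):
            continue                    # unannotated/non-type annotations are not checked here
        if not isinstance(value, annotation):
            raise TypeError('%s should be %s' % (name, annotation.__name__))
    return bound

def greet(name: str, times: int):
    return ' '.join(['hello %s' % name] * times)

check_types(greet, ('world', 2), {})       # binds and type-checks fine
# check_types(greet, ('world', 'two'), {}) # would raise TypeError: times should be int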
def _make_param_matcher(annotation, kind=None):
'''
For a given annotation, return a function which, when called on a
function argument, returns true if that argument matches the annotation.
If the annotation is a type, it calls isinstance; if it's a callable,
it calls it on the object; otherwise, it performs a value comparison.
If the parameter is variadic (*args) and the annotation is a type, the
matcher will attempt to match each of the arguments in args
'''
if isinstance(annotation, type) or (
isinstance(annotation, tuple) and
all(isinstance(a, type) for a in annotation)):
if kind is Parameter.VAR_POSITIONAL:
return (lambda args: all(isinstance(x, annotation) for x in args))
else:
return (lambda x: isinstance(x, annotation))
elif callable(annotation):
return annotation
else:
return (lambda x: x == annotation) |
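# Illustrative checks of the three matcher behaviours described in the docstring
# (type -> isinstance, callable -> predicate, anything else -> equality);
# make_matcher() is a simplified local stand-in, not the original classmethod.
def make_matcher(annotation):
    if isinstance(annotation, type):
        return lambda x: isinstance(x, annotation)
    if callable(annotation):
        return annotation
    return lambda x: x == annotation

assert make_matcher(int)(5)                # type annotation: isinstance check
assert not make_matcher(int)('5')
assert make_matcher(lambda x: x > 0)(3)    # callable annotation: used as a predicate
assert make_matcher('exact')('exact')      # literal annotation: value comparison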
def _make_all_matchers(cls, parameters):
'''
For every parameter, create a matcher if the parameter has an
annotation.
'''
for name, param in parameters:
annotation = param.annotation
if annotation is not Parameter.empty:
yield name, cls._make_param_matcher(annotation, param.kind) |
def _make_dispatch(cls, func):
'''
Create a dispatch pair for func- a tuple of (bind_args, func), where
bind_args is a function that, when called with (args, kwargs), attempts
to bind those args to the type signature of func, or else raise a
TypeError
'''
sig = signature(func)
matchers = tuple(cls._make_all_matchers(sig.parameters.items()))
return (partial(cls._bind_args, sig, matchers), func) |
def _make_wrapper(self, func):
'''
Makes a wrapper function that executes a dispatch call for func. The
wrapper has the dispatch and dispatch_first attributes, so that
additional overloads can be added to the group.
'''
#TODO: consider using a class to make attribute forwarding easier.
#TODO: consider using simply another DispatchGroup, with self.callees
# assigned by reference to the original callees.
@wraps(func)
def executor(*args, **kwargs):
return self.execute(args, kwargs)
executor.dispatch = self.dispatch
executor.dispatch_first = self.dispatch_first
executor.func = func
executor.lookup = self.lookup
return executor |
def dispatch(self, func):
'''
Adds the decorated function to this dispatch.
'''
self.callees.append(self._make_dispatch(func))
return self._make_wrapper(func) |
def dispatch_first(self, func):
'''
Adds the decorated function to this dispatch, at the FRONT of the order.
Useful for allowing third parties to add overloaded functionality
to be executed before default functionality.
'''
self.callees.appendleft(self._make_dispatch(func))
return self._make_wrapper(func) |
def lookup_explicit(self, args, kwargs):
'''
Lookup the function that will be called with a given set of arguments,
or raise DispatchError. Requires explicit tuple/dict grouping of
arguments (see DispatchGroup.lookup for a function-like interface).
'''
for bind_args, callee in self.callees:
try:
#bind to the signature and types. Raises TypeError on failure
bind_args(args, kwargs)
except TypeError:
#TypeError: failed to bind arguments. Try the next dispatch
continue
#All the parameters matched. Return the function and args
return callee
else:
#Nothing was able to bind. Error.
raise DispatchError(args, kwargs, self) |
def execute(self, args, kwargs):
'''
Dispatch a call. Call the first function whose type signature matches
    the arguments.
'''
return self.lookup_explicit(args, kwargs)(*args, **kwargs) |
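# Self-contained mini version of the annotation-based overloading implemented by the
# methods above, independent of the DispatchGroup class (whose constructor is not
# shown here); MiniDispatch, _matches and describe are illustrative names.
from inspect import signature

def _matches(param, value):
    ann = param.annotation
    if ann is param.empty or not isinstance(ann, type):
        return True                  # unannotated or non-type annotations match anything
    return isinstance(value, ann)

class MiniDispatch:
    def __init__(self):
        self.callees = []

    def dispatch(self, func):
        self.callees.append(func)
        def wrapper(*args, **kwargs):
            for candidate in self.callees:
                sig = signature(candidate)
                try:
                    bound = sig.bind(*args, **kwargs)
                except TypeError:
                    continue         # arity mismatch: try the next overload
                if all(_matches(sig.parameters[name], value)
                       for name, value in bound.arguments.items()):
                    return candidate(*args, **kwargs)
            raise TypeError('no overload matched')
        return wrapper

group = MiniDispatch()

@group.dispatch
def describe(x: int):
    return 'int: %d' % x

@group.dispatch
def describe(x: str):
    return 'str: %s' % x

assert describe(3) == 'int: 3'
assert describe('hi') == 'str: hi'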
def setup_formatters(self, *args):
"""Setup formatters by observing the first row.
Args:
*args: row cells
"""
formatters = []
col_offset = 0
# initialize formatters for row-id, timestamp and time-diff columns
if self.rownum:
formatters.append(fmt.RowNumberFormatter.setup(0))
col_offset += 1
if self.timestamp:
formatters.append(fmt.DatetimeFormatter.setup(
datetime.datetime.now(),
fmt='{:%Y-%m-%d %H:%M:%S.%f}'.format,
col_width=26))
col_offset += 1
if self.time_diff:
formatters.append(fmt.TimeDeltaFormatter.setup(0))
col_offset += 1
# initialize formatters for user-defined columns
for coli, value in enumerate(args):
fmt_class = type2fmt.get(type(value), fmt.GenericFormatter)
kwargs = {}
# set column width
if self.default_colwidth is not None:
kwargs['col_width'] = self.default_colwidth
if coli in self.column_widths:
kwargs['col_width'] = self.column_widths[coli]
elif self.columns and self.columns[coli + col_offset] in self.column_widths:
kwargs['col_width'] = self.column_widths[self.columns[coli + col_offset]]
# set formatter function
if fmt_class == fmt.FloatFormatter and self.float_format is not None:
kwargs['fmt'] = self.float_format
if coli in self.column_formatters:
kwargs['fmt'] = self.column_formatters[coli]
elif self.columns and self.columns[coli + col_offset] in self.column_formatters:
kwargs['fmt'] = self.column_formatters[self.columns[coli + col_offset]]
formatter = fmt_class.setup(value, **kwargs)
formatters.append(formatter)
self.formatters = formatters |
def setup(self, *args):
"""Do preparations before printing the first row
Args:
*args: first row cells
"""
self.setup_formatters(*args)
if self.columns:
self.print_header()
elif self.border and not self.csv:
self.print_line(self.make_horizontal_border()) |
def csv_format(self, row):
"""Converts row values into a csv line
Args:
row: a list of row cells as unicode
Returns:
csv_line (unicode)
"""
if PY2:
buf = io.BytesIO()
csvwriter = csv.writer(buf)
csvwriter.writerow([c.strip().encode(self.encoding) for c in row])
csv_line = buf.getvalue().decode(self.encoding).rstrip()
else:
buf = io.StringIO()
csvwriter = csv.writer(buf)
csvwriter.writerow([c.strip() for c in row])
csv_line = buf.getvalue().rstrip()
return csv_line |
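# Quick standalone check of the Python 3 branch above: csv.writer handles quoting and
# escaping, io.StringIO captures the line without touching a file.
import csv
import io

buf = io.StringIO()
csv.writer(buf).writerow(['spam', 'says "hi"', '42'])
assert buf.getvalue().rstrip() == 'spam,"says ""hi""",42'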
def convertShpToExtend(pathToShp):
"""
    Reprojects the shapefile geometry to WGS84 and returns its extent as [N, W, S, E].
"""
driver = ogr.GetDriverByName('ESRI Shapefile')
dataset = driver.Open(pathToShp)
if dataset is not None:
# from Layer
layer = dataset.GetLayer()
spatialRef = layer.GetSpatialRef()
# from Geometry
feature = layer.GetNextFeature()
geom = feature.GetGeometryRef()
spatialRef = geom.GetSpatialReference()
#WGS84
outSpatialRef = osr.SpatialReference()
outSpatialRef.ImportFromEPSG(4326)
coordTrans = osr.CoordinateTransformation(spatialRef, outSpatialRef)
env = geom.GetEnvelope()
pointMAX = ogr.Geometry(ogr.wkbPoint)
pointMAX.AddPoint(env[1], env[3])
pointMAX.Transform(coordTrans)
pointMIN = ogr.Geometry(ogr.wkbPoint)
pointMIN.AddPoint(env[0], env[2])
pointMIN.Transform(coordTrans)
return [pointMAX.GetPoint()[1],pointMIN.GetPoint()[0],pointMIN.GetPoint()[1],pointMAX.GetPoint()[0]]
else:
        exit("Shapefile not found. Please verify the path to the shapefile.")
def create_request_gfs(dateStart,dateEnd,stepList,levelList,grid,extent,paramList,typeData):
"""
    Builds the list of request URLs for downloading GFS data from the NOMADS filter service.
    INPUTS:\n
    -dateStart, dateEnd : dates in year-month-day format (must be within the last 14 days)\n
    -stepList : list of model cycle hours, e.g. [0, 6, 12, 18]\n
    -levelList : list of level names\n
    -grid : grid resolution in degrees\n
    -extent : list of coordinates in [N, W, S, E] order\n
    -paramList : list of GRIB variable names, e.g. ['TMP', 'PRES']\n
    -typeData : 'analyse', 'forecast' or 'cycleforecast'\n
"""
URLlist=[]
    # check which parameters are available for the requested typeData
listForcastSurface=['GUST','HINDEX','PRES','HGT','TMP','WEASD','SNOD','CPOFP','WILT','FLDCP','SUNSD','LFTX','CAPE','CIN','4LFTX','HPBL','LAND']
if (0 not in [int(x) for x in stepList]):
listForcastSurface=listForcastSurface+['PEVPR','CPRAT','PRATE','APCP','ACPCP','WATR','CSNOW','CICEP','CFPER','CRAIN','LHTFL','SHTFL','SHTFL','GFLUX','UFLX','VFLX','U-GWD','V-GWD','DSWRF','DLWRF','ULWRF','USWRF','ALBDO']
listAnalyseSurface=['HGT','PRES','LFTX','CAPE','CIN','4LFTX']
if typeData == 'analyse' and all([x in listAnalyseSurface for x in paramList]):
typeData= 'analyse'
validChoice = None
prbParameters = None
else:
if all([x in listForcastSurface for x in paramList]) and typeData != 'cycleforecast':
if typeData=='analyse':
typeData= 'forecast'
validChoice = typeData
else:
validChoice = None
indexParameters=[i for i, elem in enumerate([x in listAnalyseSurface for x in paramList], 1) if not elem]
prbParameters=[]
for i in indexParameters:
prbParameters.append(paramList[i-1])
else:
if typeData != 'cycleforecast':
typeData= 'cycleforecast'
validChoice = typeData
else:
validChoice = None
indexParameters=[i for i, elem in enumerate([x in listAnalyseSurface for x in paramList], 1) if not elem]
prbParameters=[]
for i in indexParameters:
prbParameters.append(paramList[i-1])
    # check that the requested dates/cycles are available (14-day window)
today=date.today()
lastData = today - timedelta(days=14)
if dateStart < lastData or dateEnd > today :
exit('date are not in 14 days range from today' )
else:
        # for each requested day
nbDays=(dateEnd-dateStart).days+1
for i in range(0,nbDays):
            # for today, keep only cycles that should already be published
if dateStart + timedelta(days=i) == today:
maxT=datetime.now().hour-5
timeListCorr=[ x for x in stepList if x<maxT ]
else:
timeListCorr=stepList
for t in timeListCorr:
URL='http://nomads.ncep.noaa.gov/cgi-bin/filter_gfs_'
#grid
URL=URL+"{:.2f}".format(grid).replace('.','p')+'.pl?file=gfs.'
                # cycle time (limited above for today to account for publication delay)
URL=URL+'t'+str(t).zfill(2)+'z.'
if (grid==0.5):
URL=URL+'pgrb2full.'
else:
URL=URL+'pgrb2.'
URL=URL+"{:.2f}".format(grid).replace('.','p')+'.'
if typeData=='cycleforecast':
URL=URL+'f006&lev_'
elif typeData=='forecast':
URL=URL+'f000&lev_'
else:
URL=URL+'anl&lev_'
URL=URL+"=on&lev_".join(levelList)+"=on&var_"
URL=URL+"=on&var_".join(paramList)+"=on&subregion=&"
URL=URL+"leftlon="+str(round(float(extent[1])-0.05,1))+"&rightlon="+str(round(float(extent[3])+0.05,1))+"&toplat="+str(round(float(extent[0])+0.5,1))+"&bottomlat="+str(round(float(extent[2])-0.5,1))
URL=URL+"&dir=%2Fgfs."+"{:%Y%m%d}".format(dateStart+timedelta(days=i))+str(t).zfill(2)
URLlist.append(URL)
return (URLlist,validChoice,prbParameters) |
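# Hedged call sketch for create_request_gfs() above (assumed in scope together with
# its datetime imports). The extent, levels and variables are illustrative; the dates
# must stay inside the 14-day window the function enforces.
from datetime import date, timedelta

urls, valid_choice, prb_parameters = create_request_gfs(
    dateStart=date.today() - timedelta(days=2),
    dateEnd=date.today() - timedelta(days=1),
    stepList=[0, 6, 12, 18],           # model cycle hours
    levelList=['surface'],
    grid=0.25,
    extent=[45.0, -5.0, 41.0, 9.0],    # [N, W, S, E]
    paramList=['TMP', 'PRES'],
    typeData='forecast',
)
# urls holds one NOMADS filter URL per requested day and cycle.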
def convertGribToTiff(listeFile,listParam,listLevel,liststep,grid,startDate,endDate,outFolder):
    """Converts downloaded GRIB files to GeoTIFF, one output file per variable/level."""
dicoValues={}
for l in listeFile:
grbs = pygrib.open(l)
grbs.seek(0)
index=1
for j in range(len(listLevel),0,-1):
for i in range(len(listParam)-1,-1,-1):
grb = grbs[index]
p=grb.name.replace(' ','_')
if grb.level != 0:
l=str(grb.level)+'_'+grb.typeOfLevel
else:
l=grb.typeOfLevel
if p+'_'+l not in dicoValues.keys():
dicoValues[p+'_'+l]=[]
dicoValues[p+'_'+l].append(grb.values)
shape=grb.values.shape
lat,lon=grb.latlons()
geoparam=(lon.min(),lat.max(),grid,grid)
index+= 1
nbJour=(endDate-startDate).days+1
    # pad with NaN arrays when some files are missing
for s in range(0, (len(liststep)*nbJour-len(listeFile))):
for k in dicoValues.keys():
dicoValues[k].append(np.full(shape, np.nan))
    # write one output file per variable
    var_names = list(dicoValues.keys())  # materialize so indexing also works on Python 3
    for i in range(len(var_names)-1,-1,-1):
        values = dicoValues[var_names[i]]
        dictParam = dict((k, values[k]) for k in range(0, len(values)))
        outputImg = outFolder+'/'+var_names[i]+'_'+startDate.strftime('%Y%m%d')+'_'+endDate.strftime('%Y%m%d')+'.tif'
        writeTiffFromDicoArray(dictParam, outputImg, shape, geoparam)
for f in listeFile:
os.remove(f) |
def on_pref_update(*args, **kwargs):
"""Triggered on dynamic preferences model save.
Issues DB save and reread.
"""
Preference.update_prefs(*args, **kwargs)
Preference.read_prefs(get_prefs()) |
def get_app_prefs(app=None):
"""Returns a dictionary with preferences for a certain app/module.
:param str|unicode app:
:rtype: dict
"""
if app is None:
with Frame(stepback=1) as frame:
app = frame.f_globals['__name__'].split('.')[0]
prefs = get_prefs()
if app not in prefs:
return {}
return prefs[app] |
def bind_proxy(values, category=None, field=None, verbose_name=None, help_text='', static=True, readonly=False):
"""Binds PrefProxy objects to module variables used by apps as preferences.
:param list|tuple values: Preference values.
:param str|unicode category: Category name the preference belongs to.
:param Field field: Django model field to represent this preference.
:param str|unicode verbose_name: Field verbose name.
:param str|unicode help_text: Field help text.
:param bool static: Leave this preference static (do not store in DB).
:param bool readonly: Make this field read only.
:rtype: list
"""
addrs = OrderedDict()
depth = 3
for local_name, locals_dict in traverse_local_prefs(depth):
addrs[id(locals_dict[local_name])] = local_name
proxies = []
locals_dict = get_frame_locals(depth)
for value in values: # Try to preserve fields order.
id_val = id(value)
if id_val in addrs:
local_name = addrs[id_val]
local_val = locals_dict[local_name]
if isinstance(local_val, PatchedLocal) and not isinstance(local_val, PrefProxy):
proxy = PrefProxy(
local_name, value.val,
category=category,
field=field,
verbose_name=verbose_name,
help_text=help_text,
static=static,
readonly=readonly,
)
app_name = locals_dict['__name__'].split('.')[-2] # x.y.settings -> y
prefs = get_prefs()
if app_name not in prefs:
prefs[app_name] = OrderedDict()
prefs[app_name][local_name.lower()] = proxy
# Replace original pref variable with a proxy.
locals_dict[local_name] = proxy
proxies.append(proxy)
return proxies |
def register_admin_models(admin_site):
"""Registers dynamically created preferences models for Admin interface.
:param admin.AdminSite admin_site: AdminSite object.
"""
global __MODELS_REGISTRY
prefs = get_prefs()
for app_label, prefs_items in prefs.items():
model_class = get_pref_model_class(app_label, prefs_items, get_app_prefs)
if model_class is not None:
__MODELS_REGISTRY[app_label] = model_class
admin_site.register(model_class, get_pref_model_admin_class(prefs_items)) |
def autodiscover_siteprefs(admin_site=None):
"""Automatically discovers and registers all preferences available in all apps.
:param admin.AdminSite admin_site: Custom AdminSite object.
"""
if admin_site is None:
admin_site = admin.site
# Do not discover anything if called from manage.py (e.g. executing commands from cli).
if 'manage' not in sys.argv[0] or (len(sys.argv) > 1 and sys.argv[1] in MANAGE_SAFE_COMMANDS):
import_prefs()
Preference.read_prefs(get_prefs())
register_admin_models(admin_site) |
def patch_locals(depth=2):
"""Temporarily (see unpatch_locals()) replaces all module variables
considered preferences with PatchedLocal objects, so that every
    variable gets a distinct object identity as returned by id().
"""
for name, locals_dict in traverse_local_prefs(depth):
locals_dict[name] = PatchedLocal(name, locals_dict[name])
get_frame_locals(depth)[__PATCHED_LOCALS_SENTINEL] = True |
def unpatch_locals(depth=3):
"""Restores the original values of module variables
considered preferences if they are still PatchedLocal
and not PrefProxy.
"""
for name, locals_dict in traverse_local_prefs(depth):
if isinstance(locals_dict[name], PatchedLocal):
locals_dict[name] = locals_dict[name].val
del get_frame_locals(depth)[__PATCHED_LOCALS_SENTINEL] |