Search is not available for this dataset
text
stringlengths 75
104k
|
---|
def scanABFfolder(abfFolder):
    """
    Scan an ABF directory (and its swhlab4 subfolder) in a single pass.

    Returns a tuple: (ABF-folder files, SWHLab output files, parent groups).
    """
    assert os.path.isdir(abfFolder)
    filesABF = forwardSlash(sorted(glob.glob(abfFolder + "/*.*")))
    swhDir = abfFolder + "/swhlab4/"
    if os.path.exists(swhDir):
        filesSWH = forwardSlash(sorted(glob.glob(swhDir + "*.*")))
    else:
        filesSWH = []
    return filesABF, filesSWH, getABFgroups(filesABF)
def getParent(abfFname):
    """given an ABF file name, return the ABF ID of its parent."""
    child = os.path.abspath(abfFname)
    siblings = sorted(glob.glob(os.path.dirname(child) + "/*.*"))
    parentID = abfFname  # fall back: the file is its own parent
    for sibling in siblings:
        # an ABF is a parent if a same-named .TIF exists beside it
        isParent = sibling.endswith(".abf") and sibling.replace(".abf", ".TIF") in siblings
        if isParent:
            parentID = os.path.basename(sibling).replace(".abf", "")
        if os.path.basename(child) in sibling:
            break  # reached the file itself; latest parent wins
    return parentID
def getParent2(abfFname, groups):
    """given an ABF and the groups dict, return the ID of its parent."""
    if ".abf" in abfFname:
        # reduce a path/filename down to a bare ABF ID
        abfFname = os.path.basename(abfFname).replace(".abf", "")
    for parentID, children in groups.items():
        if abfFname in children:
            return parentID
    return abfFname  # not found in any group; treat it as its own parent
def getNotesForABF(abfFile):
    """
    Given an ABF, find its parent ID and return that entry of experiment.txt.

    The experiment file is expected to live beside the ABF. Lines beginning
    with '~' mark entries; the entry starting with the parent ID is returned
    with its tab-separated fields converted to one field per line.
    Returns an explanatory string if the file or the entry is missing.
    """
    parent=getParent(abfFile)
    parent=os.path.basename(parent).replace(".abf","")
    expFile=os.path.dirname(abfFile)+"/experiment.txt"
    if not os.path.exists(expFile):
        return "no experiment file"
    with open(expFile) as f:
        raw=f.readlines()
    # NOTE(review): a blank line here would raise IndexError on line[0];
    # assumes the experiment file has no empty lines -- confirm.
    for line in raw:
        if line[0]=='~':
            line=line[1:].strip()
            if line.startswith(parent):
                while "\t\t" in line: # collapse runs of tabs into single delimiters
                    line=line.replace("\t\t","\t")
                line=line.replace("\t","\n")
                return line
    return "experiment.txt found, but didn't contain %s"%parent
def getABFgroups(files):
    """
    Given a list of ALL files (not just ABFs), return a dict[ID]=[ID,ID,ID].

    An ABF is considered a parent if a .TIF with the same basename exists;
    every subsequent ABF (until the next parent) becomes its child.
    This is made to assign children files to parent ABF IDs.
    """
    children=[]
    groups={}
    for fname in sorted(files):
        if fname.endswith(".abf"):
            if fname.replace(".abf",".TIF") in files: #TODO: cap sensitive
                if len(children): # close out the previous group
                    groups[children[0]]=children
                children=[os.path.basename(fname)[:-4]]
            else:
                children.append(os.path.basename(fname)[:-4])
    # BUGFIX: guard the final assignment -- an input with no ABFs (or an
    # empty list) previously raised IndexError on children[0].
    if len(children):
        groups[children[0]]=children
    #print(" -- found %d groups of %d ABFs"%(len(groups),len(files)))
    return groups
def getIDfileDict(files):
    """
    given a list of files, return a dict[ID]=[files].
    This is made to assign children files to parent ABF IDs.
    """
    d = {}
    orphans = []
    # pass 1: every ABF becomes a key (basename without the .abf extension)
    for fname in files:
        if fname.endswith(".abf"):
            d[os.path.basename(fname)[:-4]] = []
    # pass 2: assign every other file to a known ABF ID when possible
    for fname in files:
        if fname.endswith(".html") or fname.endswith(".txt"):
            continue #don't try to assign to an ABF
        base = os.path.basename(fname)
        stem = base.split(".")[0]
        if len(stem) >= 8:
            ID = base[:8] #ABF ID (first several chars)
        else:
            ID = stem #short filename, just not extension
        if ID in d:
            d[ID].append(fname)
        else:
            orphans.append(base)
    if orphans:
        print(" ?? found %d orphan files" % len(orphans))
    return d
def getIDsFromFiles(files):
    """given a path or list of files, return ABF IDs."""
    if type(files) is str:
        # a folder path was given; expand it to its file listing
        files = glob.glob(files + "/*.*")
    IDs = [os.path.basename(f).replace('.' + f.split('.')[-1], '')
           for f in files if f[-4:].lower() == '.abf']
    return sorted(IDs)
def inspectABF(abf=exampleABF,saveToo=False,justPlot=False):
    """
    Plot an overview of an ABF recording.

    May be given an ABF object or filename. Short recordings (<5 min) are
    shown twice (sweep overlay on top, continuous below); longer ones are
    plotted only continuously with a minutes axis. If saveToo, a PNG is
    written beside the ABF; if justPlot, return right after plotting.
    NOTE(review): the default "exampleABF" is defined elsewhere in this module.
    """
    pylab.close('all')
    print(" ~~ inspectABF()")
    if type(abf) is str: # a path was given; load it as an ABF object
        abf=swhlab.ABF(abf)
    swhlab.plot.new(abf,forceNewFigure=True)
    if abf.sweepInterval*abf.sweeps<60*5: #shorter than 5 minutes
        pylab.subplot(211)
        pylab.title("%s [%s]"%(abf.ID,abf.protoComment))
        swhlab.plot.sweep(abf,'all')
        pylab.subplot(212)
        swhlab.plot.sweep(abf,'all',continuous=True)
        swhlab.plot.comments(abf)
    else:
        print(" -- plotting as long recording")
        swhlab.plot.sweep(abf,'all',continuous=True,minutes=True)
        swhlab.plot.comments(abf,minutes=True)
        pylab.title("%s [%s]"%(abf.ID,abf.protoComment))
    swhlab.plot.annotate(abf)
    if justPlot:
        return
    if saveToo:
        path=os.path.split(abf.fname)[0]
        basename=os.path.basename(abf.fname)
        pylab.savefig(os.path.join(path,"_"+basename.replace(".abf",".png")))
    pylab.show()
    return
def ftp_login(folder=None):
    """
    Return a logged-in ftplib.FTP object (optionally cd'd into folder).

    Credentials are read from passwd.txt three directories above this file
    (username on line 1, password on line 2); if that fails, the user is
    prompted via GUI dialogs. Returns None if no credentials were obtained,
    and False if the FTP login itself fails.
    """
    pwDir=os.path.realpath(__file__)
    for i in range(3): # passwd.txt lives 3 folders up from this module
        pwDir=os.path.dirname(pwDir)
    pwFile = os.path.join(pwDir,"passwd.txt")
    print(" -- looking for login information in:\n [%s]"%pwFile)
    try:
        with open(pwFile) as f:
            lines=f.readlines()
        username=lines[0].strip()
        password=lines[1].strip()
        print(" -- found a valid username/password")
    except (OSError, IndexError): # was a bare except; missing file or too few lines
        print(" -- password lookup FAILED.")
        username=TK_askPassword("FTP LOGIN","enter FTP username")
        password=TK_askPassword("FTP LOGIN","enter password for %s"%username)
        if not username or not password:
            print(" !! failed getting login info. aborting FTP effort.")
            return
    print(" username:",username)
    print(" password:","*"*(len(password)))
    print(" -- logging in to FTP ...")
    try:
        ftp = ftplib.FTP("swharden.com")
        ftp.login(username, password)
        if folder:
            ftp.cwd(folder)
        return ftp
    except ftplib.all_errors: # covers socket errors and FTP protocol errors
        print(" !! login failure !!")
        return False
def ftp_folder_match(ftp,localFolder,deleteStuff=True):
    """
    Upload everything from localFolder into the current FTP folder.

    NOTE(review): the deleteStuff argument is currently unused -- nothing is
    deleted on the remote side despite what the name suggests. Relies on
    ftp_upload(), defined elsewhere in this module.
    """
    for fname in glob.glob(localFolder+"/*.*"):
        ftp_upload(ftp,fname)
    return
def version_upload(fname,username="nibjb"):
    """
    Only scott should do this. Upload new version to site.

    Prompts for the FTP password via a GUI dialog, then uploads fname
    (binary mode) into /software/swhlab/versions on the server.
    """
    print("popping up pasword window...")
    password=TK_askPassword("FTP LOGIN","enter password for %s"%username)
    if not password:
        return
    print("username:",username)
    print("password:","*"*(len(password)))
    print("connecting...")
    ftp = ftplib.FTP("swharden.com")
    ftp.login(username, password)
    print("successful login!")
    ftp.cwd("/software/swhlab/versions") #IMMEDIATELY GO HERE!!!
    print("uploading",os.path.basename(fname))
    # BUGFIX: the file handle was opened inline and never closed (leak);
    # a context manager guarantees it is closed after the transfer.
    with open(fname, "rb") as f:
        ftp.storbinary("STOR " + os.path.basename(fname), f, 1024) #for binary files
    print("disconnecting...")
    ftp.quit()
def TK_askPassword(title="input",msg="type here:"):
    """
    Use a GUI dialog to ask the user for a string.

    Returns the entered text, or None if the dialog was cancelled.
    """
    root = tkinter.Tk()
    root.withdraw() #hide the empty root window
    root.attributes("-topmost", True) #always on top
    root.lift() #bring to top
    value=tkinter.simpledialog.askstring(title,msg)
    root.destroy() # tear down the hidden root so Tk doesn't linger
    return value
def TK_message(title,msg):
    """Use a GUI warning dialog to pop up a message (blocks until dismissed)."""
    root = tkinter.Tk()
    root.withdraw() #hide the empty root window
    root.attributes("-topmost", True) #always on top
    root.lift() #bring to top
    tkinter.messagebox.showwarning(title, msg)
    root.destroy() # tear down the hidden root so Tk doesn't linger
def TK_ask(title,msg):
    """Use a GUI yes/no dialog; return True for yes, False for no."""
    root = tkinter.Tk()
    root.attributes("-topmost", True) #always on top
    root.withdraw() #hide the empty root window
    result=tkinter.messagebox.askyesno(title,msg)
    root.destroy() # tear down the hidden root so Tk doesn't linger
    return result
def image_convert(fname,saveAs=True,showToo=False):
    """
    Convert weird TIF files into web-friendly versions.

    Auto contrast is applied (saturating the lower and upper 0.01% of pixel
    values), the image is normalized to 8 bits, and a filename/creation-date
    caption is stamped in the corner.
    make saveAs True to save as .TIF.png
    make saveAs False and it won't save at all
    make saveAs "someFile.jpg" to save it as a different path/format
    NOTE(review): scipy.ndimage.imread was removed in SciPy >= 1.2; current
    installs need imageio or PIL here -- confirm the target environment.
    """
    # load the image
    #im = Image.open(fname) #PIL can't handle 12-bit TIFs well
    im=scipy.ndimage.imread(fname) #scipy does better with it
    im=np.array(im,dtype=float) # now it's a numpy array
    # do all image enhancement here (clip to the 0.01 / 99.99 percentiles)
    cutoffLow=np.percentile(im,.01)
    cutoffHigh=np.percentile(im,99.99)
    im[np.where(im<cutoffLow)]=cutoffLow
    im[np.where(im>cutoffHigh)]=cutoffHigh
    # IMAGE FORMATTING
    im-=np.min(im) #auto contrast
    im/=np.max(im) #normalize
    im*=255 #stretch contrast (8-bit)
    im = Image.fromarray(im)
    # IMAGE DRAWING
    msg="Filename: %s\n"%os.path.basename(fname)
    # getctime is creation time on Windows, metadata-change time on Unix
    timestamp = datetime.datetime.fromtimestamp(os.path.getctime(fname))
    msg+="Created: %s\n"%timestamp.strftime('%Y-%m-%d %H:%M:%S')
    d = ImageDraw.Draw(im)
    fnt = ImageFont.truetype("arial.ttf", 20) # assumes arial.ttf is findable -- TODO confirm on non-Windows
    d.text((6,6),msg,font=fnt,fill=0)
    d.text((4,4),msg,font=fnt,fill=255) # offset dark/light copies give an outline effect
    if showToo:
        im.show()
    if saveAs is False:
        return
    if saveAs is True:
        saveAs=fname+".png"
    im.convert('RGB').save(saveAs)
def processArgs():
    """
    Inspect sys.argv and dispatch the requested command-line action.

    Supported commands:
      info         - print version/path diagnostics
      glanceFolder - pick a folder via GUI, render every ABF to a PNG in a
                     temp folder, and open an HTML index in the browser
    """
    if len(sys.argv)<2:
        print("\n\nERROR:")
        print("this script requires arguments!")
        print('try "python command.py info"')
        return
    if sys.argv[1]=='info':
        print("import paths:\n ","\n ".join(sys.path))
        print()
        print("python version:",sys.version)
        print("SWHLab path:",__file__)
        print("SWHLab version:",swhlab.__version__)
        return
    if sys.argv[1]=='glanceFolder':
        abfFolder=swhlab.common.gui_getFolder()
        if not abfFolder or not os.path.isdir(abfFolder):
            print("bad path")
            return
        fnames=sorted(glob.glob(abfFolder+"/*.abf"))
        # output goes to a fresh swhlab folder in the system temp directory
        outFolder=tempfile.gettempdir()+"/swhlab/"
        if os.path.exists(outFolder): # wipe any previous run's output
            shutil.rmtree(outFolder)
        os.mkdir(outFolder)
        outFile=outFolder+"/index.html"
        out='<html><body>'
        out+='<h2>%s</h2>'%abfFolder
        for i,fname in enumerate(fnames):
            print("\n\n### PROCESSING %d of %d"%(i,len(fnames)))
            saveAs=os.path.join(os.path.dirname(outFolder),os.path.basename(fname))+".png"
            out+='<br><br><br><code>%s</code><br>'%os.path.abspath(fname)
            out+='<a href="%s"><img src="%s"></a><br>'%(saveAs,saveAs)
            swhlab.analysis.glance.processAbf(fname,saveAs)
        out+='</body></html>'
        with open(outFile,'w') as f:
            f.write(out)
        webbrowser.open_new_tab(outFile)
        return
    print("\n\nERROR:\nI'm not sure how to process these arguments!")
    print(sys.argv)
def detect(abf,sweep=None,threshold_upslope=50,dT=.1,saveToo=True):
    """
    Detect action potentials by an upslope exceeding threshold_upslope (V/s),
    then analyze each one. Results populate abf.APs and abf.SAP.

    if type(sweep) is int, graph int(sweep)
    if sweep==None, process all sweeps.
    """
    if type(sweep) is int:
        sweeps=[sweep]
    else:
        sweeps=list(range(abf.sweeps))
    timeStart=time.perf_counter() # time.clock() was removed in Python 3.8
    abf.APs=[None]*abf.sweeps
    abf.SAP=[None]*abf.sweeps
    for sweep in sweeps:
        abf.setSweep(sweep)
        Y=abf.dataY
        dI = int(dT/1000*abf.rate) #dI is dT/rate
        dY = (Y[dI:]-Y[:-dI])*(abf.rate/1000/dI) #now in V/S
        Is = cm.where_cross(dY,threshold_upslope) #found every putative AP (I units)
        abf.APs[sweep]=[]
        for i in range(len(Is)): #for each putative AP
            try:
                AP=analyzeAP(Y,dY,Is[i],abf.rate) #try to characterize it
                if AP:
                    AP["sweep"]=sweep
                    # BUGFIX: was "*+AP['sweepI']" (multiplication by unary
                    # plus); the sweep offset must be ADDED, mirroring expT.
                    AP["expI"]=sweep*abf.sweepInterval*abf.rate+AP["sweepI"]
                    AP["expT"]=sweep*abf.sweepInterval+AP["sweepT"]
                    AP["freq"]=np.nan #default
                    if len(abf.APs[sweep]):
                        AP["freq"]=1/(AP["expT"]-abf.APs[sweep][-1]["expT"])
                    if AP["freq"] is np.nan or AP["freq"]<500: #at 500Hz, assume you have a duplicate AP
                        abf.APs[sweep].append(AP)
            except Exception: # was a bare except; don't swallow KeyboardInterrupt
                print(" -- AP %d of %d excluded from analysis..."%(i+1,len(Is)))
                #print("!!! AP CRASH !!!")
                #print(traceback.format_exc())
    analyzeAPgroup(abf) #now that APs are known, get grouping stats
    abf.APs=cm.matrixfromDicts(abf.APs)
    abf.SAP=cm.matrixfromDicts(abf.SAP)
    print(" -- analyzed %d APs in %.02f ms"%(len(cm.dictFlat(abf.APs)),(time.perf_counter()-timeStart)*1000))
    if saveToo:
        abf.saveThing(abf.APs,"APs")
        abf.saveThing(abf.SAP,"SAP")
def analyzeAPgroup(abf=exampleABF,T1=None,T2=None,plotToo=False):
    """
    On the current (setSweep()) sweep, calculate group statistics (like
    accommodation) from the APs between T1 and T2, storing the result
    dictionary in abf.SAP[currentSweep].

    Only call directly just for demonstrating how it works by making a graph.
    Or call this if you want really custom T1 and T2 (multiple per sweep)
    This is called by default with default T1 and T2.
    Manually call it again for custom.
    """
    if T1 is None or T2 is None:
        if len(abf.protoSeqX)>2:
            # default bin: the second protocol epoch, converted to seconds
            T1=abf.protoSeqX[1]/abf.rate
            T2=abf.protoSeqX[2]/abf.rate
        else:
            T1=0
            T2=abf.sweepLength
    s={} #sweep dictionary to contain our stats
    s["sweep"]=abf.currentSweep
    s["commandI"]=abf.protoSeqY[1]
    APs=[]
    for key in ['freqAvg','freqBin']:
        s[key]=0 # defaults in case there are no/few APs in the bin
    for AP in abf.APs[abf.currentSweep]:
        if T1<AP["sweepT"]<T2:
            APs.append(AP)
    s["nAPs"]=len(APs) #number of APs in the bin period (T1-T2)
    apTimes=cm.dictVals(APs,'sweepT')
    if len(APs)>1: #some measurements require multiple APs, like accomodation
        s["centerBinTime"]=np.average(apTimes)-T1 #average time of APs in the bin
        s["centerBinFrac"]=s["centerBinTime"]/(T2-T1)*100 #fractional average of APs in bin (steady = .5)
        s["centerTime"]=np.average(apTimes)-APs[0]["sweepT"] #time of average AP WRT first AP (not bin)
        s["centerFrac"]=s["centerTime"]/(APs[-1]["sweepT"]-APs[0]["sweepT"])*100 #WRT first/last AP
        s["msToFirst"]=(APs[0]["sweepT"]-T1)*1000 #ms to first AP (from T1)
        # inst. frequency is defined relative to the preceding AP, so the
        # "first" usable frequency lives on APs[1]
        s["freqFirst1"]=APs[1]['freq'] #inst frequency of first AP
        s["freqFirst5"]=cm.dictAvg(APs[1:6],'freq')[0] #avg inst frequency of first 5 APs
        s["freqLast"]=APs[-1]['freq'] #inst frequency of last AP
        s["freqAvg"]=cm.dictAvg(APs,'freq')[0] #average inst frequency of all aps
        s["freqBin"]=len(APs)/(T2-T1) #frequency of APs in the bin (T1-T2)
        s["freqSteady25"]=cm.dictAvg(APs[-int(len(APs)*.25):],'freq')[0] # average freq of the last 25% of APs
        s["accom1Avg"]=s["freqFirst1"]/s["freqAvg"] #accomodation (first II / average)
        s["accom1Steady25"]=s["freqFirst1"]/s["freqSteady25"] #accomodation (first II / steady state)
        s["accom5Avg"]=s["freqFirst5"]/s["freqAvg"] #accomodation from average 5 first
        s["accom5Steady25"]=s["freqFirst5"]/s["freqSteady25"] #accomodation from average 5 first
        s["freqCV"]=cm.dictAvg(APs,'freq')[1]/cm.dictAvg(APs,'freq')[0] #coefficient of variation (Hz)
    s["T1"]=T1
    s["T2"]=T2
    abf.SAP[abf.currentSweep]=s
def check_AP_group(abf=exampleABF,sweep=0):
    """
    After running detect() (so abf.SAP is populated), visually check the
    per-sweep AP-group stats by plotting them over the given sweep.
    """
    abf.setSweep(sweep)
    swhlab.plot.new(abf,title="sweep %d (%d pA)"%(abf.currentSweep,abf.protoSeqY[1]))
    swhlab.plot.sweep(abf)
    SAP=cm.matrixToDicts(abf.SAP[sweep])
    if "T1" in SAP.keys():
        T1=SAP["T1"]
        T2=SAP["T2"]
        # NOTE(review): analyzeAPgroup() stores T1/T2 in seconds, yet they are
        # divided by abf.rate here (and used raw in pylab.axis below) -- the
        # units look inconsistent; confirm before relying on this span.
        pylab.axvspan(T1/abf.rate,T2/abf.rate,color='r',alpha=.1)
    else:
        T1=0
        T2=abf.sweepLength
    swhlab.plot.annotate(abf)
    pylab.tight_layout()
    pylab.subplots_adjust(right=0.6)
    pylab.annotate(cm.msgDict(SAP),(.71,.95),ha='left',va='top',
                   weight='bold',family='monospace',
                   xycoords='figure fraction',size=12,color='g')
    pylab.axis([T1-.05,T2+.05,None,None])
def analyzeAP(Y,dY,I,rate,verbose=False):
    """
    given a sweep and a time point, return the AP array for that AP.
    APs will be centered in time by their maximum upslope.

    Y: sweep data (mV); dY: its derivative (V/s); I: index of a putative AP;
    rate: sample rate (Hz). Returns a dict of AP features (built from
    locals() at the end), or False if the candidate fails sanity checks.
    """
    Ims = int(rate/1000) #Is per MS
    IsToLook=5*Ims #TODO: clarify this, ms until downslope is over
    upslope=np.max(dY[I:I+IsToLook]) #maximum rise velocity
    upslopeI=np.where(dY[I:I+IsToLook]==upslope)[0][0]+I
    I=upslopeI #center sweep at the upslope
    downslope=np.min(dY[I:I+IsToLook]) #maximum fall velocity
    downslopeI=np.where(dY[I:I+IsToLook]==downslope)[0][0]+I
    peak=np.max(Y[I:I+IsToLook]) #find peak value (mV)
    peakI=np.where(Y[I:I+IsToLook]==peak)[0][0]+I #find peak I
    # NOTE(review): the slice step "--1" is just +1 (double unary minus), so
    # this scans FORWARD from I even though the subtraction below implies a
    # BACKWARD scan for the threshold crossing was intended -- confirm
    # before changing; downstream stats depend on the current behavior.
    thresholdI=I-np.where(dY[I:I+IsToLook:--1]<10)[0] #detect <10V/S
    if not len(thresholdI):
        return False
    thresholdI=thresholdI[0]
    threshold=Y[thresholdI] # mV where >10mV/S
    height=peak-threshold # height (mV) from threshold to peak
    halfwidthPoint=np.average((threshold,peak))
    halfwidth=np.where(Y[I-IsToLook:I+IsToLook]>halfwidthPoint)[0]
    if not len(halfwidth):
        return False #doesn't look like a real AP
    halfwidthI1=halfwidth[0]+I-IsToLook
    halfwidthI2=halfwidth[-1]+I-IsToLook
    if Y[halfwidthI1-1]>halfwidthPoint or Y[halfwidthI2+1]>halfwidthPoint:
        return False #doesn't look like a real AP
    halfwidth=len(halfwidth)/rate*1000 #now in MS
    riseTime=(peakI-thresholdI)*1000/rate # time (ms) from threshold to peak
    IsToLook=100*Ims #TODO: max prediction until AHP reaches nadir
    AHPchunk=np.diff(Y[downslopeI:downslopeI+IsToLook]) #first inflection
    AHPI=np.where(AHPchunk>0)[0]
    if len(AHPI)==0:
        AHPI=np.nan
    else:
        AHPI=AHPI[0]+downslopeI
    # NOTE(review): if AHPI stayed nan above, the slice below raises
    # TypeError -- in practice this is caught by detect()'s try/except.
    AHPchunk=Y[AHPI:AHPI+IsToLook]
    if max(AHPchunk)>threshold: #if another AP is coming, cut it out
        AHPchunk=AHPchunk[:np.where(AHPchunk>threshold)[0][0]]
    if len(AHPchunk):
        AHP=np.nanmin(AHPchunk)
        AHPI=np.where(AHPchunk==AHP)[0][0]+AHPI
    AHPheight=threshold-AHP # AHP magnitude from threshold (mV)
    IsToLook=500*Ims #TODO: max prediction until AHP reaches threshold
    AHPreturn=np.average((AHP,threshold)) #half of threshold
    AHPreturnI=np.where(Y[AHPI:AHPI+IsToLook]>AHPreturn)[0]
    if len(AHPreturnI): #not having a clean decay won't cause AP to crash
        AHPreturnI=AHPreturnI[0]+AHPI
        AHPrisetime=(AHPreturnI-AHPI)*2/rate*1000 #predicted return time (ms)
        AHPupslope=AHPheight/AHPrisetime #mV/ms = V/S
        AHPreturnFullI=(AHPreturnI-AHPI)*2+AHPI
    else: #make them nan so you can do averages later
        AHPreturnI,AHPrisetime,AHPupslope=np.nan,np.nan,np.nan
        downslope=np.nan
    #fasttime (10V/S to 10V/S) #TODO:
    #dpp (deriv peak to peak) #TODO:
    sweepI,sweepT=I,I/rate # clean up variable names
    del IsToLook,I, Y, dY, Ims, AHPchunk, verbose #delete what we don't need
    return locals() # the remaining locals ARE the AP feature dict
def check_sweep(abf,sweep=None,dT=.1):
    """
    Plotting for an eyeball check of AP detection in the given sweep.

    If sweep is None (or an empty list), the first sweep containing at
    least 5 APs is chosen automatically. Top panel: raw trace with AP
    markers; bottom panel: its derivative (V/s).
    """
    if abf.APs is None:
        APs=[]
    else:
        APs=cm.matrixToDicts(abf.APs)
    # NOTE(review): len(sweep) raises TypeError for an int argument; the
    # second clause only makes sense for list/str input -- confirm callers.
    if sweep is None or len(sweep)==0: #find the first sweep with >5APs in it
        for sweepNum in range(abf.sweeps):
            foundInThisSweep=0
            for AP in APs:
                if AP["sweep"]==sweepNum:
                    foundInThisSweep+=1
            if foundInThisSweep>=5:
                break
        sweep=sweepNum
    abf.setSweep(sweep)
    Y=abf.dataY
    dI = int(dT/1000*abf.rate) #dI is dT/rate
    dY = (Y[dI:]-Y[:-dI])*(abf.rate/1000/dI) #now in V/S
    pylab.figure(figsize=(12,6))
    ax=pylab.subplot(211)
    pylab.title("sweep %d"%abf.currentSweep)
    pylab.ylabel("membrane potential (mV)")
    pylab.plot(Y,'-',alpha=.8)
    for AP in APs:
        if not AP["sweep"]==sweep:
            continue
        pylab.axvline(AP["sweepI"],alpha=.2,color='r')
        pylab.plot(AP["peakI"],AP["peak"],'.',alpha=.5,ms=20,color='r')
        pylab.plot(AP["thresholdI"],AP["threshold"],'.',alpha=.5,ms=20,color='c')
        pylab.plot([AP["AHPI"],AP["AHPreturnI"]],
                   [AP["AHP"],AP["AHPreturn"]],
                   '-',alpha=.2,ms=20,color='b',lw=7)
        pylab.plot([AP["halfwidthI1"],AP["halfwidthI2"]],
                   [AP["halfwidthPoint"],AP["halfwidthPoint"]],
                   '-',lw=5,alpha=.5,color='g')
    pylab.subplot(212,sharex=ax)
    pylab.ylabel("velocity (V/S)")
    pylab.xlabel("data points (%.02f kHz)"%(abf.rate/1000))
    pylab.plot(dY,'-',alpha=.8)
    pylab.margins(0,.1)
    for AP in APs:
        if not AP["sweep"]==sweep:
            continue
        pylab.axvline(AP["sweepI"],alpha=.2,color='r')
        pylab.plot(AP["upslopeI"],AP["upslope"],'.',alpha=.5,ms=20,color='g')
        pylab.plot(AP["downslopeI"],AP["downslope"],'.',alpha=.5,ms=20,color='g')
    pylab.axis([APs[0]["sweepI"]-1000,APs[-1]["sweepI"]+1000,None,None])
def get_AP_timepoints(abf):
    """return list of time points (sec) of all AP events in experiment."""
    col = abf.APs.dtype.names.index("expT")  # column position of the expT field
    return [record[col] for record in abf.APs]
def check_AP_raw(abf,n=10):
    """Overlay the raw membrane-potential shape of the first n detected APs."""
    timePoints=get_AP_timepoints(abf)[:n] #first n AP times
    if len(timePoints)==0:
        return
    swhlab.plot.new(abf,True,title="AP shape (n=%d)"%n,xlabel="ms")
    Ys=abf.get_data_around(timePoints,padding=.2)
    Xs=(np.arange(len(Ys[0]))-len(Ys[0])/2)*1000/abf.rate # ms axis, centered on the AP
    for i in range(1,len(Ys)):
        pylab.plot(Xs,Ys[i],alpha=.2,color='b')
    pylab.plot(Xs,Ys[0],alpha=.4,color='r',lw=2) # first AP highlighted in red
    pylab.margins(0,.1)
    msg=cm.msgDict(cm.dictFlat(abf.APs)[0],cantEndWith="I") # stats of the first AP
    pylab.subplots_adjust(right=0.7)
    pylab.annotate(msg,(.71,.95),ha='left',va='top',
                   xycoords='figure fraction',family='monospace',size=10)
def check_AP_deriv(abf,n=10):
    """Overlay the velocity (dV/dt) of the first n detected APs."""
    # BUGFIX: was hard-coded [:10], silently ignoring the n parameter
    # (check_AP_raw correctly uses [:n]).
    timePoints=get_AP_timepoints(abf)[:n] #first n AP times
    if len(timePoints)==0:
        return
    swhlab.plot.new(abf,True,title="AP velocity (n=%d)"%n,xlabel="ms",ylabel="V/S")
    pylab.axhline(-50,color='r',lw=2,ls="--",alpha=.2)
    pylab.axhline(-100,color='r',lw=2,ls="--",alpha=.2)
    Ys=abf.get_data_around(timePoints,msDeriv=.1,padding=.005)
    Xs=(np.arange(len(Ys[0]))-len(Ys[0])/2)*1000/abf.rate # ms axis, centered on the AP
    for i in range(1,len(Ys)):
        pylab.plot(Xs,Ys[i],alpha=.2,color='b')
    pylab.plot(Xs,Ys[0],alpha=.4,color='r',lw=2) # first AP highlighted in red
    pylab.margins(0,.1)
def check_AP_phase(abf,n=10):
    """Phase-plane plot (dV/dt vs V) of the first n detected APs."""
    # BUGFIX: was hard-coded [:10], silently ignoring the n parameter
    # (check_AP_raw correctly uses [:n]).
    timePoints=get_AP_timepoints(abf)[:n] #first n AP times
    if len(timePoints)==0:
        return
    swhlab.plot.new(abf,True,title="AP phase (n=%d)"%n,xlabel="mV",ylabel="V/S")
    Ys=abf.get_data_around(timePoints,msDeriv=.1,padding=.005)
    Xs=abf.get_data_around(timePoints,padding=.005)
    for i in range(1,len(Ys)):
        pylab.plot(Xs[i],Ys[i],alpha=.2,color='b')
    pylab.plot(Xs[0],Ys[0],alpha=.4,color='r',lw=1) # first AP highlighted in red
    pylab.margins(.1,.1)
def stats_first(abf):
    """
    Return a text report of AP statistics, one "key = value" line per stat.

    NOTE: despite the historical name, this iterates every AP of every
    sweep, not just the first one. Keys that are raw sample indices
    (ending in I, I1, or I2) are skipped.
    """
    msg=""
    for sweep in range(abf.sweeps):
        for AP in abf.APs[sweep]:
            for key in sorted(AP.keys()):
                # BUGFIX: was `key[-1] is "I"` -- identity comparison with a
                # string literal is interning-dependent (SyntaxWarning on 3.8+)
                if key[-1] == "I" or key[-2:] in ["I1","I2"]:
                    continue
                msg+="%s = %s\n"%(key,AP[key])
    return msg
def get_values(abf,key="freq",continuous=False):
    """
    Returns Xs, Ys (the key), and sweep #s for every AP found.

    Xs are in-sweep times (sweepT) by default, or experiment-continuous
    times (expT) if continuous=True. All three are numpy arrays.
    """
    Xs,Ys,Ss=[],[],[]
    # hoisted: matrixToDicts was previously re-computed once PER SWEEP,
    # making the loop accidentally quadratic
    APs=cm.matrixToDicts(abf.APs)
    for sweep in range(abf.sweeps):
        for AP in APs:
            if not AP["sweep"]==sweep:
                continue
            Ys.append(AP[key])
            Ss.append(AP["sweep"])
            if continuous:
                Xs.append(AP["expT"])
            else:
                Xs.append(AP["sweepT"])
    return np.array(Xs),np.array(Ys),np.array(Ss)
def getAvgBySweep(abf,feature,T0=None,T1=None):
    """
    Return the average of an AP feature, one value per sweep.

    Only APs whose sweepT lies in (T0, T1) are counted; sweeps with no
    APs in that window yield np.nan.
    """
    if T1 is None:
        T1=abf.sweepLength
    if T0 is None:
        T0=0
    data = [np.empty((0))]*abf.sweeps
    for AP in cm.dictFlat(cm.matrixToDicts(abf.APs)):
        if T0<AP['sweepT']<T1:
            val=AP[feature]
            data[int(AP['sweep'])]=np.concatenate((data[int(AP['sweep'])],[val]))
    for sweep in range(abf.sweeps):
        if len(data[sweep])>1:
            # BUGFIX: the old "and np.any(...)" guard sent sweeps whose values
            # were all zero to np.nan instead of averaging them.
            data[sweep]=np.nanmean(data[sweep])
        elif len(data[sweep])==1:
            data[sweep]=data[sweep][0]
        else:
            data[sweep]=np.nan
    return data
def readLog(fname="workdays.csv",onlyAfter=datetime.datetime(year=2017,month=1,day=1)):
    """
    Return a list of [stamp, project] elements from a CSV work log.

    The first row is assumed to be a header. Each remaining row is
    "YYYY-MM-DD,<ignored>,project[,project...]". Rows dated before
    onlyAfter (if given) and rows with fewer than 3 fields are skipped.
    """
    with open(fname) as f:
        raw=f.read().split("\n")
    efforts=[] #date,nickname
    for line in raw[1:]:
        line=line.strip().split(",")
        # BUGFIX: skip short/blank lines BEFORE parsing the date; a trailing
        # newline previously crashed strptime with an empty string.
        if len(line)<3:
            continue
        date=datetime.datetime.strptime(line[0], "%Y-%m-%d")
        if onlyAfter and date<onlyAfter:
            continue
        for project in line[2:]:
            project=project.strip()
            if len(project):
                efforts.append([date,project])
    return efforts
def waitTillCopied(fname):
    """
    Block until fname's size stops changing between polls.

    sometimes a huge file takes several seconds to copy over.
    This will hang until the file is copied (file size is stable).
    """
    lastSize = 0
    while True:
        thisSize = os.path.getsize(fname)
        print("size:", thisSize)
        if thisSize == lastSize:
            print("size: STABLE")
            return
        lastSize = thisSize
        time.sleep(.1)  # poll interval
def lazygo(watchFolder='../abfs/',reAnalyze=False,rebuildSite=False,
           keepGoing=True,matching=False):
    """
    Continuously monitor a folder for new ABFs and try to analyze them.

    This is intended to watch only one folder, but can run multiple copies.
    reAnalyze: re-process ABFs even if swhlab4 output already exists.
    rebuildSite: regenerate index pages for already-analyzed ABFs.
    keepGoing: loop forever (False = single pass).
    matching: if a string, only process filenames containing it.
    """
    abfsKnown=[]
    while True:
        print()
        pagesNeeded=[]
        for fname in glob.glob(watchFolder+"/*.abf"):
            ID=os.path.basename(fname).replace(".abf","")
            if not fname in abfsKnown:
                if os.path.exists(fname.replace(".abf",".rsv")): #TODO: or something like this
                    continue # a .rsv marker means the file is still being recorded
                if matching and not matching in fname:
                    continue
                abfsKnown.append(fname)
                if os.path.exists(os.path.dirname(fname)+"/swhlab4/"+os.path.basename(fname).replace(".abf","_info.pkl")) and reAnalyze==False:
                    print("already analyzed",os.path.basename(fname))
                    if rebuildSite:
                        pagesNeeded.append(ID)
                else:
                    handleNewABF(fname)
                    pagesNeeded.append(ID)
        if len(pagesNeeded):
            # NOTE(review): "fname" is the last loop value here; in practice
            # this branch is only reached when the glob yielded files, but an
            # empty glob with a stale pagesNeeded would NameError -- confirm.
            print(" -- rebuilding index page")
            indexing.genIndex(os.path.dirname(fname),forceIDs=pagesNeeded)
        if not keepGoing:
            return
        for i in range(50): # ~10 s progress-dot delay between scans
            print('.',end='')
            time.sleep(.2)
def phasicTonic(self,m1=None,m2=None,chunkMs=50,quietPercentile=10,
                histResolution=.5,plotToo=False,rmsExpected=5):
    """
    Separate phasic from tonic current by subtracting a Gaussian baseline
    (fit to the quietest data chunks) from the amplitude histogram.

    chunkMs should be ~50 ms or greater.
    bin sizes must be equal to or multiples of the data resolution.
    transients smaller than the expected RMS will be silenced.
    Returns the baseline-subtracted histogram (per-second units).
    """
    # prepare sectioning values to be used later
    m1=0 if m1 is None else m1*self.pointsPerSec
    # BUGFIX: these two lines referenced an undefined global "abf"; use self
    m2=len(self.sweepY) if m2 is None else m2*self.pointsPerSec
    m1,m2=int(m1),int(m2)
    # prepare histogram values to be used later
    padding=200 # pA or mV of maximum expected deviation
    chunkPoints=int(chunkMs*self.pointsPerMs)
    histBins=int((padding*2)/histResolution)
    # center the data at 0 using peak histogram, not the mean
    Y=self.sweepY[m1:m2]
    hist,bins=np.histogram(Y,bins=2*padding)
    Yoffset=bins[np.where(hist==max(hist))[0][0]]
    Y=Y-Yoffset # we don't have to, but PDF math is easier
    # calculate all histogram
    nChunks=int(len(Y)/chunkPoints)
    hist,bins=np.histogram(Y,bins=histBins,range=(-padding,padding))
    hist=hist/len(Y) # count as a fraction of total
    Xs=bins[1:]
    # get baseline data from chunks with smallest variance
    chunks=np.reshape(Y[:nChunks*chunkPoints],(nChunks,chunkPoints))
    variances=np.var(chunks,axis=1)
    percentiles=np.empty(len(variances))
    sortedVariances=sorted(variances) # hoisted out of the rank loop below
    for i,variance in enumerate(variances):
        percentiles[i]=sortedVariances.index(variance)/len(variances)*100
    blData=chunks[np.where(percentiles<=quietPercentile)[0]].flatten()
    # generate the standard curve and pull it to the histogram height
    sigma=np.sqrt(np.var(blData))
    center=np.average(blData)+histResolution/2
    # explicit normal PDF (matplotlib.mlab.normpdf was removed in mpl 3.1)
    blCurve=np.exp(-0.5*((Xs-center)/sigma)**2)/(sigma*np.sqrt(2*np.pi))
    blCurve=blCurve*max(hist)/max(blCurve)
    # determine the phasic current by subtracting-out the baseline
    diff=hist-blCurve
    # manually zero-out data which we expect to be within the RMS range
    ignrCenter=len(Xs)/2
    ignrPad=rmsExpected/histResolution
    ignr1,ignr2=int(ignrCenter-ignrPad),int(ignrCenter+ignrPad) # was "ignt2" typo
    diff[ignr1:ignr2]=0
    return diff/len(Y)*self.pointsPerSec # BUGFIX: was abf.pointsPerSec
def values_above_sweep(abf,dataI,dataY,ylabel="",useFigure=None):
    """
    To make plots like AP frequency over original trace.

    dataI=[i] #the i of the sweep
    dataY=[1.234] #something like inst freq
    Left column (221/223): x in within-sweep time; right column (222/224):
    x offset to experiment-continuous time.
    """
    xOffset = abf.currentSweep*abf.sweepInterval # shift for continuous-time panels
    if not useFigure: #just passing the figure makes it persistant!
        pylab.figure(figsize=(8,6))
    ax=pylab.subplot(221)
    pylab.grid(alpha=.5)
    if len(dataI):
        pylab.plot(abf.dataX[dataI],dataY,'.',ms=10,alpha=.5,
                   color=abf.colormap[abf.currentSweep])
    pylab.margins(0,.1)
    pylab.ylabel(ylabel)
    pylab.subplot(223,sharex=ax)
    pylab.grid(alpha=.5)
    pylab.plot(abf.dataX,abf.dataY,color=abf.colormap[abf.currentSweep],alpha=.5)
    pylab.ylabel("raw data (%s)"%abf.units)
    ax2=pylab.subplot(222)
    pylab.grid(alpha=.5)
    if len(dataI):
        pylab.plot(abf.dataX[dataI]+xOffset,dataY,'.',ms=10,alpha=.5,
                   color=abf.colormap[abf.currentSweep])
    pylab.margins(0,.1)
    pylab.ylabel(ylabel)
    pylab.subplot(224,sharex=ax2)
    pylab.grid(alpha=.5)
    pylab.plot(abf.dataX+xOffset,abf.dataY,color=abf.colormap[abf.currentSweep])
    pylab.ylabel("raw data (%s)"%abf.units)
    pylab.tight_layout()
def gain(abf):
    """Plot a gain function: average inst. AP frequency vs command current."""
    Ys=np.nan_to_num(swhlab.ap.getAvgBySweep(abf,'freq')) # nan (no APs) becomes 0 Hz
    Xs=abf.clampValues(abf.dataX[int(abf.protoSeqX[1]+.01)]) # command current per sweep
    swhlab.plot.new(abf,title="gain function",xlabel="command current (pA)",
                    ylabel="average inst. freq. (Hz)")
    pylab.plot(Xs,Ys,'.-',ms=20,alpha=.5,color='b')
    pylab.axhline(0,alpha=.5,lw=2,color='r',ls="--") # zero-frequency reference
    pylab.margins(.1,.1)
def IV(abf,T1,T2,plotToo=True,color='b'):
    """
    Given two time points (seconds) return IV data.

    Optionally plots a fancy graph (with errorbars).
    Returns the tuple (AV, SD): per-sweep averages and standard deviations
    measured over the [T1, T2] window.
    """
    rangeData=abf.average_data([[T1,T2]]) #get the average data per sweep
    AV,SD=rangeData[:,0,0],rangeData[:,0,1] #separate by average and SD
    Xs=abf.clampValues(T1) #get clamp values at time point T1
    if plotToo:
        new(abf) #do this so it's the right shape and size
        # plot the original sweep
        pylab.subplot(221)
        pylab.title("sweep data")
        pylab.xlabel("time (s)")
        pylab.ylabel("Measurement (%s)"%abf.units)
        sweep(abf,'all',protocol=False)
        pylab.axis([None,None,np.min(rangeData)-50,np.max(rangeData)+50])
        pylab.axvspan(T1,T2,alpha=.1,color=color) #share measurement region
        pylab.margins(0,.1)
        # plot the data zoomed in
        pylab.subplot(223)
        pylab.title("measurement region")
        pylab.xlabel("time (s)")
        pylab.ylabel("Measurement (%s)"%abf.units)
        sweep(abf,'all',protocol=False)
        pylab.axis([T1-.05,T2+.05,np.min(rangeData)-50,np.max(rangeData)+50])
        pylab.axvspan(T1,T2,alpha=.1,color=color) #share measurement region
        pylab.margins(0,.1)
        # plot the protocol
        pylab.subplot(222)
        pylab.title("protocol")
        pylab.xlabel("time (s)")
        pylab.ylabel("Command (%s)"%abf.unitsCommand)
        sweep(abf,'all',protocol=True)
        pylab.axvspan(T1,T2,alpha=.1,color=color) #share measurement region
        pylab.margins(0,.1)
        # plot the I/V
        pylab.subplot(224)
        pylab.grid(alpha=.5)
        pylab.title("command / measure relationship")
        pylab.xlabel("Command (%s)"%abf.unitsCommand)
        pylab.ylabel("Measurement (%s)"%abf.units)
        pylab.errorbar(Xs,AV,SD,capsize=0,marker='.',color=color)
        # reference lines: 0 pA / -70 mV depending on clamp mode
        if abf.units=="pA":
            pylab.axhline(0,alpha=.5,lw=2,color='r',ls="--")
            pylab.axvline(-70,alpha=.5,lw=2,color='r',ls="--")
        else:
            pylab.axhline(-70,alpha=.5,lw=2,color='r',ls="--")
            pylab.axvline(0,alpha=.5,lw=2,color='r',ls="--")
        pylab.margins(.1,.1)
        annotate(abf)
    return AV,SD
def comments(abf,minutes=False):
    """
    Draw a labeled vertical line at each comment/tag time point.

    Defaults to seconds on the x-axis; set minutes=True when the plot
    being annotated uses a minutes axis.
    """
    if not len(abf.commentTimes):
        return
    for i in range(len(abf.commentTimes)):
        t,c = abf.commentTimes[i],abf.commentTags[i]
        if minutes:
            t=t/60
        pylab.axvline(t,lw=1,color='r',ls="--",alpha=.5)
        X1,X2,Y1,Y2=pylab.axis()
        Y2=Y2-abs(Y2-Y1)*.02 # nudge the label just below the top of the axes
        pylab.text(t,Y2,c,size=8,color='r',rotation='vertical',
                   ha='right',va='top',weight='bold',alpha=.5)
    if minutes:
        pylab.xlabel("minutes")
    else:
        pylab.xlabel("seconds")
def dual(ABF):
    """Plot two channels of current sweep (channel 0 top, channel 1 bottom)."""
    new(ABF)
    pylab.subplot(211)
    pylab.title("Input A (channel 0)")
    ABF.channel=0 # note: switching channel mutates the ABF object's state
    sweep(ABF)
    pylab.subplot(212)
    pylab.title("Input B (channel 1)")
    ABF.channel=1
    sweep(ABF)
def sweep(ABF,sweep=None,rainbow=True,alpha=None,protocol=False,color='b',
          continuous=False,offsetX=0,offsetY=0,minutes=False,
          decimate=None,newFigure=False):
    """
    Load a particular sweep then plot it.

    If sweep is None or False, just plot current dataX/dataY.
    If rainbow, it'll make it color coded prettily.
    sweep may be None, "all", an int/float, or a list of sweep numbers.
    """
    if len(pylab.get_fignums())==0 or newFigure:
        new(ABF,True)
    if offsetY>0:
        pylab.grid(None)
    # figure which sweeps to plot
    if sweep is None:
        sweeps=[ABF.currentSweep]
        if not ABF.currentSweep:
            sweeps=[0]
    elif sweep=="all":
        sweeps=range(0,ABF.sweeps)
    elif type(sweep) in [int,float]:
        sweeps=[int(sweep)]
    elif type(sweep) is list:
        sweeps=sweep
    else:
        print("DONT KNOW WHAT TO DO WITH THIS SWEEPS!!!\n",type(sweep),sweep)
        # BUGFIX: previously fell through with "sweeps" unbound, causing a
        # NameError a few lines later; bail out instead.
        return
    #figure out offsets:
    if continuous:
        offsetX=ABF.sweepInterval
    # determine the colors to use
    colors=[color]*len(sweeps) #default to blue
    if rainbow and len(sweeps)>1:
        for i in range(len(sweeps)):
            colors[i]=ABF.colormap[i]
    if alpha is None and len(sweeps)==1:
        alpha=1
    if rainbow and alpha is None:
        alpha=.5
    # correct for alpha
    if alpha is None:
        alpha=1
    # conversion to minutes?
    if minutes == False:
        minutes=1
    else:
        minutes=60
        pylab.xlabel("minutes")
    ABF.decimateMethod=decimate
    # do the plotting of each sweep
    for i in range(len(sweeps)):
        ABF.setSweep(sweeps[i])
        if protocol:
            pylab.plot((np.array(ABF.protoX)/ABF.rate+offsetX*i)/minutes,
                       ABF.protoY+offsetY*i,
                       alpha=alpha,color=colors[i])
        else:
            pylab.plot((ABF.dataX+offsetX*i)/minutes,
                       ABF.dataY+offsetY*i,alpha=alpha,color=colors[i])
    ABF.decimateMethod=None
    pylab.margins(0,.02)
def annotate(abf):
    """Stamp the figure bottom with version/ID/protocol info (plus channel if multi-channel)."""
    msg="SWHLab %s "%str(swhlab.VERSION)
    msg+="ID:%s "%abf.ID
    msg+="CH:%d "%abf.channel
    msg+="PROTOCOL:%s "%abf.protoComment
    msg+="COMMAND: %d%s "%(abf.holding,abf.units)
    msg+="GENERATED:%s "%'{0:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now())
    pylab.annotate(msg,(.001,.001),xycoords='figure fraction',ha='left',
                   va='bottom',color='#999999',family='monospace',size=8,
                   weight='bold')
    if abf.nADC>1: # multi-channel file: also stamp which channel is shown
        msg="Ch %d/%d"%(abf.channel+1,abf.nADC)
        pylab.annotate(msg,(.01,.99),xycoords='figure fraction',ha='left',
                       va='top',color='#FF0000',family='monospace',size=12,
                       weight='bold')
def new(ABF,forceNewFigure=False,title=None,xlabel=None,ylabel=None):
    """
    makes a new matplotlib figure with default dims and DPI.
    Also labels it with pA or mV depending on ABF.

    If a figure already exists and forceNewFigure is False, this is a
    no-op so later plots layer onto the current figure.
    """
    if len(pylab.get_fignums()) and forceNewFigure==False:
        #print("adding to existing figure")
        return
    pylab.figure(figsize=(8,6))
    pylab.grid(alpha=.5)
    pylab.title(ABF.ID)
    pylab.ylabel(ABF.units)
    pylab.xlabel("seconds")
    # optional overrides replace the defaults set just above
    if xlabel:
        pylab.xlabel(xlabel)
    if ylabel:
        pylab.ylabel(ylabel)
    if title:
        pylab.title(title)
    annotate(ABF)
def save(abf,fname=None,tag=None,width=700,close=True,facecolor='w',
         resize=True):
    """
    Save the current pylab figure to disk (or show it if no destination).
    If a tag is given instead of a filename, save alongside the ABF output.
    DPI is chosen so the image comes out `width` pixels wide.
    """
    if not len(pylab.gca().get_lines()):
        print("can't save, no figure!")
        return
    if resize:
        pylab.tight_layout()
        pylab.subplots_adjust(bottom=.1)
        annotate(abf)
    if tag:
        # a tag means "save next to the ABF output with this suffix"
        fname = abf.outpath + abf.ID + "_" + tag + ".png"
    widthInches, heightInches = pylab.gcf().get_size_inches()
    dpi = width / widthInches  # pixels requested / physical width
    if fname:
        if not os.path.exists(abf.outpath):
            os.mkdir(abf.outpath)
        print(" <- saving [%s] at %d DPI (%dx%d)"%(os.path.basename(fname),dpi,widthInches*dpi,heightInches*dpi))
        pylab.savefig(fname, dpi=dpi, facecolor=facecolor)
    else:
        pylab.show()
    if close:
        pylab.close()
def tryLoadingFrom(tryPath,moduleName='swhlab'):
    """if the module is in this path, load it from the local folder."""
    # already running a copy outside site-packages: nothing to do
    if not 'site-packages' in swhlab.__file__:
        print("loaded custom swhlab module from",
              os.path.dirname(swhlab.__file__))
        return # no need to warn if it's already outside.
    # walk up the directory tree from tryPath looking for a local swhlab/
    # package; if one is found, put its parent first on sys.path and warn
    # that the site-packages copy was the one actually imported
    while len(tryPath)>5:
        sp=tryPath+"/swhlab/" # imaginary swhlab module path
        if os.path.isdir(sp) and os.path.exists(sp+"/__init__.py"):
            if not os.path.dirname(tryPath) in sys.path:
                sys.path.insert(0,os.path.dirname(tryPath))
                print("#"*80)
                print("# WARNING: using site-packages swhlab module")
                print("#"*80)
        tryPath=os.path.dirname(tryPath)
    return
def update(self, tids, info):
    """
    Called to update the state of the iterator. This method receives
    the set of task ids from the previous set of tasks together with
    the launch information to allow the output values to be parsed
    using the output_extractor. This data is then used to determine
    the next desired point in the parameter space by calling the
    _update_state method.

    On any failure (missing output file, parse error), a warning is
    issued and _next_val is set to StopIteration.
    """
    outputs_dir = os.path.join(info['root_directory'], 'streams')
    pattern = '%s_*_tid_*{tid}.o.{tid}*' % info['batch_name']
    flist = os.listdir(outputs_dir)
    try:
        outputs = []
        for tid in tids:
            matches = fnmatch.filter(flist, pattern.format(tid=tid))
            if len(matches) != 1:
                self.warning("No unique output file for tid %d" % tid)
            # context manager ensures the file handle is closed
            # (the original used open(...).read() and leaked the handle)
            with open(os.path.join(outputs_dir, matches[0]), 'r') as f:
                contents = f.read()
            outputs.append(self.output_extractor(contents))
        self._next_val = self._update_state(outputs)
        self.trace.append((outputs, self._next_val))
    except Exception:  # narrowed from bare except so KeyboardInterrupt/SystemExit propagate
        self.warning("Cannot load required output files. Cannot continue.")
        self._next_val = StopIteration
def show(self):
    """
    When dynamic, not all argument values may be available.
    """
    groups = list(enumerate(self.copy()))
    for (group_ind, specs) in groups:
        if len(groups) > 1:
            print("Group %d" % group_ind)
        # constant keys first, then varying keys (nicely ordered)
        ordering = self.constant_keys + self.varying_keys
        lines = [', '.join('%s=%s' % (key, spec[key]) for key in ordering)
                 for spec in specs]
        print('\n'.join('%d: %s' % (num, line) for (num, line) in enumerate(lines)))
    print('Remaining arguments not available for %s' % self.__class__.__name__)
def _trace_summary(self):
"""
Summarizes the trace of values used to update the DynamicArgs
and the arguments subsequently returned. May be used to
implement the summary method.
"""
for (i, (val, args)) in enumerate(self.trace):
if args is StopIteration:
info = "Terminated"
else:
pprint = ','.join('{' + ','.join('%s=%r' % (k,v)
for (k,v) in arg.items()) + '}' for arg in args)
info = ("exploring arguments [%s]" % pprint )
if i == 0: print("Step %d: Initially %s." % (i, info))
else: print("Step %d: %s after receiving input(s) %s." % (i, info.capitalize(), val)) |
def _update_state(self, vals):
"""
Takes as input a list or tuple of two elements. First the
value returned by incrementing by 'stepsize' followed by the
value returned after a 'stepsize' decrement.
"""
self._steps_complete += 1
if self._steps_complete == self.max_steps:
self._termination_info = (False, self._best_val, self._arg)
return StopIteration
arg_inc, arg_dec = vals
best_val = min(arg_inc, arg_dec, self._best_val)
if best_val == self._best_val:
self._termination_info = (True, best_val, self._arg)
return StopIteration
self._arg += self.stepsize if (arg_dec > arg_inc) else -self.stepsize
self._best_val= best_val
return [{self.key:self._arg+self.stepsize},
{self.key:self._arg-self.stepsize}] |
def proto_unknown(theABF):
    """protocol: unknown. Plot all sweeps chronologically on a gray background."""
    abf=ABF(theABF)
    abf.log.info("analyzing as an unknown protocol")
    plot=ABFplot(abf)
    plot.rainbow=False
    plot.title=None
    plot.figure_height,plot.figure_width=SQUARESIZE,SQUARESIZE
    plot.kwargs["lw"]=.5
    plot.figure_chronological()
    # gray background flags this figure as an unrecognized protocol.
    # Axes.set_axis_bgcolor() was removed in matplotlib 2.2; prefer
    # set_facecolor() but fall back for very old matplotlib versions.
    ax = plt.gca()
    if hasattr(ax, 'set_facecolor'):
        ax.set_facecolor('#AAAAAA')
    else:
        ax.set_axis_bgcolor('#AAAAAA')
    frameAndSave(abf,"UNKNOWN")
def proto_0111(theABF):
    """protocol: IC ramp for AP shape analysis.

    Detects APs in every sweep, then draws a 2x2 figure:
    top row = membrane voltage (full sweep and zoomed on the first AP),
    bottom row = voltage derivative (full and zoomed). Stats for the
    first detected AP are written onto the figure, which is then saved.
    """
    abf=ABF(theABF)
    abf.log.info("analyzing as an IC ramp")
    # AP detection
    ap=AP(abf)
    ap.detect()
    # also calculate derivative for each sweep
    abf.derivative=True
    # create the multi-plot figure
    plt.figure(figsize=(SQUARESIZE,SQUARESIZE))
    ax1=plt.subplot(221)
    plt.ylabel(abf.units2)
    ax2=plt.subplot(222,sharey=ax1) # zoomed voltage shares Y with full view
    ax3=plt.subplot(223)
    plt.ylabel(abf.unitsD2)
    ax4=plt.subplot(224,sharey=ax3) # zoomed derivative shares Y with full view
    # put data in each subplot
    for sweep in range(abf.sweeps):
        abf.setsweep(sweep)
        ax1.plot(abf.sweepX,abf.sweepY,color='b',lw=.25)
        ax2.plot(abf.sweepX,abf.sweepY,color='b')
        ax3.plot(abf.sweepX,abf.sweepD,color='r',lw=.25)
        ax4.plot(abf.sweepX,abf.sweepD,color='r')
    # modify axis
    for ax in [ax1,ax2,ax3,ax4]: # everything
        ax.margins(0,.1)
        ax.grid(alpha=.5)
    for ax in [ax3,ax4]: # only derivative APs
        ax.axhline(-100,color='r',alpha=.5,ls="--",lw=2) # guide line at -100 (derivative units)
    for ax in [ax2,ax4]: # only zoomed in APs
        ax.get_yaxis().set_visible(False)
    # zoom the right column onto the first detected AP, if any
    if len(ap.APs):
        firstAP=ap.APs[0]["T"]
        ax2.axis([firstAP-.25,firstAP+.25,None,None])
        ax4.axis([firstAP-.01,firstAP+.01,None,None])
    # show message from first AP (skip keys ending in a capital I suffix)
    if len(ap.APs):
        firstAP=ap.APs[0]
        msg="\n".join(["%s = %s"%(x,str(firstAP[x])) for x in sorted(firstAP.keys()) if not "I" in x[-2:]])
        plt.subplot(221)
        plt.gca().text(0.02, 0.98, msg, transform= plt.gca().transAxes, fontsize=10, verticalalignment='top', family='monospace')
    # save it
    plt.tight_layout()
    frameAndSave(abf,"AP shape")
    plt.close('all')
def proto_gain(theABF,stepSize=25,startAt=-100):
    """protocol: gain function of some sort. step size and start at are pA.

    Produces (1) stacked sweeps with the AP detection window marked,
    (2) per-sweep instantaneous frequency traces, (3) an AP gain curve
    (frequency vs applied current), and (4) a second figure showing every
    sweep up to the first one containing an AP.
    """
    abf=ABF(theABF)
    abf.log.info("analyzing as an IC ramp")
    plot=ABFplot(abf)
    plot.kwargs["lw"]=.5
    plot.title=""
    # NOTE(review): subtracting startAt means the default startAt=-100
    # yields currents starting at +100 pA; if the first sweep really begins
    # at startAt pA this should probably be "+startAt" — confirm against
    # the protocol before changing (behavior preserved here).
    currents=np.arange(abf.sweeps)*stepSize-startAt
    # AP detection restricted to the detect_time1..detect_time2 window
    ap=AP(abf)
    ap.detect_time1=.1
    ap.detect_time2=.7
    ap.detect()
    # stacked plot
    plt.figure(figsize=(SQUARESIZE,SQUARESIZE))
    ax1=plt.subplot(221)
    plot.figure_sweeps()
    ax2=plt.subplot(222)
    ax2.get_yaxis().set_visible(False)
    plot.figure_sweeps(offsetY=150)
    # add vertical marks (the AP detection window) to both sweep graphs:
    for ax in [ax1,ax2]:
        for limit in [ap.detect_time1,ap.detect_time2]:
            ax.axvline(limit,color='r',ls='--',alpha=.5,lw=2)
    # make stacked gain function (frequency vs time, one trace per sweep)
    ax3=plt.subplot(223) # was also named ax4, shadowing the plot below
    plt.ylabel("frequency (Hz)")
    plt.xlabel("seconds") # fixed: was a second ylabel() call clobbering the first
    plt.grid(alpha=.5)
    freqs=ap.get_bySweep("freqs")
    times=ap.get_bySweep("times")
    for i in range(abf.sweeps):
        if len(freqs[i]):
            plt.plot(times[i][:-1],freqs[i],'-',alpha=.5,lw=2,
                     color=plot.getColor(i/abf.sweeps))
    # make gain function graph
    ax4=plt.subplot(224)
    ax4.grid(alpha=.5)
    plt.plot(currents,ap.get_bySweep("median"),'b.-',label="median")
    plt.plot(currents,ap.get_bySweep("firsts"),'g.-',label="first")
    plt.xlabel("applied current (pA)")
    plt.legend(loc=2,fontsize=10)
    plt.axhline(40,color='r',alpha=.5,ls="--",lw=2)
    plt.margins(.02,.1)
    # save it
    plt.tight_layout()
    frameAndSave(abf,"AP Gain %d_%d"%(startAt,stepSize))
    plt.close('all')
    # make a second figure that just shows every sweep up to the first AP
    plt.figure(figsize=(SQUARESIZE,SQUARESIZE))
    plt.grid(alpha=.5)
    plt.ylabel("Membrane Potential (mV)")
    plt.xlabel("Time (seconds)")
    for sweep in abf.setsweeps():
        plt.plot(abf.sweepX2,abf.sweepY,color='b',alpha=.5)
        if np.max(abf.sweepY>0): # stop after the first sweep that crosses 0 mV
            break
    plt.tight_layout()
    plt.margins(0,.1)
    plt.axis([0,1,None,None])
    plt.title("%d pA Steps from Rest"%stepSize)
    frameAndSave(abf,"voltage response fromRest",closeWhenDone=False)
    plt.axis([1.5,2.5,None,None])
    plt.title("%d pA Steps from %d pA"%(stepSize,startAt))
    frameAndSave(abf,"voltage response hyperpol",closeWhenDone=False)
    plt.close('all')
def proto_0201(theABF):
    """protocol: membrane test. Plot all sweeps overlayed in a half-size figure."""
    abf = ABF(theABF)
    abf.log.info("analyzing as a membrane test")
    plotter = ABFplot(abf)
    plotter.figure_height = SQUARESIZE/2
    plotter.figure_width = SQUARESIZE/2
    plotter.figure_sweeps()
    # frame the figure and write it to disk
    plt.tight_layout()
    frameAndSave(abf, "membrane test")
    plt.close('all')
def proto_0202(theABF):
    """protocol: MTIV. Overlay all sweeps and clip the Y axis to useful bounds."""
    abf = ABF(theABF)
    abf.log.info("analyzing as MTIV")
    plotter = ABFplot(abf)
    plotter.figure_height = SQUARESIZE
    plotter.figure_width = SQUARESIZE
    plotter.title = ""
    plotter.kwargs["alpha"] = .6
    plotter.figure_sweeps()
    # frame to upper/lower bounds, ignoring peaks from capacitive transients:
    # lower bound from the first sweep, upper bound from the last sweep
    abf.setsweep(0)
    plt.axis([None, None, abf.average(.9, 1)-100, None])
    abf.setsweep(-1)
    plt.axis([None, None, None, abf.average(.9, 1)+100])
    # save it
    plt.tight_layout()
    frameAndSave(abf, "MTIV")
    plt.close('all')
def proto_0203(theABF):
    """protocol: fast IV. Plot sweeps plus a steady-state current/voltage curve."""
    abf = ABF(theABF)
    abf.log.info("analyzing as a fast IV")
    plotter = ABFplot(abf)
    plotter.title = ""
    m1, m2 = .7, 1  # measurement window (seconds), past the capacitive transient
    plt.figure(figsize=(SQUARESIZE, SQUARESIZE/2))
    # left panel: all sweeps with the measurement window shaded
    plt.subplot(121)
    plotter.figure_sweeps()
    plt.axvspan(m1, m2, color='r', ec=None, alpha=.1)
    # right panel: average current in the window vs command voltage
    plt.subplot(122)
    plt.grid(alpha=.5)
    voltages = np.arange(abf.sweeps)*5-110  # command steps: 5 mV apart starting at -110
    averages = []
    for sweepNumber in range(abf.sweeps):
        abf.setsweep(sweepNumber)
        averages.append(abf.average(m1, m2))
    plt.plot(voltages, averages, '.-', ms=10)
    plt.axvline(-70, color='r', ls='--', lw=2, alpha=.5)
    plt.axhline(0, color='r', ls='--', lw=2, alpha=.5)
    plt.margins(.1, .1)
    plt.xlabel("membrane potential (mV)")
    # save it
    plt.tight_layout()
    frameAndSave(abf, "fast IV")
    plt.close('all')
def proto_0303(theABF):
    """protocol: repeated IC ramps.

    NOTE(review): this docstring says "repeated IC ramps" but the log
    message below says "halorhodopsin (2s pulse)" — confirm which label
    describes this protocol (only comments changed here).
    Produces an average-voltage time course, a stacked-sweeps figure
    (comment-tagged sweeps in red), and AP count / first-AP-time plots.
    """
    abf=ABF(theABF)
    abf.log.info("analyzing as a halorhodopsin (2s pulse)")
    # show average voltage
    proto_avgRange(theABF,0.2,1.2)
    plt.close('all')
    # show stacked sweeps
    plt.figure(figsize=(8,8))
    for sweep in abf.setsweeps():
        color='b'
        if sweep in np.array(abf.comment_sweeps,dtype=int):
            color='r' # highlight sweeps that carry a comment tag
        plt.plot(abf.sweepX2,abf.sweepY+100*sweep,color=color,alpha=.5)
    plt.margins(0,.01)
    plt.tight_layout()
    frameAndSave(abf,"IC ramps")
    plt.close('all')
    # do AP event detection (window in seconds)
    ap=AP(abf)
    ap.detect_time1=2.3
    ap.detect_time2=8.3
    ap.detect()
    apCount=[]
    apSweepTimes=[]
    for sweepNumber,times in enumerate(ap.get_bySweep("times")):
        apCount.append(len(times))
        if len(times):
            apSweepTimes.append(times[0]) # time of the first AP in this sweep
        else:
            apSweepTimes.append(0) # 0 marks "no AP detected this sweep"
    # plot AP frequency vs time (X axis in minutes)
    plt.figure(figsize=(8,8))
    ax1=plt.subplot(211)
    plt.grid(alpha=.4,ls='--')
    plt.plot(np.arange(len(apCount))*abf.sweepLength/60,apCount,'.-',ms=15)
    comment_lines(abf)
    plt.ylabel("AP Count")
    plt.subplot(212,sharex=ax1)
    plt.grid(alpha=.4,ls='--')
    plt.plot(np.arange(len(apCount))*abf.sweepLength/60,apSweepTimes,'.-',ms=15)
    comment_lines(abf)
    plt.ylabel("First AP Time (s)")
    plt.xlabel("Experiment Duration (minutes)")
    plt.tight_layout()
    frameAndSave(abf,"IC ramp freq")
    plt.close('all')
def proto_0304(theABF):
    """protocol: repeated IC steps.

    For each sweep: measure resting potential (average of 0-3 s), count
    APs inside the step window (3.15-4.15 s), and count APs anywhere in
    the sweep (0-10 s); plot all three as a time course in minutes.
    """
    abf=ABF(theABF)
    abf.log.info("analyzing as repeated current-clamp step")
    # prepare for AP analysis
    ap=AP(abf)
    # calculate rest potential
    avgVoltagePerSweep = [];
    times = []
    for sweep in abf.setsweeps():
        avgVoltagePerSweep.append(abf.average(0,3)) # mean of seconds 0-3
        times.append(abf.sweepStart/60) # sweep start time in minutes
    # detect only step APs (window in seconds)
    M1,M2=3.15,4.15
    ap.detect_time1, ap.detect_time2 = M1,M2
    ap.detect()
    apsPerSweepCos=[len(x) for x in ap.get_bySweep()]
    # detect all APs (whole 0-10 s window)
    M1,M2=0,10
    ap.detect_time1, ap.detect_time2 = M1,M2
    ap.detect()
    apsPerSweepRamp=[len(x) for x in ap.get_bySweep()]
    # make the plot of APs and stuff (three stacked panels)
    plt.figure(figsize=(8,8))
    plt.subplot(311)
    plt.grid(ls='--',alpha=.5)
    plt.plot(times,avgVoltagePerSweep,'.-')
    plt.ylabel("Rest Potential (mV)")
    comment_lines(abf)
    plt.subplot(312)
    plt.grid(ls='--',alpha=.5)
    plt.plot(times,apsPerSweepCos,'.-')
    plt.ylabel("APs in Step (#)")
    comment_lines(abf)
    plt.subplot(313)
    plt.grid(ls='--',alpha=.5)
    plt.plot(times,apsPerSweepRamp,'.-')
    plt.ylabel("APs in Sweep (#)")
    comment_lines(abf)
    plt.tight_layout()
    frameAndSave(abf,"cos ramp")
    plt.close('all')
def proto_avgRange(theABF,m1=None,m2=None):
    """experiment: generic VC time course experiment.

    Plots the first sweep (with the m1-m2 measurement range shaded) plus
    per-sweep average and standard deviation of that range over time.
    m1/m2 are in seconds.
    NOTE(review): when m1/m2 are omitted both default to sweepLength,
    making I1==I2 and the measured slice empty (NaN results) — presumably
    callers always pass both; confirm before relying on the defaults.
    """
    abf=ABF(theABF)
    abf.log.info("analyzing as a fast IV")
    if m1 is None:
        m1=abf.sweepLength
    if m2 is None:
        m2=abf.sweepLength
    I1=int(abf.pointsPerSec*m1) # first point index of the measurement window
    I2=int(abf.pointsPerSec*m2) # last point index of the measurement window
    Ts=np.arange(abf.sweeps)*abf.sweepInterval # sweep start times (seconds)
    Yav=np.empty(abf.sweeps)*np.nan # average
    Ysd=np.empty(abf.sweeps)*np.nan # standard deviation
    #Yar=np.empty(abf.sweeps)*np.nan # area
    for sweep in abf.setsweeps():
        Yav[sweep]=np.average(abf.sweepY[I1:I2])
        Ysd[sweep]=np.std(abf.sweepY[I1:I2])
        #Yar[sweep]=np.sum(abf.sweepY[I1:I2])/(I2*I1)-Yav[sweep]
    plot=ABFplot(abf)
    plt.figure(figsize=(SQUARESIZE*2,SQUARESIZE/2))
    plt.subplot(131)
    plot.title="first sweep"
    plot.figure_sweep(0)
    plt.title("First Sweep\n(shaded measurement range)")
    plt.axvspan(m1,m2,color='r',ec=None,alpha=.1)
    plt.subplot(132)
    plt.grid(alpha=.5)
    for i,t in enumerate(abf.comment_times):
        plt.axvline(t/60,color='r',alpha=.5,lw=2,ls='--') # mark each comment time
    plt.plot(Ts/60,Yav,'.',alpha=.75)
    plt.title("Range Average\nTAGS: %s"%(", ".join(abf.comment_tags)))
    plt.ylabel(abf.units2)
    plt.xlabel("minutes")
    plt.margins(0,.1)
    plt.subplot(133)
    plt.grid(alpha=.5)
    for i,t in enumerate(abf.comment_times):
        plt.axvline(t/60,color='r',alpha=.5,lw=2,ls='--')
    plt.plot(Ts/60,Ysd,'.',alpha=.5,color='g',ms=15,mew=0)
    #plt.fill_between(Ts/60,Ysd*0,Ysd,lw=0,alpha=.5,color='g')
    plt.title("Range Standard Deviation\nTAGS: %s"%(", ".join(abf.comment_tags)))
    plt.ylabel(abf.units2)
    plt.xlabel("minutes")
    plt.margins(0,.1)
    plt.axis([None,None,0,np.percentile(Ysd,99)*1.25]) # clip outlier SD values
    plt.tight_layout()
    frameAndSave(abf,"sweep vs average","experiment")
    plt.close('all')
def analyze(fname=False,save=True,show=None):
    """given a filename or ABF object, try to analyze it.

    Dispatches to the proto_* function matching the ABF's protocol
    comment (falling back to proto_unknown). Returns "SUCCESS", "ERROR",
    or None when skipped because a matching .rst file exists.
    """
    if fname and os.path.exists(fname.replace(".abf",".rst")):
        print("SKIPPING DUE TO RST FILE")
        return
    swhlab.plotting.core.IMAGE_SAVE=save
    if show is None:
        # default: only pop up figures when running inside IPython
        swhlab.plotting.core.IMAGE_SHOW = cm.isIpython()
    else:
        # honor an explicit show request (this line was previously commented
        # out, so the show argument was silently ignored)
        swhlab.plotting.core.IMAGE_SHOW = show
    abf=ABF(fname) # ensure it's a class
    print(">>>>> PROTOCOL >>>>>",abf.protocomment)
    runFunction="proto_unknown"
    if "proto_"+abf.protocomment in globals():
        runFunction="proto_"+abf.protocomment
    abf.log.debug("running %s()"%(runFunction))
    plt.close('all') # get ready
    try:
        # run the protocol function exactly once (the original called it
        # unconditionally AND again inside the try block)
        globals()[runFunction](abf)
    except Exception:  # narrowed from bare except
        abf.log.error("EXCEPTION DURING PROTOCOL FUNCTION")
        abf.log.error(sys.exc_info()[0])
        return "ERROR"
    plt.close('all') # clean up
    return "SUCCESS"
def processFolder(abfFolder):
    """call processAbf() for every ABF in a folder."""
    # ignore anything that isn't a plausible path string
    if not type(abfFolder) is str or not len(abfFolder)>3:
        return
    fnames = sorted(glob.glob(abfFolder + "/*.abf"))
    for fileNumber, fname in enumerate(fnames):
        print("\n\n\n### PROCESSING {} of {}:".format(fileNumber, len(fnames)), os.path.basename(fname))
        processAbf(fname, show=False)
    plt.show()
    return
def processAbf(abfFname,saveAs=False,dpi=100,show=True):
    """
    automatically generate a single representative image for an ABF.
    If saveAs is given (full path of a jpg of png file), the image will be saved.
    Otherwise, the image will pop up in a matplotlib window.
    """
    # ignore anything that isn't a plausible path string
    if not type(abfFname) is str or not len(abfFname)>3:
        return
    abf=swhlab.ABF(abfFname)
    plot=swhlab.plotting.ABFplot(abf)
    plot.figure_height=6
    plot.figure_width=10
    plot.subplot=False
    plot.figure(True) # force creation of a fresh figure
    if abf.get_protocol_sequence(0)==abf.get_protocol_sequence(1) or abf.sweeps<2:
        # same protocol every time
        if abf.lengthMinutes<2:
            # short (probably a memtest or tau)
            ax1=plt.subplot(211)
            plot.figure_sweeps()
            plt.title("{} ({} sweeps)".format(abf.ID,abf.sweeps))
            plt.gca().get_xaxis().set_visible(False)
            plt.subplot(212,sharex=ax1) # protocol below, sharing the time axis
            plot.figure_protocol()
            plt.title("")
        else:
            # long (probably a drug experiment)
            plot.figure_chronological()
    else:
        # protocol changes every sweep
        plots=[211,212] # assume we want 2 images
        if abf.units=='mV': # maybe it's something with APs?
            ap=swhlab.AP(abf) # go ahead and do AP detection
            ap.detect() # try to detect APs
            if len(ap.APs): # if we found some
                plots=[221,223,222,224] # get ready for 4 images
        ax1=plt.subplot(plots[0])
        plot.figure_sweeps()
        plt.title("{} ({} sweeps)".format(abf.ID,abf.sweeps))
        plt.gca().get_xaxis().set_visible(False)
        plt.subplot(plots[1],sharex=ax1)
        plot.figure_protocols()
        plt.title("protocol")
        if len(plots)>2:
            # assume we want to look at the first AP
            ax2=plt.subplot(plots[2])
            plot.rainbow=False
            plot.kwargs["color"]='b'
            plot.figure_chronological()
            plt.gca().get_xaxis().set_visible(False)
            plt.title("first AP magnitude")
            # velocity plot
            plt.subplot(plots[3],sharex=ax2)
            plot.abf.derivative=True # plot dV/dt rather than raw voltage
            plot.rainbow=False
            plot.traceColor='r'
            plot.figure_chronological()
            # zoom X to +/- 50 ms around the first detected AP
            plt.axis([ap.APs[0]["T"]-.05,ap.APs[0]["T"]+.05,None,None])
            plt.title("first AP velocity")
    if saveAs:
        print("saving",os.path.abspath(saveAs))
        plt.savefig(os.path.abspath(saveAs),dpi=dpi)
        return
    if show:
        plot.show()
def frameAndSave(abf,tag="",dataType="plot",saveAsFname=False,closeWhenDone=True):
    """
    frame the current matplotlib plot with ABF info, and optionally save it.
    Note that this is entirely independent of the ABFplot class object.
    If IMAGE_SAVE is False, show it instead (when IMAGE_SHOW is enabled).
    Datatype should be:
        * plot
        * experiment
    """
    # (removed a leftover debug print of closeWhenDone)
    plt.tight_layout()
    plt.subplots_adjust(top=.93,bottom=.07)
    # corner labels: tag at the top-left, file ID / protocol at the bottom-left
    plt.annotate(tag,(.01,.99),xycoords='figure fraction',ha='left',va='top',family='monospace',size=10,alpha=.5)
    msgBot="%s [%s]"%(abf.ID,abf.protocomment)
    plt.annotate(msgBot,(.01,.01),xycoords='figure fraction',ha='left',va='bottom',family='monospace',size=10,alpha=.5)
    fname=tag.lower().replace(" ",'_')+".jpg"
    fname=dataType+"_"+fname
    plt.tight_layout()
    if IMAGE_SAVE:
        abf.log.info("saving [%s]",fname)
        try:
            if saveAsFname:
                saveAs=os.path.abspath(saveAsFname)
            else:
                saveAs=os.path.abspath(abf.outPre+fname)
            if not os.path.exists(abf.outFolder):
                os.mkdir(abf.outFolder)
            plt.savefig(saveAs)
        except Exception as E:
            abf.log.error("saving [%s] failed! 'pip install pillow'?",fname)
            print(E)
    if IMAGE_SHOW==True:
        if closeWhenDone==False:
            # caller plans to keep drawing on this figure, so don't show yet
            # (message fixed: the condition is closeWhenDone==False, not True)
            print("NOT SHOWING (because closeWhenDone==False and showing would mess things up)")
        else:
            abf.log.info("showing [%s]",fname)
            plt.show()
    if closeWhenDone:
        print("closing figure")
        plt.close('all')
def figure(self,forceNew=False):
    """make sure a figure is ready (creating one only when necessary)."""
    figuresOpen = plt._pylab_helpers.Gcf.get_num_fig_managers() > 0
    if figuresOpen and forceNew is False:
        self.log.debug("figure already seen, not creating one.")
        return
    if self.subplot:
        self.log.debug("subplot mode enabled, not creating new figure")
        return
    self.log.debug("creating new figure")
    plt.figure(figsize=(self.figure_width, self.figure_height))
def save(self,callit="misc",closeToo=True,fullpath=False):
    """save the existing figure (closing it afterwards unless closeToo=False).

    If fullpath is False, callit is used as a tag and the file is written
    next to the ABF output; otherwise callit is taken as the full path.
    """
    if fullpath is False:
        fname=self.abf.outPre+"plot_"+callit+".jpg"
    else:
        fname=callit
    # create intermediate directories as needed; guard against an empty
    # dirname (a bare filename) which would crash os.mkdir("")
    outDir=os.path.dirname(fname)
    if outDir and not os.path.exists(outDir):
        os.makedirs(outDir)  # makedirs also handles nested paths (mkdir did not)
    plt.savefig(fname)
    self.log.info("saved [%s]",os.path.basename(fname))
    if closeToo:
        plt.close()
def comments(self,minutes=False):
    """
    Add comment lines/text to an existing plot. Defaults to seconds.
    Call after a plot has been made, and after margins have been set.
    """
    # NOTE(review): self.comments is this bound method itself, so it can
    # never equal 0 and this guard is dead code — it probably intends a
    # separate enable/disable flag; confirm before changing.
    if self.comments==0:
        return
    self.log.debug("adding comments to plot")
    for i,t in enumerate(self.abf.comment_times):
        if minutes:
            t/=60.0 # convert the comment time to minutes to match the X axis
        plt.axvline(t,color='r',ls=':')
        X1,X2,Y1,Y2=plt.axis()
        Y2=Y2-abs(Y2-Y1)*.02 # nudge the label down from the top edge
        plt.text(t,Y2,self.abf.comment_tags[i],color='r',rotation='vertical',
                 ha='right',va='top',weight='bold',alpha=.5,size=8,)
def figure_chronological(self):
    """plot every sweep of an ABF file (with comments)."""
    self.log.debug("creating chronological plot")
    self.figure()
    for sweepNumber in range(self.abf.sweeps):
        self.abf.setsweep(sweepNumber)
        self.setColorBySweep()
        # plot dV/dt instead of the raw trace when derivative mode is on
        trace = self.abf.sweepD if self.abf.derivative else self.abf.sweepY
        plt.plot(self.abf.sweepX, trace, **self.kwargs)
    self.comments()
    self.decorate()
def figure_sweeps(self, offsetX=0, offsetY=0):
    """plot every sweep of an ABF file, optionally staggered per sweep."""
    self.log.debug("creating overlayed sweeps plot")
    self.figure()
    for sweepNumber in range(self.abf.sweeps):
        self.abf.setsweep(sweepNumber)
        self.setColorBySweep()
        plt.plot(self.abf.sweepX2 + sweepNumber * offsetX,
                 self.abf.sweepY + sweepNumber * offsetY,
                 **self.kwargs)
    if offsetX:
        self.marginX = .05  # widen X margins when sweeps are staggered
    self.decorate()
def figure_protocol(self):
    """plot the command protocol of the current sweep only."""
    self.log.debug("creating overlayed protocols plot")
    self.figure()
    plt.plot(self.abf.protoX, self.abf.protoY, color='r')
    self.marginX = 0  # protocol traces look best with no horizontal margin
    self.decorate(protocol=True)
def figure_protocols(self):
    """plot the command protocol of every sweep, overlayed."""
    self.log.debug("creating overlayed protocols plot")
    self.figure()
    for sweepNumber in range(self.abf.sweeps):
        self.abf.setsweep(sweepNumber)
        plt.plot(self.abf.protoX, self.abf.protoY, color='r')
    self.marginX = 0  # protocol traces look best with no horizontal margin
    self.decorate(protocol=True)
def fread(f,byteLocation,structFormat=None,nBytes=1):
    """
    Read data at a specific location of an already-open (rb mode) file.

    With no structFormat, return nBytes of raw bytes. With a structFormat,
    read exactly the bytes the format requires and return the unpacked
    value: a scalar when the format yields one element, else a list.
    """
    f.seek(byteLocation)
    if not structFormat:
        return f.read(nBytes)
    values = struct.unpack(structFormat, f.read(struct.calcsize(structFormat)))
    return values[0] if len(values) == 1 else list(values)
def abf_read_header(fname, saveHeader=True):
    """
    Practice pulling data straight out of an ABF's binary header. Support only ABF2 (ClampEx an ClampFit 10).
    Use only native python libraries. Strive for simplicity and readability (to promote language portability).
    This was made by Scott Harden after a line-by-line analysis of axonrawio.py from the neo io library.
    Unlike NeoIO's format, I'm going to try to prevent nested dictionaries to keep things simple.

    Returns the `config` dict (the original fell off the end and returned
    None despite the comment saying config was the intended return value).
    saveHeader is currently unused; kept for interface compatibility.
    """
    ### THESE OBJETS WILL BE FULL WHEN THIS FUNCTION IS COMPLETE
    header={} # information about the file format
    sections={} # contains byte positions (and block sizes) for header information
    strings=[] # a list of strings stored in the ABF header (protocol file, abf comment, and channel labels / units)
    protocol = {} # info about the ABF recording
    tags=[] # timed comments made during the abf as a list of lists [pos,comment,tagtype,voice]
    adcs=[] # ADC info as a list of dicts
    dacs=[] # DAC info as a list of dicts
    digitalOutputs=[] # one 0b00000000 code per epoch (declared but not populated below)
    config={} # a concise collection of ABF info I think is useful. Only this object is returned.
    ### READ THE FIRST PART OF THE FILE INTO MEMORY
    # TODO: figure out the most memory-effecient way to do this
    f=open(fname,'rb')
    config["abf_filename"]=os.path.abspath(fname) # full path to the abf file on disk
    config["abf_ID"]=os.path.basename(fname)[:-4] # abf filename without the ".abf"
    ### DECODE HEADER - this tells basic information about the file format
    for key, byte_location, fmt in headerDescriptionV2:
        header[key]=fread(f,byte_location,fmt)
    header['fFileSignature']=header['fFileSignature'].decode()
    ### DECODE SECTIONS - sections are where (byte location) in this file different data is stored
    for sectionNumber, sectionName in enumerate(sectionNames):
        uBlockIndex, uBytes, llNumEntries = fread(f,76+sectionNumber*16,"IIl")
        sections[sectionName] = [uBlockIndex,uBytes,llNumEntries]
    # NOTE(review): /1000/1000 divides milliseconds twice; confirm the
    # intended unit before changing (value preserved as-is).
    config["abf_sweep_start_time"]=header['uFileStartTimeMS']/1000/1000
    ### DECODE STRINGS - figure out where (byte location) strings are in this file
    # There are 20 strings. Protocol path, ABF comment, then alternating channel name and units.
    byte_location = sections['StringsSection'][0]*BLOCKSIZE
    string_size = sections['StringsSection'][1]
    strings_data = fread(f,byte_location,structFormat=None,nBytes=string_size)
    for key in [b'AXENGN', b'clampex', b'Clampex', b'CLAMPEX', b'axoscope']:
        if key in strings_data:
            for line in strings_data.split(key)[1].split(b'\x00')[1:-1]:
                strings.append(line.decode())
            config["abf_protocol_file"]=strings[0]
            config["abf_comment"]=strings[1]
            config["abf_channels"]=strings[2::2]
            config["abf_units"]=strings[3::2]
            break
    ### DECODE ADC INFO - a list of dictionaries, one per ADC employed
    for ADCsection in range(sections['ADCSection'][2]):
        thisADC={}
        byte_location=sections['ADCSection'][0]*BLOCKSIZE+sections['ADCSection'][1]*ADCsection
        for key, fmt in ADCInfoDescription:
            thisADC[key]=fread(f,byte_location,fmt)
            byte_location+=struct.calcsize(fmt)
        adcs.append(thisADC)
    ### PROTOCOL - info about the nature of the recording
    byte_location=sections['ProtocolSection'][0]*BLOCKSIZE
    for key, fmt in protocolInfoDescription:
        protocol[key]=fread(f,byte_location,fmt)
        byte_location+=struct.calcsize(fmt)
    protocol.pop('sUnused', None) # we don't need this
    ### TAGS (COMMMENTS) - those with a timestamp/comment (not those embedded in the protocol)
    #TODO: not sure what the tagtime units actually are. Byte positions? Block number?
    byte_location=sections['TagSection'][0]*BLOCKSIZE
    for i in range(sections['TagSection'][2]):
        thisTag=[]
        for key, fmt in TagInfoDescription:
            val=fread(f,byte_location,fmt)
            if type(val) is bytes:
                val=val.decode().strip()
            thisTag.append(val)
            byte_location+=struct.calcsize(fmt)
        tags.append(thisTag)
    ### DAC SECTIONS
    for dacNumber in range(sections['DACSection'][2]):
        thisDAC={}
        byte_location=sections['DACSection'][0]*BLOCKSIZE+sections['DACSection'][1]*dacNumber
        for key, fmt in DACInfoDescription:
            thisDAC[key]=fread(f,byte_location,fmt)
            byte_location+=struct.calcsize(fmt)
        thisDAC.pop('sUnused', None) # we don't need this
        if thisDAC['nWaveformEnable']==0: continue # don't record unused DACs
        dacs.append(thisDAC)
    ### EPOCHS PER DAC - this is what appear on the waveform tab
    epochs=[]
    for epochNumber in range(sections['EpochPerDACSection'][2]):
        thisEpoch={}
        byte_location=sections['EpochPerDACSection'][0]*BLOCKSIZE+sections['EpochPerDACSection'][1]*epochNumber
        for key, fmt in EpochInfoPerDACDescription:
            thisEpoch[key]=fread(f,byte_location,fmt)
            byte_location+=struct.calcsize(fmt)
        thisEpoch.pop('sUnused', None) # we don't need this
        epochs.append(thisEpoch)
    ### DIGITAL OUTPUTS - this is where digital outputs are stored. Returns binary string (7-0)
    # this does not exist in Neo IO. It was hacked-in by Scott Harden (github: swharden) to capture digital outputs.
    # let's just add the digital output string to the epochs array we already have started.
    byte_location=sections['EpochSection'][0]*BLOCKSIZE
    # fixed: iterate over the entry count ([2] = llNumEntries) rather than
    # the block index ([0] = uBlockIndex), matching every other section loop
    for epochNumber in range(sections['EpochSection'][2]):
        if epochNumber>=len(epochs):
            break # don't bother looking up unused epochs
        thisEpoch=epochs[epochNumber]
        for key, fmt in EpochSectionDescription:
            val=fread(f,byte_location,fmt)
            if key=='nEpochDigitalOutput':
                val=format(val, 'b').rjust(8,'0') # convert to a binary string (7->0)
            thisEpoch[key]=val
            byte_location+=struct.calcsize(fmt)
        thisEpoch.pop('sUnused', None) # we don't need this
        epochs[epochNumber]=thisEpoch
    ### WE ARE DONE READING THE FILE
    f.close()
    ### EXTRA CONFIG - cherry-pick just what I find useful and arrange it in a simple dictionary
    config["abfVersion"]=float("".join([str(x) for x in header['fFileVersionNumber']]))/100 # now a float
    config['signalNames']=config['abf_channels'][:len(adcs)] # don't need more than the number of channels
    config['signalUnits']=config['abf_units'][:len(adcs)] # don't need more than the number of channels
    config['comments']=[x[:2] for x in tags]
    config['nSweeps']=sections['SynchArraySection'][2]
    # Determine the recording date from the header (uFileStartDate is YYYYMMDD)
    YY = int(header['uFileStartDate'] / 10000)
    MM = int((header['uFileStartDate'] - YY * 10000) / 100)
    DD = int(header['uFileStartDate'] - YY * 10000 - MM * 100)
    hh = int(header['uFileStartTimeMS'] / 1000. / 3600.)
    mm = int((header['uFileStartTimeMS'] / 1000. - hh * 3600) / 60)
    ss = header['uFileStartTimeMS'] / 1000. - hh * 3600 - mm * 60
    ms = int((ss%1)*1e6)
    ss = int(ss)
    config['abf_datetime'] = datetime.datetime(YY, MM, DD, hh, mm, ss, ms)
    # fixed: actually return the collected configuration (was missing)
    return config
def frames(fname=None,menuWidth=200,launch=False):
    """create and save a two column frames HTML file.

    The left frame (menu) is menuWidth pixels wide; the right frame
    (content) takes the remaining space. Optionally open it in a browser.
    """
    # fixed: the cols value used "*%%", which rendered as the invalid
    # token "*%"; the remainder column specifier is just "*"
    html="""
    <frameset cols="%dpx,*">
        <frame name="menu" src="index_menu.html">
        <frame name="content" src="index_splash.html">
    </frameset>"""%(menuWidth)
    with open(fname,'w') as f:
        f.write(html)
    if launch:
        webbrowser.open(fname)
def save(html,fname=None,launch=False):
    """wrap HTML in a top and bottom (with css) and save to disk."""
    page = html_top + html + html_bot
    page = page.replace("~GENAT~", swhlab.common.datetimeToString())
    if fname is None:
        # no destination given: write to a temp file and open it afterwards
        fname = tempfile.gettempdir() + "/temp.html"
        launch = True
    fname = os.path.abspath(fname)
    with open(fname, 'w') as f:
        f.write(page)
    # write the stylesheet next to the page, but only once per session
    global stylesheetSaved
    cssPath = os.path.join(os.path.dirname(fname), "style.css")
    if not os.path.exists(cssPath) or stylesheetSaved is False:
        with open(cssPath, 'w') as f:
            f.write(stylesheet)
        stylesheetSaved = True
    if launch:
        webbrowser.open(fname)
def clampfit_rename(path,char):
    """
    Given ABFs and TIFs formatted long style, rename each of them to prefix
    their number with a different character.
    Example: 2017_10_11_0011.abf
    Becomes: 2017_10_11_?011.abf
    where ? can be any character.
    NOTE: the actual os.rename call is intentionally left disabled (dry run);
    this only prints what would be renamed.
    """
    assert len(char)==1 and type(char)==str, "replacement character must be a single character"
    assert os.path.exists(path), "path doesn't exist"
    # only long-style names: YYYY_MM_DD_nnnn.* (underscores at 4, 7, and 10)
    candidates = [x for x in sorted(os.listdir(path))
                  if len(x) > 18 and x[4] + x[7] + x[10] == '___']
    for fname in candidates:
        renamed = fname[:11] + char + fname[12:]  # swap character at index 11
        if fname == renamed:
            print(fname, "==", renamed)
        else:
            print(fname, "->", renamed)
        # renaming is intentionally disabled; enable with care:
        # src = os.path.join(path, fname)
        # dst = os.path.join(path, renamed)
        # if not os.path.exists(dst):
        #     os.rename(src, dst)
    return
def kernel_gaussian(size=100, sigma=None, forwardOnly=False):
    """
    Return a normalized 1D Gaussian kernel of the given size.
    If sigma is omitted it defaults to size/10, which is usually good.
    When forwardOnly is True, the first half of the kernel is zeroed so
    the resulting filter only looks forward in time.
    """
    if sigma is None:
        sigma = size / 10
    offsets = np.arange(size) - size / 2
    kernel = np.exp(-np.power(offsets, 2) / (2 * np.power(sigma, 2)))
    if forwardOnly:
        kernel[:int(len(kernel) / 2)] = 0
    return kernel / sum(kernel)  # normalize so the kernel sums to 1
def analyzeSweep(abf,sweep,m1=None,m2=None,plotToo=False):
    """
    Detect post-synaptic currents in one sweep by thresholding a
    baseline-subtracted trace.
    m1 and m2, if given, are in seconds.
    returns [# EPSCs, # IPSCs]
    """
    abf.setsweep(sweep)
    # convert the optional second bounds into point indexes
    if m1 is None: m1=0
    else: m1=m1*abf.pointsPerSec
    if m2 is None: m2=-1
    else: m2=m2*abf.pointsPerSec
    # obtain X and Y
    Yorig=abf.sweepY[int(m1):int(m2)]
    X=np.arange(len(Yorig))/abf.pointsPerSec
    # start by lowpass filtering (1 direction)
    # Klpf=kernel_gaussian(size=abf.pointsPerMs*10,forwardOnly=True)
    # Ylpf=np.convolve(Yorig,Klpf,mode='same')
    # Y=Ylpf # commit
    # moving-baseline subtraction: convolve with a forward-only gaussian
    # (10 ms worth of points) and subtract, leaving fast deflections only
    Kmb=kernel_gaussian(size=abf.pointsPerMs*10,forwardOnly=True)
    Ymb=np.convolve(Yorig,Kmb,mode='same')
    Y=Yorig-Ymb # commit
    #Y1=np.copy(Y)
    #Y[np.where(Y>0)[0]]=np.power(Y,2)
    #Y[np.where(Y<0)[0]]=-np.power(Y,2)
    # event detection
    thresh=5 # threshold for an event
    hitPos=np.where(Y>thresh)[0] # area above the threshold
    hitNeg=np.where(Y<-thresh)[0] # area below the threshold
    hitPos=np.concatenate((hitPos,[len(Y)-1])) # helps with the diff() coming up
    hitNeg=np.concatenate((hitNeg,[len(Y)-1])) # helps with the diff() coming up
    # collapse consecutive threshold crossings into single event markers
    # (gaps greater than 10 points separate events)
    hitsPos=hitPos[np.where(np.abs(np.diff(hitPos))>10)[0]] # time point of EPSC
    hitsNeg=hitNeg[np.where(np.abs(np.diff(hitNeg))>10)[0]] # time point of IPSC
    hitsNeg=hitsNeg[1:] # often the first one is in error
    #print(hitsNeg[0])
    if plotToo:
        plt.figure(figsize=(10,5))
        ax1=plt.subplot(211)
        # NOTE(review): this title labels hitsPos as IPSCs (red) although
        # the detection comments above call them EPSCs — confirm which
        # labeling is correct before trusting the figure legend.
        plt.title("sweep %d: detected %d IPSCs (red) and %d EPSCs (blue)"%(sweep,len(hitsPos),len(hitsNeg)))
        plt.ylabel("delta pA")
        plt.grid()
        plt.plot(X,Yorig,color='k',alpha=.5)
        for hit in hitsPos:
            plt.plot(X[hit],Yorig[hit]+20,'r.',ms=20,alpha=.5)
        for hit in hitsNeg:
            plt.plot(X[hit],Yorig[hit]-20,'b.',ms=20,alpha=.5)
        plt.margins(0,.1)
        plt.subplot(212,sharex=ax1)
        plt.title("moving gaussian baseline subtraction used for threshold detection")
        plt.ylabel("delta pA")
        plt.grid()
        plt.axhline(thresh,color='r',ls='--',alpha=.5,lw=3)
        plt.axhline(-thresh,color='r',ls='--',alpha=.5,lw=3)
        plt.plot(X,Y,color='b',alpha=.5)
        plt.axis([X[0],X[-1],-thresh*1.5,thresh*1.5])
        plt.tight_layout()
        if type(plotToo) is str and os.path.isdir(plotToo):
            # plotToo is a folder path: save the figure there instead of showing
            print('saving %s/%05d.jpg'%(plotToo,sweep))
            plt.savefig(plotToo+"/%05d.jpg"%sweep)
        else:
            plt.show()
        plt.close('all')
    return [len(hitsPos),len(hitsNeg)]
def filesByExtension(fnames):
    """
    Group a list of filenames by their lowercase extension (without the dot).

    The "abf", "jpg", and "tif" keys are always present (possibly empty
    lists) so callers can index them without checking first. Files without
    an extension are grouped under the "" key.
    """
    byExt={"abf":[],"jpg":[],"tif":[]} # prime it with empties
    for fname in fnames:
        ext = os.path.splitext(fname)[1].replace(".",'').lower()
        # setdefault creates the list on first sight of a new extension
        byExt.setdefault(ext,[]).append(fname)
    return byExt
def findCells(fnames):
    """
    Return the (smart-sorted) list of cell IDs found in a list of filenames.
    A cell is indicated when an ABF name matches the start of another file.
    Example:
        123456.abf
        123456-whatever.tif
    """
    byExt = filesByExtension(fnames)
    pics = byExt['jpg'] + byExt['tif']
    cellIDs = []
    for abfFname in byExt['abf']:
        cellID = os.path.splitext(abfFname)[0]
        # a matching picture promotes this ABF to a cell ID
        if any(pic.startswith(cellID) for pic in pics):
            cellIDs.append(cellID)
    return smartSort(cellIDs)
def filesByCell(fnames,cells):
    """
    given files and cells, return a dict of files grouped by cell.

    Returns dict[parentID] -> list of ABF filenames belonging to that cell.
    NOTE(review): the 'cells' argument is not used here; parents are
    re-derived from the filenames themselves -- confirm against callers.
    """
    byCell={}
    fnames=smartSort(fnames)
    # a "day" is the first 5 characters of an ABF filename (date-coded IDs)
    days = list(set([elem[:5] for elem in fnames if elem.endswith(".abf")])) # so pythonic!
    for day in smartSort(days):
        parent=None
        for i,fname in enumerate([elem for elem in fnames if elem.startswith(day) and elem.endswith(".abf")]):
            ID=os.path.splitext(fname)[0]
            # an ABF that shares its ID with at least one other file starts a new cell
            if len([x for x in fnames if x.startswith(ID)])-1:
                parent=ID
            # ABFs seen before any parent on a given day are grouped under None
            if not parent in byCell:
                byCell[parent]=[]
            byCell[parent]=byCell[parent]+[fname]
    return byCell
def folderScan(self,abfFolder=None):
    """
    populate class properties relating to files in the folder.

    Sets: abfFolder, abfFolder2 (the ./swhlab/ output folder), fnames,
    fnames2, fnamesByExt, cells, and fnamesByCell.
    """
    # fall back to a previously-stored folder when none is given
    if abfFolder is None and 'abfFolder' in dir(self):
        abfFolder=self.abfFolder
    else:
        self.abfFolder=abfFolder
    self.abfFolder=os.path.abspath(self.abfFolder)
    self.log.info("scanning [%s]",self.abfFolder)
    if not os.path.exists(self.abfFolder):
        self.log.error("path doesn't exist: [%s]",abfFolder)
        return
    # ./swhlab/ holds all generated output (converted pics, html pages)
    self.abfFolder2=os.path.abspath(self.abfFolder+"/swhlab/")
    if not os.path.exists(self.abfFolder2):
        self.log.error("./swhlab/ doesn't exist. creating it...")
        os.mkdir(self.abfFolder2)
    self.fnames=os.listdir(self.abfFolder) # files in the data folder
    self.fnames2=os.listdir(self.abfFolder2) # files in ./swhlab/
    self.log.debug("./ has %d files",len(self.fnames))
    self.log.debug("./swhlab/ has %d files",len(self.fnames2))
    self.fnamesByExt = filesByExtension(self.fnames)
    if not "abf" in self.fnamesByExt.keys():
        self.log.error("no ABF files found")
    self.log.debug("found %d ABFs",len(self.fnamesByExt["abf"]))
    self.cells=findCells(self.fnames) # list of cells by their ID
    self.log.debug("found %d cells"%len(self.cells))
    self.fnamesByCell = filesByCell(self.fnames,self.cells) # only ABFs
    self.log.debug("grouped cells by number of source files: %s"%\
        str([len(self.fnamesByCell[elem]) for elem in self.fnamesByCell]))
def html_index(self,launch=False,showChildren=False):
    """
    generate list of cells with links. keep this simple.
    automatically generates splash page and regenerates frames.

    launch: if True, open the resulting index.html in a browser.
    showChildren: if True, list each child ABF under its parent cell.
    """
    self.makePics() # ensure all pics are converted
    # generate menu
    html='<a href="index_splash.html" target="content">./%s/</a><br>'%os.path.basename(self.abfFolder)
    for ID in smartSort(self.fnamesByCell.keys()):
        link=''
        # only link cells whose page has already been generated
        if ID+".html" in self.fnames2:
            link='href="%s.html" target="content"'%ID
        html+=('<a %s>%s</a><br>'%(link,ID)) # show the parent ABF (ID)
        if showChildren:
            for fname in self.fnamesByCell[ID]:
                thisID=os.path.splitext(fname)[0]
                # supporting files generated for this child (pics, data)
                files2=[x for x in self.fnames2 if x.startswith(thisID) and not x.endswith(".html")]
                html+='<i>%s</i>'%thisID # show the child ABF
                if len(files2):
                    html+=' (%s)'%len(files2) # show number of supporting files
                html+='<br>'
            html+="<br>"
    style.save(html,self.abfFolder2+"/index_menu.html")
    self.html_index_splash() # make splash page
    style.frames(self.abfFolder2+"/index.html",launch=launch)
def html_index_splash(self):
    """
    generate landing page (index_splash.html) listing each cell
    and the protocol of every one of its ABFs.
    """
    # header bar with program name and version; %% renders as a literal %
    # (fixed: the original emitted a broken closing tag "<?span>")
    html="""<h1 style="background-color: #EEEEFF; padding: 10px; border: 1px solid #CCCCFF;">
    SWHLab <span style="font-size: 35%%;">%s</span></h1>
    """%version.__version__
    #html+='<code>%s</code><br><br>'%self.abfFolder
    #html+='<hr>'
    for parent in smartSort(self.fnamesByCell.keys()):
        # bold link to the parent cell's page, then its children with protocols
        html+='<br><b><a href="%s.html">%s</a></b><br>'%(parent,parent)
        for child in self.fnamesByCell[parent]:
            fullpath=os.path.join(self.abfFolder,child)
            protocol = swhlab.swh_abf.abfProtocol(fullpath)
            html+='<code>%s[%s]</code><br>'%(fullpath,protocol)
    style.save(html,self.abfFolder2+"/index_splash.html")
    return
def html_single_basic(self,ID):
    """
    generate ./swhlab/xxyxxzzz.html for a single given abf.
    Input can be an ABF file path or an ABF ID.
    """
    if not ID in self.cells:
        self.log.error("ID [%s] not seen!",ID)
        return
    htmlFname=os.path.abspath(self.abfFolder2+"/"+ID+".html")
    html="<h1>Data for ID %s</h1>"%ID
    npics=0 # counted so the final log line is informative
    for childID in [os.path.splitext(x)[0] for x in self.fnamesByCell[ID]]:
        # every png/jpg in ./swhlab/ whose name starts with this child's ID
        pics=[x for x in self.fnames2 if x.startswith(childID) and os.path.splitext(x)[1].lower() in [".png",".jpg"]]
        html+="<code>%s</code><br>"%(os.path.abspath(self.abfFolder+'/'+childID+".abf"))
        for i,pic in enumerate(pics):
            # thumbnail that links to the full-size image
            html+='<a href="%s"><img class="datapic" src="%s" width="200"></a>'%(pic,pic)
            npics+=1
        html+="<br><br><br>"
    style.save(html,htmlFname)
    self.log.info("created %s containing %d pictures",htmlFname,npics)
def html_singleAll(self,template="basic"):
    """generate a data view for every ABF in the project folder."""
    useFixed = (template=="fixed")
    for cellID in smartSort(self.cells):
        # dispatch to the requested page template for each cell
        if useFixed:
            self.html_single_fixed(cellID)
        else:
            self.html_single_basic(cellID)
def makePics(self):
    """convert every .image we find to a ./swhlab/ image"""
    rescanNeeded=False
    for fname in smartSort(self.fnames):
        if fname in self.fnames2:
            continue # already copied/converted into ./swhlab/
        ext=os.path.splitext(fname)[1].lower()
        if ext in [".jpg",".png"]:
            # web-ready images are copied straight across
            if not fname in self.abfFolder2:
                self.log.debug("copying %s",fname)
                shutil.copy(os.path.join(self.abfFolder,fname),os.path.join(self.abfFolder2,fname))
                rescanNeeded=True
        if ext in [".tif",".tiff"]:
            # TIFs must be converted to JPGs for the browser
            if not fname+".jpg" in self.fnames2:
                self.log.debug("converting %s",fname)
                swhlab.swh_image.TIF_to_jpg(os.path.join(self.abfFolder,fname),saveAs=os.path.join(self.abfFolder2,fname+".jpg"))
                rescanNeeded=True
    if rescanNeeded:
        # new files were created, so the cached folder listings are stale
        self.log.debug("new pics, so a rescan is needed...")
        self.log.debug("REBUILDING ALL RECOMMENDED!!!!!!!!!!!")
        self.folderScan()
def proto_01_01_HP010(abf=exampleABF):
    """hyperpolarization step. Use to calculate tau and stuff."""
    swhlab.memtest.memtest(abf) # knows how to do IC memtest
    swhlab.memtest.checkSweep(abf) # lets you eyeball check how it did
    swhlab.plot.save(abf,tag="tau") # save the check figure tagged "tau"
def proto_01_11_rampStep(abf=exampleABF):
    """each sweep is a ramp (of set size) which builds on the last sweep.
    Used for detection of AP properties from first few APs."""
    standard_inspect(abf)
    swhlab.ap.detect(abf)
    swhlab.ap.check_sweep(abf) #eyeball how well event detection worked
    swhlab.plot.save(abf,tag="check")
    swhlab.ap.check_AP_raw(abf) #show overlayed first few APs
    swhlab.plot.save(abf,tag="raw",resize=False)
    swhlab.ap.check_AP_deriv(abf) #show overlayed first few APs
    swhlab.plot.save(abf,tag="deriv")
    swhlab.ap.check_AP_phase(abf) #show overlayed first few APs
    swhlab.plot.save(abf,tag="phase")
    # plot selected per-AP values as continuous traces, one figure each
    for feature in ['downslope','freq']:
        swhlab.ap.plot_values(abf,feature,continuous=True) #plot AP info
        swhlab.plot.save(abf,tag=feature)
def proto_01_12_steps025(abf=exampleABF):
    """IC steps. Use to determine gain function."""
    swhlab.ap.detect(abf)
    # group sweeps by current injection (200 presumably pA-related -- confirm)
    standard_groupingForInj(abf,200)
    for feature in ['freq','downslope']:
        swhlab.ap.plot_values(abf,feature,continuous=False) #plot AP info
        swhlab.plot.save(abf,tag='A_'+feature)
    swhlab.plot.gain(abf) #easy way to do a gain function!
    swhlab.plot.save(abf,tag='05-gain')
def proto_01_13_steps025dual(abf=exampleABF):
    """IC steps. See how hyperpol. step affects things."""
    swhlab.ap.detect(abf)
    standard_groupingForInj(abf,200)
    for feature in ['freq','downslope']:
        swhlab.ap.plot_values(abf,feature,continuous=False) # plot AP info
        swhlab.plot.save(abf,tag='A_'+feature)
    # per-sweep average instantaneous frequency for each of the two steps
    freqsStep1=np.nan_to_num(swhlab.ap.getAvgBySweep(abf,'freq',None,1))
    freqsStep2=np.nan_to_num(swhlab.ap.getAvgBySweep(abf,'freq',1,None))
    commandCurrents=abf.clampValues(abf.dataX[int(abf.protoSeqX[1]+.01)])
    swhlab.plot.new(abf,title="gain function",xlabel="command current (pA)",
                    ylabel="average inst. freq. (Hz)")
    pylab.plot(commandCurrents,freqsStep1,'.-',ms=20,alpha=.5,label="step 1",color='b')
    pylab.plot(commandCurrents,freqsStep2,'.-',ms=20,alpha=.5,label="step 2",color='r')
    pylab.legend(loc='upper left')
    pylab.axis([commandCurrents[0],commandCurrents[-1],None,None])
    swhlab.plot.save(abf,tag='gain')
def proto_02_01_MT70(abf=exampleABF):
    """repeated membrane tests."""
    standard_overlayWithAverage(abf)
    swhlab.memtest.memtest(abf) # run membrane test on every sweep
    swhlab.memtest.checkSweep(abf) # figure to eyeball the measurements
    swhlab.plot.save(abf,tag='check',resize=False)
def proto_02_02_IVdual(abf=exampleABF):
    """dual I/V steps in VC mode, one from -70 and one -50."""
    # first step: blue; second step: red (renamed a2v -> av2 for consistency)
    av1,sd1=swhlab.plot.IV(abf,.7,1,True,'b')
    swhlab.plot.save(abf,tag='iv1')
    av2,sd2=swhlab.plot.IV(abf,2.2,2.5,True,'r')
    swhlab.plot.save(abf,tag='iv2')
    swhlab.plot.sweep(abf,'all')
    # pad the y-axis around the first step's averages
    yPad=50
    pylab.axis([None,None,min(av1)-yPad,max(av1)+yPad])
    swhlab.plot.save(abf,tag='overlay')
def proto_02_03_IVfast(abf=exampleABF):
    """fast sweeps, 1 step per sweep, for clean IV without fast currents."""
    # average current in the .6-.9 window (presumably seconds -- confirm)
    av1,sd1=swhlab.plot.IV(abf,.6,.9,True)
    swhlab.plot.save(abf,tag='iv1')
    Xs=abf.clampValues(.6) #generate IV clamp values
    abf.saveThing([Xs,av1],'iv') # persist [clamp values, averages] for later use
def proto_04_01_MTmon70s2(abf=exampleABF):
    """repeated membrane tests, likely with drug added. Maybe IPSCs."""
    standard_inspect(abf)
    swhlab.memtest.memtest(abf)
    swhlab.memtest.checkSweep(abf)
    swhlab.plot.save(abf,tag='check',resize=False)
    # plot the standard membrane-test values over time (4-panel, presumably)
    swhlab.memtest.plot_standard4(abf)
    swhlab.plot.save(abf,tag='memtests')
def proto_VC_50_MT_IV(abf=exampleABF):
    """combination of membrane test and IV steps."""
    swhlab.memtest.memtest(abf) #do membrane test on every sweep
    swhlab.memtest.checkSweep(abf) #see all MT values
    swhlab.plot.save(abf,tag='02-check',resize=False)
    # average current in the 1.2-1.4 window (presumably seconds -- confirm)
    av1,sd1=swhlab.plot.IV(abf,1.2,1.4,True,'b')
    swhlab.plot.save(abf,tag='iv')
    Xs=abf.clampValues(1.2) #generate IV clamp values
    abf.saveThing([Xs,av1],'01_iv') # persist [clamp values, averages]
def proto_IC_ramp_gain(abf=exampleABF):
    """increasing ramps in (?) pA steps."""
    standard_inspect(abf)
    swhlab.ap.detect(abf)
    swhlab.ap.check_AP_raw(abf) #show overlayed first few APs
    swhlab.plot.save(abf,tag="01-raw",resize=False)
    swhlab.ap.check_AP_deriv(abf) #show overlayed first few APs
    swhlab.plot.save(abf,tag="02-deriv")
    swhlab.ap.check_AP_phase(abf) #show overlayed first few APs
    swhlab.plot.save(abf,tag="03-phase")
    swhlab.ap.plot_values(abf,'freq',continuous=True) #plot AP info
    pylab.subplot(211)
    # faint reference line at 40 for quick visual comparison
    pylab.axhline(40,color='r',lw=2,ls="--",alpha=.2)
    swhlab.plot.save(abf,tag='04-freq')
    swhlab.ap.plot_values(abf,'downslope',continuous=True) #plot AP info
    pylab.subplot(211)
    # faint reference line at -100 for quick visual comparison
    pylab.axhline(-100,color='r',lw=2,ls="--",alpha=.2)
    swhlab.plot.save(abf,tag='04-downslope')
def indexImages(folder,fname="index.html"):
    """
    OBSOLETE WAY TO INDEX A FOLDER.

    Write a simple HTML page (folder/fname) embedding every jpg/png found
    directly in the folder. Kept only for backward compatibility.
    """ #TODO: REMOVE
    html="<html><body>"
    for item in glob.glob(folder+"/*.*"):
        # only picture files are embedded
        if item.split(".")[-1] in ['jpg','png']:
            html+="<h3>%s</h3>"%os.path.basename(item)
            html+='<img src="%s">'%os.path.basename(item)
            html+='<br>'*10
    html+="</html></body>"
    outPath=folder+"/"+fname
    # fixed: original called `f.close` without parentheses, so the file
    # handle was never explicitly closed; `with` guarantees it.
    with open(outPath,'w') as f:
        f.write(html)
    print("indexed:")
    print(" ",os.path.abspath(outPath))
    return
def save(self, *args, **kwargs):
    """
    A custom save method that handles figuring out when something is activated or deactivated.

    Compares the current value of the activatable field against the value
    cached at load time and emits model_activations_changed (and, when
    appropriate, model_activations_updated) after the save completes.
    """
    current_activable_value = getattr(self, self.ACTIVATABLE_FIELD_NAME)
    # a brand-new object (no id yet) or a changed flag counts as a change
    is_active_changed = self.id is None or self.__original_activatable_value != current_activable_value
    # re-cache so subsequent saves compare against the freshly-saved value
    self.__original_activatable_value = current_activable_value
    ret_val = super(BaseActivatableModel, self).save(*args, **kwargs)
    # Emit the signals for when the is_active flag is changed
    if is_active_changed:
        model_activations_changed.send(self.__class__, instance_ids=[self.id], is_active=current_activable_value)
    # NOTE(review): activatable_field_updated presumably indicates the field
    # was included in this save (e.g. via update_fields) -- confirm against
    # the class definition
    if self.activatable_field_updated:
        model_activations_updated.send(self.__class__, instance_ids=[self.id], is_active=current_activable_value)
    return ret_val
def delete(self, force=False, **kwargs):
    """
    It is impossible to delete an activatable model unless force is True. This function instead sets it to inactive.
    """
    if not force:
        # soft delete: flip the activatable flag off and persist only it
        setattr(self, self.ACTIVATABLE_FIELD_NAME, False)
        return self.save(update_fields=[self.ACTIVATABLE_FIELD_NAME])
    # hard delete only when explicitly forced
    return super(BaseActivatableModel, self).delete(**kwargs)
def show(self, args, file_handle=None, **kwargs):
    """Write to file_handle if supplied, otherwise print output.

    Renders one numbered, shell-quoted command line per spec in args.specs.
    """
    full_string = ''
    # placeholder values stand in for launch-time information that is not
    # known at "show" time; timestamp is real so formatting still works
    info = {'root_directory': '<root_directory>',
            'batch_name': '<batch_name>',
            'batch_tag': '<batch_tag>',
            'batch_description': '<batch_description>',
            'launcher': '<launcher>',
            'timestamp_format': '<timestamp_format>',
            'timestamp': tuple(time.localtime()),
            'varying_keys': args.varying_keys,
            'constant_keys': args.constant_keys,
            'constant_items': args.constant_items}
    # build the shell-quoted command line for each spec (tid is a placeholder)
    quoted_cmds = [ subprocess.list2cmdline(
        [el for el in self(self._formatter(s),'<tid>',info)])
        for s in args.specs]
    cmd_lines = ['%d: %s\n' % (i, qcmds) for (i,qcmds)
                 in enumerate(quoted_cmds)]
    full_string += ''.join(cmd_lines)
    if file_handle:
        file_handle.write(full_string)
        file_handle.flush()
    else:
        print(full_string)
def update(self):
    """Update the launch information -- use if additional launches were
    made.
    """
    # every subdirectory of output_dir corresponds to one launch
    subdirs = (os.path.join(self.output_dir, name)
               for name in os.listdir(self.output_dir))
    self.launches = sorted(self._get_launch_info(d)
                           for d in subdirs if os.path.isdir(d))
def get_root_directory(self, timestamp=None):
    """
    A helper method that supplies the root directory name given a
    timestamp (defaulting to the launcher's own timestamp).
    """
    if timestamp is None:
        timestamp = self.timestamp
    # prefix the batch name with a formatted timestamp when configured
    root_name = self.batch_name
    if self.timestamp_format is not None:
        prefix = time.strftime(self.timestamp_format, timestamp)
        root_name = prefix + '-' + root_name
    parts = self.subdir + [root_name]
    return os.path.abspath(os.path.join(self.output_directory, *parts))
def _append_log(self, specs):
    """
    The log contains the tids and corresponding specifications
    used during launch with the specifications in JSON format.

    specs: list of (tid, spec) pairs; only the spec halves are written.
    """
    self._spec_log += specs # This should be removed
    log_path = os.path.join(self.root_directory, ("%s.log" % self.batch_name))
    # append so repeated launches accumulate in one <batch_name>.log file
    core.Log.write_log(log_path, [spec for (_, spec) in specs], allow_append=True)
def _record_info(self, setup_info=None):
"""
All launchers should call this method to write the info file
at the end of the launch. The .info file is saved given
setup_info supplied by _setup_launch into the
root_directory. When called without setup_info, the existing
info file is updated with the end-time.
"""
info_path = os.path.join(self.root_directory, ('%s.info' % self.batch_name))
if setup_info is None:
try:
with open(info_path, 'r') as info_file:
setup_info = json.load(info_file)
except:
setup_info = {}
setup_info.update({'end_time' : tuple(time.localtime())})
else:
setup_info.update({
'end_time' : None,
'metadata' : self.metadata
})
with open(info_path, 'w') as info_file:
json.dump(setup_info, info_file, sort_keys=True, indent=4) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.