repository_name | func_path_in_repository | func_name | whole_func_string | language | func_code_string | func_documentation_string | func_code_url
---|---|---|---|---|---|---|---|
PmagPy/PmagPy | programs/magic_gui.py | MainFrame.reset_highlights | def reset_highlights(self):
"""
Remove red outlines from all buttons
"""
for dtype in ["specimens", "samples", "sites", "locations", "ages"]:
wind = self.FindWindowByName(dtype + '_btn')
wind.Unbind(wx.EVT_PAINT, handler=self.highlight_button)
self.Refresh()
#self.message.SetLabel('Highlighted grids have incorrect or incomplete data')
self.bSizer_msg.ShowItems(False)
        self.hbox.Fit(self) | python | (identical to whole_func_string) | Remove red outlines from all buttons | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/magic_gui.py#L433-L443 |
PmagPy/PmagPy | programs/magic_gui.py | MainFrame.highlight_button | def highlight_button(self, event):
"""
Draw a red highlight line around the event object
"""
wind = event.GetEventObject()
pos = wind.GetPosition()
size = wind.GetSize()
try:
dc = wx.PaintDC(self)
except wx._core.PyAssertionError:
        # if it's not a native paint event, we can't use wx.PaintDC
dc = wx.ClientDC(self)
dc.SetPen(wx.Pen('red', 5, wx.SOLID))
dc.DrawRectangle(pos[0], pos[1], size[0], size[1])
    event.Skip() | python | (identical to whole_func_string) | Draw a red highlight line around the event object | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/magic_gui.py#L446-L460 |
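
The two magic_gui.py handlers above work as a pair: highlight_button is bound to wx.EVT_PAINT so a red rectangle is redrawn around a button on each paint pass, and reset_highlights unbinds that handler and refreshes. Below is a minimal, self-contained sketch of the same bind/draw/unbind pattern, assuming wxPython (Phoenix); the frame, button label, and reset method here are invented for illustration and are not part of magic_gui.py.

```python
import wx

class DemoFrame(wx.Frame):
    def __init__(self):
        super().__init__(None, title="highlight demo")
        # name follows the dtype + '_btn' convention used by MainFrame
        self.btn = wx.Button(self, label="specimens", name="specimens_btn",
                             pos=(20, 20))
        self.btn.Bind(wx.EVT_PAINT, self.highlight_button)

    def highlight_button(self, event):
        wind = event.GetEventObject()
        pos, size = wind.GetPosition(), wind.GetSize()
        dc = wx.ClientDC(self)  # ClientDC is usable outside a native paint event
        dc.SetPen(wx.Pen('red', 5, wx.PENSTYLE_SOLID))
        dc.SetBrush(wx.TRANSPARENT_BRUSH)  # outline only; don't paint over the button
        dc.DrawRectangle(pos[0], pos[1], size[0], size[1])
        event.Skip()

    def reset_highlights(self):
        self.btn.Unbind(wx.EVT_PAINT, handler=self.highlight_button)
        self.Refresh()

if __name__ == "__main__":
    app = wx.App()
    DemoFrame().Show()
    app.MainLoop()
```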
PmagPy/PmagPy | programs/magic_gui.py | MagICMenu.on_clear | def on_clear(self, event):
"""
initialize window to allow user to empty the working directory
"""
dia = pmag_menu_dialogs.ClearWD(self.parent, self.parent.WD)
clear = dia.do_clear()
if clear:
print('-I- Clear data object')
self.contribution = cb.Contribution(self.WD, dmodel=self.data_model)
            self.edited = False | python | (identical to whole_func_string) | initialize window to allow user to empty the working directory | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/magic_gui.py#L523-L532 |
PmagPy/PmagPy | programs/magic_gui.py | MagICMenu.on_close_grid | def on_close_grid(self, event):
"""
If there is an open grid, save its data and close it.
"""
if self.parent.grid_frame:
self.parent.grid_frame.onSave(None)
            self.parent.grid_frame.Destroy() | python | (identical to whole_func_string) | If there is an open grid, save its data and close it. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/magic_gui.py#L557-L563 |
PmagPy/PmagPy | programs/k15_s.py | main | def main():
"""
NAME
k15_s.py
DESCRIPTION
converts .k15 format data to .s format.
assumes Jelinek Kappabridge measurement scheme
SYNTAX
k15_s.py [-h][-i][command line options][<filename]
OPTIONS
-h prints help message and quits
-i allows interactive entry of options
-f FILE, specifies input file, default: standard input
-F FILE, specifies output file, default: standard output
-crd [g, t] specifies [g]eographic rotation,
or geographic AND tectonic rotation
INPUT
name [az,pl,strike,dip], followed by
3 rows of 5 measurements for each specimen
OUTPUT
least squares matrix elements and sigma:
x11,x22,x33,x12,x23,x13,sigma
"""
firstline,itilt,igeo,linecnt,key=1,0,0,0,""
out=""
data,k15=[],[]
dir='./'
ofile=""
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
dir=sys.argv[ind+1]+'/'
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-i' in sys.argv:
file=input("Input file name [.k15 format]: ")
f=open(file,'r')
data=f.readlines()
f.close()
file=input("Output file name [.s format]: ")
out=open(file,'w')
print (" [g]eographic, [t]ilt corrected, ")
tg=input(" [return for specimen coordinates]: ")
if tg=='g':
igeo=1
elif tg=='t':
igeo,itilt=1,1
elif '-f' in sys.argv:
ind=sys.argv.index('-f')
file=dir+sys.argv[ind+1]
f=open(file,'r')
data=f.readlines()
f.close()
else:
data= sys.stdin.readlines()
if len(data)==0:
print(main.__doc__)
sys.exit()
if '-F' in sys.argv:
ind=sys.argv.index('-F')
ofile=dir+sys.argv[ind+1]
out=open(ofile,'w')
if '-crd' in sys.argv:
ind=sys.argv.index('-crd')
tg=sys.argv[ind+1]
if tg=='g':igeo=1
if tg=='t': igeo,itilt=1,1
for line in data:
rec=line.split()
if firstline==1:
firstline=0
nam=rec[0]
if igeo==1: az,pl=float(rec[1]),float(rec[2])
if itilt==1: bed_az,bed_dip=90.+float(rec[3]),float(rec[4])
else:
linecnt+=1
for i in range(5):
k15.append(float(rec[i]))
if linecnt==3:
sbar,sigma,bulk=pmag.dok15_s(k15)
if igeo==1: sbar=pmag.dosgeo(sbar,az,pl)
if itilt==1: sbar=pmag.dostilt(sbar,bed_az,bed_dip)
outstring=""
for s in sbar:outstring+='%10.8f '%(s)
outstring+='%10.8f'%(sigma)
if out=="":
print(outstring)
else:
out.write(outstring+'\n')
linecnt,firstline,k15=0,1,[]
if ofile!="":print ('Output saved in ',ofile) | python | def main():
"""
NAME
k15_s.py
DESCRIPTION
converts .k15 format data to .s format.
assumes Jelinek Kappabridge measurement scheme
SYNTAX
k15_s.py [-h][-i][command line options][<filename]
OPTIONS
-h prints help message and quits
-i allows interactive entry of options
-f FILE, specifies input file, default: standard input
-F FILE, specifies output file, default: standard output
-crd [g, t] specifies [g]eographic rotation,
or geographic AND tectonic rotation
INPUT
name [az,pl,strike,dip], followed by
3 rows of 5 measurements for each specimen
OUTPUT
least squares matrix elements and sigma:
x11,x22,x33,x12,x23,x13,sigma
"""
firstline,itilt,igeo,linecnt,key=1,0,0,0,""
out=""
data,k15=[],[]
dir='./'
ofile=""
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
dir=sys.argv[ind+1]+'/'
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-i' in sys.argv:
file=input("Input file name [.k15 format]: ")
f=open(file,'r')
data=f.readlines()
f.close()
file=input("Output file name [.s format]: ")
out=open(file,'w')
print (" [g]eographic, [t]ilt corrected, ")
tg=input(" [return for specimen coordinates]: ")
if tg=='g':
igeo=1
elif tg=='t':
igeo,itilt=1,1
elif '-f' in sys.argv:
ind=sys.argv.index('-f')
file=dir+sys.argv[ind+1]
f=open(file,'r')
data=f.readlines()
f.close()
else:
data= sys.stdin.readlines()
if len(data)==0:
print(main.__doc__)
sys.exit()
if '-F' in sys.argv:
ind=sys.argv.index('-F')
ofile=dir+sys.argv[ind+1]
out=open(ofile,'w')
if '-crd' in sys.argv:
ind=sys.argv.index('-crd')
tg=sys.argv[ind+1]
if tg=='g':igeo=1
if tg=='t': igeo,itilt=1,1
for line in data:
rec=line.split()
if firstline==1:
firstline=0
nam=rec[0]
if igeo==1: az,pl=float(rec[1]),float(rec[2])
if itilt==1: bed_az,bed_dip=90.+float(rec[3]),float(rec[4])
else:
linecnt+=1
for i in range(5):
k15.append(float(rec[i]))
if linecnt==3:
sbar,sigma,bulk=pmag.dok15_s(k15)
if igeo==1: sbar=pmag.dosgeo(sbar,az,pl)
if itilt==1: sbar=pmag.dostilt(sbar,bed_az,bed_dip)
outstring=""
for s in sbar:outstring+='%10.8f '%(s)
outstring+='%10.8f'%(sigma)
if out=="":
print(outstring)
else:
out.write(outstring+'\n')
linecnt,firstline,k15=0,1,[]
if ofile!="":print ('Output saved in ',ofile) | NAME
k15_s.py
DESCRIPTION
converts .k15 format data to .s format.
assumes Jelinek Kappabridge measurement scheme
SYNTAX
k15_s.py [-h][-i][command line options][<filename]
OPTIONS
-h prints help message and quits
-i allows interactive entry of options
-f FILE, specifies input file, default: standard input
-F FILE, specifies output file, default: standard output
-crd [g, t] specifies [g]eographic rotation,
or geographic AND tectonic rotation
INPUT
name [az,pl,strike,dip], followed by
3 rows of 5 measurements for each specimen
OUTPUT
least squares matrix elements and sigma:
x11,x22,x33,x12,x23,x13,sigma | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/k15_s.py#L8-L103 |
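
The INPUT block above implies one header line (name plus optional az, pl, strike, dip) followed by three lines of five susceptibility readings per specimen. The sketch below pushes one such block through the same pmag.dok15_s reduction that main() uses; it assumes pmagpy is installed, and the specimen name and measurement values are invented.

```python
import pmagpy.pmag as pmag

# One specimen's worth of .k15-format text; all numbers are made up.
k15_text = """mys1 0 0 0 0
1.00 1.10 1.00 1.00 0.90
1.00 1.00 1.10 1.00 1.00
0.90 1.00 1.00 1.10 1.00"""
lines = k15_text.splitlines()
name = lines[0].split()[0]  # header: name [az, pl, strike, dip]
# flatten the 3 x 5 measurements into the 15-value list dok15_s expects
k15 = [float(x) for line in lines[1:] for x in line.split()]
sbar, sigma, bulk = pmag.dok15_s(k15)  # least-squares matrix elements + sigma
print(name, ' '.join('%10.8f' % s for s in sbar), '%10.8f' % sigma)
```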
PmagPy/PmagPy | programs/deprecated/zeq_magic_redo.py | main | def main():
"""
NAME
zeq_magic_redo.py
DESCRIPTION
Calculate principal components through demagnetization data using bounds and calculation type stored in "redo" file
SYNTAX
zeq_magic_redo.py [command line options]
OPTIONS
-h prints help message
-usr USER: identify user, default is ""
-f: specify input file, default is magic_measurements.txt
-F: specify output file, default is zeq_specimens.txt
-fre REDO: specify redo file, default is "zeq_redo"
-fsa SAMPFILE: specify er_samples format file, default is "er_samples.txt"
-A : don't average replicate measurements, default is yes
-crd [s,g,t] :
specify coordinate system [s,g,t] [default is specimen coordinates]
are specimen, geographic, and tilt corrected respectively
NB: you must have a SAMPFILE in this directory to rotate from specimen coordinates
-leg: attaches "Recalculated from original measurements; supercedes published results. " to comment field
INPUTS
zeq_redo format file is:
specimen_name calculation_type[DE-BFL,DE-BFL-A,DE-BFL-O,DE-BFP,DE-FM] step_min step_max component_name[A,B,C]
"""
dir_path='.'
INCL=["LT-NO","LT-AF-Z","LT-T-Z","LT-M-Z"] # looking for demag data
beg,end,pole,geo,tilt,askave,save=0,0,[],0,0,0,0
user,doave,comment= "",1,""
geo,tilt=0,0
version_num=pmag.get_version()
args=sys.argv
if '-WD' in args:
ind=args.index('-WD')
dir_path=args[ind+1]
meas_file,pmag_file,mk_file= dir_path+"/"+"magic_measurements.txt",dir_path+"/"+"zeq_specimens.txt",dir_path+"/"+"zeq_redo"
samp_file,coord=dir_path+"/"+"er_samples.txt",""
if "-h" in args:
print(main.__doc__)
sys.exit()
if "-usr" in args:
ind=args.index("-usr")
user=sys.argv[ind+1]
if "-A" in args:doave=0
if "-leg" in args: comment="Recalculated from original measurements; supercedes published results. "
if "-f" in args:
ind=args.index("-f")
meas_file=dir_path+'/'+sys.argv[ind+1]
if "-F" in args:
ind=args.index("-F")
pmag_file=dir_path+'/'+sys.argv[ind+1]
if "-fre" in args:
ind=args.index("-fre")
mk_file=dir_path+"/"+args[ind+1]
try:
mk_f=open(mk_file,'r')
except:
print("Bad redo file")
sys.exit()
mkspec,skipped=[],[]
speclist=[]
for line in mk_f.readlines():
tmp=line.split()
mkspec.append(tmp)
speclist.append(tmp[0])
if "-fsa" in args:
ind=args.index("-fsa")
samp_file=dir_path+'/'+sys.argv[ind+1]
if "-crd" in args:
ind=args.index("-crd")
coord=sys.argv[ind+1]
if coord=="g":geo,tilt=1,0
if coord=="t":geo,tilt=1,1
#
# now get down to bidness
if geo==1:
samp_data,file_type=pmag.magic_read(samp_file)
if file_type != 'er_samples':
print(file_type)
print("This is not a valid er_samples file ")
sys.exit()
#
#
#
meas_data,file_type=pmag.magic_read(meas_file)
if file_type != 'magic_measurements':
print(file_type)
print(file_type,"This is not a valid magic_measurements file ")
sys.exit()
#
# sort the specimen names
#
k = 0
print('Processing ',len(speclist), ' specimens - please wait')
PmagSpecs=[]
while k < len(speclist):
s=speclist[k]
recnum=0
PmagSpecRec={}
method_codes,inst_codes=[],[]
# find the data from the meas_data file for this sample
#
# collect info for the PmagSpecRec dictionary
#
meas_meth=[]
spec=pmag.get_dictitem(meas_data,'er_specimen_name',s,'T')
if len(spec)==0:
print('no data found for specimen: ',s)
print('delete from zeq_redo input file...., then try again')
else:
for rec in spec: # copy of vital stats to PmagSpecRec from first spec record in demag block
skip=1
methods=rec["magic_method_codes"].split(":")
if len(set(methods) & set(INCL))>0:
PmagSpecRec["er_analyst_mail_names"]=user
PmagSpecRec["magic_software_packages"]=version_num
PmagSpecRec["er_specimen_name"]=s
PmagSpecRec["er_sample_name"]=rec["er_sample_name"]
PmagSpecRec["er_site_name"]=rec["er_site_name"]
PmagSpecRec["er_location_name"]=rec["er_location_name"]
if "er_expedition_name" in list(rec.keys()):PmagSpecRec["er_expedition_name"]=rec["er_expedition_name"]
PmagSpecRec["er_citation_names"]="This study"
if "magic_experiment_name" not in list(rec.keys()): rec["magic_experiment_name"]=""
PmagSpecRec["magic_experiment_names"]=rec["magic_experiment_name"]
if "magic_instrument_codes" not in list(rec.keys()): rec["magic_instrument_codes"]=""
inst=rec['magic_instrument_codes'].split(":")
for I in inst:
if I not in inst_codes: # copy over instruments
inst_codes.append(I)
meths=rec["magic_method_codes"].split(":")
for meth in meths:
if meth.strip() not in meas_meth:meas_meth.append(meth)
if "LP-DIR-AF" in meas_meth or "LT-AF-Z" in meas_meth:
PmagSpecRec["measurement_step_unit"]="T"
if "LP-DIR-AF" not in method_codes:method_codes.append("LP-DIR-AF")
if "LP-DIR-T" in meas_meth or "LT-T-Z" in meas_meth:
PmagSpecRec["measurement_step_unit"]="K"
if "LP-DIR-T" not in method_codes:method_codes.append("LP-DIR-T")
if "LP-DIR-M" in meas_meth or "LT-M-Z" in meas_meth:
PmagSpecRec["measurement_step_unit"]="J"
if "LP-DIR-M" not in method_codes:method_codes.append("LP-DIR-M")
#
#
datablock,units=pmag.find_dmag_rec(s,spec) # fish out the demag data for this specimen
#
if len(datablock) <2 or s not in speclist :
k+=1
# print 'skipping ', s,len(datablock)
else:
#
# find replicate measurements at given treatment step and average them
#
# step_meth,avedata=pmag.vspec(data)
#
# if len(avedata) != len(datablock):
# if doave==1:
# method_codes.append("DE-VM")
# datablock=avedata
#
# do geo or stratigraphic correction now
#
if geo==1 or tilt==1:
# find top priority orientation method
orient,az_type=pmag.get_orient(samp_data,PmagSpecRec["er_sample_name"])
if az_type not in method_codes:method_codes.append(az_type)
#
# if tilt selected, get stratigraphic correction
#
tiltblock,geoblock=[],[]
for rec in datablock:
if "sample_azimuth" in list(orient.keys()) and orient["sample_azimuth"]!="":
d_geo,i_geo=pmag.dogeo(rec[1],rec[2],float(orient["sample_azimuth"]),float(orient["sample_dip"]))
geoblock.append([rec[0],d_geo,i_geo,rec[3],rec[4],rec[5]])
if tilt==1 and "sample_bed_dip_direction" in list(orient.keys()):
d_tilt,i_tilt=pmag.dotilt(d_geo,i_geo,float(orient["sample_bed_dip_direction"]),float(orient["sample_bed_dip"]))
tiltblock.append([rec[0],d_tilt,i_tilt,rec[3],rec[4],rec[5]])
elif tilt==1:
if PmagSpecRec["er_sample_name"] not in skipped:
print('no tilt correction for ', PmagSpecRec["er_sample_name"],' skipping....')
skipped.append(PmagSpecRec["er_sample_name"])
else:
if PmagSpecRec["er_sample_name"] not in skipped:
print('no geographic correction for ', PmagSpecRec["er_sample_name"],' skipping....')
skipped.append(PmagSpecRec["er_sample_name"])
#
# get beg_pca, end_pca, pca
if PmagSpecRec['er_sample_name'] not in skipped:
compnum=-1
for spec in mkspec:
if spec[0]==s:
CompRec={}
for key in list(PmagSpecRec.keys()):CompRec[key]=PmagSpecRec[key]
compnum+=1
calculation_type=spec[1]
beg=float(spec[2])
end=float(spec[3])
if len(spec)>4:
comp_name=spec[4]
else:
                        comp_name=string.ascii_uppercase[compnum] # string.uppercase was removed in Python 3
CompRec['specimen_comp_name']=comp_name
if beg < float(datablock[0][0]):beg=float(datablock[0][0])
if end > float(datablock[-1][0]):end=float(datablock[-1][0])
for l in range(len(datablock)):
if datablock[l][0]==beg:beg_pca=l
if datablock[l][0]==end:end_pca=l
if geo==1 and tilt==0:
mpars=pmag.domean(geoblock,beg_pca,end_pca,calculation_type)
if mpars["specimen_direction_type"]!="Error":
CompRec["specimen_dec"]='%7.1f ' %(mpars["specimen_dec"])
CompRec["specimen_inc"]='%7.1f ' %(mpars["specimen_inc"])
CompRec["specimen_tilt_correction"]='0'
if geo==1 and tilt==1:
mpars=pmag.domean(tiltblock,beg_pca,end_pca,calculation_type)
if mpars["specimen_direction_type"]!="Error":
CompRec["specimen_dec"]='%7.1f ' %(mpars["specimen_dec"])
CompRec["specimen_inc"]='%7.1f ' %(mpars["specimen_inc"])
CompRec["specimen_tilt_correction"]='100'
if geo==0 and tilt==0:
mpars=pmag.domean(datablock,beg_pca,end_pca,calculation_type)
if mpars["specimen_direction_type"]!="Error":
CompRec["specimen_dec"]='%7.1f ' %(mpars["specimen_dec"])
CompRec["specimen_inc"]='%7.1f ' %(mpars["specimen_inc"])
CompRec["specimen_tilt_correction"]='-1'
if mpars["specimen_direction_type"]=="Error":
pass
else:
CompRec["measurement_step_min"]='%8.3e '%(datablock[beg_pca][0])
try:
CompRec["measurement_step_max"]='%8.3e '%(datablock[end_pca][0] )
except:
print('error in end_pca ',PmagSpecRec['er_specimen_name'])
CompRec["specimen_correction"]='u'
if calculation_type!='DE-FM':
CompRec["specimen_mad"]='%7.1f '%(mpars["specimen_mad"])
CompRec["specimen_alpha95"]=""
else:
CompRec["specimen_mad"]=""
CompRec["specimen_alpha95"]='%7.1f '%(mpars["specimen_alpha95"])
CompRec["specimen_n"]='%i '%(mpars["specimen_n"])
CompRec["specimen_dang"]='%7.1f '%(mpars["specimen_dang"])
CompMeths=[]
for meth in method_codes:
if meth not in CompMeths:CompMeths.append(meth)
if calculation_type not in CompMeths:CompMeths.append(calculation_type)
if geo==1: CompMeths.append("DA-DIR-GEO")
if tilt==1: CompMeths.append("DA-DIR-TILT")
if "DE-BFP" not in calculation_type:
CompRec["specimen_direction_type"]='l'
else:
CompRec["specimen_direction_type"]='p'
CompRec["magic_method_codes"]=""
if len(CompMeths) != 0:
methstring=""
for meth in CompMeths:
methstring=methstring+ ":" +meth
CompRec["magic_method_codes"]=methstring.strip(':')
CompRec["specimen_description"]=comment
if len(inst_codes) != 0:
inststring=""
for inst in inst_codes:
inststring=inststring+ ":" +inst
CompRec["magic_instrument_codes"]=inststring.strip(':')
PmagSpecs.append(CompRec)
k+=1
pmag.magic_write(pmag_file,PmagSpecs,'pmag_specimens')
print("Recalculated specimen data stored in ",pmag_file) | python | def main():
"""
NAME
zeq_magic_redo.py
DESCRIPTION
Calculate principal components through demagnetization data using bounds and calculation type stored in "redo" file
SYNTAX
zeq_magic_redo.py [command line options]
OPTIONS
-h prints help message
-usr USER: identify user, default is ""
-f: specify input file, default is magic_measurements.txt
-F: specify output file, default is zeq_specimens.txt
-fre REDO: specify redo file, default is "zeq_redo"
-fsa SAMPFILE: specify er_samples format file, default is "er_samples.txt"
-A : don't average replicate measurements, default is yes
-crd [s,g,t] :
specify coordinate system [s,g,t] [default is specimen coordinates]
are specimen, geographic, and tilt corrected respectively
NB: you must have a SAMPFILE in this directory to rotate from specimen coordinates
-leg: attaches "Recalculated from original measurements; supercedes published results. " to comment field
INPUTS
zeq_redo format file is:
specimen_name calculation_type[DE-BFL,DE-BFL-A,DE-BFL-O,DE-BFP,DE-FM] step_min step_max component_name[A,B,C]
"""
dir_path='.'
INCL=["LT-NO","LT-AF-Z","LT-T-Z","LT-M-Z"] # looking for demag data
beg,end,pole,geo,tilt,askave,save=0,0,[],0,0,0,0
user,doave,comment= "",1,""
geo,tilt=0,0
version_num=pmag.get_version()
args=sys.argv
if '-WD' in args:
ind=args.index('-WD')
dir_path=args[ind+1]
meas_file,pmag_file,mk_file= dir_path+"/"+"magic_measurements.txt",dir_path+"/"+"zeq_specimens.txt",dir_path+"/"+"zeq_redo"
samp_file,coord=dir_path+"/"+"er_samples.txt",""
if "-h" in args:
print(main.__doc__)
sys.exit()
if "-usr" in args:
ind=args.index("-usr")
user=sys.argv[ind+1]
if "-A" in args:doave=0
if "-leg" in args: comment="Recalculated from original measurements; supercedes published results. "
if "-f" in args:
ind=args.index("-f")
meas_file=dir_path+'/'+sys.argv[ind+1]
if "-F" in args:
ind=args.index("-F")
pmag_file=dir_path+'/'+sys.argv[ind+1]
if "-fre" in args:
ind=args.index("-fre")
mk_file=dir_path+"/"+args[ind+1]
try:
mk_f=open(mk_file,'r')
except:
print("Bad redo file")
sys.exit()
mkspec,skipped=[],[]
speclist=[]
for line in mk_f.readlines():
tmp=line.split()
mkspec.append(tmp)
speclist.append(tmp[0])
if "-fsa" in args:
ind=args.index("-fsa")
samp_file=dir_path+'/'+sys.argv[ind+1]
if "-crd" in args:
ind=args.index("-crd")
coord=sys.argv[ind+1]
if coord=="g":geo,tilt=1,0
if coord=="t":geo,tilt=1,1
#
# now get down to bidness
if geo==1:
samp_data,file_type=pmag.magic_read(samp_file)
if file_type != 'er_samples':
print(file_type)
print("This is not a valid er_samples file ")
sys.exit()
#
#
#
meas_data,file_type=pmag.magic_read(meas_file)
if file_type != 'magic_measurements':
print(file_type)
print(file_type,"This is not a valid magic_measurements file ")
sys.exit()
#
# sort the specimen names
#
k = 0
print('Processing ',len(speclist), ' specimens - please wait')
PmagSpecs=[]
while k < len(speclist):
s=speclist[k]
recnum=0
PmagSpecRec={}
method_codes,inst_codes=[],[]
# find the data from the meas_data file for this sample
#
# collect info for the PmagSpecRec dictionary
#
meas_meth=[]
spec=pmag.get_dictitem(meas_data,'er_specimen_name',s,'T')
if len(spec)==0:
print('no data found for specimen: ',s)
print('delete from zeq_redo input file...., then try again')
else:
for rec in spec: # copy of vital stats to PmagSpecRec from first spec record in demag block
skip=1
methods=rec["magic_method_codes"].split(":")
if len(set(methods) & set(INCL))>0:
PmagSpecRec["er_analyst_mail_names"]=user
PmagSpecRec["magic_software_packages"]=version_num
PmagSpecRec["er_specimen_name"]=s
PmagSpecRec["er_sample_name"]=rec["er_sample_name"]
PmagSpecRec["er_site_name"]=rec["er_site_name"]
PmagSpecRec["er_location_name"]=rec["er_location_name"]
if "er_expedition_name" in list(rec.keys()):PmagSpecRec["er_expedition_name"]=rec["er_expedition_name"]
PmagSpecRec["er_citation_names"]="This study"
if "magic_experiment_name" not in list(rec.keys()): rec["magic_experiment_name"]=""
PmagSpecRec["magic_experiment_names"]=rec["magic_experiment_name"]
if "magic_instrument_codes" not in list(rec.keys()): rec["magic_instrument_codes"]=""
inst=rec['magic_instrument_codes'].split(":")
for I in inst:
if I not in inst_codes: # copy over instruments
inst_codes.append(I)
meths=rec["magic_method_codes"].split(":")
for meth in meths:
if meth.strip() not in meas_meth:meas_meth.append(meth)
if "LP-DIR-AF" in meas_meth or "LT-AF-Z" in meas_meth:
PmagSpecRec["measurement_step_unit"]="T"
if "LP-DIR-AF" not in method_codes:method_codes.append("LP-DIR-AF")
if "LP-DIR-T" in meas_meth or "LT-T-Z" in meas_meth:
PmagSpecRec["measurement_step_unit"]="K"
if "LP-DIR-T" not in method_codes:method_codes.append("LP-DIR-T")
if "LP-DIR-M" in meas_meth or "LT-M-Z" in meas_meth:
PmagSpecRec["measurement_step_unit"]="J"
if "LP-DIR-M" not in method_codes:method_codes.append("LP-DIR-M")
#
#
datablock,units=pmag.find_dmag_rec(s,spec) # fish out the demag data for this specimen
#
if len(datablock) <2 or s not in speclist :
k+=1
# print 'skipping ', s,len(datablock)
else:
#
# find replicate measurements at given treatment step and average them
#
# step_meth,avedata=pmag.vspec(data)
#
# if len(avedata) != len(datablock):
# if doave==1:
# method_codes.append("DE-VM")
# datablock=avedata
#
# do geo or stratigraphic correction now
#
if geo==1 or tilt==1:
# find top priority orientation method
orient,az_type=pmag.get_orient(samp_data,PmagSpecRec["er_sample_name"])
if az_type not in method_codes:method_codes.append(az_type)
#
# if tilt selected, get stratigraphic correction
#
tiltblock,geoblock=[],[]
for rec in datablock:
if "sample_azimuth" in list(orient.keys()) and orient["sample_azimuth"]!="":
d_geo,i_geo=pmag.dogeo(rec[1],rec[2],float(orient["sample_azimuth"]),float(orient["sample_dip"]))
geoblock.append([rec[0],d_geo,i_geo,rec[3],rec[4],rec[5]])
if tilt==1 and "sample_bed_dip_direction" in list(orient.keys()):
d_tilt,i_tilt=pmag.dotilt(d_geo,i_geo,float(orient["sample_bed_dip_direction"]),float(orient["sample_bed_dip"]))
tiltblock.append([rec[0],d_tilt,i_tilt,rec[3],rec[4],rec[5]])
elif tilt==1:
if PmagSpecRec["er_sample_name"] not in skipped:
print('no tilt correction for ', PmagSpecRec["er_sample_name"],' skipping....')
skipped.append(PmagSpecRec["er_sample_name"])
else:
if PmagSpecRec["er_sample_name"] not in skipped:
print('no geographic correction for ', PmagSpecRec["er_sample_name"],' skipping....')
skipped.append(PmagSpecRec["er_sample_name"])
#
# get beg_pca, end_pca, pca
if PmagSpecRec['er_sample_name'] not in skipped:
compnum=-1
for spec in mkspec:
if spec[0]==s:
CompRec={}
for key in list(PmagSpecRec.keys()):CompRec[key]=PmagSpecRec[key]
compnum+=1
calculation_type=spec[1]
beg=float(spec[2])
end=float(spec[3])
if len(spec)>4:
comp_name=spec[4]
else:
comp_name=string.uppercase[compnum]
CompRec['specimen_comp_name']=comp_name
if beg < float(datablock[0][0]):beg=float(datablock[0][0])
if end > float(datablock[-1][0]):end=float(datablock[-1][0])
for l in range(len(datablock)):
if datablock[l][0]==beg:beg_pca=l
if datablock[l][0]==end:end_pca=l
if geo==1 and tilt==0:
mpars=pmag.domean(geoblock,beg_pca,end_pca,calculation_type)
if mpars["specimen_direction_type"]!="Error":
CompRec["specimen_dec"]='%7.1f ' %(mpars["specimen_dec"])
CompRec["specimen_inc"]='%7.1f ' %(mpars["specimen_inc"])
CompRec["specimen_tilt_correction"]='0'
if geo==1 and tilt==1:
mpars=pmag.domean(tiltblock,beg_pca,end_pca,calculation_type)
if mpars["specimen_direction_type"]!="Error":
CompRec["specimen_dec"]='%7.1f ' %(mpars["specimen_dec"])
CompRec["specimen_inc"]='%7.1f ' %(mpars["specimen_inc"])
CompRec["specimen_tilt_correction"]='100'
if geo==0 and tilt==0:
mpars=pmag.domean(datablock,beg_pca,end_pca,calculation_type)
if mpars["specimen_direction_type"]!="Error":
CompRec["specimen_dec"]='%7.1f ' %(mpars["specimen_dec"])
CompRec["specimen_inc"]='%7.1f ' %(mpars["specimen_inc"])
CompRec["specimen_tilt_correction"]='-1'
if mpars["specimen_direction_type"]=="Error":
pass
else:
CompRec["measurement_step_min"]='%8.3e '%(datablock[beg_pca][0])
try:
CompRec["measurement_step_max"]='%8.3e '%(datablock[end_pca][0] )
except:
print('error in end_pca ',PmagSpecRec['er_specimen_name'])
CompRec["specimen_correction"]='u'
if calculation_type!='DE-FM':
CompRec["specimen_mad"]='%7.1f '%(mpars["specimen_mad"])
CompRec["specimen_alpha95"]=""
else:
CompRec["specimen_mad"]=""
CompRec["specimen_alpha95"]='%7.1f '%(mpars["specimen_alpha95"])
CompRec["specimen_n"]='%i '%(mpars["specimen_n"])
CompRec["specimen_dang"]='%7.1f '%(mpars["specimen_dang"])
CompMeths=[]
for meth in method_codes:
if meth not in CompMeths:CompMeths.append(meth)
if calculation_type not in CompMeths:CompMeths.append(calculation_type)
if geo==1: CompMeths.append("DA-DIR-GEO")
if tilt==1: CompMeths.append("DA-DIR-TILT")
if "DE-BFP" not in calculation_type:
CompRec["specimen_direction_type"]='l'
else:
CompRec["specimen_direction_type"]='p'
CompRec["magic_method_codes"]=""
if len(CompMeths) != 0:
methstring=""
for meth in CompMeths:
methstring=methstring+ ":" +meth
CompRec["magic_method_codes"]=methstring.strip(':')
CompRec["specimen_description"]=comment
if len(inst_codes) != 0:
inststring=""
for inst in inst_codes:
inststring=inststring+ ":" +inst
CompRec["magic_instrument_codes"]=inststring.strip(':')
PmagSpecs.append(CompRec)
k+=1
pmag.magic_write(pmag_file,PmagSpecs,'pmag_specimens')
print("Recalculated specimen data stored in ",pmag_file) | NAME
zeq_magic_redo.py
DESCRIPTION
Calculate principal components through demagnetization data using bounds and calculation type stored in "redo" file
SYNTAX
zeq_magic_redo.py [command line options]
OPTIONS
-h prints help message
-usr USER: identify user, default is ""
-f: specify input file, default is magic_measurements.txt
-F: specify output file, default is zeq_specimens.txt
-fre REDO: specify redo file, default is "zeq_redo"
-fsa SAMPFILE: specify er_samples format file, default is "er_samples.txt"
-A : don't average replicate measurements, default is yes
-crd [s,g,t] :
specify coordinate system [s,g,t] [default is specimen coordinates]
are specimen, geographic, and tilt corrected respectively
NB: you must have a SAMPFILE in this directory to rotate from specimen coordinates
-leg: attaches "Recalculated from original measurements; supercedes published results. " to comment field
INPUTS
zeq_redo format file is:
specimen_name calculation_type[DE-BFL,DE-BFL-A,DE-BFL-O,DE-BFP,DE-FM] step_min step_max component_name[A,B,C] | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/deprecated/zeq_magic_redo.py#L8-L278 |
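
For reference, the zeq_redo line format documented above is parsed with a plain split(), exactly as in the `for spec in mkspec` loop. A small illustrative round-trip (the specimen name, bounds, and component letter are invented):

```python
# specimen, calculation type, step bounds, optional component name
line = "sr01a1 DE-BFL 473 823 A"  # invented example values
spec = line.split()
specimen, calculation_type = spec[0], spec[1]
beg, end = float(spec[2]), float(spec[3])      # e.g. temperature steps in kelvin
comp_name = spec[4] if len(spec) > 4 else "A"  # falls back to a letter by position
print(specimen, calculation_type, beg, end, comp_name)
```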
PmagPy/PmagPy | programs/dipole_plat.py | main | def main():
"""
NAME
dipole_plat.py
DESCRIPTION
gives paleolatitude from given inclination, assuming GAD field
SYNTAX
dipole_plat.py [command line options]<filename
OPTIONS
-h prints help message and quits
    -i allows interactive entry of inclination
-f file, specifies file name on command line
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
elif '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
data=f.readlines()
elif '-i' not in sys.argv:
data=sys.stdin.readlines()
if '-i' not in sys.argv:
for line in data:
rec=line.split()
print('%7.1f'%(pmag.plat(float(rec[0]))))
else:
while 1:
try:
inc=input("Inclination for converting to paleolatitude: <cntl-D> to quit ")
print('%7.1f'%(pmag.plat(float(inc))))
except:
print('\n Good-bye \n')
                sys.exit() | python | (identical to whole_func_string) | NAME
dipole_plat.py
DESCRIPTION
gives paleolatitude from given inclination, assuming GAD field
SYNTAX
dipole_plat.py [command line options]<filename
OPTIONS
-h prints help message and quits
 -i allows interactive entry of inclination
-f file, specifies file name on command line | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/dipole_plat.py#L7-L44 |
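
pmag.plat implements the geocentric axial dipole (GAD) relation tan(I) = 2 tan(latitude). A standard-library-only sketch of the same conversion (the sample inclination is arbitrary):

```python
import math

def plat(inc):
    """Paleolatitude (degrees) from inclination via tan(I) = 2 * tan(lat)."""
    return math.degrees(math.atan(math.tan(math.radians(inc)) / 2.0))

print('%7.1f' % plat(45.0))  # tan(45) = 1, so atan(0.5) ~ 26.6 degrees
```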
PmagPy/PmagPy | programs/zeq_magic.py | main | def main():
"""
NAME
zeq_magic.py
DESCRIPTION
    reads in a MagIC measurements formatted file, makes plots of remanence decay
    during demagnetization experiments. Reads in prior interpretations saved in
    a specimens formatted file. Interpretations are saved in the coordinate
    system used.
SYNTAX
zeq_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f MEASFILE: sets measurements format input file, default: measurements.txt
    -fsp SPECFILE: sets specimens format file with prior interpretations, default: specimens.txt
    -fsa SAMPFILE: sets samples format file with sample=>site information, default: samples.txt
    -fsi SITEFILE: sets sites format file with site=>location information, default: sites.txt
-Fp PLTFILE: sets filename for saved plot, default is name_type.fmt (where type is zijd, eqarea or decay curve)
-crd [s,g,t]: sets coordinate system, g=geographic, t=tilt adjusted, default: specimen coordinate system
-spc SPEC plots single specimen SPEC, saves plot with specified format
with optional -dir settings and quits
-dir [L,P,F][beg][end]: sets calculation type for principal component analysis, default is none
beg: starting step for PCA calculation
end: ending step for PCA calculation
[L,P,F]: calculation type for line, plane or fisher mean
must be used with -spc option
-fmt FMT: set format of saved plot [png,svg,jpg]
-A: suppresses averaging of replicate measurements, default is to average
-sav: saves all plots without review
"""
if '-h' in sys.argv:
print(main.__doc__)
return
dir_path = pmag.get_named_arg("-WD", default_val=os.getcwd())
meas_file = pmag.get_named_arg(
"-f", default_val="measurements.txt")
spec_file = pmag.get_named_arg(
"-fsp", default_val="specimens.txt")
specimen = pmag.get_named_arg(
"-spc", default_val="")
samp_file = pmag.get_named_arg("-fsa", default_val="samples.txt")
site_file = pmag.get_named_arg("-fsi", default_val="sites.txt")
plot_file = pmag.get_named_arg("-Fp", default_val="")
crd = pmag.get_named_arg("-crd", default_val="s")
fmt = pmag.get_named_arg("-fmt", "svg")
specimen = pmag.get_named_arg("-spc", default_val="")
interactive = True
save_plots = False
if "-sav" in sys.argv:
interactive = False
save_plots = True
ipmag.zeq_magic(meas_file, spec_file, crd, dir_path, n_plots="all",
                    save_plots=save_plots, fmt=fmt, interactive=interactive, specimen=specimen) | python | (identical to whole_func_string) | NAME
zeq_magic.py
DESCRIPTION
 reads in a MagIC measurements formatted file, makes plots of remanence decay
 during demagnetization experiments. Reads in prior interpretations saved in
 a specimens formatted file. Interpretations are saved in the coordinate
 system used.
SYNTAX
zeq_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f MEASFILE: sets measurements format input file, default: measurements.txt
 -fsp SPECFILE: sets specimens format file with prior interpretations, default: specimens.txt
 -fsa SAMPFILE: sets samples format file with sample=>site information, default: samples.txt
 -fsi SITEFILE: sets sites format file with site=>location information, default: sites.txt
-Fp PLTFILE: sets filename for saved plot, default is name_type.fmt (where type is zijd, eqarea or decay curve)
-crd [s,g,t]: sets coordinate system, g=geographic, t=tilt adjusted, default: specimen coordinate system
-spc SPEC plots single specimen SPEC, saves plot with specified format
with optional -dir settings and quits
-dir [L,P,F][beg][end]: sets calculation type for principal component analysis, default is none
beg: starting step for PCA calculation
end: ending step for PCA calculation
[L,P,F]: calculation type for line, plane or fisher mean
must be used with -spc option
-fmt FMT: set format of saved plot [png,svg,jpg]
-A: suppresses averaging of replicate measurements, default is to average
-sav: saves all plots without review | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/zeq_magic.py#L19-L71 |
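
Because main() here is a thin argument-parsing wrapper, the same run can be scripted directly. This mirrors the ipmag.zeq_magic call that main() ends with, using the -sav behaviour (save all plots, no review); it assumes pmagpy is installed and the default MagIC files exist in the working directory.

```python
import os
import pmagpy.ipmag as ipmag

# Equivalent of `zeq_magic.py -sav -crd g` run from the current directory.
ipmag.zeq_magic("measurements.txt", "specimens.txt", "g", os.getcwd(),
                n_plots="all", save_plots=True, fmt="svg",
                interactive=False, specimen="")
```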
PmagPy/PmagPy | programs/conversion_scripts2/iodp_dscr_magic2.py | main | def main(command_line=True, **kwargs):
"""
NAME
iodp_dscr_magic.py
DESCRIPTION
    converts IODP LIMS discrete sample format files to magic_measurements format files
SYNTAX
    iodp_dscr_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input .csv file, default is all in directory
-F FILE: specify output measurements file, default is magic_measurements.txt
-A : don't average replicate measurements
INPUTS
IODP discrete sample .csv file format exported from LIMS database
"""
#
# initialize defaults
version_num=pmag.get_version()
meas_file='magic_measurements.txt'
csv_file=''
MagRecs,Specs=[],[]
citation="This study"
dir_path,demag='.','NRM'
args=sys.argv
noave=0
# get command line args
if command_line:
if '-WD' in args:
ind=args.index("-WD")
dir_path=args[ind+1]
if '-ID' in args:
ind = args.index('-ID')
input_dir_path = args[ind+1]
else:
input_dir_path = dir_path
output_dir_path = dir_path
if "-h" in args:
print(main.__doc__)
return False
if "-A" in args: noave=1
if '-f' in args:
ind=args.index("-f")
csv_file=args[ind+1]
if '-F' in args:
ind=args.index("-F")
meas_file=args[ind+1]
if not command_line:
dir_path = kwargs.get('dir_path', '.')
input_dir_path = kwargs.get('input_dir_path', dir_path)
output_dir_path = dir_path # rename dir_path after input_dir_path is set
noave = kwargs.get('noave', 0) # default (0) is DO average
csv_file = kwargs.get('csv_file', '')
meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
# format variables
meas_file= os.path.join(output_dir_path, meas_file)
if csv_file=="":
filelist=os.listdir(input_dir_path) # read in list of files to import
else:
csv_file = os.path.join(input_dir_path, csv_file)
filelist=[csv_file]
# parsing the data
file_found = False
for fname in filelist: # parse each file
if fname[-3:].lower()=='csv':
file_found = True
print('processing: ',fname)
with open(fname, 'r') as finput:
data = list(finput.readlines())
                keys = data[0].replace('\n','').split(',') # split the header line on commas
interval_key="Offset (cm)"
demag_key="Demag level (mT)"
offline_demag_key="Treatment Value (mT or °C)"
offline_treatment_type="Treatment type"
run_key="Test No."
if "Inclination background + tray corrected (deg)" in keys: inc_key="Inclination background + tray corrected (deg)"
if "Inclination background & tray corrected (deg)" in keys: inc_key="Inclination background & tray corrected (deg)"
if "Declination background + tray corrected (deg)" in keys: dec_key="Declination background + tray corrected (deg)"
if "Declination background & tray corrected (deg)" in keys: dec_key="Declination background & tray corrected (deg)"
if "Intensity background + tray corrected (A/m)" in keys: int_key="Intensity background + tray corrected (A/m)"
if "Intensity background & tray corrected (A/m)" in keys: int_key="Intensity background & tray corrected (A/m)"
type="Type"
sect_key="Sect"
half_key="A/W"
# need to add volume_key to LORE format!
if "Sample volume (cm^3)" in keys:volume_key="Sample volume (cm^3)"
if "Sample volume (cc)" in keys:volume_key="Sample volume (cc)"
if "Sample volume (cm³)" in keys:volume_key="Sample volume (cm³)"
for line in data[1:]:
InRec={}
for k in range(len(keys)):InRec[keys[k]]=line.split(',')[k]
inst="IODP-SRM"
MagRec={}
expedition=InRec['Exp']
location=InRec['Site']+InRec['Hole']
offsets=InRec[interval_key].split('.') # maintain consistency with er_samples convention of using top interval
if len(offsets)==1:
offset=int(offsets[0])
else:
offset=int(offsets[0])-1
#interval=str(offset+1)# maintain consistency with er_samples convention of using top interval
interval=str(offset)# maintain consistency with er_samples convention of using top interval
specimen=expedition+'-'+location+'-'+InRec['Core']+InRec[type]+"-"+InRec[sect_key]+'_'+InRec[half_key]+'_'+interval
if specimen not in Specs:Specs.append(specimen)
MagRec['er_expedition_name']=expedition
MagRec['er_location_name']=location
MagRec['er_site_name']=specimen
MagRec['er_citation_names']=citation
MagRec['er_specimen_name']=specimen
MagRec['er_sample_name']=specimen
MagRec['er_site_name']=specimen
# set up measurement record - default is NRM
MagRec['magic_software_packages']=version_num
MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["treatment_ac_field"]='0'
MagRec["treatment_dc_field"]='0'
MagRec["treatment_dc_field_phi"]='0'
MagRec["treatment_dc_field_theta"]='0'
MagRec["measurement_flag"]='g' # assume all data are "good"
MagRec["measurement_standard"]='u' # assume all data are "good"
MagRec["measurement_csd"]='0' # assume all data are "good"
volume=InRec[volume_key]
MagRec["magic_method_codes"]='LT-NO'
sort_by='treatment_ac_field' # set default to AF demag
if InRec[demag_key]!="0":
MagRec['magic_method_codes'] = 'LT-AF-Z'
inst=inst+':IODP-SRM-AF' # measured on shipboard in-line 2G AF
treatment_value=float(InRec[demag_key].strip('"'))*1e-3 # convert mT => T
if sort_by =="treatment_ac_field":
MagRec["treatment_ac_field"]=treatment_value # AF demag in treat mT => T
else:
MagRec["treatment_ac_field"]=str(treatment_value)# AF demag in treat mT => T
elif offline_treatment_type in list(InRec.keys()) and InRec[offline_treatment_type]!="":
if "Lowrie" in InRec['Comments']:
MagRec['magic_method_codes'] = 'LP-IRM-3D'
treatment_value=float(InRec[offline_demag_key].strip('"'))+273. # convert C => K
MagRec["treatment_temp"]=treatment_value
MagRec["treatment_ac_field"]="0"
sort_by='treatment_temp'
elif 'Isothermal' in InRec[offline_treatment_type]:
MagRec['magic_method_codes'] = 'LT-IRM'
treatment_value=float(InRec[offline_demag_key].strip('"'))*1e-3 # convert mT => T
MagRec["treatment_dc_field"]=treatment_value
MagRec["treatment_ac_field"]="0"
sort_by='treatment_dc_field'
MagRec["measurement_standard"]='u' # assume all data are "good"
vol=float(volume)*1e-6 # convert from cc to m^3
if run_key in list(InRec.keys()):
run_number=InRec[run_key]
MagRec['external_database_ids']=run_number
MagRec['external_database_names']='LIMS'
else:
MagRec['external_database_ids']=""
MagRec['external_database_names']=''
MagRec['measurement_description']='sample orientation: '+InRec['Sample orientation']
MagRec['measurement_inc']=InRec[inc_key].strip('"')
MagRec['measurement_dec']=InRec[dec_key].strip('"')
intens= InRec[int_key].strip('"')
MagRec['measurement_magn_moment']='%8.3e'%(float(intens)*vol) # convert intensity from A/m to Am^2 using vol
MagRec['magic_instrument_codes']=inst
MagRec['measurement_number']='1'
MagRec['measurement_positions']=''
MagRecs.append(MagRec)
if not file_found:
print("No .csv files were found")
return False, "No .csv files were found"
MagOuts=[]
for spec in Specs:
Speclist=pmag.get_dictitem(MagRecs,'er_specimen_name',spec,'T')
Meassorted=sorted(Speclist, key=lambda x,y=None: int(round(float(x[sort_by])-float(y[sort_by]))) if y!=None else 0)
for rec in Meassorted:
for key in list(rec.keys()): rec[key]=str(rec[key])
MagOuts.append(rec)
Fixed=pmag.measurements_methods(MagOuts,noave)
Out,keys=pmag.fillkeys(Fixed)
if pmag.magic_write(meas_file,Out,'magic_measurements'):
print('data stored in ',meas_file)
return True, meas_file
else:
print('no data found. bad magfile?')
        return False, 'no data found. bad magfile?' | python | (identical to whole_func_string) | NAME
iodp_dscr_magic.py
DESCRIPTION
 converts IODP LIMS discrete sample format files to magic_measurements format files
SYNTAX
 iodp_dscr_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input .csv file, default is all in directory
-F FILE: specify output measurements file, default is magic_measurements.txt
-A : don't average replicate measurements
INPUTS
IODP discrete sample .csv file format exported from LIMS database | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/conversion_scripts2/iodp_dscr_magic2.py#L9-L197 |
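
The importer's unit handling is easy to check in isolation: AF levels go from mT to tesla, volumes from cc to m^3, and moment is intensity (A/m) times volume. A toy recalculation of one row, with all values invented:

```python
demag_mT = 10.0        # "Demag level (mT)" column
intensity_Am = 2.5e-2  # tray-corrected intensity, A/m
volume_cc = 7.0        # "Sample volume (cc)" column

treatment_ac_field_T = demag_mT * 1e-3  # mT -> T
vol_m3 = volume_cc * 1e-6               # cc -> m^3
moment_Am2 = intensity_Am * vol_m3      # A/m -> Am^2
print('%8.3e' % moment_Am2)             # same format the importer writes
```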
PmagPy/PmagPy | programs/conversion_scripts2/sio_magic2.py | main | def main(command_line=True, **kwargs):
"""
NAME
sio_magic.py
DESCRIPTION
converts SIO .mag format files to magic_measurements format files
SYNTAX
sio_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-usr USER: identify user, default is ""
-f FILE: specify .mag format input file, required
-fsa SAMPFILE : specify er_samples.txt file relating samples, site and locations names,default is none -- values in SAMPFILE will override selections for -loc (location), -spc (designate specimen), and -ncn (sample-site naming convention)
-F FILE: specify output file, default is magic_measurements.txt
    -Fsy: specify er_synthetics file, default is er_synthetics.txt
-LP [colon delimited list of protocols, include all that apply]
AF: af demag
T: thermal including thellier but not trm acquisition
S: Shaw method
I: IRM (acquisition)
I3d: 3D IRM experiment
N: NRM only
TRM: trm acquisition
ANI: anisotropy experiment
D: double AF demag
G: triple AF demag (GRM protocol)
CR: cooling rate experiment.
        The treatment coding of the measurement file should be: XXX.00,XXX.10, XXX.20 ...XXX.70 etc. (XXX.00 is optional)
        where XXX is the temperature and .10, .20... are running numbers of the cooling rate steps.
        XXX.00 is the optional zerofield baseline. XXX.70 is the alteration check.
        syntax in sio_magic is: -LP CR xxx,yyy,zzz,..... xxx -A
        where xxx, yyy, zzz...xxx are cooling times in [K/minutes], separated by commas, ordered in the same order as XXX.10,XXX.20 ...XXX.70
if you use a zerofield step then no need to specify the cooling rate for the zerofield
It is important to add to the command line the -A option so the measurements will not be averaged.
But users need to make sure that there are no duplicate measurements in the file
-V [1,2,3] units of IRM field in volts using ASC coil #1,2 or 3
-spc NUM : specify number of characters to designate a specimen, default = 0
-loc LOCNAME : specify location/study name, must have either LOCNAME or SAMPFILE or be a synthetic
-syn INST TYPE: sets these specimens as synthetics created at institution INST and of type TYPE
-ins INST : specify which demag instrument was used (e.g, SIO-Suzy or SIO-Odette),default is ""
-dc B PHI THETA: dc lab field (in micro tesla) and phi,theta, default is none
NB: use PHI, THETA = -1 -1 to signal that it changes, i.e. in anisotropy experiment
-ac B : peak AF field (in mT) for ARM acquisition, default is none
-ncn NCON: specify naming convention: default is #1 below
-A: don't average replicate measurements
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
        [2] XXXX-YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail [email protected] for help.
[8] synthetic - has no site name
[9] ODP naming convention
INPUT
        Best to put separate experiments (all AF, thermal, thellier, trm acquisition, Shaw, etc.) in
        separate .mag files (eg. af.mag, thermal.mag, etc.)
Format of SIO .mag files:
Spec Treat CSD Intensity Declination Inclination [optional metadata string]
Spec: specimen name
Treat: treatment step
XXX T in Centigrade
XXX AF in mT
for special experiments:
Thellier:
XXX.0 first zero field step
XXX.1 first in field step [XXX.0 and XXX.1 can be done in any order]
XXX.2 second in-field step at lower temperature (pTRM check)
XXX.3 second zero-field step after infield (pTRM check step)
XXX.3 MUST be done in this order [XXX.0, XXX.1 [optional XXX.2] XXX.3]
AARM:
X.00 baseline step (AF in zero bias field - high peak field)
X.1 ARM step (in field step) where
X is the step number in the 15 position scheme
(see Appendix to Lecture 13 - http://magician.ucsd.edu/Essentials_2)
ATRM:
X.00 optional baseline
X.1 ATRM step (+X)
X.2 ATRM step (+Y)
X.3 ATRM step (+Z)
X.4 ATRM step (-X)
X.5 ATRM step (-Y)
X.6 ATRM step (-Z)
X.7 optional alteration check (+X)
TRM:
XXX.YYY XXX is temperature step of total TRM
YYY is dc field in microtesla
        Intensity assumed to be total moment in 10^-3 Am^2 (emu)
Declination: Declination in specimen coordinate system
        Inclination: Inclination in specimen coordinate system
        Optional metadata string: mm/dd/yy;hh:mm;[dC,mT];xx.xx;UNITS;USER;INST;NMEAS
hh in 24 hours.
dC or mT units of treatment XXX (see Treat above) for thermal or AF respectively
xx.xxx DC field
UNITS of DC field (microT, mT)
INST: instrument code, number of axes, number of positions (e.g., G34 is 2G, three axes,
measured in four positions)
NMEAS: number of measurements in a single position (1,3,200...)
"""
# initialize some stuff
mag_file = None
codelist = None
infile_type="mag"
noave=0
methcode,inst="LP-NO",""
phi,theta,peakfield,labfield=0,0,0,0
pTRM,MD,samp_con,Z=0,0,'1',1
dec=[315,225,180,135,45,90,270,270,270,90,180,180,0,0,0]
inc=[0,0,0,0,0,-45,-45,0,45,45,45,-45,-90,-45,45]
tdec=[0,90,0,180,270,0,0,90,0]
tinc=[0,0,90,0,0,-90,0,0,90]
missing=1
demag="N"
er_location_name=""
citation='This study'
args=sys.argv
fmt='old'
syn=0
synfile='er_synthetics.txt'
samp_infile,Samps='',[]
trm=0
irm=0
specnum=0
coil=""
mag_file=""
#
# get command line arguments
#
meas_file="magic_measurements.txt"
user=""
if not command_line:
user = kwargs.get('user', '')
meas_file = kwargs.get('meas_file', '')
syn_file = kwargs.get('syn_file', '')
mag_file = kwargs.get('mag_file', '')
labfield = kwargs.get('labfield', '')
if labfield:
labfield = float(labfield) *1e-6
else:
labfield = 0
phi = kwargs.get('phi', 0)
if phi:
phi = float(phi)
else:
phi = 0
theta = kwargs.get('theta', 0)
if theta:
theta=float(theta)
else:
theta = 0
peakfield = kwargs.get('peakfield', 0)
if peakfield:
peakfield=float(peakfield) *1e-3
else:
peakfield = 0
specnum = kwargs.get('specnum', 0)
samp_con = kwargs.get('samp_con', '1')
er_location_name = kwargs.get('er_location_name', '')
samp_infile = kwargs.get('samp_infile', '')
syn = kwargs.get('syn', 0)
institution = kwargs.get('institution', '')
syntype = kwargs.get('syntype', '')
inst = kwargs.get('inst', '')
noave = kwargs.get('noave', 0)
codelist = kwargs.get('codelist', '')
coil = kwargs.get('coil', '')
cooling_rates = kwargs.get('cooling_rates', '')
if command_line:
if "-h" in args:
print(main.__doc__)
return False
if "-usr" in args:
ind=args.index("-usr")
user=args[ind+1]
if '-F' in args:
ind=args.index("-F")
meas_file=args[ind+1]
if '-Fsy' in args:
ind=args.index("-Fsy")
synfile=args[ind+1]
if '-f' in args:
ind=args.index("-f")
mag_file=args[ind+1]
if "-dc" in args:
ind=args.index("-dc")
labfield=float(args[ind+1])*1e-6
phi=float(args[ind+2])
theta=float(args[ind+3])
if "-ac" in args:
ind=args.index("-ac")
peakfield=float(args[ind+1])*1e-3
if "-spc" in args:
ind=args.index("-spc")
specnum=int(args[ind+1])
if "-loc" in args:
ind=args.index("-loc")
er_location_name=args[ind+1]
if "-fsa" in args:
ind=args.index("-fsa")
samp_infile = args[ind+1]
if '-syn' in args:
syn=1
ind=args.index("-syn")
institution=args[ind+1]
syntype=args[ind+2]
if '-fsy' in args:
ind=args.index("-fsy")
synfile=args[ind+1]
if "-ins" in args:
ind=args.index("-ins")
inst=args[ind+1]
if "-A" in args: noave=1
if "-ncn" in args:
ind=args.index("-ncn")
samp_con=sys.argv[ind+1]
if '-LP' in args:
ind=args.index("-LP")
codelist=args[ind+1]
if "-V" in args:
ind=args.index("-V")
coil=args[ind+1]
# make sure all initial values are correctly set up (whether they come from the command line or a GUI)
if samp_infile:
Samps, file_type = pmag.magic_read(samp_infile)
if coil:
coil = str(coil)
methcode="LP-IRM"
irmunits = "V"
if coil not in ["1","2","3"]:
print(main.__doc__)
print('not a valid coil specification')
return False, '{} is not a valid coil specification'.format(coil)
if mag_file:
try:
#with open(mag_file,'r') as finput:
# lines = finput.readlines()
lines=pmag.open_file(mag_file)
except:
print("bad mag file name")
return False, "bad mag file name"
if not mag_file:
print(main.__doc__)
print("mag_file field is required option")
return False, "mag_file field is required option"
if specnum!=0:
specnum=-specnum
#print 'samp_con:', samp_con
if samp_con:
if "4" == samp_con[0]:
if "-" not in samp_con:
print("naming convention option [4] must be in form 4-Z where Z is an integer")
print('---------------')
return False, "naming convention option [4] must be in form 4-Z where Z is an integer"
else:
Z=samp_con.split("-")[1]
samp_con="4"
if "7" == samp_con[0]:
if "-" not in samp_con:
print("option [7] must be in form 7-Z where Z is an integer")
return False, "option [7] must be in form 7-Z where Z is an integer"
else:
Z=samp_con.split("-")[1]
samp_con="7"
if codelist:
codes=codelist.split(':')
if "AF" in codes:
demag='AF'
if'-dc' not in args: methcode="LT-AF-Z"
if'-dc' in args: methcode="LT-AF-I"
if "T" in codes:
demag="T"
if '-dc' not in args: methcode="LT-T-Z"
if '-dc' in args: methcode="LT-T-I"
if "I" in codes:
methcode="LP-IRM"
irmunits="mT"
if "I3d" in codes:
methcode="LT-T-Z:LP-IRM-3D"
if "S" in codes:
demag="S"
methcode="LP-PI-TRM:LP-PI-ALT-AFARM"
trm_labfield=labfield
ans=input("DC lab field for ARM step: [50uT] ")
if ans=="":
arm_labfield=50e-6
else:
arm_labfield=float(ans)*1e-6
ans=input("temperature for total trm step: [600 C] ")
if ans=="":
trm_peakT=600+273 # convert to kelvin
else:
trm_peakT=float(ans)+273 # convert to kelvin
if "G" in codes: methcode="LT-AF-G"
if "D" in codes: methcode="LT-AF-D"
if "TRM" in codes:
demag="T"
trm=1
if "CR" in codes:
demag="T"
cooling_rate_experiment=1
if command_line:
ind=args.index("CR")
cooling_rates=args[ind+1]
cooling_rates_list=cooling_rates.split(',')
else:
cooling_rates_list=str(cooling_rates).split(',')
if demag=="T" and "ANI" in codes:
methcode="LP-AN-TRM"
if demag=="T" and "CR" in codes:
methcode="LP-CR-TRM"
if demag=="AF" and "ANI" in codes:
methcode="LP-AN-ARM"
if labfield==0: labfield=50e-6
if peakfield==0: peakfield=.180
SynRecs,MagRecs=[],[]
version_num=pmag.get_version()
##################################
if 1:
#if infile_type=="SIO format":
for line in lines:
instcode=""
if len(line)>2:
SynRec={}
MagRec={}
MagRec['er_location_name']=er_location_name
MagRec['magic_software_packages']=version_num
MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["treatment_ac_field"]='0'
MagRec["treatment_dc_field"]='0'
MagRec["treatment_dc_field_phi"]='0'
MagRec["treatment_dc_field_theta"]='0'
meas_type="LT-NO"
rec=line.split()
if rec[1]==".00":rec[1]="0.00"
treat=rec[1].split('.')
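                # illustrative example (hypothetical record): rec[1]="500.1"
                # splits into treat=['500','1'], i.e. a 500 degree (or 500 mT)
                # step with treatment subcode 1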
if methcode=="LP-IRM":
if irmunits=='mT':
labfield=float(treat[0])*1e-3
else:
labfield=pmag.getfield(irmunits,coil,treat[0])
if rec[1][0]!="-":
phi,theta=0.,90.
else:
phi,theta=0.,-90.
meas_type="LT-IRM"
MagRec["treatment_dc_field"]='%8.3e'%(labfield)
MagRec["treatment_dc_field_phi"]='%7.1f'%(phi)
MagRec["treatment_dc_field_theta"]='%7.1f'%(theta)
if len(rec)>6:
                    code1=rec[6].split(';') # break e.g., 10/15/02;7:45 into date and time
if len(code1)==2: # old format with AM/PM
missing=0
code2=code1[0].split('/') # break date into mon/day/year
code3=rec[7].split(';') # break e.g., AM;C34;200 into time;instr/axes/measuring pos;number of measurements
yy=int(code2[2])
if yy <90:
yyyy=str(2000+yy)
else: yyyy=str(1900+yy)
mm=int(code2[0])
if mm<10:
mm="0"+str(mm)
else: mm=str(mm)
dd=int(code2[1])
if dd<10:
dd="0"+str(dd)
else: dd=str(dd)
time=code1[1].split(':')
hh=int(time[0])
if code3[0]=="PM":hh=hh+12
if hh<10:
hh="0"+str(hh)
else: hh=str(hh)
min=int(time[1])
if min<10:
min= "0"+str(min)
else: min=str(min)
MagRec["measurement_date"]=yyyy+":"+mm+":"+dd+":"+hh+":"+min+":00.00"
MagRec["measurement_time_zone"]='SAN'
if inst=="":
if code3[1][0]=='C':instcode='SIO-bubba'
if code3[1][0]=='G':instcode='SIO-flo'
else:
instcode=''
MagRec["measurement_positions"]=code3[1][2]
elif len(code1)>2: # newest format (cryo7 or later)
if "LP-AN-ARM" not in methcode:labfield=0
fmt='new'
date=code1[0].split('/') # break date into mon/day/year
yy=int(date[2])
if yy <90:
yyyy=str(2000+yy)
else: yyyy=str(1900+yy)
mm=int(date[0])
if mm<10:
mm="0"+str(mm)
else: mm=str(mm)
dd=int(date[1])
if dd<10:
dd="0"+str(dd)
else: dd=str(dd)
time=code1[1].split(':')
hh=int(time[0])
if hh<10:
hh="0"+str(hh)
else: hh=str(hh)
min=int(time[1])
if min<10:
min= "0"+str(min)
else:
min=str(min)
MagRec["measurement_date"]=yyyy+":"+mm+":"+dd+":"+hh+":"+min+":00.00"
MagRec["measurement_time_zone"]='SAN'
if inst=="":
if code1[6][0]=='C':
instcode='SIO-bubba'
if code1[6][0]=='G':
instcode='SIO-flo'
else:
instcode=''
if len(code1)>1:
MagRec["measurement_positions"]=code1[6][2]
else:
MagRec["measurement_positions"]=code1[7] # takes care of awkward format with bubba and flo being different
if user=="":user=code1[5]
if code1[2][-1]=='C':
demag="T"
if code1[4]=='microT' and float(code1[3])!=0. and "LP-AN-ARM" not in methcode: labfield=float(code1[3])*1e-6
if code1[2]=='mT' and methcode!="LP-IRM":
demag="AF"
if code1[4]=='microT' and float(code1[3])!=0.: labfield=float(code1[3])*1e-6
if code1[4]=='microT' and labfield!=0. and meas_type!="LT-IRM":
phi,theta=0.,-90.
if demag=="T": meas_type="LT-T-I"
if demag=="AF": meas_type="LT-AF-I"
MagRec["treatment_dc_field"]='%8.3e'%(labfield)
MagRec["treatment_dc_field_phi"]='%7.1f'%(phi)
MagRec["treatment_dc_field_theta"]='%7.1f'%(theta)
if code1[4]=='' or labfield==0. and meas_type!="LT-IRM":
if demag=='T':meas_type="LT-T-Z"
if demag=="AF":meas_type="LT-AF-Z"
MagRec["treatment_dc_field"]='0'
if syn==0:
MagRec["er_specimen_name"]=rec[0]
MagRec["er_synthetic_name"]=""
MagRec["er_site_name"]=""
if specnum!=0:
MagRec["er_sample_name"]=rec[0][:specnum]
else:
MagRec["er_sample_name"]=rec[0]
if samp_infile and Samps: # if samp_infile was provided AND yielded sample data
samp=pmag.get_dictitem(Samps,'er_sample_name',MagRec['er_sample_name'],'T')
if len(samp)>0:
MagRec["er_location_name"]=samp[0]["er_location_name"]
MagRec["er_site_name"]=samp[0]["er_site_name"]
else:
MagRec['er_location_name']=''
MagRec["er_site_name"]=''
elif int(samp_con)!=6:
site=pmag.parse_site(MagRec['er_sample_name'],samp_con,Z)
MagRec["er_site_name"]=site
if MagRec['er_site_name']=="":
print('No site name found for: ',MagRec['er_specimen_name'],MagRec['er_sample_name'])
if MagRec["er_location_name"]=="":
print('no location name for: ',MagRec["er_specimen_name"])
else:
MagRec["er_specimen_name"]=rec[0]
if specnum!=0:
MagRec["er_sample_name"]=rec[0][:specnum]
else:
MagRec["er_sample_name"]=rec[0]
MagRec["er_site_name"]=""
MagRec["er_synthetic_name"]=MagRec["er_specimen_name"]
SynRec["er_synthetic_name"]=MagRec["er_specimen_name"]
site=pmag.parse_site(MagRec['er_sample_name'],samp_con,Z)
SynRec["synthetic_parent_sample"]=site
SynRec["er_citation_names"]="This study"
SynRec["synthetic_institution"]=institution
SynRec["synthetic_type"]=syntype
SynRecs.append(SynRec)
if float(rec[1])==0:
pass
elif demag=="AF":
if methcode != "LP-AN-ARM":
MagRec["treatment_ac_field"]='%8.3e' %(float(rec[1])*1e-3) # peak field in tesla
if meas_type=="LT-AF-Z": MagRec["treatment_dc_field"]='0'
else: # AARM experiment
if treat[1][0]=='0':
meas_type="LT-AF-Z:LP-AN-ARM:"
MagRec["treatment_ac_field"]='%8.3e' %(peakfield) # peak field in tesla
MagRec["treatment_dc_field"]='%8.3e'%(0)
if labfield!=0 and methcode!="LP-AN-ARM": print("Warning - inconsistency in mag file with lab field - overriding file with 0")
else:
meas_type="LT-AF-I:LP-AN-ARM"
ipos=int(treat[0])-1
MagRec["treatment_dc_field_phi"]='%7.1f' %(dec[ipos])
MagRec["treatment_dc_field_theta"]='%7.1f'% (inc[ipos])
MagRec["treatment_dc_field"]='%8.3e'%(labfield)
MagRec["treatment_ac_field"]='%8.3e' %(peakfield) # peak field in tesla
elif demag=="T" and methcode == "LP-AN-TRM":
MagRec["treatment_temp"]='%8.3e' % (float(treat[0])+273.) # temp in kelvin
if treat[1][0]=='0':
meas_type="LT-T-Z:LP-AN-TRM"
MagRec["treatment_dc_field"]='%8.3e'%(0)
MagRec["treatment_dc_field_phi"]='0'
MagRec["treatment_dc_field_theta"]='0'
else:
MagRec["treatment_dc_field"]='%8.3e'%(labfield)
if treat[1][0]=='7': # alteration check as final measurement
meas_type="LT-PTRM-I:LP-AN-TRM"
else:
meas_type="LT-T-I:LP-AN-TRM"
# find the direction of the lab field in two ways:
# (1) using the treatment coding (XX.1=+x, XX.2=+y, XX.3=+z, XX.4=-x, XX.5=-y, XX.6=-z)
ipos_code=int(treat[1][0])-1
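                        # e.g., a hypothetical step "500.1" gives ipos_code=0,
                        # i.e. the +x position (tdec[0]=0, tinc[0]=0)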
# (2) using the magnetization
DEC=float(rec[4])
INC=float(rec[5])
if INC < 45 and INC > -45:
if DEC>315 or DEC<45: ipos_guess=0
if DEC>45 and DEC<135: ipos_guess=1
if DEC>135 and DEC<225: ipos_guess=3
if DEC>225 and DEC<315: ipos_guess=4
else:
if INC >45: ipos_guess=2
if INC <-45: ipos_guess=5
# prefer the guess over the code
ipos=ipos_guess
MagRec["treatment_dc_field_phi"]='%7.1f' %(tdec[ipos])
MagRec["treatment_dc_field_theta"]='%7.1f'% (tinc[ipos])
# check it
if ipos_guess!=ipos_code and treat[1][0]!='7':
print("-E- ERROR: check specimen %s step %s, ATRM measurements, coding does not match the direction of the lab field!"%(rec[0],".".join(list(treat))))
elif demag=="S": # Shaw experiment
if treat[1][1]=='0':
if int(treat[0])!=0:
MagRec["treatment_ac_field"]='%8.3e' % (float(treat[0])*1e-3) # AF field in tesla
MagRec["treatment_dc_field"]='0'
meas_type="LT-AF-Z" # first AF
else:
meas_type="LT-NO"
MagRec["treatment_ac_field"]='0'
MagRec["treatment_dc_field"]='0'
elif treat[1][1]=='1':
if int(treat[0])==0:
MagRec["treatment_ac_field"]='%8.3e' %(peakfield) # peak field in tesla
MagRec["treatment_dc_field"]='%8.3e'%(arm_labfield)
MagRec["treatment_dc_field_phi"]='%7.1f'%(phi)
MagRec["treatment_dc_field_theta"]='%7.1f'%(theta)
meas_type="LT-AF-I"
else:
MagRec["treatment_ac_field"]='%8.3e' % ( float(treat[0])*1e-3) # AF field in tesla
MagRec["treatment_dc_field"]='0'
meas_type="LT-AF-Z"
elif treat[1][1]=='2':
if int(treat[0])==0:
MagRec["treatment_ac_field"]='0'
MagRec["treatment_dc_field"]='%8.3e'%(trm_labfield)
MagRec["treatment_dc_field_phi"]='%7.1f'%(phi)
MagRec["treatment_dc_field_theta"]='%7.1f'%(theta)
MagRec["treatment_temp"]='%8.3e' % (trm_peakT)
meas_type="LT-T-I"
else:
MagRec["treatment_ac_field"]='%8.3e' % ( float(treat[0])*1e-3) # AF field in tesla
MagRec["treatment_dc_field"]='0'
meas_type="LT-AF-Z"
elif treat[1][1]=='3':
if int(treat[0])==0:
MagRec["treatment_ac_field"]='%8.3e' %(peakfield) # peak field in tesla
MagRec["treatment_dc_field"]='%8.3e'%(arm_labfield)
MagRec["treatment_dc_field_phi"]='%7.1f'%(phi)
MagRec["treatment_dc_field_theta"]='%7.1f'%(theta)
meas_type="LT-AF-I"
else:
MagRec["treatment_ac_field"]='%8.3e' % ( float(treat[0])*1e-3) # AF field in tesla
MagRec["treatment_dc_field"]='0'
meas_type="LT-AF-Z"
                # Cooling rate experiment # added by rshaar
elif demag=="T" and methcode == "LP-CR-TRM":
MagRec["treatment_temp"]='%8.3e' % (float(treat[0])+273.) # temp in kelvin
if treat[1][0]=='0':
meas_type="LT-T-Z:LP-CR-TRM"
MagRec["treatment_dc_field"]='%8.3e'%(0)
MagRec["treatment_dc_field_phi"]='0'
MagRec["treatment_dc_field_theta"]='0'
else:
MagRec["treatment_dc_field"]='%8.3e'%(labfield)
if treat[1][0]=='7': # alteration check as final measurement
meas_type="LT-PTRM-I:LP-CR-TRM"
else:
meas_type="LT-T-I:LP-CR-TRM"
MagRec["treatment_dc_field_phi"]='%7.1f' % (phi) # labfield phi
MagRec["treatment_dc_field_theta"]='%7.1f' % (theta) # labfield theta
indx=int(treat[1][0])-1
                    # alteration check marked as 0.7 in the measurement file
if indx==6:
cooling_time= cooling_rates_list[-1]
else:
cooling_time=cooling_rates_list[indx]
MagRec["measurement_description"]="cooling_rate"+":"+cooling_time+":"+"K/min"
elif demag!='N':
if len(treat)==1:treat.append('0')
MagRec["treatment_temp"]='%8.3e' % (float(treat[0])+273.) # temp in kelvin
if trm==0: # demag=T and not trmaq
if treat[1][0]=='0':
meas_type="LT-T-Z"
else:
MagRec["treatment_dc_field"]='%8.3e' % (labfield) # labfield in tesla (convert from microT)
MagRec["treatment_dc_field_phi"]='%7.1f' % (phi) # labfield phi
MagRec["treatment_dc_field_theta"]='%7.1f' % (theta) # labfield theta
if treat[1][0]=='1':meas_type="LT-T-I" # in-field thermal step
if treat[1][0]=='2':
meas_type="LT-PTRM-I" # pTRM check
pTRM=1
if treat[1][0]=='3':
MagRec["treatment_dc_field"]='0' # this is a zero field step
meas_type="LT-PTRM-MD" # pTRM tail check
else:
labfield=float(treat[1])*1e-6
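                        # e.g., a hypothetical step "500.40" means total TRM
                        # acquired at 500 C in a 40 microtesla dc field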
MagRec["treatment_dc_field"]='%8.3e' % (labfield) # labfield in tesla (convert from microT)
MagRec["treatment_dc_field_phi"]='%7.1f' % (phi) # labfield phi
MagRec["treatment_dc_field_theta"]='%7.1f' % (theta) # labfield theta
meas_type="LT-T-I:LP-TRM" # trm acquisition experiment
MagRec["measurement_csd"]=rec[2]
MagRec["measurement_magn_moment"]='%10.3e'% (float(rec[3])*1e-3) # moment in Am^2 (from emu)
MagRec["measurement_dec"]=rec[4]
MagRec["measurement_inc"]=rec[5]
MagRec["magic_instrument_codes"]=instcode
MagRec["er_analyst_mail_names"]=user
MagRec["er_citation_names"]=citation
if "LP-IRM-3D" in methcode : meas_type=methcode
#MagRec["magic_method_codes"]=methcode.strip(':')
MagRec["magic_method_codes"]=meas_type
MagRec["measurement_flag"]='g'
MagRec["er_specimen_name"]=rec[0]
if 'std' in rec[0]:
MagRec["measurement_standard"]='s'
else:
MagRec["measurement_standard"]='u'
MagRec["measurement_number"]='1'
#print MagRec['treatment_temp']
MagRecs.append(MagRec)
MagOuts=pmag.measurements_methods(MagRecs,noave)
pmag.magic_write(meas_file,MagOuts,'magic_measurements')
print("results put in ",meas_file)
if len(SynRecs)>0:
pmag.magic_write(synfile,SynRecs,'er_synthetics')
print("synthetics put in ",synfile)
return True, meas_file | python | def main(command_line=True, **kwargs):
"""
NAME
sio_magic.py
DESCRIPTION
converts SIO .mag format files to magic_measurements format files
SYNTAX
sio_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-usr USER: identify user, default is ""
-f FILE: specify .mag format input file, required
        -fsa SAMPFILE : specify er_samples.txt file relating sample, site, and location names, default is none -- values in SAMPFILE will override selections for -loc (location), -spc (designate specimen), and -ncn (sample-site naming convention)
-F FILE: specify output file, default is magic_measurements.txt
        -Fsy: specify er_synthetics file, default is er_synthetics.txt
-LP [colon delimited list of protocols, include all that apply]
AF: af demag
T: thermal including thellier but not trm acquisition
S: Shaw method
I: IRM (acquisition)
I3d: 3D IRM experiment
N: NRM only
TRM: trm acquisition
ANI: anisotropy experiment
D: double AF demag
G: triple AF demag (GRM protocol)
CR: cooling rate experiment.
            The treatment coding of the measurement file should be: XXX.00,XXX.10, XXX.20 ...XXX.70 etc. (XXX.00 is optional)
            where XXX is the temperature and .10, .20, ... are running numbers of the cooling rate steps.
            XXX.00 is the optional zerofield baseline. XXX.70 is the alteration check.
            syntax in sio_magic is: -LP CR xxx,yyy,zzz,..... xxx -A
            where xxx, yyy, zzz, ... are cooling times in [K/minute], separated by commas, in the same order as XXX.10,XXX.20 ...XXX.70
            if you use a zerofield step there is no need to specify a cooling rate for it
            It is important to add the -A option to the command line so the measurements are not averaged,
            but users must make sure that there are no duplicate measurements in the file
-V [1,2,3] units of IRM field in volts using ASC coil #1,2 or 3
-spc NUM : specify number of characters to designate a specimen, default = 0
-loc LOCNAME : specify location/study name, must have either LOCNAME or SAMPFILE or be a synthetic
-syn INST TYPE: sets these specimens as synthetics created at institution INST and of type TYPE
        -ins INST : specify which demag instrument was used (e.g., SIO-Suzy or SIO-Odette), default is ""
-dc B PHI THETA: dc lab field (in micro tesla) and phi,theta, default is none
NB: use PHI, THETA = -1 -1 to signal that it changes, i.e. in anisotropy experiment
-ac B : peak AF field (in mT) for ARM acquisition, default is none
-ncn NCON: specify naming convention: default is #1 below
-A: don't average replicate measurements
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
        [2] XXXX-YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
        [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXXX
[5] site name same as sample
[6] site is entered under a separate column NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
        NB: all others you will have to customize yourself
or e-mail [email protected] for help.
[8] synthetic - has no site name
[9] ODP naming convention
INPUT
        Best to put separate experiments (all AF, thermal, thellier, trm acquisition, Shaw, etc.) in
        separate .mag files (e.g., af.mag, thermal.mag, etc.)
Format of SIO .mag files:
Spec Treat CSD Intensity Declination Inclination [optional metadata string]
Spec: specimen name
Treat: treatment step
XXX T in Centigrade
XXX AF in mT
for special experiments:
Thellier:
XXX.0 first zero field step
XXX.1 first in field step [XXX.0 and XXX.1 can be done in any order]
XXX.2 second in-field step at lower temperature (pTRM check)
XXX.3 second zero-field step after infield (pTRM check step)
XXX.3 MUST be done in this order [XXX.0, XXX.1 [optional XXX.2] XXX.3]
AARM:
X.00 baseline step (AF in zero bias field - high peak field)
X.1 ARM step (in field step) where
X is the step number in the 15 position scheme
(see Appendix to Lecture 13 - http://magician.ucsd.edu/Essentials_2)
ATRM:
X.00 optional baseline
X.1 ATRM step (+X)
X.2 ATRM step (+Y)
X.3 ATRM step (+Z)
X.4 ATRM step (-X)
X.5 ATRM step (-Y)
X.6 ATRM step (-Z)
X.7 optional alteration check (+X)
TRM:
XXX.YYY XXX is temperature step of total TRM
YYY is dc field in microtesla
        Intensity assumed to be total moment in 10^-3 Am^2 (emu)
Declination: Declination in specimen coordinate system
        Inclination: Inclination in specimen coordinate system
        Optional metadata string: mm/dd/yy;hh:mm;[dC,mT];xx.xx;UNITS;USER;INST;NMEAS
hh in 24 hours.
dC or mT units of treatment XXX (see Treat above) for thermal or AF respectively
xx.xxx DC field
UNITS of DC field (microT, mT)
INST: instrument code, number of axes, number of positions (e.g., G34 is 2G, three axes,
measured in four positions)
NMEAS: number of measurements in a single position (1,3,200...)
"""
# initialize some stuff
mag_file = None
codelist = None
infile_type="mag"
noave=0
methcode,inst="LP-NO",""
phi,theta,peakfield,labfield=0,0,0,0
pTRM,MD,samp_con,Z=0,0,'1',1
dec=[315,225,180,135,45,90,270,270,270,90,180,180,0,0,0]
inc=[0,0,0,0,0,-45,-45,0,45,45,45,-45,-90,-45,45]
tdec=[0,90,0,180,270,0,0,90,0]
tinc=[0,0,90,0,0,-90,0,0,90]
missing=1
demag="N"
er_location_name=""
citation='This study'
args=sys.argv
fmt='old'
syn=0
synfile='er_synthetics.txt'
samp_infile,Samps='',[]
trm=0
irm=0
specnum=0
coil=""
mag_file=""
#
# get command line arguments
#
meas_file="magic_measurements.txt"
user=""
if not command_line:
user = kwargs.get('user', '')
meas_file = kwargs.get('meas_file', '')
syn_file = kwargs.get('syn_file', '')
mag_file = kwargs.get('mag_file', '')
labfield = kwargs.get('labfield', '')
if labfield:
labfield = float(labfield) *1e-6
else:
labfield = 0
phi = kwargs.get('phi', 0)
if phi:
phi = float(phi)
else:
phi = 0
theta = kwargs.get('theta', 0)
if theta:
theta=float(theta)
else:
theta = 0
peakfield = kwargs.get('peakfield', 0)
if peakfield:
peakfield=float(peakfield) *1e-3
else:
peakfield = 0
specnum = kwargs.get('specnum', 0)
samp_con = kwargs.get('samp_con', '1')
er_location_name = kwargs.get('er_location_name', '')
samp_infile = kwargs.get('samp_infile', '')
syn = kwargs.get('syn', 0)
institution = kwargs.get('institution', '')
syntype = kwargs.get('syntype', '')
inst = kwargs.get('inst', '')
noave = kwargs.get('noave', 0)
codelist = kwargs.get('codelist', '')
coil = kwargs.get('coil', '')
cooling_rates = kwargs.get('cooling_rates', '')
if command_line:
if "-h" in args:
print(main.__doc__)
return False
if "-usr" in args:
ind=args.index("-usr")
user=args[ind+1]
if '-F' in args:
ind=args.index("-F")
meas_file=args[ind+1]
if '-Fsy' in args:
ind=args.index("-Fsy")
synfile=args[ind+1]
if '-f' in args:
ind=args.index("-f")
mag_file=args[ind+1]
if "-dc" in args:
ind=args.index("-dc")
labfield=float(args[ind+1])*1e-6
phi=float(args[ind+2])
theta=float(args[ind+3])
if "-ac" in args:
ind=args.index("-ac")
peakfield=float(args[ind+1])*1e-3
if "-spc" in args:
ind=args.index("-spc")
specnum=int(args[ind+1])
if "-loc" in args:
ind=args.index("-loc")
er_location_name=args[ind+1]
if "-fsa" in args:
ind=args.index("-fsa")
samp_infile = args[ind+1]
if '-syn' in args:
syn=1
ind=args.index("-syn")
institution=args[ind+1]
syntype=args[ind+2]
if '-fsy' in args:
ind=args.index("-fsy")
synfile=args[ind+1]
if "-ins" in args:
ind=args.index("-ins")
inst=args[ind+1]
if "-A" in args: noave=1
if "-ncn" in args:
ind=args.index("-ncn")
samp_con=sys.argv[ind+1]
if '-LP' in args:
ind=args.index("-LP")
codelist=args[ind+1]
if "-V" in args:
ind=args.index("-V")
coil=args[ind+1]
# make sure all initial values are correctly set up (whether they come from the command line or a GUI)
if samp_infile:
Samps, file_type = pmag.magic_read(samp_infile)
if coil:
coil = str(coil)
methcode="LP-IRM"
irmunits = "V"
if coil not in ["1","2","3"]:
print(main.__doc__)
print('not a valid coil specification')
return False, '{} is not a valid coil specification'.format(coil)
if mag_file:
try:
#with open(mag_file,'r') as finput:
# lines = finput.readlines()
lines=pmag.open_file(mag_file)
except:
print("bad mag file name")
return False, "bad mag file name"
if not mag_file:
print(main.__doc__)
print("mag_file field is required option")
return False, "mag_file field is required option"
if specnum!=0:
specnum=-specnum
#print 'samp_con:', samp_con
if samp_con:
if "4" == samp_con[0]:
if "-" not in samp_con:
print("naming convention option [4] must be in form 4-Z where Z is an integer")
print('---------------')
return False, "naming convention option [4] must be in form 4-Z where Z is an integer"
else:
Z=samp_con.split("-")[1]
samp_con="4"
if "7" == samp_con[0]:
if "-" not in samp_con:
print("option [7] must be in form 7-Z where Z is an integer")
return False, "option [7] must be in form 7-Z where Z is an integer"
else:
Z=samp_con.split("-")[1]
samp_con="7"
if codelist:
codes=codelist.split(':')
if "AF" in codes:
demag='AF'
if'-dc' not in args: methcode="LT-AF-Z"
if'-dc' in args: methcode="LT-AF-I"
if "T" in codes:
demag="T"
if '-dc' not in args: methcode="LT-T-Z"
if '-dc' in args: methcode="LT-T-I"
if "I" in codes:
methcode="LP-IRM"
irmunits="mT"
if "I3d" in codes:
methcode="LT-T-Z:LP-IRM-3D"
if "S" in codes:
demag="S"
methcode="LP-PI-TRM:LP-PI-ALT-AFARM"
trm_labfield=labfield
ans=input("DC lab field for ARM step: [50uT] ")
if ans=="":
arm_labfield=50e-6
else:
arm_labfield=float(ans)*1e-6
ans=input("temperature for total trm step: [600 C] ")
if ans=="":
trm_peakT=600+273 # convert to kelvin
else:
trm_peakT=float(ans)+273 # convert to kelvin
if "G" in codes: methcode="LT-AF-G"
if "D" in codes: methcode="LT-AF-D"
if "TRM" in codes:
demag="T"
trm=1
if "CR" in codes:
demag="T"
cooling_rate_experiment=1
if command_line:
ind=args.index("CR")
cooling_rates=args[ind+1]
cooling_rates_list=cooling_rates.split(',')
else:
cooling_rates_list=str(cooling_rates).split(',')
if demag=="T" and "ANI" in codes:
methcode="LP-AN-TRM"
if demag=="T" and "CR" in codes:
methcode="LP-CR-TRM"
if demag=="AF" and "ANI" in codes:
methcode="LP-AN-ARM"
if labfield==0: labfield=50e-6
if peakfield==0: peakfield=.180
SynRecs,MagRecs=[],[]
version_num=pmag.get_version()
##################################
if 1:
#if infile_type=="SIO format":
for line in lines:
instcode=""
if len(line)>2:
SynRec={}
MagRec={}
MagRec['er_location_name']=er_location_name
MagRec['magic_software_packages']=version_num
MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["treatment_ac_field"]='0'
MagRec["treatment_dc_field"]='0'
MagRec["treatment_dc_field_phi"]='0'
MagRec["treatment_dc_field_theta"]='0'
meas_type="LT-NO"
rec=line.split()
if rec[1]==".00":rec[1]="0.00"
treat=rec[1].split('.')
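                # illustrative example (hypothetical record): rec[1]="500.1"
                # splits into treat=['500','1'], i.e. a 500 degree (or 500 mT)
                # step with treatment subcode 1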
if methcode=="LP-IRM":
if irmunits=='mT':
labfield=float(treat[0])*1e-3
else:
labfield=pmag.getfield(irmunits,coil,treat[0])
if rec[1][0]!="-":
phi,theta=0.,90.
else:
phi,theta=0.,-90.
meas_type="LT-IRM"
MagRec["treatment_dc_field"]='%8.3e'%(labfield)
MagRec["treatment_dc_field_phi"]='%7.1f'%(phi)
MagRec["treatment_dc_field_theta"]='%7.1f'%(theta)
if len(rec)>6:
                    code1=rec[6].split(';') # break e.g., 10/15/02;7:45 into date and time
if len(code1)==2: # old format with AM/PM
missing=0
code2=code1[0].split('/') # break date into mon/day/year
code3=rec[7].split(';') # break e.g., AM;C34;200 into time;instr/axes/measuring pos;number of measurements
yy=int(code2[2])
if yy <90:
yyyy=str(2000+yy)
else: yyyy=str(1900+yy)
mm=int(code2[0])
if mm<10:
mm="0"+str(mm)
else: mm=str(mm)
dd=int(code2[1])
if dd<10:
dd="0"+str(dd)
else: dd=str(dd)
time=code1[1].split(':')
hh=int(time[0])
if code3[0]=="PM":hh=hh+12
if hh<10:
hh="0"+str(hh)
else: hh=str(hh)
min=int(time[1])
if min<10:
min= "0"+str(min)
else: min=str(min)
MagRec["measurement_date"]=yyyy+":"+mm+":"+dd+":"+hh+":"+min+":00.00"
MagRec["measurement_time_zone"]='SAN'
if inst=="":
if code3[1][0]=='C':instcode='SIO-bubba'
if code3[1][0]=='G':instcode='SIO-flo'
else:
instcode=''
MagRec["measurement_positions"]=code3[1][2]
elif len(code1)>2: # newest format (cryo7 or later)
if "LP-AN-ARM" not in methcode:labfield=0
fmt='new'
date=code1[0].split('/') # break date into mon/day/year
yy=int(date[2])
if yy <90:
yyyy=str(2000+yy)
else: yyyy=str(1900+yy)
mm=int(date[0])
if mm<10:
mm="0"+str(mm)
else: mm=str(mm)
dd=int(date[1])
if dd<10:
dd="0"+str(dd)
else: dd=str(dd)
time=code1[1].split(':')
hh=int(time[0])
if hh<10:
hh="0"+str(hh)
else: hh=str(hh)
min=int(time[1])
if min<10:
min= "0"+str(min)
else:
min=str(min)
MagRec["measurement_date"]=yyyy+":"+mm+":"+dd+":"+hh+":"+min+":00.00"
MagRec["measurement_time_zone"]='SAN'
if inst=="":
if code1[6][0]=='C':
instcode='SIO-bubba'
if code1[6][0]=='G':
instcode='SIO-flo'
else:
instcode=''
if len(code1)>1:
MagRec["measurement_positions"]=code1[6][2]
else:
MagRec["measurement_positions"]=code1[7] # takes care of awkward format with bubba and flo being different
if user=="":user=code1[5]
if code1[2][-1]=='C':
demag="T"
if code1[4]=='microT' and float(code1[3])!=0. and "LP-AN-ARM" not in methcode: labfield=float(code1[3])*1e-6
if code1[2]=='mT' and methcode!="LP-IRM":
demag="AF"
if code1[4]=='microT' and float(code1[3])!=0.: labfield=float(code1[3])*1e-6
if code1[4]=='microT' and labfield!=0. and meas_type!="LT-IRM":
phi,theta=0.,-90.
if demag=="T": meas_type="LT-T-I"
if demag=="AF": meas_type="LT-AF-I"
MagRec["treatment_dc_field"]='%8.3e'%(labfield)
MagRec["treatment_dc_field_phi"]='%7.1f'%(phi)
MagRec["treatment_dc_field_theta"]='%7.1f'%(theta)
if code1[4]=='' or labfield==0. and meas_type!="LT-IRM":
if demag=='T':meas_type="LT-T-Z"
if demag=="AF":meas_type="LT-AF-Z"
MagRec["treatment_dc_field"]='0'
if syn==0:
MagRec["er_specimen_name"]=rec[0]
MagRec["er_synthetic_name"]=""
MagRec["er_site_name"]=""
if specnum!=0:
MagRec["er_sample_name"]=rec[0][:specnum]
else:
MagRec["er_sample_name"]=rec[0]
if samp_infile and Samps: # if samp_infile was provided AND yielded sample data
samp=pmag.get_dictitem(Samps,'er_sample_name',MagRec['er_sample_name'],'T')
if len(samp)>0:
MagRec["er_location_name"]=samp[0]["er_location_name"]
MagRec["er_site_name"]=samp[0]["er_site_name"]
else:
MagRec['er_location_name']=''
MagRec["er_site_name"]=''
elif int(samp_con)!=6:
site=pmag.parse_site(MagRec['er_sample_name'],samp_con,Z)
MagRec["er_site_name"]=site
if MagRec['er_site_name']=="":
print('No site name found for: ',MagRec['er_specimen_name'],MagRec['er_sample_name'])
if MagRec["er_location_name"]=="":
print('no location name for: ',MagRec["er_specimen_name"])
else:
MagRec["er_specimen_name"]=rec[0]
if specnum!=0:
MagRec["er_sample_name"]=rec[0][:specnum]
else:
MagRec["er_sample_name"]=rec[0]
MagRec["er_site_name"]=""
MagRec["er_synthetic_name"]=MagRec["er_specimen_name"]
SynRec["er_synthetic_name"]=MagRec["er_specimen_name"]
site=pmag.parse_site(MagRec['er_sample_name'],samp_con,Z)
SynRec["synthetic_parent_sample"]=site
SynRec["er_citation_names"]="This study"
SynRec["synthetic_institution"]=institution
SynRec["synthetic_type"]=syntype
SynRecs.append(SynRec)
if float(rec[1])==0:
pass
elif demag=="AF":
if methcode != "LP-AN-ARM":
MagRec["treatment_ac_field"]='%8.3e' %(float(rec[1])*1e-3) # peak field in tesla
if meas_type=="LT-AF-Z": MagRec["treatment_dc_field"]='0'
else: # AARM experiment
if treat[1][0]=='0':
meas_type="LT-AF-Z:LP-AN-ARM:"
MagRec["treatment_ac_field"]='%8.3e' %(peakfield) # peak field in tesla
MagRec["treatment_dc_field"]='%8.3e'%(0)
if labfield!=0 and methcode!="LP-AN-ARM": print("Warning - inconsistency in mag file with lab field - overriding file with 0")
else:
meas_type="LT-AF-I:LP-AN-ARM"
ipos=int(treat[0])-1
MagRec["treatment_dc_field_phi"]='%7.1f' %(dec[ipos])
MagRec["treatment_dc_field_theta"]='%7.1f'% (inc[ipos])
MagRec["treatment_dc_field"]='%8.3e'%(labfield)
MagRec["treatment_ac_field"]='%8.3e' %(peakfield) # peak field in tesla
elif demag=="T" and methcode == "LP-AN-TRM":
MagRec["treatment_temp"]='%8.3e' % (float(treat[0])+273.) # temp in kelvin
if treat[1][0]=='0':
meas_type="LT-T-Z:LP-AN-TRM"
MagRec["treatment_dc_field"]='%8.3e'%(0)
MagRec["treatment_dc_field_phi"]='0'
MagRec["treatment_dc_field_theta"]='0'
else:
MagRec["treatment_dc_field"]='%8.3e'%(labfield)
if treat[1][0]=='7': # alteration check as final measurement
meas_type="LT-PTRM-I:LP-AN-TRM"
else:
meas_type="LT-T-I:LP-AN-TRM"
# find the direction of the lab field in two ways:
# (1) using the treatment coding (XX.1=+x, XX.2=+y, XX.3=+z, XX.4=-x, XX.5=-y, XX.6=-z)
ipos_code=int(treat[1][0])-1
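                        # e.g., a hypothetical step "500.1" gives ipos_code=0,
                        # i.e. the +x position (tdec[0]=0, tinc[0]=0)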
# (2) using the magnetization
DEC=float(rec[4])
INC=float(rec[5])
if INC < 45 and INC > -45:
if DEC>315 or DEC<45: ipos_guess=0
if DEC>45 and DEC<135: ipos_guess=1
if DEC>135 and DEC<225: ipos_guess=3
if DEC>225 and DEC<315: ipos_guess=4
else:
if INC >45: ipos_guess=2
if INC <-45: ipos_guess=5
# prefer the guess over the code
ipos=ipos_guess
MagRec["treatment_dc_field_phi"]='%7.1f' %(tdec[ipos])
MagRec["treatment_dc_field_theta"]='%7.1f'% (tinc[ipos])
# check it
if ipos_guess!=ipos_code and treat[1][0]!='7':
print("-E- ERROR: check specimen %s step %s, ATRM measurements, coding does not match the direction of the lab field!"%(rec[0],".".join(list(treat))))
elif demag=="S": # Shaw experiment
if treat[1][1]=='0':
if int(treat[0])!=0:
MagRec["treatment_ac_field"]='%8.3e' % (float(treat[0])*1e-3) # AF field in tesla
MagRec["treatment_dc_field"]='0'
meas_type="LT-AF-Z" # first AF
else:
meas_type="LT-NO"
MagRec["treatment_ac_field"]='0'
MagRec["treatment_dc_field"]='0'
elif treat[1][1]=='1':
if int(treat[0])==0:
MagRec["treatment_ac_field"]='%8.3e' %(peakfield) # peak field in tesla
MagRec["treatment_dc_field"]='%8.3e'%(arm_labfield)
MagRec["treatment_dc_field_phi"]='%7.1f'%(phi)
MagRec["treatment_dc_field_theta"]='%7.1f'%(theta)
meas_type="LT-AF-I"
else:
MagRec["treatment_ac_field"]='%8.3e' % ( float(treat[0])*1e-3) # AF field in tesla
MagRec["treatment_dc_field"]='0'
meas_type="LT-AF-Z"
elif treat[1][1]=='2':
if int(treat[0])==0:
MagRec["treatment_ac_field"]='0'
MagRec["treatment_dc_field"]='%8.3e'%(trm_labfield)
MagRec["treatment_dc_field_phi"]='%7.1f'%(phi)
MagRec["treatment_dc_field_theta"]='%7.1f'%(theta)
MagRec["treatment_temp"]='%8.3e' % (trm_peakT)
meas_type="LT-T-I"
else:
MagRec["treatment_ac_field"]='%8.3e' % ( float(treat[0])*1e-3) # AF field in tesla
MagRec["treatment_dc_field"]='0'
meas_type="LT-AF-Z"
elif treat[1][1]=='3':
if int(treat[0])==0:
MagRec["treatment_ac_field"]='%8.3e' %(peakfield) # peak field in tesla
MagRec["treatment_dc_field"]='%8.3e'%(arm_labfield)
MagRec["treatment_dc_field_phi"]='%7.1f'%(phi)
MagRec["treatment_dc_field_theta"]='%7.1f'%(theta)
meas_type="LT-AF-I"
else:
MagRec["treatment_ac_field"]='%8.3e' % ( float(treat[0])*1e-3) # AF field in tesla
MagRec["treatment_dc_field"]='0'
meas_type="LT-AF-Z"
                # Cooling rate experiment # added by rshaar
elif demag=="T" and methcode == "LP-CR-TRM":
MagRec["treatment_temp"]='%8.3e' % (float(treat[0])+273.) # temp in kelvin
if treat[1][0]=='0':
meas_type="LT-T-Z:LP-CR-TRM"
MagRec["treatment_dc_field"]='%8.3e'%(0)
MagRec["treatment_dc_field_phi"]='0'
MagRec["treatment_dc_field_theta"]='0'
else:
MagRec["treatment_dc_field"]='%8.3e'%(labfield)
if treat[1][0]=='7': # alteration check as final measurement
meas_type="LT-PTRM-I:LP-CR-TRM"
else:
meas_type="LT-T-I:LP-CR-TRM"
MagRec["treatment_dc_field_phi"]='%7.1f' % (phi) # labfield phi
MagRec["treatment_dc_field_theta"]='%7.1f' % (theta) # labfield theta
indx=int(treat[1][0])-1
                    # alteration check marked as 0.7 in the measurement file
if indx==6:
cooling_time= cooling_rates_list[-1]
else:
cooling_time=cooling_rates_list[indx]
MagRec["measurement_description"]="cooling_rate"+":"+cooling_time+":"+"K/min"
elif demag!='N':
if len(treat)==1:treat.append('0')
MagRec["treatment_temp"]='%8.3e' % (float(treat[0])+273.) # temp in kelvin
if trm==0: # demag=T and not trmaq
if treat[1][0]=='0':
meas_type="LT-T-Z"
else:
MagRec["treatment_dc_field"]='%8.3e' % (labfield) # labfield in tesla (convert from microT)
MagRec["treatment_dc_field_phi"]='%7.1f' % (phi) # labfield phi
MagRec["treatment_dc_field_theta"]='%7.1f' % (theta) # labfield theta
if treat[1][0]=='1':meas_type="LT-T-I" # in-field thermal step
if treat[1][0]=='2':
meas_type="LT-PTRM-I" # pTRM check
pTRM=1
if treat[1][0]=='3':
MagRec["treatment_dc_field"]='0' # this is a zero field step
meas_type="LT-PTRM-MD" # pTRM tail check
else:
labfield=float(treat[1])*1e-6
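                        # e.g., a hypothetical step "500.40" means total TRM
                        # acquired at 500 C in a 40 microtesla dc field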
MagRec["treatment_dc_field"]='%8.3e' % (labfield) # labfield in tesla (convert from microT)
MagRec["treatment_dc_field_phi"]='%7.1f' % (phi) # labfield phi
MagRec["treatment_dc_field_theta"]='%7.1f' % (theta) # labfield theta
meas_type="LT-T-I:LP-TRM" # trm acquisition experiment
MagRec["measurement_csd"]=rec[2]
MagRec["measurement_magn_moment"]='%10.3e'% (float(rec[3])*1e-3) # moment in Am^2 (from emu)
MagRec["measurement_dec"]=rec[4]
MagRec["measurement_inc"]=rec[5]
MagRec["magic_instrument_codes"]=instcode
MagRec["er_analyst_mail_names"]=user
MagRec["er_citation_names"]=citation
if "LP-IRM-3D" in methcode : meas_type=methcode
#MagRec["magic_method_codes"]=methcode.strip(':')
MagRec["magic_method_codes"]=meas_type
MagRec["measurement_flag"]='g'
MagRec["er_specimen_name"]=rec[0]
if 'std' in rec[0]:
MagRec["measurement_standard"]='s'
else:
MagRec["measurement_standard"]='u'
MagRec["measurement_number"]='1'
#print MagRec['treatment_temp']
MagRecs.append(MagRec)
MagOuts=pmag.measurements_methods(MagRecs,noave)
pmag.magic_write(meas_file,MagOuts,'magic_measurements')
print("results put in ",meas_file)
if len(SynRecs)>0:
pmag.magic_write(synfile,SynRecs,'er_synthetics')
print("synthetics put in ",synfile)
return True, meas_file | NAME
sio_magic.py
DESCRIPTION
converts SIO .mag format files to magic_measurements format files
SYNTAX
sio_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-usr USER: identify user, default is ""
-f FILE: specify .mag format input file, required
        -fsa SAMPFILE : specify er_samples.txt file relating sample, site, and location names, default is none -- values in SAMPFILE will override selections for -loc (location), -spc (designate specimen), and -ncn (sample-site naming convention)
-F FILE: specify output file, default is magic_measurements.txt
        -Fsy: specify er_synthetics file, default is er_synthetics.txt
-LP [colon delimited list of protocols, include all that apply]
AF: af demag
T: thermal including thellier but not trm acquisition
S: Shaw method
I: IRM (acquisition)
I3d: 3D IRM experiment
N: NRM only
TRM: trm acquisition
ANI: anisotropy experiment
D: double AF demag
G: triple AF demag (GRM protocol)
CR: cooling rate experiment.
            The treatment coding of the measurement file should be: XXX.00,XXX.10, XXX.20 ...XXX.70 etc. (XXX.00 is optional)
            where XXX is the temperature and .10, .20, ... are running numbers of the cooling rate steps.
            XXX.00 is the optional zerofield baseline. XXX.70 is the alteration check.
            syntax in sio_magic is: -LP CR xxx,yyy,zzz,..... xxx -A
            where xxx, yyy, zzz, ... are cooling times in [K/minute], separated by commas, in the same order as XXX.10,XXX.20 ...XXX.70
            if you use a zerofield step there is no need to specify a cooling rate for it
            It is important to add the -A option to the command line so the measurements are not averaged,
            but users must make sure that there are no duplicate measurements in the file
-V [1,2,3] units of IRM field in volts using ASC coil #1,2 or 3
-spc NUM : specify number of characters to designate a specimen, default = 0
-loc LOCNAME : specify location/study name, must have either LOCNAME or SAMPFILE or be a synthetic
-syn INST TYPE: sets these specimens as synthetics created at institution INST and of type TYPE
        -ins INST : specify which demag instrument was used (e.g., SIO-Suzy or SIO-Odette), default is ""
-dc B PHI THETA: dc lab field (in micro tesla) and phi,theta, default is none
NB: use PHI, THETA = -1 -1 to signal that it changes, i.e. in anisotropy experiment
-ac B : peak AF field (in mT) for ARM acquisition, default is none
-ncn NCON: specify naming convention: default is #1 below
-A: don't average replicate measurements
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
        [2] XXXX-YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
        [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXXX
[5] site name same as sample
[6] site is entered under a separate column NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
        NB: all others you will have to customize yourself
or e-mail [email protected] for help.
[8] synthetic - has no site name
[9] ODP naming convention
INPUT
        Best to put separate experiments (all AF, thermal, thellier, trm acquisition, Shaw, etc.) in
        separate .mag files (e.g., af.mag, thermal.mag, etc.)
Format of SIO .mag files:
Spec Treat CSD Intensity Declination Inclination [optional metadata string]
Spec: specimen name
Treat: treatment step
XXX T in Centigrade
XXX AF in mT
for special experiments:
Thellier:
XXX.0 first zero field step
XXX.1 first in field step [XXX.0 and XXX.1 can be done in any order]
XXX.2 second in-field step at lower temperature (pTRM check)
XXX.3 second zero-field step after infield (pTRM check step)
XXX.3 MUST be done in this order [XXX.0, XXX.1 [optional XXX.2] XXX.3]
AARM:
X.00 baseline step (AF in zero bias field - high peak field)
X.1 ARM step (in field step) where
X is the step number in the 15 position scheme
(see Appendix to Lecture 13 - http://magician.ucsd.edu/Essentials_2)
ATRM:
X.00 optional baseline
X.1 ATRM step (+X)
X.2 ATRM step (+Y)
X.3 ATRM step (+Z)
X.4 ATRM step (-X)
X.5 ATRM step (-Y)
X.6 ATRM step (-Z)
X.7 optional alteration check (+X)
TRM:
XXX.YYY XXX is temperature step of total TRM
YYY is dc field in microtesla
        Intensity assumed to be total moment in 10^-3 Am^2 (emu)
Declination: Declination in specimen coordinate system
        Inclination: Inclination in specimen coordinate system
        Optional metadata string: mm/dd/yy;hh:mm;[dC,mT];xx.xx;UNITS;USER;INST;NMEAS
hh in 24 hours.
dC or mT units of treatment XXX (see Treat above) for thermal or AF respectively
xx.xxx DC field
UNITS of DC field (microT, mT)
INST: instrument code, number of axes, number of positions (e.g., G34 is 2G, three axes,
measured in four positions)
NMEAS: number of measurements in a single position (1,3,200...) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/conversion_scripts2/sio_magic2.py#L8-L693 |
PmagPy/PmagPy | SPD/lib/new_lib_curvature.py | fitcircle | def fitcircle(n, x, y):
# n points, x points, y points
"""c Fit circle to arbitrary number of x,y pairs, based on the
c modified least squares method of Umback and Jones (2000),
c IEEE Transactions on Instrumentation and Measurement."""
# adding in normalize vectors step
#x = numpy.array(x) / max(x)
#y = numpy.array(y) / max(y)
#
sx, sx2, sx3, sy, sy2, sy3, sxy, sxy2, syx2 = (0,) * 9
print(type(sx), sx)
for i in range(n):
sx = sx + x[i]
sx2 = sx2 + x[i]**2
sx3 = sx3 + x[i]**3
sy = sy + y[i]
sy2 = sy2 + y[i]**2
sy3 = sy3 + y[i]**3
sxy = sxy + x[i] * y[i]
sxy2 = sxy2 + x[i] * y[i]**2
syx2 = syx2 + y[i] * x[i]**2
A = n * sx2 - sx**2
B = n * sxy - sx*sy
C = n * sy2 - sy**2
D = 0.5 * (n * sxy2 - sx * sy2 + n * sx3 - sx * sx2)
E = 0.5 * (n * syx2 - sy * sx2 + n * sy3 - sy * sy2)
# values check out up to here
xo = old_div((D * C - B * E), (A * C - B**2))
yo = old_div((A * E - B * D), (A * C - B**2))
print("xo", xo)
print("yo", yo)
r = 0
for z in range(n):
r = r + old_div(numpy.sqrt( (x[z]-xo)**2 + (y[z]-yo)**2 ), n)
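    # r is now the mean distance from the data points to the fitted center;
    # the sign convention below makes the curvature k negative when the center
    # falls at or below-left of the data centroid, and positive otherwise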
if xo <= numpy.mean(x) and yo <= numpy.mean(y):
k = old_div(-1.,r)
else:
k = old_div(1.,r)
SSE = lib_k.get_SSE(xo, yo, r, x, y)
print("r", r)
return k, xo, yo, SSE | python | def fitcircle(n, x, y):
# n points, x points, y points
"""c Fit circle to arbitrary number of x,y pairs, based on the
c modified least squares method of Umback and Jones (2000),
c IEEE Transactions on Instrumentation and Measurement."""
# adding in normalize vectors step
#x = numpy.array(x) / max(x)
#y = numpy.array(y) / max(y)
#
sx, sx2, sx3, sy, sy2, sy3, sxy, sxy2, syx2 = (0,) * 9
print(type(sx), sx)
for i in range(n):
sx = sx + x[i]
sx2 = sx2 + x[i]**2
sx3 = sx3 + x[i]**3
sy = sy + y[i]
sy2 = sy2 + y[i]**2
sy3 = sy3 + y[i]**3
sxy = sxy + x[i] * y[i]
sxy2 = sxy2 + x[i] * y[i]**2
syx2 = syx2 + y[i] * x[i]**2
A = n * sx2 - sx**2
B = n * sxy - sx*sy
C = n * sy2 - sy**2
D = 0.5 * (n * sxy2 - sx * sy2 + n * sx3 - sx * sx2)
E = 0.5 * (n * syx2 - sy * sx2 + n * sy3 - sy * sy2)
# values check out up to here
xo = old_div((D * C - B * E), (A * C - B**2))
yo = old_div((A * E - B * D), (A * C - B**2))
print("xo", xo)
print("yo", yo)
r = 0
for z in range(n):
r = r + old_div(numpy.sqrt( (x[z]-xo)**2 + (y[z]-yo)**2 ), n)
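    # r is now the mean distance from the data points to the fitted center;
    # the sign convention below makes the curvature k negative when the center
    # falls at or below-left of the data centroid, and positive otherwise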
if xo <= numpy.mean(x) and yo <= numpy.mean(y):
k = old_div(-1.,r)
else:
k = old_div(1.,r)
SSE = lib_k.get_SSE(xo, yo, r, x, y)
print("r", r)
    return k, xo, yo, SSE | Fit circle to arbitrary number of x,y pairs, based on the
modified least squares method of Umback and Jones (2000),
IEEE Transactions on Instrumentation and Measurement. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/SPD/lib/new_lib_curvature.py#L29-L75
PmagPy/PmagPy | programs/vgp_di.py | main | def main():
"""
NAME
vgp_di.py
DESCRIPTION
converts site latitude, longitude and pole latitude, longitude to declination, inclination
SYNTAX
vgp_di.py [-h] [-i] [-f FILE] [< filename]
OPTIONS
-h prints help message and quits
-i interactive data entry
-f FILE to specify file name on the command line
INPUT
for file entry:
PLAT PLON SLAT SLON
where:
PLAT: pole latitude
PLON: pole longitude (positive east)
SLAT: site latitude (positive north)
SLON: site longitude (positive east)
OUTPUT
D I
where:
D: declination
I: inclination
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-i' in sys.argv: # if one is -i
while 1:
try:
ans=input("Input Pole Latitude [positive north]: <cntrl-D to quit> ")
plat=float(ans) # assign input to plat, after conversion to floating point
ans=input("Input Pole Longitude [positive east]: ")
plon =float(ans)
ans=input("Input Site Latitude: ")
slat =float(ans)
ans=input("Input Site Longitude: ")
slong =float(ans)
dec,inc=pmag.vgp_di(plat,plon,slat,slong) # call vgp_di function from pmag module
print('%7.1f %7.1f'%(dec,inc)) # print out returned stuff
except EOFError:
print("\n Good-bye\n")
sys.exit()
elif '-f' in sys.argv: # manual input of file name
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
        inp = f.readlines() # read the lines from the file
for line in inp: # read in the data (as string variable), line by line
dec,inc= spitout(line)
else:
inp = sys.stdin.readlines() # read from standard input
for line in inp: # read in the data (as string variable), line by line
spitout(line) | python | def main():
"""
NAME
vgp_di.py
DESCRIPTION
converts site latitude, longitude and pole latitude, longitude to declination, inclination
SYNTAX
vgp_di.py [-h] [-i] [-f FILE] [< filename]
OPTIONS
-h prints help message and quits
-i interactive data entry
-f FILE to specify file name on the command line
INPUT
for file entry:
PLAT PLON SLAT SLON
where:
PLAT: pole latitude
PLON: pole longitude (positive east)
SLAT: site latitude (positive north)
SLON: site longitude (positive east)
OUTPUT
D I
where:
D: declination
I: inclination
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-i' in sys.argv: # if one is -i
while 1:
try:
ans=input("Input Pole Latitude [positive north]: <cntrl-D to quit> ")
plat=float(ans) # assign input to plat, after conversion to floating point
ans=input("Input Pole Longitude [positive east]: ")
plon =float(ans)
ans=input("Input Site Latitude: ")
slat =float(ans)
ans=input("Input Site Longitude: ")
slong =float(ans)
dec,inc=pmag.vgp_di(plat,plon,slat,slong) # call vgp_di function from pmag module
print('%7.1f %7.1f'%(dec,inc)) # print out returned stuff
except EOFError:
print("\n Good-bye\n")
sys.exit()
elif '-f' in sys.argv: # manual input of file name
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
        inp = f.readlines() # read the lines from the file
for line in inp: # read in the data (as string variable), line by line
dec,inc= spitout(line)
else:
inp = sys.stdin.readlines() # read from standard input
for line in inp: # read in the data (as string variable), line by line
spitout(line) | NAME
vgp_di.py
DESCRIPTION
converts site latitude, longitude and pole latitude, longitude to declination, inclination
SYNTAX
vgp_di.py [-h] [-i] [-f FILE] [< filename]
OPTIONS
-h prints help message and quits
-i interactive data entry
-f FILE to specify file name on the command line
INPUT
for file entry:
PLAT PLON SLAT SLON
where:
PLAT: pole latitude
PLON: pole longitude (positive east)
SLAT: site latitude (positive north)
SLON: site longitude (positive east)
OUTPUT
D I
where:
D: declination
I: inclination | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/vgp_di.py#L17-L77 |
PmagPy/PmagPy | dev_setup.py | unix_install | def unix_install():
"""
    Edits or creates .bashrc, .bash_profile, and .profile files in the user's
HOME directory in order to add your current directory (hopefully your
PmagPy directory) and assorted lower directories in the PmagPy/programs
directory to your PATH environment variable. It also adds the PmagPy and
the PmagPy/programs directories to PYTHONPATH.
"""
PmagPyDir = os.path.abspath(".")
COMMAND = """\n
for d in %s/programs/*/ "%s/programs/"; do
case ":$PATH:" in
*":$d:"*) :;; # already there
*) PMAGPATHS="$PMAGPATHS:$d";; # or PATH="$PATH:$new_entry"
esac
done
export PYTHONPATH="$PYTHONPATH:%s:%s/programs/"
export PATH="$PATH:$PMAGPATHS" """ % (PmagPyDir, PmagPyDir, PmagPyDir, PmagPyDir)
frc_path = os.path.join(
os.environ["HOME"], ".bashrc") # not recommended, but hey it freaking works
fbprof_path = os.path.join(os.environ["HOME"], ".bash_profile")
fprof_path = os.path.join(os.environ["HOME"], ".profile")
all_paths = [frc_path, fbprof_path, fprof_path]
for f_path in all_paths:
open_type = 'a'
if not os.path.isfile(f_path):
open_type = 'w+'
fout = open(f_path, open_type)
fout.write(COMMAND)
fout.close()
else:
fin = open(f_path, 'r')
current_f = fin.read()
fin.close()
if COMMAND not in current_f:
fout = open(f_path, open_type)
fout.write(COMMAND)
fout.close()
print("Install complete. Please restart the shell to complete install.\nIf you are seeing strange or non-existent paths in your PATH or PYTHONPATH variable please manually check your .bashrc, .bash_profile, and .profile or attempt to reinstall.") | python | def unix_install():
"""
    Edits or creates .bashrc, .bash_profile, and .profile files in the user's
HOME directory in order to add your current directory (hopefully your
PmagPy directory) and assorted lower directories in the PmagPy/programs
directory to your PATH environment variable. It also adds the PmagPy and
the PmagPy/programs directories to PYTHONPATH.
"""
PmagPyDir = os.path.abspath(".")
COMMAND = """\n
for d in %s/programs/*/ "%s/programs/"; do
case ":$PATH:" in
*":$d:"*) :;; # already there
*) PMAGPATHS="$PMAGPATHS:$d";; # or PATH="$PATH:$new_entry"
esac
done
export PYTHONPATH="$PYTHONPATH:%s:%s/programs/"
export PATH="$PATH:$PMAGPATHS" """ % (PmagPyDir, PmagPyDir, PmagPyDir, PmagPyDir)
frc_path = os.path.join(
os.environ["HOME"], ".bashrc") # not recommended, but hey it freaking works
fbprof_path = os.path.join(os.environ["HOME"], ".bash_profile")
fprof_path = os.path.join(os.environ["HOME"], ".profile")
all_paths = [frc_path, fbprof_path, fprof_path]
for f_path in all_paths:
open_type = 'a'
if not os.path.isfile(f_path):
open_type = 'w+'
fout = open(f_path, open_type)
fout.write(COMMAND)
fout.close()
else:
fin = open(f_path, 'r')
current_f = fin.read()
fin.close()
if COMMAND not in current_f:
fout = open(f_path, open_type)
fout.write(COMMAND)
fout.close()
print("Install complete. Please restart the shell to complete install.\nIf you are seeing strange or non-existent paths in your PATH or PYTHONPATH variable please manually check your .bashrc, .bash_profile, and .profile or attempt to reinstall.") | Edits or creates .bashrc, .bash_profile, and .profile files in the users
HOME directory in order to add your current directory (hopefully your
PmagPy directory) and assorted lower directories in the PmagPy/programs
directory to your PATH environment variable. It also adds the PmagPy and
the PmagPy/programs directories to PYTHONPATH. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dev_setup.py#L56-L96 |
PmagPy/PmagPy | dev_setup.py | windows_install | def windows_install(path_to_python=""):
"""
Sets the .py extension to be associated with the ftype Python which is
then set to the python.exe you provide in the path_to_python variable or
after the -p flag if run as a script. Once the python environment is set
up the function proceeds to set PATH and PYTHONPATH using setx.
Parameters
----------
    path_to_python : the path to the python.exe you want windows to execute when
running .py files
"""
if not path_to_python:
print("Please enter the path to your python.exe you wish Windows to use to run python files. If you do not, this script will not be able to set up a full python environment in Windows. If you already have a python environment set up in Windows such that you can run python scripts from command prompt with just a file name then ignore this message. Otherwise, you will need to run dev_setup.py again with the command line option '-p' followed by the correct full path to python.\nRun dev_setup.py with the -h flag for more details")
print("Would you like to continue? [y/N] ")
ans = input()
if ans == 'y':
pass
else:
return
# be sure to add python.exe if the user forgets to include the file name
if os.path.isdir(path_to_python):
path_to_python = os.path.join(path_to_python, "python.exe")
if not os.path.isfile(path_to_python):
print("The path to python provided is not a full path to the python.exe file or this path does not exist, was given %s.\nPlease run again with the command line option '-p' followed by the correct full path to python.\nRun dev_setup.py with the -h flag for more details" % path_to_python)
return
# make windows associate .py with python
subprocess.check_call('assoc .py=Python', shell=True)
subprocess.check_call('ftype Python=%s ' %
path_to_python + '"%1" %*', shell=True)
PmagPyDir = os.path.abspath(".")
ProgramsDir = os.path.join(PmagPyDir, 'programs')
dirs_to_add = [ProgramsDir]
for d in next(os.walk(ProgramsDir))[1]:
dirs_to_add.append(os.path.join(ProgramsDir, d))
path = str(subprocess.check_output('echo %PATH%', shell=True)).strip('\n')
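    # when the variable is unset, 'echo %PATH%' echoes the literal text back,
    # so finding "PATH" in the output is treated as a sentinel for an empty
    # value below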
if "PATH" in path:
path = ''
pypath = str(subprocess.check_output(
'echo %PYTHONPATH%', shell=True)).strip('\n')
if "PYTHONPATH" in pypath:
pypath = PmagPyDir + ';' + ProgramsDir
else:
pypath += ';' + PmagPyDir + ';' + ProgramsDir
for d_add in dirs_to_add:
path += ';' + d_add
unique_path_list = []
for p in path.split(';'):
p = p.replace('"', '')
if p not in unique_path_list:
unique_path_list.append(p)
unique_pypath_list = []
for p in pypath.split(';'):
p = p.replace('"', '')
if p not in unique_pypath_list:
unique_pypath_list.append(p)
path = functools.reduce(lambda x, y: x + ';' + y, unique_path_list)
pypath = functools.reduce(lambda x, y: x + ';' + y, unique_pypath_list)
print('setx PATH "%s"' % path)
subprocess.call('setx PATH "%s"' % path, shell=True)
print('setx PYTHONPATH "%s"' % pypath)
subprocess.call('setx PYTHONPATH "%s"' % (pypath), shell=True)
print("Install complete. Please restart the command prompt to complete install") | python | def windows_install(path_to_python=""):
"""
Sets the .py extension to be associated with the ftype Python which is
then set to the python.exe you provide in the path_to_python variable or
after the -p flag if run as a script. Once the python environment is set
up the function proceeds to set PATH and PYTHONPATH using setx.
Parameters
----------
path_to_python : the path to the python.exe you want Windows to execute when
running .py files
"""
if not path_to_python:
print("Please enter the path to your python.exe you wish Windows to use to run python files. If you do not, this script will not be able to set up a full python environment in Windows. If you already have a python environment set up in Windows such that you can run python scripts from command prompt with just a file name then ignore this message. Otherwise, you will need to run dev_setup.py again with the command line option '-p' followed by the correct full path to python.\nRun dev_setup.py with the -h flag for more details")
print("Would you like to continue? [y/N] ")
ans = input()
if ans == 'y':
pass
else:
return
# be sure to add python.exe if the user forgets to include the file name
if os.path.isdir(path_to_python):
path_to_python = os.path.join(path_to_python, "python.exe")
if not os.path.isfile(path_to_python):
print("The path to python provided is not a full path to the python.exe file or this path does not exist, was given %s.\nPlease run again with the command line option '-p' followed by the correct full path to python.\nRun dev_setup.py with the -h flag for more details" % path_to_python)
return
# make windows associate .py with python
subprocess.check_call('assoc .py=Python', shell=True)
subprocess.check_call('ftype Python=%s ' %
path_to_python + '"%1" %*', shell=True)
PmagPyDir = os.path.abspath(".")
ProgramsDir = os.path.join(PmagPyDir, 'programs')
dirs_to_add = [ProgramsDir]
for d in next(os.walk(ProgramsDir))[1]:
dirs_to_add.append(os.path.join(ProgramsDir, d))
# on Python 3, check_output returns bytes, so decode before comparing
path = subprocess.check_output('echo %PATH%', shell=True).decode().strip()
# if PATH is undefined, echo returns the literal string '%PATH%'
if "%PATH%" in path:
path = ''
pypath = subprocess.check_output(
'echo %PYTHONPATH%', shell=True).decode().strip()
# if PYTHONPATH is undefined, start it fresh; otherwise append to it
if "%PYTHONPATH%" in pypath:
pypath = PmagPyDir + ';' + ProgramsDir
else:
pypath += ';' + PmagPyDir + ';' + ProgramsDir
for d_add in dirs_to_add:
path += ';' + d_add
unique_path_list = []
for p in path.split(';'):
p = p.replace('"', '')
if p not in unique_path_list:
unique_path_list.append(p)
unique_pypath_list = []
for p in pypath.split(';'):
p = p.replace('"', '')
if p not in unique_pypath_list:
unique_pypath_list.append(p)
path = functools.reduce(lambda x, y: x + ';' + y, unique_path_list)
pypath = functools.reduce(lambda x, y: x + ';' + y, unique_pypath_list)
print('setx PATH "%s"' % path)
subprocess.call('setx PATH "%s"' % path, shell=True)
print('setx PYTHONPATH "%s"' % pypath)
subprocess.call('setx PYTHONPATH "%s"' % (pypath), shell=True)
print("Install complete. Please restart the command prompt to complete install") | Sets the .py extension to be associated with the ftype Python which is
then set to the python.exe you provide in the path_to_python variable or
after the -p flag if run as a script. Once the python environment is set
up the function proceeds to set PATH and PYTHONPATH using setx.
Parameters
----------
path_to_python : the path to the python.exe you want Windows to execute when
running .py files | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dev_setup.py#L127-L193 |
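A hypothetical invocation on Windows (the interpreter path is an assumption; substitute your own python.exe):

    python dev_setup.py -p C:\Python37\python.exe

After it completes, open a new command prompt so the setx changes take effect.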
PmagPy/PmagPy | programs/pmag_results_extract.py | main | def main():
"""
NAME
pmag_results_extract.py
DESCRIPTION
makes a tab-delimited output file from the pmag_results table
SYNTAX
pmag_results_extract.py [command line options]
OPTIONS
-h prints help message and quits
-f RFILE, specify pmag_results table; default is pmag_results.txt
-fa AFILE, specify er_ages table; default is NONE
-fsp SFILE, specify pmag_specimens table, default is NONE
-fcr CFILE, specify pmag_criteria table, default is NONE
-g include specimen_grade in table - only works for PmagPy generated pmag_specimen formatted files.
-tex, output in LaTeX format
"""
do_help = pmag.get_flag_arg_from_sys('-h')
if do_help:
print(main.__doc__)
return False
res_file = pmag.get_named_arg('-f', 'pmag_results.txt')
crit_file = pmag.get_named_arg('-fcr', '')
spec_file = pmag.get_named_arg('-fsp', '')
age_file = pmag.get_named_arg('-fa', '')
grade = pmag.get_flag_arg_from_sys('-g')
latex = pmag.get_flag_arg_from_sys('-tex')
WD = pmag.get_named_arg('-WD', os.getcwd())
ipmag.pmag_results_extract(res_file, crit_file, spec_file, age_file, latex, grade, WD) | python | def main():
"""
NAME
pmag_results_extract.py
DESCRIPTION
makes a tab-delimited output file from the pmag_results table
SYNTAX
pmag_results_extract.py [command line options]
OPTIONS
-h prints help message and quits
-f RFILE, specify pmag_results table; default is pmag_results.txt
-fa AFILE, specify er_ages table; default is NONE
-fsp SFILE, specify pmag_specimens table, default is NONE
-fcr CFILE, specify pmag_criteria table, default is NONE
-g include specimen_grade in table - only works for PmagPy generated pmag_specimen formatted files.
-tex, output in LaTeX format
"""
do_help = pmag.get_flag_arg_from_sys('-h')
if do_help:
print(main.__doc__)
return False
res_file = pmag.get_named_arg('-f', 'pmag_results.txt')
crit_file = pmag.get_named_arg('-fcr', '')
spec_file = pmag.get_named_arg('-fsp', '')
age_file = pmag.get_named_arg('-fa', '')
grade = pmag.get_flag_arg_from_sys('-g')
latex = pmag.get_flag_arg_from_sys('-tex')
WD = pmag.get_named_arg('-WD', os.getcwd())
ipmag.pmag_results_extract(res_file, crit_file, spec_file, age_file, latex, grade, WD) | NAME
pmag_results_extract.py
DESCRIPTION
makes a tab-delimited output file from the pmag_results table
SYNTAX
pmag_results_extract.py [command line options]
OPTIONS
-h prints help message and quits
-f RFILE, specify pmag_results table; default is pmag_results.txt
-fa AFILE, specify er_ages table; default is NONE
-fsp SFILE, specify pmag_specimens table, default is NONE
-fcr CFILE, specify pmag_criteria table, default is NONE
-g include specimen_grade in table - only works for PmagPy generated pmag_specimen formatted files.
-tex, output in LaTeX format | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/pmag_results_extract.py#L8-L39 |
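A hypothetical invocation using the documented default results file plus an ages table, specimen grades, and LaTeX output (the er_ages.txt file is assumed to exist):

    pmag_results_extract.py -f pmag_results.txt -fa er_ages.txt -g -tex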
PmagPy/PmagPy | programs/replace_ac_specimens.py | main | def main():
"""
NAME
replace_AC_specimens.py
DESCRIPTION
finds anisotropy corrected data and replaces the
uncorrected specimen records with it; writes the
result to a pmag_specimens format file
SYNTAX
replace_AC_specimens.py [command line options]
OPTIONS
-h prints help message and quits
-i allows interactive setting of file names
-fu TFILE uncorrected pmag_specimen format file with thellier interpretations
created by thellier_magic_redo.py
-fc AFILE anisotropy corrected pmag_specimen format file
created by thellier_magic_redo.py
-F FILE pmag_specimens format output file
DEFAULTS
TFILE: thellier_specimens.txt
AFILE: AC_specimens.txt
FILE: TorAC_specimens.txt
"""
dir_path='.'
tspec="thellier_specimens.txt"
aspec="AC_specimens.txt"
ofile="TorAC_specimens.txt"
critfile="pmag_criteria.txt"
ACSamplist,Samplist,sigmin=[],[],10000
GoodSamps,SpecOuts=[],[]
# get arguments from command line
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-fu' in sys.argv:
ind=sys.argv.index('-fu')
tspec=sys.argv[ind+1]
if '-fc' in sys.argv:
ind=sys.argv.index('-fc')
aspec=sys.argv[ind+1]
if '-F' in sys.argv:
ind=sys.argv.index('-F')
ofile=sys.argv[ind+1]
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
# read in pmag_specimens file
tspec=dir_path+'/'+tspec
aspec=dir_path+'/'+aspec
ofile=dir_path+'/'+ofile
Specs,file_type=pmag.magic_read(tspec)
Speclist=pmag.get_specs(Specs)
ACSpecs,file_type=pmag.magic_read(aspec)
ACspeclist=pmag.get_specs(ACSpecs)
for spec in Specs:
if spec["er_sample_name"] not in Samplist:Samplist.append(spec["er_sample_name"])
for spec in ACSpecs:
if spec["er_sample_name"] not in ACSamplist:ACSamplist.append(spec["er_sample_name"])
#
for samp in Samplist:
useAC,Ints,ACInts,GoodSpecs,AC,UC=0,[],[],[],[],[]
for spec in Specs:
if spec["er_sample_name"].lower()==samp.lower():
UC.append(spec)
if samp in ACSamplist:
for spec in ACSpecs:
if spec["er_sample_name"].lower()==samp.lower():
AC.append(spec)
if len(AC)>0:
AClist=[]
for spec in AC:
SpecOuts.append(spec)
AClist.append(spec['er_specimen_name'])
print('using AC: ',spec['er_specimen_name'],'%7.1f'%(1e6*float(spec['specimen_int'])))
for spec in UC:
if spec['er_specimen_name'] not in AClist:
SpecOuts.append(spec)
# print 'using UC: ',spec['er_specimen_name'],'%7.1f'%(1e6*float(spec['specimen_int']))
else:
for spec in UC:
SpecOuts.append(spec)
# print 'using UC: ',spec['er_specimen_name'],'%7.1f'%(1e6*float(spec['specimen_int']))
SpecOuts,keys=pmag.fillkeys(SpecOuts)
pmag.magic_write(ofile,SpecOuts,'pmag_specimens')
print('thellier data assessed for AC correction put in ', ofile) | python | def main():
"""
NAME
replace_AC_specimens.py
DESCRIPTION
finds anisotropy corrected data and replaces the
uncorrected specimen records with it; writes the
result to a pmag_specimens format file
SYNTAX
replace_AC_specimens.py [command line options]
OPTIONS
-h prints help message and quits
-i allows interactive setting of file names
-fu TFILE uncorrected pmag_specimen format file with thellier interpretations
created by thellier_magic_redo.py
-fc AFILE anisotropy corrected pmag_specimen format file
created by thellier_magic_redo.py
-F FILE pmag_specimens format output file
DEFAULTS
TFILE: thellier_specimens.txt
AFILE: AC_specimens.txt
FILE: TorAC_specimens.txt
"""
dir_path='.'
tspec="thellier_specimens.txt"
aspec="AC_specimens.txt"
ofile="TorAC_specimens.txt"
critfile="pmag_criteria.txt"
ACSamplist,Samplist,sigmin=[],[],10000
GoodSamps,SpecOuts=[],[]
# get arguments from command line
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-fu' in sys.argv:
ind=sys.argv.index('-fu')
tspec=sys.argv[ind+1]
if '-fc' in sys.argv:
ind=sys.argv.index('-fc')
aspec=sys.argv[ind+1]
if '-F' in sys.argv:
ind=sys.argv.index('-F')
ofile=sys.argv[ind+1]
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
# read in pmag_specimens file
tspec=dir_path+'/'+tspec
aspec=dir_path+'/'+aspec
ofile=dir_path+'/'+ofile
Specs,file_type=pmag.magic_read(tspec)
Speclist=pmag.get_specs(Specs)
ACSpecs,file_type=pmag.magic_read(aspec)
ACspeclist=pmag.get_specs(ACSpecs)
for spec in Specs:
if spec["er_sample_name"] not in Samplist:Samplist.append(spec["er_sample_name"])
for spec in ACSpecs:
if spec["er_sample_name"] not in ACSamplist:ACSamplist.append(spec["er_sample_name"])
#
for samp in Samplist:
useAC,Ints,ACInts,GoodSpecs,AC,UC=0,[],[],[],[],[]
for spec in Specs:
if spec["er_sample_name"].lower()==samp.lower():
UC.append(spec)
if samp in ACSamplist:
for spec in ACSpecs:
if spec["er_sample_name"].lower()==samp.lower():
AC.append(spec)
if len(AC)>0:
AClist=[]
for spec in AC:
SpecOuts.append(spec)
AClist.append(spec['er_specimen_name'])
print('using AC: ',spec['er_specimen_name'],'%7.1f'%(1e6*float(spec['specimen_int'])))
for spec in UC:
if spec['er_specimen_name'] not in AClist:
SpecOuts.append(spec)
# print 'using UC: ',spec['er_specimen_name'],'%7.1f'%(1e6*float(spec['specimen_int']))
else:
for spec in UC:
SpecOuts.append(spec)
# print 'using UC: ',spec['er_specimen_name'],'%7.1f'%(1e6*float(spec['specimen_int']))
SpecOuts,keys=pmag.fillkeys(SpecOuts)
pmag.magic_write(ofile,SpecOuts,'pmag_specimens')
print('thellier data assessed for AC correction put in ', ofile) | NAME
replace_AC_specimens.py
DESCRIPTION
finds anisotropy corrected data and replaces the
uncorrected specimen records with it; writes the
result to a pmag_specimens format file
SYNTAX
replace_AC_specimens.py [command line options]
OPTIONS
-h prints help message and quits
-i allows interactive setting of file names
-fu TFILE uncorrected pmag_specimen format file with thellier interpretations
created by thellier_magic_redo.py
-fc AFILE anisotropy corrected pmag_specimen format file
created by thellier_magic_redo.py
-F FILE pmag_specimens format output file
DEFAULTS
TFILE: thellier_specimens.txt
AFILE: AC_specimens.txt
FILE: TorAC_specimens.txt | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/replace_ac_specimens.py#L6-L96 |
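A hypothetical invocation spelling out the documented default file names:

    replace_ac_specimens.py -fu thellier_specimens.txt -fc AC_specimens.txt -F TorAC_specimens.txt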
PmagPy/PmagPy | dialogs/thellier_gui_lib.py | check_specimen_PI_criteria | def check_specimen_PI_criteria(pars,acceptance_criteria):
'''
Check whether a specimen passes the acceptance criteria.
'''
#if 'pars' not in self.Data[specimen].keys():
# return
pars['specimen_fail_criteria']=[]
for crit in list(acceptance_criteria.keys()):
if crit not in list(pars.keys()):
continue
if acceptance_criteria[crit]['value']==-999:
continue
if acceptance_criteria[crit]['category']!='IE-SPEC':
continue
cutoff_value=acceptance_criteria[crit]['value']
if crit=='specimen_scat':
if pars["specimen_scat"] in ["Fail",'b',0,'0','FALSE',"False",False,"f"]:
pars['specimen_fail_criteria'].append('specimen_scat')
elif crit=='specimen_k' or crit=='specimen_k_prime':
if abs(pars[crit])>cutoff_value:
pars['specimen_fail_criteria'].append(crit)
# high threshold value:
elif acceptance_criteria[crit]['threshold_type']=="high":
if pars[crit]>cutoff_value:
pars['specimen_fail_criteria'].append(crit)
elif acceptance_criteria[crit]['threshold_type']=="low":
if pars[crit]<cutoff_value:
pars['specimen_fail_criteria'].append(crit)
return pars | python | def check_specimen_PI_criteria(pars,acceptance_criteria):
'''
Check whether a specimen passes the acceptance criteria.
'''
#if 'pars' not in self.Data[specimen].keys():
# return
pars['specimen_fail_criteria']=[]
for crit in list(acceptance_criteria.keys()):
if crit not in list(pars.keys()):
continue
if acceptance_criteria[crit]['value']==-999:
continue
if acceptance_criteria[crit]['category']!='IE-SPEC':
continue
cutoff_value=acceptance_criteria[crit]['value']
if crit=='specimen_scat':
if pars["specimen_scat"] in ["Fail",'b',0,'0','FALSE',"False",False,"f"]:
pars['specimen_fail_criteria'].append('specimen_scat')
elif crit=='specimen_k' or crit=='specimen_k_prime':
if abs(pars[crit])>cutoff_value:
pars['specimen_fail_criteria'].append(crit)
# high threshold value:
elif acceptance_criteria[crit]['threshold_type']=="high":
if pars[crit]>cutoff_value:
pars['specimen_fail_criteria'].append(crit)
elif acceptance_criteria[crit]['threshold_type']=="low":
if pars[crit]<cutoff_value:
pars['specimen_fail_criteria'].append(crit)
return pars | Check whether a specimen passes the acceptance criteria. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/thellier_gui_lib.py#L295-L324 |
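A minimal sketch of the argument shapes implied by the code; the criterion names, cutoff values, and import path are illustrative assumptions (importing the dialogs package may pull in GUI dependencies):

    from dialogs.thellier_gui_lib import check_specimen_PI_criteria
    acceptance_criteria = {
        'specimen_frac': {'value': 0.8, 'category': 'IE-SPEC',
                          'threshold_type': 'low'},
        'specimen_b_beta': {'value': 0.1, 'category': 'IE-SPEC',
                            'threshold_type': 'high'},
    }
    pars = {'specimen_frac': 0.75, 'specimen_b_beta': 0.05}
    pars = check_specimen_PI_criteria(pars, acceptance_criteria)
    print(pars['specimen_fail_criteria'])  # ['specimen_frac']: 0.75 is below the 0.8 cutoff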
PmagPy/PmagPy | programs/grab_magic_key.py | main | def main():
"""
NAME
grab_magic_key.py
DESCRIPTION
picks out the values of a specified key and prints them to standard output
SYNTAX
grab_magic_key.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input magic format file
-key KEY: specify key to print to standard output
"""
dir_path = "./"
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path = sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind = sys.argv.index('-f')
magic_file = dir_path+'/'+sys.argv[ind+1]
else:
print(main.__doc__)
sys.exit()
if '-key' in sys.argv:
ind = sys.argv.index('-key')
grab_key = sys.argv[ind+1]
else:
print(main.__doc__)
sys.exit()
#
#
# get data read in
Data, file_type = pmag.magic_read(magic_file)
if len(Data) > 0:
for rec in Data:
print(rec[grab_key])
else:
print('bad file name') | python | def main():
"""
NAME
grab_magic_key.py
DESCRIPTION
picks out the values of a specified key and prints them to standard output
SYNTAX
grab_magic_key.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input magic format file
-key KEY: specify key to print to standard output
"""
dir_path = "./"
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path = sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind = sys.argv.index('-f')
magic_file = dir_path+'/'+sys.argv[ind+1]
else:
print(main.__doc__)
sys.exit()
if '-key' in sys.argv:
ind = sys.argv.index('-key')
grab_key = sys.argv[ind+1]
else:
print(main.__doc__)
sys.exit()
#
#
# get data read in
Data, file_type = pmag.magic_read(magic_file)
if len(Data) > 0:
for rec in Data:
print(rec[grab_key])
else:
print('bad file name') | NAME
grab_magic_key.py
DESCRIPTION
picks out the values of a specified key and prints them to standard output
SYNTAX
grab_magic_key.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input magic format file
-key KEY: specify key to print to standard output | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/grab_magic_key.py#L6-L50 |
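A hypothetical invocation that pulls one column out of a MagIC file and redirects it to a file (the file and key names are illustrative):

    grab_magic_key.py -f er_sites.txt -key site_lat > site_lats.txt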
PmagPy/PmagPy | programs/dia_vgp.py | main | def main():
"""
NAME
dia_vgp.py
DESCRIPTION
converts declination inclination alpha95 to virtual geomagnetic pole, dp and dm
SYNTAX
dia_vgp.py [-h] [-i] [-f FILE] [< filename]
OPTIONS
-h prints help message and quits
-i interactive data entry
-f FILE to specify file name on the command line
INPUT
for file entry:
D I A95 SLAT SLON
where:
D: declination
I: inclination
A95: alpha_95
SLAT: site latitude (positive north)
SLON: site longitude (positive east)
OUTPUT
PLON PLAT DP DM
where:
PLAT: pole latitude
PLON: pole longitude (positive east)
DP: 95% confidence angle in parallel
DM: 95% confidence angle in meridian
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-i' in sys.argv: # if one is -i
while 1:
try:
ans=input("Input Declination: <cntrl-D to quit> ")
Dec=float(ans) # assign input to Dec, after conversion to floating point
ans=input("Input Inclination: ")
Inc =float(ans)
ans=input("Input Alpha 95: ")
a95 =float(ans)
ans=input("Input Site Latitude: ")
slat =float(ans)
ans=input("Input Site Longitude: ")
slong =float(ans)
plong,plat,dp,dm=pmag.dia_vgp(Dec,Inc,a95,slat,slong) # call dia_vgp function from pmag module
print('%7.1f %7.1f %7.1f %7.1f'%(plong,plat,dp,dm)) # print out returned stuff
except:
print("\n Good-bye\n")
sys.exit()
elif '-f' in sys.argv: # manual input of file name
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
inlist = []
for line in f.readlines():
inlist.append([])
# loop over the elements, split by whitespace
for el in line.split():
inlist[-1].append(float(el))
spitout(inlist)
else:
lines = sys.stdin.readlines() # read from standard input; don't shadow the input() builtin
inlist = []
for line in lines: # read in the data (as string variable), line by line
inlist.append([])
# loop over the elements, split by whitespace
for el in line.split():
inlist[-1].append(float(el))
spitout(inlist) | python | def main():
"""
NAME
dia_vgp.py
DESCRIPTION
converts declination inclination alpha95 to virtual geomagnetic pole, dp and dm
SYNTAX
dia_vgp.py [-h] [-i] [-f FILE] [< filename]
OPTIONS
-h prints help message and quits
-i interactive data entry
-f FILE to specify file name on the command line
INPUT
for file entry:
D I A95 SLAT SLON
where:
D: declination
I: inclination
A95: alpha_95
SLAT: site latitude (positive north)
SLON: site longitude (positive east)
OUTPUT
PLON PLAT DP DM
where:
PLAT: pole latitude
PLON: pole longitude (positive east)
DP: 95% confidence angle in parallel
DM: 95% confidence angle in meridian
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-i' in sys.argv: # if one is -i
while 1:
try:
ans=input("Input Declination: <cntrl-D to quit> ")
Dec=float(ans) # assign input to Dec, after conversion to floating point
ans=input("Input Inclination: ")
Inc =float(ans)
ans=input("Input Alpha 95: ")
a95 =float(ans)
ans=input("Input Site Latitude: ")
slat =float(ans)
ans=input("Input Site Longitude: ")
slong =float(ans)
plong,plat,dp,dm=pmag.dia_vgp(Dec,Inc,a95,slat,slong) # call dia_vgp function from pmag module
print('%7.1f %7.1f %7.1f %7.1f'%(plong,plat,dp,dm)) # print out returned stuff
except:
print("\n Good-bye\n")
sys.exit()
elif '-f' in sys.argv: # manual input of file name
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
inlist = []
for line in f.readlines():
inlist.append([])
# loop over the elements, split by whitespace
for el in line.split():
inlist[-1].append(float(el))
spitout(inlist)
else:
lines = sys.stdin.readlines() # read from standard input; don't shadow the input() builtin
inlist = []
for line in lines: # read in the data (as string variable), line by line
inlist.append([])
# loop over the elements, split by whitespace
for el in line.split():
inlist[-1].append(float(el))
spitout(inlist) | NAME
dia_vgp.py
DESCRIPTION
converts declination inclination alpha95 to virtual geomagnetic pole, dp and dm
SYNTAX
dia_vgp.py [-h] [-i] [-f FILE] [< filename]
OPTIONS
-h prints help message and quits
-i interactive data entry
-f FILE to specify file name on the command line
INPUT
for file entry:
D I A95 SLAT SLON
where:
D: declination
I: inclination
A95: alpha_95
SLAT: site latitude (positive north)
SLON: site longitude (positive east)
OUTPUT
PLON PLAT DP DM
where:
PLAT: pole latitude
PLON: pole longitude (positive east)
DP: 95% confidence angle in parallel
DM: 95% confidence angle in meridian | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/dia_vgp.py#L27-L102 |
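Feeding one record on standard input, in the documented D I A95 SLAT SLON order (the numbers are made up):

    echo "11.0 63.0 2.5 55.0 13.0" | dia_vgp.py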
PmagPy/PmagPy | programs/plot_2cdfs.py | main | def main():
"""
NAME
plot_2cdfs.py
DESCRIPTION
makes plots of cdfs of data in input file
SYNTAX
plot_2cdfs.py [-h][command line options]
OPTIONS
-h prints help message and quits
-f FILE1 FILE2
-t TITLE
-fmt [svg,eps,png,pdf,jpg..] specify format of output figure, default is svg
"""
fmt='svg'
title=""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
X=numpy.loadtxt(file)
file=sys.argv[ind+2]
X2=numpy.loadtxt(file)
# else:
# X=numpy.loadtxt(sys.stdin,dtype=numpy.float)
else:
print('-f option required')
print(main.__doc__)
sys.exit()
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt=sys.argv[ind+1]
if '-t' in sys.argv:
ind=sys.argv.index('-t')
title=sys.argv[ind+1]
CDF={'X':1}
pmagplotlib.plot_init(CDF['X'],5,5)
pmagplotlib.plot_cdf(CDF['X'],X,'','r','')
pmagplotlib.plot_cdf(CDF['X'],X2,title,'b','')
D,p=scipy.stats.ks_2samp(X,X2)
if p>=.05:
print(D,p,' null hypothesis of a common distribution not rejected at 95% confidence')
else:
print(D,p,' null hypothesis of a common distribution rejected at 95% confidence')
pmagplotlib.draw_figs(CDF)
ans= input('S[a]ve plot, <Return> to quit ')
if ans=='a':
files={'X':'CDF_.'+fmt}
pmagplotlib.save_plots(CDF,files) | python | def main():
"""
NAME
plot_2cdfs.py
DESCRIPTION
makes plots of cdfs of data in input file
SYNTAX
plot_2cdfs.py [-h][command line options]
OPTIONS
-h prints help message and quits
-f FILE1 FILE2
-t TITLE
-fmt [svg,eps,png,pdf,jpg..] specify format of output figure, default is svg
"""
fmt='svg'
title=""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
X=numpy.loadtxt(file)
file=sys.argv[ind+2]
X2=numpy.loadtxt(file)
# else:
# X=numpy.loadtxt(sys.stdin,dtype=numpy.float)
else:
print('-f option required')
print(main.__doc__)
sys.exit()
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt=sys.argv[ind+1]
if '-t' in sys.argv:
ind=sys.argv.index('-t')
title=sys.argv[ind+1]
CDF={'X':1}
pmagplotlib.plot_init(CDF['X'],5,5)
pmagplotlib.plot_cdf(CDF['X'],X,'','r','')
pmagplotlib.plot_cdf(CDF['X'],X2,title,'b','')
D,p=scipy.stats.ks_2samp(X,X2)
if p>=.05:
print(D,p,' null hypothesis of a common distribution not rejected at 95% confidence')
else:
print(D,p,' null hypothesis of a common distribution rejected at 95% confidence')
pmagplotlib.draw_figs(CDF)
ans= input('S[a]ve plot, <Return> to quit ')
if ans=='a':
files={'X':'CDF_.'+fmt}
pmagplotlib.save_plots(CDF,files) | NAME
plot_2cdfs.py
DESCRIPTION
makes plots of cdfs of data in input file
SYNTAX
plot_2cdfs.py [-h][command line options]
OPTIONS
-h prints help message and quits
-f FILE1 FILE2
-t TITLE
-fmt [svg,eps,png,pdf,jpg..] specify format of output figure, default is svg | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/plot_2cdfs.py#L11-L65 |
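A hypothetical invocation comparing two single-column data files (the file names are illustrative):

    plot_2cdfs.py -f data1.txt data2.txt -t 'KS comparison' -fmt pdf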
PmagPy/PmagPy | programs/lowrie_magic.py | main | def main():
"""
NAME
lowrie_magic.py
DESCRIPTION
plots intensity decay curves for Lowrie experiments
SYNTAX
lowrie_magic.py -h [command line options]
INPUT
takes measurements formatted input files
OPTIONS
-h prints help message and quits
-f FILE: specify input file, default is measurements.txt
-N do not normalize by maximum magnetization
-fmt [svg, pdf, eps, png] specify fmt, default is svg
-sav saves plots and quits
-DM [2, 3] MagIC data model number
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if len(sys.argv) <= 1:
print(main.__doc__)
print('you must supply a file name')
sys.exit()
FIG = {} # plot dictionary
FIG['lowrie'] = 1 # demag is figure 1
pmagplotlib.plot_init(FIG['lowrie'], 6, 6)
norm = 1 # default is to normalize by maximum axis
in_file = pmag.get_named_arg("-f", "measurements.txt")
dir_path = pmag.get_named_arg("-WD", ".")
in_file = pmag.resolve_file_name(in_file, dir_path)
data_model = pmag.get_named_arg("-DM", 3)
data_model = int(float(data_model))
fmt = pmag.get_named_arg("-fmt", "svg")
if '-N' in sys.argv:
norm = 0 # don't normalize
if '-sav' in sys.argv:
plot = 1 # silently save and quit
else:
plot = 0 # generate plots
print(in_file)
# read in data
PmagRecs, file_type = pmag.magic_read(in_file)
if data_model == 2 and file_type != "magic_measurements":
print('bad input file', file_type)
sys.exit()
if data_model == 3 and file_type != "measurements":
print('bad input file', file_type)
sys.exit()
if data_model == 2:
meth_code_col = 'magic_method_codes'
spec_col = 'er_specimen_name'
dec_col = "measurement_dec"
inc_col = 'measurement_inc'
moment_col = 'measurement_magn_moment'
temp_col = 'treatment_temp'
else:
meth_code_col = 'method_codes'
spec_col = 'specimen'
dec_col = 'dir_dec'
inc_col = 'dir_inc'
moment_col = 'magn_moment'
temp_col = "treat_temp"
PmagRecs = pmag.get_dictitem(
PmagRecs, meth_code_col, 'LP-IRM-3D', 'has') # get all 3D IRM records
if len(PmagRecs) == 0:
print('no records found with the method code LP-IRM-3D')
sys.exit()
specs = pmag.get_dictkey(PmagRecs, spec_col, '')
sids = []
for spec in specs:
if spec not in sids:
sids.append(spec) # get list of unique specimen names
for spc in sids: # step through the specimen names
print(spc)
specdata = pmag.get_dictitem(
PmagRecs, spec_col, spc, 'T') # get all this one's data
DIMs, Temps = [], []
for dat in specdata: # step through the data
DIMs.append([float(dat[dec_col]), float(
dat[inc_col]), float(dat[moment_col])])
Temps.append(float(dat[temp_col])-273.)
carts = pmag.dir2cart(DIMs).transpose()
if norm == 1: # want to normalize
nrm = (DIMs[0][2]) # normalize by NRM
ylab = "M/M_o"
else:
nrm = 1. # don't normalize
ylab = "Magnetic moment (Am^2)"
xlab = "Temperature (C)"
pmagplotlib.plot_xy(FIG['lowrie'], Temps, abs(carts[0]) / nrm, sym='r-')
pmagplotlib.plot_xy(FIG['lowrie'], Temps, abs(carts[0]) / nrm, sym='ro') # X direction
pmagplotlib.plot_xy(FIG['lowrie'], Temps, abs(carts[1]) / nrm, sym='c-')
pmagplotlib.plot_xy(FIG['lowrie'], Temps, abs(carts[1]) / nrm, sym='cs') # Y direction
pmagplotlib.plot_xy(FIG['lowrie'], Temps, abs(carts[2]) / nrm, sym='k-')
pmagplotlib.plot_xy(FIG['lowrie'], Temps, abs(carts[2]) / nrm, sym='k^', title=spc, xlab=xlab, ylab=ylab) # Z direction
files = {'lowrie': 'lowrie_'+spc+'.'+fmt}  # avoid ':' in file names, which Windows forbids
if plot == 0:
pmagplotlib.draw_figs(FIG)
ans = input('S[a]ve figure? [q]uit, <return> to continue ')
if ans == 'a':
pmagplotlib.save_plots(FIG, files)
elif ans == 'q':
sys.exit()
else:
pmagplotlib.save_plots(FIG, files)
pmagplotlib.clearFIG(FIG['lowrie']) | python | def main():
"""
NAME
lowrie_magic.py
DESCRIPTION
plots intensity decay curves for Lowrie experiments
SYNTAX
lowrie_magic.py -h [command line options]
INPUT
takes measurements formatted input files
OPTIONS
-h prints help message and quits
-f FILE: specify input file, default is measurements.txt
-N do not normalize by maximum magnetization
-fmt [svg, pdf, eps, png] specify fmt, default is svg
-sav saves plots and quits
-DM [2, 3] MagIC data model number
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if len(sys.argv) <= 1:
print(main.__doc__)
print('you must supply a file name')
sys.exit()
FIG = {} # plot dictionary
FIG['lowrie'] = 1 # demag is figure 1
pmagplotlib.plot_init(FIG['lowrie'], 6, 6)
norm = 1 # default is to normalize by maximum axis
in_file = pmag.get_named_arg("-f", "measurements.txt")
dir_path = pmag.get_named_arg("-WD", ".")
in_file = pmag.resolve_file_name(in_file, dir_path)
data_model = pmag.get_named_arg("-DM", 3)
data_model = int(float(data_model))
fmt = pmag.get_named_arg("-fmt", "svg")
if '-N' in sys.argv:
norm = 0 # don't normalize
if '-sav' in sys.argv:
plot = 1 # silently save and quit
else:
plot = 0 # generate plots
print(in_file)
# read in data
PmagRecs, file_type = pmag.magic_read(in_file)
if data_model == 2 and file_type != "magic_measurements":
print('bad input file', file_type)
sys.exit()
if data_model == 3 and file_type != "measurements":
print('bad input file', file_type)
sys.exit()
if data_model == 2:
meth_code_col = 'magic_method_codes'
spec_col = 'er_specimen_name'
dec_col = "measurement_dec"
inc_col = 'measurement_inc'
moment_col = 'measurement_magn_moment'
temp_col = 'treatment_temp'
else:
meth_code_col = 'method_codes'
spec_col = 'specimen'
dec_col = 'dir_dec'
inc_col = 'dir_inc'
moment_col = 'magn_moment'
temp_col = "treat_temp"
PmagRecs = pmag.get_dictitem(
PmagRecs, meth_code_col, 'LP-IRM-3D', 'has') # get all 3D IRM records
if len(PmagRecs) == 0:
print('no records found with the method code LP-IRM-3D')
sys.exit()
specs = pmag.get_dictkey(PmagRecs, spec_col, '')
sids = []
for spec in specs:
if spec not in sids:
sids.append(spec) # get list of unique specimen names
for spc in sids: # step through the specimen names
print(spc)
specdata = pmag.get_dictitem(
PmagRecs, spec_col, spc, 'T') # get all this one's data
DIMs, Temps = [], []
for dat in specdata: # step through the data
DIMs.append([float(dat[dec_col]), float(
dat[inc_col]), float(dat[moment_col])])
Temps.append(float(dat[temp_col])-273.)
carts = pmag.dir2cart(DIMs).transpose()
if norm == 1: # want to normalize
nrm = (DIMs[0][2]) # normalize by NRM
ylab = "M/M_o"
else:
nrm = 1. # don't normalize
ylab = "Magnetic moment (Am^2)"
xlab = "Temperature (C)"
pmagplotlib.plot_xy(FIG['lowrie'], Temps, abs(carts[0]) / nrm, sym='r-')
pmagplotlib.plot_xy(FIG['lowrie'], Temps, abs(carts[0]) / nrm, sym='ro') # X direction
pmagplotlib.plot_xy(FIG['lowrie'], Temps, abs(carts[1]) / nrm, sym='c-')
pmagplotlib.plot_xy(FIG['lowrie'], Temps, abs(carts[1]) / nrm, sym='cs') # Y direction
pmagplotlib.plot_xy(FIG['lowrie'], Temps, abs(carts[2]) / nrm, sym='k-')
pmagplotlib.plot_xy(FIG['lowrie'], Temps, abs(carts[2]) / nrm, sym='k^', title=spc, xlab=xlab, ylab=ylab) # Z direction
files = {'lowrie': 'lowrie_'+spc+'.'+fmt}  # avoid ':' in file names, which Windows forbids
if plot == 0:
pmagplotlib.draw_figs(FIG)
ans = input('S[a]ve figure? [q]uit, <return> to continue ')
if ans == 'a':
pmagplotlib.save_plots(FIG, files)
elif ans == 'q':
sys.exit()
else:
pmagplotlib.save_plots(FIG, files)
pmagplotlib.clearFIG(FIG['lowrie']) | NAME
lowrie_magic.py
DESCRIPTION
plots intensity decay curves for Lowrie experiments
SYNTAX
lowrie_magic.py -h [command line options]
INPUT
takes measurements formatted input files
OPTIONS
-h prints help message and quits
-f FILE: specify input file, default is measurements.txt
-N do not normalize by maximum magnetization
-fmt [svg, pdf, eps, png] specify fmt, default is svg
-sav saves plots and quits
-DM [2, 3] MagIC data model number | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/lowrie_magic.py#L11-L127 |
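A hypothetical invocation that saves one plot per specimen without prompting, using the data-model-3 default file name:

    lowrie_magic.py -f measurements.txt -sav -fmt pdf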
PmagPy/PmagPy | programs/deprecated/reorder_samples.py | main | def main():
"""
NAME
reorder_samples.py
DESCRIPTION
takes specimen file and reorders sample file with selected orientation methods placed first
SYNTAX
reorder_samples.py [command line options]
OPTIONS
-h prints help message and quits
-fsp: specimen input pmag_specimens format file, default is "pmag_specimens.txt"
-fsm: sample input er_samples format file, default is "er_samples.txt"
-F: output er_samples format file, default is "er_samples.txt"
OUTPUT
writes re-ordered er_samples.txt file
"""
infile='pmag_specimens.txt'
sampfile="er_samples.txt"
outfile="er_samples.txt"
# get command line stuff
if "-h" in sys.argv:
print(main.__doc__)
sys.exit()
if '-fsp' in sys.argv:
ind=sys.argv.index("-fsp")
infile=sys.argv[ind+1]
if '-fsm' in sys.argv:
ind=sys.argv.index("-fsm")
sampfile=sys.argv[ind+1]
if '-F' in sys.argv:
ind=sys.argv.index("-F")
outfile=sys.argv[ind+1]
if '-WD' in sys.argv:
ind=sys.argv.index("-WD")
dir_path=sys.argv[ind+1]
infile=dir_path+'/'+infile
sampfile=dir_path+'/'+sampfile
outfile=dir_path+'/'+outfile
# now do re-ordering
pmag.ReorderSamples(infile,sampfile,outfile) | python | def main():
"""
NAME
reorder_samples.py
DESCRIPTION
takes specimen file and reorders sample file with selected orientation methods placed first
SYNTAX
reorder_samples.py [command line options]
OPTIONS
-h prints help message and quits
-fsp: specimen input pmag_specimens format file, default is "pmag_specimens.txt"
-fsm: sample input er_samples format file, default is "er_samples.txt"
-F: output er_samples format file, default is "er_samples.txt"
OUTPUT
writes re-ordered er_samples.txt file
"""
infile='pmag_specimens.txt'
sampfile="er_samples.txt"
outfile="er_samples.txt"
# get command line stuff
if "-h" in sys.argv:
print(main.__doc__)
sys.exit()
if '-fsp' in sys.argv:
ind=sys.argv.index("-fsp")
infile=sys.argv[ind+1]
if '-fsm' in sys.argv:
ind=sys.argv.index("-fsm")
sampfile=sys.argv[ind+1]
if '-F' in sys.argv:
ind=sys.argv.index("-F")
outfile=sys.argv[ind+1]
if '-WD' in sys.argv:
ind=sys.argv.index("-WD")
dir_path=sys.argv[ind+1]
infile=dir_path+'/'+infile
sampfile=dir_path+'/'+sampfile
outfile=dir_path+'/'+outfile
# now do re-ordering
pmag.ReorderSamples(infile,sampfile,outfile) | NAME
reorder_samples.py
DESCRIPTION
takes specimen file and reorders sample file with selected orientation methods placed first
SYNTAX
reorder_samples.py [command line options]
OPTIONS
-h prints help message and quits
-fsp: specimen input pmag_specimens format file, default is "pmag_specimens.txt"
-fsm: sample input er_samples format file, default is "er_samples.txt"
-F: output er_samples format file, default is "er_samples.txt"
OUTPUT
writes re-ordered er_samples.txt file | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/deprecated/reorder_samples.py#L7-L50 |
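A hypothetical invocation with an explicit output name so the input er_samples.txt is not overwritten (the output file name is an assumption):

    reorder_samples.py -fsp pmag_specimens.txt -fsm er_samples.txt -F er_samples_ordered.txt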
PmagPy/PmagPy | pmagpy/contribution_builder.py | not_null | def not_null(val, zero_as_null=True):
"""
Comprehensive check to see if a value is null or not.
Returns True for: non-empty iterables, True, non-zero floats and ints,
non-empty strings.
Returns False for: empty iterables, False, zero, empty strings.
Parameters
----------
val : any Python object
zero_as_null: bool
treat zero as null, default True
Returns
---------
boolean
"""
def can_iter(x):
"""
Returns True if x is iterable (including empty iterables)
"""
try:
any(x)
return True
except TypeError:
return False
def not_empty(x):
"""
Returns true if x has length
"""
if len(x):
return True
return False
def exists(x):
"""
Returns true if x
"""
if x:
return True
return False
def is_nan(x):
"""
Returns True if x is nan
"""
try:
if np.isnan(x):
return True
except TypeError:
return False
return False
# return True iff you have a non-empty iterable
# and False for an empty iterable (including an empty string)
if can_iter(val):
return not_empty(val)
# if value is not iterable, return False for np.nan, None, 0, or False
# & True for all else
else:
if is_nan(val):
return False
if not zero_as_null:
if val == 0:
return True
return exists(val) | python | def not_null(val, zero_as_null=True):
"""
Comprehensive check to see if a value is null or not.
Returns True for: non-empty iterables, True, non-zero floats and ints,
non-empty strings.
Returns False for: empty iterables, False, zero, empty strings.
Parameters
----------
val : any Python object
zero_as_null: bool
treat zero as null, default True
Returns
---------
boolean
"""
def can_iter(x):
"""
Returns True if x is iterable (including empty iterables)
"""
try:
any(x)
return True
except TypeError:
return False
def not_empty(x):
"""
Returns true if x has length
"""
if len(x):
return True
return False
def exists(x):
"""
Returns true if x
"""
if x:
return True
return False
def is_nan(x):
"""
Returns True if x is nan
"""
try:
if np.isnan(x):
return True
except TypeError:
return False
return False
# return True iff you have a non-empty iterable
# and False for an empty iterable (including an empty string)
if can_iter(val):
return not_empty(val)
# if value is not iterable, return False for np.nan, None, 0, or False
# & True for all else
else:
if is_nan(val):
return False
if not zero_as_null:
if val == 0:
return True
return exists(val) | Comprehensive check to see if a value is null or not.
Returns True for: non-empty iterables, True, non-zero floats and ints,
non-emtpy strings.
Returns False for: empty iterables, False, zero, empty strings.
Parameters
----------
val : any Python object
zero_as_null: bool
treat zero as null, default True
Returns
---------
boolean | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L2251-L2319 |
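A few illustrative calls, a sketch assuming the import path below:

    import numpy as np
    from pmagpy.contribution_builder import not_null
    not_null('')                       # False: empty string
    not_null([1, 2, 3])                # True: non-empty iterable
    not_null(np.nan)                   # False: nan is null
    not_null(0)                        # False: zero is null by default
    not_null(0, zero_as_null=False)    # True: zero allowed when flag is off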
PmagPy/PmagPy | pmagpy/contribution_builder.py | get_intensity_col | def get_intensity_col(data):
"""
Check measurement dataframe for intensity columns 'magn_moment', 'magn_volume', 'magn_mass','magn_uncal'.
Return the first intensity column that is in the dataframe AND has data.
Parameters
----------
data : pandas DataFrame
Returns
---------
str
intensity method column or ""
"""
# drop fully null columns first, so the checks below see only real data
data = data.dropna(axis='columns', how='all')
# possible intensity columns
intlist = ['magn_moment', 'magn_volume', 'magn_mass', 'magn_uncal']
# intensity columns that are in the data
int_meths = [col_name for col_name in data.columns if col_name in intlist]
# ignore columns with only blank values (including "")
for col_name in int_meths[:]:
if not data[col_name].any():
int_meths.remove(col_name)
if len(int_meths):
if 'magn_moment' in int_meths:
return 'magn_moment'
return int_meths[0]
return "" | python | def get_intensity_col(data):
"""
Check measurement dataframe for intensity columns 'magn_moment', 'magn_volume', 'magn_mass','magn_uncal'.
Return the first intensity column that is in the dataframe AND has data.
Parameters
----------
data : pandas DataFrame
Returns
---------
str
intensity method column or ""
"""
# drop fully null columns first, so the checks below see only real data
data = data.dropna(axis='columns', how='all')
# possible intensity columns
intlist = ['magn_moment', 'magn_volume', 'magn_mass', 'magn_uncal']
# intensity columns that are in the data
int_meths = [col_name for col_name in data.columns if col_name in intlist]
# ignore columns with only blank values (including "")
for col_name in int_meths[:]:
if not data[col_name].any():
int_meths.remove(col_name)
if len(int_meths):
if 'magn_moment' in int_meths:
return 'magn_moment'
return int_meths[0]
return "" | Check measurement dataframe for intensity columns 'magn_moment', 'magn_volume', 'magn_mass','magn_uncal'.
Return the first intensity column that is in the dataframe AND has data.
Parameters
----------
data : pandas DataFrame
Returns
---------
str
intensity method column or "" | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L2328-L2356 |
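A small sketch (the column values are made up):

    import pandas as pd
    from pmagpy.contribution_builder import get_intensity_col
    df = pd.DataFrame({'magn_volume': [None, None],
                       'magn_moment': [1.2e-05, 3.4e-05]})
    get_intensity_col(df)  # 'magn_moment': the preferred column with data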
PmagPy/PmagPy | pmagpy/contribution_builder.py | add_sites_to_meas_table | def add_sites_to_meas_table(dir_path):
"""
Add site columns to measurements table (e.g., to plot intensity data),
or generate an informative error message.
Parameters
----------
dir_path : str
directory with data files
Returns
----------
status : bool
True if successful, else False
data : pandas DataFrame
measurement data with site/sample columns, or an error message on failure
"""
reqd_tables = ['measurements', 'specimens', 'samples', 'sites']
con = Contribution(dir_path, read_tables=reqd_tables)
# check that all required tables are available
missing_tables = []
for table in reqd_tables:
if table not in con.tables:
missing_tables.append(table)
if missing_tables:
return False, "You are missing {} tables".format(", ".join(missing_tables))
# put sample column into the measurements table
con.propagate_name_down('sample', 'measurements')
# put site column into the measurements table
con.propagate_name_down('site', 'measurements')
# check that column propagation was successful
if 'site' not in con.tables['measurements'].df.columns:
return False, "Something went wrong with propagating sites down to the measurement level"
return True, con.tables['measurements'].df | python | def add_sites_to_meas_table(dir_path):
"""
Add site columns to measurements table (e.g., to plot intensity data),
or generate an informative error message.
Parameters
----------
dir_path : str
directory with data files
Returns
----------
status : bool
True if successful, else False
data : pandas DataFrame
measurement data with site/sample columns, or an error message on failure
"""
reqd_tables = ['measurements', 'specimens', 'samples', 'sites']
con = Contribution(dir_path, read_tables=reqd_tables)
# check that all required tables are available
missing_tables = []
for table in reqd_tables:
if table not in con.tables:
missing_tables.append(table)
if missing_tables:
return False, "You are missing {} tables".format(", ".join(missing_tables))
# put sample column into the measurements table
con.propagate_name_down('sample', 'measurements')
# put site column into the measurements table
con.propagate_name_down('site', 'measurements')
# check that column propagation was successful
if 'site' not in con.tables['measurements'].df.columns:
return False, "Something went wrong with propagating sites down to the measurement level"
return True, con.tables['measurements'].df | Add site columns to measurements table (e.g., to plot intensity data),
or generate an informative error message.
Parameters
----------
dir_path : str
directory with data files
Returns
----------
status : bool
True if successful, else False
data : pandas DataFrame
measurement data with site/sample columns, or an error message on failure | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L2359-L2394 |
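A usage sketch, assuming the current directory holds the four required MagIC tables:

    from pmagpy.contribution_builder import add_sites_to_meas_table
    status, data = add_sites_to_meas_table('.')
    if status:
        print(data['site'].unique())
    else:
        print(data)  # on failure, data is the error message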
PmagPy/PmagPy | pmagpy/contribution_builder.py | prep_for_intensity_plot | def prep_for_intensity_plot(data, meth_code, dropna=(), reqd_cols=()):
"""
Strip down measurement data to what is needed for an intensity plot.
Find the column with intensity data.
Drop empty columns, and make sure required columns are present.
Keep only records with the specified method code.
Parameters
----------
data : pandas DataFrame
measurement dataframe
meth_code : str
MagIC method code to include, i.e. 'LT-AF-Z'
dropna : list
columns that must not be empty
reqd_cols : list
columns that must be present
Returns
----------
status : bool
True if successful, else False
data : pandas DataFrame
measurement data with required columns
"""
# initialize
dropna = list(dropna)
reqd_cols = list(reqd_cols)
# get intensity column
try:
magn_col = get_intensity_col(data)
except AttributeError:
return False, "Could not get intensity method from data"
# drop empty columns
if magn_col not in dropna:
dropna.append(magn_col)
data = data.dropna(axis=0, subset=dropna)
# add to reqd_cols list
if 'method_codes' not in reqd_cols:
reqd_cols.append('method_codes')
if magn_col not in reqd_cols:
reqd_cols.append(magn_col)
# drop non reqd cols, make sure all reqd cols are present
try:
data = data[reqd_cols]
except KeyError as ex:
print(ex)
missing = set(reqd_cols).difference(data.columns)
return False, "missing these required columns: {}".format(", ".join(missing))
# filter out records without the correct method code
data = data[data['method_codes'].str.contains(meth_code).astype(bool)]
return True, data | python | def prep_for_intensity_plot(data, meth_code, dropna=(), reqd_cols=()):
"""
Strip down measurement data to what is needed for an intensity plot.
Find the column with intensity data.
Drop empty columns, and make sure required columns are present.
Keep only records with the specified method code.
Parameters
----------
data : pandas DataFrame
measurement dataframe
meth_code : str
MagIC method code to include, i.e. 'LT-AF-Z'
dropna : list
columns that must not be empty
reqd_cols : list
columns that must be present
Returns
----------
status : bool
True if successful, else False
data : pandas DataFrame
measurement data with required columns
"""
# initialize
dropna = list(dropna)
reqd_cols = list(reqd_cols)
# get intensity column
try:
magn_col = get_intensity_col(data)
except AttributeError:
return False, "Could not get intensity method from data"
# drop empty columns
if magn_col not in dropna:
dropna.append(magn_col)
data = data.dropna(axis=0, subset=dropna)
# add to reqd_cols list
if 'method_codes' not in reqd_cols:
reqd_cols.append('method_codes')
if magn_col not in reqd_cols:
reqd_cols.append(magn_col)
# drop non reqd cols, make sure all reqd cols are present
try:
data = data[reqd_cols]
except KeyError as ex:
print(ex)
missing = set(reqd_cols).difference(data.columns)
return False, "missing these required columns: {}".format(", ".join(missing))
# filter out records without the correct method code
data = data[data['method_codes'].str.contains(meth_code).astype(bool)]
return True, data | Strip down measurement data to what is needed for an intensity plot.
Find the column with intensity data.
Drop empty columns, and make sure required columns are present.
Keep only records with the specified method code.
Parameters
----------
data : pandas DataFrame
measurement dataframe
meth_code : str
MagIC method code to include, i.e. 'LT-AF-Z'
dropna : list
columns that must not be empty
reqd_cols : list
columns that must be present
Returns
----------
status : bool
True if successful, else False
data : pandas DataFrame
measurement data with required columns | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L2433-L2484 |
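A usage sketch chained onto the previous helper; the treat_ac_field column name is an assumption about AF demagnetization data:

    status, meas_data = add_sites_to_meas_table('.')
    if status:
        status, af_data = prep_for_intensity_plot(
            meas_data, 'LT-AF-Z',
            dropna=['treat_ac_field'],
            reqd_cols=['site', 'treat_ac_field'])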
PmagPy/PmagPy | pmagpy/contribution_builder.py | stringify_col | def stringify_col(df, col_name):
"""
Take a dataframe and string-i-fy a column of values.
Turn nan/None into "" and all other values into strings.
Parameters
----------
df : dataframe
col_name : string
"""
df = df.copy()
df[col_name] = df[col_name].fillna("")
df[col_name] = df[col_name].astype(str)
return df | python | def stringify_col(df, col_name):
"""
Take a dataframe and string-i-fy a column of values.
Turn nan/None into "" and all other values into strings.
Parameters
----------
df : dataframe
col_name : string
"""
df = df.copy()
df[col_name] = df[col_name].fillna("")
df[col_name] = df[col_name].astype(str)
return df | Take a dataframe and string-i-fy a column of values.
Turn nan/None into "" and all other values into strings.
Parameters
----------
df : dataframe
col_name : string | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L2486-L2499 |
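A quick sketch of the round-trip (values are illustrative):

    import pandas as pd
    from pmagpy.contribution_builder import stringify_col
    df = pd.DataFrame({'site': ['01', None, 3]})
    stringify_col(df, 'site')['site'].tolist()  # ['01', '', '3']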
PmagPy/PmagPy | pmagpy/contribution_builder.py | Contribution.add_empty_magic_table | def add_empty_magic_table(self, dtype, col_names=None, groups=None):
"""
Add a blank MagicDataFrame to the contribution.
You can provide either a list of column names,
or a list of column group names.
If provided, col_names takes precedence.
"""
if dtype not in self.table_names:
print("-W- {} is not a valid MagIC table name".format(dtype))
print("-I- Valid table names are: {}".format(", ".join(self.table_names)))
return
data_container = MagicDataFrame(dtype=dtype, columns=col_names, groups=groups)
self.tables[dtype] = data_container | python | def add_empty_magic_table(self, dtype, col_names=None, groups=None):
"""
Add a blank MagicDataFrame to the contribution.
You can provide either a list of column names,
or a list of column group names.
If provided, col_names takes precedence.
"""
if dtype not in self.table_names:
print("-W- {} is not a valid MagIC table name".format(dtype))
print("-I- Valid table names are: {}".format(", ".join(self.table_names)))
return
data_container = MagicDataFrame(dtype=dtype, columns=col_names, groups=groups)
self.tables[dtype] = data_container | Add a blank MagicDataFrame to the contribution.
You can provide either a list of column names,
or a list of column group names.
If provided, col_names takes precedence. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L87-L99 |
PmagPy/PmagPy | pmagpy/contribution_builder.py | Contribution.add_magic_table_from_data | def add_magic_table_from_data(self, dtype, data):
"""
Add a MagIC table to the contribution from a data list
Parameters
----------
dtype : str
MagIC table type, i.e. 'specimens'
data : list of dicts
data list with format [{'key1': 'val1', ...}, {'key1': 'val2', ...}, ... }]
"""
self.tables[dtype] = MagicDataFrame(dtype=dtype, data=data)
if dtype == 'measurements':
self.tables['measurements'].add_sequence()
return dtype, self.tables[dtype] | python | def add_magic_table_from_data(self, dtype, data):
"""
Add a MagIC table to the contribution from a data list
Parameters
----------
dtype : str
MagIC table type, i.e. 'specimens'
data : list of dicts
data list with format [{'key1': 'val1', ...}, {'key1': 'val2', ...}, ... }]
"""
self.tables[dtype] = MagicDataFrame(dtype=dtype, data=data)
if dtype == 'measurements':
self.tables['measurements'].add_sequence()
return dtype, self.tables[dtype] | Add a MagIC table to the contribution from a data list
Parameters
----------
dtype : str
MagIC table type, i.e. 'specimens'
data : list of dicts
data list with format [{'key1': 'val1', ...}, {'key1': 'val2', ...}, ... }] | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L101-L115 |
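A sketch of building a specimens table from records (the directory and record values are made up):

    from pmagpy.contribution_builder import Contribution
    con = Contribution('.')
    data = [{'specimen': 'sp01', 'sample': 'sa01'},
            {'specimen': 'sp02', 'sample': 'sa01'}]
    con.add_magic_table_from_data('specimens', data)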
PmagPy/PmagPy | pmagpy/contribution_builder.py | Contribution.add_magic_table | def add_magic_table(self, dtype, fname=None, df=None):
"""
Read in a new file to add a table to self.tables.
Requires dtype argument and EITHER filename or df.
Parameters
----------
dtype : str
MagIC table name (plural, i.e. 'specimens')
fname : str
filename of MagIC format file
(short path, directory is self.directory)
default: None
df : pandas DataFrame
data to create the new table with
default: None
"""
if df is None:
# if providing a filename but no data type
if dtype == "unknown":
filename = os.path.join(self.directory, fname)
if not os.path.exists(filename):
return False, False
data_container = MagicDataFrame(filename, dmodel=self.data_model)
dtype = data_container.dtype
if dtype == 'empty':
return False, False
else:
self.tables[dtype] = data_container
return dtype, data_container
# if providing a data type, use the canonical filename
elif dtype not in self.filenames:
print('-W- "{}" is not a valid MagIC table type'.format(dtype))
print("-I- Available table types are: {}".format(", ".join(self.table_names)))
return False, False
#filename = os.path.join(self.directory, self.filenames[dtype])
filename = pmag.resolve_file_name(self.filenames[dtype], self.directory)
if os.path.exists(filename):
data_container = MagicDataFrame(filename, dtype=dtype,
dmodel=self.data_model)
if data_container.dtype != "empty":
self.tables[dtype] = data_container
return dtype, data_container
else:
return False, False
else:
#print("-W- No such file: {}".format(filename))
return False, False
# df is not None
else:
if not dtype:
print("-W- Must provide dtype")
return False, False
data_container = MagicDataFrame(dtype=dtype, df=df)
self.tables[dtype] = data_container
self.tables[dtype].sort_dataframe_cols()
return dtype, self.tables[dtype] | python | def add_magic_table(self, dtype, fname=None, df=None):
"""
Read in a new file to add a table to self.tables.
Requires dtype argument and EITHER filename or df.
Parameters
----------
dtype : str
MagIC table name (plural, i.e. 'specimens')
fname : str
filename of MagIC format file
(short path, directory is self.directory)
default: None
df : pandas DataFrame
data to create the new table with
default: None
"""
if df is None:
# if providing a filename but no data type
if dtype == "unknown":
filename = os.path.join(self.directory, fname)
if not os.path.exists(filename):
return False, False
data_container = MagicDataFrame(filename, dmodel=self.data_model)
dtype = data_container.dtype
if dtype == 'empty':
return False, False
else:
self.tables[dtype] = data_container
return dtype, data_container
# if providing a data type, use the canonical filename
elif dtype not in self.filenames:
print('-W- "{}" is not a valid MagIC table type'.format(dtype))
print("-I- Available table types are: {}".format(", ".join(self.table_names)))
return False, False
#filename = os.path.join(self.directory, self.filenames[dtype])
filename = pmag.resolve_file_name(self.filenames[dtype], self.directory)
if os.path.exists(filename):
data_container = MagicDataFrame(filename, dtype=dtype,
dmodel=self.data_model)
if data_container.dtype != "empty":
self.tables[dtype] = data_container
return dtype, data_container
else:
return False, False
else:
#print("-W- No such file: {}".format(filename))
return False, False
# df is not None
else:
if not dtype:
print("-W- Must provide dtype")
return False, False
data_container = MagicDataFrame(dtype=dtype, df=df)
self.tables[dtype] = data_container
self.tables[dtype].sort_dataframe_cols()
return dtype, self.tables[dtype] | Read in a new file to add a table to self.tables.
Requires dtype argument and EITHER filename or df.
Parameters
----------
dtype : str
MagIC table name (plural, i.e. 'specimens')
fname : str
filename of MagIC format file
(short path, directory is self.directory)
default: None
df : pandas DataFrame
data to create the new table with
default: None | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L118-L174 |
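Two hypothetical ways to load a table, continuing with the con object sketched above (file names are assumptions):

    # by canonical name: reads sites.txt from the contribution directory
    dtype, sites_container = con.add_magic_table('sites')
    # by file name when the table type is not known in advance
    dtype, container = con.add_magic_table('unknown', fname='mystery_table.txt')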
PmagPy/PmagPy | pmagpy/contribution_builder.py | Contribution.propagate_measurement_info | def propagate_measurement_info(self):
"""
Take a contribution with a measurement table.
Create specimen, sample, site, and location tables
using the unique names in the measurement table to fill in
the index.
"""
meas_df = self.tables['measurements'].df
names_list = ['specimen', 'sample', 'site', 'location']
# add in any tables that you can
for num, name in enumerate(names_list):
# don't replace tables that already exist
if (name + "s") in self.tables:
continue
elif name in meas_df.columns:
items = meas_df[name].unique()
df = pd.DataFrame(columns=[name], index=items)
df[name] = df.index
# add in parent name if possible
# (i.e., sample name to specimens table)
if num < (len(names_list) - 1):
parent = names_list[num+1]
if parent in meas_df.columns:
meas_df = meas_df.where(meas_df.notnull(), "")
df[parent] = meas_df.drop_duplicates(subset=[name])[parent].values.astype(str)
df = df.where(df != "", np.nan)
df = df.dropna(how='all', axis='rows')
if len(df):
self.tables[name + "s"] = MagicDataFrame(dtype=name + "s", df=df)
self.write_table_to_file(name + "s") | python | def propagate_measurement_info(self):
"""
Take a contribution with a measurement table.
Create specimen, sample, site, and location tables
using the unique names in the measurement table to fill in
the index.
"""
meas_df = self.tables['measurements'].df
names_list = ['specimen', 'sample', 'site', 'location']
# add in any tables that you can
for num, name in enumerate(names_list):
# don't replace tables that already exist
if (name + "s") in self.tables:
continue
elif name in meas_df.columns:
items = meas_df[name].unique()
df = pd.DataFrame(columns=[name], index=items)
df[name] = df.index
# add in parent name if possible
# (i.e., sample name to specimens table)
if num < (len(names_list) - 1):
parent = names_list[num+1]
if parent in meas_df.columns:
meas_df = meas_df.where(meas_df.notnull(), "")
df[parent] = meas_df.drop_duplicates(subset=[name])[parent].values.astype(str)
df = df.where(df != "", np.nan)
df = df.dropna(how='all', axis='rows')
if len(df):
self.tables[name + "s"] = MagicDataFrame(dtype=name + "s", df=df)
self.write_table_to_file(name + "s") | Take a contribution with a measurement table.
Create specimen, sample, site, and location tables
using the unique names in the measurement table to fill in
the index. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L177-L206 |
PmagPy/PmagPy | pmagpy/contribution_builder.py | Contribution.propagate_all_tables_info | def propagate_all_tables_info(self, write=True):
"""
Find any items (specimens, samples, sites, or locations) from
tables other than measurements and make sure they each have a
row in their own table. For example, if a site name is in
the samples table but not in the sites table, create a row
for it in the sites table.
"""
for table_name in ["specimens", "samples", "sites", "locations"]:
if not table_name in self.tables:
continue
df = self.tables[table_name].df
parent_name, child_name = self.get_parent_and_child(table_name)
if parent_name:
if parent_name[:-1] in df.columns:
parents = sorted(set(df[parent_name[:-1]].dropna().values.astype(str)))
if parent_name in self.tables: # if there is a parent table, update it
parent_df = self.tables[parent_name].df
missing_parents = set(parents) - set(parent_df.index)
if missing_parents: # add any missing values
print("-I- Updating {} table with values from {} table".format(parent_name, table_name))
for item in missing_parents:
self.add_item(parent_name, {parent_name[:-1]: item}, label=item)
# save any changes to file
if write:
self.write_table_to_file(parent_name)
else: # if there is no parent table, create it if necessary
if parents:
# create a parent_df with the names you got from the child
print("-I- Creating new {} table with data from {} table".format(parent_name, table_name))
# add in the grandparent if available
grandparent_name = self.get_parent_and_child(parent_name)[0]
                        # note: grandparent names, if available, are merged in below
                        columns = [parent_name[:-1]]
parent_df = pd.DataFrame(columns=columns, index=parents)
parent_df[parent_name[:-1]] = parent_df.index
if grandparent_name:
if grandparent_name[:-1] in df.columns:
parent_df = pd.merge(df[[parent_name[:-1], grandparent_name[:-1]]], parent_df, on=parent_name[:-1])
self.tables[parent_name] = MagicDataFrame(dtype=parent_name,
df=parent_df)
if write:
# save new table to file
self.write_table_to_file(parent_name)
if child_name:
if child_name in df.columns:
raw_children = df[child_name].dropna().str.split(':')
# create dict of all children with parent info
parent_of_child = {}
for parent, children in raw_children.items():
for child in children:
# remove whitespace
child = child.strip()
old_parent = parent_of_child.get(child)
if old_parent and parent and (old_parent != parent):
print('-I- for {} {}, replacing: {} with: {}'.format(child_name[:-1], child,
old_parent, parent))
parent_of_child[child] = parent
# old way:
# flatten list, ignore duplicates
#children = sorted(set([item.strip() for sublist in raw_children for item in sublist]))
if child_name in self.tables: # if there is already a child table, update it
child_df = self.tables[child_name].df
missing_children = set(parent_of_child.keys()) - set(child_df.index)
if missing_children: # add any missing values
print("-I- Updating {} table with values from {} table".format(child_name, table_name))
for item in missing_children:
data = {child_name[:-1]: item, table_name[:-1]: parent_of_child[item]}
self.add_item(child_name, data, label=item)
if write:
# save any changes to file
self.write_table_to_file(child_name)
else: # if there is no child table, create it if necessary
                    if parent_of_child:
# create a child_df with the names you got from the parent
print("-I- Creating new {} table with data from {} table".format(child_name, table_name))
# old way to make new table:
#child_df = pd.DataFrame(columns=[table_name[:-1]], index=children)
# new way to make new table
children_list = sorted(parent_of_child.keys())
                        children_data = [[c_name, parent_of_child[c_name]] for c_name in children_list]
child_df = pd.DataFrame(index=children_list, columns=[child_name[:-1], table_name[:-1]], data=children_data)
self.tables[child_name] = MagicDataFrame(dtype=child_name, df=child_df)
if write:
# save new table to file
self.write_table_to_file(child_name) | python | def propagate_all_tables_info(self, write=True):
"""
Find any items (specimens, samples, sites, or locations) from
tables other than measurements and make sure they each have a
row in their own table. For example, if a site name is in
the samples table but not in the sites table, create a row
for it in the sites table.
"""
for table_name in ["specimens", "samples", "sites", "locations"]:
if not table_name in self.tables:
continue
df = self.tables[table_name].df
parent_name, child_name = self.get_parent_and_child(table_name)
if parent_name:
if parent_name[:-1] in df.columns:
parents = sorted(set(df[parent_name[:-1]].dropna().values.astype(str)))
if parent_name in self.tables: # if there is a parent table, update it
parent_df = self.tables[parent_name].df
missing_parents = set(parents) - set(parent_df.index)
if missing_parents: # add any missing values
print("-I- Updating {} table with values from {} table".format(parent_name, table_name))
for item in missing_parents:
self.add_item(parent_name, {parent_name[:-1]: item}, label=item)
# save any changes to file
if write:
self.write_table_to_file(parent_name)
else: # if there is no parent table, create it if necessary
if parents:
# create a parent_df with the names you got from the child
print("-I- Creating new {} table with data from {} table".format(parent_name, table_name))
# add in the grandparent if available
grandparent_name = self.get_parent_and_child(parent_name)[0]
                        # note: grandparent names, if available, are merged in below
                        columns = [parent_name[:-1]]
parent_df = pd.DataFrame(columns=columns, index=parents)
parent_df[parent_name[:-1]] = parent_df.index
if grandparent_name:
if grandparent_name[:-1] in df.columns:
parent_df = pd.merge(df[[parent_name[:-1], grandparent_name[:-1]]], parent_df, on=parent_name[:-1])
self.tables[parent_name] = MagicDataFrame(dtype=parent_name,
df=parent_df)
if write:
# save new table to file
self.write_table_to_file(parent_name)
if child_name:
if child_name in df.columns:
raw_children = df[child_name].dropna().str.split(':')
# create dict of all children with parent info
parent_of_child = {}
for parent, children in raw_children.items():
for child in children:
# remove whitespace
child = child.strip()
old_parent = parent_of_child.get(child)
if old_parent and parent and (old_parent != parent):
print('-I- for {} {}, replacing: {} with: {}'.format(child_name[:-1], child,
old_parent, parent))
parent_of_child[child] = parent
# old way:
# flatten list, ignore duplicates
#children = sorted(set([item.strip() for sublist in raw_children for item in sublist]))
if child_name in self.tables: # if there is already a child table, update it
child_df = self.tables[child_name].df
missing_children = set(parent_of_child.keys()) - set(child_df.index)
if missing_children: # add any missing values
print("-I- Updating {} table with values from {} table".format(child_name, table_name))
for item in missing_children:
data = {child_name[:-1]: item, table_name[:-1]: parent_of_child[item]}
self.add_item(child_name, data, label=item)
if write:
# save any changes to file
self.write_table_to_file(child_name)
else: # if there is no child table, create it if necessary
                    if parent_of_child:
# create a child_df with the names you got from the parent
print("-I- Creating new {} table with data from {} table".format(child_name, table_name))
# old way to make new table:
#child_df = pd.DataFrame(columns=[table_name[:-1]], index=children)
# new way to make new table
children_list = sorted(parent_of_child.keys())
                        children_data = [[c_name, parent_of_child[c_name]] for c_name in children_list]
child_df = pd.DataFrame(index=children_list, columns=[child_name[:-1], table_name[:-1]], data=children_data)
self.tables[child_name] = MagicDataFrame(dtype=child_name, df=child_df)
if write:
# save new table to file
self.write_table_to_file(child_name) | Find any items (specimens, samples, sites, or locations) from
tables other than measurements and make sure they each have a
row in their own table. For example, if a site name is in
the samples table but not in the sites table, create a row
for it in the sites table. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L209-L302 |
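For illustration, a minimal sketch of the reconciliation performed above, assuming a contribution whose samples table mentions sites that are absent from the sites table; the directory name is hypothetical:

from pmagpy.contribution_builder import Contribution

con = Contribution('my_project_dir')  # hypothetical directory
# any site named in samples.txt but missing from sites.txt gains a row
# in the sites table; write=False skips saving the updated tables to file
con.propagate_all_tables_info(write=False)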
PmagPy/PmagPy | pmagpy/contribution_builder.py | Contribution.get_parent_and_child | def get_parent_and_child(self, table_name):
"""
Get the name of the parent table and the child table
for a given MagIC table name.
Parameters
----------
table_name : string of MagIC table name ['specimens', 'samples', 'sites', 'locations']
Returns
-------
parent_name : string of parent table name
child_name : string of child table name
"""
if table_name not in self.ancestry:
return None, None
parent_ind = self.ancestry.index(table_name) + 1
if parent_ind + 1 > len(self.ancestry):
parent_name = None
else:
parent_name = self.ancestry[parent_ind]
child_ind = self.ancestry.index(table_name) - 1
if child_ind < 0:
child_name = None
else:
child_name = self.ancestry[child_ind]
return parent_name, child_name | python | def get_parent_and_child(self, table_name):
"""
Get the name of the parent table and the child table
for a given MagIC table name.
Parameters
----------
table_name : string of MagIC table name ['specimens', 'samples', 'sites', 'locations']
Returns
-------
parent_name : string of parent table name
child_name : string of child table name
"""
if table_name not in self.ancestry:
return None, None
parent_ind = self.ancestry.index(table_name) + 1
if parent_ind + 1 > len(self.ancestry):
parent_name = None
else:
parent_name = self.ancestry[parent_ind]
child_ind = self.ancestry.index(table_name) - 1
if child_ind < 0:
child_name = None
else:
child_name = self.ancestry[child_ind]
return parent_name, child_name | Get the name of the parent table and the child table
for a given MagIC table name.
Parameters
----------
table_name : string of MagIC table name ['specimens', 'samples', 'sites', 'locations']
Returns
-------
parent_name : string of parent table name
child_name : string of child table name | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L305-L331 |
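The lookup above can be exercised directly; the expected results below assume the standard ancestry ordering (measurements up through locations):

from pmagpy.contribution_builder import Contribution

con = Contribution('my_project_dir')  # hypothetical directory
print(con.get_parent_and_child('samples'))    # ('sites', 'specimens')
print(con.get_parent_and_child('locations'))  # (None, 'sites') -- top of the hierarchy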
PmagPy/PmagPy | pmagpy/contribution_builder.py | Contribution.get_min_max_lat_lon | def get_min_max_lat_lon(self):
"""
Find latitude/longitude information from sites table
and group it by location.
        Returns
        ---------
        locs : dict
            location names mapped to lat_s/lat_n/lon_e/lon_w bounds
            (or None when required tables/columns are missing)
"""
if 'sites' not in self.tables:
return
# get min/max lat/lon from sites table
site_container = self.tables['sites']
if not ('lat' in site_container.df.columns and 'lon' in site_container.df.columns):
return
# convert lat/lon columns to string type
# (this is necessary for consistency because they MAY be string type already)
site_container.df['lat'] = site_container.df['lat'].fillna('').astype(str)
site_container.df['lon'] = site_container.df['lon'].fillna('').astype(str)
# replace empty strings with np.nan
site_container.df['lat'] = np.where(site_container.df['lat'].str.len(), site_container.df['lat'], np.nan)
site_container.df['lon'] = np.where(site_container.df['lon'].str.len(), site_container.df['lon'], np.nan)
        # convert lat/lon values to float (they may be strings from the grid)
try:
site_container.df['lat'] = site_container.df['lat'].astype(float)
except ValueError as ex:
print('-W- Improperly formatted numbers in sites.lat')
return
try:
site_container.df['lon'] = site_container.df['lon'].astype(float)
except ValueError as ex:
print('-W- Improperly formatted numbers in sites.lon')
return
# group lat/lon by location
grouped_lon = site_container.df[['lon', 'location']].groupby('location')
grouped_lat = site_container.df[['lat', 'location']].groupby('location')
# get min/max longitude by location
lon_w = grouped_lon.min()
lon_e = grouped_lon.max()
# get min/max latitude by location
lat_s = grouped_lat.min()
lat_n = grouped_lat.max()
# assign lat/lon to location table
locs = {}
if 'locations' not in self.tables:
return
loc_container = self.tables['locations']
for loc in lat_s.index:
coords = {}
coords['lat_s'] = lat_s.loc[loc]['lat']
coords['lat_n'] = lat_n.loc[loc]['lat']
coords['lon_e'] = lon_e.loc[loc]['lon']
coords['lon_w'] = lon_w.loc[loc]['lon']
locs[loc] = coords
for loc_name in locs:
if loc_name in loc_container.df.index:
coords = locs[loc_name]
for coord in locs[loc_name]:
# warn user if an old value will be overwritten
new_value = coords[coord]
# if the new value is null, ignore it
if is_null(new_value, zero_as_null=False):
continue
# set old value to None if it wasn't in table
if coord not in loc_container.df.columns:
loc_container.df[coord] = None
old_value = loc_container.df.loc[loc_name, coord]
# use first value if multiple values returned, but don't shorten a string
if not (isinstance(old_value, str)):
try:
old_value = old_value.values.astype(str)[0]
except (TypeError,IndexError,AttributeError) as e: # if only one value, or np.nan, or NoneType
pass
if is_null(old_value, zero_as_null=False):
pass
elif isinstance(old_value, str):
try:
old_value = float(old_value)
except ValueError:
print('-W- In {}, automatically generated {} value ({}) will overwrite previous value ({})'.format(loc_name, coord, new_value, old_value))
old_value = None
elif not math.isclose(new_value, old_value):
print('-W- In {}, automatically generated {} value ({}) will overwrite previous value ({})'.format(loc_name, coord, new_value, old_value))
# set new value
new_value = round(float(new_value), 5)
loc_container.df.loc[loc_name, coord] = new_value
self.write_table_to_file('locations')
return locs | python | def get_min_max_lat_lon(self):
"""
Find latitude/longitude information from sites table
and group it by location.
        Returns
        ---------
        locs : dict
            location names mapped to lat_s/lat_n/lon_e/lon_w bounds
            (or None when required tables/columns are missing)
"""
if 'sites' not in self.tables:
return
# get min/max lat/lon from sites table
site_container = self.tables['sites']
if not ('lat' in site_container.df.columns and 'lon' in site_container.df.columns):
return
# convert lat/lon columns to string type
# (this is necessary for consistency because they MAY be string type already)
site_container.df['lat'] = site_container.df['lat'].fillna('').astype(str)
site_container.df['lon'] = site_container.df['lon'].fillna('').astype(str)
# replace empty strings with np.nan
site_container.df['lat'] = np.where(site_container.df['lat'].str.len(), site_container.df['lat'], np.nan)
site_container.df['lon'] = np.where(site_container.df['lon'].str.len(), site_container.df['lon'], np.nan)
        # convert lat/lon values to float (they may be strings from the grid)
try:
site_container.df['lat'] = site_container.df['lat'].astype(float)
except ValueError as ex:
print('-W- Improperly formatted numbers in sites.lat')
return
try:
site_container.df['lon'] = site_container.df['lon'].astype(float)
except ValueError as ex:
print('-W- Improperly formatted numbers in sites.lon')
return
# group lat/lon by location
grouped_lon = site_container.df[['lon', 'location']].groupby('location')
grouped_lat = site_container.df[['lat', 'location']].groupby('location')
# get min/max longitude by location
lon_w = grouped_lon.min()
lon_e = grouped_lon.max()
# get min/max latitude by location
lat_s = grouped_lat.min()
lat_n = grouped_lat.max()
# assign lat/lon to location table
locs = {}
if 'locations' not in self.tables:
return
loc_container = self.tables['locations']
for loc in lat_s.index:
coords = {}
coords['lat_s'] = lat_s.loc[loc]['lat']
coords['lat_n'] = lat_n.loc[loc]['lat']
coords['lon_e'] = lon_e.loc[loc]['lon']
coords['lon_w'] = lon_w.loc[loc]['lon']
locs[loc] = coords
for loc_name in locs:
if loc_name in loc_container.df.index:
coords = locs[loc_name]
for coord in locs[loc_name]:
# warn user if an old value will be overwritten
new_value = coords[coord]
# if the new value is null, ignore it
if is_null(new_value, zero_as_null=False):
continue
# set old value to None if it wasn't in table
if coord not in loc_container.df.columns:
loc_container.df[coord] = None
old_value = loc_container.df.loc[loc_name, coord]
# use first value if multiple values returned, but don't shorten a string
if not (isinstance(old_value, str)):
try:
old_value = old_value.values.astype(str)[0]
except (TypeError,IndexError,AttributeError) as e: # if only one value, or np.nan, or NoneType
pass
if is_null(old_value, zero_as_null=False):
pass
elif isinstance(old_value, str):
try:
old_value = float(old_value)
except ValueError:
print('-W- In {}, automatically generated {} value ({}) will overwrite previous value ({})'.format(loc_name, coord, new_value, old_value))
old_value = None
elif not math.isclose(new_value, old_value):
print('-W- In {}, automatically generated {} value ({}) will overwrite previous value ({})'.format(loc_name, coord, new_value, old_value))
# set new value
new_value = round(float(new_value), 5)
loc_container.df.loc[loc_name, coord] = new_value
self.write_table_to_file('locations')
return locs | Find latitude/longitude information from sites table
and group it by location.
Returns
---------
locs : dict
    location names mapped to lat_s/lat_n/lon_e/lon_w bounds (or None when required tables/columns are missing) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L333-L420 |
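A short sketch of calling the bounding-box helper above; it returns the per-location dict (and writes the updated locations table as a side effect):

from pmagpy.contribution_builder import Contribution

con = Contribution('my_project_dir')  # hypothetical directory with sites/locations tables
locs = con.get_min_max_lat_lon()
if locs:
    for loc_name, coords in locs.items():
        print(loc_name, coords['lat_s'], coords['lat_n'], coords['lon_w'], coords['lon_e'])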
PmagPy/PmagPy | pmagpy/contribution_builder.py | Contribution.propagate_lithology_cols | def propagate_lithology_cols(self):
"""
Propagate any data from lithologies, geologic_types, or geologic_classes
from the sites table to the samples and specimens table.
In the samples/specimens tables, null or "Not Specified" values
will be overwritten based on the data from their parent site.
"""
cols = ['lithologies', 'geologic_types', 'geologic_classes']
#for table in ['specimens', 'samples']:
# convert "Not Specified" to blank
#self.tables[table].df.replace("^[Nn]ot [Ss]pecified", '',
# regex=True, inplace=True)
self.propagate_cols(cols, 'samples', 'sites')
cols = ['lithologies', 'geologic_types', 'geologic_classes']
self.propagate_cols(cols, 'specimens', 'samples')
# if sites table is missing any values,
# go ahead and propagate values UP as well
if 'sites' not in self.tables:
return
for col in cols:
if col not in self.tables['sites'].df.columns:
self.tables['sites'].df[col] = None
if not all(self.tables['sites'].df[cols].values.ravel()):
print('-I- Propagating values up from samples to sites...')
self.propagate_cols_up(cols, 'sites', 'samples') | python | def propagate_lithology_cols(self):
"""
Propagate any data from lithologies, geologic_types, or geologic_classes
from the sites table to the samples and specimens table.
In the samples/specimens tables, null or "Not Specified" values
will be overwritten based on the data from their parent site.
"""
cols = ['lithologies', 'geologic_types', 'geologic_classes']
#for table in ['specimens', 'samples']:
# convert "Not Specified" to blank
#self.tables[table].df.replace("^[Nn]ot [Ss]pecified", '',
# regex=True, inplace=True)
self.propagate_cols(cols, 'samples', 'sites')
cols = ['lithologies', 'geologic_types', 'geologic_classes']
self.propagate_cols(cols, 'specimens', 'samples')
# if sites table is missing any values,
# go ahead and propagate values UP as well
if 'sites' not in self.tables:
return
for col in cols:
if col not in self.tables['sites'].df.columns:
self.tables['sites'].df[col] = None
if not all(self.tables['sites'].df[cols].values.ravel()):
print('-I- Propagating values up from samples to sites...')
self.propagate_cols_up(cols, 'sites', 'samples') | Propagate any data from lithologies, geologic_types, or geologic_classes
from the sites table to the samples and specimens table.
In the samples/specimens tables, null or "Not Specified" values
will be overwritten based on the data from their parent site. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L422-L446 |
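A minimal sketch of the lithology propagation above, assuming specimens, samples, and sites tables are already loaded:

from pmagpy.contribution_builder import Contribution

con = Contribution('my_project_dir')  # hypothetical directory
# null lithologies/geologic_types/geologic_classes in samples and specimens
# are filled from the parent site; missing site values are compiled upward
con.propagate_lithology_cols()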
PmagPy/PmagPy | pmagpy/contribution_builder.py | Contribution.rename_item | def rename_item(self, table_name, item_old_name, item_new_name):
"""
Rename item (such as a site) everywhere that it occurs.
This change often spans multiple tables.
For example, a site name will occur in the sites table,
the samples table, and possibly in the locations/ages tables.
"""
# define some helper methods:
def put_together_if_list(item):
"""
String joining function
that doesn't break with None/np.nan
"""
try:
                return ":".join(item)
except TypeError as ex:
#print ex
return item
def replace_colon_delimited_value(df, col_name, old_value, new_value):
"""
Col must contain list
"""
            for index, row in df[df[col_name].notnull()].iterrows():
                names_list = [name.strip() for name in row[col_name]]
                try:
                    ind = names_list.index(old_value)
                except ValueError:
                    continue
                names_list[ind] = new_value
                # write back to the row's own index label
                # (a running counter would target the wrong rows)
                df.at[index, col_name] = names_list
# initialize some things
item_type = table_name
###col_name = item_type[:-1] + "_name"
col_name = item_type[:-1]
col_name_plural = col_name + "s"
table_df = self.tables[item_type].df
if item_old_name == '':
# just add a new item
self.add_item(table_name, {col_name: item_new_name}, item_new_name)
return
# rename item in its own table
table_df.rename(index={item_old_name: item_new_name}, inplace=True)
# rename in any parent/child tables
for table_name in self.tables:
df = self.tables[table_name].df
col_names = df.columns
# change anywhere col_name (singular, i.e. site) is found
if col_name in col_names:
df[col_name].where(df[col_name] != item_old_name, item_new_name, inplace=True)
# change anywhere col_name (plural, i.e. sites) is found
if col_name_plural in col_names:
df[col_name_plural + "_list"] = df[col_name_plural].str.split(":")
replace_colon_delimited_value(df, col_name_plural + "_list", item_old_name, item_new_name)
df[col_name_plural] = df[col_name_plural + "_list"].apply(put_together_if_list)
df.drop(col_name_plural + "_list", axis=1, inplace=True)
self.tables[table_name].df = df | python | def rename_item(self, table_name, item_old_name, item_new_name):
"""
Rename item (such as a site) everywhere that it occurs.
This change often spans multiple tables.
For example, a site name will occur in the sites table,
the samples table, and possibly in the locations/ages tables.
"""
# define some helper methods:
def put_together_if_list(item):
"""
String joining function
that doesn't break with None/np.nan
"""
try:
                return ":".join(item)
except TypeError as ex:
#print ex
return item
def replace_colon_delimited_value(df, col_name, old_value, new_value):
"""
Col must contain list
"""
            for index, row in df[df[col_name].notnull()].iterrows():
                names_list = [name.strip() for name in row[col_name]]
                try:
                    ind = names_list.index(old_value)
                except ValueError:
                    continue
                names_list[ind] = new_value
                # write back to the row's own index label
                # (a running counter would target the wrong rows)
                df.at[index, col_name] = names_list
# initialize some things
item_type = table_name
###col_name = item_type[:-1] + "_name"
col_name = item_type[:-1]
col_name_plural = col_name + "s"
table_df = self.tables[item_type].df
if item_old_name == '':
# just add a new item
self.add_item(table_name, {col_name: item_new_name}, item_new_name)
return
# rename item in its own table
table_df.rename(index={item_old_name: item_new_name}, inplace=True)
# rename in any parent/child tables
for table_name in self.tables:
df = self.tables[table_name].df
col_names = df.columns
# change anywhere col_name (singular, i.e. site) is found
if col_name in col_names:
df[col_name].where(df[col_name] != item_old_name, item_new_name, inplace=True)
# change anywhere col_name (plural, i.e. sites) is found
if col_name_plural in col_names:
df[col_name_plural + "_list"] = df[col_name_plural].str.split(":")
replace_colon_delimited_value(df, col_name_plural + "_list", item_old_name, item_new_name)
df[col_name_plural] = df[col_name_plural + "_list"].apply(put_together_if_list)
df.drop(col_name_plural + "_list", axis=1, inplace=True)
self.tables[table_name].df = df | Rename item (such as a site) everywhere that it occurs.
This change often spans multiple tables.
For example, a site name will occur in the sites table,
the samples table, and possibly in the locations/ages tables. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L454-L519 |
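A hedged sketch of a site rename with the method above; the site names are hypothetical:

from pmagpy.contribution_builder import Contribution

con = Contribution('my_project_dir')  # hypothetical directory
con.rename_item('sites', 'site01', 'site01a')
# the sites table index is updated, and any 'site' column or colon-delimited
# 'sites' column in the other tables is rewritten to match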
PmagPy/PmagPy | pmagpy/contribution_builder.py | Contribution.get_table_name | def get_table_name(self, ind):
"""
Return both the table_name (i.e., 'specimens')
and the col_name (i.e., 'specimen')
for a given index in self.ancestry.
"""
if ind >= len(self.ancestry):
return "", ""
if ind > -1:
table_name = self.ancestry[ind]
###name = table_name[:-1] + "_name"
name = table_name[:-1]
return table_name, name
return "", "" | python | def get_table_name(self, ind):
"""
Return both the table_name (i.e., 'specimens')
and the col_name (i.e., 'specimen')
for a given index in self.ancestry.
"""
if ind >= len(self.ancestry):
return "", ""
if ind > -1:
table_name = self.ancestry[ind]
###name = table_name[:-1] + "_name"
name = table_name[:-1]
return table_name, name
return "", "" | Return both the table_name (i.e., 'specimens')
and the col_name (i.e., 'specimen')
for a given index in self.ancestry. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L522-L535 |
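The index helper above maps positions in self.ancestry to table/column names; the results shown assume the standard ordering:

from pmagpy.contribution_builder import Contribution

con = Contribution('my_project_dir')  # hypothetical directory
print(con.get_table_name(2))   # ('samples', 'sample') with the usual ancestry
print(con.get_table_name(99))  # ('', '') for out-of-range indices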
PmagPy/PmagPy | pmagpy/contribution_builder.py | Contribution.propagate_name_down | def propagate_name_down(self, col_name, df_name, verbose=False):
"""
Put the data for "col_name" into dataframe with df_name
Used to add 'site_name' to specimen table, for example.
"""
if df_name not in self.tables:
table = self.add_magic_table(df_name)[1]
if is_null(table):
return
df = self.tables[df_name].df
if col_name in df.columns:
if all(df[col_name].apply(not_null)):
#print('{} already in {}'.format(col_name, df_name))
return df
# otherwise, do necessary merges to get col_name into df
# get names for each level
grandparent_table_name = col_name.split('_')[0] + "s"
grandparent_name = grandparent_table_name[:-1]
ind = self.ancestry.index(grandparent_table_name) - 1
#
parent_table_name, parent_name = self.get_table_name(ind)
child_table_name, child_name = self.get_table_name(ind - 1)
bottom_table_name, bottom_name = self.get_table_name(ind - 2)
# merge in bottom level
if child_name not in df.columns:
# add child table if missing
if bottom_table_name not in self.tables:
result = self.add_magic_table(bottom_table_name)[1]
if not isinstance(result, MagicDataFrame):
if verbose:
print("-W- Couldn't read in {} data for data propagation".format(bottom_table_name))
return df
# add child_name to df
add_df = self.tables[bottom_table_name].df
# drop duplicate names
add_df = add_df.drop_duplicates(subset=bottom_name)
            if bottom_name not in df.columns:
                if verbose:
                    print("-W- Cannot complete propagation, {} table is missing {} column".format(df_name, bottom_name))
else:
add_df = stringify_col(add_df, child_name)
df = stringify_col(df, bottom_name)
df = df.merge(add_df[[child_name]],
left_on=[bottom_name],
right_index=True, how="left")
self.tables[df_name].df = df
# merge in one level above
if parent_name not in df.columns:
# add parent_table if missing
if child_table_name not in self.tables:
result = self.add_magic_table(child_table_name)[1]
if not isinstance(result, MagicDataFrame):
if verbose:
print("-W- Couldn't read in {} data".format(child_table_name))
print("-I- Make sure you've provided the correct file name")
return df
# add parent_name to df
add_df = self.tables[child_table_name].df
# drop duplicate names
add_df = add_df.drop_duplicates(subset=child_name)
if parent_name not in add_df:
if verbose:
print('-W- could not finish propagating names: {} table is missing {} column'.format(child_table_name, parent_name))
            elif child_name not in df.columns:
                if verbose:
                    print('-W- could not finish propagating names: {} table is missing {} column'.format(df_name, child_name))
else:
add_df = stringify_col(add_df, parent_name)
df = stringify_col(df, child_name)
df = df.merge(add_df[[parent_name]],
left_on=[child_name],
right_index=True, how="left")
self.tables[df_name].df = df
# merge in two levels above
if grandparent_name not in df.columns:
# add grandparent table if it is missing
if parent_table_name not in self.tables:
result = self.add_magic_table(parent_table_name)[1]
if not isinstance(result, MagicDataFrame):
if verbose:
print("-W- Couldn't read in {} data".format(parent_table_name))
print("-I- Make sure you've provided the correct file name")
return df
# add grandparent name to df
add_df = self.tables[parent_table_name].df
# drop duplicate names
add_df = add_df.drop_duplicates(subset=parent_name)
if grandparent_name not in add_df.columns:
if verbose:
print('-W- could not finish propagating names: {} table is missing {} column'.format(parent_table_name, grandparent_name))
elif parent_name not in df.columns:
if verbose:
print('-W- could not finish propagating names: {} table is missing {} column'.format(df_name, parent_name))
else:
add_df = stringify_col(add_df, grandparent_name)
df = stringify_col(df, parent_name)
df = df.merge(add_df[[grandparent_name]],
left_on=[parent_name],
right_index=True, how="left")
df = stringify_col(df, grandparent_name)
# update the Contribution
self.tables[df_name].df = df
return df | python | def propagate_name_down(self, col_name, df_name, verbose=False):
"""
Put the data for "col_name" into dataframe with df_name
Used to add 'site_name' to specimen table, for example.
"""
if df_name not in self.tables:
table = self.add_magic_table(df_name)[1]
if is_null(table):
return
df = self.tables[df_name].df
if col_name in df.columns:
if all(df[col_name].apply(not_null)):
#print('{} already in {}'.format(col_name, df_name))
return df
# otherwise, do necessary merges to get col_name into df
# get names for each level
grandparent_table_name = col_name.split('_')[0] + "s"
grandparent_name = grandparent_table_name[:-1]
ind = self.ancestry.index(grandparent_table_name) - 1
#
parent_table_name, parent_name = self.get_table_name(ind)
child_table_name, child_name = self.get_table_name(ind - 1)
bottom_table_name, bottom_name = self.get_table_name(ind - 2)
# merge in bottom level
if child_name not in df.columns:
# add child table if missing
if bottom_table_name not in self.tables:
result = self.add_magic_table(bottom_table_name)[1]
if not isinstance(result, MagicDataFrame):
if verbose:
print("-W- Couldn't read in {} data for data propagation".format(bottom_table_name))
return df
# add child_name to df
add_df = self.tables[bottom_table_name].df
# drop duplicate names
add_df = add_df.drop_duplicates(subset=bottom_name)
            if bottom_name not in df.columns:
                if verbose:
                    print("-W- Cannot complete propagation, {} table is missing {} column".format(df_name, bottom_name))
else:
add_df = stringify_col(add_df, child_name)
df = stringify_col(df, bottom_name)
df = df.merge(add_df[[child_name]],
left_on=[bottom_name],
right_index=True, how="left")
self.tables[df_name].df = df
# merge in one level above
if parent_name not in df.columns:
# add parent_table if missing
if child_table_name not in self.tables:
result = self.add_magic_table(child_table_name)[1]
if not isinstance(result, MagicDataFrame):
if verbose:
print("-W- Couldn't read in {} data".format(child_table_name))
print("-I- Make sure you've provided the correct file name")
return df
# add parent_name to df
add_df = self.tables[child_table_name].df
# drop duplicate names
add_df = add_df.drop_duplicates(subset=child_name)
if parent_name not in add_df:
if verbose:
print('-W- could not finish propagating names: {} table is missing {} column'.format(child_table_name, parent_name))
            elif child_name not in df.columns:
                if verbose:
                    print('-W- could not finish propagating names: {} table is missing {} column'.format(df_name, child_name))
else:
add_df = stringify_col(add_df, parent_name)
df = stringify_col(df, child_name)
df = df.merge(add_df[[parent_name]],
left_on=[child_name],
right_index=True, how="left")
self.tables[df_name].df = df
# merge in two levels above
if grandparent_name not in df.columns:
# add grandparent table if it is missing
if parent_table_name not in self.tables:
result = self.add_magic_table(parent_table_name)[1]
if not isinstance(result, MagicDataFrame):
if verbose:
print("-W- Couldn't read in {} data".format(parent_table_name))
print("-I- Make sure you've provided the correct file name")
return df
# add grandparent name to df
add_df = self.tables[parent_table_name].df
# drop duplicate names
add_df = add_df.drop_duplicates(subset=parent_name)
if grandparent_name not in add_df.columns:
if verbose:
print('-W- could not finish propagating names: {} table is missing {} column'.format(parent_table_name, grandparent_name))
elif parent_name not in df.columns:
if verbose:
print('-W- could not finish propagating names: {} table is missing {} column'.format(df_name, parent_name))
else:
add_df = stringify_col(add_df, grandparent_name)
df = stringify_col(df, parent_name)
df = df.merge(add_df[[grandparent_name]],
left_on=[parent_name],
right_index=True, how="left")
df = stringify_col(df, grandparent_name)
# update the Contribution
self.tables[df_name].df = df
return df | Put the data for "col_name" into dataframe with df_name
Used to add 'site_name' to specimen table, for example. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L560-L666 |
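A sketch of pulling higher-level names down into a lower table with the method above:

from pmagpy.contribution_builder import Contribution

con = Contribution('my_project_dir')  # hypothetical directory
# merges specimen -> sample -> site -> location names into measurements,
# reading in the intermediate tables as needed; returns the updated DataFrame
df = con.propagate_name_down('location', 'measurements')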
PmagPy/PmagPy | pmagpy/contribution_builder.py | Contribution.propagate_cols | def propagate_cols(self, col_names, target_df_name, source_df_name,
down=True):
"""
Put the data for "col_name" from source_df into target_df
Used to get "azimuth" from sample table into measurements table
(for example).
Note: if getting data from the sample table, don't include "sample"
in the col_names list. It is included automatically.
"""
# make sure target table is read in
if target_df_name not in self.tables:
self.add_magic_table(target_df_name)
if target_df_name not in self.tables:
print("-W- Couldn't read in {} table".format(target_df_name))
return
# make sure source table is read in
        if source_df_name not in self.tables:
            self.add_magic_table(source_df_name)
            if source_df_name not in self.tables:
                print("-W- Couldn't read in {} table".format(source_df_name))
                return
# make sure col_names are all available in source table
source_df = self.tables[source_df_name].df
if not set(col_names).issubset(source_df.columns):
for col in col_names[:]:
if col not in source_df.columns:
print("-W- Column '{}' isn't in {} table, skipping it".format(col, source_df_name))
col_names.remove(col)
if not col_names:
print("-W- Invalid or missing column names, could not propagate columns")
return
#
if down:
add_name = source_df_name[:-1]
if 'measurements' in self.tables.keys():
self.propagate_location_to_measurements()
elif 'specimens' in self.tables.keys():
self.propagate_location_to_specimens()
else:
self.propagate_name_down('location', 'sites')
else:
add_name = target_df_name[:-1]
# get dataframes for merge
target_df = self.tables[target_df_name].df
source_df = self.tables[source_df_name].df
backup_source_df = source_df.copy()
# finesse source_df to make sure it has all the right columns
# and no unnecessary duplicates
if source_df_name[:-1] not in source_df.columns:
source_df[source_df_name[:-1]] = source_df.index
source_df = source_df.drop_duplicates(inplace=False, subset=col_names + [source_df_name[:-1]])
source_df = source_df.groupby(source_df.index, sort=False).fillna(method='ffill')
source_df = source_df.groupby(source_df.index, sort=False).fillna(method='bfill')
# if the groupby/fillna operation fails due to pandas bug, do the same by hand:
if not len(source_df):
new = []
grouped = backup_source_df.groupby(backup_source_df.index)
for label, group in grouped:
new_group = group.fillna(method="ffill")
new_group = new_group.fillna(method="bfill")
new.append(new_group)
source_df = pd.concat(new, sort=True)
# if the groupby/fillna still doesn't work, we are out of luck
if not len(source_df):
return target_df
# propagate down
if down:
# do merge
target_df[add_name] = target_df[add_name].astype(str)
target_df = target_df.merge(source_df[col_names], how='left',
left_on=add_name, right_index=True,
suffixes=["_target", "_source"])
# propagate up
else:
# do merge
col_names.append(add_name)
source_df[add_name] = source_df[add_name].astype(str)
target_df = target_df.merge(source_df[col_names],
how='left', left_index=True,
right_on=add_name,
suffixes=['_target', '_source'])
target_df.index = target_df[add_name]
target_df.drop([add_name + "_source", add_name + "_target"], axis=1, inplace=True)
# ignore any duplicate rows
target_df.drop_duplicates(inplace=True)
# mess with target_df to remove un-needed merge columns
for col in col_names:
# if there has been a previous merge, consolidate and delete data
if col + "_target" in target_df.columns:
# prioritize values from target df
new_arr = np.where(target_df[col + "_target"],
target_df[col + "_target"],
target_df[col + "_source"])
target_df.rename(columns={col + "_target": col}, inplace=True)
target_df[col] = new_arr
if col + "_source" in target_df.columns:
# delete extra merge column
del target_df[col + "_source"]
#
# drop any duplicate rows
target_df.drop_duplicates(inplace=True)
self.tables[target_df_name].df = target_df
return target_df | python | def propagate_cols(self, col_names, target_df_name, source_df_name,
down=True):
"""
Put the data for "col_name" from source_df into target_df
Used to get "azimuth" from sample table into measurements table
(for example).
Note: if getting data from the sample table, don't include "sample"
in the col_names list. It is included automatically.
"""
# make sure target table is read in
if target_df_name not in self.tables:
self.add_magic_table(target_df_name)
if target_df_name not in self.tables:
print("-W- Couldn't read in {} table".format(target_df_name))
return
# make sure source table is read in
        if source_df_name not in self.tables:
            self.add_magic_table(source_df_name)
            if source_df_name not in self.tables:
                print("-W- Couldn't read in {} table".format(source_df_name))
                return
# make sure col_names are all available in source table
source_df = self.tables[source_df_name].df
if not set(col_names).issubset(source_df.columns):
for col in col_names[:]:
if col not in source_df.columns:
print("-W- Column '{}' isn't in {} table, skipping it".format(col, source_df_name))
col_names.remove(col)
if not col_names:
print("-W- Invalid or missing column names, could not propagate columns")
return
#
if down:
add_name = source_df_name[:-1]
if 'measurements' in self.tables.keys():
self.propagate_location_to_measurements()
elif 'specimens' in self.tables.keys():
self.propagate_location_to_specimens()
else:
self.propagate_name_down('location', 'sites')
else:
add_name = target_df_name[:-1]
# get dataframes for merge
target_df = self.tables[target_df_name].df
source_df = self.tables[source_df_name].df
backup_source_df = source_df.copy()
# finesse source_df to make sure it has all the right columns
# and no unnecessary duplicates
if source_df_name[:-1] not in source_df.columns:
source_df[source_df_name[:-1]] = source_df.index
source_df = source_df.drop_duplicates(inplace=False, subset=col_names + [source_df_name[:-1]])
source_df = source_df.groupby(source_df.index, sort=False).fillna(method='ffill')
source_df = source_df.groupby(source_df.index, sort=False).fillna(method='bfill')
# if the groupby/fillna operation fails due to pandas bug, do the same by hand:
if not len(source_df):
new = []
grouped = backup_source_df.groupby(backup_source_df.index)
for label, group in grouped:
new_group = group.fillna(method="ffill")
new_group = new_group.fillna(method="bfill")
new.append(new_group)
source_df = pd.concat(new, sort=True)
# if the groupby/fillna still doesn't work, we are out of luck
if not len(source_df):
return target_df
# propagate down
if down:
# do merge
target_df[add_name] = target_df[add_name].astype(str)
target_df = target_df.merge(source_df[col_names], how='left',
left_on=add_name, right_index=True,
suffixes=["_target", "_source"])
# propagate up
else:
# do merge
col_names.append(add_name)
source_df[add_name] = source_df[add_name].astype(str)
target_df = target_df.merge(source_df[col_names],
how='left', left_index=True,
right_on=add_name,
suffixes=['_target', '_source'])
target_df.index = target_df[add_name]
target_df.drop([add_name + "_source", add_name + "_target"], axis=1, inplace=True)
# ignore any duplicate rows
target_df.drop_duplicates(inplace=True)
# mess with target_df to remove un-needed merge columns
for col in col_names:
# if there has been a previous merge, consolidate and delete data
if col + "_target" in target_df.columns:
# prioritize values from target df
new_arr = np.where(target_df[col + "_target"],
target_df[col + "_target"],
target_df[col + "_source"])
target_df.rename(columns={col + "_target": col}, inplace=True)
target_df[col] = new_arr
if col + "_source" in target_df.columns:
# delete extra merge column
del target_df[col + "_source"]
#
# drop any duplicate rows
target_df.drop_duplicates(inplace=True)
self.tables[target_df_name].df = target_df
return target_df | Put the data for "col_name" from source_df into target_df
Used to get "azimuth" from sample table into measurements table
(for example).
Note: if getting data from the sample table, don't include "sample"
in the col_names list. It is included automatically. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L668-L773 |
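A hedged example of moving data columns down the hierarchy with the method above; note that the join column ('sample' here) is handled automatically and should not be listed:

from pmagpy.contribution_builder import Contribution

con = Contribution('my_project_dir')  # hypothetical directory
# measurement rows gain azimuth/dip looked up from their parent sample
target = con.propagate_cols(['azimuth', 'dip'], 'measurements', 'samples')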
PmagPy/PmagPy | pmagpy/contribution_builder.py | Contribution.propagate_cols_up | def propagate_cols_up(self, cols, target_df_name, source_df_name):
"""
Take values from source table, compile them into a colon-delimited list,
and apply them to the target table.
This method won't overwrite values in the target table, it will only
supply values where they are missing.
Parameters
----------
cols : list-like
list of columns to propagate
target_df_name : str
name of table to propagate values into
        source_df_name : str
name of table to propagate values from
Returns
---------
target_df : MagicDataFrame
updated MagicDataFrame with propagated values
"""
print("-I- Trying to propagate {} columns from {} table into {} table".format(cols,
source_df_name,
target_df_name))
# make sure target table is read in
if target_df_name not in self.tables:
self.add_magic_table(target_df_name)
if target_df_name not in self.tables:
print("-W- Couldn't read in {} table".format(target_df_name))
return
# make sure source table is read in
        if source_df_name not in self.tables:
            self.add_magic_table(source_df_name)
            if source_df_name not in self.tables:
                print("-W- Couldn't read in {} table".format(source_df_name))
                return
target_df = self.tables[target_df_name]
source_df = self.tables[source_df_name]
target_name = target_df_name[:-1]
# make sure source_df has relevant columns
for col in cols:
if col not in source_df.df.columns:
source_df.df[col] = None
# if target_df has info, propagate that into all rows
target_df.front_and_backfill(cols)
# make sure target_name is in source_df for merging
if target_name not in source_df.df.columns:
print("-W- You can't merge data from {} table into {} table".format(source_df_name, target_df_name))
print(" Your {} table is missing {} column".format(source_df_name, target_name))
self.tables[target_df_name] = target_df
return target_df
source_df.front_and_backfill([target_name])
# group source df by target_name
grouped = source_df.df.groupby(source_df.df[target_name])
if not len(grouped):
print("-W- Couldn't propagate from {} to {}".format(source_df_name, target_df_name))
return target_df
# function to generate capitalized, sorted, colon-delimited list
# of unique, non-null values from a column
def func(group, col_name):
lst = group[col_name][group[col_name].notnull()].unique()
split_lst = [col.split(':') for col in lst if col]
sorted_lst = sorted(np.unique([item.capitalize() for sublist in split_lst for item in sublist]))
group_col = ":".join(sorted_lst)
return group_col
# apply func to each column
for col in cols:
res = grouped.apply(func, col)
target_df.df['new_' + col] = res
target_df.df[col] = np.where(target_df.df[col], target_df.df[col], target_df.df['new_' + col])
target_df.df.drop(['new_' + col], axis='columns', inplace=True)
# set table
self.tables[target_df_name] = target_df
return target_df | python | def propagate_cols_up(self, cols, target_df_name, source_df_name):
"""
Take values from source table, compile them into a colon-delimited list,
and apply them to the target table.
This method won't overwrite values in the target table, it will only
supply values where they are missing.
Parameters
----------
cols : list-like
list of columns to propagate
target_df_name : str
name of table to propagate values into
        source_df_name : str
name of table to propagate values from
Returns
---------
target_df : MagicDataFrame
updated MagicDataFrame with propagated values
"""
print("-I- Trying to propagate {} columns from {} table into {} table".format(cols,
source_df_name,
target_df_name))
# make sure target table is read in
if target_df_name not in self.tables:
self.add_magic_table(target_df_name)
if target_df_name not in self.tables:
print("-W- Couldn't read in {} table".format(target_df_name))
return
# make sure source table is read in
        if source_df_name not in self.tables:
            self.add_magic_table(source_df_name)
            if source_df_name not in self.tables:
                print("-W- Couldn't read in {} table".format(source_df_name))
                return
target_df = self.tables[target_df_name]
source_df = self.tables[source_df_name]
target_name = target_df_name[:-1]
# make sure source_df has relevant columns
for col in cols:
if col not in source_df.df.columns:
source_df.df[col] = None
# if target_df has info, propagate that into all rows
target_df.front_and_backfill(cols)
# make sure target_name is in source_df for merging
if target_name not in source_df.df.columns:
print("-W- You can't merge data from {} table into {} table".format(source_df_name, target_df_name))
print(" Your {} table is missing {} column".format(source_df_name, target_name))
self.tables[target_df_name] = target_df
return target_df
source_df.front_and_backfill([target_name])
# group source df by target_name
grouped = source_df.df.groupby(source_df.df[target_name])
if not len(grouped):
print("-W- Couldn't propagate from {} to {}".format(source_df_name, target_df_name))
return target_df
# function to generate capitalized, sorted, colon-delimited list
# of unique, non-null values from a column
def func(group, col_name):
lst = group[col_name][group[col_name].notnull()].unique()
split_lst = [col.split(':') for col in lst if col]
sorted_lst = sorted(np.unique([item.capitalize() for sublist in split_lst for item in sublist]))
group_col = ":".join(sorted_lst)
return group_col
# apply func to each column
for col in cols:
res = grouped.apply(func, col)
target_df.df['new_' + col] = res
target_df.df[col] = np.where(target_df.df[col], target_df.df[col], target_df.df['new_' + col])
target_df.df.drop(['new_' + col], axis='columns', inplace=True)
# set table
self.tables[target_df_name] = target_df
return target_df | Take values from source table, compile them into a colon-delimited list,
and apply them to the target table.
This method won't overwrite values in the target table, it will only
supply values where they are missing.
Parameters
----------
cols : list-like
list of columns to propagate
target_df_name : str
name of table to propagate values into
source_df_name : str
name of table to propagate values from
Returns
---------
target_df : MagicDataFrame
updated MagicDataFrame with propagated values | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L775-L847 |
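A sketch of compiling values upward with the method above:

from pmagpy.contribution_builder import Contribution

con = Contribution('my_project_dir')  # hypothetical directory
# each site's missing lithologies/geologic_classes become a sorted,
# capitalized, colon-delimited list of the unique values in its samples
sites = con.propagate_cols_up(['lithologies', 'geologic_classes'], 'sites', 'samples')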
PmagPy/PmagPy | pmagpy/contribution_builder.py | Contribution.propagate_average_up | def propagate_average_up(self, cols=['lat', 'lon'],
target_df_name='sites', source_df_name='samples'):
"""
Propagate average values from a lower table to a higher one.
For example, propagate average lats/lons from samples to sites.
Pre-existing values will not be overwritten.
Parameters
----------
cols : list-like
list of columns to propagate
target_df_name : str
name of table to propagate values into
        source_df_name : str
name of table to propagate values from
Returns
---------
target_df : MagicDataFrame or None
returns table with propagated data,
or None if no propagation could be done
"""
# make sure target/source table are appropriate
target_ind = self.ancestry.index(target_df_name)
source_ind = self.ancestry.index(source_df_name)
if target_ind - source_ind != 1:
print('-W- propagate_average_up only works with tables that are spaced one apart, i.e. sites and samples.')
print(' Source table must be lower in the hierarchy than the target table.')
print(' You have provided "{}" as the target table and "{}" as the source table.'.format(target_df_name, source_df_name))
return None
# make sure target table is read in
if target_df_name not in self.tables:
self.add_magic_table(target_df_name)
if target_df_name not in self.tables:
print("-W- Couldn't read in {} table".format(target_df_name))
return
# make sure source table is read in
if source_df_name not in self.tables:
self.add_magic_table(source_df_name)
if source_df_name not in self.tables:
print("-W- Couldn't read in {} table".format(source_df_name))
return
# get tables
target_df = self.tables[target_df_name]
source_df = self.tables[source_df_name]
target_name = target_df_name[:-1]
# step 1: make sure columns exist in target_df
for col in cols:
if col not in target_df.df.columns:
target_df.df[col] = None
# step 2: propagate target_df columns forward & back
target_df.front_and_backfill(cols)
# step 3: see if any column values are missing
values = [not_null(val) for val in target_df.df[cols].values.ravel()]
if all(values):
print('-I- {} table already has {} filled column(s)'.format(target_df_name, cols))
self.tables[target_df_name] = target_df
return target_df
# step 4: make sure columns are in source table, also target name
if target_name not in source_df.df.columns:
print("-W- can't propagate from {} to {} table".format(source_df_name, target_df_name))
print(" Missing {} column in {} table".format(target_name, source_df_name))
self.tables[target_df_name] = target_df
return target_df
# step 5: if needed, average from source table and apply to target table
for col in cols:
if col not in source_df.df.columns:
source_df.df[col] = np.nan
else:
# make sure is numeric
source_df.df[col] = pd.to_numeric(source_df.df[col], errors='coerce')
grouped = source_df.df[cols + [target_name]].groupby(target_name)
grouped = grouped[cols].apply(np.mean)
for col in cols:
target_df.df['new_' + col] = grouped[col]
# use custom not_null
mask = [not_null(val) for val in target_df.df[col]]
target_df.df[col] = np.where(mask, #target_df.df[col].notnull(),
target_df.df[col],
target_df.df['new_' + col])
target_df.df.drop(['new_' + col], inplace=True, axis=1)
# round column to 5 decimal points
try:
target_df.df[col] = target_df.df[col].astype(float)
target_df.df = target_df.df.round({col: 5})
except ValueError: # if there are sneaky strings...
pass
self.tables[target_df_name] = target_df
return target_df | python | def propagate_average_up(self, cols=['lat', 'lon'],
target_df_name='sites', source_df_name='samples'):
"""
Propagate average values from a lower table to a higher one.
For example, propagate average lats/lons from samples to sites.
Pre-existing values will not be overwritten.
Parameters
----------
cols : list-like
list of columns to propagate
target_df_name : str
name of table to propagate values into
        source_df_name : str
name of table to propagate values from
Returns
---------
target_df : MagicDataFrame or None
returns table with propagated data,
or None if no propagation could be done
"""
# make sure target/source table are appropriate
target_ind = self.ancestry.index(target_df_name)
source_ind = self.ancestry.index(source_df_name)
if target_ind - source_ind != 1:
print('-W- propagate_average_up only works with tables that are spaced one apart, i.e. sites and samples.')
print(' Source table must be lower in the hierarchy than the target table.')
print(' You have provided "{}" as the target table and "{}" as the source table.'.format(target_df_name, source_df_name))
return None
# make sure target table is read in
if target_df_name not in self.tables:
self.add_magic_table(target_df_name)
if target_df_name not in self.tables:
print("-W- Couldn't read in {} table".format(target_df_name))
return
# make sure source table is read in
if source_df_name not in self.tables:
self.add_magic_table(source_df_name)
if source_df_name not in self.tables:
print("-W- Couldn't read in {} table".format(source_df_name))
return
# get tables
target_df = self.tables[target_df_name]
source_df = self.tables[source_df_name]
target_name = target_df_name[:-1]
# step 1: make sure columns exist in target_df
for col in cols:
if col not in target_df.df.columns:
target_df.df[col] = None
# step 2: propagate target_df columns forward & back
target_df.front_and_backfill(cols)
# step 3: see if any column values are missing
values = [not_null(val) for val in target_df.df[cols].values.ravel()]
if all(values):
print('-I- {} table already has {} filled column(s)'.format(target_df_name, cols))
self.tables[target_df_name] = target_df
return target_df
# step 4: make sure columns are in source table, also target name
if target_name not in source_df.df.columns:
print("-W- can't propagate from {} to {} table".format(source_df_name, target_df_name))
print(" Missing {} column in {} table".format(target_name, source_df_name))
self.tables[target_df_name] = target_df
return target_df
# step 5: if needed, average from source table and apply to target table
for col in cols:
if col not in source_df.df.columns:
source_df.df[col] = np.nan
else:
# make sure is numeric
source_df.df[col] = pd.to_numeric(source_df.df[col], errors='coerce')
grouped = source_df.df[cols + [target_name]].groupby(target_name)
grouped = grouped[cols].apply(np.mean)
for col in cols:
target_df.df['new_' + col] = grouped[col]
# use custom not_null
mask = [not_null(val) for val in target_df.df[col]]
target_df.df[col] = np.where(mask, #target_df.df[col].notnull(),
target_df.df[col],
target_df.df['new_' + col])
target_df.df.drop(['new_' + col], inplace=True, axis=1)
# round column to 5 decimal points
try:
target_df.df[col] = target_df.df[col].astype(float)
target_df.df = target_df.df.round({col: 5})
except ValueError: # if there are sneaky strings...
pass
self.tables[target_df_name] = target_df
return target_df | Propagate average values from a lower table to a higher one.
For example, propagate average lats/lons from samples to sites.
Pre-existing values will not be overwritten.
Parameters
----------
cols : list-like
list of columns to propagate
target_df_name : str
name of table to propagate values into
source_df_name : str
name of table to propagate values from
Returns
---------
target_df : MagicDataFrame or None
returns table with propagated data,
or None if no propagation could be done | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L849-L940 |
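A hedged example of averaging coordinates up one level with the method above (the arguments shown are the method's own defaults):

from pmagpy.contribution_builder import Contribution

con = Contribution('my_project_dir')  # hypothetical directory
# sites missing lat/lon receive the mean of their samples' values,
# rounded to five decimal places; existing values are kept
sites = con.propagate_average_up(cols=['lat', 'lon'],
                                 target_df_name='sites',
                                 source_df_name='samples')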
PmagPy/PmagPy | pmagpy/contribution_builder.py | Contribution.propagate_min_max_up | def propagate_min_max_up(self, cols=['age'],
target_df_name='locations',
source_df_name='sites',
min_suffix='low',
max_suffix='high'):
"""
Take minimum/maximum values for a set of columns in source_df,
and apply them to the target table.
This method won't overwrite values in the target table, it will only
supply values where they are missing.
Parameters
----------
cols : list-like
list of columns to propagate, default ['age']
target_df_name : str
name of table to propagate values into, default 'locations'
        source_df_name : str
name of table to propagate values from, default 'sites'
min_suffix : str
suffix for minimum value, default 'low'
max_suffix : str
suffix for maximum value, default 'high'
Returns
---------
target_df : MagicDataFrame
updated MagicDataFrame with propagated values
"""
# make sure target/source table are appropriate
target_ind = self.ancestry.index(target_df_name)
source_ind = self.ancestry.index(source_df_name)
if target_ind - source_ind != 1:
print('-W- propagate_min_max_up only works with tables that are spaced one apart, i.e. sites and samples.')
print(' Source table must be lower in the hierarchy than the target table.')
print(' You have provided "{}" as the target table and "{}" as the source table.'.format(target_df_name, source_df_name))
return None
# make sure target table is read in
if target_df_name not in self.tables:
self.add_magic_table(target_df_name)
if target_df_name not in self.tables:
print("-W- Couldn't read in {} table".format(target_df_name))
return
# make sure source table is read in
if source_df_name not in self.tables:
self.add_magic_table(source_df_name)
if source_df_name not in self.tables:
print("-W- Couldn't read in {} table".format(source_df_name))
return
# get tables
target_df = self.tables[target_df_name]
source_df = self.tables[source_df_name]
target_name = target_df_name[:-1]
# find and propagate min/max for each col in cols
for col in cols:
if col not in source_df.df.columns:
print('-W- {} table is missing "{}" column, skipping'.format(source_df_name, col))
continue
min_col = col + "_" + min_suffix
max_col = col + "_" + max_suffix
# add min/max cols to target_df if missing
if min_col not in target_df.df.columns:
target_df.df[min_col] = None
if max_col not in target_df.df.columns:
target_df.df[max_col] = None
# get min/max from source
if target_name not in source_df.df.columns:
print('-W- {} table missing {} column, cannot propagate age info'.format(source_df_name, target_name))
return
# make sure source is appropriately filled
source = source_df.front_and_backfill([col], inplace=False)
# add target_name back into front/backfilled source
source[target_name] = source_df.df[target_name]
grouped = source[[col, target_name]].groupby(target_name)
if len(grouped):
minimum, maximum = grouped.min(), grouped.max()
minimum = minimum.reindex(target_df.df.index)
maximum = maximum.reindex(target_df.df.index)
# update target_df without overwriting existing values
cond_min = target_df.df[min_col].apply(not_null)
cond_max = target_df.df[max_col].apply(not_null)
#
target_df.df[min_col] = np.where(cond_min,
target_df.df[min_col],
minimum[col])
target_df.df[max_col] = np.where(cond_max,
target_df.df[max_col],
maximum[col])
# update contribution
self.tables[target_df_name] = target_df
return target_df | python | def propagate_min_max_up(self, cols=['age'],
target_df_name='locations',
source_df_name='sites',
min_suffix='low',
max_suffix='high'):
"""
Take minimum/maximum values for a set of columns in source_df,
and apply them to the target table.
This method won't overwrite values in the target table, it will only
supply values where they are missing.
Parameters
----------
cols : list-like
list of columns to propagate, default ['age']
target_df_name : str
name of table to propagate values into, default 'locations'
source_df_name : str
name of table to propagate values from, default 'sites'
min_suffix : str
suffix for minimum value, default 'low'
max_suffix : str
suffix for maximum value, default 'high'
Returns
---------
target_df : MagicDataFrame
updated MagicDataFrame with propagated values
"""
# make sure target/source table are appropriate
target_ind = self.ancestry.index(target_df_name)
source_ind = self.ancestry.index(source_df_name)
if target_ind - source_ind != 1:
print('-W- propagate_min_max_up only works with tables that are one step apart in the hierarchy, e.g., sites and samples.')
print(' Source table must be lower in the hierarchy than the target table.')
print(' You have provided "{}" as the target table and "{}" as the source table.'.format(target_df_name, source_df_name))
return None
# make sure target table is read in
if target_df_name not in self.tables:
self.add_magic_table(target_df_name)
if target_df_name not in self.tables:
print("-W- Couldn't read in {} table".format(target_df_name))
return
# make sure source table is read in
if source_df_name not in self.tables:
self.add_magic_table(source_df_name)
if source_df_name not in self.tables:
print("-W- Couldn't read in {} table".format(source_df_name))
return
# get tables
target_df = self.tables[target_df_name]
source_df = self.tables[source_df_name]
target_name = target_df_name[:-1]
# find and propagate min/max for each col in cols
for col in cols:
if col not in source_df.df.columns:
print('-W- {} table is missing "{}" column, skipping'.format(source_df_name, col))
continue
min_col = col + "_" + min_suffix
max_col = col + "_" + max_suffix
# add min/max cols to target_df if missing
if min_col not in target_df.df.columns:
target_df.df[min_col] = None
if max_col not in target_df.df.columns:
target_df.df[max_col] = None
# get min/max from source
if target_name not in source_df.df.columns:
print('-W- {} table missing {} column, cannot propagate age info'.format(source_df_name, target_name))
return
# make sure source is appropriately filled
source = source_df.front_and_backfill([col], inplace=False)
# add target_name back into front/backfilled source
source[target_name] = source_df.df[target_name]
grouped = source[[col, target_name]].groupby(target_name)
if len(grouped):
minimum, maximum = grouped.min(), grouped.max()
minimum = minimum.reindex(target_df.df.index)
maximum = maximum.reindex(target_df.df.index)
# update target_df without overwriting existing values
cond_min = target_df.df[min_col].apply(not_null)
cond_max = target_df.df[max_col].apply(not_null)
#
target_df.df[min_col] = np.where(cond_min,
target_df.df[min_col],
minimum[col])
target_df.df[max_col] = np.where(cond_max,
target_df.df[max_col],
maximum[col])
# update contribution
self.tables[target_df_name] = target_df
return target_df | Take minimum/maximum values for a set of columns in source_df,
and apply them to the target table.
This method won't overwrite values in the target table, it will only
supply values where they are missing.
Parameters
----------
cols : list-like
list of columns to propagate, default ['age']
target_df_name : str
name of table to propagate values into, default 'locations'
source_df_name : str
name of table to propagate values from, default 'sites'
min_suffix : str
suffix for minimum value, default 'low'
max_suffix : str
suffix for maximum value, default 'high'
Returns
---------
target_df : MagicDataFrame
updated MagicDataFrame with propagated values | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L942-L1032 |
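
A standalone sketch of the same min/max propagation, assuming a made-up sites table grouped by location; only null cells in the target are filled. All names and values here are hypothetical.

import numpy as np
import pandas as pd

sites = pd.DataFrame({'location': ['locA', 'locA', 'locB'],
                      'age': [5.0, 9.0, 3.0]})
locations = pd.DataFrame({'age_low': [np.nan, 2.0],
                          'age_high': [np.nan, np.nan]},
                         index=['locA', 'locB'])

grouped = sites.groupby('location')['age']
low = grouped.min().reindex(locations.index)
high = grouped.max().reindex(locations.index)
# fill only the cells that are currently null
locations['age_low'] = np.where(locations['age_low'].notnull(),
                                locations['age_low'], low)
locations['age_high'] = np.where(locations['age_high'].notnull(),
                                 locations['age_high'], high)
print(locations)  # locB keeps its pre-existing age_low of 2.0
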
PmagPy/PmagPy | pmagpy/contribution_builder.py | Contribution.get_age_levels | def get_age_levels(self):
"""
Method to add a "level" column to the ages table.
Finds the lowest filled-in level (i.e., specimen, sample, etc.)
for that particular row.
I.e., a row with both site and sample name filled in is considered
a sample-level age.
Returns
---------
self.tables['ages'] : MagicDataFrame
updated ages table
"""
def get_level(ser, levels=('specimen', 'sample', 'site', 'location')):
for level in levels:
if pd.notnull(ser[level]):
if len(ser[level]): # guard against empty strings
return level
return
# get available levels in age table
possible_levels = ['specimen', 'sample', 'site', 'location']
levels = [level for level in possible_levels if level in self.tables['ages'].df.columns]
# find level for each age row
age_levels = self.tables['ages'].df.apply(get_level, axis=1, args=[levels])
if any(age_levels):
self.tables['ages'].df.loc[:, 'level'] = age_levels
return self.tables['ages'] | python | def get_age_levels(self):
"""
Method to add a "level" column to the ages table.
Finds the lowest filled-in level (i.e., specimen, sample, etc.)
for that particular row.
I.e., a row with both site and sample name filled in is considered
a sample-level age.
Returns
---------
self.tables['ages'] : MagicDataFrame
updated ages table
"""
def get_level(ser, levels=('specimen', 'sample', 'site', 'location')):
for level in levels:
if pd.notnull(ser[level]):
if len(ser[level]): # guard against empty strings
return level
return
# get available levels in age table
possible_levels = ['specimen', 'sample', 'site', 'location']
levels = [level for level in possible_levels if level in self.tables['ages'].df.columns]
# find level for each age row
age_levels = self.tables['ages'].df.apply(get_level, axis=1, args=[levels])
if any(age_levels):
self.tables['ages'].df.loc[:, 'level'] = age_levels
return self.tables['ages'] | Method to add a "level" column to the ages table.
Finds the lowest filled-in level (i.e., specimen, sample, etc.)
for that particular row.
I.e., a row with both site and sample name filled in is considered
a sample-level age.
Returns
---------
self.tables['ages'] : MagicDataFrame
updated ages table | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1034-L1060 |
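
A small self-contained sketch of the level-detection idea, with hypothetical age rows; the helper mirrors get_level above but is not the library's exact code.

import pandas as pd

ages = pd.DataFrame({'specimen': ['sp1', None, None],
                     'sample':   ['sa1', 'sa2', None],
                     'site':     ['si1', 'si2', 'si3']})

def get_level(row, levels=('specimen', 'sample', 'site', 'location')):
    # return the first (lowest) level column that holds a real value
    for level in levels:
        if level in row.index and pd.notnull(row[level]) and len(str(row[level])):
            return level

ages['level'] = ages.apply(get_level, axis=1)
print(ages['level'].tolist())  # ['specimen', 'sample', 'site']
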
PmagPy/PmagPy | pmagpy/contribution_builder.py | Contribution.propagate_ages | def propagate_ages(self):
"""
Mine ages table for any age data, and write it into
specimens, samples, sites, locations tables.
Do not overwrite existing age data.
"""
# if there is no age table, skip
if 'ages' not in self.tables:
return
# if age table has no data, skip
if not len(self.tables['ages'].df):
return
# get levels in age table
self.get_age_levels()
# if age levels could not be determined, skip
if not "level" in self.tables["ages"].df.columns:
return
if not any(self.tables["ages"].df["level"]):
return
# go through each level of age data
for level in self.tables['ages'].df['level'].unique():
table_name = level + 's'
age_headers = self.data_model.get_group_headers(table_name, 'Age')
# find age headers that are actually in table
actual_age_headers = list(set(self.tables[table_name].df.columns).intersection(age_headers))
# find age headers that are available in the ages table
available_age_headers = list(set(self.tables['ages'].df.columns).intersection(age_headers))
# fill in all available age info to all rows
self.tables[table_name].front_and_backfill(actual_age_headers)
# add any available headers to table
add_headers = set(available_age_headers).difference(actual_age_headers)
for header in add_headers:
self.tables[table_name].df[header] = None
# propagate values from ages into table
def move_values(ser, level, available_headers):
name = ser.name
cond1 = self.tables['ages'].df[level] == name
cond2 = self.tables['ages'].df['level'] == level
mask = cond1 & cond2
sli = self.tables['ages'].df[mask]
if len(sli):
return list(sli[available_headers].values[0])
return [None] * len(available_headers)
res = self.tables[table_name].df.apply(move_values, axis=1,
args=[level, available_age_headers])
# fill in table with values gleaned from ages
new_df = pd.DataFrame(data=list(res.values), index=res.index,
columns=available_age_headers)
age_values = np.where(self.tables[table_name].df[available_age_headers],
self.tables[table_name].df[available_age_headers],
new_df)
self.tables[table_name].df[available_age_headers] = age_values
#
# put age_high, age_low into locations table
print("-I- Adding age_high and age_low to locations table based on minimum/maximum ages found in sites table")
self.propagate_min_max_up(cols=['age'], target_df_name='locations',
source_df_name='sites') | python | def propagate_ages(self):
"""
Mine ages table for any age data, and write it into
specimens, samples, sites, locations tables.
Do not overwrite existing age data.
"""
# if there is no age table, skip
if 'ages' not in self.tables:
return
# if age table has no data, skip
if not len(self.tables['ages'].df):
return
# get levels in age table
self.get_age_levels()
# if age levels could not be determined, skip
if not "level" in self.tables["ages"].df.columns:
return
if not any(self.tables["ages"].df["level"]):
return
# go through each level of age data
for level in self.tables['ages'].df['level'].unique():
table_name = level + 's'
age_headers = self.data_model.get_group_headers(table_name, 'Age')
# find age headers that are actually in table
actual_age_headers = list(set(self.tables[table_name].df.columns).intersection(age_headers))
# find age headers that are available in the ages table
available_age_headers = list(set(self.tables['ages'].df.columns).intersection(age_headers))
# fill in all available age info to all rows
self.tables[table_name].front_and_backfill(actual_age_headers)
# add any available headers to table
add_headers = set(available_age_headers).difference(actual_age_headers)
for header in add_headers:
self.tables[table_name].df[header] = None
# propagate values from ages into table
def move_values(ser, level, available_headers):
name = ser.name
cond1 = self.tables['ages'].df[level] == name
cond2 = self.tables['ages'].df['level'] == level
mask = cond1 & cond2
sli = self.tables['ages'].df[mask]
if len(sli):
return list(sli[available_headers].values[0])
return [None] * len(available_headers)
res = self.tables[table_name].df.apply(move_values, axis=1,
args=[level, available_age_headers])
# fill in table with values gleaned from ages
new_df = pd.DataFrame(data=list(res.values), index=res.index,
columns=available_age_headers)
age_values = np.where(self.tables[table_name].df[available_age_headers],
self.tables[table_name].df[available_age_headers],
new_df)
self.tables[table_name].df[available_age_headers] = age_values
#
# put age_high, age_low into locations table
print("-I- Adding age_high and age_low to locations table based on minimum/maximum ages found in sites table")
self.propagate_min_max_up(cols=['age'], target_df_name='locations',
source_df_name='sites') | Mine ages table for any age data, and write it into
specimens, samples, sites, locations tables.
Do not overwrite existing age data. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1062-L1119 |
PmagPy/PmagPy | pmagpy/contribution_builder.py | Contribution.remove_non_magic_cols | def remove_non_magic_cols(self):
"""
Remove all non-MagIC columns from all tables.
"""
for table_name in self.tables:
table = self.tables[table_name]
table.remove_non_magic_cols_from_table() | python | def remove_non_magic_cols(self):
"""
Remove all non-MagIC columns from all tables.
"""
for table_name in self.tables:
table = self.tables[table_name]
table.remove_non_magic_cols_from_table() | Remove all non-MagIC columns from all tables. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1123-L1129 |
PmagPy/PmagPy | pmagpy/contribution_builder.py | Contribution.write_table_to_file | def write_table_to_file(self, dtype, custom_name=None, append=False, dir_path=None):
"""
Write out a MagIC table to file, using custom filename
as specified in self.filenames.
Parameters
----------
dtype : str
magic table name
custom_name : str
custom file name to use instead of self.filenames[dtype]
append : bool
whether to append to an existing file
dir_path : str
output directory; defaults to self.directory
"""
if custom_name:
fname = custom_name
else:
fname = self.filenames[dtype]
if not dir_path:
dir_path=self.directory
if dtype in self.tables:
write_df = self.remove_names(dtype)
outfile = self.tables[dtype].write_magic_file(custom_name=fname,
dir_path=dir_path,
append=append, df=write_df)
return outfile | python | def write_table_to_file(self, dtype, custom_name=None, append=False, dir_path=None):
"""
Write out a MagIC table to file, using custom filename
as specified in self.filenames.
Parameters
----------
dtype : str
magic table name
custom_name : str
custom file name to use instead of self.filenames[dtype]
append : bool
whether to append to an existing file
dir_path : str
output directory; defaults to self.directory
"""
if custom_name:
fname = custom_name
else:
fname = self.filenames[dtype]
if not dir_path:
dir_path=self.directory
if dtype in self.tables:
write_df = self.remove_names(dtype)
outfile = self.tables[dtype].write_magic_file(custom_name=fname,
dir_path=dir_path,
append=append, df=write_df)
return outfile | Write out a MagIC table to file, using custom filename
as specified in self.filenames.
Parameters
----------
dtype : str
magic table name
custom_name : str
custom file name to use instead of self.filenames[dtype]
append : bool
whether to append to an existing file
dir_path : str
output directory; defaults to self.directory | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1131-L1152 |
PmagPy/PmagPy | pmagpy/contribution_builder.py | Contribution.remove_names | def remove_names(self, dtype):
"""
Remove unneeded name columns ('specimen'/'sample'/etc)
from the specified table.
Parameters
----------
dtype : str
Returns
---------
pandas DataFrame without the unneeded columns
Example
---------
Contribution.tables['specimens'].df = Contribution.remove_names('specimens')
# takes out 'location', 'site', and/or 'sample' columns from the
# specimens dataframe if those columns have been added
"""
if dtype not in self.ancestry:
return
if dtype in self.tables:
# remove extra columns here
self_ind = self.ancestry.index(dtype)
parent_ind = self_ind + 1 if self_ind < (len(self.ancestry) -1) else self_ind
remove = set(self.ancestry).difference([self.ancestry[self_ind], self.ancestry[parent_ind]])
remove = [dtype[:-1] for dtype in remove]
columns = self.tables[dtype].df.columns.difference(remove)
return self.tables[dtype].df[columns] | python | def remove_names(self, dtype):
"""
Remove unneeded name columns ('specimen'/'sample'/etc)
from the specified table.
Parameters
----------
dtype : str
Returns
---------
pandas DataFrame without the unneeded columns
Example
---------
Contribution.tables['specimens'].df = Contribution.remove_names('specimens')
# takes out 'location', 'site', and/or 'sample' columns from the
# specimens dataframe if those columns have been added
"""
if dtype not in self.ancestry:
return
if dtype in self.tables:
# remove extra columns here
self_ind = self.ancestry.index(dtype)
parent_ind = self_ind + 1 if self_ind < (len(self.ancestry) -1) else self_ind
remove = set(self.ancestry).difference([self.ancestry[self_ind], self.ancestry[parent_ind]])
remove = [dtype[:-1] for dtype in remove]
columns = self.tables[dtype].df.columns.difference(remove)
return self.tables[dtype].df[columns] | Remove unneeded name columns ('specimen'/'sample'/etc)
from the specified table.
Parameters
----------
dtype : str
Returns
---------
pandas DataFrame without the unneeded columns
Example
---------
Contribution.tables['specimens'].df = Contribution.remove_names('specimens')
# takes out 'location', 'site', and/or 'sample' columns from the
# specimens dataframe if those columns have been added | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1154-L1182 |
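
The column pruning above is plain set arithmetic on an ancestry list. A sketch with an invented hierarchy and an empty frame; every name here is illustrative only.

import pandas as pd

ancestry = ['measurements', 'specimens', 'samples', 'sites', 'locations']
df = pd.DataFrame(columns=['specimen', 'sample', 'site', 'location', 'azimuth'])

dtype = 'specimens'
self_ind = ancestry.index(dtype)
parent = ancestry[min(self_ind + 1, len(ancestry) - 1)]
keep = {dtype[:-1], parent[:-1]}                    # {'specimen', 'sample'}
remove = sorted({t[:-1] for t in ancestry} - keep)  # other hierarchy names
print(df[df.columns.difference(remove)].columns.tolist())
# ['azimuth', 'sample', 'specimen']  (difference() returns a sorted Index)
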
PmagPy/PmagPy | pmagpy/contribution_builder.py | Contribution.find_missing_items | def find_missing_items(self, dtype):
"""
Find any items that are referenced in a child table
but are missing in their own table.
For example, a site that is listed in the samples table,
but has no entry in the sites table.
Parameters
----------
dtype : str
table name, e.g. 'specimens'
Returns
---------
set of missing values
"""
parent_dtype, child_dtype = self.get_parent_and_child(dtype)
if not child_dtype in self.tables:
return set()
items = set(self.tables[dtype].df.index.unique())
items_in_child_table = set(self.tables[child_dtype].df[dtype[:-1]].unique())
return {i for i in (items_in_child_table - items) if not_null(i)} | python | def find_missing_items(self, dtype):
"""
Find any items that are referenced in a child table
but are missing in their own table.
For example, a site that is listed in the samples table,
but has no entry in the sites table.
Parameters
----------
dtype : str
table name, e.g. 'specimens'
Returns
---------
set of missing values
"""
parent_dtype, child_dtype = self.get_parent_and_child(dtype)
if not child_dtype in self.tables:
return set()
items = set(self.tables[dtype].df.index.unique())
items_in_child_table = set(self.tables[child_dtype].df[dtype[:-1]].unique())
return {i for i in (items_in_child_table - items) if not_null(i)} | Find any items that are referenced in a child table
but are missing in their own table.
For example, a site that is listed in the samples table,
but has no entry in the sites table.
Parameters
----------
dtype : str
table name, e.g. 'specimens'
Returns
---------
set of missing values | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1187-L1208 |
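
A toy illustration of the set difference involved, with invented site/sample names:

import pandas as pd

sites = pd.DataFrame({'lat': [10.0]}, index=['site1'])
samples = pd.DataFrame({'site': ['site1', 'site2', 'site2']})

known = set(sites.index.unique())
referenced = set(samples['site'].unique())
print(referenced - known)  # {'site2'}: referenced by a sample, but undefined
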
PmagPy/PmagPy | pmagpy/contribution_builder.py | Contribution.get_con_id | def get_con_id(self):
"""
Return contribution id if available
"""
con_id = ""
if "contribution" in self.tables:
if "id" in self.tables["contribution"].df.columns:
con_id = str(self.tables["contribution"].df["id"].values[0])
return con_id | python | def get_con_id(self):
"""
Return contribution id if available
"""
con_id = ""
if "contribution" in self.tables:
if "id" in self.tables["contribution"].df.columns:
con_id = str(self.tables["contribution"].df["id"].values[0])
return con_id | Return contribution id if available | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1211-L1219 |
PmagPy/PmagPy | pmagpy/contribution_builder.py | MagicDataFrame.all_to_str | def all_to_str(self):
"""
In all columns, turn all floats/ints into strings.
If a float ends with .0, strip off '.0' from the resulting string.
"""
def stringify(x):
# float --> string,
# truncating floats like 3.0 --> 3
if isinstance(x, float):
if x.is_integer():
#print('{} --> {}'.format(x, str(x).rstrip('0').rstrip('.')))
return str(x).rstrip('0').rstrip('.')
return(str(x))
# keep strings as they are,
# unless it is a string like "3.0",
# in which case truncate that too
if isinstance(x, str):
try:
float(x)
if x.endswith('0'):
if x.rstrip('0').endswith('.'):
#print('{} --> {}'.format(x, x.rstrip('0').rstrip('.')))
return x.rstrip('0').rstrip('.')
except (ValueError, TypeError):
pass
# integer --> string
if isinstance(x, int):
return str(x)
# if it is not int/str/float, just return as is
return x
def remove_extra_digits(x, prog):
"""
Remove extra digits
x is a string,
prog is always the following '_sre.SRE_Pattern':
prog = re.compile("\d*[.]\d*([0]{5,100}|[9]{5,100})\d*\Z").
However, it is compiled outside of this sub-function
for performance reasons.
"""
if not isinstance(x, str):
return x
result = prog.match(x)
if result:
decimals = result.string.split('.')[1]
result = result.string
if decimals[-3] == '0':
result = x[:-2].rstrip('0')
if decimals[-3] == '9':
result = x[:-2].rstrip('9')
try:
last_digit = int(result[-1])
result = result[:-1] + str(last_digit + 1)
except ValueError:
result = float(result[:-1]) + 1
#if result != x:
# print('changing {} to {}'.format(x, result))
return result
return x
for col in self.df.columns:
self.df[col] = self.df[col].apply(stringify)
prog = re.compile("\d*[.]\d*([0]{5,100}|[9]{5,100})\d*\Z")
for col in self.df.columns:
self.df[col] = self.df[col].apply(lambda x: remove_extra_digits(x, prog)) | python | def all_to_str(self):
"""
In all columns, turn all floats/ints into strings.
If a float ends with .0, strip off '.0' from the resulting string.
"""
def stringify(x):
# float --> string,
# truncating floats like 3.0 --> 3
if isinstance(x, float):
if x.is_integer():
#print('{} --> {}'.format(x, str(x).rstrip('0').rstrip('.')))
return str(x).rstrip('0').rstrip('.')
return(str(x))
# keep strings as they are,
# unless it is a string like "3.0",
# in which case truncate that too
if isinstance(x, str):
try:
float(x)
if x.endswith('0'):
if x.rstrip('0').endswith('.'):
#print('{} --> {}'.format(x, x.rstrip('0').rstrip('.')))
return x.rstrip('0').rstrip('.')
except (ValueError, TypeError):
pass
# integer --> string
if isinstance(x, int):
return str(x)
# if it is not int/str/float, just return as is
return x
def remove_extra_digits(x, prog):
"""
Remove extra digits
x is a string,
prog is always the following '_sre.SRE_Pattern':
prog = re.compile("\d*[.]\d*([0]{5,100}|[9]{5,100})\d*\Z").
However, it is compiled outside of this sub-function
for performance reasons.
"""
if not isinstance(x, str):
return x
result = prog.match(x)
if result:
decimals = result.string.split('.')[1]
result = result.string
if decimals[-3] == '0':
result = x[:-2].rstrip('0')
if decimals[-3] == '9':
result = x[:-2].rstrip('9')
try:
last_digit = int(result[-1])
result = result[:-1] + str(last_digit + 1)
except ValueError:
result = float(result[:-1]) + 1
#if result != x:
# print('changing {} to {}'.format(x, result))
return result
return x
for col in self.df.columns:
self.df[col] = self.df[col].apply(stringify)
prog = re.compile("\d*[.]\d*([0]{5,100}|[9]{5,100})\d*\Z")
for col in self.df.columns:
self.df[col] = self.df[col].apply(lambda x: remove_extra_digits(x, prog)) | In all columns, turn all floats/ints into strings.
If a float ends with .0, strip off '.0' from the resulting string. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1430-L1495 |
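
The core truncation rule can be shown in a few lines. A simplified sketch of stringify (it deliberately skips the string and rounding branches of the real function):

def stringify(x):
    # floats that are whole numbers lose their trailing '.0'
    if isinstance(x, float) and x.is_integer():
        return str(x).rstrip('0').rstrip('.')  # 3.0 -> '3.' -> '3'
    if isinstance(x, (int, float)):
        return str(x)
    return x

print([stringify(v) for v in [3.0, 3.5, 7, 'abc']])  # ['3', '3.5', '7', 'abc']
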
PmagPy/PmagPy | pmagpy/contribution_builder.py | MagicDataFrame.remove_non_magic_cols_from_table | def remove_non_magic_cols_from_table(self, ignore_cols=()):
"""
Remove all non-magic columns from self.df.
Changes in place.
Parameters
----------
ignore_cols : list-like
columns not to remove, whether they are proper
MagIC columns or not
Returns
---------
unrecognized_cols : list
any columns that were removed
"""
unrecognized_cols = self.get_non_magic_cols()
for col in ignore_cols:
if col in unrecognized_cols:
unrecognized_cols.remove(col)
if unrecognized_cols:
print('-I- Removing non-MagIC column names from {}:'.format(self.dtype), end=' ')
for col in unrecognized_cols:
self.df.drop(col, axis='columns', inplace=True)
print(col, end=' ')
print("\n")
return unrecognized_cols | python | def remove_non_magic_cols_from_table(self, ignore_cols=()):
"""
Remove all non-magic columns from self.df.
Changes in place.
Parameters
----------
ignore_cols : list-like
columns not to remove, whether they are proper
MagIC columns or not
Returns
---------
unrecognized_cols : list
any columns that were removed
"""
unrecognized_cols = self.get_non_magic_cols()
for col in ignore_cols:
if col in unrecognized_cols:
unrecognized_cols.remove(col)
if unrecognized_cols:
print('-I- Removing non-MagIC column names from {}:'.format(self.dtype), end=' ')
for col in unrecognized_cols:
self.df.drop(col, axis='columns', inplace=True)
print(col, end=' ')
print("\n")
return unrecognized_cols | Remove all non-magic columns from self.df.
Changes in place.
Parameters
----------
ignore_cols : list-like
columns not to remove, whether they are proper
MagIC columns or not
Returns
---------
unrecognized_cols : list
any columns that were removed | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1498-L1524 |
PmagPy/PmagPy | pmagpy/contribution_builder.py | MagicDataFrame.update_row | def update_row(self, ind, row_data):
"""
Update a row with data.
Must provide the specific numeric index (not row label).
If any new keys are present in row_data dictionary,
that column will be added to the dataframe.
This is done inplace.
"""
if sorted(row_data.keys()) != sorted(self.df.columns):
# add any new column names
for key in row_data:
if key not in self.df.columns:
self.df[key] = None
# add missing column names into row_data
for col_label in self.df.columns:
if col_label not in list(row_data.keys()):
row_data[col_label] = None
try:
self.df.iloc[ind] = pd.Series(row_data)
except IndexError:
return False
return self.df | python | def update_row(self, ind, row_data):
"""
Update a row with data.
Must provide the specific numeric index (not row label).
If any new keys are present in row_data dictionary,
that column will be added to the dataframe.
This is done inplace.
"""
if sorted(row_data.keys()) != sorted(self.df.columns):
# add any new column names
for key in row_data:
if key not in self.df.columns:
self.df[key] = None
# add missing column names into row_data
for col_label in self.df.columns:
if col_label not in list(row_data.keys()):
row_data[col_label] = None
try:
self.df.iloc[ind] = pd.Series(row_data)
except IndexError:
return False
return self.df | Update a row with data.
Must provide the specific numeric index (not row label).
If any new keys are present in row_data dictionary,
that column will be added to the dataframe.
This is done inplace. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1551-L1572 |
PmagPy/PmagPy | pmagpy/contribution_builder.py | MagicDataFrame.add_row | def add_row(self, label, row_data, columns=""):
"""
Add a row with data.
If any new keys are present in row_data dictionary,
that column will be added to the dataframe.
This is done inplace
"""
# use provided column order, making sure you don't lose any values
# from self.df.columns
if len(columns):
if sorted(self.df.columns) == sorted(columns):
self.df.columns = columns
else:
new_columns = []
new_columns.extend(columns)
for col in self.df.columns:
if col not in new_columns:
new_columns.append(col)
# makes sure all columns have data or None
if sorted(row_data.keys()) != sorted(self.df.columns):
# add any new column names
for key in row_data:
if key not in self.df.columns:
self.df[key] = None
# add missing column names into row_data
for col_label in self.df.columns:
if col_label not in list(row_data.keys()):
row_data[col_label] = None
# (make sure you are working with strings)
self.df.index = self.df.index.astype(str)
label = str(label)
# create a new row with suffix "new"
# (this ensures that you get a unique, new row,
# instead of adding on to an existing row with the same label)
self.df.loc[label + "new"] = pd.Series(row_data)
# rename it to be correct
self.df.rename(index={label + "new": label}, inplace=True)
# use next line to sort index inplace
#self.df.sort_index(inplace=True)
return self.df | python | def add_row(self, label, row_data, columns=""):
"""
Add a row with data.
If any new keys are present in row_data dictionary,
that column will be added to the dataframe.
This is done inplace
"""
# use provided column order, making sure you don't lose any values
# from self.df.columns
if len(columns):
if sorted(self.df.columns) == sorted(columns):
self.df.columns = columns
else:
new_columns = []
new_columns.extend(columns)
for col in self.df.columns:
if col not in new_columns:
new_columns.append(col)
# makes sure all columns have data or None
if sorted(row_data.keys()) != sorted(self.df.columns):
# add any new column names
for key in row_data:
if key not in self.df.columns:
self.df[key] = None
# add missing column names into row_data
for col_label in self.df.columns:
if col_label not in list(row_data.keys()):
row_data[col_label] = None
# (make sure you are working with strings)
self.df.index = self.df.index.astype(str)
label = str(label)
# create a new row with suffix "new"
# (this ensures that you get a unique, new row,
# instead of adding on to an existing row with the same label)
self.df.loc[label + "new"] = pd.Series(row_data)
# rename it to be correct
self.df.rename(index={label + "new": label}, inplace=True)
# use next line to sort index inplace
#self.df.sort_index(inplace=True)
return self.df | Add a row with data.
If any new keys are present in row_data dictionary,
that column will be added to the dataframe.
This is done inplace | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1574-L1615 |
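
The "suffix then rename" trick above is worth seeing in isolation, since assigning to .loc[label] on an existing label would overwrite that row rather than append a new one. A sketch with made-up data:

import pandas as pd

df = pd.DataFrame({'azimuth': [12]}, index=['samp1'])
label, row_data = 'samp1', {'azimuth': 40}

df.loc[label + 'new'] = pd.Series(row_data)        # guaranteed-new label
df.rename(index={label + 'new': label}, inplace=True)
print(df)  # two rows, both labeled 'samp1'
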
PmagPy/PmagPy | pmagpy/contribution_builder.py | MagicDataFrame.add_data | def add_data(self, data): # add append option later
"""
Add df to a MagicDataFrame using a data list.
Parameters
----------
data : list of dicts
data list with format [{'key1': 'val1', ...}, {'key1': 'val2', ...}, ... }]
dtype : str
MagIC table type (read from self.dtype, not passed as an argument)
"""
df = pd.DataFrame(data)
name, dtype = self.get_singular_and_plural_dtype(self.dtype)
if name in df.columns:
df.index = df[name]
df.index.name = name + " name"
self.df = df | python | def add_data(self, data): # add append option later
"""
Add df to a MagicDataFrame using a data list.
Parameters
----------
data : list of dicts
data list with format [{'key1': 'val1', ...}, {'key1': 'val2', ...}, ... }]
dtype : str
MagIC table type (read from self.dtype, not passed as an argument)
"""
df = pd.DataFrame(data)
name, dtype = self.get_singular_and_plural_dtype(self.dtype)
if name in df.columns:
df.index = df[name]
df.index.name = name + " name"
self.df = df | Add df to a MagicDataFrame using a data list.
Parameters
----------
data : list of dicts
data list with format [{'key1': 'val1', ...}, {'key1': 'val2', ...}, ... }]
dtype : str
MagIC table type (read from self.dtype, not passed as an argument) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1617-L1633 |
PmagPy/PmagPy | pmagpy/contribution_builder.py | MagicDataFrame.add_blank_row | def add_blank_row(self, label):
"""
Add a blank row with only an index value to self.df.
This is done inplace.
"""
col_labels = self.df.columns
blank_item = pd.Series({}, index=col_labels, name=label)
# use .loc to add in place (append won't do that)
self.df.loc[blank_item.name] = blank_item
return self.df | python | def add_blank_row(self, label):
"""
Add a blank row with only an index value to self.df.
This is done inplace.
"""
col_labels = self.df.columns
blank_item = pd.Series({}, index=col_labels, name=label)
# use .loc to add in place (append won't do that)
self.df.loc[blank_item.name] = blank_item
return self.df | Add a blank row with only an index value to self.df.
This is done inplace. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1635-L1644 |
PmagPy/PmagPy | pmagpy/contribution_builder.py | MagicDataFrame.delete_row | def delete_row(self, ind):
"""
remove self.df row at ind
inplace
"""
self.df = pd.concat([self.df[:ind], self.df[ind+1:]], sort=True)
return self.df | python | def delete_row(self, ind):
"""
remove self.df row at ind
inplace
"""
self.df = pd.concat([self.df[:ind], self.df[ind+1:]], sort=True)
return self.df | remove self.df row at ind
inplace | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1646-L1652 |
PmagPy/PmagPy | pmagpy/contribution_builder.py | MagicDataFrame.delete_rows | def delete_rows(self, condition, info_str=None):
"""
delete all rows with condition==True
inplace
Parameters
----------
condition : pandas DataFrame indexer
all self.df rows that meet this condition will be deleted
info_str : str
description of the kind of rows to be deleted,
e.g "specimen rows with blank method codes"
Returns
--------
df_data : pandas DataFrame
updated self.df
"""
self.df['num'] = list(range(len(self.df)))
df_data = self.df
# delete all records that meet condition
if len(df_data[condition]) > 0: #we have one or more records to delete
inds = df_data[condition]['num'] # list of all rows where condition is TRUE
for ind in inds[::-1]:
df_data = self.delete_row(ind)
if info_str:
print("-I- Deleting {}. ".format(info_str), end=' ')
print('deleting row {}'.format(str(ind)))
# sort so that all rows for an item are together
df_data.sort_index(inplace=True)
# redo temporary index
df_data['num'] = list(range(len(df_data)))
self.df = df_data
return df_data | python | def delete_rows(self, condition, info_str=None):
"""
delete all rows with condition==True
inplace
Parameters
----------
condition : pandas DataFrame indexer
all self.df rows that meet this condition will be deleted
info_str : str
description of the kind of rows to be deleted,
e.g "specimen rows with blank method codes"
Returns
--------
df_data : pandas DataFrame
updated self.df
"""
self.df['num'] = list(range(len(self.df)))
df_data = self.df
# delete all records that meet condition
if len(df_data[condition]) > 0: #we have one or more records to delete
inds = df_data[condition]['num'] # list of all rows where condition is TRUE
for ind in inds[::-1]:
df_data = self.delete_row(ind)
if info_str:
print("-I- Deleting {}. ".format(info_str), end=' ')
print('deleting row {}'.format(str(ind)))
# sort so that all rows for an item are together
df_data.sort_index(inplace=True)
# redo temporary index
df_data['num'] = list(range(len(df_data)))
self.df = df_data
return df_data | delete all rows with condition==True
inplace
Parameters
----------
condition : pandas DataFrame indexer
all self.df rows that meet this condition will be deleted
info_str : str
description of the kind of rows to be deleted,
e.g "specimen rows with blank method codes"
Returns
--------
df_data : pandas DataFrame
updated self.df | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1654-L1687 |
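
Behaviorally, the delete loop above is close to a single boolean-mask selection (minus the sorting, temporary 'num' column, and logging). A hypothetical condition:

import pandas as pd

df = pd.DataFrame({'method_codes': ['LP-DIR', None, 'LP-DIR']})
condition = df['method_codes'].isnull()
df = df[~condition]  # keep only rows where the condition is False
print(len(df))       # 2
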
PmagPy/PmagPy | pmagpy/contribution_builder.py | MagicDataFrame.drop_stub_rows | def drop_stub_rows(self, ignore_cols=('specimen',
'sample',
'software_packages',
'num')):
"""
Drop self.df rows that have only null values,
ignoring certain columns.
Parameters
----------
ignore_cols : list-like
list of column names to ignore when checking for empty rows
Returns
---------
self.df : pandas DataFrame
"""
# ignore citations if they just say 'This study'
if 'citations' in self.df.columns:
if list(self.df['citations'].unique()) == ['This study']:
ignore_cols = ignore_cols + ('citations',)
drop_cols = self.df.columns.difference(ignore_cols)
self.df.dropna(axis='index', subset=drop_cols, how='all', inplace=True)
return self.df | python | def drop_stub_rows(self, ignore_cols=('specimen',
'sample',
'software_packages',
'num')):
"""
Drop self.df rows that have only null values,
ignoring certain columns.
Parameters
----------
ignore_cols : list-like
list of column names to ignore when checking for empty rows
Returns
---------
self.df : pandas DataFrame
"""
# ignore citations if they just say 'This study'
if 'citations' in self.df.columns:
if list(self.df['citations'].unique()) == ['This study']:
ignore_cols = ignore_cols + ('citations',)
drop_cols = self.df.columns.difference(ignore_cols)
self.df.dropna(axis='index', subset=drop_cols, how='all', inplace=True)
return self.df | Drop self.df rows that have only null values,
ignoring certain columns.
Parameters
----------
ignore_cols : list-like
list of column names to ignore when checking for empty rows
Returns
---------
self.df : pandas DataFrame | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1689-L1712 |
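
A sketch of the dropna pattern with an invented table; the row whose only content is its name column is the "stub" that gets dropped:

import numpy as np
import pandas as pd

df = pd.DataFrame({'sample': ['a', 'b'],
                   'azimuth': [np.nan, 30.0],
                   'dip': [np.nan, np.nan]})
check_cols = df.columns.difference(['sample'])       # ignore the name column
df = df.dropna(axis='index', subset=check_cols, how='all')
print(df)  # row 'a' had no data outside 'sample' and is dropped
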
PmagPy/PmagPy | pmagpy/contribution_builder.py | MagicDataFrame.drop_duplicate_rows | def drop_duplicate_rows(self, ignore_cols=['specimen', 'sample']):
"""
Drop self.df rows that have only null values,
ignoring certain columns BUT only if those rows
do not have a unique index.
Different from drop_stub_rows because it only drops
empty rows if there is another row with that index.
Parameters
----------
ignore_cols : list-like
list of column names to ignore
Returns
----------
self.df : pandas DataFrame
"""
# keep any row with a unique index
unique_index = self.df.index.unique()
cond1 = ~self.df.index.duplicated(keep=False)
# or with actual data
ignore_cols = [col for col in ignore_cols if col in self.df.columns]
relevant_df = self.df.drop(ignore_cols, axis=1)
cond2 = relevant_df.notnull().any(axis=1)
orig_len = len(self.df)
new_df = self.df[cond1 | cond2]
# make sure we haven't lost anything important
if any(unique_index.difference(new_df.index.unique())):
cond1 = ~self.df.index.duplicated(keep="first")
self.df = self.df[cond1 | cond2]
end_len = len(self.df)
removed = orig_len - end_len
if removed:
print('-I- Removed {} redundant records from {} table'.format(removed, self.dtype))
return self.df | python | def drop_duplicate_rows(self, ignore_cols=['specimen', 'sample']):
"""
Drop self.df rows that have only null values,
ignoring certain columns BUT only if those rows
do not have a unique index.
Different from drop_stub_rows because it only drops
empty rows if there is another row with that index.
Parameters
----------
ignore_cols : list-like
list of column names to ignore
Returns
----------
self.df : pandas DataFrame
"""
# keep any row with a unique index
unique_index = self.df.index.unique()
cond1 = ~self.df.index.duplicated(keep=False)
# or with actual data
ignore_cols = [col for col in ignore_cols if col in self.df.columns]
relevant_df = self.df.drop(ignore_cols, axis=1)
cond2 = relevant_df.notnull().any(axis=1)
orig_len = len(self.df)
new_df = self.df[cond1 | cond2]
# make sure we haven't lost anything important
if any(unique_index.difference(new_df.index.unique())):
cond1 = ~self.df.index.duplicated(keep="first")
self.df = self.df[cond1 | cond2]
end_len = len(self.df)
removed = orig_len - end_len
if removed:
print('-I- Removed {} redundant records from {} table'.format(removed, self.dtype))
return self.df | Drop self.df rows that have only null values,
ignoring certain columns BUT only if those rows
do not have a unique index.
Different from drop_stub_rows because it only drops
empty rows if there is another row with that index.
Parameters
----------
ignore_cols : list-like
list of column names to ignore
Returns
----------
self.df : pandas DataFrame | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1714-L1749 |
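
The two keep-conditions above combine as a union; a minimal sketch with invented index labels:

import numpy as np
import pandas as pd

df = pd.DataFrame({'lat': [np.nan, 10.0, np.nan]},
                  index=['site1', 'site1', 'site2'])
cond1 = ~df.index.duplicated(keep=False)  # True only for unique labels
cond2 = df.notnull().any(axis=1)          # True for rows with any real data
print(df[cond1 | cond2])
# the empty duplicate of 'site1' is dropped; empty-but-unique 'site2' stays
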
PmagPy/PmagPy | pmagpy/contribution_builder.py | MagicDataFrame.update_record | def update_record(self, name, new_data, condition, update_only=False,
debug=False):
"""
Find the first row in self.df with index == name
and condition == True.
Update that record with new_data, then delete any
additional records where index == name and condition == True.
Change is inplace
"""
# add numeric index column temporarily
self.df['num'] = list(range(len(self.df)))
df_data = self.df
condition2 = (df_data.index == name)
# edit first of existing data that meets condition
if len(df_data[condition & condition2]) > 0: #we have one or more records to update or delete
# list of all rows where condition is true and index == name
inds = df_data[condition & condition2]['num']
#inds = df_data[condition]['num'] # list of all rows where condition is true
existing_data = dict(df_data.iloc[inds.iloc[0]]) # get first record of existing_data from dataframe
existing_data.update(new_data) # update existing data with new interpretations
# update row
self.update_row(inds.iloc[0], existing_data)
# now remove all the remaining records of same condition
if len(inds) > 1:
for ind in inds[1:]:
print("deleting redundant records for:", name)
df_data = self.delete_row(ind)
else:
if update_only:
print("no record found for that condition, not updating ", name)
else:
print('no record found - creating new one for ', name)
# add new row
df_data = self.add_row(name, new_data)
# sort so that all rows for an item are together
df_data.sort_index(inplace=True)
# redo temporary index
df_data['num'] = list(range(len(df_data)))
self.df = df_data
return df_data | python | def update_record(self, name, new_data, condition, update_only=False,
debug=False):
"""
Find the first row in self.df with index == name
and condition == True.
Update that record with new_data, then delete any
additional records where index == name and condition == True.
Change is inplace
"""
# add numeric index column temporarily
self.df['num'] = list(range(len(self.df)))
df_data = self.df
condition2 = (df_data.index == name)
# edit first of existing data that meets condition
if len(df_data[condition & condition2]) > 0: #we have one or more records to update or delete
# list of all rows where condition is true and index == name
inds = df_data[condition & condition2]['num']
#inds = df_data[condition]['num'] # list of all rows where condition is true
existing_data = dict(df_data.iloc[inds.iloc[0]]) # get first record of existing_data from dataframe
existing_data.update(new_data) # update existing data with new interpretations
# update row
self.update_row(inds.iloc[0], existing_data)
# now remove all the remaining records of same condition
if len(inds) > 1:
for ind in inds[1:]:
print("deleting redundant records for:", name)
df_data = self.delete_row(ind)
else:
if update_only:
print("no record found for that condition, not updating ", name)
else:
print('no record found - creating new one for ', name)
# add new row
df_data = self.add_row(name, new_data)
# sort so that all rows for an item are together
df_data.sort_index(inplace=True)
# redo temporary index
df_data['num'] = list(range(len(df_data)))
self.df = df_data
return df_data | Find the first row in self.df with index == name
and condition == True.
Update that record with new_data, then delete any
additional records where index == name and condition == True.
Change is inplace | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1752-L1791 |
PmagPy/PmagPy | pmagpy/contribution_builder.py | MagicDataFrame.front_and_backfill | def front_and_backfill(self, cols, inplace=True):
"""
Groups dataframe by index name then replaces null values in selected
columns with front/backfilled values if available.
Changes self.df in place when inplace=True (the default).
Parameters
----------
self : MagicDataFrame
cols : array-like
list of column names
Returns
---------
self.df
"""
cols = list(cols)
for col in cols:
if col not in self.df.columns: self.df[col] = np.nan
short_df = self.df[cols]
# horrible, bizarre hack to test for pandas malfunction
tester = short_df.groupby(short_df.index, sort=False).ffill()
if not_null(tester):
short_df = short_df.groupby(short_df.index, sort=False).ffill().groupby(short_df.index, sort=False).bfill()
else:
print('-W- Was not able to front/back fill table {} with these columns: {}'.format(self.dtype, ', '.join(cols)))
if inplace:
self.df[cols] = short_df[cols]
return self.df
return short_df | python | def front_and_backfill(self, cols, inplace=True):
"""
Groups dataframe by index name then replaces null values in selected
columns with front/backfilled values if available.
Changes self.df in place when inplace=True (the default).
Parameters
----------
self : MagicDataFrame
cols : array-like
list of column names
Returns
---------
self.df
"""
cols = list(cols)
for col in cols:
if col not in self.df.columns: self.df[col] = np.nan
short_df = self.df[cols]
# horrible, bizarre hack to test for pandas malfunction
tester = short_df.groupby(short_df.index, sort=False).ffill()
if not_null(tester):
short_df = short_df.groupby(short_df.index, sort=False).ffill().groupby(short_df.index, sort=False).bfill()
else:
print('-W- Was not able to front/back fill table {} with these columns: {}'.format(self.dtype, ', '.join(cols)))
if inplace:
self.df[cols] = short_df[cols]
return self.df
return short_df | Groups dataframe by index name then replaces null values in selected
columns with front/backfilled values if available.
Changes self.df in place when inplace=True (the default).
Parameters
----------
self : MagicDataFrame
cols : array-like
list of column names
Returns
---------
self.df | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1793-L1822 |
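
The per-group fill reduces to chained groupby ffill/bfill. A sketch using the index as the grouping key and arbitrary values, written with the modern .ffill()/.bfill() spelling used above:

import numpy as np
import pandas as pd

df = pd.DataFrame({'lat': [np.nan, 11.0, np.nan, np.nan]},
                  index=['s1', 's1', 's1', 's2'])
filled = (df.groupby(df.index, sort=False).ffill()
            .groupby(df.index, sort=False).bfill())
print(filled['lat'].tolist())  # [11.0, 11.0, 11.0, nan]; 's2' has no data
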
PmagPy/PmagPy | pmagpy/contribution_builder.py | MagicDataFrame.sort_dataframe_cols | def sort_dataframe_cols(self):
"""
Sort self.df so that self.name is the first column,
and the rest of the columns are sorted by group.
"""
# get the group for each column
cols = self.df.columns
groups = list(map(lambda x: self.data_model.get_group_for_col(self.dtype, x), cols))
sorted_cols = cols.groupby(groups)
ordered_cols = []
# put names first
try:
names = sorted_cols.pop('Names')
except KeyError:
names = []
ordered_cols.extend(list(names))
no_group = []
# remove ungrouped columns
if '' in sorted_cols:
no_group = sorted_cols.pop('')
# flatten list of columns
for k in sorted(sorted_cols):
ordered_cols.extend(sorted(sorted_cols[k]))
# add back in ungrouped columns
ordered_cols.extend(no_group)
# put name first
try:
if self.name in ordered_cols:
ordered_cols.remove(self.name)
ordered_cols[:0] = [self.name]
except AttributeError:
pass
#
self.df = self.df[ordered_cols]
return self.df | python | def sort_dataframe_cols(self):
"""
Sort self.df so that self.name is the first column,
and the rest of the columns are sorted by group.
"""
# get the group for each column
cols = self.df.columns
groups = list(map(lambda x: self.data_model.get_group_for_col(self.dtype, x), cols))
sorted_cols = cols.groupby(groups)
ordered_cols = []
# put names first
try:
names = sorted_cols.pop('Names')
except KeyError:
names = []
ordered_cols.extend(list(names))
no_group = []
# remove ungrouped columns
if '' in sorted_cols:
no_group = sorted_cols.pop('')
# flatten list of columns
for k in sorted(sorted_cols):
ordered_cols.extend(sorted(sorted_cols[k]))
# add back in ungrouped columns
ordered_cols.extend(no_group)
# put name first
try:
if self.name in ordered_cols:
ordered_cols.remove(self.name)
ordered_cols[:0] = [self.name]
except AttributeError:
pass
#
self.df = self.df[ordered_cols]
return self.df | Sort self.df so that self.name is the first column,
and the rest of the columns are sorted by group. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1825-L1859 |
PmagPy/PmagPy | pmagpy/contribution_builder.py | MagicDataFrame.find_filled_col | def find_filled_col(self, col_list):
"""
return the first col_name from the list that is both
a. present in self.df.columns and
b. self.df[col_name] has at least one non-null value
Parameters
----------
self: MagicDataFrame
col_list : iterable
list of columns to check
Returns
----------
col_name : str
"""
for col in col_list:
if col in self.df.columns:
if not all([is_null(val, False) for val in self.df[col]]):
return col | python | def find_filled_col(self, col_list):
"""
return the first col_name from the list that is both
a. present in self.df.columns and
b. self.df[col_name] has at least one non-null value
Parameters
----------
self: MagicDataFrame
col_list : iterable
list of columns to check
Returns
----------
col_name : str
"""
for col in col_list:
if col in self.df.columns:
if not all([is_null(val, False) for val in self.df[col]]):
return col | return the first col_name from the list that is both
a. present in self.df.columns and
b. self.df[col_name] has at least one non-null value
Parameters
----------
self: MagicDataFrame
col_list : iterable
list of columns to check
Returns
----------
col_name : str | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1867-L1886 |
PmagPy/PmagPy | pmagpy/contribution_builder.py | MagicDataFrame.convert_to_pmag_data_list | def convert_to_pmag_data_list(self, lst_or_dict="lst", df=None):
"""
Take MagicDataFrame and turn it into a list of dictionaries.
This will have the same format as reading in a 2.5 file
with pmag.magic_read(), i.e.:
if "lst":
[{"sample": "samp_name", "azimuth": 12, ...}, {...}]
if "dict":
{"samp_name": {"azimuth": 12, ...}, "samp_name2": {...}, ...}
NOTE: "dict" not recommended with 3.0, as one sample can have
many rows, which means that dictionary items can be overwritten
"""
if df is None:
df = self.df
# replace np.nan / None with ""
df = df.where(df.notnull(), "")
# string-i-fy everything
df = df.astype(str)
if lst_or_dict == "lst":
return list(df.T.apply(dict))
else:
return {str(i[df.index.name.split(' ')[0]]): dict(i) for i in list(df.T.apply(dict))} | python | def convert_to_pmag_data_list(self, lst_or_dict="lst", df=None):
"""
Take MagicDataFrame and turn it into a list of dictionaries.
This will have the same format as reading in a 2.5 file
with pmag.magic_read(), i.e.:
if "lst":
[{"sample": "samp_name", "azimuth": 12, ...}, {...}]
if "dict":
{"samp_name": {"azimuth": 12, ...}, "samp_name2": {...}, ...}
NOTE: "dict" not recommended with 3.0, as one sample can have
many rows, which means that dictionary items can be overwritten
"""
if df is None:
df = self.df
# replace np.nan / None with ""
df = df.where(df.notnull(), "")
# string-i-fy everything
df = df.astype(str)
if lst_or_dict == "lst":
return list(df.T.apply(dict))
else:
return {str(i[df.index.name.split(' ')[0]]): dict(i) for i in list(df.T.apply(dict))} | Take MagicDataFrame and turn it into a list of dictionaries.
This will have the same format as reading in a 2.5 file
with pmag.magic_read(), i.e.:
if "lst":
[{"sample": "samp_name", "azimuth": 12, ...}, {...}]
if "dict":
{"samp_name": {"azimuth": 12, ...}, "samp_name2": {...}, ...}
NOTE: "dict" not recommended with 3.0, as one sample can have
many rows, which means that dictionary items can be overwritten | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1888-L1911 |
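
A sketch of the frame-to-records conversion with one invented row; note how nulls become empty strings before everything is stringified:

import numpy as np
import pandas as pd

df = pd.DataFrame({'sample': ['samp1'], 'azimuth': [12.0], 'dip': [np.nan]})
df = df.where(df.notnull(), "").astype(str)  # null -> "", then stringify
print(list(df.T.apply(dict)))
# [{'sample': 'samp1', 'azimuth': '12.0', 'dip': ''}]
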
PmagPy/PmagPy | pmagpy/contribution_builder.py | MagicDataFrame.get_name | def get_name(self, col_name, df_slice="", index_names=""):
"""
Takes in a column name, and either a DataFrame slice or
a list of index_names to slice self.df using fancy indexing.
Then return the value for that column in the relevant slice.
(Assumes that all values for column will be the same in the
chosen slice, so return the first one.)
"""
# if slice is provided, use it
if any(df_slice):
df_slice = df_slice
# if given index_names, grab a slice using fancy indexing
elif index_names:
df_slice = self.df.loc[index_names]
# otherwise, use the full DataFrame
else:
df_slice = self.df
# if the slice is empty, return ""
if len(df_slice) == 0:
return ""
# if the column name isn't present in the slice, return ""
if col_name not in df_slice.columns:
return ""
# otherwise, return the first value from that column
first_val = list(df_slice[col_name].dropna())
if any(first_val):
return first_val[0]
else:
return "" | python | def get_name(self, col_name, df_slice="", index_names=""):
"""
Takes in a column name, and either a DataFrame slice or
a list of index_names to slice self.df using fancy indexing.
Then return the value for that column in the relevant slice.
(Assumes that all values for column will be the same in the
chosen slice, so return the first one.)
"""
# if slice is provided, use it
if any(df_slice):
df_slice = df_slice
# if given index_names, grab a slice using fancy indexing
elif index_names:
df_slice = self.df.loc[index_names]
# otherwise, use the full DataFrame
else:
df_slice = self.df
# if the slice is empty, return ""
if len(df_slice) == 0:
return ""
# if the column name isn't present in the slice, return ""
if col_name not in df_slice.columns:
return ""
# otherwise, return the first value from that column
first_val = list(df_slice[col_name].dropna())
if any(first_val):
return first_val[0]
else:
return "" | Takes in a column name, and either a DataFrame slice or
a list of index_names to slice self.df using fancy indexing.
Then return the value for that column in the relevant slice.
(Assumes that all values for column will be the same in the
chosen slice, so return the first one.) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1913-L1941 |
PmagPy/PmagPy | pmagpy/contribution_builder.py | MagicDataFrame.get_di_block | def get_di_block(self, df_slice=None, do_index=False,
item_names=None, tilt_corr='100',
excl=None, ignore_tilt=False):
"""
Input either a DataFrame slice
or
do_index=True and a list of index_names.
Optional arguments:
Provide tilt_corr (default 100).
Excl is a list of method codes to exclude.
Output dec/inc from the slice in this format:
[[dec1, inc1], [dec2, inc2], ...].
Not inplace
"""
tilt_corr = int(tilt_corr)
if isinstance(df_slice, str):
if df_slice.lower() == "all":
# use entire DataFrame
df_slice = self.df
elif do_index:
# use fancy indexing (but note this will give duplicates)
df_slice = self.df.loc[item_names]
elif not do_index:
# otherwise use the provided slice
df_slice = df_slice
# once you have the slice, fix up the data
# tilt correction must match
if not ignore_tilt:
if tilt_corr != 0:
df_slice = df_slice[df_slice['dir_tilt_correction'] == tilt_corr]
else:
# if geographic ("0"),
# use records with no tilt_corr and assume geographic
cond1 = df_slice['dir_tilt_correction'].isnull()
cond2 = df_slice['dir_tilt_correction'] == tilt_corr
df_slice = df_slice[cond1 | cond2]
# exclude data with unwanted codes
if excl:
for ex in excl:
df_slice = self.get_records_for_code(ex, incl=False,
use_slice=True,
sli=df_slice)
df_slice = df_slice[df_slice['dir_inc'].notnull() & df_slice['dir_dec'].notnull()]
# possible add in:
# split out di_block from this study from di_block from other studies (in citations column)
# previously just used "This study", but it is no longer required
#if 'citations' in df_slice.columns:
# df_slice = df_slice[df_slice['citations'].str.contains("This study")]
# convert values into DIblock format
di_block = [[float(row['dir_dec']), float(row['dir_inc'])] for ind, row in df_slice.iterrows()]
return di_block | python | def get_di_block(self, df_slice=None, do_index=False,
item_names=None, tilt_corr='100',
excl=None, ignore_tilt=False):
"""
Input either a DataFrame slice
or
do_index=True and a list of index_names.
Optional arguments:
Provide tilt_corr (default 100).
Excl is a list of method codes to exclude.
Output dec/inc from the slice in this format:
[[dec1, inc1], [dec2, inc2], ...].
Not inplace
"""
tilt_corr = int(tilt_corr)
if isinstance(df_slice, str):
if df_slice.lower() == "all":
# use entire DataFrame
df_slice = self.df
elif do_index:
# use fancy indexing (but note this will give duplicates)
df_slice = self.df.loc[item_names]
elif not do_index:
# otherwise use the provided slice
df_slice = df_slice
# once you have the slice, fix up the data
# tilt correction must match
if not ignore_tilt:
if tilt_corr != 0:
df_slice = df_slice[df_slice['dir_tilt_correction'] == tilt_corr]
else:
# if geographic ("0"),
# use records with no tilt_corr and assume geographic
cond1 = df_slice['dir_tilt_correction'].isnull()
cond2 = df_slice['dir_tilt_correction'] == tilt_corr
df_slice = df_slice[cond1 | cond2]
# exclude data with unwanted codes
if excl:
for ex in excl:
df_slice = self.get_records_for_code(ex, incl=False,
use_slice=True,
sli=df_slice)
df_slice = df_slice[df_slice['dir_inc'].notnull() & df_slice['dir_dec'].notnull()]
# possible add in:
# split out di_block from this study from di_block from other studies (in citations column)
# previously just used "This study", but it is no longer required
#if 'citations' in df_slice.columns:
# df_slice = df_slice[df_slice['citations'].str.contains("This study")]
# convert values into DIblock format
di_block = [[float(row['dir_dec']), float(row['dir_inc'])] for ind, row in df_slice.iterrows()]
return di_block | Input either a DataFrame slice
or
do_index=True and a list of index_names.
Optional arguments:
Provide tilt_corr (default 100).
Excl is a list of method codes to exclude.
Output dec/inc from the slice in this format:
[[dec1, inc1], [dec2, inc2], ...].
Not inplace | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L1945-L1998 |
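A minimal pandas sketch of the core filtering get_di_block performs (match the tilt correction, drop rows missing dec/inc, emit [[dec, inc], ...]); the data below are invented for illustration:
import pandas as pd
df = pd.DataFrame({'dir_dec': [10.0, 12.5, None],
                   'dir_inc': [45.0, 47.0, 50.0],
                   'dir_tilt_correction': [100, 100, 0]})
tilt_corr = 100
sli = df[df['dir_tilt_correction'] == tilt_corr]
sli = sli[sli['dir_inc'].notnull() & sli['dir_dec'].notnull()]
di_block = [[float(row['dir_dec']), float(row['dir_inc'])]
            for ind, row in sli.iterrows()]
print(di_block)  # [[10.0, 45.0], [12.5, 47.0]]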
PmagPy/PmagPy | pmagpy/contribution_builder.py | MagicDataFrame.get_records_for_code | def get_records_for_code(self, meth_code, incl=True, use_slice=False,
sli=None, strict_match=True):
"""
Use regex to see if meth_code is in the method_codes ":" delimited list.
If incl == True, return all records WITH meth_code.
If incl == False, return all records WITHOUT meth_code.
If strict_match == True, return only records with the exact meth_code.
If strict_match == False, return records that contain the meth_code partial string
(e.g., "DE-").
Not inplace
"""
# (must use fillna to replace np.nan with False for indexing)
if use_slice:
df = sli.copy()
else:
df = self.df.copy()
# if meth_code not provided, return unchanged dataframe
if not meth_code:
return df
# get regex
if not strict_match:
# grab any record that contains any part of meth_code
cond = df['method_codes'].str.contains(meth_code).fillna(False)
else:
# grab only an exact match
pattern = re.compile(r'{}(?=:|\s|\Z)'.format(meth_code))
cond = df['method_codes'].str.contains(pattern).fillna(False)
if incl:
# return a copy of records with that method code:
return df[cond]
else:
# return a copy of records without that method code
return df[~cond] | python | def get_records_for_code(self, meth_code, incl=True, use_slice=False,
sli=None, strict_match=True):
"""
Use regex to see if meth_code is in the method_codes ":" delimited list.
If incl == True, return all records WITH meth_code.
If incl == False, return all records WITHOUT meth_code.
If strict_match == True, return only records with the exact meth_code.
If strict_match == False, return records that contain the meth_code partial string
(e.g., "DE-").
Not inplace
"""
# (must use fillna to replace np.nan with False for indexing)
if use_slice:
df = sli.copy()
else:
df = self.df.copy()
# if meth_code not provided, return unchanged dataframe
if not meth_code:
return df
# get regex
if not strict_match:
# grab any record that contains any part of meth_code
cond = df['method_codes'].str.contains(meth_code).fillna(False)
else:
# grab only an exact match
pattern = re.compile(r'{}(?=:|\s|\Z)'.format(meth_code))
cond = df['method_codes'].str.contains(pattern).fillna(False)
if incl:
# return a copy of records with that method code:
return df[cond]
else:
# return a copy of records without that method code
return df[~cond] | Use regex to see if meth_code is in the method_codes ":" delimited list.
If incl == True, return all records WITH meth_code.
If incl == False, return all records WITHOUT meth_code.
If strict_match == True, return only records with the exact meth_code.
If strict_match == False, return records that contain the meth_code partial string
(e.g., "DE-").
Not inplace | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L2001-L2033 |
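The strict-match regex above rewards a worked example: the lookahead in '{}(?=:|\s|\Z)' accepts the code only when it is followed by a colon, whitespace, or end of string, so 'LP-DIR' does not match inside 'LP-DIR-AF'. A standalone sketch with made-up method codes:
import re
import pandas as pd
codes = pd.Series(['LP-DIR-AF:LT-AF-Z', 'LP-DIR:LT-AF-Z', None])
pattern = re.compile(r'LP-DIR(?=:|\s|\Z)')
cond = codes.str.contains(pattern).fillna(False)
print(list(cond))  # [False, True, False]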
PmagPy/PmagPy | pmagpy/contribution_builder.py | MagicDataFrame.merge_dfs | def merge_dfs(self, df1):
"""
Description: takes new calculated data and replaces the corresponding data in self.df with the new input data, preserving the most important metadata if they are not otherwise saved. Note this does not mutate self.df; it simply returns the merged DataFrame. If you want to replace self.df, you'll have to do that yourself.
@param: df1 - first DataFrame whose data will preferentially be used.
"""
if self.df.empty:
return df1
elif df1.empty:
return self.df
#copy to prevent mutation
cdf2 = self.df.copy()
#split data into types and decide which to replace
# if replace_dir_or_int == 'dir' and 'method_codes' in cdf2.columns:
# cdf2 = cdf2[cdf2['method_codes'].notnull()]
# acdf2 = cdf2[cdf2['method_codes'].str.contains('LP-PI')]
# mcdf2 = cdf2[cdf2['method_codes'].str.contains('LP-DIR')]
# elif replace_dir_or_int == 'int' and 'method_codes' in cdf2.columns:
# cdf2 = cdf2[cdf2['method_codes'].notnull()]
# mcdf2 = cdf2[cdf2['method_codes'].str.contains('LP-PI')]
# acdf2 = cdf2[cdf2['method_codes'].str.contains('LP-DIR')]
# else:
# mcdf2 = cdf2
# acdf2 = pd.DataFrame(columns=mcdf2.columns)
#drop columns duplicated between the two frames (currently disabled)
# [mcdf2.drop(cx,inplace=True,axis=1) for cx in mcdf2.columns if cx in df1.columns]
#join the new calculated data with the old data of same type
if self.dtype.endswith('s'): dtype = self.dtype[:-1]
else: dtype = self.dtype
index_name = dtype + "_name"
for df in [df1, cdf2]:
df.index.name = index_name
mdf = df1.join(cdf2, how='outer', rsuffix='_remove', on=index_name)
def keep_non_null_vals(column):
extra_column = column + "_remove"
if column in mdf.columns and extra_column in mdf.columns:
mdf[column] = np.where(mdf[column].apply(lambda x: not_null(x, False)), mdf[column], mdf[extra_column])
# merge values in the following columns
# e.g., combine info from specimen + specimen_remove into specimen column
for col in ['specimen', 'sample', 'site', 'location', 'lat', 'lon']:
keep_non_null_vals(col)
#drop duplicate columns if they were created
[mdf.drop(col,inplace=True,axis=1) for col in mdf.columns if col.endswith("_remove")]
#the outer join can duplicate rows, so drop them
mdf.drop_duplicates(inplace=True,subset=[col for col in mdf.columns if col != 'description'])
#merge the data of the other type with the new data
# mdf = mdf.merge(acdf2, how='outer')
if dtype in mdf.columns:
#fix the index, which pandas reset during the join
mdf = mdf.set_index(dtype)
#set_index consumed the column, so restore it
mdf[dtype] = mdf.index
mdf.index.name = index_name
mdf.sort_index(inplace=True)
return mdf | python | def merge_dfs(self, df1):
"""
Description: takes new calculated data and replaces the corresponding data in self.df with the new input data, preserving the most important metadata if they are not otherwise saved. Note this does not mutate self.df; it simply returns the merged DataFrame. If you want to replace self.df, you'll have to do that yourself.
@param: df1 - first DataFrame whose data will preferentially be used.
"""
if self.df.empty:
return df1
elif df1.empty:
return self.df
#copy to prevent mutation
cdf2 = self.df.copy()
#split data into types and decide which to replace
# if replace_dir_or_int == 'dir' and 'method_codes' in cdf2.columns:
# cdf2 = cdf2[cdf2['method_codes'].notnull()]
# acdf2 = cdf2[cdf2['method_codes'].str.contains('LP-PI')]
# mcdf2 = cdf2[cdf2['method_codes'].str.contains('LP-DIR')]
# elif replace_dir_or_int == 'int' and 'method_codes' in cdf2.columns:
# cdf2 = cdf2[cdf2['method_codes'].notnull()]
# mcdf2 = cdf2[cdf2['method_codes'].str.contains('LP-PI')]
# acdf2 = cdf2[cdf2['method_codes'].str.contains('LP-DIR')]
# else:
# mcdf2 = cdf2
# acdf2 = pd.DataFrame(columns=mcdf2.columns)
#drop columns duplicated between the two frames (currently disabled)
# [mcdf2.drop(cx,inplace=True,axis=1) for cx in mcdf2.columns if cx in df1.columns]
#join the new calculated data with the old data of same type
if self.dtype.endswith('s'): dtype = self.dtype[:-1]
else: dtype = self.dtype
index_name = dtype + "_name"
for df in [df1, cdf2]:
df.index.name = index_name
mdf = df1.join(cdf2, how='outer', rsuffix='_remove', on=index_name)
def keep_non_null_vals(column):
extra_column = column + "_remove"
if column in mdf.columns and extra_column in mdf.columns:
mdf[column] = np.where(mdf[column].apply(lambda x: not_null(x, False)), mdf[column], mdf[extra_column])
# merge values in the following columns
# e.g., combine info from specimen + specimen_remove into specimen column
for col in ['specimen', 'sample', 'site', 'location', 'lat', 'lon']:
keep_non_null_vals(col)
#drop duplicate columns if they were created
[mdf.drop(col,inplace=True,axis=1) for col in mdf.columns if col.endswith("_remove")]
#the outer join can duplicate rows, so drop them
mdf.drop_duplicates(inplace=True,subset=[col for col in mdf.columns if col != 'description'])
#merge the data of the other type with the new data
# mdf = mdf.merge(acdf2, how='outer')
if dtype in mdf.columns:
#fix the index, which pandas reset during the join
mdf = mdf.set_index(dtype)
#set_index consumed the column, so restore it
mdf[dtype] = mdf.index
mdf.index.name = index_name
mdf.sort_index(inplace=True)
return mdf | Description: takes new calculated data and replaces the corresponding data in self.df with the new input data, preserving the most important metadata if they are not otherwise saved. Note this does not mutate self.df; it simply returns the merged DataFrame. If you want to replace self.df, you'll have to do that yourself.
@param: df1 - first DataFrame whose data will preferentially be used. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L2038-L2097 |
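A self-contained sketch of the merge strategy above: outer-join two frames with rsuffix='_remove', keep the left value where it is non-null, and fall back to the right; the column and index names are illustrative:
import numpy as np
import pandas as pd
new = pd.DataFrame({'lat': [10.0, None]}, index=['s1', 's2'])
old = pd.DataFrame({'lat': [9.5, 9.9]}, index=['s1', 's2'])
mdf = new.join(old, how='outer', rsuffix='_remove')
# prefer the new (left) value; fall back to the old one where it is null
mdf['lat'] = np.where(mdf['lat'].notnull(), mdf['lat'], mdf['lat_remove'])
mdf = mdf.drop(columns=[c for c in mdf.columns if c.endswith('_remove')])
print(mdf)  # s1 -> 10.0, s2 -> 9.9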
PmagPy/PmagPy | pmagpy/contribution_builder.py | MagicDataFrame.write_magic_file | def write_magic_file(self, custom_name=None, dir_path=".",
append=False, multi_type=False, df=None):
"""
Write self.df out to tab-delimited file.
By default will use standard MagIC filenames (specimens.txt, etc.),
or you can provide a custom_name to write to instead.
By default will write to custom_name if custom_name is a full path,
or will write to dir_path + custom_name if custom_name
is not a full path.
Parameters
----------
self : MagIC DataFrame
custom_name : str
custom file name
dir_path : str
dir_path (used if custom_name is not a full path), default "."
append : bool
append to existing file, default False
multi_type : bool
for creating upload file
Returns
--------
fname : str
output file name
"""
# don't let custom name start with "./"
if custom_name:
if custom_name.startswith('.'):
custom_name = os.path.split(custom_name)[1]
# put columns in logical order (by group)
self.sort_dataframe_cols()
# if indexing column was put in, remove it
if "num" in self.df.columns:
self.df = self.df.drop("num", axis=1)
#
# make sure name is a string
name = self.get_singular_and_plural_dtype(self.dtype)[0]
if name in self.df.columns:
self.df[name] = self.df[name].astype(str)
#
if df is None:
df = self.df
# get full file path
dir_path = os.path.realpath(dir_path)
if custom_name:
fname = pmag.resolve_file_name(custom_name, dir_path) # os.path.join(dir_path, custom_name)
elif self.magic_file:
fname = pmag.resolve_file_name(self.magic_file, dir_path)
else:
fname = os.path.join(dir_path, self.dtype + ".txt")
# see if there's any data
if not len(df):
print('-W- No data to write to {}'.format(fname))
return False
# add to existing file
if append:
print('-I- appending {} data to {}'.format(self.dtype, fname))
mode = "a"
# overwrite existing file
elif os.path.exists(fname):
print('-I- overwriting {}'.format(fname))
mode = "w"
# or create new file
else:
print('-I- writing {} records to {}'.format(self.dtype, fname))
mode = "w"
f = open(fname, mode)
if append:
header = False
if multi_type:
header = True
f.write('tab\t{}\n'.format(self.dtype))
f.flush()
df.to_csv(f, sep="\t", header=header, index=False, mode='a')
else:
f.write('tab\t{}\n'.format(self.dtype))
f.flush()
df.to_csv(f, sep="\t", header=True, index=False, mode='a')
print('-I- {} records written to {} file'.format(len(df), self.dtype))
f.close()
return fname | python | def write_magic_file(self, custom_name=None, dir_path=".",
append=False, multi_type=False, df=None):
"""
Write self.df out to tab-delimited file.
By default will use standard MagIC filenames (specimens.txt, etc.),
or you can provide a custom_name to write to instead.
By default will write to custom_name if custom_name is a full path,
or will write to dir_path + custom_name if custom_name
is not a full path.
Parameters
----------
self : MagIC DataFrame
custom_name : str
custom file name
dir_path : str
dir_path (used if custom_name is not a full path), default "."
append : bool
append to existing file, default False
multi_type : bool
for creating upload file
Returns
--------
fname : str
output file name
"""
# don't let custom name start with "./"
if custom_name:
if custom_name.startswith('.'):
custom_name = os.path.split(custom_name)[1]
# put columns in logical order (by group)
self.sort_dataframe_cols()
# if indexing column was put in, remove it
if "num" in self.df.columns:
self.df = self.df.drop("num", axis=1)
#
# make sure name is a string
name = self.get_singular_and_plural_dtype(self.dtype)[0]
if name in self.df.columns:
self.df[name] = self.df[name].astype(str)
#
if df is None:
df = self.df
# get full file path
dir_path = os.path.realpath(dir_path)
if custom_name:
fname = pmag.resolve_file_name(custom_name, dir_path) # os.path.join(dir_path, custom_name)
elif self.magic_file:
fname = pmag.resolve_file_name(self.magic_file, dir_path)
else:
fname = os.path.join(dir_path, self.dtype + ".txt")
# see if there's any data
if not len(df):
print('-W- No data to write to {}'.format(fname))
return False
# add to existing file
if append:
print('-I- appending {} data to {}'.format(self.dtype, fname))
mode = "a"
# overwrite existing file
elif os.path.exists(fname):
print('-I- overwriting {}'.format(fname))
mode = "w"
# or create new file
else:
print('-I- writing {} records to {}'.format(self.dtype, fname))
mode = "w"
f = open(fname, mode)
if append:
header = False
if multi_type:
header = True
f.write('tab\t{}\n'.format(self.dtype))
f.flush()
df.to_csv(f, sep="\t", header=header, index=False, mode='a')
else:
f.write('tab\t{}\n'.format(self.dtype))
f.flush()
df.to_csv(f, sep="\t", header=True, index=False, mode='a')
print('-I- {} records written to {} file'.format(len(df), self.dtype))
f.close()
return fname | Write self.df out to tab-delimited file.
By default will use standard MagIC filenames (specimens.txt, etc.),
or you can provide a custom_name to write to instead.
By default will write to custom_name if custom_name is a full path,
or will write to dir_path + custom_name if custom_name
is not a full path.
Parameters
----------
self : MagIC DataFrame
custom_name : str
custom file name
dir_path : str
dir_path (used if custom_name is not a full path), default "."
append : bool
append to existing file, default False
multi_type : bool
for creating upload file
Returns
--------
fname : str
output file name | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L2102-L2184 |
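For reference, write_magic_file produces a one-line 'tab<TAB>table-name' header followed by a tab-delimited table. A minimal sketch that writes the same layout with plain pandas (file and column names are illustrative):
import pandas as pd
df = pd.DataFrame({'site': ['s1', 's2'], 'lat': [10.0, 9.9]})
with open('sites.txt', 'w') as f:
    f.write('tab\tsites\n')  # MagIC header line
    df.to_csv(f, sep='\t', header=True, index=False, mode='a')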
PmagPy/PmagPy | pmagpy/contribution_builder.py | MagicDataFrame.get_non_magic_cols | def get_non_magic_cols(self):
"""
Find all columns in self.df that are not real MagIC 3 columns.
Returns
--------
unrecognized_cols : list
"""
table_dm = self.data_model.dm[self.dtype]
approved_cols = table_dm.index
unrecognized_cols = (set(self.df.columns) - set(approved_cols))
return unrecognized_cols | python | def get_non_magic_cols(self):
"""
Find all columns in self.df that are not real MagIC 3 columns.
Returns
--------
unrecognized_cols : list
"""
table_dm = self.data_model.dm[self.dtype]
approved_cols = table_dm.index
unrecognized_cols = (set(self.df.columns) - set(approved_cols))
return unrecognized_cols | Find all columns in self.df that are not real MagIC 3 columns.
Returns
--------
unrecognized_cols : list | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L2189-L2200 |
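The column check above is plain set arithmetic; a standalone sketch with an invented controlled vocabulary:
approved_cols = {'site', 'lat', 'lon', 'age'}
df_columns = ['site', 'lat', 'my_custom_note']
unrecognized_cols = set(df_columns) - approved_cols
print(unrecognized_cols)  # {'my_custom_note'}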
PmagPy/PmagPy | pmagpy/contribution_builder.py | MagicDataFrame.get_first_non_null_value | def get_first_non_null_value(self, ind_name, col_name):
"""
For a given index and column, find the first non-null value.
Parameters
----------
self : MagicDataFrame
ind_name : str
index name for indexing
col_name : str
column name for indexing
Returns
---------
single value of str, float, or int
"""
short_df = self.df.loc[ind_name, col_name]
mask = pd.notnull(short_df)
try:
val = short_df[mask].unique()[0]
except IndexError:
val = None
return val | python | def get_first_non_null_value(self, ind_name, col_name):
"""
For a given index and column, find the first non-null value.
Parameters
----------
self : MagicDataFrame
ind_name : str
index name for indexing
col_name : str
column name for indexing
Returns
---------
single value of str, float, or int
"""
short_df = self.df.loc[ind_name, col_name]
mask = pd.notnull(short_df)
try:
val = short_df[mask].unique()[0]
except IndexError:
val = None
return val | For a given index and column, find the first non-null value.
Parameters
----------
self : MagicDataFrame
ind_name : str
index name for indexing
col_name : str
column name for indexing
Returns
---------
single value of str, float, or int | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L2203-L2226 |
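A pandas sketch of the masking step above; note that the .loc lookup can return several rows for a repeated index, which is why unique()[0] is needed. Data are invented:
import pandas as pd
df = pd.DataFrame({'azimuth': [None, 30.0, 15.0]},
                  index=['samp1', 'samp1', 'samp2'])
short_df = df.loc['samp1', 'azimuth']   # two rows for the repeated index
mask = pd.notnull(short_df)
try:
    val = short_df[mask].unique()[0]
except IndexError:
    val = None
print(val)  # 30.0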
PmagPy/PmagPy | pmagpy/contribution_builder.py | MagicDataFrame.get_singular_and_plural_dtype | def get_singular_and_plural_dtype(self, dtype):
"""
Parameters
----------
dtype : str
MagIC table type (specimens, samples, contribution, etc.)
Returns
---------
name : str
singular name for MagIC table ('specimen' for specimens table, etc.)
dtype : str
plural dtype for MagIC table ('specimens' for specimens table, etc.)
"""
dtype = dtype.strip()
if dtype.endswith('s'):
return dtype[:-1], dtype
elif dtype == 'criteria':
return 'table_column', 'criteria'
elif dtype == 'contribution':
return 'doi', 'contribution'
# fall back to the name unchanged for any other table
return dtype, dtype | python | def get_singular_and_plural_dtype(self, dtype):
"""
Parameters
----------
dtype : str
MagIC table type (specimens, samples, contribution, etc.)
Returns
---------
name : str
singular name for MagIC table ('specimen' for specimens table, etc.)
dtype : str
plural dtype for MagIC table ('specimens' for specimens table, etc.)
"""
dtype = dtype.strip()
if dtype.endswith('s'):
return dtype[:-1], dtype
elif dtype == 'criteria':
return 'table_column', 'criteria'
elif dtype == 'contribution':
return 'doi', 'contribution'
# fall back to the name unchanged for any other table
return dtype, dtype | Parameters
----------
dtype : str
MagIC table type (specimens, samples, contribution, etc.)
Returns
---------
name : str
singular name for MagIC table ('specimen' for specimens table, etc.)
dtype : str
plural dtype for MagIC table ('specimens' for specimens table, etc.) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/contribution_builder.py#L2229-L2249 |
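Expected behavior of the method above, shown on a standalone copy (a fallback return is added here so the sketch never returns None implicitly):
def get_singular_and_plural_dtype(dtype):
    # standalone copy of the method above, for illustration
    dtype = dtype.strip()
    if dtype.endswith('s'):
        return dtype[:-1], dtype
    elif dtype == 'criteria':
        return 'table_column', 'criteria'
    elif dtype == 'contribution':
        return 'doi', 'contribution'
    return dtype, dtype  # fallback for unrecognized table names
assert get_singular_and_plural_dtype('specimens') == ('specimen', 'specimens')
assert get_singular_and_plural_dtype('criteria') == ('table_column', 'criteria')
assert get_singular_and_plural_dtype('contribution') == ('doi', 'contribution')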
PmagPy/PmagPy | programs/chi_magic2.py | main | def main():
"""
NAME
chi_magic.py
DESCRIPTION
plots magnetic susceptibility as a function of frequency, temperature, and AC field
SYNTAX
chi_magic.py [command line options]
OPTIONS
-h prints help message and quits
-i allows interactive setting of FILE and temperature step
-f FILE, specify magic_measurements format file
-T IND, specify temperature step to plot
-e EXP, specify experiment name to plot
-fmt [svg,jpg,png,pdf] set figure format [default is svg]
-sav save figure and quit
DEFAULTS
FILE: magic_measurements.txt
IND: first
SPEC: step through one by one
"""
cont, FTinit, BTinit, k = "", 0, 0, 0
meas_file = "magic_measurements.txt"
spec = ""
Tind, cont = 0, ""
EXP = ""
fmt = 'svg' # default image type for saving
plot = 0
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-i' in sys.argv:
fname = input(
"Input magic_measurements file name? [magic_measurements.txt] ")
if fname != "":
meas_file = fname
if '-e' in sys.argv:
ind = sys.argv.index('-e')
EXP = sys.argv[ind+1]
if '-f' in sys.argv:
ind = sys.argv.index('-f')
meas_file = sys.argv[ind+1]
if '-T' in sys.argv:
ind = sys.argv.index('-T')
Tind = int(sys.argv[ind+1])
if '-fmt' in sys.argv:
ind = sys.argv.index('-fmt')
fmt = sys.argv[ind+1]
if '-sav' in sys.argv:
plot = 1
#
meas_data, file_type = pmag.magic_read(meas_file)
#
# get list of unique experiment names
#
# initialize some variables (a continuation flag, plot initialization flags, and the experiment counter)
experiment_names = []
for rec in meas_data:
if rec['magic_experiment_name'] not in experiment_names:
experiment_names.append(rec['magic_experiment_name'])
#
# hunt through by experiment name
if EXP != "":
try:
k = experiment_names.index(EXP)
except:
print("Bad experiment name")
sys.exit()
while k < len(experiment_names):
e = experiment_names[k]
if EXP == "":
print(e, k+1, 'out of ', len(experiment_names))
#
# initialize lists of data, susceptibility, temperature, frequency and field
X, T, F, B = [], [], [], []
for rec in meas_data:
methcodes = rec['magic_method_codes']
meths = methcodes.strip().split(':')
if rec['magic_experiment_name'] == e and "LP-X" in meths: # looking for chi measurement
if 'measurement_temp' not in list(rec.keys()):
rec['measurement_temp'] = '300' # set defaults
if 'measurement_freq' not in list(rec.keys()):
rec['measurement_freq'] = '0' # set defaults
if 'measurement_lab_field_ac' not in list(rec.keys()):
rec['measurement_lab_field_ac'] = '0' # set default
if 'measurement_x' in rec.keys():
# backward compatibility
X.append(float(rec['measurement_x']))
else:
# data model 2.5
X.append(float(rec['measurement_chi_volume']))
T.append(float(rec['measurement_temp']))
F.append(float(rec['measurement_freq']))
B.append(float(rec['measurement_lab_field_ac']))
#
# get unique list of Ts,Fs, and Bs
#
Ts, Fs, Bs = [], [], []
for k in range(len(X)): # hunt through all the measurements
if T[k] not in Ts:
Ts.append(T[k]) # append if not in list
if F[k] not in Fs:
Fs.append(F[k])
if B[k] not in Bs:
Bs.append(B[k])
Ts.sort() # sort list of temperatures, frequencies and fields
Fs.sort()
Bs.sort()
if '-x' in sys.argv:
k = len(experiment_names)+1 # just plot the one
else:
k += 1 # increment experiment number
#
# plot chi versus T and F holding B constant
#
plotnum = 1 # initialize plot number to 1
if len(X) > 2: # if there are any data to plot, continue
b = Bs[-1] # keeping field constant and at maximum
XTF = [] # initialize list of chi versus Temp and freq
for f in Fs: # step through frequencies sequentially
XT = [] # initialize list of chi versus temp
for kk in range(len(X)): # hunt through all the data
if F[kk] == f and B[kk] == b: # select data with given freq and field
XT.append([X[kk], T[kk]]) # append to list
XTF.append(XT) # append list to list of frequencies
if len(XT) > 1: # if there are any temperature dependent data
pmagplotlib.plot_init(plotnum, 5, 5) # initialize plot
# call the plotting function
pmagplotlib.plot_xtf(plotnum, XTF, Fs, e, b)
if plot == 0:
pmagplotlib.draw_figs({'fig': plotnum}) # make it visible
plotnum += 1 # increment plot number
f = Fs[0] # set frequency to minimum
XTB = [] # initialize list of chi versus Temp and field
for b in Bs: # step through field values
XT = [] # initial chi versus temp list for this field
for kk in range(len(X)): # hunt through all the data
if F[kk] == f and B[kk] == b: # select data with given freq and field
XT.append([X[kk], T[kk]]) # append to list
XTB.append(XT)
if len(XT) > 1: # if there are any temperature dependent data
pmagplotlib.plot_init(plotnum, 5, 5) # set up plot
# call the plotting function
pmagplotlib.plot_xtb(plotnum, XTB, Bs, e, f)
if plot == 0:
pmagplotlib.draw_figs({'fig': plotnum})
plotnum += 1 # increment plot number
if '-i' in sys.argv:
for ind in range(len(Ts)): # print list of temperatures available
print(ind, int(Ts[ind]))
cont = input(
"Enter index of desired temperature step, s[a]ve plots, [return] to quit ")
if cont == 'a':
files = {}
PLTS = {}
for p in range(1, plotnum):
key = str(p)
files[key] = e+'_'+key+'.'+fmt
PLTS[key] = key
pmagplotlib.save_plots(PLTS, files)
cont = input(
"Enter index of desired temperature step, s[a]ve plots, [return] to quit ")
if cont == "":
cont = 'q'
while cont != "q":
if '-i' in sys.argv:
Tind = int(cont) # set temperature index
b = Bs[-1] # set field to max available
XF = [] # initial chi versus frequency list
for kk in range(len(X)): # hunt through the data
if T[kk] == Ts[Tind] and B[kk] == b: # if temperature and field match,
XF.append([X[kk], F[kk]]) # append the data
if len(XF) > 1: # if there are any data to plot
if FTinit == 0: # if not already initialized, initialize plot
# print 'initializing ',plotnum
pmagplotlib.plot_init(plotnum, 5, 5)
FTinit = 1
XFplot = plotnum
plotnum += 1 # increment plotnum
pmagplotlib.plot_xft(XFplot, XF, Ts[Tind], e, b)
if plot == 0:
pmagplotlib.draw_figs({'fig': plotnum})
else:
print(
'\n *** Skipping susceptibility-frequency plot as a function of temperature *** \n')
f = Fs[0] # set frequency to minimum available
XB = [] # initialize chi versus field list
for kk in range(len(X)): # hunt through the data
# if temperature and field match those desired
if T[kk] == Ts[Tind] and F[kk] == f:
XB.append([X[kk], B[kk]]) # append the data to list
if len(XB) > 4: # if there are any data
if BTinit == 0: # if plot not already initialized
pmagplotlib.plot_init(plotnum, 5, 5) # do it
BTinit = 1
# and call plotting function
pmagplotlib.plot_xbt(plotnum, XB, Ts[Tind], e, f)
if plot == 0:
pmagplotlib.draw_figs({'fig': plotnum})
else:
print(
'Skipping susceptibility - AC field plot as a function of temperature')
files = {}
PLTS = {}
for p in range(1, plotnum):
key = str(p)
files[key] = e+'_'+key+'.'+fmt
PLTS[key] = p
if '-i' in sys.argv:
# just in case you forgot, print out a new list of temperatures
for ind in range(len(Ts)):
print(ind, int(Ts[ind]))
# ask for new temp
cont = input(
"Enter index of next temperature step, s[a]ve plots, [return] to quit ")
if cont == "":
sys.exit()
if cont == 'a':
pmagplotlib.save_plots(PLTS, files)
cont = input(
"Enter index of desired temperature step, s[a]ve plots, [return] to quit ")
if cont == "":
sys.exit()
elif plot == 0:
ans = input(
"enter s[a]ve to save files, [return] to quit ")
if ans == 'a':
pmagplotlib.save_plots(PLTS, files)
sys.exit()
else:
sys.exit()
else:
pmagplotlib.save_plots(PLTS, files)
sys.exit() | python | def main():
"""
NAME
chi_magic.py
DESCRIPTION
plots magnetic susceptibility as a function of frequency, temperature, and AC field
SYNTAX
chi_magic.py [command line options]
OPTIONS
-h prints help message and quits
-i allows interactive setting of FILE and temperature step
-f FILE, specify magic_measurements format file
-T IND, specify temperature step to plot
-e EXP, specify experiment name to plot
-fmt [svg,jpg,png,pdf] set figure format [default is svg]
-sav save figure and quit
DEFAULTS
FILE: magic_measurements.txt
IND: first
SPEC: step through one by one
"""
cont, FTinit, BTinit, k = "", 0, 0, 0
meas_file = "magic_measurements.txt"
spec = ""
Tind, cont = 0, ""
EXP = ""
fmt = 'svg' # default image type for saving
plot = 0
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-i' in sys.argv:
fname = input(
"Input magic_measurements file name? [magic_measurements.txt] ")
if fname != "":
meas_file = fname
if '-e' in sys.argv:
ind = sys.argv.index('-e')
EXP = sys.argv[ind+1]
if '-f' in sys.argv:
ind = sys.argv.index('-f')
meas_file = sys.argv[ind+1]
if '-T' in sys.argv:
ind = sys.argv.index('-T')
Tind = int(sys.argv[ind+1])
if '-fmt' in sys.argv:
ind = sys.argv.index('-fmt')
fmt = sys.argv[ind+1]
if '-sav' in sys.argv:
plot = 1
#
meas_data, file_type = pmag.magic_read(meas_file)
#
# get list of unique experiment names
#
# initialize some variables (a continuation flag, plot initialization flags, and the experiment counter)
experiment_names = []
for rec in meas_data:
if rec['magic_experiment_name'] not in experiment_names:
experiment_names.append(rec['magic_experiment_name'])
#
# hunt through by experiment name
if EXP != "":
try:
k = experiment_names.index(EXP)
except:
print("Bad experiment name")
sys.exit()
while k < len(experiment_names):
e = experiment_names[k]
if EXP == "":
print(e, k+1, 'out of ', len(experiment_names))
#
# initialize lists of data, susceptibility, temperature, frequency and field
X, T, F, B = [], [], [], []
for rec in meas_data:
methcodes = rec['magic_method_codes']
meths = methcodes.strip().split(':')
if rec['magic_experiment_name'] == e and "LP-X" in meths: # looking for chi measurement
if 'measurement_temp' not in list(rec.keys()):
rec['measurement_temp'] = '300' # set defaults
if 'measurement_freq' not in list(rec.keys()):
rec['measurement_freq'] = '0' # set defaults
if 'measurement_lab_field_ac' not in list(rec.keys()):
rec['measurement_lab_field_ac'] = '0' # set default
if 'measurement_x' in rec.keys():
# backward compatibility
X.append(float(rec['measurement_x']))
else:
# data model 2.5
X.append(float(rec['measurement_chi_volume']))
T.append(float(rec['measurement_temp']))
F.append(float(rec['measurement_freq']))
B.append(float(rec['measurement_lab_field_ac']))
#
# get unique list of Ts,Fs, and Bs
#
Ts, Fs, Bs = [], [], []
for k in range(len(X)): # hunt through all the measurements
if T[k] not in Ts:
Ts.append(T[k]) # append if not in list
if F[k] not in Fs:
Fs.append(F[k])
if B[k] not in Bs:
Bs.append(B[k])
Ts.sort() # sort list of temperatures, frequencies and fields
Fs.sort()
Bs.sort()
if '-x' in sys.argv:
k = len(experiment_names)+1 # just plot the one
else:
k += 1 # increment experiment number
#
# plot chi versus T and F holding B constant
#
plotnum = 1 # initialize plot number to 1
if len(X) > 2: # if there are any data to plot, continue
b = Bs[-1] # keeping field constant and at maximum
XTF = [] # initialize list of chi versus Temp and freq
for f in Fs: # step through frequencies sequentially
XT = [] # initialize list of chi versus temp
for kk in range(len(X)): # hunt through all the data
if F[kk] == f and B[kk] == b: # select data with given freq and field
XT.append([X[kk], T[kk]]) # append to list
XTF.append(XT) # append list to list of frequencies
if len(XT) > 1: # if there are any temperature dependent data
pmagplotlib.plot_init(plotnum, 5, 5) # initialize plot
# call the plotting function
pmagplotlib.plot_xtf(plotnum, XTF, Fs, e, b)
if plot == 0:
pmagplotlib.draw_figs({'fig': plotnum}) # make it visible
plotnum += 1 # increment plot number
f = Fs[0] # set frequency to minimum
XTB = [] # initialize list of chi versus Temp and field
for b in Bs: # step through field values
XT = [] # initial chi versus temp list for this field
for kk in range(len(X)): # hunt through all the data
if F[kk] == f and B[kk] == b: # select data with given freq and field
XT.append([X[kk], T[kk]]) # append to list
XTB.append(XT)
if len(XT) > 1: # if there are any temperature dependent data
pmagplotlib.plot_init(plotnum, 5, 5) # set up plot
# call the plotting function
pmagplotlib.plot_xtb(plotnum, XTB, Bs, e, f)
if plot == 0:
pmagplotlib.draw_figs({'fig': plotnum})
plotnum += 1 # increment plot number
if '-i' in sys.argv:
for ind in range(len(Ts)): # print list of temperatures available
print(ind, int(Ts[ind]))
cont = input(
"Enter index of desired temperature step, s[a]ve plots, [return] to quit ")
if cont == 'a':
files = {}
PLTS = {}
for p in range(1, plotnum):
key = str(p)
files[key] = e+'_'+key+'.'+fmt
PLTS[key] = key
pmagplotlib.save_plots(PLTS, files)
cont = input(
"Enter index of desired temperature step, s[a]ve plots, [return] to quit ")
if cont == "":
cont = 'q'
while cont != "q":
if '-i' in sys.argv:
Tind = int(cont) # set temperature index
b = Bs[-1] # set field to max available
XF = [] # initial chi versus frequency list
for kk in range(len(X)): # hunt through the data
if T[kk] == Ts[Tind] and B[kk] == b: # if temperature and field match,
XF.append([X[kk], F[kk]]) # append the data
if len(XF) > 1: # if there are any data to plot
if FTinit == 0: # if not already initialized, initialize plot
# print 'initializing ',plotnum
pmagplotlib.plot_init(plotnum, 5, 5)
FTinit = 1
XFplot = plotnum
plotnum += 1 # increment plotnum
pmagplotlib.plot_xft(XFplot, XF, Ts[Tind], e, b)
if plot == 0:
pmagplotlib.draw_figs({'fig': plotnum})
else:
print(
'\n *** Skipping susceptibility-frequency plot as a function of temperature *** \n')
f = Fs[0] # set frequency to minimum available
XB = [] # initialize chi versus field list
for kk in range(len(X)): # hunt through the data
# if temperature and field match those desired
if T[kk] == Ts[Tind] and F[kk] == f:
XB.append([X[kk], B[kk]]) # append the data to list
if len(XB) > 4: # if there are any data
if BTinit == 0: # if plot not already initialized
pmagplotlib.plot_init(plotnum, 5, 5) # do it
BTinit = 1
# and call plotting function
pmagplotlib.plot_xbt(plotnum, XB, Ts[Tind], e, f)
if plot == 0:
pmagplotlib.draw_figs({'fig': plotnum})
else:
print(
'Skipping susceptibility - AC field plot as a function of temperature')
files = {}
PLTS = {}
for p in range(1, plotnum):
key = str(p)
files[key] = e+'_'+key+'.'+fmt
PLTS[key] = p
if '-i' in sys.argv:
# just in case you forgot, print out a new list of temperatures
for ind in range(len(Ts)):
print(ind, int(Ts[ind]))
# ask for new temp
cont = input(
"Enter index of next temperature step, s[a]ve plots, [return] to quit ")
if cont == "":
sys.exit()
if cont == 'a':
pmagplotlib.save_plots(PLTS, files)
cont = input(
"Enter index of desired temperature step, s[a]ve plots, [return] to quit ")
if cont == "":
sys.exit()
elif plot == 0:
ans = input(
"enter s[a]ve to save files, [return] to quit ")
if ans == 'a':
pmagplotlib.save_plots(PLTS, files)
sys.exit()
else:
sys.exit()
else:
pmagplotlib.save_plots(PLTS, files)
sys.exit() | NAME
chi_magic.py
DESCRIPTION
plots magnetic susceptibility as a function of frequency, temperature, and AC field
SYNTAX
chi_magic.py [command line options]
OPTIONS
-h prints help message and quits
-i allows interactive setting of FILE and temperature step
-f FILE, specify magic_measurements format file
-T IND, specify temperature step to plot
-e EXP, specify experiment name to plot
-fmt [svg,jpg,png,pdf] set figure format [default is svg]
-sav save figure and quit
DEFAULTS
FILE: magic_measurements.txt
IND: first
SPEC: step through one by one | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/chi_magic2.py#L10-L247 |
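The Ts/Fs/Bs bookkeeping in the function above deduplicates three parallel lists and sorts them; sorted(set(...)) is an equivalent standalone form:
T = [300.0, 300.0, 350.0]
F = [100.0, 1000.0, 100.0]
B = [0.0003, 0.0003, 0.0003]
Ts, Fs, Bs = sorted(set(T)), sorted(set(F)), sorted(set(B))
print(Ts, Fs, Bs)  # [300.0, 350.0] [100.0, 1000.0] [0.0003]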
PmagPy/PmagPy | programs/hysteresis_magic.py | main | def main():
"""
NAME
hysteresis_magic.py
DESCRIPTION
calculates hysteresis parameters and saves them in a 3.0 specimens format file
makes plots if option selected
SYNTAX
hysteresis_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f: specify input file, default is measurements.txt
-F: specify specimens.txt output file
-WD: directory to output files to (default : current directory)
Note: if using Windows, all figures will output to current directory
-ID: directory to read files from (default : same as -WD)
-P: do not make the plots
-spc SPEC: specify specimen name to plot and quit
-sav save all plots and quit
-fmt [png,svg,eps,jpg]
"""
args = sys.argv
fmt = pmag.get_named_arg('-fmt', 'svg')
output_dir_path = pmag.get_named_arg('-WD', '.')
input_dir_path = pmag.get_named_arg('-ID', "")
if "-h" in args:
print(main.__doc__)
sys.exit()
meas_file = pmag.get_named_arg('-f', 'measurements.txt')
spec_file = pmag.get_named_arg('-F', 'specimens.txt')
make_plots = True
save_plots = False
if '-P' in args:
make_plots = False
if '-sav' in args:
save_plots = True
pltspec = pmag.get_named_arg('-spc', 0)
ipmag.hysteresis_magic(output_dir_path, input_dir_path, spec_file, meas_file,
fmt, save_plots, make_plots, pltspec) | python | def main():
"""
NAME
hysteresis_magic.py
DESCRIPTION
calculates hysteresis parameters and saves them in a 3.0 specimens format file
makes plots if option selected
SYNTAX
hysteresis_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f: specify input file, default is measurements.txt
-F: specify specimens.txt output file
-WD: directory to output files to (default : current directory)
Note: if using Windows, all figures will output to current directory
-ID: directory to read files from (default : same as -WD)
-P: do not make the plots
-spc SPEC: specify specimen name to plot and quit
-sav save all plots and quit
-fmt [png,svg,eps,jpg]
"""
args = sys.argv
fmt = pmag.get_named_arg('-fmt', 'svg')
output_dir_path = pmag.get_named_arg('-WD', '.')
input_dir_path = pmag.get_named_arg('-ID', "")
if "-h" in args:
print(main.__doc__)
sys.exit()
meas_file = pmag.get_named_arg('-f', 'measurements.txt')
spec_file = pmag.get_named_arg('-F', 'specimens.txt')
make_plots = True
save_plots = False
if '-P' in args:
make_plots = False
if '-sav' in args:
save_plots = True
pltspec = pmag.get_named_arg('-spc', 0)
ipmag.hysteresis_magic(output_dir_path, input_dir_path, spec_file, meas_file,
fmt, save_plots, make_plots, pltspec) | NAME
hysteresis_magic.py
DESCRIPTION
calculates hysteresis parameters and saves them in a 3.0 specimens format file
makes plots if option selected
SYNTAX
hysteresis_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f: specify input file, default is measurements.txt
-F: specify specimens.txt output file
-WD: directory to output files to (default : current directory)
Note: if using Windows, all figures will output to current directory
-ID: directory to read files from (default : same as -WD)
-P: do not make the plots
-spc SPEC: specify specimen name to plot and quit
-sav save all plots and quit
-fmt [png,svg,eps,jpg] | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/hysteresis_magic.py#L12-L53 |
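Because the wrapper above only forwards to ipmag.hysteresis_magic, the same run can be scripted. The positional order below is copied from the wrapper's call; save_plots is set True here (as the -sav flag would do) so the run does not stop for interactive windows. Treat this as a sketch, not canonical ipmag documentation:
from pmagpy import ipmag
# output dir, input dir, spec file, meas file, fmt, save_plots, make_plots, pltspec
ipmag.hysteresis_magic('.', '', 'specimens.txt', 'measurements.txt',
                       'svg', True, True, 0)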
PmagPy/PmagPy | pmagpy/find_pmag_dir.py | get_data_files_dir | def get_data_files_dir():
"""
Find directory with data_files (sys.prefix or local PmagPy/data_files)
and return the path.
"""
if 'data_files' in os.listdir(sys.prefix):
return os.path.join(sys.prefix, 'data_files')
else:
return os.path.join(get_pmag_dir(), 'data_files') | python | def get_data_files_dir():
"""
Find directory with data_files (sys.prefix or local PmagPy/data_files)
and return the path.
"""
if 'data_files' in os.listdir(sys.prefix):
return os.path.join(sys.prefix, 'data_files')
else:
return os.path.join(get_pmag_dir(), 'data_files') | Find directory with data_files (sys.prefix or local PmagPy/data_files)
and return the path. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/find_pmag_dir.py#L13-L21 |
PmagPy/PmagPy | pmagpy/find_pmag_dir.py | get_pmag_dir | def get_pmag_dir():
"""
Returns directory in which PmagPy is installed
"""
# this is correct for py2exe (DEPRECATED)
#win_frozen = is_frozen()
#if win_frozen:
# path = os.path.abspath(unicode(sys.executable, sys.getfilesystemencoding()))
# path = os.path.split(path)[0]
# return path
# this is correct for py2app
try:
return os.environ['RESOURCEPATH']
# this works for everything else
except KeyError: pass
# new way:
# if we're in the local PmagPy directory:
if os.path.isfile(os.path.join(os.getcwd(), 'pmagpy', 'pmag.py')):
lib_dir = os.path.join(os.getcwd(), 'pmagpy')
# if we're anywhere else:
elif getattr(sys, 'frozen', False): #pyinstaller datafile directory
return sys._MEIPASS
else:
# horrible, hack-y fix
# (prevents namespace issue between
# local github PmagPy and pip-installed PmagPy).
# must reload because we may have
# changed directories since importing
temp = os.getcwd()
os.chdir('..')
reload(locator)
lib_file = resource_filename('locator', 'resource.py')
full_dir = os.path.split(lib_file)[0]
ind = full_dir.rfind(os.sep)
lib_dir = full_dir[:ind+1]
lib_dir = os.path.realpath(os.path.join(lib_dir, 'pmagpy'))
os.chdir(temp)
# end fix
# old way:
#lib_dir = os.path.dirname(os.path.realpath(__file__))
if not os.path.isfile(os.path.join(lib_dir, 'pmag.py')):
lib_dir = os.getcwd()
fname = os.path.join(lib_dir, 'pmag.py')
if not os.path.isfile(fname):
pmag_dir = os.path.split(os.path.split(__file__)[0])[0]
if os.path.isfile(os.path.join(pmag_dir,'pmagpy','pmag.py')):
return pmag_dir
else:
print('-W- Can\'t find the data model! Make sure you have installed pmagpy using pip: "pip install pmagpy --upgrade"')
return '.'
# strip "/" or "\" and "pmagpy" to return proper PmagPy directory
if lib_dir.endswith(os.sep):
lib_dir = lib_dir[:-1]
if lib_dir.endswith('pmagpy'):
pmag_dir = os.path.split(lib_dir)[0]
else:
pmag_dir = lib_dir
return pmag_dir | python | def get_pmag_dir():
"""
Returns directory in which PmagPy is installed
"""
# this is correct for py2exe (DEPRECATED)
#win_frozen = is_frozen()
#if win_frozen:
# path = os.path.abspath(unicode(sys.executable, sys.getfilesystemencoding()))
# path = os.path.split(path)[0]
# return path
# this is correct for py2app
try:
return os.environ['RESOURCEPATH']
# this works for everything else
except KeyError: pass
# new way:
# if we're in the local PmagPy directory:
if os.path.isfile(os.path.join(os.getcwd(), 'pmagpy', 'pmag.py')):
lib_dir = os.path.join(os.getcwd(), 'pmagpy')
# if we're anywhere else:
elif getattr(sys, 'frozen', False): #pyinstaller datafile directory
return sys._MEIPASS
else:
# horrible, hack-y fix
# (prevents namespace issue between
# local github PmagPy and pip-installed PmagPy).
# must reload because we may have
# changed directories since importing
temp = os.getcwd()
os.chdir('..')
reload(locator)
lib_file = resource_filename('locator', 'resource.py')
full_dir = os.path.split(lib_file)[0]
ind = full_dir.rfind(os.sep)
lib_dir = full_dir[:ind+1]
lib_dir = os.path.realpath(os.path.join(lib_dir, 'pmagpy'))
os.chdir(temp)
# end fix
# old way:
#lib_dir = os.path.dirname(os.path.realpath(__file__))
if not os.path.isfile(os.path.join(lib_dir, 'pmag.py')):
lib_dir = os.getcwd()
fname = os.path.join(lib_dir, 'pmag.py')
if not os.path.isfile(fname):
pmag_dir = os.path.split(os.path.split(__file__)[0])[0]
if os.path.isfile(os.path.join(pmag_dir,'pmagpy','pmag.py')):
return pmag_dir
else:
print('-W- Can\'t find the data model! Make sure you have installed pmagpy using pip: "pip install pmagpy --upgrade"')
return '.'
# strip "/" or "\" and "pmagpy" to return proper PmagPy directory
if lib_dir.endswith(os.sep):
lib_dir = lib_dir[:-1]
if lib_dir.endswith('pmagpy'):
pmag_dir = os.path.split(lib_dir)[0]
else:
pmag_dir = lib_dir
return pmag_dir | Returns directory in which PmagPy is installed | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/find_pmag_dir.py#L23-L80 |
PmagPy/PmagPy | programs/plot_magmap_basemap.py | main | def main():
"""
NAME
plot_magmap.py
DESCRIPTION
makes a color contour map of desired field model
SYNTAX
plot_magmap.py [command line options]
OPTIONS
-h prints help and quits
-f FILE specify field model file with format: l m g h
-fmt [pdf,eps,svg,png] specify format for output figure (default is png)
-mod [arch3k,cals3k,pfm9k,hfm10k,cals10k.2,shadif14k,cals10k.1b] specify model for 3ka to 1900 CE, default is cals10k
-alt ALT; specify altitude in km, default is sealevel (0)
-age specify date in decimal year, default is 2016
-lon0: central longitude for map, default is 0
-el: [D,I,B,Br] specify element for plotting
-cm: [see https://matplotlib.org/users/colormaps.html] specify color map for plotting (default is RdYlBu)
"""
cmap = 'RdYlBu'
date = 2016.
if not Basemap:
print(
"-W- Cannot access the Basemap module, which is required to run plot_magmap.py")
sys.exit()
dir_path = '.'
lincr = 1 # level increment for contours
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path = sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-fmt' in sys.argv:
ind = sys.argv.index('-fmt')
fmt = sys.argv[ind+1]
if fmt == 'jpg':
print('jpg not a supported option')
print(main.__doc__)
sys.exit()
else:
fmt = 'png'
if '-cm' in sys.argv:
ind = sys.argv.index('-cm')
cmap = sys.argv[ind+1]
if '-el' in sys.argv:
ind = sys.argv.index('-el')
el = sys.argv[ind+1]
else:
el = 'B'
if '-lon0' in sys.argv:
ind = sys.argv.index('-lon0')
lon_0 = float(sys.argv[ind+1])
else:
lon_0 = 0
if '-mod' in sys.argv:
ind = sys.argv.index('-mod')
mod = sys.argv[ind+1]
ghfile = ''
elif '-f' in sys.argv:
ind = sys.argv.index('-f')
ghfile = sys.argv[ind+1]
mod = 'custom'
date = ''
else:
mod, ghfile = 'cals10k', ''
if '-age' in sys.argv:
ind = sys.argv.index('-age')
date = float(sys.argv[ind+1])
if '-alt' in sys.argv:
ind = sys.argv.index('-alt')
alt = float(sys.argv[ind+1])
else:
alt = 0
save = pmag.get_flag_arg_from_sys("-sav")
if mod == 'custom':
d = 'Custom'
else:
d = str(date)
Ds, Is, Bs, Brs, lons, lats = pmag.do_mag_map(
date, mod=mod, lon_0=lon_0, alt=alt, file=ghfile)
if el == 'D':
element = Ds
elif el == 'I':
element = Is
elif el == 'B':
element = Bs
elif el == 'Br':
element = Brs
else:
print(main.__doc__)
sys.exit()
pmagplotlib.plot_mag_map(1, element, lons, lats, el, lon_0=0, date=date)
if not save:
pmagplotlib.draw_figs({'map': 1})
res = pmagplotlib.save_or_quit()
if res == 'a':
figname = 'igrf'+d+'.'+fmt
print("1 saved in ", figname)
plt.savefig('igrf'+d+'.'+fmt)
sys.exit()
plt.savefig('igrf'+d+'.'+fmt)
print('Figure saved as: ', 'igrf'+d+'.'+fmt) | python | def main():
"""
NAME
plot_magmap.py
DESCRIPTION
makes a color contour map of desired field model
SYNTAX
plot_magmap.py [command line options]
OPTIONS
-h prints help and quits
-f FILE specify field model file with format: l m g h
-fmt [pdf,eps,svg,png] specify format for output figure (default is png)
-mod [arch3k,cals3k,pfm9k,hfm10k,cals10k.2,shadif14k,cals10k.1b] specify model for 3ka to 1900 CE, default is cals10k
-alt ALT; specify altitude in km, default is sealevel (0)
-age specify date in decimal year, default is 2016
-lon0: central longitude for map, default is 0
-el: [D,I,B,Br] specify element for plotting
-cm: [see https://matplotlib.org/users/colormaps.html] specify color map for plotting (default is RdYlBu)
"""
cmap = 'RdYlBu'
date = 2016.
if not Basemap:
print(
"-W- Cannot access the Basemap module, which is required to run plot_magmap.py")
sys.exit()
dir_path = '.'
lincr = 1 # level increment for contours
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path = sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-fmt' in sys.argv:
ind = sys.argv.index('-fmt')
fmt = sys.argv[ind+1]
if fmt == 'jpg':
print('jpg not a supported option')
print(main.__doc__)
sys.exit()
else:
fmt = 'png'
if '-cm' in sys.argv:
ind = sys.argv.index('-cm')
cmap = sys.argv[ind+1]
if '-el' in sys.argv:
ind = sys.argv.index('-el')
el = sys.argv[ind+1]
else:
el = 'B'
if '-lon0' in sys.argv:
ind = sys.argv.index('-lon0')
lon_0 = float(sys.argv[ind+1])
else:
lon_0 = 0
if '-mod' in sys.argv:
ind = sys.argv.index('-mod')
mod = sys.argv[ind+1]
ghfile = ''
elif '-f' in sys.argv:
ind = sys.argv.index('-f')
ghfile = sys.argv[ind+1]
mod = 'custom'
date = ''
else:
mod, ghfile = 'cals10k', ''
if '-age' in sys.argv:
ind = sys.argv.index('-age')
date = float(sys.argv[ind+1])
if '-alt' in sys.argv:
ind = sys.argv.index('-alt')
alt = float(sys.argv[ind+1])
else:
alt = 0
save = pmag.get_flag_arg_from_sys("-sav")
if mod == 'custom':
d = 'Custom'
else:
d = str(date)
Ds, Is, Bs, Brs, lons, lats = pmag.do_mag_map(
date, mod=mod, lon_0=lon_0, alt=alt, file=ghfile)
if el == 'D':
element = Ds
elif el == 'I':
element = Is
elif el == 'B':
element = Bs
elif el == 'Br':
element = Brs
else:
print(main.__doc__)
sys.exit()
pmagplotlib.plot_mag_map(1, element, lons, lats, el, lon_0=0, date=date)
if not save:
pmagplotlib.draw_figs({'map': 1})
res = pmagplotlib.save_or_quit()
if res == 'a':
figname = 'igrf'+d+'.'+fmt
print("1 saved in ", figname)
plt.savefig('igrf'+d+'.'+fmt)
sys.exit()
plt.savefig('igrf'+d+'.'+fmt)
print('Figure saved as: ', 'igrf'+d+'.'+fmt) | NAME
plot_magmap.py
DESCRIPTION
makes a color contour map of desired field model
SYNTAX
plot_magmap.py [command line options]
OPTIONS
-h prints help and quits
-f FILE specify field model file with format: l m g h
-fmt [pdf,eps,svg,png] specify format for output figure (default is png)
-mod [arch3k,cals3k,pfm9k,hfm10k,cals10k.2,shadif14k,cals10k.1b] specify model for 3ka to 1900 CE, default is cals10k
-alt ALT; specify altitude in km, default is sealevel (0)
-age specify date in decimal year, default is 2016
-lon0: central longitude for map, default is 0
-el: [D,I,B,Br] specify element for plotting
-cm: [see https://matplotlib.org/users/colormaps.html] specify color map for plotting (default is RdYlBu) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/plot_magmap_basemap.py#L20-L133 |
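The elif chain above that picks the plotted element reads naturally as a dict lookup; a standalone sketch of the equivalent dispatch, with placeholder arrays:
Ds, Is, Bs, Brs = [0.0], [60.0], [45000.0], [30000.0]  # placeholder arrays
elements = {'D': Ds, 'I': Is, 'B': Bs, 'Br': Brs}
el = 'B'
element = elements.get(el)
if element is None:
    raise SystemExit('unknown element: ' + el)
print(element)  # [45000.0]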
PmagPy/PmagPy | programs/aniso_magic.py | main | def main():
"""
NAME
aniso_magic.py
DESCRIPTION
plots anisotropy data with either bootstrap or hext ellipses
SYNTAX
aniso_magic.py [-h] [command line options]
OPTIONS
-h plots help message and quits
-usr USER: set the user name
-f AFILE, specify specimens.txt formatted file for input
-fsa SAMPFILE, specify samples.txt file (required to plot by site)
-fsi SITEFILE, specify site file (required to include location information)
-x Hext [1963] and bootstrap
-B DON'T do bootstrap, do Hext
-par Tauxe [1998] parametric bootstrap
-v plot bootstrap eigenvectors instead of ellipses
-sit plot by site instead of entire file
-crd [s,g,t] coordinate system, default is specimen (g=geographic, t=tilt corrected)
-P don't make any plots - just fill in the specimens, samples, sites tables
-sav don't make the tables - just save all the plots
-fmt [svg, jpg, eps] format for output images, png default
-gtc DEC INC dec,inc of pole to great circle [down(up) in green (cyan)]
-d Vi DEC INC; Vi (1,2,3) to compare to direction DEC INC
-n N; specifies the number of bootstraps - default is 1000
DEFAULTS
AFILE: specimens.txt
plot bootstrap ellipses of Constable & Tauxe [1987]
NOTES
minor axis: circles
major axis: triangles
principal axis: squares
directions are plotted on the lower hemisphere
for bootstrapped eigenvector components: Xs: blue, Ys: red, Zs: black
"""
args = sys.argv
if "-h" in args:
print(main.__doc__)
sys.exit()
verbose = pmagplotlib.verbose
dir_path = pmag.get_named_arg("-WD", ".")
input_dir_path = pmag.get_named_arg("-ID", "")
num_bootstraps = pmag.get_named_arg("-n", 1000)
ipar = pmag.get_flag_arg_from_sys("-par", true=1, false=0)
ihext = pmag.get_flag_arg_from_sys("-x", true=1, false=0)
ivec = pmag.get_flag_arg_from_sys("-v", true=1, false=0)
iplot = pmag.get_flag_arg_from_sys("-P", true=0, false=1)
isite = pmag.get_flag_arg_from_sys("-sit", true=1, false=0)
iboot, vec = 1, 0
infile = pmag.get_named_arg('-f', 'specimens.txt')
samp_file = pmag.get_named_arg('-fsa', 'samples.txt')
site_file = pmag.get_named_arg('-fsi', 'sites.txt')
#outfile = pmag.get_named_arg("-F", "rmag_results.txt")
fmt = pmag.get_named_arg("-fmt", "png")
crd = pmag.get_named_arg("-crd", "s")
comp, Dir, PDir = 0, [], []
user = pmag.get_named_arg("-usr", "")
if '-B' in args:
iboot, ihext = 0, 1
plots, verbose = 0, True
if '-sav' in args:
plots = 1
verbose = 0
if '-gtc' in args:
ind = args.index('-gtc')
d, i = float(args[ind+1]), float(args[ind+2])
PDir.append(d)
PDir.append(i)
if '-d' in args:
comp = 1
ind = args.index('-d')
vec = int(args[ind+1])-1
Dir = [float(args[ind+2]), float(args[ind+3])]
ipmag.aniso_magic(infile=infile, samp_file=samp_file, site_file=site_file,
ipar=ipar, ihext=ihext, ivec=ivec, iplot=iplot, isite=isite, iboot=iboot, vec=vec,
Dir=Dir, PDir=PDir, comp=comp, user=user,
fmt=fmt, crd=crd, verbose=verbose, plots=plots,
num_bootstraps=num_bootstraps, dir_path=dir_path,
input_dir_path=input_dir_path) | python | def main():
"""
NAME
aniso_magic.py
DESCRIPTION
plots anisotropy data with either bootstrap or hext ellipses
SYNTAX
aniso_magic.py [-h] [command line options]
OPTIONS
-h plots help message and quits
-usr USER: set the user name
-f AFILE, specify specimens.txt formatted file for input
-fsa SAMPFILE, specify samples.txt file (required to plot by site)
-fsi SITEFILE, specify site file (required to include location information)
-x Hext [1963] and bootstrap
-B DON'T do bootstrap, do Hext
-par Tauxe [1998] parametric bootstrap
-v plot bootstrap eigenvectors instead of ellipses
-sit plot by site instead of entire file
-crd [s,g,t] coordinate system, default is specimen (g=geographic, t=tilt corrected)
-P don't make any plots - just fill in the specimens, samples, sites tables
-sav don't make the tables - just save all the plots
-fmt [svg, jpg, eps] format for output images, png default
-gtc DEC INC dec,inc of pole to great circle [down(up) in green (cyan)]
-d Vi DEC INC; Vi (1,2,3) to compare to direction DEC INC
-n N; specifies the number of bootstraps - default is 1000
DEFAULTS
AFILE: specimens.txt
plot bootstrap ellipses of Constable & Tauxe [1987]
NOTES
minor axis: circles
major axis: triangles
principal axis: squares
directions are plotted on the lower hemisphere
for bootstrapped eigenvector components: Xs: blue, Ys: red, Zs: black
"""
args = sys.argv
if "-h" in args:
print(main.__doc__)
sys.exit()
verbose = pmagplotlib.verbose
dir_path = pmag.get_named_arg("-WD", ".")
input_dir_path = pmag.get_named_arg("-ID", "")
num_bootstraps = pmag.get_named_arg("-n", 1000)
ipar = pmag.get_flag_arg_from_sys("-par", true=1, false=0)
ihext = pmag.get_flag_arg_from_sys("-x", true=1, false=0)
ivec = pmag.get_flag_arg_from_sys("-v", true=1, false=0)
iplot = pmag.get_flag_arg_from_sys("-P", true=0, false=1)
isite = pmag.get_flag_arg_from_sys("-sit", true=1, false=0)
iboot, vec = 1, 0
infile = pmag.get_named_arg('-f', 'specimens.txt')
samp_file = pmag.get_named_arg('-fsa', 'samples.txt')
site_file = pmag.get_named_arg('-fsi', 'sites.txt')
#outfile = pmag.get_named_arg("-F", "rmag_results.txt")
fmt = pmag.get_named_arg("-fmt", "png")
crd = pmag.get_named_arg("-crd", "s")
comp, Dir, PDir = 0, [], []
user = pmag.get_named_arg("-usr", "")
if '-B' in args:
iboot, ihext = 0, 1
plots, verbose = 0, True
if '-sav' in args:
plots = 1
verbose = 0
if '-gtc' in args:
ind = args.index('-gtc')
d, i = float(args[ind+1]), float(args[ind+2])
PDir.append(d)
PDir.append(i)
if '-d' in args:
comp = 1
ind = args.index('-d')
vec = int(args[ind+1])-1
Dir = [float(args[ind+2]), float(args[ind+3])]
ipmag.aniso_magic(infile=infile, samp_file=samp_file, site_file=site_file,
ipar=ipar, ihext=ihext, ivec=ivec, iplot=iplot, isite=isite, iboot=iboot, vec=vec,
Dir=Dir, PDir=PDir, comp=comp, user=user,
fmt=fmt, crd=crd, verbose=verbose, plots=plots,
num_bootstraps=num_bootstraps, dir_path=dir_path,
input_dir_path=input_dir_path) | NAME
aniso_magic.py
DESCRIPTION
plots anisotropy data with either bootstrap or hext ellipses
SYNTAX
aniso_magic.py [-h] [command line options]
OPTIONS
-h plots help message and quits
-usr USER: set the user name
-f AFILE, specify specimens.txt formatted file for input
-fsa SAMPFILE, specify samples.txt file (required to plot by site)
-fsi SITEFILE, specify site file (required to include location information)
-x Hext [1963] and bootstrap
-B DON'T do bootstrap, do Hext
-par Tauxe [1998] parametric bootstrap
-v plot bootstrap eigenvectors instead of ellipses
-sit plot by site instead of entire file
-crd [s,g,t] coordinate system, default is specimen (g=geographic, t=tilt corrected)
-P don't make any plots - just fill in the specimens, samples, sites tables
-sav don't make the tables - just save all the plots
-fmt [svg, jpg, eps] format for output images, png default
-gtc DEC INC dec,inc of pole to great circle [down(up) in green (cyan)]
-d Vi DEC INC; Vi (1,2,3) to compare to direction DEC INC
-n N; specifies the number of bootstraps - default is 1000
DEFAULTS
AFILE: specimens.txt
plot bootstrap ellipses of Constable & Tauxe [1987]
NOTES
minor axis: circles
major axis: triangles
principal axis: squares
directions are plotted on the lower hemisphere
for bootstrapped eigenvector components: Xs: blue, Ys: red, Zs: black | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/aniso_magic.py#L16-L98 |
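Scripted equivalent of the wrapper above. The keyword names are copied verbatim from the wrapper's call into ipmag.aniso_magic, and the values mirror the wrapper's defaults; treat it as a sketch:
from pmagpy import ipmag
ipmag.aniso_magic(infile='specimens.txt', samp_file='samples.txt',
                  site_file='sites.txt', ipar=0, ihext=0, ivec=0,
                  iplot=1, isite=0, iboot=1, vec=0, Dir=[], PDir=[],
                  comp=0, user='', fmt='png', crd='s', verbose=True,
                  plots=0, num_bootstraps=1000, dir_path='.',
                  input_dir_path='')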
PmagPy/PmagPy | programs/aniso_magic.py | new | def new():
"""
NAME
aniso_magic.py
DESCRIPTION
plots anisotropy data with either bootstrap or hext ellipses
SYNTAX
aniso_magic.py [-h] [command line options]
OPTIONS
-h plots help message and quits
-f AFILE, specify specimens.txt formatted file for input
-fsa SAMPFILE, specify samples.txt file (required to plot by site)
-fsi SITEFILE, specify site file (required to include location information)
-x Hext [1963] and bootstrap
-B DON'T do bootstrap, do Hext
-par Tauxe [1998] parametric bootstrap
-v plot bootstrap eigenvectors instead of ellipses
-sit plot by site instead of entire file
-crd [s,g,t] coordinate system, default is specimen (g=geographic, t=tilt corrected)
-P don't make any plots - just fill in the specimens, samples, sites tables
-sav don't make the tables - just save all the plots
-fmt [svg, jpg, eps] format for output images, png default
        -gtc DEC INC dec,inc of pole to great circle [down(up) in green (cyan)]
-d Vi DEC INC; Vi (1,2,3) to compare to direction DEC INC
-n N; specifies the number of bootstraps - default is 1000
DEFAULTS
AFILE: specimens.txt
plot bootstrap ellipses of Constable & Tauxe [1987]
NOTES
minor axis: circles
major axis: triangles
principal axis: squares
directions are plotted on the lower hemisphere
for bootstrapped eigenvector components: Xs: blue, Ys: red, Zs: black
"""
args = sys.argv
if '-h' in args:
print(new.__doc__)
return
dir_path = pmag.get_named_arg("-WD", ".")
if '-ID' in args and dir_path == '.':
dir_path = pmag.get_named_arg("-ID", ".")
iboot, vec = 1, 0
num_bootstraps = pmag.get_named_arg("-n", 1000)
ipar = pmag.get_flag_arg_from_sys("-par", true=1, false=0)
ihext = pmag.get_flag_arg_from_sys("-x", true=1, false=0)
ivec = pmag.get_flag_arg_from_sys("-v", true=1, false=0)
if ivec:
vec = 3
#iplot = pmag.get_flag_arg_from_sys("-P", true=0, false=1)
isite = pmag.get_flag_arg_from_sys("-sit", true=1, false=0)
infile = pmag.get_named_arg('-f', 'specimens.txt')
samp_file = pmag.get_named_arg('-fsa', 'samples.txt')
site_file = pmag.get_named_arg('-fsi', 'sites.txt')
#outfile = pmag.get_named_arg("-F", "rmag_results.txt")
fmt = pmag.get_named_arg("-fmt", "png")
crd = pmag.get_named_arg("-crd", "s")
comp, Dir, PDir = 0, [], []
user = pmag.get_named_arg("-usr", "")
if '-B' in args:
iboot, ihext = 0, 1
save_plots, verbose, interactive = False, True, True
if '-sav' in args:
save_plots = True
verbose = False
interactive = False
if '-gtc' in args:
ind = args.index('-gtc')
d, i = float(args[ind+1]), float(args[ind+2])
PDir.append(d)
PDir.append(i)
if '-d' in args:
comp = 1
ind = args.index('-d')
vec = int(args[ind+1])-1
Dir = [float(args[ind+2]), float(args[ind+3])]
ipmag.aniso_magic_nb(infile, samp_file, site_file, verbose,
ipar, ihext, ivec, isite, False, iboot,
vec, Dir, PDir, crd, num_bootstraps,
dir_path, save_plots=save_plots, interactive=interactive,
fmt=fmt) | python | def new():
"""
NAME
aniso_magic.py
DESCRIPTION
        plots anisotropy data with either bootstrap or Hext ellipses
SYNTAX
aniso_magic.py [-h] [command line options]
OPTIONS
        -h prints help message and quits
-f AFILE, specify specimens.txt formatted file for input
-fsa SAMPFILE, specify samples.txt file (required to plot by site)
-fsi SITEFILE, specify site file (required to include location information)
-x Hext [1963] and bootstrap
-B DON'T do bootstrap, do Hext
-par Tauxe [1998] parametric bootstrap
-v plot bootstrap eigenvectors instead of ellipses
-sit plot by site instead of entire file
-crd [s,g,t] coordinate system, default is specimen (g=geographic, t=tilt corrected)
-P don't make any plots - just fill in the specimens, samples, sites tables
-sav don't make the tables - just save all the plots
-fmt [svg, jpg, eps] format for output images, png default
        -gtc DEC INC dec,inc of pole to great circle [down(up) in green (cyan)]
-d Vi DEC INC; Vi (1,2,3) to compare to direction DEC INC
-n N; specifies the number of bootstraps - default is 1000
DEFAULTS
AFILE: specimens.txt
plot bootstrap ellipses of Constable & Tauxe [1987]
NOTES
minor axis: circles
major axis: triangles
principal axis: squares
directions are plotted on the lower hemisphere
for bootstrapped eigenvector components: Xs: blue, Ys: red, Zs: black
"""
args = sys.argv
if '-h' in args:
print(new.__doc__)
return
dir_path = pmag.get_named_arg("-WD", ".")
if '-ID' in args and dir_path == '.':
dir_path = pmag.get_named_arg("-ID", ".")
iboot, vec = 1, 0
num_bootstraps = pmag.get_named_arg("-n", 1000)
ipar = pmag.get_flag_arg_from_sys("-par", true=1, false=0)
ihext = pmag.get_flag_arg_from_sys("-x", true=1, false=0)
ivec = pmag.get_flag_arg_from_sys("-v", true=1, false=0)
if ivec:
vec = 3
#iplot = pmag.get_flag_arg_from_sys("-P", true=0, false=1)
isite = pmag.get_flag_arg_from_sys("-sit", true=1, false=0)
infile = pmag.get_named_arg('-f', 'specimens.txt')
samp_file = pmag.get_named_arg('-fsa', 'samples.txt')
site_file = pmag.get_named_arg('-fsi', 'sites.txt')
#outfile = pmag.get_named_arg("-F", "rmag_results.txt")
fmt = pmag.get_named_arg("-fmt", "png")
crd = pmag.get_named_arg("-crd", "s")
comp, Dir, PDir = 0, [], []
user = pmag.get_named_arg("-usr", "")
if '-B' in args:
iboot, ihext = 0, 1
save_plots, verbose, interactive = False, True, True
if '-sav' in args:
save_plots = True
verbose = False
interactive = False
if '-gtc' in args:
ind = args.index('-gtc')
d, i = float(args[ind+1]), float(args[ind+2])
PDir.append(d)
PDir.append(i)
if '-d' in args:
comp = 1
ind = args.index('-d')
vec = int(args[ind+1])-1
Dir = [float(args[ind+2]), float(args[ind+3])]
ipmag.aniso_magic_nb(infile, samp_file, site_file, verbose,
ipar, ihext, ivec, isite, False, iboot,
vec, Dir, PDir, crd, num_bootstraps,
dir_path, save_plots=save_plots, interactive=interactive,
fmt=fmt) | NAME
aniso_magic.py
DESCRIPTION
    plots anisotropy data with either bootstrap or Hext ellipses
SYNTAX
aniso_magic.py [-h] [command line options]
OPTIONS
        -h prints help message and quits
-f AFILE, specify specimens.txt formatted file for input
-fsa SAMPFILE, specify samples.txt file (required to plot by site)
-fsi SITEFILE, specify site file (required to include location information)
-x Hext [1963] and bootstrap
-B DON'T do bootstrap, do Hext
-par Tauxe [1998] parametric bootstrap
-v plot bootstrap eigenvectors instead of ellipses
-sit plot by site instead of entire file
-crd [s,g,t] coordinate system, default is specimen (g=geographic, t=tilt corrected)
-P don't make any plots - just fill in the specimens, samples, sites tables
-sav don't make the tables - just save all the plots
-fmt [svg, jpg, eps] format for output images, png default
        -gtc DEC INC dec,inc of pole to great circle [down(up) in green (cyan)]
-d Vi DEC INC; Vi (1,2,3) to compare to direction DEC INC
-n N; specifies the number of bootstraps - default is 1000
DEFAULTS
AFILE: specimens.txt
plot bootstrap ellipses of Constable & Tauxe [1987]
NOTES
minor axis: circles
major axis: triangles
principal axis: squares
directions are plotted on the lower hemisphere
for bootstrapped eigenvector components: Xs: blue, Ys: red, Zs: black | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/aniso_magic.py#L100-L183 |
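new() leans on two small command-line helpers from pmag. Their behavior, as exercised above, amounts to the following sketch (hypothetical stand-ins for illustration, not the actual pmag implementations):

import sys

def get_named_arg(name, default=None):
    # return the token that follows `name` on the command line, else default
    if name in sys.argv:
        return sys.argv[sys.argv.index(name) + 1]
    return default

def get_flag_arg_from_sys(name, true=1, false=0):
    # return `true` if the bare flag is present, else `false`
    if name in sys.argv:
        return true
    return false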
PmagPy/PmagPy | programs/scalc_magic.py | main | def main():
"""
NAME
scalc_magic.py
DESCRIPTION
calculates Sb from pmag_results files
SYNTAX
scalc_magic -h [command line options]
INPUT
takes magic formatted pmag_results (2.5) or sites (3.0) table
pmag_result_name (2.5) must start with "VGP: Site"
must have average_lat (2.5) or lat (3.0) if spin axis is reference
OPTIONS
-h prints help message and quits
-f FILE: specify input results file, default is 'sites.txt'
-c cutoff: specify VGP colatitude cutoff value, default is no cutoff
-k cutoff: specify kappa cutoff, default is 0
-crd [s,g,t]: specify coordinate system, default is geographic
        -v : use the Vandamme criterion
        -a: use antipodes of reverse data: default is to use only normal
        -r: use reverse data only
        -p: do relative to principal axis
-b: do bootstrap confidence bounds
-n: set minimum n for samples (specimens) per site
-dm: data model [3.0 is default, otherwise, 2.5]
-mm97: correct for within site scatter (McElhinny & McFadden, 1997)
NOTES
if kappa, N_site, lat supplied, will consider within site scatter
    OUTPUT
        if option -b used: N, S_B, S_B_lower, S_B_upper, cutoff
        otherwise: N, S_B, cutoff
"""
coord, kappa, cutoff, n = 0, 0, 180., 0
nb, anti, spin, v, boot = 1000, 0, 0, 0, 0
data_model = 3
rev = 0
if '-dm' in sys.argv:
ind = sys.argv.index("-dm")
        data_model = int(float(sys.argv[ind+1]))  # accept "2", "2.5", or "3"
if data_model == 2:
coord_key = 'tilt_correction'
in_file = 'pmag_results.txt'
k_key, n_key, lat_key = 'average_k', 'average_nn', 'average_lat'
else:
coord_key = 'dir_tilt_correction'
in_file = 'sites.txt'
        k_key, n_key, lat_key = 'dir_k', 'dir_n_samples', 'lat'
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind = sys.argv.index("-f")
in_file = sys.argv[ind + 1]
vgp_df = pd.read_csv(in_file, sep='\t', header=1)
else:
vgp_df = pd.read_csv(sys.stdin, sep='\t', header=1)
if '-c' in sys.argv:
ind = sys.argv.index('-c')
cutoff = float(sys.argv[ind+1])
if '-k' in sys.argv:
ind = sys.argv.index('-k')
kappa = float(sys.argv[ind+1])
if '-n' in sys.argv:
ind = sys.argv.index('-n')
n = float(sys.argv[ind+1])
if '-crd' in sys.argv:
ind = sys.argv.index("-crd")
coord = sys.argv[ind+1]
if coord == 's':
coord = -1
if coord == 'g':
coord = 0
if coord == 't':
coord = 100
if '-a' in sys.argv:
anti = 1
if '-r' in sys.argv:
rev = 1
if '-p' in sys.argv:
spin = 1
if '-v' in sys.argv:
v = 1
if '-b' in sys.argv:
boot = 1
if '-mm97' in sys.argv:
mm97 = 1
else:
mm97 = 0
#
# find desired vgp lat,lon, kappa,N_site data:
#
    vgp_df = vgp_df.dropna(subset=['vgp_lat', 'vgp_lon'])  # drop rows without VGP coordinates
keys = [coord_key, k_key, n_key, lat_key]
for key in keys:
if key not in vgp_df.columns:
vgp_df[key] = 0
vgp_df = vgp_df[vgp_df[coord_key] == coord]
if data_model != 3: # convert
vgp_df['dir_k'] = vgp_df[k_key]
vgp_df['dir_n_samples'] = vgp_df[n_key]
vgp_df['lat'] = vgp_df[lat_key]
N, S_B, low, high, cutoff = pmag.scalc_vgp_df(
vgp_df, anti=anti, rev=rev, cutoff=cutoff, kappa=kappa, n=n, spin=spin, v=v, boot=boot, mm97=mm97)
if high != 0:
print(N, '%7.1f %7.1f %7.1f %7.1f ' % (S_B, low, high, cutoff))
else:
print(N, '%7.1f %7.1f ' % (S_B, cutoff)) | python | def main():
"""
NAME
scalc_magic.py
DESCRIPTION
calculates Sb from pmag_results files
SYNTAX
scalc_magic -h [command line options]
INPUT
takes magic formatted pmag_results (2.5) or sites (3.0) table
pmag_result_name (2.5) must start with "VGP: Site"
must have average_lat (2.5) or lat (3.0) if spin axis is reference
OPTIONS
-h prints help message and quits
-f FILE: specify input results file, default is 'sites.txt'
-c cutoff: specify VGP colatitude cutoff value, default is no cutoff
-k cutoff: specify kappa cutoff, default is 0
-crd [s,g,t]: specify coordinate system, default is geographic
        -v : use the Vandamme criterion
        -a: use antipodes of reverse data: default is to use only normal
        -r: use reverse data only
        -p: do relative to principal axis
-b: do bootstrap confidence bounds
-n: set minimum n for samples (specimens) per site
-dm: data model [3.0 is default, otherwise, 2.5]
-mm97: correct for within site scatter (McElhinny & McFadden, 1997)
NOTES
if kappa, N_site, lat supplied, will consider within site scatter
    OUTPUT
        if option -b used: N, S_B, S_B_lower, S_B_upper, cutoff
        otherwise: N, S_B, cutoff
"""
coord, kappa, cutoff, n = 0, 0, 180., 0
nb, anti, spin, v, boot = 1000, 0, 0, 0, 0
data_model = 3
rev = 0
if '-dm' in sys.argv:
ind = sys.argv.index("-dm")
        data_model = int(float(sys.argv[ind+1]))  # accept "2", "2.5", or "3"
if data_model == 2:
coord_key = 'tilt_correction'
in_file = 'pmag_results.txt'
k_key, n_key, lat_key = 'average_k', 'average_nn', 'average_lat'
else:
coord_key = 'dir_tilt_correction'
in_file = 'sites.txt'
        k_key, n_key, lat_key = 'dir_k', 'dir_n_samples', 'lat'
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind = sys.argv.index("-f")
in_file = sys.argv[ind + 1]
vgp_df = pd.read_csv(in_file, sep='\t', header=1)
else:
vgp_df = pd.read_csv(sys.stdin, sep='\t', header=1)
if '-c' in sys.argv:
ind = sys.argv.index('-c')
cutoff = float(sys.argv[ind+1])
if '-k' in sys.argv:
ind = sys.argv.index('-k')
kappa = float(sys.argv[ind+1])
if '-n' in sys.argv:
ind = sys.argv.index('-n')
n = float(sys.argv[ind+1])
if '-crd' in sys.argv:
ind = sys.argv.index("-crd")
coord = sys.argv[ind+1]
if coord == 's':
coord = -1
if coord == 'g':
coord = 0
if coord == 't':
coord = 100
if '-a' in sys.argv:
anti = 1
if '-r' in sys.argv:
rev = 1
if '-p' in sys.argv:
spin = 1
if '-v' in sys.argv:
v = 1
if '-b' in sys.argv:
boot = 1
if '-mm97' in sys.argv:
mm97 = 1
else:
mm97 = 0
#
# find desired vgp lat,lon, kappa,N_site data:
#
    vgp_df = vgp_df.dropna(subset=['vgp_lat', 'vgp_lon'])  # drop rows without VGP coordinates
keys = [coord_key, k_key, n_key, lat_key]
for key in keys:
if key not in vgp_df.columns:
vgp_df[key] = 0
vgp_df = vgp_df[vgp_df[coord_key] == coord]
if data_model != 3: # convert
vgp_df['dir_k'] = vgp_df[k_key]
vgp_df['dir_n_samples'] = vgp_df[n_key]
vgp_df['lat'] = vgp_df[lat_key]
N, S_B, low, high, cutoff = pmag.scalc_vgp_df(
vgp_df, anti=anti, rev=rev, cutoff=cutoff, kappa=kappa, n=n, spin=spin, v=v, boot=boot, mm97=mm97)
if high != 0:
print(N, '%7.1f %7.1f %7.1f %7.1f ' % (S_B, low, high, cutoff))
else:
print(N, '%7.1f %7.1f ' % (S_B, cutoff)) | NAME
scalc_magic.py
DESCRIPTION
calculates Sb from pmag_results files
SYNTAX
scalc_magic -h [command line options]
INPUT
takes magic formatted pmag_results (2.5) or sites (3.0) table
pmag_result_name (2.5) must start with "VGP: Site"
must have average_lat (2.5) or lat (3.0) if spin axis is reference
OPTIONS
-h prints help message and quits
-f FILE: specify input results file, default is 'sites.txt'
-c cutoff: specify VGP colatitude cutoff value, default is no cutoff
-k cutoff: specify kappa cutoff, default is 0
-crd [s,g,t]: specify coordinate system, default is geographic
        -v : use the Vandamme criterion
        -a: use antipodes of reverse data: default is to use only normal
        -r: use reverse data only
        -p: do relative to principal axis
-b: do bootstrap confidence bounds
-n: set minimum n for samples (specimens) per site
-dm: data model [3.0 is default, otherwise, 2.5]
-mm97: correct for within site scatter (McElhinny & McFadden, 1997)
NOTES
if kappa, N_site, lat supplied, will consider within site scatter
    OUTPUT
        if option -b used: N, S_B, S_B_lower, S_B_upper, cutoff
        otherwise: N, S_B, cutoff | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/scalc_magic.py#L7-L121 |
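For reference, the statistic being reported is the between-site VGP scatter Sb, conventionally Sb^2 = (1/(N-1)) * sum(delta_i^2) with delta_i the angle between VGP i and the mean pole; the -mm97 option removes the within-site contribution Sw_i^2/n_i (McElhinny & McFadden, 1997). A minimal sketch of both forms, independent of the pmag implementation:

import numpy as np

def sb_total(deltas):
    """Sb from angular deviations delta_i (degrees) of N VGPs."""
    d = np.asarray(deltas, dtype=float)
    return np.sqrt((d ** 2).sum() / (len(d) - 1))

def sb_mm97(deltas, sw, n):
    """Sb with the within-site term Sw_i^2/n_i subtracted (MM97)."""
    d = np.asarray(deltas, dtype=float)
    sw = np.asarray(sw, dtype=float)
    n = np.asarray(n, dtype=float)
    return np.sqrt(((d ** 2) - (sw ** 2) / n).sum() / (len(d) - 1))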
PmagPy/PmagPy | programs/deprecated/plot_magic_keys.py | main | def main():
"""
NAME
plot_magic_keys.py
DESCRIPTION
    picks out keys and makes an xy plot
SYNTAX
plot_magic_keys.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input magic format file
-xkey KEY: specify key for X
-ykey KEY: specify key for Y
-b xmin xmax ymin ymax, sets bounds
"""
dir_path="./"
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
magic_file=dir_path+'/'+sys.argv[ind+1]
else:
print(main.__doc__)
sys.exit()
if '-xkey' in sys.argv:
ind=sys.argv.index('-xkey')
xkey=sys.argv[ind+1]
if '-ykey' in sys.argv:
ind=sys.argv.index('-ykey')
ykey=sys.argv[ind+1]
else:
print(main.__doc__)
sys.exit()
if '-b' in sys.argv:
ind=sys.argv.index('-b')
xmin=float(sys.argv[ind+1])
xmax=float(sys.argv[ind+2])
ymin=float(sys.argv[ind+3])
ymax=float(sys.argv[ind+4])
#
#
# get data read in
X,Y=[],[]
Data,file_type=pmag.magic_read(magic_file)
if len(Data)>0:
for rec in Data:
if xkey in list(rec.keys()) and rec[xkey]!="" and ykey in list(rec.keys()) and rec[ykey]!="":
                try:
                    x_val,y_val=float(rec[xkey]),float(rec[ykey])
                except ValueError: # skip rows with non-numeric values
                    continue
                X.append(x_val)
                Y.append(y_val)
FIG={'fig':1}
pmagplotlib.plot_init(FIG['fig'],5,5)
if '-b' in sys.argv:
pmagplotlib.plot_xy(FIG['fig'],X,Y,sym='ro',xlab=xkey,ylab=ykey,xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax )
else:
pmagplotlib.plot_xy(FIG['fig'],X,Y,sym='ro',xlab=xkey,ylab=ykey)
pmagplotlib.draw_figs(FIG)
ans=input(" S[a]ve to save plot, [q]uit, Return to continue: ")
if ans=="q": sys.exit()
if ans=="a":
files = {}
for key in list(FIG.keys()):
files[key]=str(key) + ".svg"
pmagplotlib.save_plots(FIG,files)
sys.exit()
else:
print('no data to plot') | python | def main():
"""
NAME
plot_magic_keys.py
DESCRIPTION
    picks out keys and makes an xy plot
SYNTAX
plot_magic_keys.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input magic format file
-xkey KEY: specify key for X
-ykey KEY: specify key for Y
-b xmin xmax ymin ymax, sets bounds
"""
dir_path="./"
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
magic_file=dir_path+'/'+sys.argv[ind+1]
else:
print(main.__doc__)
sys.exit()
if '-xkey' in sys.argv:
ind=sys.argv.index('-xkey')
xkey=sys.argv[ind+1]
if '-ykey' in sys.argv:
ind=sys.argv.index('-ykey')
ykey=sys.argv[ind+1]
else:
print(main.__doc__)
sys.exit()
if '-b' in sys.argv:
ind=sys.argv.index('-b')
xmin=float(sys.argv[ind+1])
xmax=float(sys.argv[ind+2])
ymin=float(sys.argv[ind+3])
ymax=float(sys.argv[ind+4])
#
#
# get data read in
X,Y=[],[]
Data,file_type=pmag.magic_read(magic_file)
if len(Data)>0:
for rec in Data:
if xkey in list(rec.keys()) and rec[xkey]!="" and ykey in list(rec.keys()) and rec[ykey]!="":
                try:
                    x_val,y_val=float(rec[xkey]),float(rec[ykey])
                except ValueError: # skip rows with non-numeric values
                    continue
                X.append(x_val)
                Y.append(y_val)
FIG={'fig':1}
pmagplotlib.plot_init(FIG['fig'],5,5)
if '-b' in sys.argv:
pmagplotlib.plot_xy(FIG['fig'],X,Y,sym='ro',xlab=xkey,ylab=ykey,xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax )
else:
pmagplotlib.plot_xy(FIG['fig'],X,Y,sym='ro',xlab=xkey,ylab=ykey)
pmagplotlib.draw_figs(FIG)
ans=input(" S[a]ve to save plot, [q]uit, Return to continue: ")
if ans=="q": sys.exit()
if ans=="a":
files = {}
for key in list(FIG.keys()):
files[key]=str(key) + ".svg"
pmagplotlib.save_plots(FIG,files)
sys.exit()
else:
print('no data to plot') | NAME
plot_magic_keys.py
DESCRIPTION
    picks out keys and makes an xy plot
SYNTAX
plot_magic_keys.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input magic format file
-xkey KEY: specify key for X
-ykey KEY: specify key for Y
-b xmin xmax ymin ymax, sets bounds | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/deprecated/plot_magic_keys.py#L11-L87 |
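The key-picking loop above predates pandas; the same extraction can be written more compactly today. A sketch, assuming a standard tab-delimited MagIC file whose column names sit on the second line:

import pandas as pd

def get_xy(magic_file, xkey, ykey):
    """Return paired numeric X, Y columns from a MagIC-format file."""
    df = pd.read_csv(magic_file, sep='\t', header=1)
    df = df.dropna(subset=[xkey, ykey])   # keep only rows with both values
    return df[xkey].astype(float).values, df[ykey].astype(float).values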
PmagPy/PmagPy | programs/eqarea.py | main | def main():
"""
NAME
eqarea.py
DESCRIPTION
makes equal area projections from declination/inclination data
INPUT FORMAT
takes dec/inc as first two columns in space delimited file
SYNTAX
eqarea.py [options]
OPTIONS
-f FILE, specify file on command line
-sav save figure and quit
-fmt [svg,jpg,png,pdf] set figure format [default is svg]
-s SIZE specify symbol size - default is 20
-Lsym SHAPE COLOR specify shape and color for lower hemisphere
-Usym SHAPE COLOR specify shape and color for upper hemisphere
shapes: 's': square,'o': circle,'^,>,v,<': [up,right,down,left] triangle, 'd': diamond,
'p': pentagram, 'h': hexagon, '8': octagon, '+': plus, 'x': cross
colors: [b]lue,[g]reen,[r]ed,[c]yan,[m]agenta,[y]ellow,blac[k],[w]hite
"""
title = ""
files, fmt = {}, 'svg'
sym = {'lower': ['o', 'r'], 'upper': ['o', 'w']}
plot = 0
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-sav' in sys.argv:
plot = 1
if '-fmt' in sys.argv:
ind = sys.argv.index('-fmt')
fmt = sys.argv[ind + 1]
if '-s' in sys.argv:
ind = sys.argv.index('-s')
sym['size'] = int(sys.argv[ind + 1])
else:
sym['size'] = 20
if '-Lsym' in sys.argv:
ind = sys.argv.index('-Lsym')
sym['lower'][0] = sys.argv[ind + 1]
sym['lower'][1] = sys.argv[ind + 2]
if '-Usym' in sys.argv:
ind = sys.argv.index('-Usym')
sym['upper'][0] = sys.argv[ind + 1]
sym['upper'][1] = sys.argv[ind + 2]
if '-f' in sys.argv: # ask for filename
ind = sys.argv.index('-f')
fname = sys.argv[ind + 1]
else:
print(main.__doc__)
print(' \n -f option required')
sys.exit() # graceful quit
DI = numpy.loadtxt(fname)
EQ = {'eq': 1}
pmagplotlib.plot_init(EQ['eq'], 5, 5)
pmagplotlib.plot_eq_sym(EQ['eq'], DI, 'Equal Area Plot', sym) # make plot
if plot == 0:
pmagplotlib.draw_figs(EQ) # make it visible
for key in list(EQ.keys()):
files[key] = key + '.' + fmt
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
titles = {}
titles['eq'] = 'Equal Area Plot'
EQ = pmagplotlib.add_borders(EQ, titles, black, purple)
pmagplotlib.save_plots(EQ, files)
elif plot == 1:
fname = os.path.split(fname)[1].split('.')[0]
files['eq'] = fname + '_eq.' + fmt
pmagplotlib.save_plots(EQ, files)
else:
ans = input(" S[a]ve to save plot, [q]uit without saving: ")
if ans == "a":
pmagplotlib.save_plots(EQ, files) | python | def main():
"""
NAME
eqarea.py
DESCRIPTION
makes equal area projections from declination/inclination data
INPUT FORMAT
takes dec/inc as first two columns in space delimited file
SYNTAX
eqarea.py [options]
OPTIONS
-f FILE, specify file on command line
-sav save figure and quit
-fmt [svg,jpg,png,pdf] set figure format [default is svg]
-s SIZE specify symbol size - default is 20
-Lsym SHAPE COLOR specify shape and color for lower hemisphere
-Usym SHAPE COLOR specify shape and color for upper hemisphere
shapes: 's': square,'o': circle,'^,>,v,<': [up,right,down,left] triangle, 'd': diamond,
'p': pentagram, 'h': hexagon, '8': octagon, '+': plus, 'x': cross
colors: [b]lue,[g]reen,[r]ed,[c]yan,[m]agenta,[y]ellow,blac[k],[w]hite
"""
title = ""
files, fmt = {}, 'svg'
sym = {'lower': ['o', 'r'], 'upper': ['o', 'w']}
plot = 0
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-sav' in sys.argv:
plot = 1
if '-fmt' in sys.argv:
ind = sys.argv.index('-fmt')
fmt = sys.argv[ind + 1]
if '-s' in sys.argv:
ind = sys.argv.index('-s')
sym['size'] = int(sys.argv[ind + 1])
else:
sym['size'] = 20
if '-Lsym' in sys.argv:
ind = sys.argv.index('-Lsym')
sym['lower'][0] = sys.argv[ind + 1]
sym['lower'][1] = sys.argv[ind + 2]
if '-Usym' in sys.argv:
ind = sys.argv.index('-Usym')
sym['upper'][0] = sys.argv[ind + 1]
sym['upper'][1] = sys.argv[ind + 2]
if '-f' in sys.argv: # ask for filename
ind = sys.argv.index('-f')
fname = sys.argv[ind + 1]
else:
print(main.__doc__)
print(' \n -f option required')
sys.exit() # graceful quit
DI = numpy.loadtxt(fname)
EQ = {'eq': 1}
pmagplotlib.plot_init(EQ['eq'], 5, 5)
pmagplotlib.plot_eq_sym(EQ['eq'], DI, 'Equal Area Plot', sym) # make plot
if plot == 0:
pmagplotlib.draw_figs(EQ) # make it visible
for key in list(EQ.keys()):
files[key] = key + '.' + fmt
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
titles = {}
titles['eq'] = 'Equal Area Plot'
EQ = pmagplotlib.add_borders(EQ, titles, black, purple)
pmagplotlib.save_plots(EQ, files)
elif plot == 1:
fname = os.path.split(fname)[1].split('.')[0]
files['eq'] = fname + '_eq.' + fmt
pmagplotlib.save_plots(EQ, files)
else:
ans = input(" S[a]ve to save plot, [q]uit without saving: ")
if ans == "a":
pmagplotlib.save_plots(EQ, files) | NAME
eqarea.py
DESCRIPTION
makes equal area projections from declination/inclination data
INPUT FORMAT
takes dec/inc as first two columns in space delimited file
SYNTAX
eqarea.py [options]
OPTIONS
-f FILE, specify file on command line
-sav save figure and quit
-fmt [svg,jpg,png,pdf] set figure format [default is svg]
-s SIZE specify symbol size - default is 20
-Lsym SHAPE COLOR specify shape and color for lower hemisphere
-Usym SHAPE COLOR specify shape and color for upper hemisphere
shapes: 's': square,'o': circle,'^,>,v,<': [up,right,down,left] triangle, 'd': diamond,
'p': pentagram, 'h': hexagon, '8': octagon, '+': plus, 'x': cross
colors: [b]lue,[g]reen,[r]ed,[c]yan,[m]agenta,[y]ellow,blac[k],[w]hite | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/eqarea.py#L12-L92 |
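The projection itself is compact: a direction with declination D and inclination I maps to radius sqrt(1 - |sin I|) at azimuth D, so horizontal directions (I = 0) fall on the unit circle and vertical ones at the center. A sketch of that mapping, independent of pmagplotlib:

import numpy as np

def eqarea_xy(dec, inc):
    """Equal-area (x, y) for a direction; dec/inc in degrees, north up."""
    d, i = np.radians(dec), np.radians(inc)
    r = np.sqrt(1.0 - abs(np.sin(i)))
    return r * np.sin(d), r * np.cos(d)

# example: dec=90, inc=0 plots at (1, 0), i.e. due east on the circle
x, y = eqarea_xy(90.0, 0.0)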
PmagPy/PmagPy | programs/find_ei.py | main | def main():
"""
NAME
find_EI.py
DESCRIPTION
    Applies a series of assumed flattening factors and "unsquishes" inclinations assuming the tangent function.
Finds flattening factor that gives elongation/inclination pair consistent with TK03.
Finds bootstrap confidence bounds
SYNTAX
find_EI.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE specify input file name
-n N specify number of bootstraps - the more the better, but slower!, default is 1000
-sc uses a "site-level" correction to a Fisherian distribution instead
of a "study-level" correction to a TK03-consistent distribution.
Note that many directions (~ 100) are needed for this correction to be reliable.
-fmt [svg,png,eps,pdf..] change plot format, default is svg
-sav saves the figures and quits
INPUT
dec/inc pairs, delimited with space or tabs
OUTPUT
four plots: 1) equal area plot of original directions
2) Elongation/inclination pairs as a function of f, data plus 25 bootstrap samples
3) Cumulative distribution of bootstrapped optimal inclinations plus uncertainties.
Estimate from original data set plotted as solid line
                4) Orientation of principal direction through unflattening
NOTE: If distribution does not have a solution, plot labeled: Pathological. Some bootstrap samples may have
valid solutions and those are plotted in the CDFs and E/I plot.
"""
fmt,nb='svg',1000
plot=0
if '-h' in sys.argv:
print(main.__doc__)
sys.exit() # graceful quit
elif '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
else:
print(main.__doc__)
sys.exit()
if '-n' in sys.argv:
ind=sys.argv.index('-n')
nb=int(sys.argv[ind+1])
if '-sc' in sys.argv:
site_correction = True
else:
site_correction = False
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt=sys.argv[ind+1]
if '-sav' in sys.argv:plot=1
data=numpy.loadtxt(file)
upper,lower=int(round(.975*nb)),int(round(.025*nb))
E,I=[],[]
PLTS={'eq':1,'ei':2,'cdf':3,'v2':4}
pmagplotlib.plot_init(PLTS['eq'],6,6)
pmagplotlib.plot_init(PLTS['ei'],5,5)
pmagplotlib.plot_init(PLTS['cdf'],5,5)
pmagplotlib.plot_init(PLTS['v2'],5,5)
pmagplotlib.plot_eq(PLTS['eq'],data,'Data')
# this is a problem
#if plot==0:pmagplotlib.draw_figs(PLTS)
ppars=pmag.doprinc(data)
Io=ppars['inc']
n=ppars["N"]
Es,Is,Fs,V2s=pmag.find_f(data)
if site_correction:
Inc,Elong=Is[Es.index(min(Es))],Es[Es.index(min(Es))]
flat_f = Fs[Es.index(min(Es))]
else:
Inc,Elong=Is[-1],Es[-1]
flat_f = Fs[-1]
pmagplotlib.plot_ei(PLTS['ei'],Es,Is,flat_f)
pmagplotlib.plot_v2s(PLTS['v2'],V2s,Is,flat_f)
b=0
print("Bootstrapping.... be patient")
while b<nb:
bdata=pmag.pseudo(data)
Esb,Isb,Fsb,V2sb=pmag.find_f(bdata)
if b<25:
pmagplotlib.plot_ei(PLTS['ei'],Esb,Isb,Fsb[-1])
if Esb[-1]!=0:
ppars=pmag.doprinc(bdata)
if site_correction:
I.append(abs(Isb[Esb.index(min(Esb))]))
E.append(Esb[Esb.index(min(Esb))])
else:
I.append(abs(Isb[-1]))
E.append(Esb[-1])
b+=1
if b%25==0:print(b,' out of ',nb)
I.sort()
E.sort()
Eexp=[]
for i in I:
Eexp.append(pmag.EI(i))
if Inc==0:
title= 'Pathological Distribution: '+'[%7.1f, %7.1f]' %(I[lower],I[upper])
else:
title= '%7.1f [%7.1f, %7.1f]' %( Inc, I[lower],I[upper])
pmagplotlib.plot_ei(PLTS['ei'],Eexp,I,1)
pmagplotlib.plot_cdf(PLTS['cdf'],I,'Inclinations','r',title)
pmagplotlib.plot_vs(PLTS['cdf'],[I[lower],I[upper]],'b','--')
pmagplotlib.plot_vs(PLTS['cdf'],[Inc],'g','-')
pmagplotlib.plot_vs(PLTS['cdf'],[Io],'k','-')
if plot==0:
        print("Io Inc I_lower, I_upper, Elon, E_lower, E_upper")
        print('%7.1f %s %7.1f _ %7.1f ^ %7.1f: %6.4f _ %6.4f ^ %6.4f' %(Io, " => ", Inc, I[lower],I[upper], Elong, E[lower],E[upper]))
pmagplotlib.draw_figs(PLTS)
ans = ""
while ans not in ['q', 'a']:
ans= input("S[a]ve plots - <q> to quit: ")
if ans=='q':
print("\n Good bye\n")
sys.exit()
files={}
files['eq']='findEI_eq.'+fmt
files['ei']='findEI_ei.'+fmt
files['cdf']='findEI_cdf.'+fmt
files['v2']='findEI_v2.'+fmt
pmagplotlib.save_plots(PLTS,files) | python | def main():
"""
NAME
find_EI.py
DESCRIPTION
    Applies a series of assumed flattening factors and "unsquishes" inclinations assuming the tangent function.
Finds flattening factor that gives elongation/inclination pair consistent with TK03.
Finds bootstrap confidence bounds
SYNTAX
find_EI.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE specify input file name
-n N specify number of bootstraps - the more the better, but slower!, default is 1000
-sc uses a "site-level" correction to a Fisherian distribution instead
of a "study-level" correction to a TK03-consistent distribution.
Note that many directions (~ 100) are needed for this correction to be reliable.
-fmt [svg,png,eps,pdf..] change plot format, default is svg
-sav saves the figures and quits
INPUT
dec/inc pairs, delimited with space or tabs
OUTPUT
four plots: 1) equal area plot of original directions
2) Elongation/inclination pairs as a function of f, data plus 25 bootstrap samples
3) Cumulative distribution of bootstrapped optimal inclinations plus uncertainties.
Estimate from original data set plotted as solid line
                4) Orientation of principal direction through unflattening
NOTE: If distribution does not have a solution, plot labeled: Pathological. Some bootstrap samples may have
valid solutions and those are plotted in the CDFs and E/I plot.
"""
fmt,nb='svg',1000
plot=0
if '-h' in sys.argv:
print(main.__doc__)
sys.exit() # graceful quit
elif '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
else:
print(main.__doc__)
sys.exit()
if '-n' in sys.argv:
ind=sys.argv.index('-n')
nb=int(sys.argv[ind+1])
if '-sc' in sys.argv:
site_correction = True
else:
site_correction = False
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt=sys.argv[ind+1]
if '-sav' in sys.argv:plot=1
data=numpy.loadtxt(file)
upper,lower=int(round(.975*nb)),int(round(.025*nb))
E,I=[],[]
PLTS={'eq':1,'ei':2,'cdf':3,'v2':4}
pmagplotlib.plot_init(PLTS['eq'],6,6)
pmagplotlib.plot_init(PLTS['ei'],5,5)
pmagplotlib.plot_init(PLTS['cdf'],5,5)
pmagplotlib.plot_init(PLTS['v2'],5,5)
pmagplotlib.plot_eq(PLTS['eq'],data,'Data')
# this is a problem
#if plot==0:pmagplotlib.draw_figs(PLTS)
ppars=pmag.doprinc(data)
Io=ppars['inc']
n=ppars["N"]
Es,Is,Fs,V2s=pmag.find_f(data)
if site_correction:
Inc,Elong=Is[Es.index(min(Es))],Es[Es.index(min(Es))]
flat_f = Fs[Es.index(min(Es))]
else:
Inc,Elong=Is[-1],Es[-1]
flat_f = Fs[-1]
pmagplotlib.plot_ei(PLTS['ei'],Es,Is,flat_f)
pmagplotlib.plot_v2s(PLTS['v2'],V2s,Is,flat_f)
b=0
print("Bootstrapping.... be patient")
while b<nb:
bdata=pmag.pseudo(data)
Esb,Isb,Fsb,V2sb=pmag.find_f(bdata)
if b<25:
pmagplotlib.plot_ei(PLTS['ei'],Esb,Isb,Fsb[-1])
if Esb[-1]!=0:
ppars=pmag.doprinc(bdata)
if site_correction:
I.append(abs(Isb[Esb.index(min(Esb))]))
E.append(Esb[Esb.index(min(Esb))])
else:
I.append(abs(Isb[-1]))
E.append(Esb[-1])
b+=1
if b%25==0:print(b,' out of ',nb)
I.sort()
E.sort()
Eexp=[]
for i in I:
Eexp.append(pmag.EI(i))
if Inc==0:
title= 'Pathological Distribution: '+'[%7.1f, %7.1f]' %(I[lower],I[upper])
else:
title= '%7.1f [%7.1f, %7.1f]' %( Inc, I[lower],I[upper])
pmagplotlib.plot_ei(PLTS['ei'],Eexp,I,1)
pmagplotlib.plot_cdf(PLTS['cdf'],I,'Inclinations','r',title)
pmagplotlib.plot_vs(PLTS['cdf'],[I[lower],I[upper]],'b','--')
pmagplotlib.plot_vs(PLTS['cdf'],[Inc],'g','-')
pmagplotlib.plot_vs(PLTS['cdf'],[Io],'k','-')
if plot==0:
        print("Io Inc I_lower, I_upper, Elon, E_lower, E_upper")
        print('%7.1f %s %7.1f _ %7.1f ^ %7.1f: %6.4f _ %6.4f ^ %6.4f' %(Io, " => ", Inc, I[lower],I[upper], Elong, E[lower],E[upper]))
pmagplotlib.draw_figs(PLTS)
ans = ""
while ans not in ['q', 'a']:
ans= input("S[a]ve plots - <q> to quit: ")
if ans=='q':
print("\n Good bye\n")
sys.exit()
files={}
files['eq']='findEI_eq.'+fmt
files['ei']='findEI_ei.'+fmt
files['cdf']='findEI_cdf.'+fmt
files['v2']='findEI_v2.'+fmt
pmagplotlib.save_plots(PLTS,files) | NAME
find_EI.py
DESCRIPTION
    Applies a series of assumed flattening factors and "unsquishes" inclinations assuming the tangent function.
Finds flattening factor that gives elongation/inclination pair consistent with TK03.
Finds bootstrap confidence bounds
SYNTAX
find_EI.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE specify input file name
-n N specify number of bootstraps - the more the better, but slower!, default is 1000
-sc uses a "site-level" correction to a Fisherian distribution instead
of a "study-level" correction to a TK03-consistent distribution.
Note that many directions (~ 100) are needed for this correction to be reliable.
-fmt [svg,png,eps,pdf..] change plot format, default is svg
-sav saves the figures and quits
INPUT
dec/inc pairs, delimited with space or tabs
OUTPUT
four plots: 1) equal area plot of original directions
2) Elongation/inclination pairs as a function of f, data plus 25 bootstrap samples
3) Cumulative distribution of bootstrapped optimal inclinations plus uncertainties.
Estimate from original data set plotted as solid line
                4) Orientation of principal direction through unflattening
NOTE: If distribution does not have a solution, plot labeled: Pathological. Some bootstrap samples may have
valid solutions and those are plotted in the CDFs and E/I plot. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/find_ei.py#L13-L141 |
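The "unsquishing" mentioned above is the tangent relation tan(I_observed) = f * tan(I_field) (King, 1955): for each assumed flattening factor f the observed inclinations are steepened by dividing the tangent by f. A one-function sketch of the step the program repeats over a series of f values:

import numpy as np

def unsquish(incs, f):
    """Restore inclinations (degrees) for an assumed flattening factor f."""
    return np.degrees(np.arctan(np.tan(np.radians(incs)) / f))

# e.g. an observed inclination of 30 degrees with f = 0.4 unflattens to ~55.3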
PmagPy/PmagPy | programs/magic_gui2.py | MainFrame.on_change_dir_button | def on_change_dir_button(self, event):
"""
create change directory frame
"""
currentDirectory = self.WD #os.getcwd()
change_dir_dialog = wx.DirDialog(self.panel,
"Choose your working directory to create or edit a MagIC contribution:",
defaultPath=currentDirectory,
style=wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON | wx.DD_CHANGE_DIR)
result = change_dir_dialog.ShowModal()
if result == wx.ID_CANCEL:
return
if result == wx.ID_OK:
self.WD = change_dir_dialog.GetPath()
self.dir_path.SetValue(self.WD)
change_dir_dialog.Destroy()
wait = wx.BusyInfo('Initializing data object in new directory, please wait...')
wx.SafeYield()
print('-I- Initializing magic data object')
# make new builder object, but reuse old data_model
self.er_magic = builder.ErMagicBuilder(self.WD, self.er_magic.data_model)
print('-I- Read in any available data from working directory')
self.er_magic.get_all_magic_info()
print('-I- Initializing headers')
self.er_magic.init_default_headers()
self.er_magic.init_actual_headers()
del wait | python | def on_change_dir_button(self, event):
"""
create change directory frame
"""
currentDirectory = self.WD #os.getcwd()
change_dir_dialog = wx.DirDialog(self.panel,
"Choose your working directory to create or edit a MagIC contribution:",
defaultPath=currentDirectory,
style=wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON | wx.DD_CHANGE_DIR)
result = change_dir_dialog.ShowModal()
if result == wx.ID_CANCEL:
return
if result == wx.ID_OK:
self.WD = change_dir_dialog.GetPath()
self.dir_path.SetValue(self.WD)
change_dir_dialog.Destroy()
wait = wx.BusyInfo('Initializing data object in new directory, please wait...')
wx.SafeYield()
print('-I- Initializing magic data object')
# make new builder object, but reuse old data_model
self.er_magic = builder.ErMagicBuilder(self.WD, self.er_magic.data_model)
print('-I- Read in any available data from working directory')
self.er_magic.get_all_magic_info()
print('-I- Initializing headers')
self.er_magic.init_default_headers()
self.er_magic.init_actual_headers()
del wait | create change directory frame | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/magic_gui2.py#L210-L236 |
PmagPy/PmagPy | programs/magic_gui2.py | MainFrame.make_grid_frame | def make_grid_frame(self, event):
"""
Create a GridFrame for data type of the button that was clicked
"""
if self.grid_frame:
print('-I- You already have a grid frame open')
pw.simple_warning("You already have a grid open")
return
try:
grid_type = event.GetButtonObj().Name[:-4] # remove '_btn'
except AttributeError:
grid_type = self.FindWindowById(event.Id).Name[:-4] # remove ('_btn')
wait = wx.BusyInfo('Making {} grid, please wait...'.format(grid_type))
wx.SafeYield()
# hide mainframe
self.on_open_grid_frame()
self.grid_frame = grid_frame.GridFrame(self.er_magic, self.WD, grid_type, grid_type, self.panel)
if self.validation_mode:
if grid_type in self.validation_mode:
self.grid_frame.grid.paint_invalid_cells(self.warn_dict[grid_type])
#self.grid_frame.msg_boxsizer
current_label = self.grid_frame.msg_text.GetLabel()
add_text = """\n\nColumns and rows with problem data have been highlighted in blue.
Cells with problem data are highlighted with different colors according to the type of problem.
Red: missing required data
Green: missing or invalid parent
Blue: non-numeric data provided in a numeric field
Gray: unrecognized column
Purple: invalid result child
        Yellow: Out-of-range latitude (should be -90 to 90) or longitude (should be 0 to 360)
Light gray: Unrecognized term in controlled vocabulary
Note: It is possible to have a row highlighted that has no highlighted column.
This means that you are missing information higher up in the data.
For example: a specimen could be missing a site name.
However, you need to fix this in the sample grid, not the specimen grid.
Once each item in the data has its proper parent, validations will be correct.
"""
self.grid_frame.msg_text.SetLabel(add_text)
#self.on_finish_change_dir(self.change_dir_dialog)
del wait | python | def make_grid_frame(self, event):
"""
Create a GridFrame for data type of the button that was clicked
"""
if self.grid_frame:
print('-I- You already have a grid frame open')
pw.simple_warning("You already have a grid open")
return
try:
grid_type = event.GetButtonObj().Name[:-4] # remove '_btn'
except AttributeError:
grid_type = self.FindWindowById(event.Id).Name[:-4] # remove ('_btn')
wait = wx.BusyInfo('Making {} grid, please wait...'.format(grid_type))
wx.SafeYield()
# hide mainframe
self.on_open_grid_frame()
self.grid_frame = grid_frame.GridFrame(self.er_magic, self.WD, grid_type, grid_type, self.panel)
if self.validation_mode:
if grid_type in self.validation_mode:
self.grid_frame.grid.paint_invalid_cells(self.warn_dict[grid_type])
#self.grid_frame.msg_boxsizer
current_label = self.grid_frame.msg_text.GetLabel()
add_text = """\n\nColumns and rows with problem data have been highlighted in blue.
Cells with problem data are highlighted with different colors according to the type of problem.
Red: missing required data
Green: missing or invalid parent
Blue: non-numeric data provided in a numeric field
Gray: unrecognized column
Purple: invalid result child
        Yellow: Out-of-range latitude (should be -90 to 90) or longitude (should be 0 to 360)
Light gray: Unrecognized term in controlled vocabulary
Note: It is possible to have a row highlighted that has no highlighted column.
This means that you are missing information higher up in the data.
For example: a specimen could be missing a site name.
However, you need to fix this in the sample grid, not the specimen grid.
Once each item in the data has its proper parent, validations will be correct.
"""
self.grid_frame.msg_text.SetLabel(add_text)
#self.on_finish_change_dir(self.change_dir_dialog)
del wait | Create a GridFrame for data type of the button that was clicked | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/magic_gui2.py#L251-L292 |
PmagPy/PmagPy | programs/magic_gui2.py | MainFrame.on_upload_file | def on_upload_file(self, event):
"""
Write all data to appropriate er_* and pmag_* files.
Then use those files to create a MagIC upload format file.
Validate the upload file.
"""
# coherence validations
wait = wx.BusyInfo('Validating data, please wait...')
wx.SafeYield()
spec_warnings, samp_warnings, site_warnings, loc_warnings = self.er_magic.validate_data()
result_warnings = self.er_magic.validate_results(self.er_magic.results)
meas_warnings = self.er_magic.validate_measurements(self.er_magic.measurements)
self.warn_dict = {'specimen': spec_warnings, 'sample': samp_warnings,
'site': site_warnings, 'location': loc_warnings,
'result': result_warnings, 'age': {}, 'measurement': meas_warnings}
# done coherence validations
del wait
# write upload file and perform data validations
wait = wx.BusyInfo('Making upload file, please wait...')
wx.SafeYield()
self.er_magic.write_files()
upfile, error_message, errors = ipmag.upload_magic(dir_path=self.WD,
data_model=self.data_model)
del wait
if upfile:
text = "You are ready to upload.\nYour file:\n{}\nwas generated in directory: \n{}\nDrag and drop this file in the MagIC database.".format(os.path.split(upfile)[1], self.WD)
dlg = wx.MessageDialog(self, caption="Saved", message=text, style=wx.OK)
else:
text = "There were some problems with the creation of your upload file.\nError message: {}\nSee Terminal/Command Prompt for details".format(error_message)
dlg = wx.MessageDialog(self, caption="Error", message=text, style=wx.OK)
result = dlg.ShowModal()
if result == wx.ID_OK:
dlg.Destroy()
self.edited = False
## add together data & coherence errors into one dictionary
if errors:
for item_type in errors:
for item_name in errors[item_type]:
if item_name in self.warn_dict[item_type]:
self.warn_dict[item_type][item_name].update(errors[item_type][item_name])
else:
self.warn_dict[item_type][item_name] = errors[item_type][item_name]
has_problems = []
for item_type, warnings in list(self.warn_dict.items()):
if warnings:
has_problems.append(item_type)
# for any dtypes with validation problems (data or coherence),
# highlight the button to the corresponding grid
# skip this step for Windows
if sys.platform in ['win32', 'win62']:
pass
else:
for dtype in self.warn_dict:
wind = self.FindWindowByName(dtype + '_btn')
if wind:
if dtype in has_problems:
wind.Bind(wx.EVT_PAINT, self.highlight_button)
else:
wind.Unbind(wx.EVT_PAINT, handler=self.highlight_button)
self.Refresh()
if has_problems:
self.validation_mode = set(has_problems)
if sys.platform in ['win32', 'win62']:
self.message.SetLabel('The following grid(s) have incorrect or incomplete data:\n{}'.format(', '.join(self.validation_mode)))
else:
self.message.SetLabel('Highlighted grids have incorrect or incomplete data')
self.bSizer_msg.ShowItems(True)
self.hbox.Fit(self)
if not has_problems:
self.validation_mode = set()
self.message.SetLabel('')
self.bSizer_msg.ShowItems(False)
self.hbox.Fit(self) | python | def on_upload_file(self, event):
"""
Write all data to appropriate er_* and pmag_* files.
Then use those files to create a MagIC upload format file.
Validate the upload file.
"""
# coherence validations
wait = wx.BusyInfo('Validating data, please wait...')
wx.SafeYield()
spec_warnings, samp_warnings, site_warnings, loc_warnings = self.er_magic.validate_data()
result_warnings = self.er_magic.validate_results(self.er_magic.results)
meas_warnings = self.er_magic.validate_measurements(self.er_magic.measurements)
self.warn_dict = {'specimen': spec_warnings, 'sample': samp_warnings,
'site': site_warnings, 'location': loc_warnings,
'result': result_warnings, 'age': {}, 'measurement': meas_warnings}
# done coherence validations
del wait
# write upload file and perform data validations
wait = wx.BusyInfo('Making upload file, please wait...')
wx.SafeYield()
self.er_magic.write_files()
upfile, error_message, errors = ipmag.upload_magic(dir_path=self.WD,
data_model=self.data_model)
del wait
if upfile:
text = "You are ready to upload.\nYour file:\n{}\nwas generated in directory: \n{}\nDrag and drop this file in the MagIC database.".format(os.path.split(upfile)[1], self.WD)
dlg = wx.MessageDialog(self, caption="Saved", message=text, style=wx.OK)
else:
text = "There were some problems with the creation of your upload file.\nError message: {}\nSee Terminal/Command Prompt for details".format(error_message)
dlg = wx.MessageDialog(self, caption="Error", message=text, style=wx.OK)
result = dlg.ShowModal()
if result == wx.ID_OK:
dlg.Destroy()
self.edited = False
## add together data & coherence errors into one dictionary
if errors:
for item_type in errors:
for item_name in errors[item_type]:
if item_name in self.warn_dict[item_type]:
self.warn_dict[item_type][item_name].update(errors[item_type][item_name])
else:
self.warn_dict[item_type][item_name] = errors[item_type][item_name]
has_problems = []
for item_type, warnings in list(self.warn_dict.items()):
if warnings:
has_problems.append(item_type)
# for any dtypes with validation problems (data or coherence),
# highlight the button to the corresponding grid
# skip this step for Windows
if sys.platform in ['win32', 'win62']:
pass
else:
for dtype in self.warn_dict:
wind = self.FindWindowByName(dtype + '_btn')
if wind:
if dtype in has_problems:
wind.Bind(wx.EVT_PAINT, self.highlight_button)
else:
wind.Unbind(wx.EVT_PAINT, handler=self.highlight_button)
self.Refresh()
if has_problems:
self.validation_mode = set(has_problems)
if sys.platform in ['win32', 'win62']:
self.message.SetLabel('The following grid(s) have incorrect or incomplete data:\n{}'.format(', '.join(self.validation_mode)))
else:
self.message.SetLabel('Highlighted grids have incorrect or incomplete data')
self.bSizer_msg.ShowItems(True)
self.hbox.Fit(self)
if not has_problems:
self.validation_mode = set()
self.message.SetLabel('')
self.bSizer_msg.ShowItems(False)
self.hbox.Fit(self) | Write all data to appropriate er_* and pmag_* files.
Then use those files to create a MagIC upload format file.
Validate the upload file. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/magic_gui2.py#L294-L367 |
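The two-level dictionary merge in the middle of this handler is easy to get wrong; in isolation the pattern is (a generic sketch, not tied to the GUI classes):

def merge_warnings(warn, errors):
    """Fold a nested {type: {name: {...}}} error dict into warn, in place."""
    for item_type in errors:
        for item_name in errors[item_type]:
            if item_name in warn.setdefault(item_type, {}):
                # same item flagged twice: combine the problem details
                warn[item_type][item_name].update(errors[item_type][item_name])
            else:
                warn[item_type][item_name] = errors[item_type][item_name]
    return warn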
PmagPy/PmagPy | programs/magic_gui2.py | MagICMenu.on_quit | def on_quit(self, event):
"""
shut down application
"""
if self.parent.grid_frame:
if self.parent.grid_frame.grid.changes:
dlg = wx.MessageDialog(self,caption="Message:", message="Are you sure you want to exit the program?\nYou have a grid open with unsaved changes.\n ", style=wx.OK|wx.CANCEL)
result = dlg.ShowModal()
if result == wx.ID_OK:
dlg.Destroy()
else:
dlg.Destroy()
return
if self.parent.grid_frame:
self.parent.grid_frame.Destroy()
# if there have been edits, save all data to files
# before quitting
if self.parent.edited:
self.parent.er_magic.write_files()
self.parent.Close()
try:
sys.exit()
except TypeError:
pass | python | def on_quit(self, event):
"""
shut down application
"""
if self.parent.grid_frame:
if self.parent.grid_frame.grid.changes:
dlg = wx.MessageDialog(self,caption="Message:", message="Are you sure you want to exit the program?\nYou have a grid open with unsaved changes.\n ", style=wx.OK|wx.CANCEL)
result = dlg.ShowModal()
if result == wx.ID_OK:
dlg.Destroy()
else:
dlg.Destroy()
return
if self.parent.grid_frame:
self.parent.grid_frame.Destroy()
# if there have been edits, save all data to files
# before quitting
if self.parent.edited:
self.parent.er_magic.write_files()
self.parent.Close()
try:
sys.exit()
except TypeError:
pass | shut down application | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/magic_gui2.py#L423-L446 |
PmagPy/PmagPy | programs/magic_gui2.py | MagICMenu.on_clear | def on_clear(self, event):
"""
initialize window to allow user to empty the working directory
"""
dia = pmag_menu_dialogs.ClearWD(self.parent, self.parent.WD)
clear = dia.do_clear()
if clear:
print('-I- Clear data object')
self.parent.er_magic = builder.ErMagicBuilder(self.parent.WD, self.parent.data_model)
print('-I- Initializing headers')
self.parent.er_magic.init_default_headers()
self.parent.er_magic.init_actual_headers() | python | def on_clear(self, event):
"""
initialize window to allow user to empty the working directory
"""
dia = pmag_menu_dialogs.ClearWD(self.parent, self.parent.WD)
clear = dia.do_clear()
if clear:
print('-I- Clear data object')
self.parent.er_magic = builder.ErMagicBuilder(self.parent.WD, self.parent.data_model)
print('-I- Initializing headers')
self.parent.er_magic.init_default_headers()
self.parent.er_magic.init_actual_headers() | initialize window to allow user to empty the working directory | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/magic_gui2.py#L448-L459 |
PmagPy/PmagPy | programs/deprecated/convert_samples.py | main | def main():
"""
NAME
convert_samples.py
DESCRIPTION
takes an er_samples or magic_measurements format file and creates an orient.txt template
SYNTAX
convert_samples.py [command line options]
OPTIONS
-f FILE: specify input file, default is er_samples.txt
-F FILE: specify output file, default is: orient_LOCATION.txt
INPUT FORMAT
er_samples.txt or magic_measurements format file
OUTPUT
orient.txt format file
"""
#
# initialize variables
#
version_num=pmag.get_version()
orient_file,samp_file = "orient","er_samples.txt"
args=sys.argv
dir_path,out_path='.','.'
default_outfile = True
#
#
if '-WD' in args:
ind=args.index('-WD')
dir_path=args[ind+1]
if '-OD' in args:
ind=args.index('-OD')
out_path=args[ind+1]
if "-h" in args:
print(main.__doc__)
sys.exit()
if "-F" in args:
ind=args.index("-F")
orient_file=sys.argv[ind+1]
default_outfile = False
if "-f" in args:
ind=args.index("-f")
samp_file=sys.argv[ind+1]
orient_file=out_path+'/'+orient_file
samp_file=dir_path+'/'+samp_file
#
# read in file to convert
#
ErSamples=[]
Required=['sample_class','sample_type','sample_lithology','lat','long']
Samps,file_type=pmag.magic_read(samp_file)
Locs=[]
OrKeys=['sample_name','site_name','mag_azimuth','field_dip','sample_class','sample_type','sample_lithology','lat','long','stratigraphic_height','method_codes','site_description']
print("file_type", file_type) # LJ
if file_type.lower()=='er_samples':
SampKeys=['er_sample_name','er_site_name','sample_azimuth','sample_dip','sample_class','sample_type','sample_lithology','sample_lat','sample_lon','sample_height','magic_method_codes','er_sample_description']
elif file_type.lower()=='magic_measurements':
SampKeys=['er_sample_name','er_site_name']
else:
        print('wrong file format; must be er_samples or magic_measurements only')
        sys.exit()
for samp in Samps:
if samp['er_location_name'] not in Locs:Locs.append(samp['er_location_name']) # get all the location names
for location_name in Locs:
loc_samps=pmag.get_dictitem(Samps,'er_location_name',location_name,'T')
OrOut=[]
for samp in loc_samps:
if samp['er_sample_name'] not in ErSamples:
ErSamples.append(samp['er_sample_name'])
OrRec={}
if 'sample_date' in list(samp.keys()) and samp['sample_date'].strip()!="":
date=samp['sample_date'].split(':')
OrRec['date']=date[1]+'/'+date[2]+'/'+date[0][2:4]
for i in range(len(SampKeys)):
if SampKeys[i] in list(samp.keys()):OrRec[OrKeys[i]]=samp[SampKeys[i]]
for key in Required:
if key not in list(OrRec.keys()):OrRec[key]="" # fill in blank required keys
OrOut.append(OrRec)
loc=location_name.replace(" ","_")
if default_outfile:
outfile=orient_file+'_'+loc+'.txt'
else:
outfile=orient_file
pmag.magic_write(outfile,OrOut,location_name)
print("Data saved in: ", outfile) | python | def main():
"""
NAME
convert_samples.py
DESCRIPTION
takes an er_samples or magic_measurements format file and creates an orient.txt template
SYNTAX
convert_samples.py [command line options]
OPTIONS
-f FILE: specify input file, default is er_samples.txt
-F FILE: specify output file, default is: orient_LOCATION.txt
INPUT FORMAT
er_samples.txt or magic_measurements format file
OUTPUT
orient.txt format file
"""
#
# initialize variables
#
version_num=pmag.get_version()
orient_file,samp_file = "orient","er_samples.txt"
args=sys.argv
dir_path,out_path='.','.'
default_outfile = True
#
#
if '-WD' in args:
ind=args.index('-WD')
dir_path=args[ind+1]
if '-OD' in args:
ind=args.index('-OD')
out_path=args[ind+1]
if "-h" in args:
print(main.__doc__)
sys.exit()
if "-F" in args:
ind=args.index("-F")
orient_file=sys.argv[ind+1]
default_outfile = False
if "-f" in args:
ind=args.index("-f")
samp_file=sys.argv[ind+1]
orient_file=out_path+'/'+orient_file
samp_file=dir_path+'/'+samp_file
#
# read in file to convert
#
ErSamples=[]
Required=['sample_class','sample_type','sample_lithology','lat','long']
Samps,file_type=pmag.magic_read(samp_file)
Locs=[]
OrKeys=['sample_name','site_name','mag_azimuth','field_dip','sample_class','sample_type','sample_lithology','lat','long','stratigraphic_height','method_codes','site_description']
print("file_type", file_type) # LJ
if file_type.lower()=='er_samples':
SampKeys=['er_sample_name','er_site_name','sample_azimuth','sample_dip','sample_class','sample_type','sample_lithology','sample_lat','sample_lon','sample_height','magic_method_codes','er_sample_description']
elif file_type.lower()=='magic_measurements':
SampKeys=['er_sample_name','er_site_name']
else:
        print('wrong file format; must be er_samples or magic_measurements only')
        sys.exit()
for samp in Samps:
if samp['er_location_name'] not in Locs:Locs.append(samp['er_location_name']) # get all the location names
for location_name in Locs:
loc_samps=pmag.get_dictitem(Samps,'er_location_name',location_name,'T')
OrOut=[]
for samp in loc_samps:
if samp['er_sample_name'] not in ErSamples:
ErSamples.append(samp['er_sample_name'])
OrRec={}
if 'sample_date' in list(samp.keys()) and samp['sample_date'].strip()!="":
date=samp['sample_date'].split(':')
OrRec['date']=date[1]+'/'+date[2]+'/'+date[0][2:4]
for i in range(len(SampKeys)):
if SampKeys[i] in list(samp.keys()):OrRec[OrKeys[i]]=samp[SampKeys[i]]
for key in Required:
if key not in list(OrRec.keys()):OrRec[key]="" # fill in blank required keys
OrOut.append(OrRec)
loc=location_name.replace(" ","_")
if default_outfile:
outfile=orient_file+'_'+loc+'.txt'
else:
outfile=orient_file
pmag.magic_write(outfile,OrOut,location_name)
print("Data saved in: ", outfile) | NAME
convert_samples.py
DESCRIPTION
takes an er_samples or magic_measurements format file and creates an orient.txt template
SYNTAX
convert_samples.py [command line options]
OPTIONS
-f FILE: specify input file, default is er_samples.txt
-F FILE: specify output file, default is: orient_LOCATION.txt
INPUT FORMAT
er_samples.txt or magic_measurements format file
OUTPUT
orient.txt format file | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/deprecated/convert_samples.py#L8-L94 |
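The heart of the conversion is the positional pairing of OrKeys with SampKeys. The same idea reads more directly as an explicit mapping; a sketch over a hypothetical subset of the columns:

SAMP_TO_ORIENT = {
    'er_sample_name': 'sample_name',
    'er_site_name': 'site_name',
    'sample_azimuth': 'mag_azimuth',
    'sample_dip': 'field_dip',
}

def to_orient(samp):
    """Translate one er_samples record (a dict) to orient.txt keys."""
    return {orient_key: samp.get(magic_key, '')
            for magic_key, orient_key in SAMP_TO_ORIENT.items()}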
PmagPy/PmagPy | programs/gobing.py | main | def main():
"""
NAME
gobing.py
DESCRIPTION
calculates Bingham parameters from dec inc data
INPUT FORMAT
takes dec/inc as first two columns in space delimited file
SYNTAX
gobing.py [options]
OPTIONS
-f FILE to read from FILE
-F, specifies output file name
< filename for reading from standard input
OUTPUT
        mean dec, mean inc, Eta, Edec, Einc, Zeta, Zdec, Zinc, N
"""
if len(sys.argv) > 0:
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-f' in sys.argv: # ask for filename
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
data=f.readlines()
else:
data=sys.stdin.readlines() # read in data from standard input
DIs= [] # set up list for dec inc data
ofile = ""
if '-F' in sys.argv: # set up output file
ind = sys.argv.index('-F')
ofile= sys.argv[ind+1]
        out = open(ofile, 'w')
for line in data: # read in the data from standard input
        if '\t' in line:
            rec=line.split('\t') # split each line on tab to get records
        else:
            rec=line.split() # split each line on whitespace to get records
DIs.append((float(rec[0]),float(rec[1])))
#
bpars=pmag.dobingham(DIs)
output = '%7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %i' % (bpars["dec"],bpars["inc"],bpars["Eta"],bpars["Edec"],bpars["Einc"],bpars["Zeta"],bpars["Zdec"],bpars["Zinc"],bpars["n"])
if ofile == "":
print(output)
else:
out.write(output+'\n') | python | def main():
"""
NAME
gobing.py
DESCRIPTION
calculates Bingham parameters from dec inc data
INPUT FORMAT
takes dec/inc as first two columns in space delimited file
SYNTAX
gobing.py [options]
OPTIONS
-f FILE to read from FILE
-F, specifies output file name
< filename for reading from standard input
OUTPUT
        mean dec, mean inc, Eta, Edec, Einc, Zeta, Zdec, Zinc, N
"""
if len(sys.argv) > 0:
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-f' in sys.argv: # ask for filename
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
data=f.readlines()
else:
data=sys.stdin.readlines() # read in data from standard input
DIs= [] # set up list for dec inc data
ofile = ""
if '-F' in sys.argv: # set up output file
ind = sys.argv.index('-F')
ofile= sys.argv[ind+1]
        out = open(ofile, 'w')
for line in data: # read in the data from standard input
        if '\t' in line:
            rec=line.split('\t') # split each line on tab to get records
        else:
            rec=line.split() # split each line on whitespace to get records
DIs.append((float(rec[0]),float(rec[1])))
#
bpars=pmag.dobingham(DIs)
output = '%7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %i' % (bpars["dec"],bpars["inc"],bpars["Eta"],bpars["Edec"],bpars["Einc"],bpars["Zeta"],bpars["Zdec"],bpars["Zinc"],bpars["n"])
if ofile == "":
print(output)
else:
out.write(output+'\n') | NAME
gobing.py
DESCRIPTION
calculates Bingham parameters from dec inc data
INPUT FORMAT
takes dec/inc as first two columns in space delimited file
SYNTAX
gobing.py [options]
OPTIONS
-f FILE to read from FILE
-F, specifies output file name
< filename for reading from standard input
OUTPUT
    mean dec, mean inc, Eta, Edec, Einc, Zeta, Zdec, Zinc, N | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/gobing.py#L6-L57 |
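Loading the dec/inc pairs these direction programs expect is a one-liner with numpy; a sketch (the filename is a placeholder):

import numpy as np

# each row: dec inc; tabs or spaces both work, extra columns are ignored
di = np.loadtxt('directions.txt', usecols=(0, 1))
DIs = [tuple(row) for row in di]   # e.g. ready for pmag.dobingham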
PmagPy/PmagPy | programs/atrm_magic.py | main | def main():
"""
NAME
atrm_magic.py
DESCRIPTION
Converts ATRM data to best-fit tensor (6 elements plus sigma)
    Original program ARMcrunch written to accommodate ARM anisotropy data
collected from 6 axial directions (+X,+Y,+Z,-X,-Y,-Z) using the
off-axis remanence terms to construct the tensor. A better way to
do the anisotropy of ARMs is to use 9,12 or 15 measurements in
the Hext rotational scheme.
SYNTAX
atrm_magic.py [-h][command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input file, default is atrm_measurements.txt
-fsp FILE: specimen input file, default is specimens.txt (optional)
-Fsp FILE: specify output file, default is specimens.txt (MagIC 3 only)
-DM DATA_MODEL: specify MagIC 2 or MagIC 3, default is 3
INPUT
        Input for the present program is TRM acquisition data with an optional baseline.
The order of the measurements is:
Decs=[0,90,0,180,270,0,0,90,0]
Incs=[0,0,90,0,0,-90,0,0,90]
The last two measurements are optional
"""
# initialize some parameters
args = sys.argv
if "-h" in args:
print(main.__doc__)
sys.exit()
#if "-Fa" in args:
# ind = args.index("-Fa")
# rmag_anis = args[ind + 1]
#if "-Fr" in args:
# ind = args.index("-Fr")
# rmag_res = args[ind + 1]
#meas_file = "atrm_measurements.txt"
#rmag_anis = "trm_anisotropy.txt"
#rmag_res = "atrm_results.txt"
dir_path = pmag.get_named_arg("-WD", ".")
input_dir_path = pmag.get_named_arg("-ID", "")
meas_file = pmag.get_named_arg("-f", "measurements.txt")
data_model_num = int(pmag.get_named_arg("-DM", 3))
spec_outfile = pmag.get_named_arg("-Fsp", "specimens.txt")
spec_infile = pmag.get_named_arg("-fsp", "specimens.txt")
ipmag.atrm_magic(meas_file, dir_path, input_dir_path,
spec_infile, spec_outfile, data_model_num) | python | def main():
"""
NAME
atrm_magic.py
DESCRIPTION
Converts ATRM data to best-fit tensor (6 elements plus sigma)
        Original program ARMcrunch written to accommodate ARM anisotropy data
collected from 6 axial directions (+X,+Y,+Z,-X,-Y,-Z) using the
off-axis remanence terms to construct the tensor. A better way to
do the anisotropy of ARMs is to use 9,12 or 15 measurements in
the Hext rotational scheme.
SYNTAX
atrm_magic.py [-h][command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input file, default is atrm_measurements.txt
-fsp FILE: specimen input file, default is specimens.txt (optional)
-Fsp FILE: specify output file, default is specimens.txt (MagIC 3 only)
-DM DATA_MODEL: specify MagIC 2 or MagIC 3, default is 3
INPUT
        Input for the present program is TRM acquisition data with an optional baseline.
The order of the measurements is:
Decs=[0,90,0,180,270,0,0,90,0]
Incs=[0,0,90,0,0,-90,0,0,90]
The last two measurements are optional
"""
# initialize some parameters
args = sys.argv
if "-h" in args:
print(main.__doc__)
sys.exit()
#if "-Fa" in args:
# ind = args.index("-Fa")
# rmag_anis = args[ind + 1]
#if "-Fr" in args:
# ind = args.index("-Fr")
# rmag_res = args[ind + 1]
#meas_file = "atrm_measurements.txt"
#rmag_anis = "trm_anisotropy.txt"
#rmag_res = "atrm_results.txt"
dir_path = pmag.get_named_arg("-WD", ".")
input_dir_path = pmag.get_named_arg("-ID", "")
meas_file = pmag.get_named_arg("-f", "measurements.txt")
data_model_num = int(pmag.get_named_arg("-DM", 3))
spec_outfile = pmag.get_named_arg("-Fsp", "specimens.txt")
spec_infile = pmag.get_named_arg("-fsp", "specimens.txt")
ipmag.atrm_magic(meas_file, dir_path, input_dir_path,
spec_infile, spec_outfile, data_model_num) | NAME
atrm_magic.py
DESCRIPTION
Converts ATRM data to best-fit tensor (6 elements plus sigma)
    Original program ARMcrunch written to accommodate ARM anisotropy data
collected from 6 axial directions (+X,+Y,+Z,-X,-Y,-Z) using the
off-axis remanence terms to construct the tensor. A better way to
do the anisotropy of ARMs is to use 9,12 or 15 measurements in
the Hext rotational scheme.
SYNTAX
atrm_magic.py [-h][command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input file, default is atrm_measurements.txt
-fsp FILE: specimen input file, default is specimens.txt (optional)
-Fsp FILE: specify output file, default is specimens.txt (MagIC 3 only)
-DM DATA_MODEL: specify MagIC 2 or MagIC 3, default is 3
INPUT
    Input for the present program is TRM acquisition data with an optional baseline.
The order of the measurements is:
Decs=[0,90,0,180,270,0,0,90,0]
Incs=[0,0,90,0,0,-90,0,0,90]
The last two measurements are optional | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/atrm_magic.py#L9-L68 |
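The script is a thin wrapper, so the same conversion can be run from Python; a sketch using the default file names from the docstring (the files are assumed to exist in the working directory):

from pmagpy import ipmag

# mirrors the positional call assembled by main() above
ipmag.atrm_magic("measurements.txt",  # measurement file
                 ".",                 # dir_path
                 "",                  # input_dir_path (empty means use dir_path)
                 "specimens.txt",     # specimen input file
                 "specimens.txt",     # specimen output file
                 3)                   # MagIC data model number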
PmagPy/PmagPy | programs/conversion_scripts2/iodp_srm_magic2.py | main | def main(command_line=True, **kwargs):
"""
NAME
iodp_srm_magic.py
DESCRIPTION
converts IODP LIMS and LORE SRM archive half sample format files to magic_measurements format files
SYNTAX
iodp_srm_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input .csv file, default is all in directory
-F FILE: specify output measurements file, default is magic_measurements.txt
-Fsp FILE: specify output er_specimens.txt file, default is er_specimens.txt
-Fsa FILE: specify output er_samples.txt file, default is er_samples.txt
-Fsi FILE: specify output er_sites.txt file, default is er_sites.txt
-A : don't average replicate measurements
INPUTS
IODP .csv file format exported from LIMS database
"""
#
# initialize defaults
version_num=pmag.get_version()
meas_file='magic_measurements.txt'
spec_file='er_specimens.txt'
samp_file='er_samples.txt'
site_file='er_sites.txt'
csv_file=''
ErSpecs,ErSamps,ErSites,ErLocs,ErCits=[],[],[],[],[]
MagRecs=[]
citation="This study"
dir_path,demag='.','NRM'
args=sys.argv
noave=0
depth_method='a'
# get command line args
if command_line:
if '-WD' in args:
ind=args.index("-WD")
dir_path=args[ind+1]
if '-ID' in args:
ind = args.index('-ID')
input_dir_path = args[ind+1]
else:
input_dir_path = dir_path
output_dir_path = dir_path
if "-h" in args:
print(main.__doc__)
return False
if "-A" in args: noave=1
if '-f' in args:
ind=args.index("-f")
csv_file=args[ind+1]
if '-F' in args:
ind=args.index("-F")
meas_file=args[ind+1]
if '-Fsp' in args:
ind=args.index("-Fsp")
spec_file = args[ind+1]
if '-Fsi' in args:
ind=args.index("-Fsi")
site_file=args[ind+1]
if '-Fsa' in args:
ind=args.index("-Fsa")
samp_file = args[ind+1]
if not command_line:
dir_path = kwargs.get('dir_path', '.')
input_dir_path = kwargs.get('input_dir_path', dir_path)
output_dir_path = dir_path # rename dir_path after input_dir_path is set
noave = kwargs.get('noave', 0) # default (0) is DO average
csv_file = kwargs.get('csv_file', '')
meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
spec_file = kwargs.get('spec_file', 'er_specimens.txt')
samp_file = kwargs.get('samp_file', 'er_samples.txt')
site_file = kwargs.get('site_file', 'er_sites.txt')
# format variables
meas_file = os.path.join(output_dir_path, meas_file)
spec_file = os.path.join(output_dir_path, spec_file)
Specs,file_type = pmag.magic_read(spec_file)
samp_file = os.path.join(output_dir_path, samp_file)
ErSamps,file_type = pmag.magic_read(samp_file)
site_file = os.path.join(output_dir_path, site_file)
if csv_file=="":
filelist=os.listdir(input_dir_path) # read in list of files to import
else:
csv_file = os.path.join(input_dir_path, csv_file)
filelist=[csv_file]
# parsing the data
specimens,samples,sites=[],[],[]
MagRecs,SpecRecs,SampRecs,SiteRecs=[],[],[],[]
for samp in ErSamps:
if samp['er_sample_name'] not in samples:
samples.append(samp['er_sample_name'])
SampRecs.append(samp)
file_found = False
for f in filelist: # parse each file
if f[-3:].lower()=='csv':
file_found = True
print('processing: ',f)
full_file = os.path.join(input_dir_path, f)
with open(full_file, 'r') as fin:
file_input = fin.readlines()
            keys=file_input[0].replace('\n','').split(',') # split the header line on commas
if "Interval Top (cm) on SHLF" in keys:interval_key="Interval Top (cm) on SHLF"
if " Interval Bot (cm) on SECT" in keys:interval_key=" Interval Bot (cm) on SECT"
if "Offset (cm)" in keys: interval_key="Offset (cm)"
if "Top Depth (m)" in keys:depth_key="Top Depth (m)"
if "CSF-A Top (m)" in keys:depth_key="CSF-A Top (m)"
if "Depth CSF-A (m)" in keys:depth_key="Depth CSF-A (m)"
if "CSF-B Top (m)" in keys:
comp_depth_key="CSF-B Top (m)" # use this model if available
elif "Depth CSF-B (m)" in keys:
comp_depth_key="Depth CSF-B (m)"
else:
comp_depth_key=""
if "Demag level (mT)" in keys:demag_key="Demag level (mT)"
if "Demag Level (mT)" in keys: demag_key="Demag Level (mT)"
if "Inclination (Tray- and Bkgrd-Corrected) (deg)" in keys:inc_key="Inclination (Tray- and Bkgrd-Corrected) (deg)"
if "Inclination background + tray corrected (deg)" in keys:inc_key="Inclination background + tray corrected (deg)"
if "Inclination background + tray corrected (\xc2\xb0)" in keys:inc_key="Inclination background + tray corrected (\xc2\xb0)"
if "Inclination background & tray corrected (deg)" in keys:inc_key="Inclination background & tray corrected (deg)"
if "Declination (Tray- and Bkgrd-Corrected) (deg)" in keys:dec_key="Declination (Tray- and Bkgrd-Corrected) (deg)"
if "Declination background + tray corrected (deg)" in keys:dec_key="Declination background + tray corrected (deg)"
if "Declination background + tray corrected (\xc2\xb0)" in keys:dec_key="Declination background + tray corrected (\xc2\xb0)"
if "Declination background & tray corrected (deg)" in keys:dec_key="Declination background & tray corrected (deg)"
if "Intensity (Tray- and Bkgrd-Corrected) (A/m)" in keys:int_key="Intensity (Tray- and Bkgrd-Corrected) (A/m)"
if "Intensity background + tray corrected (A/m)" in keys:int_key="Intensity background + tray corrected (A/m)"
if "Intensity background & tray corrected (A/m)" in keys:int_key="Intensity background & tray corrected (A/m)"
if "Core Type" in keys:
core_type="Core Type"
else: core_type="Type"
if 'Run Number' in keys: run_number_key='Run Number'
if 'Test No.' in keys: run_number_key='Test No.'
if 'Test Changed On' in keys: date_key='Test Changed On'
if "Timestamp (UTC)" in keys: date_key="Timestamp (UTC)"
if "Section" in keys: sect_key="Section"
if "Sect" in keys: sect_key="Sect"
if 'Section Half' in keys: half_key='Section Half'
if "A/W" in keys: half_key="A/W"
if "Text ID" in keys: text_id="Text ID"
if "Text Id" in keys: text_id="Text Id"
for line in file_input[1:]:
InRec={}
test=0
recs=line.split(',')
                if len(recs)==len(keys): # only parse well-formed lines
                    for k in range(len(keys)):
                        InRec[keys[k]]=recs[k] # reuse the existing split rather than re-splitting per column
                if InRec.get('Exp','')!="": test=1 # get rid of pesky blank (or malformed) lines
if test==1:
run_number=""
inst="IODP-SRM"
volume='15.59' # set default volume to this
MagRec,SpecRec,SampRec,SiteRec={},{},{},{}
expedition=InRec['Exp']
location=InRec['Site']+InRec['Hole']
# Maintain backward compatibility for the ever-changing LIMS format (Argh!)
while len(InRec['Core'])<3:
InRec['Core']='0'+InRec['Core']
if "Last Tray Measurment" in list(InRec.keys()) and "SHLF" not in InRec[text_id] or 'dscr' in csv_file : # assume discrete sample
specimen=expedition+'-'+location+'-'+InRec['Core']+InRec[core_type]+"-"+InRec[sect_key]+'-'+InRec[half_key]+'-'+str(InRec[interval_key])
else: # mark as continuous measurements
specimen=expedition+'-'+location+'-'+InRec['Core']+InRec[core_type]+"_"+InRec[sect_key]+InRec[half_key]+'-'+str(InRec[interval_key])
SpecRec['er_expedition_name']=expedition
SpecRec['er_location_name']=location
SpecRec['er_site_name']=specimen
SpecRec['er_citation_names']=citation
for key in list(SpecRec.keys()):SampRec[key]=SpecRec[key]
for key in list(SpecRec.keys()):SiteRec[key]=SpecRec[key]
SampRec['sample_azimuth']='0'
SampRec['sample_dip']='0'
SampRec['sample_core_depth']=InRec[depth_key]
if comp_depth_key!='':
SampRec['sample_composite_depth']=InRec[comp_depth_key]
if "SHLF" not in InRec[text_id]:
SampRec['magic_method_codes']='FS-C-DRILL-IODP:SP-SS-C:SO-V'
else:
SampRec['magic_method_codes']='FS-C-DRILL-IODP:SO-V'
SpecRec['er_specimen_name']=specimen
SpecRec['er_sample_name']=specimen
SampRec['er_sample_name']=specimen
SampRec['er_specimen_names']=specimen
SiteRec['er_specimen_names']=specimen
for key in list(SpecRec.keys()):MagRec[key]=SpecRec[key]
# set up measurement record - default is NRM
#MagRec['er_analyst_mail_names']=InRec['Test Entered By']
MagRec['magic_software_packages']=version_num
MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["treatment_ac_field"]=0
MagRec["treatment_dc_field"]='0'
MagRec["treatment_dc_field_phi"]='0'
MagRec["treatment_dc_field_theta"]='0'
MagRec["measurement_flag"]='g' # assume all data are "good"
MagRec["measurement_standard"]='u' # assume all data are "good"
SpecRec['er_specimen_alternatives']=InRec[text_id]
if 'Sample Area (cm?)' in list(InRec.keys()) and InRec['Sample Area (cm?)']!= "": volume=InRec['Sample Area (cm?)']
if InRec[run_number_key]!= "": run_number=InRec[run_number_key]
                    datestamp=InRec[date_key].split() # split the date/time field into date and time parts
if '/' in datestamp[0]:
mmddyy=datestamp[0].split('/') # break into month day year
if len(mmddyy[0])==1: mmddyy[0]='0'+mmddyy[0] # make 2 characters
if len(mmddyy[1])==1: mmddyy[1]='0'+mmddyy[1] # make 2 characters
if len(datestamp[1])==1: datestamp[1]='0'+datestamp[1] # make 2 characters
date='20'+mmddyy[2]+':'+mmddyy[0]+":"+mmddyy[1] +':' +datestamp[1]+":00.00"
if '-' in datestamp[0]:
mmddyy=datestamp[0].split('-') # break into month day year
date=mmddyy[0]+':'+mmddyy[1]+":"+mmddyy[2] +':' +datestamp[1]+":00.00"
MagRec["measurement_date"]=date
MagRec["magic_method_codes"]='LT-NO'
if InRec[demag_key]!="0":
MagRec['magic_method_codes'] = 'LT-AF-Z'
inst=inst+':IODP-SRM-AF' # measured on shipboard in-line 2G AF
treatment_value=float(InRec[demag_key].strip('"'))*1e-3 # convert mT => T
MagRec["treatment_ac_field"]=treatment_value # AF demag in treat mT => T
if 'Treatment Type' in list(InRec.keys()) and InRec['Treatment Type']!="":
if 'Alternating Frequency' in InRec['Treatment Type']:
MagRec['magic_method_codes'] = 'LT-AF-Z'
                            inst=inst+':IODP-DTECH' # measured on shipboard Dtech D2000
treatment_value=float(InRec['Treatment Value'])*1e-3 # convert mT => T
MagRec["treatment_ac_field"]=treatment_value # AF demag in treat mT => T
elif 'Thermal' in InRec['Treatment Type']:
MagRec['magic_method_codes'] = 'LT-T-Z'
inst=inst+':IODP-TDS' # measured on shipboard Schonstedt thermal demagnetizer
treatment_value=float(InRec['Treatment Value'])+273 # convert C => K
MagRec["treatment_temp"]='%8.3e'%(treatment_value) #
MagRec["measurement_standard"]='u' # assume all data are "good"
vol=float(volume)*1e-6 # convert from cc to m^3
if run_number!="":
MagRec['external_database_ids']=run_number
MagRec['external_database_names']='LIMS'
else:
MagRec['external_database_ids']=""
MagRec['external_database_names']=''
MagRec['measurement_inc']=InRec[inc_key].strip('"')
MagRec['measurement_dec']=InRec[dec_key].strip('"')
intens= InRec[int_key].strip('"')
MagRec['measurement_magn_moment']='%8.3e'%(float(intens)*vol) # convert intensity from A/m to Am^2 using vol
MagRec['magic_instrument_codes']=inst
MagRec['measurement_number']='1'
MagRec['measurement_csd']=''
MagRec['measurement_positions']=''
MagRecs.append(MagRec)
if specimen not in specimens:
specimens.append(specimen)
SpecRecs.append(SpecRec)
if MagRec['er_sample_name'] not in samples:
samples.append(MagRec['er_sample_name'])
SampRecs.append(SampRec)
if MagRec['er_site_name'] not in sites:
sites.append(MagRec['er_site_name'])
SiteRecs.append(SiteRec)
#except:
# print 'Boo-boo somewhere - no idea where'
if not file_found:
print("No .csv files were found")
return False, "No .csv files were found"
if len(SpecRecs)>0:
print('spec_file', spec_file)
pmag.magic_write(spec_file,SpecRecs,'er_specimens')
#print 'specimens stored in ',spec_file
if len(SampRecs)>0:
SampOut,keys=pmag.fillkeys(SampRecs)
pmag.magic_write(samp_file,SampOut,'er_samples')
#print 'samples stored in ',samp_file
if len(SiteRecs)>0:
pmag.magic_write(site_file,SiteRecs,'er_sites')
#print 'sites stored in ',site_file
MagSort=pmag.sortbykeys(MagRecs,["er_specimen_name","treatment_ac_field"])
MagOuts=[]
for MagRec in MagSort:
MagRec["treatment_ac_field"]='%8.3e'%(MagRec['treatment_ac_field']) # convert to string
MagOuts.append(MagRec)
Fixed=pmag.measurements_methods(MagOuts,noave)
if pmag.magic_write(meas_file,Fixed,'magic_measurements'):
print('data stored in ',meas_file)
return True, meas_file
else:
print('no data found. bad magfile?')
return False, 'no data found. bad magfile?' | python | def main(command_line=True, **kwargs):
"""
NAME
iodp_srm_magic.py
DESCRIPTION
converts IODP LIMS and LORE SRM archive half sample format files to magic_measurements format files
SYNTAX
iodp_srm_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input .csv file, default is all in directory
-F FILE: specify output measurements file, default is magic_measurements.txt
-Fsp FILE: specify output er_specimens.txt file, default is er_specimens.txt
-Fsa FILE: specify output er_samples.txt file, default is er_samples.txt
-Fsi FILE: specify output er_sites.txt file, default is er_sites.txt
-A : don't average replicate measurements
INPUTS
IODP .csv file format exported from LIMS database
"""
#
# initialize defaults
version_num=pmag.get_version()
meas_file='magic_measurements.txt'
spec_file='er_specimens.txt'
samp_file='er_samples.txt'
site_file='er_sites.txt'
csv_file=''
ErSpecs,ErSamps,ErSites,ErLocs,ErCits=[],[],[],[],[]
MagRecs=[]
citation="This study"
dir_path,demag='.','NRM'
args=sys.argv
noave=0
depth_method='a'
# get command line args
if command_line:
if '-WD' in args:
ind=args.index("-WD")
dir_path=args[ind+1]
if '-ID' in args:
ind = args.index('-ID')
input_dir_path = args[ind+1]
else:
input_dir_path = dir_path
output_dir_path = dir_path
if "-h" in args:
print(main.__doc__)
return False
if "-A" in args: noave=1
if '-f' in args:
ind=args.index("-f")
csv_file=args[ind+1]
if '-F' in args:
ind=args.index("-F")
meas_file=args[ind+1]
if '-Fsp' in args:
ind=args.index("-Fsp")
spec_file = args[ind+1]
if '-Fsi' in args:
ind=args.index("-Fsi")
site_file=args[ind+1]
if '-Fsa' in args:
ind=args.index("-Fsa")
samp_file = args[ind+1]
if not command_line:
dir_path = kwargs.get('dir_path', '.')
input_dir_path = kwargs.get('input_dir_path', dir_path)
output_dir_path = dir_path # rename dir_path after input_dir_path is set
noave = kwargs.get('noave', 0) # default (0) is DO average
csv_file = kwargs.get('csv_file', '')
meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
spec_file = kwargs.get('spec_file', 'er_specimens.txt')
samp_file = kwargs.get('samp_file', 'er_samples.txt')
site_file = kwargs.get('site_file', 'er_sites.txt')
# format variables
meas_file = os.path.join(output_dir_path, meas_file)
spec_file = os.path.join(output_dir_path, spec_file)
Specs,file_type = pmag.magic_read(spec_file)
samp_file = os.path.join(output_dir_path, samp_file)
ErSamps,file_type = pmag.magic_read(samp_file)
site_file = os.path.join(output_dir_path, site_file)
if csv_file=="":
filelist=os.listdir(input_dir_path) # read in list of files to import
else:
csv_file = os.path.join(input_dir_path, csv_file)
filelist=[csv_file]
# parsing the data
specimens,samples,sites=[],[],[]
MagRecs,SpecRecs,SampRecs,SiteRecs=[],[],[],[]
for samp in ErSamps:
if samp['er_sample_name'] not in samples:
samples.append(samp['er_sample_name'])
SampRecs.append(samp)
file_found = False
for f in filelist: # parse each file
if f[-3:].lower()=='csv':
file_found = True
print('processing: ',f)
full_file = os.path.join(input_dir_path, f)
with open(full_file, 'r') as fin:
file_input = fin.readlines()
            keys=file_input[0].replace('\n','').split(',') # split the header line on commas
if "Interval Top (cm) on SHLF" in keys:interval_key="Interval Top (cm) on SHLF"
if " Interval Bot (cm) on SECT" in keys:interval_key=" Interval Bot (cm) on SECT"
if "Offset (cm)" in keys: interval_key="Offset (cm)"
if "Top Depth (m)" in keys:depth_key="Top Depth (m)"
if "CSF-A Top (m)" in keys:depth_key="CSF-A Top (m)"
if "Depth CSF-A (m)" in keys:depth_key="Depth CSF-A (m)"
if "CSF-B Top (m)" in keys:
comp_depth_key="CSF-B Top (m)" # use this model if available
elif "Depth CSF-B (m)" in keys:
comp_depth_key="Depth CSF-B (m)"
else:
comp_depth_key=""
if "Demag level (mT)" in keys:demag_key="Demag level (mT)"
if "Demag Level (mT)" in keys: demag_key="Demag Level (mT)"
if "Inclination (Tray- and Bkgrd-Corrected) (deg)" in keys:inc_key="Inclination (Tray- and Bkgrd-Corrected) (deg)"
if "Inclination background + tray corrected (deg)" in keys:inc_key="Inclination background + tray corrected (deg)"
if "Inclination background + tray corrected (\xc2\xb0)" in keys:inc_key="Inclination background + tray corrected (\xc2\xb0)"
if "Inclination background & tray corrected (deg)" in keys:inc_key="Inclination background & tray corrected (deg)"
if "Declination (Tray- and Bkgrd-Corrected) (deg)" in keys:dec_key="Declination (Tray- and Bkgrd-Corrected) (deg)"
if "Declination background + tray corrected (deg)" in keys:dec_key="Declination background + tray corrected (deg)"
if "Declination background + tray corrected (\xc2\xb0)" in keys:dec_key="Declination background + tray corrected (\xc2\xb0)"
if "Declination background & tray corrected (deg)" in keys:dec_key="Declination background & tray corrected (deg)"
if "Intensity (Tray- and Bkgrd-Corrected) (A/m)" in keys:int_key="Intensity (Tray- and Bkgrd-Corrected) (A/m)"
if "Intensity background + tray corrected (A/m)" in keys:int_key="Intensity background + tray corrected (A/m)"
if "Intensity background & tray corrected (A/m)" in keys:int_key="Intensity background & tray corrected (A/m)"
if "Core Type" in keys:
core_type="Core Type"
else: core_type="Type"
if 'Run Number' in keys: run_number_key='Run Number'
if 'Test No.' in keys: run_number_key='Test No.'
if 'Test Changed On' in keys: date_key='Test Changed On'
if "Timestamp (UTC)" in keys: date_key="Timestamp (UTC)"
if "Section" in keys: sect_key="Section"
if "Sect" in keys: sect_key="Sect"
if 'Section Half' in keys: half_key='Section Half'
if "A/W" in keys: half_key="A/W"
if "Text ID" in keys: text_id="Text ID"
if "Text Id" in keys: text_id="Text Id"
for line in file_input[1:]:
InRec={}
test=0
recs=line.split(',')
                if len(recs)==len(keys): # only parse well-formed lines
                    for k in range(len(keys)):
                        InRec[keys[k]]=recs[k] # reuse the existing split rather than re-splitting per column
                if InRec.get('Exp','')!="": test=1 # get rid of pesky blank (or malformed) lines
if test==1:
run_number=""
inst="IODP-SRM"
volume='15.59' # set default volume to this
MagRec,SpecRec,SampRec,SiteRec={},{},{},{}
expedition=InRec['Exp']
location=InRec['Site']+InRec['Hole']
# Maintain backward compatibility for the ever-changing LIMS format (Argh!)
while len(InRec['Core'])<3:
InRec['Core']='0'+InRec['Core']
if "Last Tray Measurment" in list(InRec.keys()) and "SHLF" not in InRec[text_id] or 'dscr' in csv_file : # assume discrete sample
specimen=expedition+'-'+location+'-'+InRec['Core']+InRec[core_type]+"-"+InRec[sect_key]+'-'+InRec[half_key]+'-'+str(InRec[interval_key])
else: # mark as continuous measurements
specimen=expedition+'-'+location+'-'+InRec['Core']+InRec[core_type]+"_"+InRec[sect_key]+InRec[half_key]+'-'+str(InRec[interval_key])
SpecRec['er_expedition_name']=expedition
SpecRec['er_location_name']=location
SpecRec['er_site_name']=specimen
SpecRec['er_citation_names']=citation
for key in list(SpecRec.keys()):SampRec[key]=SpecRec[key]
for key in list(SpecRec.keys()):SiteRec[key]=SpecRec[key]
SampRec['sample_azimuth']='0'
SampRec['sample_dip']='0'
SampRec['sample_core_depth']=InRec[depth_key]
if comp_depth_key!='':
SampRec['sample_composite_depth']=InRec[comp_depth_key]
if "SHLF" not in InRec[text_id]:
SampRec['magic_method_codes']='FS-C-DRILL-IODP:SP-SS-C:SO-V'
else:
SampRec['magic_method_codes']='FS-C-DRILL-IODP:SO-V'
SpecRec['er_specimen_name']=specimen
SpecRec['er_sample_name']=specimen
SampRec['er_sample_name']=specimen
SampRec['er_specimen_names']=specimen
SiteRec['er_specimen_names']=specimen
for key in list(SpecRec.keys()):MagRec[key]=SpecRec[key]
# set up measurement record - default is NRM
#MagRec['er_analyst_mail_names']=InRec['Test Entered By']
MagRec['magic_software_packages']=version_num
MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["treatment_ac_field"]=0
MagRec["treatment_dc_field"]='0'
MagRec["treatment_dc_field_phi"]='0'
MagRec["treatment_dc_field_theta"]='0'
MagRec["measurement_flag"]='g' # assume all data are "good"
MagRec["measurement_standard"]='u' # assume all data are "good"
SpecRec['er_specimen_alternatives']=InRec[text_id]
if 'Sample Area (cm?)' in list(InRec.keys()) and InRec['Sample Area (cm?)']!= "": volume=InRec['Sample Area (cm?)']
if InRec[run_number_key]!= "": run_number=InRec[run_number_key]
                    datestamp=InRec[date_key].split() # split the date/time field into date and time parts
if '/' in datestamp[0]:
mmddyy=datestamp[0].split('/') # break into month day year
if len(mmddyy[0])==1: mmddyy[0]='0'+mmddyy[0] # make 2 characters
if len(mmddyy[1])==1: mmddyy[1]='0'+mmddyy[1] # make 2 characters
if len(datestamp[1])==1: datestamp[1]='0'+datestamp[1] # make 2 characters
date='20'+mmddyy[2]+':'+mmddyy[0]+":"+mmddyy[1] +':' +datestamp[1]+":00.00"
if '-' in datestamp[0]:
mmddyy=datestamp[0].split('-') # break into month day year
date=mmddyy[0]+':'+mmddyy[1]+":"+mmddyy[2] +':' +datestamp[1]+":00.00"
MagRec["measurement_date"]=date
MagRec["magic_method_codes"]='LT-NO'
if InRec[demag_key]!="0":
MagRec['magic_method_codes'] = 'LT-AF-Z'
inst=inst+':IODP-SRM-AF' # measured on shipboard in-line 2G AF
treatment_value=float(InRec[demag_key].strip('"'))*1e-3 # convert mT => T
MagRec["treatment_ac_field"]=treatment_value # AF demag in treat mT => T
if 'Treatment Type' in list(InRec.keys()) and InRec['Treatment Type']!="":
if 'Alternating Frequency' in InRec['Treatment Type']:
MagRec['magic_method_codes'] = 'LT-AF-Z'
                            inst=inst+':IODP-DTECH' # measured on shipboard Dtech D2000
treatment_value=float(InRec['Treatment Value'])*1e-3 # convert mT => T
MagRec["treatment_ac_field"]=treatment_value # AF demag in treat mT => T
elif 'Thermal' in InRec['Treatment Type']:
MagRec['magic_method_codes'] = 'LT-T-Z'
inst=inst+':IODP-TDS' # measured on shipboard Schonstedt thermal demagnetizer
treatment_value=float(InRec['Treatment Value'])+273 # convert C => K
MagRec["treatment_temp"]='%8.3e'%(treatment_value) #
MagRec["measurement_standard"]='u' # assume all data are "good"
vol=float(volume)*1e-6 # convert from cc to m^3
if run_number!="":
MagRec['external_database_ids']=run_number
MagRec['external_database_names']='LIMS'
else:
MagRec['external_database_ids']=""
MagRec['external_database_names']=''
MagRec['measurement_inc']=InRec[inc_key].strip('"')
MagRec['measurement_dec']=InRec[dec_key].strip('"')
intens= InRec[int_key].strip('"')
MagRec['measurement_magn_moment']='%8.3e'%(float(intens)*vol) # convert intensity from A/m to Am^2 using vol
MagRec['magic_instrument_codes']=inst
MagRec['measurement_number']='1'
MagRec['measurement_csd']=''
MagRec['measurement_positions']=''
MagRecs.append(MagRec)
if specimen not in specimens:
specimens.append(specimen)
SpecRecs.append(SpecRec)
if MagRec['er_sample_name'] not in samples:
samples.append(MagRec['er_sample_name'])
SampRecs.append(SampRec)
if MagRec['er_site_name'] not in sites:
sites.append(MagRec['er_site_name'])
SiteRecs.append(SiteRec)
#except:
# print 'Boo-boo somewhere - no idea where'
if not file_found:
print("No .csv files were found")
return False, "No .csv files were found"
if len(SpecRecs)>0:
print('spec_file', spec_file)
pmag.magic_write(spec_file,SpecRecs,'er_specimens')
#print 'specimens stored in ',spec_file
if len(SampRecs)>0:
SampOut,keys=pmag.fillkeys(SampRecs)
pmag.magic_write(samp_file,SampOut,'er_samples')
#print 'samples stored in ',samp_file
if len(SiteRecs)>0:
pmag.magic_write(site_file,SiteRecs,'er_sites')
#print 'sites stored in ',site_file
MagSort=pmag.sortbykeys(MagRecs,["er_specimen_name","treatment_ac_field"])
MagOuts=[]
for MagRec in MagSort:
MagRec["treatment_ac_field"]='%8.3e'%(MagRec['treatment_ac_field']) # convert to string
MagOuts.append(MagRec)
Fixed=pmag.measurements_methods(MagOuts,noave)
if pmag.magic_write(meas_file,Fixed,'magic_measurements'):
print('data stored in ',meas_file)
return True, meas_file
else:
print('no data found. bad magfile?')
return False, 'no data found. bad magfile?' | NAME
iodp_srm_magic.py
DESCRIPTION
converts IODP LIMS and LORE SRM archive half sample format files to magic_measurements format files
SYNTAX
iodp_srm_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input .csv file, default is all in directory
-F FILE: specify output measurements file, default is magic_measurements.txt
-Fsp FILE: specify output er_specimens.txt file, default is er_specimens.txt
-Fsa FILE: specify output er_samples.txt file, default is er_samples.txt
-Fsi FILE: specify output er_sites.txt file, default is er_sites.txt
-A : don't average replicate measurements
INPUTS
IODP .csv file format exported from LIMS database | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/conversion_scripts2/iodp_srm_magic2.py#L9-L297 |
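Since main() accepts command_line=False plus keyword arguments, the converter can also be driven programmatically; a sketch with hypothetical paths (note that the er_specimens.txt and er_samples.txt files are read from the output directory, so they should already exist there):

# hypothetical directory layout; kwargs mirror those parsed above
ok, result = main(command_line=False,
                  dir_path="exp362",
                  input_dir_path="exp362/raw",
                  csv_file="srm_archive.csv",
                  noave=1)  # 1 = do not average replicate measurements
if ok:
    print("measurements written to", result)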
PmagPy/PmagPy | programs/agm_magic2.py | main | def main():
"""
NAME
agm_magic.py
DESCRIPTION
converts Micromag agm files to magic format
SYNTAX
agm_magic.py [-h] [command line options]
OPTIONS
-usr USER: identify user, default is "" - put in quotation marks!
        -bak: this is an IRM backfield curve
-f FILE, specify input file, required
-fsa SAMPFILE, specify er_samples.txt file relating samples, site and locations names,default is none
-F MFILE, specify magic measurements formatted output file, default is agm_measurements.txt
-spn SPEC, specimen name, default is base of input file name, e.g. SPECNAME.agm
-spc NUM, specify number of characters to designate a specimen, default = 0
-Fsp SPECFILE : name of er_specimens.txt file for appending data to
[default: er_specimens.txt]
-ncn NCON,: specify naming convention: default is #1 below
-syn SYN, synthetic specimen name
-loc LOCNAME : specify location/study name,
should have either LOCNAME or SAMPFILE (unless synthetic)
        -ins INST : specify which instrument was used (e.g., SIO-Maud), default is ""
-u units: [cgs,SI], default is cgs
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
            [2] XXXX-YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
            [3] XXXX.YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
[8] specimen is a synthetic - it has no sample, site, location information
            NB: all others you will have to customize yourself
or e-mail [email protected] for help.
OUTPUT
MagIC format files: magic_measurements, er_specimens, er_sample, er_site
"""
citation='This study'
MeasRecs=[]
units='cgs'
meth="LP-HYS"
version_num=pmag.get_version()
args=sys.argv
fmt='old'
er_sample_name,er_site_name,er_location_name="","",""
inst=""
er_location_name="unknown"
er_synthetic_name=""
user=""
er_site_name=""
dir_path='.'
dm=3
if "-WD" in args:
ind=args.index("-WD")
dir_path=args[ind+1]
if "-ID" in args:
ind = args.index("-ID")
input_dir_path = args[ind+1]
else:
input_dir_path = dir_path
output_dir_path = dir_path
specfile = output_dir_path+'/er_specimens.txt'
output = output_dir_path+"/agm_measurements.txt"
if "-h" in args:
print(main.__doc__)
sys.exit()
if "-bak" in args:
meth="LP-IRM-DCD"
output = output_dir_path+"/irm_measurements.txt"
if "-new" in args: fmt='new'
if "-usr" in args:
ind=args.index("-usr")
user=args[ind+1]
if '-F' in args:
ind=args.index("-F")
output = output_dir_path+'/'+args[ind+1]
if '-f' in args:
ind=args.index("-f")
agm_file= input_dir_path+'/'+args[ind+1]
er_specimen_name=args[ind+1].split('.')[0]
else:
print("agm_file field is required option")
print(main.__doc__)
sys.exit()
if '-Fsp' in args:
ind=args.index("-Fsp")
specfile= output_dir_path+'/'+args[ind+1]
specnum,samp_con,Z=0,'1',1
if "-spc" in args:
ind=args.index("-spc")
specnum=int(args[ind+1])
if specnum!=0:specnum=-specnum
if "-spn" in args:
ind=args.index("-spn")
er_specimen_name=args[ind+1]
#elif "-syn" not in args:
# print "you must specify a specimen name"
# sys.exit()
if "-syn" in args:
ind=args.index("-syn")
er_synthetic_name=args[ind+1]
er_specimen_name=""
if "-loc" in args:
ind=args.index("-loc")
er_location_name=args[ind+1]
if "-fsa" in args:
ind=args.index("-fsa")
sampfile = input_dir_path+'/'+args[ind+1]
Samps,file_type=pmag.magic_read(sampfile)
print('sample_file successfully read in')
if "-ncn" in args:
ind=args.index("-ncn")
samp_con=sys.argv[ind+1]
if "4" in samp_con:
if "-" not in samp_con:
print("option [4] must be in form 4-Z where Z is an integer")
sys.exit()
else:
Z=samp_con.split("-")[1]
samp_con="4"
if "7" in samp_con:
if "-" not in samp_con:
print("option [7] must be in form 7-Z where Z is an integer")
sys.exit()
else:
Z=samp_con.split("-")[1]
samp_con="7"
if "-ins" in args:
ind=args.index("-ins")
inst=args[ind+1]
if "-u" in args:
ind=args.index("-u")
units=args[ind+1]
dm = pmag.get_named_arg("-DM", 2)
ErSpecRecs,filetype=pmag.magic_read(specfile)
ErSpecRec,MeasRec={},{}
ErSpecRec['er_citation_names']="This study"
ErSpecRec['er_specimen_name']=er_specimen_name
ErSpecRec['er_synthetic_name']=er_synthetic_name
if specnum!=0:
ErSpecRec["er_sample_name"]=er_specimen_name[:specnum]
else:
ErSpecRec["er_sample_name"]=er_specimen_name
if "-fsa" in args and er_synthetic_name=="":
for samp in Samps:
if samp["er_sample_name"] == ErSpecRec["er_sample_name"]:
ErSpecRec["er_location_name"]=samp["er_location_name"]
ErSpecRec["er_site_name"]=samp["er_site_name"]
break
elif int(samp_con)!=6 and int(samp_con)!=8:
site=pmag.parse_site(ErSpecRec['er_sample_name'],samp_con,Z)
ErSpecRec["er_site_name"]=site
ErSpecRec["er_location_name"]=er_location_name
ErSpecRec['er_scientist_mail_names']=user.strip()
insert=1
for rec in ErSpecRecs:
if rec['er_specimen_name']==er_specimen_name:
insert=0
break
if insert==1:
ErSpecRecs.append(ErSpecRec)
ErSpecRecs,keylist=pmag.fillkeys(ErSpecRecs)
pmag.magic_write(specfile,ErSpecRecs,'er_specimens')
print("specimen name put in ",specfile)
f=open(agm_file,'r')
Data=f.readlines()
if "ASCII" not in Data[0]:fmt='new'
measnum,start=1,""
if fmt=='new': # new Micromag formatted file
end=2
for skip in range(len(Data)):
line=Data[skip]
rec=line.split()
if 'Units' in line:units=rec[-1]
if "Raw" in rec:
start=skip+2
if "Field" in rec and "Moment" in rec and start=="":
start=skip+2
break
else:
start = 2
end=1
for i in range(start,len(Data)-end): # skip header stuff
MeasRec={}
for key in list(ErSpecRec.keys()):
MeasRec[key]=ErSpecRec[key]
MeasRec['magic_instrument_codes']=inst
MeasRec['magic_method_codes']=meth
if 'er_synthetic_name' in list(MeasRec.keys()) and MeasRec['er_synthetic_name']!="":
MeasRec['magic_experiment_name']=er_synthetic_name+':'+meth
else:
MeasRec['magic_experiment_name']=er_specimen_name+':'+meth
line=Data[i]
rec=line.split(',') # data comma delimited
if rec[0]!='\n':
if units=='cgs':
field =float(rec[0])*1e-4 # convert from oe to tesla
else:
field =float(rec[0]) # field in tesla
if meth=="LP-HYS":
MeasRec['measurement_lab_field_dc']='%10.3e'%(field)
MeasRec['treatment_dc_field']=''
else:
MeasRec['measurement_lab_field_dc']=''
MeasRec['treatment_dc_field']='%10.3e'%(field)
if units=='cgs':
MeasRec['measurement_magn_moment']='%10.3e'%(float(rec[1])*1e-3) # convert from emu to Am^2
else:
MeasRec['measurement_magn_moment']='%10.3e'%(float(rec[1])) # Am^2
MeasRec['treatment_temp']='273' # temp in kelvin
MeasRec['measurement_temp']='273' # temp in kelvin
MeasRec['measurement_flag']='g'
MeasRec['measurement_standard']='u'
MeasRec['measurement_number']='%i'%(measnum)
measnum+=1
MeasRec['magic_software_packages']=version_num
MeasRecs.append(MeasRec)
# now we have to relabel LP-HYS method codes. initial loop is LP-IMT, minor loops are LP-M - do this in measurements_methods function
if meth=='LP-HYS':
recnum=0
        while recnum+1<len(MeasRecs) and float(MeasRecs[recnum]['measurement_lab_field_dc'])<float(MeasRecs[recnum+1]['measurement_lab_field_dc']): # bounds check first to avoid an IndexError on the last record; this is LP-IMAG
MeasRecs[recnum]['magic_method_codes']='LP-IMAG'
MeasRecs[recnum]['magic_experiment_name']=MeasRecs[recnum]['er_specimen_name']+":"+'LP-IMAG'
recnum+=1
#
    if int(dm)==2:
        pmag.magic_write(output,MeasRecs,'magic_measurements')
    else:
        print('MagIC 3 is not supported yet')
        sys.exit()
        # placeholder for eventual MagIC 3 output; unreachable until the exit above is removed
        pmag.magic_write(output,MeasRecs,'measurements')
print("results put in ", output) | python | def main():
"""
NAME
agm_magic.py
DESCRIPTION
converts Micromag agm files to magic format
SYNTAX
agm_magic.py [-h] [command line options]
OPTIONS
-usr USER: identify user, default is "" - put in quotation marks!
        -bak: this is an IRM backfield curve
-f FILE, specify input file, required
-fsa SAMPFILE, specify er_samples.txt file relating samples, site and locations names,default is none
-F MFILE, specify magic measurements formatted output file, default is agm_measurements.txt
-spn SPEC, specimen name, default is base of input file name, e.g. SPECNAME.agm
-spc NUM, specify number of characters to designate a specimen, default = 0
-Fsp SPECFILE : name of er_specimens.txt file for appending data to
[default: er_specimens.txt]
-ncn NCON,: specify naming convention: default is #1 below
-syn SYN, synthetic specimen name
-loc LOCNAME : specify location/study name,
should have either LOCNAME or SAMPFILE (unless synthetic)
        -ins INST : specify which instrument was used (e.g., SIO-Maud), default is ""
-u units: [cgs,SI], default is cgs
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
            [2] XXXX-YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
            [3] XXXX.YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
[8] specimen is a synthetic - it has no sample, site, location information
            NB: all others you will have to customize yourself
or e-mail [email protected] for help.
OUTPUT
MagIC format files: magic_measurements, er_specimens, er_sample, er_site
"""
citation='This study'
MeasRecs=[]
units='cgs'
meth="LP-HYS"
version_num=pmag.get_version()
args=sys.argv
fmt='old'
er_sample_name,er_site_name,er_location_name="","",""
inst=""
er_location_name="unknown"
er_synthetic_name=""
user=""
er_site_name=""
dir_path='.'
dm=3
if "-WD" in args:
ind=args.index("-WD")
dir_path=args[ind+1]
if "-ID" in args:
ind = args.index("-ID")
input_dir_path = args[ind+1]
else:
input_dir_path = dir_path
output_dir_path = dir_path
specfile = output_dir_path+'/er_specimens.txt'
output = output_dir_path+"/agm_measurements.txt"
if "-h" in args:
print(main.__doc__)
sys.exit()
if "-bak" in args:
meth="LP-IRM-DCD"
output = output_dir_path+"/irm_measurements.txt"
if "-new" in args: fmt='new'
if "-usr" in args:
ind=args.index("-usr")
user=args[ind+1]
if '-F' in args:
ind=args.index("-F")
output = output_dir_path+'/'+args[ind+1]
if '-f' in args:
ind=args.index("-f")
agm_file= input_dir_path+'/'+args[ind+1]
er_specimen_name=args[ind+1].split('.')[0]
else:
print("agm_file field is required option")
print(main.__doc__)
sys.exit()
if '-Fsp' in args:
ind=args.index("-Fsp")
specfile= output_dir_path+'/'+args[ind+1]
specnum,samp_con,Z=0,'1',1
if "-spc" in args:
ind=args.index("-spc")
specnum=int(args[ind+1])
if specnum!=0:specnum=-specnum
if "-spn" in args:
ind=args.index("-spn")
er_specimen_name=args[ind+1]
#elif "-syn" not in args:
# print "you must specify a specimen name"
# sys.exit()
if "-syn" in args:
ind=args.index("-syn")
er_synthetic_name=args[ind+1]
er_specimen_name=""
if "-loc" in args:
ind=args.index("-loc")
er_location_name=args[ind+1]
if "-fsa" in args:
ind=args.index("-fsa")
sampfile = input_dir_path+'/'+args[ind+1]
Samps,file_type=pmag.magic_read(sampfile)
print('sample_file successfully read in')
if "-ncn" in args:
ind=args.index("-ncn")
samp_con=sys.argv[ind+1]
if "4" in samp_con:
if "-" not in samp_con:
print("option [4] must be in form 4-Z where Z is an integer")
sys.exit()
else:
Z=samp_con.split("-")[1]
samp_con="4"
if "7" in samp_con:
if "-" not in samp_con:
print("option [7] must be in form 7-Z where Z is an integer")
sys.exit()
else:
Z=samp_con.split("-")[1]
samp_con="7"
if "-ins" in args:
ind=args.index("-ins")
inst=args[ind+1]
if "-u" in args:
ind=args.index("-u")
units=args[ind+1]
dm = pmag.get_named_arg("-DM", 2)
ErSpecRecs,filetype=pmag.magic_read(specfile)
ErSpecRec,MeasRec={},{}
ErSpecRec['er_citation_names']="This study"
ErSpecRec['er_specimen_name']=er_specimen_name
ErSpecRec['er_synthetic_name']=er_synthetic_name
if specnum!=0:
ErSpecRec["er_sample_name"]=er_specimen_name[:specnum]
else:
ErSpecRec["er_sample_name"]=er_specimen_name
if "-fsa" in args and er_synthetic_name=="":
for samp in Samps:
if samp["er_sample_name"] == ErSpecRec["er_sample_name"]:
ErSpecRec["er_location_name"]=samp["er_location_name"]
ErSpecRec["er_site_name"]=samp["er_site_name"]
break
elif int(samp_con)!=6 and int(samp_con)!=8:
site=pmag.parse_site(ErSpecRec['er_sample_name'],samp_con,Z)
ErSpecRec["er_site_name"]=site
ErSpecRec["er_location_name"]=er_location_name
ErSpecRec['er_scientist_mail_names']=user.strip()
insert=1
for rec in ErSpecRecs:
if rec['er_specimen_name']==er_specimen_name:
insert=0
break
if insert==1:
ErSpecRecs.append(ErSpecRec)
ErSpecRecs,keylist=pmag.fillkeys(ErSpecRecs)
pmag.magic_write(specfile,ErSpecRecs,'er_specimens')
print("specimen name put in ",specfile)
f=open(agm_file,'r')
Data=f.readlines()
if "ASCII" not in Data[0]:fmt='new'
measnum,start=1,""
if fmt=='new': # new Micromag formatted file
end=2
for skip in range(len(Data)):
line=Data[skip]
rec=line.split()
if 'Units' in line:units=rec[-1]
if "Raw" in rec:
start=skip+2
if "Field" in rec and "Moment" in rec and start=="":
start=skip+2
break
else:
start = 2
end=1
for i in range(start,len(Data)-end): # skip header stuff
MeasRec={}
for key in list(ErSpecRec.keys()):
MeasRec[key]=ErSpecRec[key]
MeasRec['magic_instrument_codes']=inst
MeasRec['magic_method_codes']=meth
if 'er_synthetic_name' in list(MeasRec.keys()) and MeasRec['er_synthetic_name']!="":
MeasRec['magic_experiment_name']=er_synthetic_name+':'+meth
else:
MeasRec['magic_experiment_name']=er_specimen_name+':'+meth
line=Data[i]
rec=line.split(',') # data comma delimited
if rec[0]!='\n':
if units=='cgs':
field =float(rec[0])*1e-4 # convert from oe to tesla
else:
field =float(rec[0]) # field in tesla
if meth=="LP-HYS":
MeasRec['measurement_lab_field_dc']='%10.3e'%(field)
MeasRec['treatment_dc_field']=''
else:
MeasRec['measurement_lab_field_dc']=''
MeasRec['treatment_dc_field']='%10.3e'%(field)
if units=='cgs':
MeasRec['measurement_magn_moment']='%10.3e'%(float(rec[1])*1e-3) # convert from emu to Am^2
else:
MeasRec['measurement_magn_moment']='%10.3e'%(float(rec[1])) # Am^2
MeasRec['treatment_temp']='273' # temp in kelvin
MeasRec['measurement_temp']='273' # temp in kelvin
MeasRec['measurement_flag']='g'
MeasRec['measurement_standard']='u'
MeasRec['measurement_number']='%i'%(measnum)
measnum+=1
MeasRec['magic_software_packages']=version_num
MeasRecs.append(MeasRec)
# now we have to relabel LP-HYS method codes. initial loop is LP-IMT, minor loops are LP-M - do this in measurements_methods function
if meth=='LP-HYS':
recnum=0
        while recnum+1<len(MeasRecs) and float(MeasRecs[recnum]['measurement_lab_field_dc'])<float(MeasRecs[recnum+1]['measurement_lab_field_dc']): # bounds check first to avoid an IndexError on the last record; this is LP-IMAG
MeasRecs[recnum]['magic_method_codes']='LP-IMAG'
MeasRecs[recnum]['magic_experiment_name']=MeasRecs[recnum]['er_specimen_name']+":"+'LP-IMAG'
recnum+=1
#
    if int(dm)==2:
        pmag.magic_write(output,MeasRecs,'magic_measurements')
    else:
        print('MagIC 3 is not supported yet')
        sys.exit()
        # placeholder for eventual MagIC 3 output; unreachable until the exit above is removed
        pmag.magic_write(output,MeasRecs,'measurements')
print("results put in ", output) | NAME
agm_magic.py
DESCRIPTION
converts Micromag agm files to magic format
SYNTAX
agm_magic.py [-h] [command line options]
OPTIONS
-usr USER: identify user, default is "" - put in quotation marks!
    -bak: this is an IRM backfield curve
-f FILE, specify input file, required
-fsa SAMPFILE, specify er_samples.txt file relating samples, site and locations names,default is none
-F MFILE, specify magic measurements formatted output file, default is agm_measurements.txt
-spn SPEC, specimen name, default is base of input file name, e.g. SPECNAME.agm
-spc NUM, specify number of characters to designate a specimen, default = 0
-Fsp SPECFILE : name of er_specimens.txt file for appending data to
[default: er_specimens.txt]
-ncn NCON,: specify naming convention: default is #1 below
-syn SYN, synthetic specimen name
-loc LOCNAME : specify location/study name,
should have either LOCNAME or SAMPFILE (unless synthetic)
    -ins INST : specify which instrument was used (e.g., SIO-Maud), default is ""
-u units: [cgs,SI], default is cgs
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
[8] specimen is a synthetic - it has no sample, site, location information
NB: all others you will have to customize your self
or e-mail [email protected] for help.
OUTPUT
MagIC format files: magic_measurements, er_specimens, er_sample, er_site | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/agm_magic2.py#L10-L250 |
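The cgs handling above boils down to two fixed conversion factors; a standalone sketch of the same arithmetic:

def cgs_to_si(field_oe, moment_emu):
    """Convert an AGM (field, moment) pair from cgs to SI,
    mirroring the per-line conversions in the loop above."""
    field_tesla = field_oe * 1e-4    # 1 Oe corresponds to 1e-4 T
    moment_Am2 = moment_emu * 1e-3   # 1 emu = 1e-3 Am^2
    return field_tesla, moment_Am2

print(cgs_to_si(100.0, 2.5e-3))  # hypothetical values -> (0.01, 2.5e-06)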
PmagPy/PmagPy | programs/di_vgp.py | main | def main():
"""
NAME
di_vgp.py
DESCRIPTION
converts declination/inclination to virtual geomagnetic pole
SYNTAX
di_vgp.py [-h] [options]
OPTIONS
-h prints help message and quits
-i interactive data entry
        -f FILE to specify input file
-F FILE to specify output file
<filename to read/write from/to standard input
INPUT
for file entry:
D I SLAT SLON
where:
D: declination
I: inclination
SLAT: site latitude (positive north)
SLON: site longitude (positive east)
OUTPUT
PLON PLAT
where:
PLAT: pole latitude
PLON: pole longitude (positive east)
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-F' in sys.argv:
ind=sys.argv.index('-F')
ofile=sys.argv[ind+1]
out=open(ofile,'w')
else:
out=''
    if '-i' in sys.argv: # interactive data entry mode
a95=0
while 1:
try:
ans = input("Input Declination: <cntrl-D to quit> ")
Dec = float(ans) # assign input to Dec, after conversion to floating point
ans = input("Input Inclination: ")
Inc = float(ans)
ans = input("Input Site Latitude: ")
slat = float(ans)
ans = input("Input Site Longitude: ")
slong = float(ans)
output = pmag.dia_vgp(Dec,Inc,a95,slat,slong)
print('%7.1f %7.1f'%(output[0],output[1]))
except:
print("\n Good-bye\n")
sys.exit()
elif '-f' in sys.argv: # input of file name
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
data=numpy.loadtxt(file)
else: #
        data = numpy.loadtxt(sys.stdin,dtype=float) # read from standard input (numpy.float was removed in NumPy 1.24)
if len(data.shape)>1: # 2-D array
N=data.shape[0]
if data.shape[1]==4: # only dec,inc,sitelat, site long -no alpha95
data=data.transpose()
inlist=numpy.array([data[0],data[1],numpy.zeros(N),data[2],data[3]]).transpose()
output = pmag.dia_vgp(inlist)
for k in range(N):
if out=='':
print('%7.1f %7.1f'%(output[0][k],output[1][k]))
else:
out.write('%7.1f %7.1f\n'%(output[0][k],output[1][k]))
else: # single line of data
if len(data)==4:
data=[data[0],data[1],0,data[2],data[3]]
output = pmag.dia_vgp(data)
if out=='': # spit to standard output
print('%7.1f %7.1f'%(output[0],output[1]))
else: # write to file
out.write('%7.1f %7.1f\n'%(output[0],output[1])) | python | def main():
"""
NAME
di_vgp.py
DESCRIPTION
converts declination/inclination to virtual geomagnetic pole
SYNTAX
di_vgp.py [-h] [options]
OPTIONS
-h prints help message and quits
-i interactive data entry
        -f FILE to specify input file
-F FILE to specify output file
<filename to read/write from/to standard input
INPUT
for file entry:
D I SLAT SLON
where:
D: declination
I: inclination
SLAT: site latitude (positive north)
SLON: site longitude (positive east)
OUTPUT
PLON PLAT
where:
PLAT: pole latitude
PLON: pole longitude (positive east)
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-F' in sys.argv:
ind=sys.argv.index('-F')
ofile=sys.argv[ind+1]
out=open(ofile,'w')
else:
out=''
    if '-i' in sys.argv: # interactive data entry mode
a95=0
while 1:
try:
ans = input("Input Declination: <cntrl-D to quit> ")
Dec = float(ans) # assign input to Dec, after conversion to floating point
ans = input("Input Inclination: ")
Inc = float(ans)
ans = input("Input Site Latitude: ")
slat = float(ans)
ans = input("Input Site Longitude: ")
slong = float(ans)
output = pmag.dia_vgp(Dec,Inc,a95,slat,slong)
print('%7.1f %7.1f'%(output[0],output[1]))
except:
print("\n Good-bye\n")
sys.exit()
elif '-f' in sys.argv: # input of file name
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
data=numpy.loadtxt(file)
else: #
        data = numpy.loadtxt(sys.stdin,dtype=float) # read from standard input (numpy.float was removed in NumPy 1.24)
if len(data.shape)>1: # 2-D array
N=data.shape[0]
if data.shape[1]==4: # only dec,inc,sitelat, site long -no alpha95
data=data.transpose()
inlist=numpy.array([data[0],data[1],numpy.zeros(N),data[2],data[3]]).transpose()
output = pmag.dia_vgp(inlist)
for k in range(N):
if out=='':
print('%7.1f %7.1f'%(output[0][k],output[1][k]))
else:
out.write('%7.1f %7.1f\n'%(output[0][k],output[1][k]))
else: # single line of data
if len(data)==4:
data=[data[0],data[1],0,data[2],data[3]]
output = pmag.dia_vgp(data)
if out=='': # spit to standard output
print('%7.1f %7.1f'%(output[0],output[1]))
else: # write to file
out.write('%7.1f %7.1f\n'%(output[0],output[1])) | NAME
di_vgp.py
DESCRIPTION
converts declination/inclination to virtual geomagnetic pole
SYNTAX
di_vgp.py [-h] [options]
OPTIONS
-h prints help message and quits
-i interactive data entry
    -f FILE to specify input file
-F FILE to specify output file
<filename to read/write from/to standard input
INPUT
for file entry:
D I SLAT SLON
where:
D: declination
I: inclination
SLAT: site latitude (positive north)
SLON: site longitude (positive east)
OUTPUT
PLON PLAT
where:
PLAT: pole latitude
PLON: pole longitude (positive east) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/di_vgp.py#L9-L91 |
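A sketch of the single-record path through pmag.dia_vgp (values hypothetical; as the OUTPUT section notes, pole longitude comes first):

from pmagpy import pmag

# dec, inc, alpha95 (0 when unused), site latitude, site longitude
result = pmag.dia_vgp(11.0, 45.0, 0, 33.0, -117.0)
plon, plat = result[0], result[1]  # same indexing the script uses above
print('%7.1f %7.1f' % (plon, plat))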
PmagPy/PmagPy | dialogs/pmag_gui_dialogs.py | convert_HUJI_files_to_MagIC.on_okButton | def on_okButton(self, event):
"""
    grab user input values, format them, and run the HUJI conversion (huji_magic_new.py) with the appropriate flags
"""
os.chdir(self.WD)
options = {}
HUJI_file = self.bSizer0.return_value()
if not HUJI_file:
pw.simple_warning("You must select a HUJI format file")
return False
options['magfile'] = HUJI_file
dat_file = self.bSizer0A.return_value()
if os.path.isfile(dat_file): options['datafile'] = dat_file
else: dat_file=""
magicoutfile=os.path.split(HUJI_file)[1]+".magic"
outfile=os.path.join(self.WD, magicoutfile)
options['meas_file'] = outfile
magicoutfile=os.path.split(HUJI_file)[1]+"_specimens.txt"
spec_outfile=os.path.join(self.WD, magicoutfile)
options['spec_file'] = spec_outfile
magicoutfile=os.path.split(HUJI_file)[1]+"_samples.txt"
samp_outfile=os.path.join(self.WD, magicoutfile)
options['samp_file'] = samp_outfile
magicoutfile=os.path.split(HUJI_file)[1]+"_sites.txt"
site_outfile=os.path.join(self.WD, magicoutfile)
options['site_file'] = site_outfile
magicoutfile=os.path.split(HUJI_file)[1]+"_locations.txt"
loc_outfile=os.path.join(self.WD, magicoutfile)
options['loc_file'] = loc_outfile
user = self.bSizer1.return_value()
options['user'] = user
if user:
user = '-usr ' + user
experiment_type = self.bSizer2.return_value()
options['codelist'] = experiment_type
if not experiment_type:
pw.simple_warning("You must select an experiment type")
return False
cooling_rate = self.cooling_rate.GetValue() or 0
if cooling_rate:
experiment_type = experiment_type + " " + cooling_rate
lab_field = self.bSizer3.return_value()
if not lab_field:
lab_field = "0 0 0"
lab_field_list = lab_field.split()
options['labfield'] = lab_field_list[0]
options['phi'] = lab_field_list[1]
options['theta'] = lab_field_list[2]
lab_field = '-dc ' + lab_field
spc = self.bSizer4.return_value()
options['specnum'] = spc or 0
if not spc:
spc = '-spc 0'
else:
spc = '-spc ' + spc
ncn = self.bSizer5.return_value()
options['samp_con'] = ncn
loc_name = self.bSizer6.return_value()
options['location'] = loc_name
if loc_name:
loc_name = '-loc ' + loc_name
#peak_AF = self.bSizer7.return_value()
#options['peakfield'] = peak_AF
replicate = self.bSizer8.return_value()
if replicate:
options['noave'] = 0
replicate = ''
else:
options['noave'] = 1
replicate = '-A'
COMMAND = "huji_magic_new.py -f {} -fd {} -F {} -Fsp {} -Fsa {} -Fsi {} -Flo {} {} -LP {} {} -ncn {} {} {} {}".format(HUJI_file, dat_file, outfile, spec_outfile, samp_outfile, site_outfile, loc_outfile, user, experiment_type, loc_name, ncn, lab_field, spc, replicate)
program_ran, error_message = convert.huji(**options)
if program_ran:
pw.close_window(self, COMMAND, outfile)
else:
pw.simple_warning(error_message) | python | def on_okButton(self, event):
"""
    grab user input values, format them, and run the HUJI conversion (huji_magic_new.py) with the appropriate flags
"""
os.chdir(self.WD)
options = {}
HUJI_file = self.bSizer0.return_value()
if not HUJI_file:
pw.simple_warning("You must select a HUJI format file")
return False
options['magfile'] = HUJI_file
dat_file = self.bSizer0A.return_value()
if os.path.isfile(dat_file): options['datafile'] = dat_file
else: dat_file=""
magicoutfile=os.path.split(HUJI_file)[1]+".magic"
outfile=os.path.join(self.WD, magicoutfile)
options['meas_file'] = outfile
magicoutfile=os.path.split(HUJI_file)[1]+"_specimens.txt"
spec_outfile=os.path.join(self.WD, magicoutfile)
options['spec_file'] = spec_outfile
magicoutfile=os.path.split(HUJI_file)[1]+"_samples.txt"
samp_outfile=os.path.join(self.WD, magicoutfile)
options['samp_file'] = samp_outfile
magicoutfile=os.path.split(HUJI_file)[1]+"_sites.txt"
site_outfile=os.path.join(self.WD, magicoutfile)
options['site_file'] = site_outfile
magicoutfile=os.path.split(HUJI_file)[1]+"_locations.txt"
loc_outfile=os.path.join(self.WD, magicoutfile)
options['loc_file'] = loc_outfile
user = self.bSizer1.return_value()
options['user'] = user
if user:
user = '-usr ' + user
experiment_type = self.bSizer2.return_value()
options['codelist'] = experiment_type
if not experiment_type:
pw.simple_warning("You must select an experiment type")
return False
cooling_rate = self.cooling_rate.GetValue() or 0
if cooling_rate:
experiment_type = experiment_type + " " + cooling_rate
lab_field = self.bSizer3.return_value()
if not lab_field:
lab_field = "0 0 0"
lab_field_list = lab_field.split()
options['labfield'] = lab_field_list[0]
options['phi'] = lab_field_list[1]
options['theta'] = lab_field_list[2]
lab_field = '-dc ' + lab_field
spc = self.bSizer4.return_value()
options['specnum'] = spc or 0
if not spc:
spc = '-spc 0'
else:
spc = '-spc ' + spc
ncn = self.bSizer5.return_value()
options['samp_con'] = ncn
loc_name = self.bSizer6.return_value()
options['location'] = loc_name
if loc_name:
loc_name = '-loc ' + loc_name
#peak_AF = self.bSizer7.return_value()
#options['peakfield'] = peak_AF
replicate = self.bSizer8.return_value()
if replicate:
options['noave'] = 0
replicate = ''
else:
options['noave'] = 1
replicate = '-A'
COMMAND = "huji_magic_new.py -f {} -fd {} -F {} -Fsp {} -Fsa {} -Fsi {} -Flo {} {} -LP {} {} -ncn {} {} {} {}".format(HUJI_file, dat_file, outfile, spec_outfile, samp_outfile, site_outfile, loc_outfile, user, experiment_type, loc_name, ncn, lab_field, spc, replicate)
program_ran, error_message = convert.huji(**options)
if program_ran:
pw.close_window(self, COMMAND, outfile)
else:
            pw.simple_warning(error_message) | grab user input values, format them, and run the HUJI conversion (huji_magic_new.py) with the appropriate flags | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_gui_dialogs.py#L1214-L1291
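The dialog ultimately just builds an options dict for convert.huji, so the same conversion can be scripted without the GUI; a hedged sketch in which every file name and the experiment code are hypothetical:

from pmagpy import convert_2_magic as convert

options = {
    'magfile': 'my_run.huji',        # HUJI format input (hypothetical)
    'meas_file': 'measurements.txt',
    'spec_file': 'specimens.txt',
    'samp_file': 'samples.txt',
    'site_file': 'sites.txt',
    'loc_file': 'locations.txt',
    'user': '',
    'codelist': 'T',                 # experiment type, e.g. thermal (assumption)
    'labfield': 0, 'phi': 0, 'theta': 0,
    'specnum': 0,
    'samp_con': '1',                 # naming convention 1
    'location': 'unknown',
    'noave': 0,                      # 0 = average replicate measurements
}
program_ran, error_message = convert.huji(**options)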
PmagPy/PmagPy | dialogs/pmag_gui_dialogs.py | OrientFrameGrid3.create_sheet | def create_sheet(self):
'''
create an editable grid showing demag_orient.txt
'''
#--------------------------------
# orient.txt supports many other headers
# but we will only initialize with
# the essential headers for
# sample orientation and headers present
# in existing demag_orient.txt file
#--------------------------------
#--------------------------------
# create the grid
#--------------------------------
samples_list = list(self.orient_data.keys())
samples_list.sort()
self.samples_list = [sample for sample in samples_list if sample != ""]
#self.headers.extend(self.add_extra_headers(samples_list))
display_headers = [header[1] for header in self.headers]
self.grid = magic_grid.MagicGrid(self.panel, 'orient grid',
self.samples_list, display_headers)
self.grid.InitUI()
#--------------------------------
# color the columns by groups
#--------------------------------
for i in range(len(self.samples_list)):
self.grid.SetCellBackgroundColour(i, 0, "LIGHT GREY")
self.grid.SetCellBackgroundColour(i, 1, "LIGHT STEEL BLUE")
self.grid.SetCellBackgroundColour(i, 2, "YELLOW")
self.grid.SetCellBackgroundColour(i, 3, "YELLOW")
self.grid.SetCellBackgroundColour(i, 4, "PALE GREEN")
self.grid.SetCellBackgroundColour(i, 5, "PALE GREEN")
self.grid.SetCellBackgroundColour(i, 6, "KHAKI")
self.grid.SetCellBackgroundColour(i, 7, "KHAKI")
self.grid.SetCellBackgroundColour(i, 8, "KHAKI")
self.grid.SetCellBackgroundColour(i, 9, "KHAKI")
self.grid.SetCellBackgroundColour(i, 10, "KHAKI")
self.grid.SetCellBackgroundColour(i, 11, "LIGHT MAGENTA")
self.grid.SetCellBackgroundColour(i, 12, "LIGHT MAGENTA")
#--------------------------------
# fill data from self.orient_data
#--------------------------------
headers = [header[0] for header in self.headers]
for sample in self.samples_list:
for key in list(self.orient_data[sample].keys()):
if key in headers:
sample_index = self.samples_list.index(sample)
i = headers.index(key)
val = str(self.orient_data[sample][key])
# if it's a pmag_object, use its name
try:
val = val.name
except AttributeError:
pass
if val and val != "None":
self.grid.SetCellValue(sample_index, i, val)
#--------------------------------
#--------------------------------
# fill in some default values
#--------------------------------
for row in range(self.grid.GetNumberRows()):
col = 1
if not self.grid.GetCellValue(row, col):
self.grid.SetCellValue(row, col, 'g')
#--------------------------------
# temporary trick to get drop-down-menus to work
self.grid.changes = {'a'}
self.grid.AutoSize()
#self.drop_down_menu = drop_down_menus.Menus("orient", self, self.grid, '')
self.drop_down_menu = drop_down_menus3.Menus("orient", self.contribution, self.grid)
self.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid) | python | def create_sheet(self):
'''
create an editable grid showing demag_orient.txt
'''
#--------------------------------
# orient.txt supports many other headers
# but we will only initialize with
# the essential headers for
# sample orientation and headers present
# in existing demag_orient.txt file
#--------------------------------
#--------------------------------
# create the grid
#--------------------------------
samples_list = list(self.orient_data.keys())
samples_list.sort()
self.samples_list = [sample for sample in samples_list if sample != ""]
#self.headers.extend(self.add_extra_headers(samples_list))
display_headers = [header[1] for header in self.headers]
self.grid = magic_grid.MagicGrid(self.panel, 'orient grid',
self.samples_list, display_headers)
self.grid.InitUI()
#--------------------------------
# color the columns by groups
#--------------------------------
for i in range(len(self.samples_list)):
self.grid.SetCellBackgroundColour(i, 0, "LIGHT GREY")
self.grid.SetCellBackgroundColour(i, 1, "LIGHT STEEL BLUE")
self.grid.SetCellBackgroundColour(i, 2, "YELLOW")
self.grid.SetCellBackgroundColour(i, 3, "YELLOW")
self.grid.SetCellBackgroundColour(i, 4, "PALE GREEN")
self.grid.SetCellBackgroundColour(i, 5, "PALE GREEN")
self.grid.SetCellBackgroundColour(i, 6, "KHAKI")
self.grid.SetCellBackgroundColour(i, 7, "KHAKI")
self.grid.SetCellBackgroundColour(i, 8, "KHAKI")
self.grid.SetCellBackgroundColour(i, 9, "KHAKI")
self.grid.SetCellBackgroundColour(i, 10, "KHAKI")
self.grid.SetCellBackgroundColour(i, 11, "LIGHT MAGENTA")
self.grid.SetCellBackgroundColour(i, 12, "LIGHT MAGENTA")
#--------------------------------
# fill data from self.orient_data
#--------------------------------
headers = [header[0] for header in self.headers]
for sample in self.samples_list:
for key in list(self.orient_data[sample].keys()):
if key in headers:
sample_index = self.samples_list.index(sample)
i = headers.index(key)
val = str(self.orient_data[sample][key])
# if it's a pmag_object, use its name
try:
val = val.name
except AttributeError:
pass
if val and val != "None":
self.grid.SetCellValue(sample_index, i, val)
#--------------------------------
#--------------------------------
# fill in some default values
#--------------------------------
for row in range(self.grid.GetNumberRows()):
col = 1
if not self.grid.GetCellValue(row, col):
self.grid.SetCellValue(row, col, 'g')
#--------------------------------
# temporary trick to get drop-down-menus to work
self.grid.changes = {'a'}
self.grid.AutoSize()
#self.drop_down_menu = drop_down_menus.Menus("orient", self, self.grid, '')
self.drop_down_menu = drop_down_menus3.Menus("orient", self.contribution, self.grid)
self.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid) | create an editable grid showing demag_orient.txt | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_gui_dialogs.py#L2704-L2787 |
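Editor's note: the run of SetCellBackgroundColour calls in create_sheet hard-codes a column-to-colour grouping. As a behaviour-preserving sketch, the same colouring can be driven by a lookup table; the indices and colour names below are copied verbatim from the method above.

# Column groups exactly as hard-coded in create_sheet above.
COLUMN_COLOURS = {
    0: "LIGHT GREY",
    1: "LIGHT STEEL BLUE",
    2: "YELLOW", 3: "YELLOW",
    4: "PALE GREEN", 5: "PALE GREEN",
    6: "KHAKI", 7: "KHAKI", 8: "KHAKI", 9: "KHAKI", 10: "KHAKI",
    11: "LIGHT MAGENTA", 12: "LIGHT MAGENTA",
}

def colour_columns(grid, n_rows, colours=COLUMN_COLOURS):
    """Apply the group colours to every row of a wx.grid.Grid subclass."""
    for row in range(n_rows):
        for col, colour in colours.items():
            grid.SetCellBackgroundColour(row, col, colour)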
PmagPy/PmagPy | dialogs/pmag_gui_dialogs.py | OrientFrameGrid3.on_m_open_file | def on_m_open_file(self,event):
'''
open orient.txt
read the data
display the data from the file in a new grid
'''
dlg = wx.FileDialog(
self, message="choose orient file",
defaultDir=self.WD,
defaultFile="",
style=wx.FD_OPEN | wx.FD_CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
orient_file = dlg.GetPath()
dlg.Destroy()
new_data, dtype, keys = pmag.magic_read_dict(orient_file,
sort_by_this_name="sample_name",
return_keys=True)
if len(new_data) > 0:
self.orient_data={}
self.orient_data=new_data
#self.create_sheet()
self.update_sheet()
print("-I- If you don't see a change in the spreadsheet, you may need to manually re-size the window") | python | def on_m_open_file(self,event):
'''
open orient.txt
read the data
display the data from the file in a new grid
'''
dlg = wx.FileDialog(
self, message="choose orient file",
defaultDir=self.WD,
defaultFile="",
style=wx.FD_OPEN | wx.FD_CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
orient_file = dlg.GetPath()
dlg.Destroy()
new_data, dtype, keys = pmag.magic_read_dict(orient_file,
sort_by_this_name="sample_name",
return_keys=True)
if len(new_data) > 0:
self.orient_data={}
self.orient_data=new_data
#self.create_sheet()
self.update_sheet()
print("-I- If you don't see a change in the spreadsheet, you may need to manually re-size the window") | open orient.txt
read the data
display the data from the file in a new grid | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_gui_dialogs.py#L2814-L2838 |
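Editor's note: pmag.magic_read_dict does the real work in on_m_open_file. A minimal sketch of reading an orient file outside the GUI, assuming the same call signature and return tuple used above; the path is a placeholder.

import pmagpy.pmag as pmag

orient_file = 'demag_orient.txt'  # hypothetical path
new_data, dtype, keys = pmag.magic_read_dict(orient_file,
                                             sort_by_this_name="sample_name",
                                             return_keys=True)
# new_data maps each sample name to a {column: value} dict; dtype is the
# file type declared on the first line; keys lists the column names read.
for sample, row in new_data.items():
    print(sample, sorted(row.keys()))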
PmagPy/PmagPy | dialogs/pmag_gui_dialogs.py | OrientFrameGrid.add_extra_headers | def add_extra_headers(self, sample_names):
"""
If there are samples, add any additional keys they might use
to supplement the default headers.
Return the headers for adding, with the format:
[(header_name, header_display_name), ....]
"""
if not sample_names:
return []
full_headers = list(self.orient_data[sample_names[0]].keys())
add_ons = []
for head in full_headers:
if head not in self.header_names:
add_ons.append((head, head))
return add_ons | python | def add_extra_headers(self, sample_names):
"""
If there are samples, add any additional keys they might use
to supplement the default headers.
Return the headers for adding, with the format:
[(header_name, header_display_name), ....]
"""
if not sample_names:
return []
full_headers = list(self.orient_data[sample_names[0]].keys())
add_ons = []
for head in full_headers:
if head not in self.header_names:
add_ons.append((head, head))
return add_ons | If there are samples, add any additional keys they might use
to supplement the default headers.
Return the headers for adding, with the format:
[(header_name, header_display_name), ....] | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_gui_dialogs.py#L3086-L3100 |
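Editor's note: a quick worked example of the merge logic in add_extra_headers, with made-up header names standing in for self.header_names and self.orient_data.

# Made-up headers and data to exercise the merge logic above.
header_names = ['sample_name', 'sample_orientation_flag']
orient_data = {'smp01': {'sample_name': 'smp01',
                         'sample_orientation_flag': 'g',
                         'my_custom_note': 'field note'}}

full_headers = list(orient_data['smp01'].keys())
add_ons = [(head, head) for head in full_headers if head not in header_names]
print(add_ons)  # [('my_custom_note', 'my_custom_note')]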
PmagPy/PmagPy | dialogs/pmag_gui_dialogs.py | OrientFrameGrid.on_m_open_file | def on_m_open_file(self,event):
'''
open orient.txt
read the data
display the data from the file in a new grid
'''
dlg = wx.FileDialog(
self, message="choose orient file",
defaultDir=self.WD,
defaultFile="",
style=wx.FD_OPEN | wx.FD_CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
orient_file = dlg.GetPath()
dlg.Destroy()
new_data = self.er_magic_data.read_magic_file(orient_file, "sample_name")[0]
if len(new_data) > 0:
self.orient_data={}
self.orient_data=new_data
#self.create_sheet()
self.update_sheet()
print("-I- If you don't see a change in the spreadsheet, you may need to manually re-size the window") | python | def on_m_open_file(self,event):
'''
open orient.txt
read the data
display the data from the file in a new grid
'''
dlg = wx.FileDialog(
self, message="choose orient file",
defaultDir=self.WD,
defaultFile="",
style=wx.FD_OPEN | wx.FD_CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
orient_file = dlg.GetPath()
dlg.Destroy()
new_data = self.er_magic_data.read_magic_file(orient_file, "sample_name")[0]
if len(new_data) > 0:
self.orient_data={}
self.orient_data=new_data
#self.create_sheet()
self.update_sheet()
print("-I- If you don't see a change in the spreadsheet, you may need to manually re-size the window") | open orient.txt
read the data
display the data from the file in a new grid | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_gui_dialogs.py#L3211-L3232 |
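Editor's note: both on_m_open_file variants call dlg.Destroy() only on the OK path, so a cancelled dialog is never released. A common wxPython pattern that releases it on either path, shown as a sketch rather than a drop-in patch:

import wx

def choose_file(parent, default_dir):
    """Return the chosen path, or None if the user cancels."""
    dlg = wx.FileDialog(parent, message="choose orient file",
                        defaultDir=default_dir, defaultFile="",
                        style=wx.FD_OPEN | wx.FD_CHANGE_DIR)
    try:
        if dlg.ShowModal() == wx.ID_OK:
            return dlg.GetPath()
        return None
    finally:
        dlg.Destroy()  # runs whether the user confirmed or cancelled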
PmagPy/PmagPy | dialogs/pmag_gui_dialogs.py | OrientFrameGrid.on_m_save_file | def on_m_save_file(self,event):
'''
save demag_orient.txt
(only the columns that appear on the grid frame)
'''
fout = open(os.path.join(self.WD, "demag_orient.txt"), 'w')
STR = "tab\tdemag_orient\n"
fout.write(STR)
headers = [header[0] for header in self.headers]
STR = "\t".join(headers) + "\n"
fout.write(STR)
for sample in self.samples_list:
STR = ""
for header in headers:
sample_index = self.samples_list.index(sample)
i = headers.index(header)
value = self.grid.GetCellValue(sample_index, i)
STR = STR + value + "\t"
fout.write(STR[:-1] + "\n")
fout.close()
if event is not None:
dlg1 = wx.MessageDialog(None,caption="Message:", message="data saved in file demag_orient.txt" ,style=wx.OK|wx.ICON_INFORMATION)
dlg1.ShowModal()
dlg1.Destroy() | python | def on_m_save_file(self,event):
'''
save demag_orient.txt
(only the columns that appear on the grid frame)
'''
fout = open(os.path.join(self.WD, "demag_orient.txt"), 'w')
STR = "tab\tdemag_orient\n"
fout.write(STR)
headers = [header[0] for header in self.headers]
STR = "\t".join(headers) + "\n"
fout.write(STR)
for sample in self.samples_list:
STR = ""
for header in headers:
sample_index = self.samples_list.index(sample)
i = headers.index(header)
value = self.grid.GetCellValue(sample_index, i)
STR = STR + value + "\t"
fout.write(STR[:-1] + "\n")
fout.close()
if event is not None:
dlg1 = wx.MessageDialog(None,caption="Message:", message="data saved in file demag_orient.txt" ,style=wx.OK|wx.ICON_INFORMATION)
dlg1.ShowModal()
dlg1.Destroy() | save demag_orient.txt
(only the columns that appear on the grid frame) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_gui_dialogs.py#L3234-L3257 |
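Editor's note: a with-statement version of the writer above closes the file even if a cell lookup raises, while producing the identical three-part layout (type line, tab-joined headers, one tab-joined row per sample). get_value is a hypothetical callable standing in for grid.GetCellValue.

import os

def save_orient(wd, headers, samples, get_value):
    """Write demag_orient.txt; get_value(row, col) supplies each cell."""
    with open(os.path.join(wd, "demag_orient.txt"), 'w') as fout:
        fout.write("tab\tdemag_orient\n")
        fout.write("\t".join(headers) + "\n")
        for row, _sample in enumerate(samples):
            cells = [get_value(row, col) for col in range(len(headers))]
            fout.write("\t".join(cells) + "\n")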