repository_name
stringlengths 5
67
| func_path_in_repository
stringlengths 4
234
| func_name
stringlengths 0
314
| whole_func_string
stringlengths 52
3.87M
| language
stringclasses 6
values | func_code_string
stringlengths 52
3.87M
| func_documentation_string
stringlengths 1
47.2k
| func_code_url
stringlengths 85
339
|
---|---|---|---|---|---|---|---|
PmagPy/PmagPy | dialogs/grid_frame3.py | GridBuilder.save_grid_data | def save_grid_data(self):
"""
Save grid data in the data object
"""
if not self.grid.changes:
print('-I- No changes to save')
return
starred_cols = self.grid.remove_starred_labels()
# locks in value in cell currently edited
self.grid.SaveEditControlValue()
# changes is a dict with key values == row number
if self.grid.changes:
new_data = self.grid.save_items()
# HugeMagicGrid will return a pandas dataframe
if self.huge:
self.magic_dataframe.df = new_data
# MagicGrid will return a dictionary with
# new/updated data that must be incorporated
# into the dataframe
else:
for key in new_data:
data = new_data[key]
# update the row if it exists already,
# otherwise create a new row
updated = self.magic_dataframe.update_row(key, data)
if not isinstance(updated, pd.DataFrame):
if self.grid_type == 'ages':
label = key
else:
label = self.grid_type[:-1]
self.magic_dataframe.add_row(label, data,
self.grid.col_labels)
# update the contribution with the new dataframe
self.contribution.tables[self.grid_type] = self.magic_dataframe
# *** probably don't actually want to write to file, here (but maybe)
self.contribution.write_table_to_file(self.grid_type)
#self.magic_dataframe.write_magic_file("{}.txt".format(self.grid_type),
# self.contribution.directory)
# propagate age info if age table was edited
if self.grid_type == 'ages':
self.contribution.propagate_ages()
return | python | def save_grid_data(self):
"""
Save grid data in the data object
"""
if not self.grid.changes:
print('-I- No changes to save')
return
starred_cols = self.grid.remove_starred_labels()
# locks in value in cell currently edited
self.grid.SaveEditControlValue()
# changes is a dict with key values == row number
if self.grid.changes:
new_data = self.grid.save_items()
# HugeMagicGrid will return a pandas dataframe
if self.huge:
self.magic_dataframe.df = new_data
# MagicGrid will return a dictionary with
# new/updated data that must be incorporated
# into the dataframe
else:
for key in new_data:
data = new_data[key]
# update the row if it exists already,
# otherwise create a new row
updated = self.magic_dataframe.update_row(key, data)
if not isinstance(updated, pd.DataFrame):
if self.grid_type == 'ages':
label = key
else:
label = self.grid_type[:-1]
self.magic_dataframe.add_row(label, data,
self.grid.col_labels)
# update the contribution with the new dataframe
self.contribution.tables[self.grid_type] = self.magic_dataframe
# *** probably don't actually want to write to file, here (but maybe)
self.contribution.write_table_to_file(self.grid_type)
#self.magic_dataframe.write_magic_file("{}.txt".format(self.grid_type),
# self.contribution.directory)
# propagate age info if age table was edited
if self.grid_type == 'ages':
self.contribution.propagate_ages()
return | Save grid data in the data object | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/grid_frame3.py#L1094-L1138 |
PmagPy/PmagPy | dialogs/grid_frame3.py | GridBuilder.fill_defaults | def fill_defaults(self):
"""
Fill in self.grid with default values in certain columns.
Only fill in new values if grid is missing those values.
"""
defaults = {'result_quality': 'g',
'result_type': 'i',
'orientation_quality': 'g',
'citations': 'This study'}
for col_name in defaults:
if col_name in self.grid.col_labels:
# try to grab existing values from contribution
if self.grid_type in self.contribution.tables:
if col_name in self.contribution.tables[self.grid_type].df.columns:
old_vals = self.contribution.tables[self.grid_type].df[col_name]
# if column is completely filled in, skip
if all([cb.not_null(val, False) for val in old_vals]):
continue
new_val = defaults[col_name]
vals = list(np.where((old_vals.notnull()) & (old_vals != ''), old_vals, new_val))
else:
vals = [defaults[col_name]] * self.grid.GetNumberRows()
# if values not available in contribution, use defaults
else:
vals = [defaults[col_name]] * self.grid.GetNumberRows()
# if col_name not present in grid, skip
else:
vals = None
#
if vals:
print('-I- Updating column "{}" with default values'.format(col_name))
if self.huge:
self.grid.SetColumnValues(col_name, vals)
else:
col_ind = self.grid.col_labels.index(col_name)
for row, val in enumerate(vals):
self.grid.SetCellValue(row, col_ind, val)
self.grid.changes = set(range(self.grid.GetNumberRows())) | python | def fill_defaults(self):
"""
Fill in self.grid with default values in certain columns.
Only fill in new values if grid is missing those values.
"""
defaults = {'result_quality': 'g',
'result_type': 'i',
'orientation_quality': 'g',
'citations': 'This study'}
for col_name in defaults:
if col_name in self.grid.col_labels:
# try to grab existing values from contribution
if self.grid_type in self.contribution.tables:
if col_name in self.contribution.tables[self.grid_type].df.columns:
old_vals = self.contribution.tables[self.grid_type].df[col_name]
# if column is completely filled in, skip
if all([cb.not_null(val, False) for val in old_vals]):
continue
new_val = defaults[col_name]
vals = list(np.where((old_vals.notnull()) & (old_vals != ''), old_vals, new_val))
else:
vals = [defaults[col_name]] * self.grid.GetNumberRows()
# if values not available in contribution, use defaults
else:
vals = [defaults[col_name]] * self.grid.GetNumberRows()
# if col_name not present in grid, skip
else:
vals = None
#
if vals:
print('-I- Updating column "{}" with default values'.format(col_name))
if self.huge:
self.grid.SetColumnValues(col_name, vals)
else:
col_ind = self.grid.col_labels.index(col_name)
for row, val in enumerate(vals):
self.grid.SetCellValue(row, col_ind, val)
self.grid.changes = set(range(self.grid.GetNumberRows())) | Fill in self.grid with default values in certain columns.
Only fill in new values if grid is missing those values. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/grid_frame3.py#L1140-L1177 |
PmagPy/PmagPy | dialogs/grid_frame3.py | GridBuilder.get_result_children | def get_result_children(self, result_data):
"""
takes in dict in form of {'er_specimen_names': 'name1:name2:name3'}
and so forth.
returns lists of specimens, samples, sites, and locations
"""
specimens, samples, sites, locations = "", "", "", ""
children = {'specimen': specimens, 'sample': samples,
'site': sites, 'location': locations}
for dtype in children:
header_name = 'er_' + dtype + '_names'
if result_data[header_name]:
children[dtype] = result_data[header_name].split(":")
# make sure there are no extra spaces in names
children[dtype] = [child.strip() for child in children[dtype]]
return children['specimen'], children['sample'], children['site'], children['location'] | python | def get_result_children(self, result_data):
"""
takes in dict in form of {'er_specimen_names': 'name1:name2:name3'}
and so forth.
returns lists of specimens, samples, sites, and locations
"""
specimens, samples, sites, locations = "", "", "", ""
children = {'specimen': specimens, 'sample': samples,
'site': sites, 'location': locations}
for dtype in children:
header_name = 'er_' + dtype + '_names'
if result_data[header_name]:
children[dtype] = result_data[header_name].split(":")
# make sure there are no extra spaces in names
children[dtype] = [child.strip() for child in children[dtype]]
return children['specimen'], children['sample'], children['site'], children['location'] | takes in dict in form of {'er_specimen_names': 'name1:name2:name3'}
and so forth.
returns lists of specimens, samples, sites, and locations | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/grid_frame3.py#L1179-L1195 |
PmagPy/PmagPy | programs/deprecated/specimens_results_magic.py | main | def main():
"""
NAME
specimens_results_magic.py
DESCRIPTION
combines pmag_specimens.txt file with age, location, acceptance criteria and
outputs pmag_results table along with other MagIC tables necessary for uploading to the database
SYNTAX
specimens_results_magic.py [command line options]
OPTIONS
-h prints help message and quits
-usr USER: identify user, default is ""
-f: specimen input magic_measurements format file, default is "magic_measurements.txt"
-fsp: specimen input pmag_specimens format file, default is "pmag_specimens.txt"
-fsm: sample input er_samples format file, default is "er_samples.txt"
-fsi: specimen input er_sites format file, default is "er_sites.txt"
-fla: specify a file with paleolatitudes for calculating VADMs, default is not to calculate VADMS
format is: site_name paleolatitude (space delimited file)
-fa AGES: specify er_ages format file with age information
-crd [s,g,t,b]: specify coordinate system
(s, specimen, g geographic, t, tilt corrected, b, geographic and tilt corrected)
Default is to assume geographic
NB: only the tilt corrected data will appear on the results table, if both g and t are selected.
-cor [AC:CR:NL]: colon delimited list of required data adjustments for all specimens
included in intensity calculations (anisotropy, cooling rate, non-linear TRM)
unless specified, corrections will not be applied
-pri [TRM:ARM] colon delimited list of priorities for anisotropy correction (-cor must also be set to include AC). default is TRM, then ARM
-age MIN MAX UNITS: specify age boundaries and units
-exc: use exiting selection criteria (in pmag_criteria.txt file), default is default criteria
-C: no acceptance criteria
-aD: average directions per sample, default is NOT
-aI: average multiple specimen intensities per sample, default is by site
-aC: average all components together, default is NOT
-pol: calculate polarity averages
-sam: save sample level vgps and v[a]dms, default is by site
-xSi: skip the site level intensity calculation
-p: plot directions and look at intensities by site, default is NOT
-fmt: specify output for saved images, default is svg (only if -p set)
-lat: use present latitude for calculating VADMs, default is not to calculate VADMs
-xD: skip directions
-xI: skip intensities
OUPUT
writes pmag_samples, pmag_sites, pmag_results tables
"""
# set defaults
Comps=[] # list of components
version_num=pmag.get_version()
args=sys.argv
DefaultAge=["none"]
skipdirs,coord,excrit,custom,vgps,average,Iaverage,plotsites,opt=1,0,0,0,0,0,0,0,0
get_model_lat=0 # this skips VADM calculation altogether, when get_model_lat=1, uses present day
fmt='svg'
dir_path="."
model_lat_file=""
Caverage=0
infile='pmag_specimens.txt'
measfile="magic_measurements.txt"
sampfile="er_samples.txt"
sitefile="er_sites.txt"
agefile="er_ages.txt"
specout="er_specimens.txt"
sampout="pmag_samples.txt"
siteout="pmag_sites.txt"
resout="pmag_results.txt"
critout="pmag_criteria.txt"
instout="magic_instruments.txt"
sigcutoff,OBJ="",""
noDir,noInt=0,0
polarity=0
coords=['0']
Dcrit,Icrit,nocrit=0,0,0
corrections=[]
nocorrection=['DA-NL','DA-AC','DA-CR']
priorities=['DA-AC-ARM','DA-AC-TRM'] # priorities for anisotropy correction
# get command line stuff
if "-h" in args:
print(main.__doc__)
sys.exit()
if '-WD' in args:
ind=args.index("-WD")
dir_path=args[ind+1]
if '-cor' in args:
ind=args.index('-cor')
cors=args[ind+1].split(':') # list of required data adjustments
for cor in cors:
nocorrection.remove('DA-'+cor)
corrections.append('DA-'+cor)
if '-pri' in args:
ind=args.index('-pri')
priorities=args[ind+1].split(':') # list of required data adjustments
for p in priorities:
p='DA-AC-'+p
if '-f' in args:
ind=args.index("-f")
measfile=args[ind+1]
if '-fsp' in args:
ind=args.index("-fsp")
infile=args[ind+1]
if '-fsi' in args:
ind=args.index("-fsi")
sitefile=args[ind+1]
if "-crd" in args:
ind=args.index("-crd")
coord=args[ind+1]
if coord=='s':coords=['-1']
if coord=='g':coords=['0']
if coord=='t':coords=['100']
if coord=='b':coords=['0','100']
if "-usr" in args:
ind=args.index("-usr")
user=sys.argv[ind+1]
else: user=""
if "-C" in args: Dcrit,Icrit,nocrit=1,1,1 # no selection criteria
if "-sam" in args: vgps=1 # save sample level VGPS/VADMs
if "-xSi" in args:
nositeints=1 # skip site level intensity
else:
nositeints=0
if "-age" in args:
ind=args.index("-age")
DefaultAge[0]=args[ind+1]
DefaultAge.append(args[ind+2])
DefaultAge.append(args[ind+3])
Daverage,Iaverage,Caverage=0,0,0
if "-aD" in args: Daverage=1 # average by sample directions
if "-aI" in args: Iaverage=1 # average by sample intensities
if "-aC" in args: Caverage=1 # average all components together ??? why???
if "-pol" in args: polarity=1 # calculate averages by polarity
if '-xD' in args:noDir=1
if '-xI' in args:
noInt=1
elif "-fla" in args:
if '-lat' in args:
print("you should set a paleolatitude file OR use present day lat - not both")
sys.exit()
ind=args.index("-fla")
model_lat_file=dir_path+'/'+args[ind+1]
get_model_lat=2
mlat=open(model_lat_file,'r')
ModelLats=[]
for line in mlat.readlines():
ModelLat={}
tmp=line.split()
ModelLat["er_site_name"]=tmp[0]
ModelLat["site_model_lat"]=tmp[1]
ModelLat["er_sample_name"]=tmp[0]
ModelLat["sample_lat"]=tmp[1]
ModelLats.append(ModelLat)
get_model_lat=2
elif '-lat' in args:
get_model_lat=1
if "-p" in args:
plotsites=1
if "-fmt" in args:
ind=args.index("-fmt")
fmt=args[ind+1]
if noDir==0: # plot by site - set up plot window
import pmagplotlib
EQ={}
EQ['eqarea']=1
pmagplotlib.plot_init(EQ['eqarea'],5,5) # define figure 1 as equal area projection
pmagplotlib.plot_net(EQ['eqarea']) # I don't know why this has to be here, but otherwise the first plot never plots...
pmagplotlib.draw_figs(EQ)
if '-WD' in args:
infile=dir_path+'/'+infile
measfile=dir_path+'/'+measfile
instout=dir_path+'/'+instout
sampfile=dir_path+'/'+sampfile
sitefile=dir_path+'/'+sitefile
agefile=dir_path+'/'+agefile
specout=dir_path+'/'+specout
sampout=dir_path+'/'+sampout
siteout=dir_path+'/'+siteout
resout=dir_path+'/'+resout
critout=dir_path+'/'+critout
if "-exc" in args: # use existing pmag_criteria file
if "-C" in args:
print('you can not use both existing and no criteria - choose either -exc OR -C OR neither (for default)')
sys.exit()
crit_data,file_type=pmag.magic_read(critout)
print("Acceptance criteria read in from ", critout)
else : # use default criteria (if nocrit set, then get really loose criteria as default)
crit_data=pmag.default_criteria(nocrit)
if nocrit==0:
print("Acceptance criteria are defaults")
else:
print("No acceptance criteria used ")
accept={}
for critrec in crit_data:
for key in list(critrec.keys()):
# need to migrate specimen_dang to specimen_int_dang for intensity data using old format
if 'IE-SPEC' in list(critrec.keys()) and 'specimen_dang' in list(critrec.keys()) and 'specimen_int_dang' not in list(critrec.keys()):
critrec['specimen_int_dang']=critrec['specimen_dang']
del critrec['specimen_dang']
# need to get rid of ron shaars sample_int_sigma_uT
if 'sample_int_sigma_uT' in list(critrec.keys()):
critrec['sample_int_sigma']='%10.3e'%(eval(critrec['sample_int_sigma_uT'])*1e-6)
if key not in list(accept.keys()) and critrec[key]!='':
accept[key]=critrec[key]
#
#
if "-exc" not in args and "-C" not in args:
print("args",args)
pmag.magic_write(critout,[accept],'pmag_criteria')
print("\n Pmag Criteria stored in ",critout,'\n')
#
# now we're done slow dancing
#
SiteNFO,file_type=pmag.magic_read(sitefile) # read in site data - has the lats and lons
SampNFO,file_type=pmag.magic_read(sampfile) # read in site data - has the lats and lons
height_nfo=pmag.get_dictitem(SiteNFO,'site_height','','F') # find all the sites with height info.
if agefile !="":AgeNFO,file_type=pmag.magic_read(agefile) # read in the age information
Data,file_type=pmag.magic_read(infile) # read in specimen interpretations
IntData=pmag.get_dictitem(Data,'specimen_int','','F') # retrieve specimens with intensity data
comment,orient="",[]
samples,sites=[],[]
for rec in Data: # run through the data filling in missing keys and finding all components, coordinates available
# fill in missing fields, collect unique sample and site names
if 'er_sample_name' not in list(rec.keys()):
rec['er_sample_name']=""
elif rec['er_sample_name'] not in samples:
samples.append(rec['er_sample_name'])
if 'er_site_name' not in list(rec.keys()):
rec['er_site_name']=""
elif rec['er_site_name'] not in sites:
sites.append(rec['er_site_name'])
if 'specimen_int' not in list(rec.keys()):rec['specimen_int']=''
if 'specimen_comp_name' not in list(rec.keys()) or rec['specimen_comp_name']=="":rec['specimen_comp_name']='A'
if rec['specimen_comp_name'] not in Comps:Comps.append(rec['specimen_comp_name'])
rec['specimen_tilt_correction']=rec['specimen_tilt_correction'].strip('\n')
if "specimen_tilt_correction" not in list(rec.keys()): rec["specimen_tilt_correction"]="-1" # assume sample coordinates
if rec["specimen_tilt_correction"] not in orient: orient.append(rec["specimen_tilt_correction"]) # collect available coordinate systems
if "specimen_direction_type" not in list(rec.keys()): rec["specimen_direction_type"]='l' # assume direction is line - not plane
if "specimen_dec" not in list(rec.keys()): rec["specimen_direction_type"]='' # if no declination, set direction type to blank
if "specimen_n" not in list(rec.keys()): rec["specimen_n"]='' # put in n
if "specimen_alpha95" not in list(rec.keys()): rec["specimen_alpha95"]='' # put in alpha95
if "magic_method_codes" not in list(rec.keys()): rec["magic_method_codes"]=''
#
# start parsing data into SpecDirs, SpecPlanes, SpecInts
SpecInts,SpecDirs,SpecPlanes=[],[],[]
samples.sort() # get sorted list of samples and sites
sites.sort()
if noInt==0: # don't skip intensities
IntData=pmag.get_dictitem(Data,'specimen_int','','F') # retrieve specimens with intensity data
if nocrit==0: # use selection criteria
for rec in IntData: # do selection criteria
kill=pmag.grade(rec,accept,'specimen_int')
if len(kill)==0: SpecInts.append(rec) # intensity record to be included in sample, site calculations
else:
SpecInts=IntData[:] # take everything - no selection criteria
# check for required data adjustments
if len(corrections)>0 and len(SpecInts)>0:
for cor in corrections:
SpecInts=pmag.get_dictitem(SpecInts,'magic_method_codes',cor,'has') # only take specimens with the required corrections
if len(nocorrection)>0 and len(SpecInts)>0:
for cor in nocorrection:
SpecInts=pmag.get_dictitem(SpecInts,'magic_method_codes',cor,'not') # exclude the corrections not specified for inclusion
# take top priority specimen of its name in remaining specimens (only one per customer)
PrioritySpecInts=[]
specimens=pmag.get_specs(SpecInts) # get list of uniq specimen names
for spec in specimens:
ThisSpecRecs=pmag.get_dictitem(SpecInts,'er_specimen_name',spec,'T') # all the records for this specimen
if len(ThisSpecRecs)==1:
PrioritySpecInts.append(ThisSpecRecs[0])
elif len(ThisSpecRecs)>1: # more than one
prec=[]
for p in priorities:
ThisSpecRecs=pmag.get_dictitem(SpecInts,'magic_method_codes',p,'has') # all the records for this specimen
if len(ThisSpecRecs)>0:prec.append(ThisSpecRecs[0])
PrioritySpecInts.append(prec[0]) # take the best one
SpecInts=PrioritySpecInts # this has the first specimen record
if noDir==0: # don't skip directions
AllDirs=pmag.get_dictitem(Data,'specimen_direction_type','','F') # retrieve specimens with directed lines and planes
Ns=pmag.get_dictitem(AllDirs,'specimen_n','','F') # get all specimens with specimen_n information
if nocrit!=1: # use selection criteria
for rec in Ns: # look through everything with specimen_n for "good" data
kill=pmag.grade(rec,accept,'specimen_dir')
if len(kill)==0: # nothing killed it
SpecDirs.append(rec)
else: # no criteria
SpecDirs=AllDirs[:] # take them all
# SpecDirs is now the list of all specimen directions (lines and planes) that pass muster
#
PmagSamps,SampDirs=[],[] # list of all sample data and list of those that pass the DE-SAMP criteria
PmagSites,PmagResults=[],[] # list of all site data and selected results
SampInts=[]
for samp in samples: # run through the sample names
if Daverage==1: # average by sample if desired
SampDir=pmag.get_dictitem(SpecDirs,'er_sample_name',samp,'T') # get all the directional data for this sample
if len(SampDir)>0: # there are some directions
for coord in coords: # step through desired coordinate systems
CoordDir=pmag.get_dictitem(SampDir,'specimen_tilt_correction',coord,'T') # get all the directions for this sample
if len(CoordDir)>0: # there are some with this coordinate system
if Caverage==0: # look component by component
for comp in Comps:
CompDir=pmag.get_dictitem(CoordDir,'specimen_comp_name',comp,'T') # get all directions from this component
if len(CompDir)>0: # there are some
PmagSampRec=pmag.lnpbykey(CompDir,'sample','specimen') # get a sample average from all specimens
PmagSampRec["er_location_name"]=CompDir[0]['er_location_name'] # decorate the sample record
PmagSampRec["er_site_name"]=CompDir[0]['er_site_name']
PmagSampRec["er_sample_name"]=samp
PmagSampRec["er_citation_names"]="This study"
PmagSampRec["er_analyst_mail_names"]=user
PmagSampRec['magic_software_packages']=version_num
if nocrit!=1:PmagSampRec['pmag_criteria_codes']="ACCEPT"
if agefile != "": PmagSampRec= pmag.get_age(PmagSampRec,"er_site_name","sample_inferred_",AgeNFO,DefaultAge)
site_height=pmag.get_dictitem(height_nfo,'er_site_name',PmagSampRec['er_site_name'],'T')
if len(site_height)>0:PmagSampRec["sample_height"]=site_height[0]['site_height'] # add in height if available
PmagSampRec['sample_comp_name']=comp
PmagSampRec['sample_tilt_correction']=coord
PmagSampRec['er_specimen_names']= pmag.get_list(CompDir,'er_specimen_name') # get a list of the specimen names used
PmagSampRec['magic_method_codes']= pmag.get_list(CompDir,'magic_method_codes') # get a list of the methods used
if nocrit!=1: # apply selection criteria
kill=pmag.grade(PmagSampRec,accept,'sample_dir')
else:
kill=[]
if len(kill)==0:
SampDirs.append(PmagSampRec)
if vgps==1: # if sample level VGP info desired, do that now
PmagResRec=pmag.getsampVGP(PmagSampRec,SiteNFO)
if PmagResRec!="":PmagResults.append(PmagResRec)
PmagSamps.append(PmagSampRec)
if Caverage==1: # average all components together basically same as above
PmagSampRec=pmag.lnpbykey(CoordDir,'sample','specimen')
PmagSampRec["er_location_name"]=CoordDir[0]['er_location_name']
PmagSampRec["er_site_name"]=CoordDir[0]['er_site_name']
PmagSampRec["er_sample_name"]=samp
PmagSampRec["er_citation_names"]="This study"
PmagSampRec["er_analyst_mail_names"]=user
PmagSampRec['magic_software_packages']=version_num
if nocrit!=1:PmagSampRec['pmag_criteria_codes']=""
if agefile != "": PmagSampRec= pmag.get_age(PmagSampRec,"er_site_name","sample_inferred_",AgeNFO,DefaultAge)
site_height=pmag.get_dictitem(height_nfo,'er_site_name',site,'T')
if len(site_height)>0:PmagSampRec["sample_height"]=site_height[0]['site_height'] # add in height if available
PmagSampRec['sample_tilt_correction']=coord
PmagSampRec['sample_comp_name']= pmag.get_list(CoordDir,'specimen_comp_name') # get components used
PmagSampRec['er_specimen_names']= pmag.get_list(CoordDir,'er_specimen_name') # get specimne names averaged
PmagSampRec['magic_method_codes']= pmag.get_list(CoordDir,'magic_method_codes') # assemble method codes
if nocrit!=1: # apply selection criteria
kill=pmag.grade(PmagSampRec,accept,'sample_dir')
if len(kill)==0: # passes the mustard
SampDirs.append(PmagSampRec)
if vgps==1:
PmagResRec=pmag.getsampVGP(PmagSampRec,SiteNFO)
if PmagResRec!="":PmagResults.append(PmagResRec)
else: # take everything
SampDirs.append(PmagSampRec)
if vgps==1:
PmagResRec=pmag.getsampVGP(PmagSampRec,SiteNFO)
if PmagResRec!="":PmagResults.append(PmagResRec)
PmagSamps.append(PmagSampRec)
if Iaverage==1: # average by sample if desired
SampI=pmag.get_dictitem(SpecInts,'er_sample_name',samp,'T') # get all the intensity data for this sample
if len(SampI)>0: # there are some
PmagSampRec=pmag.average_int(SampI,'specimen','sample') # get average intensity stuff
PmagSampRec["sample_description"]="sample intensity" # decorate sample record
PmagSampRec["sample_direction_type"]=""
PmagSampRec['er_site_name']=SampI[0]["er_site_name"]
PmagSampRec['er_sample_name']=samp
PmagSampRec['er_location_name']=SampI[0]["er_location_name"]
PmagSampRec["er_citation_names"]="This study"
PmagSampRec["er_analyst_mail_names"]=user
if agefile != "": PmagSampRec=pmag.get_age(PmagSampRec,"er_site_name","sample_inferred_", AgeNFO,DefaultAge)
site_height=pmag.get_dictitem(height_nfo,'er_site_name',PmagSampRec['er_site_name'],'T')
if len(site_height)>0:PmagSampRec["sample_height"]=site_height[0]['site_height'] # add in height if available
PmagSampRec['er_specimen_names']= pmag.get_list(SampI,'er_specimen_name')
PmagSampRec['magic_method_codes']= pmag.get_list(SampI,'magic_method_codes')
if nocrit!=1: # apply criteria!
kill=pmag.grade(PmagSampRec,accept,'sample_int')
if len(kill)==0:
PmagSampRec['pmag_criteria_codes']="ACCEPT"
SampInts.append(PmagSampRec)
PmagSamps.append(PmagSampRec)
else:PmagSampRec={} # sample rejected
else: # no criteria
SampInts.append(PmagSampRec)
PmagSamps.append(PmagSampRec)
PmagSampRec['pmag_criteria_codes']=""
if vgps==1 and get_model_lat!=0 and PmagSampRec!={}: #
if get_model_lat==1: # use sample latitude
PmagResRec=pmag.getsampVDM(PmagSampRec,SampNFO)
del(PmagResRec['model_lat']) # get rid of the model lat key
elif get_model_lat==2: # use model latitude
PmagResRec=pmag.getsampVDM(PmagSampRec,ModelLats)
if PmagResRec!={}:PmagResRec['magic_method_codes']=PmagResRec['magic_method_codes']+":IE-MLAT"
if PmagResRec!={}:
PmagResRec['er_specimen_names']=PmagSampRec['er_specimen_names']
PmagResRec['er_sample_names']=PmagSampRec['er_sample_name']
PmagResRec['pmag_criteria_codes']='ACCEPT'
PmagResRec['average_int_sigma_perc']=PmagSampRec['sample_int_sigma_perc']
PmagResRec['average_int_sigma']=PmagSampRec['sample_int_sigma']
PmagResRec['average_int_n']=PmagSampRec['sample_int_n']
PmagResRec['vadm_n']=PmagSampRec['sample_int_n']
PmagResRec['data_type']='i'
PmagResults.append(PmagResRec)
if len(PmagSamps)>0:
TmpSamps,keylist=pmag.fillkeys(PmagSamps) # fill in missing keys from different types of records
pmag.magic_write(sampout,TmpSamps,'pmag_samples') # save in sample output file
print(' sample averages written to ',sampout)
#
#create site averages from specimens or samples as specified
#
for site in sites:
if Daverage==0: key,dirlist='specimen',SpecDirs # if specimen averages at site level desired
if Daverage==1: key,dirlist='sample',SampDirs # if sample averages at site level desired
tmp=pmag.get_dictitem(dirlist,'er_site_name',site,'T') # get all the sites with directions
tmp1=pmag.get_dictitem(tmp,key+'_tilt_correction',coords[-1],'T') # use only the last coordinate if Caverage==0
sd=pmag.get_dictitem(SiteNFO,'er_site_name',site,'T') # fish out site information (lat/lon, etc.)
if len(sd)>0:
sitedat=sd[0]
if Caverage==0: # do component wise averaging
for comp in Comps:
siteD=pmag.get_dictitem(tmp1,key+'_comp_name',comp,'T') # get all components comp
if len(siteD)>0: # there are some for this site and component name
PmagSiteRec=pmag.lnpbykey(siteD,'site',key) # get an average for this site
PmagSiteRec['site_comp_name']=comp # decorate the site record
PmagSiteRec["er_location_name"]=siteD[0]['er_location_name']
PmagSiteRec["er_site_name"]=siteD[0]['er_site_name']
PmagSiteRec['site_tilt_correction']=coords[-1]
PmagSiteRec['site_comp_name']= pmag.get_list(siteD,key+'_comp_name')
if Daverage==1:
PmagSiteRec['er_sample_names']= pmag.get_list(siteD,'er_sample_name')
else:
PmagSiteRec['er_specimen_names']= pmag.get_list(siteD,'er_specimen_name')
# determine the demagnetization code (DC3,4 or 5) for this site
AFnum=len(pmag.get_dictitem(siteD,'magic_method_codes','LP-DIR-AF','has'))
Tnum=len(pmag.get_dictitem(siteD,'magic_method_codes','LP-DIR-T','has'))
DC=3
if AFnum>0:DC+=1
if Tnum>0:DC+=1
PmagSiteRec['magic_method_codes']= pmag.get_list(siteD,'magic_method_codes')+':'+ 'LP-DC'+str(DC)
PmagSiteRec['magic_method_codes'].strip(":")
if plotsites==1:
print(PmagSiteRec['er_site_name'])
pmagplotlib.plot_site(EQ['eqarea'],PmagSiteRec,siteD,key) # plot and list the data
pmagplotlib.draw_figs(EQ)
PmagSites.append(PmagSiteRec)
else: # last component only
siteD=tmp1[:] # get the last orientation system specified
if len(siteD)>0: # there are some
PmagSiteRec=pmag.lnpbykey(siteD,'site',key) # get the average for this site
PmagSiteRec["er_location_name"]=siteD[0]['er_location_name'] # decorate the record
PmagSiteRec["er_site_name"]=siteD[0]['er_site_name']
PmagSiteRec['site_comp_name']=comp
PmagSiteRec['site_tilt_correction']=coords[-1]
PmagSiteRec['site_comp_name']= pmag.get_list(siteD,key+'_comp_name')
PmagSiteRec['er_specimen_names']= pmag.get_list(siteD,'er_specimen_name')
PmagSiteRec['er_sample_names']= pmag.get_list(siteD,'er_sample_name')
AFnum=len(pmag.get_dictitem(siteD,'magic_method_codes','LP-DIR-AF','has'))
Tnum=len(pmag.get_dictitem(siteD,'magic_method_codes','LP-DIR-T','has'))
DC=3
if AFnum>0:DC+=1
if Tnum>0:DC+=1
PmagSiteRec['magic_method_codes']= pmag.get_list(siteD,'magic_method_codes')+':'+ 'LP-DC'+str(DC)
PmagSiteRec['magic_method_codes'].strip(":")
if Daverage==0:PmagSiteRec['site_comp_name']= pmag.get_list(siteD,key+'_comp_name')
if plotsites==1:
pmagplotlib.plot_site(EQ['eqarea'],PmagSiteRec,siteD,key)
pmagplotlib.draw_figs(EQ)
PmagSites.append(PmagSiteRec)
else:
print('site information not found in er_sites for site, ',site,' site will be skipped')
for PmagSiteRec in PmagSites: # now decorate each dictionary some more, and calculate VGPs etc. for results table
PmagSiteRec["er_citation_names"]="This study"
PmagSiteRec["er_analyst_mail_names"]=user
PmagSiteRec['magic_software_packages']=version_num
if agefile != "": PmagSiteRec= pmag.get_age(PmagSiteRec,"er_site_name","site_inferred_",AgeNFO,DefaultAge)
PmagSiteRec['pmag_criteria_codes']='ACCEPT'
if 'site_n_lines' in list(PmagSiteRec.keys()) and 'site_n_planes' in list(PmagSiteRec.keys()) and PmagSiteRec['site_n_lines']!="" and PmagSiteRec['site_n_planes']!="":
if int(PmagSiteRec["site_n_planes"])>0:
PmagSiteRec["magic_method_codes"]=PmagSiteRec['magic_method_codes']+":DE-FM-LP"
elif int(PmagSiteRec["site_n_lines"])>2:
PmagSiteRec["magic_method_codes"]=PmagSiteRec['magic_method_codes']+":DE-FM"
kill=pmag.grade(PmagSiteRec,accept,'site_dir')
if len(kill)==0:
PmagResRec={} # set up dictionary for the pmag_results table entry
PmagResRec['data_type']='i' # decorate it a bit
PmagResRec['magic_software_packages']=version_num
PmagSiteRec['site_description']='Site direction included in results table'
PmagResRec['pmag_criteria_codes']='ACCEPT'
dec=float(PmagSiteRec["site_dec"])
inc=float(PmagSiteRec["site_inc"])
if 'site_alpha95' in list(PmagSiteRec.keys()) and PmagSiteRec['site_alpha95']!="":
a95=float(PmagSiteRec["site_alpha95"])
else:a95=180.
sitedat=pmag.get_dictitem(SiteNFO,'er_site_name',PmagSiteRec['er_site_name'],'T')[0] # fish out site information (lat/lon, etc.)
lat=float(sitedat['site_lat'])
lon=float(sitedat['site_lon'])
plong,plat,dp,dm=pmag.dia_vgp(dec,inc,a95,lat,lon) # get the VGP for this site
if PmagSiteRec['site_tilt_correction']=='-1':C=' (spec coord) '
if PmagSiteRec['site_tilt_correction']=='0':C=' (geog. coord) '
if PmagSiteRec['site_tilt_correction']=='100':C=' (strat. coord) '
PmagResRec["pmag_result_name"]="VGP Site: "+PmagSiteRec["er_site_name"] # decorate some more
PmagResRec["result_description"]="Site VGP, coord system = "+str(coord)+' component: '+comp
PmagResRec['er_site_names']=PmagSiteRec['er_site_name']
PmagResRec['pmag_criteria_codes']='ACCEPT'
PmagResRec['er_citation_names']='This study'
PmagResRec['er_analyst_mail_names']=user
PmagResRec["er_location_names"]=PmagSiteRec["er_location_name"]
if Daverage==1:
PmagResRec["er_sample_names"]=PmagSiteRec["er_sample_names"]
else:
PmagResRec["er_specimen_names"]=PmagSiteRec["er_specimen_names"]
PmagResRec["tilt_correction"]=PmagSiteRec['site_tilt_correction']
PmagResRec["pole_comp_name"]=PmagSiteRec['site_comp_name']
PmagResRec["average_dec"]=PmagSiteRec["site_dec"]
PmagResRec["average_inc"]=PmagSiteRec["site_inc"]
PmagResRec["average_alpha95"]=PmagSiteRec["site_alpha95"]
PmagResRec["average_n"]=PmagSiteRec["site_n"]
PmagResRec["average_n_lines"]=PmagSiteRec["site_n_lines"]
PmagResRec["average_n_planes"]=PmagSiteRec["site_n_planes"]
PmagResRec["vgp_n"]=PmagSiteRec["site_n"]
PmagResRec["average_k"]=PmagSiteRec["site_k"]
PmagResRec["average_r"]=PmagSiteRec["site_r"]
PmagResRec["average_lat"]='%10.4f ' %(lat)
PmagResRec["average_lon"]='%10.4f ' %(lon)
if agefile != "": PmagResRec= pmag.get_age(PmagResRec,"er_site_names","average_",AgeNFO,DefaultAge)
site_height=pmag.get_dictitem(height_nfo,'er_site_name',site,'T')
if len(site_height)>0:PmagResRec["average_height"]=site_height[0]['site_height']
PmagResRec["vgp_lat"]='%7.1f ' % (plat)
PmagResRec["vgp_lon"]='%7.1f ' % (plong)
PmagResRec["vgp_dp"]='%7.1f ' % (dp)
PmagResRec["vgp_dm"]='%7.1f ' % (dm)
PmagResRec["magic_method_codes"]= PmagSiteRec["magic_method_codes"]
if PmagSiteRec['site_tilt_correction']=='0':PmagSiteRec['magic_method_codes']=PmagSiteRec['magic_method_codes']+":DA-DIR-GEO"
if PmagSiteRec['site_tilt_correction']=='100':PmagSiteRec['magic_method_codes']=PmagSiteRec['magic_method_codes']+":DA-DIR-TILT"
PmagSiteRec['site_polarity']=""
if polarity==1: # assign polarity based on angle of pole lat to spin axis - may want to re-think this sometime
angle=pmag.angle([0,0],[0,(90-plat)])
if angle <= 55.: PmagSiteRec["site_polarity"]='n'
if angle > 55. and angle < 125.: PmagSiteRec["site_polarity"]='t'
if angle >= 125.: PmagSiteRec["site_polarity"]='r'
PmagResults.append(PmagResRec)
if polarity==1:
crecs=pmag.get_dictitem(PmagSites,'site_tilt_correction','100','T') # find the tilt corrected data
if len(crecs)<2:crecs=pmag.get_dictitem(PmagSites,'site_tilt_correction','0','T') # if there aren't any, find the geographic corrected data
if len(crecs)>2: # if there are some,
comp=pmag.get_list(crecs,'site_comp_name').split(':')[0] # find the first component
crecs=pmag.get_dictitem(crecs,'site_comp_name',comp,'T') # fish out all of the first component
precs=[]
for rec in crecs:
precs.append({'dec':rec['site_dec'],'inc':rec['site_inc'],'name':rec['er_site_name'],'loc':rec['er_location_name']})
polpars=pmag.fisher_by_pol(precs) # calculate average by polarity
for mode in list(polpars.keys()): # hunt through all the modes (normal=A, reverse=B, all=ALL)
PolRes={}
PolRes['er_citation_names']='This study'
PolRes["pmag_result_name"]="Polarity Average: Polarity "+mode #
PolRes["data_type"]="a"
PolRes["average_dec"]='%7.1f'%(polpars[mode]['dec'])
PolRes["average_inc"]='%7.1f'%(polpars[mode]['inc'])
PolRes["average_n"]='%i'%(polpars[mode]['n'])
PolRes["average_r"]='%5.4f'%(polpars[mode]['r'])
PolRes["average_k"]='%6.0f'%(polpars[mode]['k'])
PolRes["average_alpha95"]='%7.1f'%(polpars[mode]['alpha95'])
PolRes['er_site_names']= polpars[mode]['sites']
PolRes['er_location_names']= polpars[mode]['locs']
PolRes['magic_software_packages']=version_num
PmagResults.append(PolRes)
if noInt!=1 and nositeints!=1:
for site in sites: # now do intensities for each site
if plotsites==1:print(site)
if Iaverage==0: key,intlist='specimen',SpecInts # if using specimen level data
if Iaverage==1: key,intlist='sample',PmagSamps # if using sample level data
Ints=pmag.get_dictitem(intlist,'er_site_name',site,'T') # get all the intensities for this site
if len(Ints)>0: # there are some
PmagSiteRec=pmag.average_int(Ints,key,'site') # get average intensity stuff for site table
PmagResRec=pmag.average_int(Ints,key,'average') # get average intensity stuff for results table
if plotsites==1: # if site by site examination requested - print this site out to the screen
for rec in Ints:print(rec['er_'+key+'_name'],' %7.1f'%(1e6*float(rec[key+'_int'])))
if len(Ints)>1:
print('Average: ','%7.1f'%(1e6*float(PmagResRec['average_int'])),'N: ',len(Ints))
print('Sigma: ','%7.1f'%(1e6*float(PmagResRec['average_int_sigma'])),'Sigma %: ',PmagResRec['average_int_sigma_perc'])
input('Press any key to continue\n')
er_location_name=Ints[0]["er_location_name"]
PmagSiteRec["er_location_name"]=er_location_name # decorate the records
PmagSiteRec["er_citation_names"]="This study"
PmagResRec["er_location_names"]=er_location_name
PmagResRec["er_citation_names"]="This study"
PmagSiteRec["er_analyst_mail_names"]=user
PmagResRec["er_analyst_mail_names"]=user
PmagResRec["data_type"]='i'
if Iaverage==0:
PmagSiteRec['er_specimen_names']= pmag.get_list(Ints,'er_specimen_name') # list of all specimens used
PmagResRec['er_specimen_names']= pmag.get_list(Ints,'er_specimen_name')
PmagSiteRec['er_sample_names']= pmag.get_list(Ints,'er_sample_name') # list of all samples used
PmagResRec['er_sample_names']= pmag.get_list(Ints,'er_sample_name')
PmagSiteRec['er_site_name']= site
PmagResRec['er_site_names']= site
PmagSiteRec['magic_method_codes']= pmag.get_list(Ints,'magic_method_codes')
PmagResRec['magic_method_codes']= pmag.get_list(Ints,'magic_method_codes')
kill=pmag.grade(PmagSiteRec,accept,'site_int')
if nocrit==1 or len(kill)==0:
b,sig=float(PmagResRec['average_int']),""
if(PmagResRec['average_int_sigma'])!="":sig=float(PmagResRec['average_int_sigma'])
sdir=pmag.get_dictitem(PmagResults,'er_site_names',site,'T') # fish out site direction
if len(sdir)>0 and sdir[-1]['average_inc']!="": # get the VDM for this record using last average inclination (hope it is the right one!)
inc=float(sdir[0]['average_inc']) #
mlat=pmag.magnetic_lat(inc) # get magnetic latitude using dipole formula
PmagResRec["vdm"]='%8.3e '% (pmag.b_vdm(b,mlat)) # get VDM with magnetic latitude
PmagResRec["vdm_n"]=PmagResRec['average_int_n']
if 'average_int_sigma' in list(PmagResRec.keys()) and PmagResRec['average_int_sigma']!="":
vdm_sig=pmag.b_vdm(float(PmagResRec['average_int_sigma']),mlat)
PmagResRec["vdm_sigma"]='%8.3e '% (vdm_sig)
else:
PmagResRec["vdm_sigma"]=""
mlat="" # define a model latitude
if get_model_lat==1: # use present site latitude
mlats=pmag.get_dictitem(SiteNFO,'er_site_name',site,'T')
if len(mlats)>0: mlat=mlats[0]['site_lat']
elif get_model_lat==2: # use a model latitude from some plate reconstruction model (or something)
mlats=pmag.get_dictitem(ModelLats,'er_site_name',site,'T')
if len(mlats)>0: PmagResRec['model_lat']=mlats[0]['site_model_lat']
mlat=PmagResRec['model_lat']
if mlat!="":
PmagResRec["vadm"]='%8.3e '% (pmag.b_vdm(b,float(mlat))) # get the VADM using the desired latitude
if sig!="":
vdm_sig=pmag.b_vdm(float(PmagResRec['average_int_sigma']),float(mlat))
PmagResRec["vadm_sigma"]='%8.3e '% (vdm_sig)
PmagResRec["vadm_n"]=PmagResRec['average_int_n']
else:
PmagResRec["vadm_sigma"]=""
sitedat=pmag.get_dictitem(SiteNFO,'er_site_name',PmagSiteRec['er_site_name'],'T') # fish out site information (lat/lon, etc.)
if len(sitedat)>0:
sitedat=sitedat[0]
PmagResRec['average_lat']=sitedat['site_lat']
PmagResRec['average_lon']=sitedat['site_lon']
else:
PmagResRec['average_lon']='UNKNOWN'
PmagResRec['average_lon']='UNKNOWN'
PmagResRec['magic_software_packages']=version_num
PmagResRec["pmag_result_name"]="V[A]DM: Site "+site
PmagResRec["result_description"]="V[A]DM of site"
PmagResRec["pmag_criteria_codes"]="ACCEPT"
if agefile != "": PmagResRec= pmag.get_age(PmagResRec,"er_site_names","average_",AgeNFO,DefaultAge)
site_height=pmag.get_dictitem(height_nfo,'er_site_name',site,'T')
if len(site_height)>0:PmagResRec["average_height"]=site_height[0]['site_height']
PmagSites.append(PmagSiteRec)
PmagResults.append(PmagResRec)
if len(PmagSites)>0:
Tmp,keylist=pmag.fillkeys(PmagSites)
pmag.magic_write(siteout,Tmp,'pmag_sites')
print(' sites written to ',siteout)
else: print("No Site level table")
if len(PmagResults)>0:
TmpRes,keylist=pmag.fillkeys(PmagResults)
pmag.magic_write(resout,TmpRes,'pmag_results')
print(' results written to ',resout)
else: print("No Results level table") | python | def main():
"""
NAME
specimens_results_magic.py
DESCRIPTION
combines pmag_specimens.txt file with age, location, acceptance criteria and
outputs pmag_results table along with other MagIC tables necessary for uploading to the database
SYNTAX
specimens_results_magic.py [command line options]
OPTIONS
-h prints help message and quits
-usr USER: identify user, default is ""
-f: specimen input magic_measurements format file, default is "magic_measurements.txt"
-fsp: specimen input pmag_specimens format file, default is "pmag_specimens.txt"
-fsm: sample input er_samples format file, default is "er_samples.txt"
-fsi: specimen input er_sites format file, default is "er_sites.txt"
-fla: specify a file with paleolatitudes for calculating VADMs, default is not to calculate VADMS
format is: site_name paleolatitude (space delimited file)
-fa AGES: specify er_ages format file with age information
-crd [s,g,t,b]: specify coordinate system
(s, specimen, g geographic, t, tilt corrected, b, geographic and tilt corrected)
Default is to assume geographic
NB: only the tilt corrected data will appear on the results table, if both g and t are selected.
-cor [AC:CR:NL]: colon delimited list of required data adjustments for all specimens
included in intensity calculations (anisotropy, cooling rate, non-linear TRM)
unless specified, corrections will not be applied
-pri [TRM:ARM] colon delimited list of priorities for anisotropy correction (-cor must also be set to include AC). default is TRM, then ARM
-age MIN MAX UNITS: specify age boundaries and units
-exc: use exiting selection criteria (in pmag_criteria.txt file), default is default criteria
-C: no acceptance criteria
-aD: average directions per sample, default is NOT
-aI: average multiple specimen intensities per sample, default is by site
-aC: average all components together, default is NOT
-pol: calculate polarity averages
-sam: save sample level vgps and v[a]dms, default is by site
-xSi: skip the site level intensity calculation
-p: plot directions and look at intensities by site, default is NOT
-fmt: specify output for saved images, default is svg (only if -p set)
-lat: use present latitude for calculating VADMs, default is not to calculate VADMs
-xD: skip directions
-xI: skip intensities
OUPUT
writes pmag_samples, pmag_sites, pmag_results tables
"""
# set defaults
Comps=[] # list of components
version_num=pmag.get_version()
args=sys.argv
DefaultAge=["none"]
skipdirs,coord,excrit,custom,vgps,average,Iaverage,plotsites,opt=1,0,0,0,0,0,0,0,0
get_model_lat=0 # this skips VADM calculation altogether, when get_model_lat=1, uses present day
fmt='svg'
dir_path="."
model_lat_file=""
Caverage=0
infile='pmag_specimens.txt'
measfile="magic_measurements.txt"
sampfile="er_samples.txt"
sitefile="er_sites.txt"
agefile="er_ages.txt"
specout="er_specimens.txt"
sampout="pmag_samples.txt"
siteout="pmag_sites.txt"
resout="pmag_results.txt"
critout="pmag_criteria.txt"
instout="magic_instruments.txt"
sigcutoff,OBJ="",""
noDir,noInt=0,0
polarity=0
coords=['0']
Dcrit,Icrit,nocrit=0,0,0
corrections=[]
nocorrection=['DA-NL','DA-AC','DA-CR']
priorities=['DA-AC-ARM','DA-AC-TRM'] # priorities for anisotropy correction
# get command line stuff
if "-h" in args:
print(main.__doc__)
sys.exit()
if '-WD' in args:
ind=args.index("-WD")
dir_path=args[ind+1]
if '-cor' in args:
ind=args.index('-cor')
cors=args[ind+1].split(':') # list of required data adjustments
for cor in cors:
nocorrection.remove('DA-'+cor)
corrections.append('DA-'+cor)
if '-pri' in args:
ind=args.index('-pri')
priorities=args[ind+1].split(':') # list of required data adjustments
for p in priorities:
p='DA-AC-'+p
if '-f' in args:
ind=args.index("-f")
measfile=args[ind+1]
if '-fsp' in args:
ind=args.index("-fsp")
infile=args[ind+1]
if '-fsi' in args:
ind=args.index("-fsi")
sitefile=args[ind+1]
if "-crd" in args:
ind=args.index("-crd")
coord=args[ind+1]
if coord=='s':coords=['-1']
if coord=='g':coords=['0']
if coord=='t':coords=['100']
if coord=='b':coords=['0','100']
if "-usr" in args:
ind=args.index("-usr")
user=sys.argv[ind+1]
else: user=""
if "-C" in args: Dcrit,Icrit,nocrit=1,1,1 # no selection criteria
if "-sam" in args: vgps=1 # save sample level VGPS/VADMs
if "-xSi" in args:
nositeints=1 # skip site level intensity
else:
nositeints=0
if "-age" in args:
ind=args.index("-age")
DefaultAge[0]=args[ind+1]
DefaultAge.append(args[ind+2])
DefaultAge.append(args[ind+3])
Daverage,Iaverage,Caverage=0,0,0
if "-aD" in args: Daverage=1 # average by sample directions
if "-aI" in args: Iaverage=1 # average by sample intensities
if "-aC" in args: Caverage=1 # average all components together ??? why???
if "-pol" in args: polarity=1 # calculate averages by polarity
if '-xD' in args:noDir=1
if '-xI' in args:
noInt=1
elif "-fla" in args:
if '-lat' in args:
print("you should set a paleolatitude file OR use present day lat - not both")
sys.exit()
ind=args.index("-fla")
model_lat_file=dir_path+'/'+args[ind+1]
get_model_lat=2
mlat=open(model_lat_file,'r')
ModelLats=[]
for line in mlat.readlines():
ModelLat={}
tmp=line.split()
ModelLat["er_site_name"]=tmp[0]
ModelLat["site_model_lat"]=tmp[1]
ModelLat["er_sample_name"]=tmp[0]
ModelLat["sample_lat"]=tmp[1]
ModelLats.append(ModelLat)
get_model_lat=2
elif '-lat' in args:
get_model_lat=1
if "-p" in args:
plotsites=1
if "-fmt" in args:
ind=args.index("-fmt")
fmt=args[ind+1]
if noDir==0: # plot by site - set up plot window
import pmagplotlib
EQ={}
EQ['eqarea']=1
pmagplotlib.plot_init(EQ['eqarea'],5,5) # define figure 1 as equal area projection
pmagplotlib.plot_net(EQ['eqarea']) # I don't know why this has to be here, but otherwise the first plot never plots...
pmagplotlib.draw_figs(EQ)
if '-WD' in args:
infile=dir_path+'/'+infile
measfile=dir_path+'/'+measfile
instout=dir_path+'/'+instout
sampfile=dir_path+'/'+sampfile
sitefile=dir_path+'/'+sitefile
agefile=dir_path+'/'+agefile
specout=dir_path+'/'+specout
sampout=dir_path+'/'+sampout
siteout=dir_path+'/'+siteout
resout=dir_path+'/'+resout
critout=dir_path+'/'+critout
if "-exc" in args: # use existing pmag_criteria file
if "-C" in args:
print('you can not use both existing and no criteria - choose either -exc OR -C OR neither (for default)')
sys.exit()
crit_data,file_type=pmag.magic_read(critout)
print("Acceptance criteria read in from ", critout)
else : # use default criteria (if nocrit set, then get really loose criteria as default)
crit_data=pmag.default_criteria(nocrit)
if nocrit==0:
print("Acceptance criteria are defaults")
else:
print("No acceptance criteria used ")
accept={}
for critrec in crit_data:
for key in list(critrec.keys()):
# need to migrate specimen_dang to specimen_int_dang for intensity data using old format
if 'IE-SPEC' in list(critrec.keys()) and 'specimen_dang' in list(critrec.keys()) and 'specimen_int_dang' not in list(critrec.keys()):
critrec['specimen_int_dang']=critrec['specimen_dang']
del critrec['specimen_dang']
# need to get rid of ron shaars sample_int_sigma_uT
if 'sample_int_sigma_uT' in list(critrec.keys()):
critrec['sample_int_sigma']='%10.3e'%(eval(critrec['sample_int_sigma_uT'])*1e-6)
if key not in list(accept.keys()) and critrec[key]!='':
accept[key]=critrec[key]
#
#
if "-exc" not in args and "-C" not in args:
print("args",args)
pmag.magic_write(critout,[accept],'pmag_criteria')
print("\n Pmag Criteria stored in ",critout,'\n')
#
# now we're done slow dancing
#
SiteNFO,file_type=pmag.magic_read(sitefile) # read in site data - has the lats and lons
SampNFO,file_type=pmag.magic_read(sampfile) # read in site data - has the lats and lons
height_nfo=pmag.get_dictitem(SiteNFO,'site_height','','F') # find all the sites with height info.
if agefile !="":AgeNFO,file_type=pmag.magic_read(agefile) # read in the age information
Data,file_type=pmag.magic_read(infile) # read in specimen interpretations
IntData=pmag.get_dictitem(Data,'specimen_int','','F') # retrieve specimens with intensity data
comment,orient="",[]
samples,sites=[],[]
for rec in Data: # run through the data filling in missing keys and finding all components, coordinates available
# fill in missing fields, collect unique sample and site names
if 'er_sample_name' not in list(rec.keys()):
rec['er_sample_name']=""
elif rec['er_sample_name'] not in samples:
samples.append(rec['er_sample_name'])
if 'er_site_name' not in list(rec.keys()):
rec['er_site_name']=""
elif rec['er_site_name'] not in sites:
sites.append(rec['er_site_name'])
if 'specimen_int' not in list(rec.keys()):rec['specimen_int']=''
if 'specimen_comp_name' not in list(rec.keys()) or rec['specimen_comp_name']=="":rec['specimen_comp_name']='A'
if rec['specimen_comp_name'] not in Comps:Comps.append(rec['specimen_comp_name'])
rec['specimen_tilt_correction']=rec['specimen_tilt_correction'].strip('\n')
if "specimen_tilt_correction" not in list(rec.keys()): rec["specimen_tilt_correction"]="-1" # assume sample coordinates
if rec["specimen_tilt_correction"] not in orient: orient.append(rec["specimen_tilt_correction"]) # collect available coordinate systems
if "specimen_direction_type" not in list(rec.keys()): rec["specimen_direction_type"]='l' # assume direction is line - not plane
if "specimen_dec" not in list(rec.keys()): rec["specimen_direction_type"]='' # if no declination, set direction type to blank
if "specimen_n" not in list(rec.keys()): rec["specimen_n"]='' # put in n
if "specimen_alpha95" not in list(rec.keys()): rec["specimen_alpha95"]='' # put in alpha95
if "magic_method_codes" not in list(rec.keys()): rec["magic_method_codes"]=''
#
# start parsing data into SpecDirs, SpecPlanes, SpecInts
SpecInts,SpecDirs,SpecPlanes=[],[],[]
samples.sort() # get sorted list of samples and sites
sites.sort()
if noInt==0: # don't skip intensities
IntData=pmag.get_dictitem(Data,'specimen_int','','F') # retrieve specimens with intensity data
if nocrit==0: # use selection criteria
for rec in IntData: # do selection criteria
kill=pmag.grade(rec,accept,'specimen_int')
if len(kill)==0: SpecInts.append(rec) # intensity record to be included in sample, site calculations
else:
SpecInts=IntData[:] # take everything - no selection criteria
# check for required data adjustments
if len(corrections)>0 and len(SpecInts)>0:
for cor in corrections:
SpecInts=pmag.get_dictitem(SpecInts,'magic_method_codes',cor,'has') # only take specimens with the required corrections
if len(nocorrection)>0 and len(SpecInts)>0:
for cor in nocorrection:
SpecInts=pmag.get_dictitem(SpecInts,'magic_method_codes',cor,'not') # exclude the corrections not specified for inclusion
# take top priority specimen of its name in remaining specimens (only one per customer)
PrioritySpecInts=[]
specimens=pmag.get_specs(SpecInts) # get list of uniq specimen names
for spec in specimens:
ThisSpecRecs=pmag.get_dictitem(SpecInts,'er_specimen_name',spec,'T') # all the records for this specimen
if len(ThisSpecRecs)==1:
PrioritySpecInts.append(ThisSpecRecs[0])
elif len(ThisSpecRecs)>1: # more than one
prec=[]
for p in priorities:
ThisSpecRecs=pmag.get_dictitem(SpecInts,'magic_method_codes',p,'has') # all the records for this specimen
if len(ThisSpecRecs)>0:prec.append(ThisSpecRecs[0])
PrioritySpecInts.append(prec[0]) # take the best one
SpecInts=PrioritySpecInts # this has the first specimen record
if noDir==0: # don't skip directions
AllDirs=pmag.get_dictitem(Data,'specimen_direction_type','','F') # retrieve specimens with directed lines and planes
Ns=pmag.get_dictitem(AllDirs,'specimen_n','','F') # get all specimens with specimen_n information
if nocrit!=1: # use selection criteria
for rec in Ns: # look through everything with specimen_n for "good" data
kill=pmag.grade(rec,accept,'specimen_dir')
if len(kill)==0: # nothing killed it
SpecDirs.append(rec)
else: # no criteria
SpecDirs=AllDirs[:] # take them all
# SpecDirs is now the list of all specimen directions (lines and planes) that pass muster
#
PmagSamps,SampDirs=[],[] # list of all sample data and list of those that pass the DE-SAMP criteria
PmagSites,PmagResults=[],[] # list of all site data and selected results
SampInts=[]
for samp in samples: # run through the sample names
if Daverage==1: # average by sample if desired
SampDir=pmag.get_dictitem(SpecDirs,'er_sample_name',samp,'T') # get all the directional data for this sample
if len(SampDir)>0: # there are some directions
for coord in coords: # step through desired coordinate systems
CoordDir=pmag.get_dictitem(SampDir,'specimen_tilt_correction',coord,'T') # get all the directions for this sample
if len(CoordDir)>0: # there are some with this coordinate system
if Caverage==0: # look component by component
for comp in Comps:
CompDir=pmag.get_dictitem(CoordDir,'specimen_comp_name',comp,'T') # get all directions from this component
if len(CompDir)>0: # there are some
PmagSampRec=pmag.lnpbykey(CompDir,'sample','specimen') # get a sample average from all specimens
PmagSampRec["er_location_name"]=CompDir[0]['er_location_name'] # decorate the sample record
PmagSampRec["er_site_name"]=CompDir[0]['er_site_name']
PmagSampRec["er_sample_name"]=samp
PmagSampRec["er_citation_names"]="This study"
PmagSampRec["er_analyst_mail_names"]=user
PmagSampRec['magic_software_packages']=version_num
if nocrit!=1:PmagSampRec['pmag_criteria_codes']="ACCEPT"
if agefile != "": PmagSampRec= pmag.get_age(PmagSampRec,"er_site_name","sample_inferred_",AgeNFO,DefaultAge)
site_height=pmag.get_dictitem(height_nfo,'er_site_name',PmagSampRec['er_site_name'],'T')
if len(site_height)>0:PmagSampRec["sample_height"]=site_height[0]['site_height'] # add in height if available
PmagSampRec['sample_comp_name']=comp
PmagSampRec['sample_tilt_correction']=coord
PmagSampRec['er_specimen_names']= pmag.get_list(CompDir,'er_specimen_name') # get a list of the specimen names used
PmagSampRec['magic_method_codes']= pmag.get_list(CompDir,'magic_method_codes') # get a list of the methods used
if nocrit!=1: # apply selection criteria
kill=pmag.grade(PmagSampRec,accept,'sample_dir')
else:
kill=[]
if len(kill)==0:
SampDirs.append(PmagSampRec)
if vgps==1: # if sample level VGP info desired, do that now
PmagResRec=pmag.getsampVGP(PmagSampRec,SiteNFO)
if PmagResRec!="":PmagResults.append(PmagResRec)
PmagSamps.append(PmagSampRec)
if Caverage==1: # average all components together basically same as above
PmagSampRec=pmag.lnpbykey(CoordDir,'sample','specimen')
PmagSampRec["er_location_name"]=CoordDir[0]['er_location_name']
PmagSampRec["er_site_name"]=CoordDir[0]['er_site_name']
PmagSampRec["er_sample_name"]=samp
PmagSampRec["er_citation_names"]="This study"
PmagSampRec["er_analyst_mail_names"]=user
PmagSampRec['magic_software_packages']=version_num
if nocrit!=1:PmagSampRec['pmag_criteria_codes']=""
if agefile != "": PmagSampRec= pmag.get_age(PmagSampRec,"er_site_name","sample_inferred_",AgeNFO,DefaultAge)
site_height=pmag.get_dictitem(height_nfo,'er_site_name',site,'T')
if len(site_height)>0:PmagSampRec["sample_height"]=site_height[0]['site_height'] # add in height if available
PmagSampRec['sample_tilt_correction']=coord
PmagSampRec['sample_comp_name']= pmag.get_list(CoordDir,'specimen_comp_name') # get components used
PmagSampRec['er_specimen_names']= pmag.get_list(CoordDir,'er_specimen_name') # get specimne names averaged
PmagSampRec['magic_method_codes']= pmag.get_list(CoordDir,'magic_method_codes') # assemble method codes
if nocrit!=1: # apply selection criteria
kill=pmag.grade(PmagSampRec,accept,'sample_dir')
if len(kill)==0: # passes the mustard
SampDirs.append(PmagSampRec)
if vgps==1:
PmagResRec=pmag.getsampVGP(PmagSampRec,SiteNFO)
if PmagResRec!="":PmagResults.append(PmagResRec)
else: # take everything
SampDirs.append(PmagSampRec)
if vgps==1:
PmagResRec=pmag.getsampVGP(PmagSampRec,SiteNFO)
if PmagResRec!="":PmagResults.append(PmagResRec)
PmagSamps.append(PmagSampRec)
if Iaverage==1: # average by sample if desired
SampI=pmag.get_dictitem(SpecInts,'er_sample_name',samp,'T') # get all the intensity data for this sample
if len(SampI)>0: # there are some
PmagSampRec=pmag.average_int(SampI,'specimen','sample') # get average intensity stuff
PmagSampRec["sample_description"]="sample intensity" # decorate sample record
PmagSampRec["sample_direction_type"]=""
PmagSampRec['er_site_name']=SampI[0]["er_site_name"]
PmagSampRec['er_sample_name']=samp
PmagSampRec['er_location_name']=SampI[0]["er_location_name"]
PmagSampRec["er_citation_names"]="This study"
PmagSampRec["er_analyst_mail_names"]=user
if agefile != "": PmagSampRec=pmag.get_age(PmagSampRec,"er_site_name","sample_inferred_", AgeNFO,DefaultAge)
site_height=pmag.get_dictitem(height_nfo,'er_site_name',PmagSampRec['er_site_name'],'T')
if len(site_height)>0:PmagSampRec["sample_height"]=site_height[0]['site_height'] # add in height if available
PmagSampRec['er_specimen_names']= pmag.get_list(SampI,'er_specimen_name')
PmagSampRec['magic_method_codes']= pmag.get_list(SampI,'magic_method_codes')
if nocrit!=1: # apply criteria!
kill=pmag.grade(PmagSampRec,accept,'sample_int')
if len(kill)==0:
PmagSampRec['pmag_criteria_codes']="ACCEPT"
SampInts.append(PmagSampRec)
PmagSamps.append(PmagSampRec)
else:PmagSampRec={} # sample rejected
else: # no criteria
SampInts.append(PmagSampRec)
PmagSamps.append(PmagSampRec)
PmagSampRec['pmag_criteria_codes']=""
if vgps==1 and get_model_lat!=0 and PmagSampRec!={}: #
if get_model_lat==1: # use sample latitude
PmagResRec=pmag.getsampVDM(PmagSampRec,SampNFO)
del(PmagResRec['model_lat']) # get rid of the model lat key
elif get_model_lat==2: # use model latitude
PmagResRec=pmag.getsampVDM(PmagSampRec,ModelLats)
if PmagResRec!={}:PmagResRec['magic_method_codes']=PmagResRec['magic_method_codes']+":IE-MLAT"
if PmagResRec!={}:
PmagResRec['er_specimen_names']=PmagSampRec['er_specimen_names']
PmagResRec['er_sample_names']=PmagSampRec['er_sample_name']
PmagResRec['pmag_criteria_codes']='ACCEPT'
PmagResRec['average_int_sigma_perc']=PmagSampRec['sample_int_sigma_perc']
PmagResRec['average_int_sigma']=PmagSampRec['sample_int_sigma']
PmagResRec['average_int_n']=PmagSampRec['sample_int_n']
PmagResRec['vadm_n']=PmagSampRec['sample_int_n']
PmagResRec['data_type']='i'
PmagResults.append(PmagResRec)
if len(PmagSamps)>0:
TmpSamps,keylist=pmag.fillkeys(PmagSamps) # fill in missing keys from different types of records
pmag.magic_write(sampout,TmpSamps,'pmag_samples') # save in sample output file
print(' sample averages written to ',sampout)
#
#create site averages from specimens or samples as specified
#
for site in sites:
if Daverage==0: key,dirlist='specimen',SpecDirs # if specimen averages at site level desired
if Daverage==1: key,dirlist='sample',SampDirs # if sample averages at site level desired
tmp=pmag.get_dictitem(dirlist,'er_site_name',site,'T') # get all the sites with directions
tmp1=pmag.get_dictitem(tmp,key+'_tilt_correction',coords[-1],'T') # use only the last coordinate if Caverage==0
sd=pmag.get_dictitem(SiteNFO,'er_site_name',site,'T') # fish out site information (lat/lon, etc.)
if len(sd)>0:
sitedat=sd[0]
if Caverage==0: # do component wise averaging
for comp in Comps:
siteD=pmag.get_dictitem(tmp1,key+'_comp_name',comp,'T') # get all components comp
if len(siteD)>0: # there are some for this site and component name
PmagSiteRec=pmag.lnpbykey(siteD,'site',key) # get an average for this site
PmagSiteRec['site_comp_name']=comp # decorate the site record
PmagSiteRec["er_location_name"]=siteD[0]['er_location_name']
PmagSiteRec["er_site_name"]=siteD[0]['er_site_name']
PmagSiteRec['site_tilt_correction']=coords[-1]
PmagSiteRec['site_comp_name']= pmag.get_list(siteD,key+'_comp_name')
if Daverage==1:
PmagSiteRec['er_sample_names']= pmag.get_list(siteD,'er_sample_name')
else:
PmagSiteRec['er_specimen_names']= pmag.get_list(siteD,'er_specimen_name')
# determine the demagnetization code (DC3,4 or 5) for this site
AFnum=len(pmag.get_dictitem(siteD,'magic_method_codes','LP-DIR-AF','has'))
Tnum=len(pmag.get_dictitem(siteD,'magic_method_codes','LP-DIR-T','has'))
DC=3
if AFnum>0:DC+=1
if Tnum>0:DC+=1
PmagSiteRec['magic_method_codes']= pmag.get_list(siteD,'magic_method_codes')+':'+ 'LP-DC'+str(DC)
PmagSiteRec['magic_method_codes'].strip(":")
if plotsites==1:
print(PmagSiteRec['er_site_name'])
pmagplotlib.plot_site(EQ['eqarea'],PmagSiteRec,siteD,key) # plot and list the data
pmagplotlib.draw_figs(EQ)
PmagSites.append(PmagSiteRec)
else: # last component only
siteD=tmp1[:] # get the last orientation system specified
if len(siteD)>0: # there are some
PmagSiteRec=pmag.lnpbykey(siteD,'site',key) # get the average for this site
PmagSiteRec["er_location_name"]=siteD[0]['er_location_name'] # decorate the record
PmagSiteRec["er_site_name"]=siteD[0]['er_site_name']
PmagSiteRec['site_comp_name']=comp
PmagSiteRec['site_tilt_correction']=coords[-1]
PmagSiteRec['site_comp_name']= pmag.get_list(siteD,key+'_comp_name')
PmagSiteRec['er_specimen_names']= pmag.get_list(siteD,'er_specimen_name')
PmagSiteRec['er_sample_names']= pmag.get_list(siteD,'er_sample_name')
AFnum=len(pmag.get_dictitem(siteD,'magic_method_codes','LP-DIR-AF','has'))
Tnum=len(pmag.get_dictitem(siteD,'magic_method_codes','LP-DIR-T','has'))
DC=3
if AFnum>0:DC+=1
if Tnum>0:DC+=1
PmagSiteRec['magic_method_codes']= pmag.get_list(siteD,'magic_method_codes')+':'+ 'LP-DC'+str(DC)
PmagSiteRec['magic_method_codes'].strip(":")
if Daverage==0:PmagSiteRec['site_comp_name']= pmag.get_list(siteD,key+'_comp_name')
if plotsites==1:
pmagplotlib.plot_site(EQ['eqarea'],PmagSiteRec,siteD,key)
pmagplotlib.draw_figs(EQ)
PmagSites.append(PmagSiteRec)
else:
print('site information not found in er_sites for site, ',site,' site will be skipped')
for PmagSiteRec in PmagSites: # now decorate each dictionary some more, and calculate VGPs etc. for results table
PmagSiteRec["er_citation_names"]="This study"
PmagSiteRec["er_analyst_mail_names"]=user
PmagSiteRec['magic_software_packages']=version_num
if agefile != "": PmagSiteRec= pmag.get_age(PmagSiteRec,"er_site_name","site_inferred_",AgeNFO,DefaultAge)
PmagSiteRec['pmag_criteria_codes']='ACCEPT'
if 'site_n_lines' in list(PmagSiteRec.keys()) and 'site_n_planes' in list(PmagSiteRec.keys()) and PmagSiteRec['site_n_lines']!="" and PmagSiteRec['site_n_planes']!="":
if int(PmagSiteRec["site_n_planes"])>0:
PmagSiteRec["magic_method_codes"]=PmagSiteRec['magic_method_codes']+":DE-FM-LP"
elif int(PmagSiteRec["site_n_lines"])>2:
PmagSiteRec["magic_method_codes"]=PmagSiteRec['magic_method_codes']+":DE-FM"
kill=pmag.grade(PmagSiteRec,accept,'site_dir')
if len(kill)==0:
PmagResRec={} # set up dictionary for the pmag_results table entry
PmagResRec['data_type']='i' # decorate it a bit
PmagResRec['magic_software_packages']=version_num
PmagSiteRec['site_description']='Site direction included in results table'
PmagResRec['pmag_criteria_codes']='ACCEPT'
dec=float(PmagSiteRec["site_dec"])
inc=float(PmagSiteRec["site_inc"])
if 'site_alpha95' in list(PmagSiteRec.keys()) and PmagSiteRec['site_alpha95']!="":
a95=float(PmagSiteRec["site_alpha95"])
else:a95=180.
sitedat=pmag.get_dictitem(SiteNFO,'er_site_name',PmagSiteRec['er_site_name'],'T')[0] # fish out site information (lat/lon, etc.)
lat=float(sitedat['site_lat'])
lon=float(sitedat['site_lon'])
plong,plat,dp,dm=pmag.dia_vgp(dec,inc,a95,lat,lon) # get the VGP for this site
if PmagSiteRec['site_tilt_correction']=='-1':C=' (spec coord) '
if PmagSiteRec['site_tilt_correction']=='0':C=' (geog. coord) '
if PmagSiteRec['site_tilt_correction']=='100':C=' (strat. coord) '
PmagResRec["pmag_result_name"]="VGP Site: "+PmagSiteRec["er_site_name"] # decorate some more
PmagResRec["result_description"]="Site VGP, coord system = "+str(coord)+' component: '+comp
PmagResRec['er_site_names']=PmagSiteRec['er_site_name']
PmagResRec['pmag_criteria_codes']='ACCEPT'
PmagResRec['er_citation_names']='This study'
PmagResRec['er_analyst_mail_names']=user
PmagResRec["er_location_names"]=PmagSiteRec["er_location_name"]
if Daverage==1:
PmagResRec["er_sample_names"]=PmagSiteRec["er_sample_names"]
else:
PmagResRec["er_specimen_names"]=PmagSiteRec["er_specimen_names"]
PmagResRec["tilt_correction"]=PmagSiteRec['site_tilt_correction']
PmagResRec["pole_comp_name"]=PmagSiteRec['site_comp_name']
PmagResRec["average_dec"]=PmagSiteRec["site_dec"]
PmagResRec["average_inc"]=PmagSiteRec["site_inc"]
PmagResRec["average_alpha95"]=PmagSiteRec["site_alpha95"]
PmagResRec["average_n"]=PmagSiteRec["site_n"]
PmagResRec["average_n_lines"]=PmagSiteRec["site_n_lines"]
PmagResRec["average_n_planes"]=PmagSiteRec["site_n_planes"]
PmagResRec["vgp_n"]=PmagSiteRec["site_n"]
PmagResRec["average_k"]=PmagSiteRec["site_k"]
PmagResRec["average_r"]=PmagSiteRec["site_r"]
PmagResRec["average_lat"]='%10.4f ' %(lat)
PmagResRec["average_lon"]='%10.4f ' %(lon)
if agefile != "": PmagResRec= pmag.get_age(PmagResRec,"er_site_names","average_",AgeNFO,DefaultAge)
site_height=pmag.get_dictitem(height_nfo,'er_site_name',site,'T')
if len(site_height)>0:PmagResRec["average_height"]=site_height[0]['site_height']
PmagResRec["vgp_lat"]='%7.1f ' % (plat)
PmagResRec["vgp_lon"]='%7.1f ' % (plong)
PmagResRec["vgp_dp"]='%7.1f ' % (dp)
PmagResRec["vgp_dm"]='%7.1f ' % (dm)
PmagResRec["magic_method_codes"]= PmagSiteRec["magic_method_codes"]
if PmagSiteRec['site_tilt_correction']=='0':PmagSiteRec['magic_method_codes']=PmagSiteRec['magic_method_codes']+":DA-DIR-GEO"
if PmagSiteRec['site_tilt_correction']=='100':PmagSiteRec['magic_method_codes']=PmagSiteRec['magic_method_codes']+":DA-DIR-TILT"
PmagSiteRec['site_polarity']=""
if polarity==1: # assign polarity based on angle of pole lat to spin axis - may want to re-think this sometime
angle=pmag.angle([0,0],[0,(90-plat)])
if angle <= 55.: PmagSiteRec["site_polarity"]='n'
if angle > 55. and angle < 125.: PmagSiteRec["site_polarity"]='t'
if angle >= 125.: PmagSiteRec["site_polarity"]='r'
PmagResults.append(PmagResRec)
if polarity==1:
crecs=pmag.get_dictitem(PmagSites,'site_tilt_correction','100','T') # find the tilt corrected data
if len(crecs)<2:crecs=pmag.get_dictitem(PmagSites,'site_tilt_correction','0','T') # if there aren't any, find the geographic corrected data
if len(crecs)>2: # if there are some,
comp=pmag.get_list(crecs,'site_comp_name').split(':')[0] # find the first component
crecs=pmag.get_dictitem(crecs,'site_comp_name',comp,'T') # fish out all of the first component
precs=[]
for rec in crecs:
precs.append({'dec':rec['site_dec'],'inc':rec['site_inc'],'name':rec['er_site_name'],'loc':rec['er_location_name']})
polpars=pmag.fisher_by_pol(precs) # calculate average by polarity
for mode in list(polpars.keys()): # hunt through all the modes (normal=A, reverse=B, all=ALL)
PolRes={}
PolRes['er_citation_names']='This study'
PolRes["pmag_result_name"]="Polarity Average: Polarity "+mode #
PolRes["data_type"]="a"
PolRes["average_dec"]='%7.1f'%(polpars[mode]['dec'])
PolRes["average_inc"]='%7.1f'%(polpars[mode]['inc'])
PolRes["average_n"]='%i'%(polpars[mode]['n'])
PolRes["average_r"]='%5.4f'%(polpars[mode]['r'])
PolRes["average_k"]='%6.0f'%(polpars[mode]['k'])
PolRes["average_alpha95"]='%7.1f'%(polpars[mode]['alpha95'])
PolRes['er_site_names']= polpars[mode]['sites']
PolRes['er_location_names']= polpars[mode]['locs']
PolRes['magic_software_packages']=version_num
PmagResults.append(PolRes)
if noInt!=1 and nositeints!=1:
for site in sites: # now do intensities for each site
if plotsites==1:print(site)
if Iaverage==0: key,intlist='specimen',SpecInts # if using specimen level data
if Iaverage==1: key,intlist='sample',PmagSamps # if using sample level data
Ints=pmag.get_dictitem(intlist,'er_site_name',site,'T') # get all the intensities for this site
if len(Ints)>0: # there are some
PmagSiteRec=pmag.average_int(Ints,key,'site') # get average intensity stuff for site table
PmagResRec=pmag.average_int(Ints,key,'average') # get average intensity stuff for results table
if plotsites==1: # if site by site examination requested - print this site out to the screen
for rec in Ints:print(rec['er_'+key+'_name'],' %7.1f'%(1e6*float(rec[key+'_int'])))
if len(Ints)>1:
print('Average: ','%7.1f'%(1e6*float(PmagResRec['average_int'])),'N: ',len(Ints))
print('Sigma: ','%7.1f'%(1e6*float(PmagResRec['average_int_sigma'])),'Sigma %: ',PmagResRec['average_int_sigma_perc'])
input('Press any key to continue\n')
er_location_name=Ints[0]["er_location_name"]
PmagSiteRec["er_location_name"]=er_location_name # decorate the records
PmagSiteRec["er_citation_names"]="This study"
PmagResRec["er_location_names"]=er_location_name
PmagResRec["er_citation_names"]="This study"
PmagSiteRec["er_analyst_mail_names"]=user
PmagResRec["er_analyst_mail_names"]=user
PmagResRec["data_type"]='i'
if Iaverage==0:
PmagSiteRec['er_specimen_names']= pmag.get_list(Ints,'er_specimen_name') # list of all specimens used
PmagResRec['er_specimen_names']= pmag.get_list(Ints,'er_specimen_name')
PmagSiteRec['er_sample_names']= pmag.get_list(Ints,'er_sample_name') # list of all samples used
PmagResRec['er_sample_names']= pmag.get_list(Ints,'er_sample_name')
PmagSiteRec['er_site_name']= site
PmagResRec['er_site_names']= site
PmagSiteRec['magic_method_codes']= pmag.get_list(Ints,'magic_method_codes')
PmagResRec['magic_method_codes']= pmag.get_list(Ints,'magic_method_codes')
kill=pmag.grade(PmagSiteRec,accept,'site_int')
if nocrit==1 or len(kill)==0:
b,sig=float(PmagResRec['average_int']),""
if(PmagResRec['average_int_sigma'])!="":sig=float(PmagResRec['average_int_sigma'])
sdir=pmag.get_dictitem(PmagResults,'er_site_names',site,'T') # fish out site direction
if len(sdir)>0 and sdir[-1]['average_inc']!="": # get the VDM for this record using last average inclination (hope it is the right one!)
inc=float(sdir[0]['average_inc']) #
mlat=pmag.magnetic_lat(inc) # get magnetic latitude using dipole formula
PmagResRec["vdm"]='%8.3e '% (pmag.b_vdm(b,mlat)) # get VDM with magnetic latitude
PmagResRec["vdm_n"]=PmagResRec['average_int_n']
if 'average_int_sigma' in list(PmagResRec.keys()) and PmagResRec['average_int_sigma']!="":
vdm_sig=pmag.b_vdm(float(PmagResRec['average_int_sigma']),mlat)
PmagResRec["vdm_sigma"]='%8.3e '% (vdm_sig)
else:
PmagResRec["vdm_sigma"]=""
mlat="" # define a model latitude
if get_model_lat==1: # use present site latitude
mlats=pmag.get_dictitem(SiteNFO,'er_site_name',site,'T')
if len(mlats)>0: mlat=mlats[0]['site_lat']
elif get_model_lat==2: # use a model latitude from some plate reconstruction model (or something)
mlats=pmag.get_dictitem(ModelLats,'er_site_name',site,'T')
if len(mlats)>0: PmagResRec['model_lat']=mlats[0]['site_model_lat']
mlat=PmagResRec['model_lat']
if mlat!="":
PmagResRec["vadm"]='%8.3e '% (pmag.b_vdm(b,float(mlat))) # get the VADM using the desired latitude
if sig!="":
vdm_sig=pmag.b_vdm(float(PmagResRec['average_int_sigma']),float(mlat))
PmagResRec["vadm_sigma"]='%8.3e '% (vdm_sig)
PmagResRec["vadm_n"]=PmagResRec['average_int_n']
else:
PmagResRec["vadm_sigma"]=""
sitedat=pmag.get_dictitem(SiteNFO,'er_site_name',PmagSiteRec['er_site_name'],'T') # fish out site information (lat/lon, etc.)
if len(sitedat)>0:
sitedat=sitedat[0]
PmagResRec['average_lat']=sitedat['site_lat']
PmagResRec['average_lon']=sitedat['site_lon']
else:
PmagResRec['average_lon']='UNKNOWN'
PmagResRec['average_lon']='UNKNOWN'
PmagResRec['magic_software_packages']=version_num
PmagResRec["pmag_result_name"]="V[A]DM: Site "+site
PmagResRec["result_description"]="V[A]DM of site"
PmagResRec["pmag_criteria_codes"]="ACCEPT"
if agefile != "": PmagResRec= pmag.get_age(PmagResRec,"er_site_names","average_",AgeNFO,DefaultAge)
site_height=pmag.get_dictitem(height_nfo,'er_site_name',site,'T')
if len(site_height)>0:PmagResRec["average_height"]=site_height[0]['site_height']
PmagSites.append(PmagSiteRec)
PmagResults.append(PmagResRec)
if len(PmagSites)>0:
Tmp,keylist=pmag.fillkeys(PmagSites)
pmag.magic_write(siteout,Tmp,'pmag_sites')
print(' sites written to ',siteout)
else: print("No Site level table")
if len(PmagResults)>0:
TmpRes,keylist=pmag.fillkeys(PmagResults)
pmag.magic_write(resout,TmpRes,'pmag_results')
print(' results written to ',resout)
else: print("No Results level table") | NAME
specimens_results_magic.py
DESCRIPTION
combines pmag_specimens.txt file with age, location, acceptance criteria and
outputs pmag_results table along with other MagIC tables necessary for uploading to the database
SYNTAX
specimens_results_magic.py [command line options]
OPTIONS
-h prints help message and quits
-usr USER: identify user, default is ""
-f: specimen input magic_measurements format file, default is "magic_measurements.txt"
-fsp: specimen input pmag_specimens format file, default is "pmag_specimens.txt"
-fsm: sample input er_samples format file, default is "er_samples.txt"
-fsi: specimen input er_sites format file, default is "er_sites.txt"
-fla: specify a file with paleolatitudes for calculating VADMs, default is not to calculate VADMS
format is: site_name paleolatitude (space delimited file)
-fa AGES: specify er_ages format file with age information
-crd [s,g,t,b]: specify coordinate system
(s, specimen, g geographic, t, tilt corrected, b, geographic and tilt corrected)
Default is to assume geographic
NB: only the tilt corrected data will appear on the results table, if both g and t are selected.
-cor [AC:CR:NL]: colon delimited list of required data adjustments for all specimens
included in intensity calculations (anisotropy, cooling rate, non-linear TRM)
unless specified, corrections will not be applied
-pri [TRM:ARM] colon delimited list of priorities for anisotropy correction (-cor must also be set to include AC). default is TRM, then ARM
-age MIN MAX UNITS: specify age boundaries and units
-exc: use exiting selection criteria (in pmag_criteria.txt file), default is default criteria
-C: no acceptance criteria
-aD: average directions per sample, default is NOT
-aI: average multiple specimen intensities per sample, default is by site
-aC: average all components together, default is NOT
-pol: calculate polarity averages
-sam: save sample level vgps and v[a]dms, default is by site
-xSi: skip the site level intensity calculation
-p: plot directions and look at intensities by site, default is NOT
-fmt: specify output for saved images, default is svg (only if -p set)
-lat: use present latitude for calculating VADMs, default is not to calculate VADMs
-xD: skip directions
-xI: skip intensities
OUPUT
writes pmag_samples, pmag_sites, pmag_results tables | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/deprecated/specimens_results_magic.py#L8-L660 |
PmagPy/PmagPy | programs/eqarea_magic.py | main | def main():
"""
NAME
eqarea_magic.py
DESCRIPTION
makes equal area projections from declination/inclination data
SYNTAX
eqarea_magic.py [command line options]
INPUT
takes magic formatted sites, samples, specimens, or measurements
OPTIONS
-h prints help message and quits
-f FILE: specify input magic format file from magic, default='sites.txt'
supported types=[measurements, specimens, samples, sites]
-fsp FILE: specify specimen file name, (required if you want to plot measurements by sample)
default='specimens.txt'
-fsa FILE: specify sample file name, (required if you want to plot specimens by site)
default='samples.txt'
-fsi FILE: specify site file name, default='sites.txt'
-flo FILE: specify location file name, default='locations.txt'
-obj OBJ: specify level of plot [all, sit, sam, spc], default is all
-crd [s,g,t]: specify coordinate system, [s]pecimen, [g]eographic, [t]ilt adjusted
default is geographic, unspecified assumed geographic
-fmt [svg,png,jpg] format for output plots
-ell [F,K,B,Be,Bv] plot Fisher, Kent, Bingham, Bootstrap ellipses or Boostrap eigenvectors
-c plot as colour contour
-cm CM use color map CM [default is coolwarm]
-sav save plot and quit quietly
-no-tilt data are unoriented, allows plotting of measurement dec/inc
NOTE
all: entire file; sit: site; sam: sample; spc: specimen
"""
# extract arguments from sys.argv
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
dir_path = pmag.get_named_arg("-WD", default_val=".")
input_dir_path = pmag.get_named_arg('-ID', '')
if not input_dir_path:
input_dir_path = dir_path
in_file = pmag.get_named_arg("-f", default_val="sites.txt")
in_file = pmag.resolve_file_name(in_file, input_dir_path)
if "-ID" not in sys.argv:
input_dir_path = os.path.split(in_file)[0]
plot_by = pmag.get_named_arg("-obj", default_val="all").lower()
spec_file = pmag.get_named_arg("-fsp", default_val="specimens.txt")
samp_file = pmag.get_named_arg("-fsa", default_val="samples.txt")
site_file = pmag.get_named_arg("-fsi", default_val="sites.txt")
loc_file = pmag.get_named_arg("-flo", default_val="locations.txt")
ignore_tilt = False
if '-no-tilt' in sys.argv:
ignore_tilt = True
color_map = "coolwarm"
if '-c' in sys.argv:
contour = True
if '-cm' in sys.argv:
ind = sys.argv.index('-cm')
color_map = sys.argv[ind+1]
else:
color_map = 'coolwarm'
else:
contour = False
interactive = True
save_plots = False
if '-sav' in sys.argv:
save_plots = True
interactive = False
plot_ell = False
if '-ell' in sys.argv:
plot_ell = pmag.get_named_arg("-ell", "F")
crd = pmag.get_named_arg("-crd", default_val="g")
fmt = pmag.get_named_arg("-fmt", "svg")
ipmag.eqarea_magic(in_file, dir_path, input_dir_path, spec_file, samp_file, site_file, loc_file,
plot_by, crd, ignore_tilt, save_plots, fmt, contour, color_map,
plot_ell, "all", interactive) | python | def main():
"""
NAME
eqarea_magic.py
DESCRIPTION
makes equal area projections from declination/inclination data
SYNTAX
eqarea_magic.py [command line options]
INPUT
takes magic formatted sites, samples, specimens, or measurements
OPTIONS
-h prints help message and quits
-f FILE: specify input magic format file from magic, default='sites.txt'
supported types=[measurements, specimens, samples, sites]
-fsp FILE: specify specimen file name, (required if you want to plot measurements by sample)
default='specimens.txt'
-fsa FILE: specify sample file name, (required if you want to plot specimens by site)
default='samples.txt'
-fsi FILE: specify site file name, default='sites.txt'
-flo FILE: specify location file name, default='locations.txt'
-obj OBJ: specify level of plot [all, sit, sam, spc], default is all
-crd [s,g,t]: specify coordinate system, [s]pecimen, [g]eographic, [t]ilt adjusted
default is geographic, unspecified assumed geographic
-fmt [svg,png,jpg] format for output plots
-ell [F,K,B,Be,Bv] plot Fisher, Kent, Bingham, Bootstrap ellipses or Boostrap eigenvectors
-c plot as colour contour
-cm CM use color map CM [default is coolwarm]
-sav save plot and quit quietly
-no-tilt data are unoriented, allows plotting of measurement dec/inc
NOTE
all: entire file; sit: site; sam: sample; spc: specimen
"""
# extract arguments from sys.argv
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
dir_path = pmag.get_named_arg("-WD", default_val=".")
input_dir_path = pmag.get_named_arg('-ID', '')
if not input_dir_path:
input_dir_path = dir_path
in_file = pmag.get_named_arg("-f", default_val="sites.txt")
in_file = pmag.resolve_file_name(in_file, input_dir_path)
if "-ID" not in sys.argv:
input_dir_path = os.path.split(in_file)[0]
plot_by = pmag.get_named_arg("-obj", default_val="all").lower()
spec_file = pmag.get_named_arg("-fsp", default_val="specimens.txt")
samp_file = pmag.get_named_arg("-fsa", default_val="samples.txt")
site_file = pmag.get_named_arg("-fsi", default_val="sites.txt")
loc_file = pmag.get_named_arg("-flo", default_val="locations.txt")
ignore_tilt = False
if '-no-tilt' in sys.argv:
ignore_tilt = True
color_map = "coolwarm"
if '-c' in sys.argv:
contour = True
if '-cm' in sys.argv:
ind = sys.argv.index('-cm')
color_map = sys.argv[ind+1]
else:
color_map = 'coolwarm'
else:
contour = False
interactive = True
save_plots = False
if '-sav' in sys.argv:
save_plots = True
interactive = False
plot_ell = False
if '-ell' in sys.argv:
plot_ell = pmag.get_named_arg("-ell", "F")
crd = pmag.get_named_arg("-crd", default_val="g")
fmt = pmag.get_named_arg("-fmt", "svg")
ipmag.eqarea_magic(in_file, dir_path, input_dir_path, spec_file, samp_file, site_file, loc_file,
plot_by, crd, ignore_tilt, save_plots, fmt, contour, color_map,
plot_ell, "all", interactive) | NAME
eqarea_magic.py
DESCRIPTION
makes equal area projections from declination/inclination data
SYNTAX
eqarea_magic.py [command line options]
INPUT
takes magic formatted sites, samples, specimens, or measurements
OPTIONS
-h prints help message and quits
-f FILE: specify input magic format file from magic, default='sites.txt'
supported types=[measurements, specimens, samples, sites]
-fsp FILE: specify specimen file name, (required if you want to plot measurements by sample)
default='specimens.txt'
-fsa FILE: specify sample file name, (required if you want to plot specimens by site)
default='samples.txt'
-fsi FILE: specify site file name, default='sites.txt'
-flo FILE: specify location file name, default='locations.txt'
-obj OBJ: specify level of plot [all, sit, sam, spc], default is all
-crd [s,g,t]: specify coordinate system, [s]pecimen, [g]eographic, [t]ilt adjusted
default is geographic, unspecified assumed geographic
-fmt [svg,png,jpg] format for output plots
-ell [F,K,B,Be,Bv] plot Fisher, Kent, Bingham, Bootstrap ellipses or Boostrap eigenvectors
-c plot as colour contour
-cm CM use color map CM [default is coolwarm]
-sav save plot and quit quietly
-no-tilt data are unoriented, allows plotting of measurement dec/inc
NOTE
all: entire file; sit: site; sam: sample; spc: specimen | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/eqarea_magic.py#L18-L98 |
PmagPy/PmagPy | dialogs/grid_frame2.py | GridFrame.InitUI | def InitUI(self):
"""
initialize window
"""
self.main_sizer = wx.BoxSizer(wx.VERTICAL)
self.init_grid_headers()
self.grid_builder = GridBuilder(self.er_magic, self.grid_type, self.grid_headers,
self.panel, self.parent_type)
self.grid = self.grid_builder.make_grid()
self.grid.InitUI()
## Column management buttons
self.add_cols_button = wx.Button(self.panel, label="Add additional columns",
name='add_cols_btn')
self.Bind(wx.EVT_BUTTON, self.on_add_cols, self.add_cols_button)
self.remove_cols_button = wx.Button(self.panel, label="Remove columns",
name='remove_cols_btn')
self.Bind(wx.EVT_BUTTON, self.on_remove_cols, self.remove_cols_button)
## Row management buttons
self.remove_row_button = wx.Button(self.panel, label="Remove last row",
name='remove_last_row_btn')
self.Bind(wx.EVT_BUTTON, self.on_remove_row, self.remove_row_button)
many_rows_box = wx.BoxSizer(wx.HORIZONTAL)
self.add_many_rows_button = wx.Button(self.panel, label="Add row(s)",
name='add_many_rows_btn')
self.rows_spin_ctrl = wx.SpinCtrl(self.panel, value='1', initial=1,
name='rows_spin_ctrl')
many_rows_box.Add(self.add_many_rows_button, flag=wx.ALIGN_CENTRE)
many_rows_box.Add(self.rows_spin_ctrl)
self.Bind(wx.EVT_BUTTON, self.on_add_rows, self.add_many_rows_button)
self.deleteRowButton = wx.Button(self.panel, id=-1, label='Delete selected row(s)', name='delete_row_btn')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_remove_row(event, False), self.deleteRowButton)
self.deleteRowButton.Disable()
## Data management buttons
self.importButton = wx.Button(self.panel, id=-1,
label='Import MagIC-format file', name='import_btn')
self.Bind(wx.EVT_BUTTON, self.onImport, self.importButton)
self.exitButton = wx.Button(self.panel, id=-1,
label='Save and close grid', name='save_and_quit_btn')
self.Bind(wx.EVT_BUTTON, self.onSave, self.exitButton)
self.cancelButton = wx.Button(self.panel, id=-1, label='Cancel', name='cancel_btn')
self.Bind(wx.EVT_BUTTON, self.onCancelButton, self.cancelButton)
## Help message and button
# button
self.toggle_help_btn = wx.Button(self.panel, id=-1, label="Show help",
name='toggle_help_btn')
self.Bind(wx.EVT_BUTTON, self.toggle_help, self.toggle_help_btn)
# message
self.help_msg_boxsizer = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, name='help_msg_boxsizer'), wx.VERTICAL)
self.default_msg_text = 'Edit {} here.\nYou can add or remove both rows and columns, however required columns may not be deleted.\nControlled vocabularies are indicated by **, and will have drop-down-menus.\nTo edit all values in a column, click the column header.\nYou can cut and paste a block of cells from an Excel-like file.\nJust click the top left cell and use command "v".\nColumns that pertain to interpretations will be marked with "++".'.format(self.grid_type + 's')
txt = ''
if self.grid_type == 'location':
txt = '\n\nNote: you can fill in location start/end latitude/longitude here.\nHowever, if you add sites in step 2, the program will calculate those values automatically,\nbased on site latitudes/logitudes.\nThese values will be written to your upload file.'
if self.grid_type == 'sample':
txt = "\n\nNote: you can fill in lithology, class, and type for each sample here.\nHowever, if the sample's class, lithology, and type are the same as its parent site,\nthose values will propagate down, and will be written to your sample file automatically."
if self.grid_type == 'specimen':
txt = "\n\nNote: you can fill in lithology, class, and type for each specimen here.\nHowever, if the specimen's class, lithology, and type are the same as its parent sample,\nthose values will propagate down, and will be written to your specimen file automatically."
if self.grid_type == 'age':
txt = "\n\nNote: only ages for which you provide data will be written to your upload file."
self.default_msg_text += txt
self.msg_text = wx.StaticText(self.panel, label=self.default_msg_text,
style=wx.TE_CENTER, name='msg text')
self.help_msg_boxsizer.Add(self.msg_text)
self.help_msg_boxsizer.ShowItems(False)
## Code message and button
# button
self.toggle_codes_btn = wx.Button(self.panel, id=-1, label="Show method codes",
name='toggle_codes_btn')
self.Bind(wx.EVT_BUTTON, self.toggle_codes, self.toggle_codes_btn)
# message
self.code_msg_boxsizer = pw.MethodCodeDemystifier(self.panel, vocab)
self.code_msg_boxsizer.ShowItems(False)
## Add content to sizers
self.hbox = wx.BoxSizer(wx.HORIZONTAL)
col_btn_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='Columns',
name='manage columns'), wx.VERTICAL)
row_btn_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='Rows',
name='manage rows'), wx.VERTICAL)
main_btn_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='Manage data',
name='manage data'), wx.VERTICAL)
col_btn_vbox.Add(self.add_cols_button, 1, flag=wx.ALL, border=5)
col_btn_vbox.Add(self.remove_cols_button, 1, flag=wx.ALL, border=5)
row_btn_vbox.Add(many_rows_box, 1, flag=wx.ALL, border=5)
row_btn_vbox.Add(self.remove_row_button, 1, flag=wx.ALL, border=5)
row_btn_vbox.Add(self.deleteRowButton, 1, flag=wx.ALL, border=5)
main_btn_vbox.Add(self.importButton, 1, flag=wx.ALL, border=5)
main_btn_vbox.Add(self.exitButton, 1, flag=wx.ALL, border=5)
main_btn_vbox.Add(self.cancelButton, 1, flag=wx.ALL, border=5)
self.hbox.Add(col_btn_vbox, 1)
self.hbox.Add(row_btn_vbox, 1)
self.hbox.Add(main_btn_vbox, 1)
self.panel.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
self.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
self.panel.Bind(wx.EVT_TEXT_PASTE, self.do_fit)
# add actual data!
self.grid_builder.add_data_to_grid(self.grid, self.grid_type)
if self.grid_type == 'age':
self.grid_builder.add_age_data_to_grid()
# add drop_down menus
if self.parent_type:
belongs_to = sorted(self.er_magic.data_lists[self.parent_type][0], key=lambda item: item.name)
else:
belongs_to = ''
self.drop_down_menu = drop_down_menus.Menus(self.grid_type, self, self.grid, belongs_to)
self.grid_box = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, name='grid container'), wx.VERTICAL)
self.grid_box.Add(self.grid, 1, flag=wx.ALL|wx.EXPAND, border=5)
# a few special touches if it is a location grid
if self.grid_type == 'location':
lat_lon_dict = self.er_magic.get_min_max_lat_lon(self.er_magic.locations)
for loc in self.er_magic.locations:
# try to fill in min/max latitudes/longitudes from sites
d = lat_lon_dict[loc.name]
col_labels = [self.grid.GetColLabelValue(col) for col in range(self.grid.GetNumberCols())]
row_labels = [self.grid.GetCellValue(row, 0) for row in range(self.grid.GetNumberRows())]
for key, value in list(d.items()):
if value:
if str(loc.er_data[key]) == str(value):
# no need to update
pass
else:
# update
loc.er_data[key] = value
col_ind = col_labels.index(key)
row_ind = row_labels.index(loc.name)
self.grid.SetCellValue(row_ind, col_ind, str(value))
if not self.grid.changes:
self.grid.changes = set([row_ind])
else:
self.grid.changes.add(row_ind)
# a few special touches if it is an age grid
if self.grid_type == 'age':
self.remove_row_button.Disable()
self.add_many_rows_button.Disable()
self.grid.SetColLabelValue(0, 'er_site_name')
toggle_box = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='Ages level', name='Ages level'), wx.VERTICAL)
levels = ['specimen', 'sample', 'site', 'location']
age_level = pw.radio_buttons(self.panel, levels, 'Choose level to assign ages')
level_ind = levels.index(self.er_magic.age_type)
age_level.radio_buttons[level_ind].SetValue(True)
toggle_box.Add(age_level)
self.Bind(wx.EVT_RADIOBUTTON, self.toggle_ages)
self.hbox.Add(toggle_box)
# a few special touches if it is a result grid
if self.grid_type == 'result':
# populate specimen_names, sample_names, etc.
self.drop_down_menu.choices[2] = [sorted([spec.name for spec in self.er_magic.specimens if spec]), False]
self.drop_down_menu.choices[3] = [sorted([samp.name for samp in self.er_magic.samples if samp]), False]
self.drop_down_menu.choices[4] = [sorted([site.name for site in self.er_magic.sites if site]), False]
self.drop_down_menu.choices[5] = [sorted([loc.name for loc in self.er_magic.locations if loc]), False]
for row in range(self.grid.GetNumberRows()):
result_name = self.grid.GetCellValue(row, 0)
result = self.er_magic.find_by_name(result_name, self.er_magic.results)
if result:
if result.specimens:
self.grid.SetCellValue(row, 2, ' : '.join([pmag.get_attr(spec) for spec in result.specimens]))
if result.samples:
self.grid.SetCellValue(row, 3, ' : '.join([pmag.get_attr(samp) for samp in result.samples]))
if result.sites:
self.grid.SetCellValue(row, 4, ' : '.join([pmag.get_attr(site) for site in result.sites]))
if result.locations:
self.grid.SetCellValue(row, 5, ' : '.join([pmag.get_attr(loc) for loc in result.locations]))
self.drop_down_menu.choices[5] = [sorted([loc.name for loc in self.er_magic.locations if loc]), False]
# final layout, set size
self.main_sizer.Add(self.hbox, flag=wx.ALL|wx.ALIGN_CENTER|wx.SHAPED, border=20)
self.main_sizer.Add(self.toggle_help_btn, .5, flag=wx.BOTTOM|wx.ALIGN_CENTRE|wx.SHAPED, border=5)
self.main_sizer.Add(self.help_msg_boxsizer, .5, flag=wx.BOTTOM|wx.ALIGN_CENTRE|wx.SHAPED, border=10)
self.main_sizer.Add(self.toggle_codes_btn, .5, flag=wx.BOTTOM|wx.ALIGN_CENTRE|wx.SHAPED, border=5)
self.main_sizer.Add(self.code_msg_boxsizer, .5, flag=wx.BOTTOM|wx.ALIGN_CENTRE|wx.SHAPED, border=5)
self.main_sizer.Add(self.grid_box, 2, flag=wx.ALL|wx.EXPAND, border=10)
self.panel.SetSizer(self.main_sizer)
self.main_sizer.Fit(self)
## this keeps sizing correct if the user resizes the window manually
#self.Bind(wx.EVT_SIZE, self.do_fit)
self.Centre()
self.Show() | python | def InitUI(self):
"""
initialize window
"""
self.main_sizer = wx.BoxSizer(wx.VERTICAL)
self.init_grid_headers()
self.grid_builder = GridBuilder(self.er_magic, self.grid_type, self.grid_headers,
self.panel, self.parent_type)
self.grid = self.grid_builder.make_grid()
self.grid.InitUI()
## Column management buttons
self.add_cols_button = wx.Button(self.panel, label="Add additional columns",
name='add_cols_btn')
self.Bind(wx.EVT_BUTTON, self.on_add_cols, self.add_cols_button)
self.remove_cols_button = wx.Button(self.panel, label="Remove columns",
name='remove_cols_btn')
self.Bind(wx.EVT_BUTTON, self.on_remove_cols, self.remove_cols_button)
## Row management buttons
self.remove_row_button = wx.Button(self.panel, label="Remove last row",
name='remove_last_row_btn')
self.Bind(wx.EVT_BUTTON, self.on_remove_row, self.remove_row_button)
many_rows_box = wx.BoxSizer(wx.HORIZONTAL)
self.add_many_rows_button = wx.Button(self.panel, label="Add row(s)",
name='add_many_rows_btn')
self.rows_spin_ctrl = wx.SpinCtrl(self.panel, value='1', initial=1,
name='rows_spin_ctrl')
many_rows_box.Add(self.add_many_rows_button, flag=wx.ALIGN_CENTRE)
many_rows_box.Add(self.rows_spin_ctrl)
self.Bind(wx.EVT_BUTTON, self.on_add_rows, self.add_many_rows_button)
self.deleteRowButton = wx.Button(self.panel, id=-1, label='Delete selected row(s)', name='delete_row_btn')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_remove_row(event, False), self.deleteRowButton)
self.deleteRowButton.Disable()
## Data management buttons
self.importButton = wx.Button(self.panel, id=-1,
label='Import MagIC-format file', name='import_btn')
self.Bind(wx.EVT_BUTTON, self.onImport, self.importButton)
self.exitButton = wx.Button(self.panel, id=-1,
label='Save and close grid', name='save_and_quit_btn')
self.Bind(wx.EVT_BUTTON, self.onSave, self.exitButton)
self.cancelButton = wx.Button(self.panel, id=-1, label='Cancel', name='cancel_btn')
self.Bind(wx.EVT_BUTTON, self.onCancelButton, self.cancelButton)
## Help message and button
# button
self.toggle_help_btn = wx.Button(self.panel, id=-1, label="Show help",
name='toggle_help_btn')
self.Bind(wx.EVT_BUTTON, self.toggle_help, self.toggle_help_btn)
# message
self.help_msg_boxsizer = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, name='help_msg_boxsizer'), wx.VERTICAL)
self.default_msg_text = 'Edit {} here.\nYou can add or remove both rows and columns, however required columns may not be deleted.\nControlled vocabularies are indicated by **, and will have drop-down-menus.\nTo edit all values in a column, click the column header.\nYou can cut and paste a block of cells from an Excel-like file.\nJust click the top left cell and use command "v".\nColumns that pertain to interpretations will be marked with "++".'.format(self.grid_type + 's')
txt = ''
if self.grid_type == 'location':
txt = '\n\nNote: you can fill in location start/end latitude/longitude here.\nHowever, if you add sites in step 2, the program will calculate those values automatically,\nbased on site latitudes/logitudes.\nThese values will be written to your upload file.'
if self.grid_type == 'sample':
txt = "\n\nNote: you can fill in lithology, class, and type for each sample here.\nHowever, if the sample's class, lithology, and type are the same as its parent site,\nthose values will propagate down, and will be written to your sample file automatically."
if self.grid_type == 'specimen':
txt = "\n\nNote: you can fill in lithology, class, and type for each specimen here.\nHowever, if the specimen's class, lithology, and type are the same as its parent sample,\nthose values will propagate down, and will be written to your specimen file automatically."
if self.grid_type == 'age':
txt = "\n\nNote: only ages for which you provide data will be written to your upload file."
self.default_msg_text += txt
self.msg_text = wx.StaticText(self.panel, label=self.default_msg_text,
style=wx.TE_CENTER, name='msg text')
self.help_msg_boxsizer.Add(self.msg_text)
self.help_msg_boxsizer.ShowItems(False)
## Code message and button
# button
self.toggle_codes_btn = wx.Button(self.panel, id=-1, label="Show method codes",
name='toggle_codes_btn')
self.Bind(wx.EVT_BUTTON, self.toggle_codes, self.toggle_codes_btn)
# message
self.code_msg_boxsizer = pw.MethodCodeDemystifier(self.panel, vocab)
self.code_msg_boxsizer.ShowItems(False)
## Add content to sizers
self.hbox = wx.BoxSizer(wx.HORIZONTAL)
col_btn_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='Columns',
name='manage columns'), wx.VERTICAL)
row_btn_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='Rows',
name='manage rows'), wx.VERTICAL)
main_btn_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='Manage data',
name='manage data'), wx.VERTICAL)
col_btn_vbox.Add(self.add_cols_button, 1, flag=wx.ALL, border=5)
col_btn_vbox.Add(self.remove_cols_button, 1, flag=wx.ALL, border=5)
row_btn_vbox.Add(many_rows_box, 1, flag=wx.ALL, border=5)
row_btn_vbox.Add(self.remove_row_button, 1, flag=wx.ALL, border=5)
row_btn_vbox.Add(self.deleteRowButton, 1, flag=wx.ALL, border=5)
main_btn_vbox.Add(self.importButton, 1, flag=wx.ALL, border=5)
main_btn_vbox.Add(self.exitButton, 1, flag=wx.ALL, border=5)
main_btn_vbox.Add(self.cancelButton, 1, flag=wx.ALL, border=5)
self.hbox.Add(col_btn_vbox, 1)
self.hbox.Add(row_btn_vbox, 1)
self.hbox.Add(main_btn_vbox, 1)
self.panel.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
self.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
self.panel.Bind(wx.EVT_TEXT_PASTE, self.do_fit)
# add actual data!
self.grid_builder.add_data_to_grid(self.grid, self.grid_type)
if self.grid_type == 'age':
self.grid_builder.add_age_data_to_grid()
# add drop_down menus
if self.parent_type:
belongs_to = sorted(self.er_magic.data_lists[self.parent_type][0], key=lambda item: item.name)
else:
belongs_to = ''
self.drop_down_menu = drop_down_menus.Menus(self.grid_type, self, self.grid, belongs_to)
self.grid_box = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, name='grid container'), wx.VERTICAL)
self.grid_box.Add(self.grid, 1, flag=wx.ALL|wx.EXPAND, border=5)
# a few special touches if it is a location grid
if self.grid_type == 'location':
lat_lon_dict = self.er_magic.get_min_max_lat_lon(self.er_magic.locations)
for loc in self.er_magic.locations:
# try to fill in min/max latitudes/longitudes from sites
d = lat_lon_dict[loc.name]
col_labels = [self.grid.GetColLabelValue(col) for col in range(self.grid.GetNumberCols())]
row_labels = [self.grid.GetCellValue(row, 0) for row in range(self.grid.GetNumberRows())]
for key, value in list(d.items()):
if value:
if str(loc.er_data[key]) == str(value):
# no need to update
pass
else:
# update
loc.er_data[key] = value
col_ind = col_labels.index(key)
row_ind = row_labels.index(loc.name)
self.grid.SetCellValue(row_ind, col_ind, str(value))
if not self.grid.changes:
self.grid.changes = set([row_ind])
else:
self.grid.changes.add(row_ind)
# a few special touches if it is an age grid
if self.grid_type == 'age':
self.remove_row_button.Disable()
self.add_many_rows_button.Disable()
self.grid.SetColLabelValue(0, 'er_site_name')
toggle_box = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='Ages level', name='Ages level'), wx.VERTICAL)
levels = ['specimen', 'sample', 'site', 'location']
age_level = pw.radio_buttons(self.panel, levels, 'Choose level to assign ages')
level_ind = levels.index(self.er_magic.age_type)
age_level.radio_buttons[level_ind].SetValue(True)
toggle_box.Add(age_level)
self.Bind(wx.EVT_RADIOBUTTON, self.toggle_ages)
self.hbox.Add(toggle_box)
# a few special touches if it is a result grid
if self.grid_type == 'result':
# populate specimen_names, sample_names, etc.
self.drop_down_menu.choices[2] = [sorted([spec.name for spec in self.er_magic.specimens if spec]), False]
self.drop_down_menu.choices[3] = [sorted([samp.name for samp in self.er_magic.samples if samp]), False]
self.drop_down_menu.choices[4] = [sorted([site.name for site in self.er_magic.sites if site]), False]
self.drop_down_menu.choices[5] = [sorted([loc.name for loc in self.er_magic.locations if loc]), False]
for row in range(self.grid.GetNumberRows()):
result_name = self.grid.GetCellValue(row, 0)
result = self.er_magic.find_by_name(result_name, self.er_magic.results)
if result:
if result.specimens:
self.grid.SetCellValue(row, 2, ' : '.join([pmag.get_attr(spec) for spec in result.specimens]))
if result.samples:
self.grid.SetCellValue(row, 3, ' : '.join([pmag.get_attr(samp) for samp in result.samples]))
if result.sites:
self.grid.SetCellValue(row, 4, ' : '.join([pmag.get_attr(site) for site in result.sites]))
if result.locations:
self.grid.SetCellValue(row, 5, ' : '.join([pmag.get_attr(loc) for loc in result.locations]))
self.drop_down_menu.choices[5] = [sorted([loc.name for loc in self.er_magic.locations if loc]), False]
# final layout, set size
self.main_sizer.Add(self.hbox, flag=wx.ALL|wx.ALIGN_CENTER|wx.SHAPED, border=20)
self.main_sizer.Add(self.toggle_help_btn, .5, flag=wx.BOTTOM|wx.ALIGN_CENTRE|wx.SHAPED, border=5)
self.main_sizer.Add(self.help_msg_boxsizer, .5, flag=wx.BOTTOM|wx.ALIGN_CENTRE|wx.SHAPED, border=10)
self.main_sizer.Add(self.toggle_codes_btn, .5, flag=wx.BOTTOM|wx.ALIGN_CENTRE|wx.SHAPED, border=5)
self.main_sizer.Add(self.code_msg_boxsizer, .5, flag=wx.BOTTOM|wx.ALIGN_CENTRE|wx.SHAPED, border=5)
self.main_sizer.Add(self.grid_box, 2, flag=wx.ALL|wx.EXPAND, border=10)
self.panel.SetSizer(self.main_sizer)
self.main_sizer.Fit(self)
## this keeps sizing correct if the user resizes the window manually
#self.Bind(wx.EVT_SIZE, self.do_fit)
self.Centre()
self.Show() | initialize window | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/grid_frame2.py#L66-L257 |
PmagPy/PmagPy | dialogs/grid_frame2.py | GridFrame.do_fit | def do_fit(self, event):
"""
Re-fit the window to the size of the content.
"""
#self.grid.ShowScrollbars(wx.SHOW_SB_NEVER, wx.SHOW_SB_NEVER)
if event:
event.Skip()
self.main_sizer.Fit(self)
disp_size = wx.GetDisplaySize()
actual_size = self.GetSize()
rows = self.grid.GetNumberRows()
# if there isn't enough room to display new content
# resize the frame
if disp_size[1] - 75 < actual_size[1]:
self.SetSize((actual_size[0], disp_size[1] * .95))
self.Centre() | python | def do_fit(self, event):
"""
Re-fit the window to the size of the content.
"""
#self.grid.ShowScrollbars(wx.SHOW_SB_NEVER, wx.SHOW_SB_NEVER)
if event:
event.Skip()
self.main_sizer.Fit(self)
disp_size = wx.GetDisplaySize()
actual_size = self.GetSize()
rows = self.grid.GetNumberRows()
# if there isn't enough room to display new content
# resize the frame
if disp_size[1] - 75 < actual_size[1]:
self.SetSize((actual_size[0], disp_size[1] * .95))
self.Centre() | Re-fit the window to the size of the content. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/grid_frame2.py#L269-L284 |
PmagPy/PmagPy | dialogs/grid_frame2.py | GridFrame.toggle_ages | def toggle_ages(self, event):
"""
Switch the type of grid between site/sample
(Users may add ages at either level)
"""
if self.grid.changes:
self.onSave(None)
label = event.GetEventObject().Label
self.er_magic.age_type = label
self.grid.Destroy()
# normally grid_frame is reset to None when grid is destroyed
# in this case we are simply replacing the grid, so we need to
# reset grid_frame
self.parent.Parent.grid_frame = self
self.parent.Parent.Hide()
self.grid = self.grid_builder.make_grid()
self.grid.InitUI()
self.panel.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
self.grid_builder.add_data_to_grid(self.grid, self.grid_type)
self.grid_builder.add_age_data_to_grid()
self.drop_down_menu = drop_down_menus.Menus(self.grid_type, self, self.grid, None)
self.grid.SetColLabelValue(0, 'er_' + label + '_name')
self.grid.size_grid()
self.grid_box.Add(self.grid, flag=wx.ALL, border=5)
self.main_sizer.Fit(self)
if self.parent.Parent.validation_mode:
if 'age' in self.parent.Parent.validation_mode:
self.grid.paint_invalid_cells(self.parent.Parent.warn_dict['age'])
self.grid.ForceRefresh()
# the grid show up if it's the same size as the previous grid
# awkward solution (causes flashing):
if self.grid.Size[0] < 100:
if self.grid.GetWindowStyle() != wx.DOUBLE_BORDER:
self.grid.SetWindowStyle(wx.DOUBLE_BORDER)
self.main_sizer.Fit(self)
self.grid.SetWindowStyle(wx.NO_BORDER)
self.main_sizer.Fit(self) | python | def toggle_ages(self, event):
"""
Switch the type of grid between site/sample
(Users may add ages at either level)
"""
if self.grid.changes:
self.onSave(None)
label = event.GetEventObject().Label
self.er_magic.age_type = label
self.grid.Destroy()
# normally grid_frame is reset to None when grid is destroyed
# in this case we are simply replacing the grid, so we need to
# reset grid_frame
self.parent.Parent.grid_frame = self
self.parent.Parent.Hide()
self.grid = self.grid_builder.make_grid()
self.grid.InitUI()
self.panel.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
self.grid_builder.add_data_to_grid(self.grid, self.grid_type)
self.grid_builder.add_age_data_to_grid()
self.drop_down_menu = drop_down_menus.Menus(self.grid_type, self, self.grid, None)
self.grid.SetColLabelValue(0, 'er_' + label + '_name')
self.grid.size_grid()
self.grid_box.Add(self.grid, flag=wx.ALL, border=5)
self.main_sizer.Fit(self)
if self.parent.Parent.validation_mode:
if 'age' in self.parent.Parent.validation_mode:
self.grid.paint_invalid_cells(self.parent.Parent.warn_dict['age'])
self.grid.ForceRefresh()
# the grid show up if it's the same size as the previous grid
# awkward solution (causes flashing):
if self.grid.Size[0] < 100:
if self.grid.GetWindowStyle() != wx.DOUBLE_BORDER:
self.grid.SetWindowStyle(wx.DOUBLE_BORDER)
self.main_sizer.Fit(self)
self.grid.SetWindowStyle(wx.NO_BORDER)
self.main_sizer.Fit(self) | Switch the type of grid between site/sample
(Users may add ages at either level) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/grid_frame2.py#L318-L357 |
PmagPy/PmagPy | dialogs/grid_frame2.py | GridFrame.remove_col_label | def remove_col_label(self, event):#, include_pmag=True):
"""
check to see if column is required
if it is not, delete it from grid
"""
er_possible_headers = self.grid_headers[self.grid_type]['er'][2]
pmag_possible_headers = self.grid_headers[self.grid_type]['pmag'][2]
er_actual_headers = self.grid_headers[self.grid_type]['er'][0]
pmag_actual_headers = self.grid_headers[self.grid_type]['pmag'][0]
col = event.GetCol()
label = self.grid.GetColLabelValue(col)
if '**' in label:
label = label.strip('**')
if label in self.grid_headers[self.grid_type]['er'][1]:
pw.simple_warning("That header is required, and cannot be removed")
return False
#elif include_pmag and label in self.grid_headers[self.grid_type]['pmag'][1]:
# pw.simple_warning("That header is required, and cannot be removed")
# return False
else:
print('That header is not required:', label)
self.grid.remove_col(col)
#if label in er_possible_headers:
try:
print('removing {} from er_actual_headers'.format(label))
er_actual_headers.remove(label)
except ValueError:
pass
#if label in pmag_possible_headers:
try:
pmag_actual_headers.remove(label)
except ValueError:
pass
# causes resize on each column header delete
# can leave this out if we want.....
self.main_sizer.Fit(self) | python | def remove_col_label(self, event):#, include_pmag=True):
"""
check to see if column is required
if it is not, delete it from grid
"""
er_possible_headers = self.grid_headers[self.grid_type]['er'][2]
pmag_possible_headers = self.grid_headers[self.grid_type]['pmag'][2]
er_actual_headers = self.grid_headers[self.grid_type]['er'][0]
pmag_actual_headers = self.grid_headers[self.grid_type]['pmag'][0]
col = event.GetCol()
label = self.grid.GetColLabelValue(col)
if '**' in label:
label = label.strip('**')
if label in self.grid_headers[self.grid_type]['er'][1]:
pw.simple_warning("That header is required, and cannot be removed")
return False
#elif include_pmag and label in self.grid_headers[self.grid_type]['pmag'][1]:
# pw.simple_warning("That header is required, and cannot be removed")
# return False
else:
print('That header is not required:', label)
self.grid.remove_col(col)
#if label in er_possible_headers:
try:
print('removing {} from er_actual_headers'.format(label))
er_actual_headers.remove(label)
except ValueError:
pass
#if label in pmag_possible_headers:
try:
pmag_actual_headers.remove(label)
except ValueError:
pass
# causes resize on each column header delete
# can leave this out if we want.....
self.main_sizer.Fit(self) | check to see if column is required
if it is not, delete it from grid | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/grid_frame2.py#L364-L399 |
PmagPy/PmagPy | dialogs/grid_frame2.py | GridFrame.on_add_cols | def on_add_cols(self, event):
"""
Show simple dialog that allows user to add a new column name
"""
col_labels = self.grid.col_labels
# do not list headers that are already column labels in the grid
er_items = [head for head in self.grid_headers[self.grid_type]['er'][2] if head not in col_labels]
# remove unneeded headers
er_items = builder.remove_list_headers(er_items)
pmag_headers = sorted(list(set(self.grid_headers[self.grid_type]['pmag'][2]).union(self.grid_headers[self.grid_type]['pmag'][1])))
# do not list headers that are already column labels in the grid
# make sure that pmag_specific columns are marked with '++'
to_add = [i + '++' for i in self.er_magic.double if i in pmag_headers and i + '++' not in col_labels]
pmag_headers.extend(to_add)
pmag_items = [head for head in pmag_headers if head not in er_items and head not in col_labels]
# remove unneeded headers
pmag_items = sorted(builder.remove_list_headers(pmag_items))
dia = pw.HeaderDialog(self, 'columns to add', items1=er_items, items2=pmag_items)
dia.Centre()
result = dia.ShowModal()
new_headers = []
if result == 5100:
new_headers = dia.text_list
if not new_headers:
return
errors = self.add_new_grid_headers(new_headers, er_items, pmag_items)
if errors:
errors_str = ', '.join(errors)
pw.simple_warning('You are already using the following headers: {}\nSo they will not be added'.format(errors_str))
# problem: if widgets above the grid are too wide,
# the grid does not re-size when adding columns
# awkward solution (causes flashing):
if self.grid.GetWindowStyle() != wx.DOUBLE_BORDER:
self.grid.SetWindowStyle(wx.DOUBLE_BORDER)
self.main_sizer.Fit(self)
self.grid.SetWindowStyle(wx.NO_BORDER)
self.Centre()
self.main_sizer.Fit(self)
#
self.grid.changes = set(range(self.grid.GetNumberRows()))
dia.Destroy() | python | def on_add_cols(self, event):
"""
Show simple dialog that allows user to add a new column name
"""
col_labels = self.grid.col_labels
# do not list headers that are already column labels in the grid
er_items = [head for head in self.grid_headers[self.grid_type]['er'][2] if head not in col_labels]
# remove unneeded headers
er_items = builder.remove_list_headers(er_items)
pmag_headers = sorted(list(set(self.grid_headers[self.grid_type]['pmag'][2]).union(self.grid_headers[self.grid_type]['pmag'][1])))
# do not list headers that are already column labels in the grid
# make sure that pmag_specific columns are marked with '++'
to_add = [i + '++' for i in self.er_magic.double if i in pmag_headers and i + '++' not in col_labels]
pmag_headers.extend(to_add)
pmag_items = [head for head in pmag_headers if head not in er_items and head not in col_labels]
# remove unneeded headers
pmag_items = sorted(builder.remove_list_headers(pmag_items))
dia = pw.HeaderDialog(self, 'columns to add', items1=er_items, items2=pmag_items)
dia.Centre()
result = dia.ShowModal()
new_headers = []
if result == 5100:
new_headers = dia.text_list
if not new_headers:
return
errors = self.add_new_grid_headers(new_headers, er_items, pmag_items)
if errors:
errors_str = ', '.join(errors)
pw.simple_warning('You are already using the following headers: {}\nSo they will not be added'.format(errors_str))
# problem: if widgets above the grid are too wide,
# the grid does not re-size when adding columns
# awkward solution (causes flashing):
if self.grid.GetWindowStyle() != wx.DOUBLE_BORDER:
self.grid.SetWindowStyle(wx.DOUBLE_BORDER)
self.main_sizer.Fit(self)
self.grid.SetWindowStyle(wx.NO_BORDER)
self.Centre()
self.main_sizer.Fit(self)
#
self.grid.changes = set(range(self.grid.GetNumberRows()))
dia.Destroy() | Show simple dialog that allows user to add a new column name | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/grid_frame2.py#L401-L442 |
PmagPy/PmagPy | dialogs/grid_frame2.py | GridFrame.add_new_grid_headers | def add_new_grid_headers(self, new_headers, er_items, pmag_items):
"""
Add in all user-added headers.
If those new headers depend on other headers, add the other headers too.
"""
def add_pmag_reqd_headers():
if self.grid_type == 'result':
return []
add_in = []
col_labels = self.grid.col_labels
for reqd_head in self.grid_headers[self.grid_type]['pmag'][1]:
if reqd_head in self.er_magic.double:
if reqd_head + "++" not in col_labels:
add_in.append(reqd_head + "++")
else:
if reqd_head not in col_labels:
add_in.append(reqd_head)
add_in = builder.remove_list_headers(add_in)
return add_in
#
already_present = []
for name in new_headers:
if name:
if name not in self.grid.col_labels:
col_number = self.grid.add_col(name)
# add to appropriate headers list
if name in er_items:
self.grid_headers[self.grid_type]['er'][0].append(str(name))
if name in pmag_items:
name = name.strip('++')
if name not in self.grid_headers[self.grid_type]['pmag'][0]:
self.grid_headers[self.grid_type]['pmag'][0].append(str(name))
# add any required pmag headers that are not in the grid already
for header in add_pmag_reqd_headers():
col_number = self.grid.add_col(header)
# add drop_down_menus for added reqd columns
if header in vocab.possible_vocabularies:
self.drop_down_menu.add_drop_down(col_number, name)
if header in ['magic_method_codes++']:
self.drop_down_menu.add_method_drop_down(col_number, header)
# add drop down menus for user-added column
if name in vocab.possible_vocabularies:
self.drop_down_menu.add_drop_down(col_number, name)
if name in ['magic_method_codes', 'magic_method_codes++']:
self.drop_down_menu.add_method_drop_down(col_number, name)
else:
already_present.append(name)
#pw.simple_warning('You are already using column header: {}'.format(name))
return already_present | python | def add_new_grid_headers(self, new_headers, er_items, pmag_items):
"""
Add in all user-added headers.
If those new headers depend on other headers, add the other headers too.
"""
def add_pmag_reqd_headers():
if self.grid_type == 'result':
return []
add_in = []
col_labels = self.grid.col_labels
for reqd_head in self.grid_headers[self.grid_type]['pmag'][1]:
if reqd_head in self.er_magic.double:
if reqd_head + "++" not in col_labels:
add_in.append(reqd_head + "++")
else:
if reqd_head not in col_labels:
add_in.append(reqd_head)
add_in = builder.remove_list_headers(add_in)
return add_in
#
already_present = []
for name in new_headers:
if name:
if name not in self.grid.col_labels:
col_number = self.grid.add_col(name)
# add to appropriate headers list
if name in er_items:
self.grid_headers[self.grid_type]['er'][0].append(str(name))
if name in pmag_items:
name = name.strip('++')
if name not in self.grid_headers[self.grid_type]['pmag'][0]:
self.grid_headers[self.grid_type]['pmag'][0].append(str(name))
# add any required pmag headers that are not in the grid already
for header in add_pmag_reqd_headers():
col_number = self.grid.add_col(header)
# add drop_down_menus for added reqd columns
if header in vocab.possible_vocabularies:
self.drop_down_menu.add_drop_down(col_number, name)
if header in ['magic_method_codes++']:
self.drop_down_menu.add_method_drop_down(col_number, header)
# add drop down menus for user-added column
if name in vocab.possible_vocabularies:
self.drop_down_menu.add_drop_down(col_number, name)
if name in ['magic_method_codes', 'magic_method_codes++']:
self.drop_down_menu.add_method_drop_down(col_number, name)
else:
already_present.append(name)
#pw.simple_warning('You are already using column header: {}'.format(name))
return already_present | Add in all user-added headers.
If those new headers depend on other headers, add the other headers too. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/grid_frame2.py#L444-L493 |
PmagPy/PmagPy | dialogs/grid_frame2.py | GridFrame.on_remove_cols | def on_remove_cols(self, event):
"""
enter 'remove columns' mode
"""
# open the help message
self.toggle_help(event=None, mode='open')
# first unselect any selected cols/cells
self.remove_cols_mode = True
self.grid.ClearSelection()
self.remove_cols_button.SetLabel("end delete column mode")
# change button to exit the delete columns mode
self.Unbind(wx.EVT_BUTTON, self.remove_cols_button)
self.Bind(wx.EVT_BUTTON, self.exit_col_remove_mode, self.remove_cols_button)
# then disable all other buttons
for btn in [self.add_cols_button, self.remove_row_button, self.add_many_rows_button]:
btn.Disable()
# then make some visual changes
self.msg_text.SetLabel("Remove grid columns: click on a column header to delete it. Required headers for {}s may not be deleted.".format(self.grid_type))
self.help_msg_boxsizer.Fit(self.help_msg_boxsizer.GetStaticBox())
self.main_sizer.Fit(self)
self.grid.SetWindowStyle(wx.DOUBLE_BORDER)
self.grid_box.GetStaticBox().SetWindowStyle(wx.DOUBLE_BORDER)
self.grid.Refresh()
self.main_sizer.Fit(self) # might not need this one
self.grid.changes = set(range(self.grid.GetNumberRows())) | python | def on_remove_cols(self, event):
"""
enter 'remove columns' mode
"""
# open the help message
self.toggle_help(event=None, mode='open')
# first unselect any selected cols/cells
self.remove_cols_mode = True
self.grid.ClearSelection()
self.remove_cols_button.SetLabel("end delete column mode")
# change button to exit the delete columns mode
self.Unbind(wx.EVT_BUTTON, self.remove_cols_button)
self.Bind(wx.EVT_BUTTON, self.exit_col_remove_mode, self.remove_cols_button)
# then disable all other buttons
for btn in [self.add_cols_button, self.remove_row_button, self.add_many_rows_button]:
btn.Disable()
# then make some visual changes
self.msg_text.SetLabel("Remove grid columns: click on a column header to delete it. Required headers for {}s may not be deleted.".format(self.grid_type))
self.help_msg_boxsizer.Fit(self.help_msg_boxsizer.GetStaticBox())
self.main_sizer.Fit(self)
self.grid.SetWindowStyle(wx.DOUBLE_BORDER)
self.grid_box.GetStaticBox().SetWindowStyle(wx.DOUBLE_BORDER)
self.grid.Refresh()
self.main_sizer.Fit(self) # might not need this one
self.grid.changes = set(range(self.grid.GetNumberRows())) | enter 'remove columns' mode | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/grid_frame2.py#L495-L519 |
PmagPy/PmagPy | dialogs/grid_frame2.py | GridFrame.on_remove_row | def on_remove_row(self, event, row_num=-1):
"""
Remove specified grid row.
If no row number is given, remove the last row.
"""
if row_num == -1:
default = (255, 255, 255, 255)
# unhighlight any selected rows:
for row in self.selected_rows:
attr = wx.grid.GridCellAttr()
attr.SetBackgroundColour(default)
self.grid.SetRowAttr(row, attr)
row_num = self.grid.GetNumberRows() - 1
self.deleteRowButton.Disable()
self.selected_rows = {row_num}
function_mapping = {'specimen': self.er_magic.delete_specimen,
'sample': self.er_magic.delete_sample,
'site': self.er_magic.delete_site,
'location': self.er_magic.delete_location,
'result': self.er_magic.delete_result}
names = [self.grid.GetCellValue(row, 0) for row in self.selected_rows]
orphans = []
for name in names:
if name:
try:
row = self.grid.row_labels.index(name)
function_mapping[self.grid_type](name)
orphans.extend([name])
# if user entered a name, then deletes the row before saving,
# there will be a ValueError
except ValueError:
pass
self.grid.remove_row(row)
self.selected_rows = set()
self.deleteRowButton.Disable()
self.grid.Refresh()
self.main_sizer.Fit(self) | python | def on_remove_row(self, event, row_num=-1):
"""
Remove specified grid row.
If no row number is given, remove the last row.
"""
if row_num == -1:
default = (255, 255, 255, 255)
# unhighlight any selected rows:
for row in self.selected_rows:
attr = wx.grid.GridCellAttr()
attr.SetBackgroundColour(default)
self.grid.SetRowAttr(row, attr)
row_num = self.grid.GetNumberRows() - 1
self.deleteRowButton.Disable()
self.selected_rows = {row_num}
function_mapping = {'specimen': self.er_magic.delete_specimen,
'sample': self.er_magic.delete_sample,
'site': self.er_magic.delete_site,
'location': self.er_magic.delete_location,
'result': self.er_magic.delete_result}
names = [self.grid.GetCellValue(row, 0) for row in self.selected_rows]
orphans = []
for name in names:
if name:
try:
row = self.grid.row_labels.index(name)
function_mapping[self.grid_type](name)
orphans.extend([name])
# if user entered a name, then deletes the row before saving,
# there will be a ValueError
except ValueError:
pass
self.grid.remove_row(row)
self.selected_rows = set()
self.deleteRowButton.Disable()
self.grid.Refresh()
self.main_sizer.Fit(self) | Remove specified grid row.
If no row number is given, remove the last row. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/grid_frame2.py#L535-L573 |
PmagPy/PmagPy | dialogs/grid_frame2.py | GridFrame.exit_col_remove_mode | def exit_col_remove_mode(self, event):
"""
go back from 'remove cols' mode to normal
"""
# close help messge
self.toggle_help(event=None, mode='close')
# update mode
self.remove_cols_mode = False
# re-enable all buttons
for btn in [self.add_cols_button, self.remove_row_button, self.add_many_rows_button]:
btn.Enable()
# unbind grid click for deletion
self.Unbind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK)
# undo visual cues
self.grid.SetWindowStyle(wx.DEFAULT)
self.grid_box.GetStaticBox().SetWindowStyle(wx.DEFAULT)
self.msg_text.SetLabel(self.default_msg_text)
self.help_msg_boxsizer.Fit(self.help_msg_boxsizer.GetStaticBox())
self.main_sizer.Fit(self)
# re-bind self.remove_cols_button
self.Bind(wx.EVT_BUTTON, self.on_remove_cols, self.remove_cols_button)
self.remove_cols_button.SetLabel("Remove columns") | python | def exit_col_remove_mode(self, event):
"""
go back from 'remove cols' mode to normal
"""
# close help messge
self.toggle_help(event=None, mode='close')
# update mode
self.remove_cols_mode = False
# re-enable all buttons
for btn in [self.add_cols_button, self.remove_row_button, self.add_many_rows_button]:
btn.Enable()
# unbind grid click for deletion
self.Unbind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK)
# undo visual cues
self.grid.SetWindowStyle(wx.DEFAULT)
self.grid_box.GetStaticBox().SetWindowStyle(wx.DEFAULT)
self.msg_text.SetLabel(self.default_msg_text)
self.help_msg_boxsizer.Fit(self.help_msg_boxsizer.GetStaticBox())
self.main_sizer.Fit(self)
# re-bind self.remove_cols_button
self.Bind(wx.EVT_BUTTON, self.on_remove_cols, self.remove_cols_button)
self.remove_cols_button.SetLabel("Remove columns") | go back from 'remove cols' mode to normal | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/grid_frame2.py#L575-L597 |
PmagPy/PmagPy | dialogs/grid_frame2.py | GridBuilder.make_grid | def make_grid(self, incl_pmag=True):
"""
return grid
"""
if incl_pmag and self.grid_type in self.er_magic.incl_pmag_data:
incl_pmag = True
else:
incl_pmag = False
er_header = self.grid_headers[self.grid_type]['er'][0]
if incl_pmag:
pmag_header = self.grid_headers[self.grid_type]['pmag'][0]
else:
pmag_header = []
# if we need to use '++' to distinguish pmag magic_method_codes from er
if incl_pmag and self.grid_type in ('specimen', 'sample', 'site'):
for double_header in self.er_magic.double:
try:
pmag_header.remove(double_header)
pmag_header.append(double_header + '++')
except ValueError:
pass
header = sorted(list(set(er_header).union(pmag_header)))
first_headers = []
for string in ['citation', '{}_class'.format(self.grid_type),
'{}_lithology'.format(self.grid_type), '{}_type'.format(self.grid_type),
'site_definition']:
for head in header[:]:
if string in head:
header.remove(head)
first_headers.append(head)
# the way we work it, each specimen is assigned to a sample
# each sample is assigned to a site
# specimens can not be added en masse to a site object, for example
# this data will be written in
for string in ['er_specimen_names', 'er_sample_names', 'er_site_names']:
for head in header[:]:
if string in head:
header.remove(head)
# do headers for results type grid
if self.grid_type == 'result':
#header.remove('pmag_result_name')
header[:0] = ['pmag_result_name', 'er_citation_names', 'er_specimen_names',
'er_sample_names', 'er_site_names', 'er_location_names']
elif self.grid_type == 'age':
for header_type in self.er_magic.first_age_headers:
if header_type in header:
header.remove(header_type)
lst = ['er_' + self.grid_type + '_name']
lst.extend(self.er_magic.first_age_headers)
header[:0] = lst
# do headers for all other data types without parents
elif not self.parent_type:
lst = ['er_' + self.grid_type + '_name']
lst.extend(first_headers)
header[:0] = lst
# do headers for all data types with parents
else:
lst = ['er_' + self.grid_type + '_name', 'er_' + self.parent_type + '_name']
lst.extend(first_headers)
header[:0] = lst
grid = magic_grid.MagicGrid(parent=self.panel, name=self.grid_type,
row_labels=[], col_labels=header,
double=self.er_magic.double)
grid.do_event_bindings()
self.grid = grid
return grid | python | def make_grid(self, incl_pmag=True):
"""
return grid
"""
if incl_pmag and self.grid_type in self.er_magic.incl_pmag_data:
incl_pmag = True
else:
incl_pmag = False
er_header = self.grid_headers[self.grid_type]['er'][0]
if incl_pmag:
pmag_header = self.grid_headers[self.grid_type]['pmag'][0]
else:
pmag_header = []
# if we need to use '++' to distinguish pmag magic_method_codes from er
if incl_pmag and self.grid_type in ('specimen', 'sample', 'site'):
for double_header in self.er_magic.double:
try:
pmag_header.remove(double_header)
pmag_header.append(double_header + '++')
except ValueError:
pass
header = sorted(list(set(er_header).union(pmag_header)))
first_headers = []
for string in ['citation', '{}_class'.format(self.grid_type),
'{}_lithology'.format(self.grid_type), '{}_type'.format(self.grid_type),
'site_definition']:
for head in header[:]:
if string in head:
header.remove(head)
first_headers.append(head)
# the way we work it, each specimen is assigned to a sample
# each sample is assigned to a site
# specimens can not be added en masse to a site object, for example
# this data will be written in
for string in ['er_specimen_names', 'er_sample_names', 'er_site_names']:
for head in header[:]:
if string in head:
header.remove(head)
# do headers for results type grid
if self.grid_type == 'result':
#header.remove('pmag_result_name')
header[:0] = ['pmag_result_name', 'er_citation_names', 'er_specimen_names',
'er_sample_names', 'er_site_names', 'er_location_names']
elif self.grid_type == 'age':
for header_type in self.er_magic.first_age_headers:
if header_type in header:
header.remove(header_type)
lst = ['er_' + self.grid_type + '_name']
lst.extend(self.er_magic.first_age_headers)
header[:0] = lst
# do headers for all other data types without parents
elif not self.parent_type:
lst = ['er_' + self.grid_type + '_name']
lst.extend(first_headers)
header[:0] = lst
# do headers for all data types with parents
else:
lst = ['er_' + self.grid_type + '_name', 'er_' + self.parent_type + '_name']
lst.extend(first_headers)
header[:0] = lst
grid = magic_grid.MagicGrid(parent=self.panel, name=self.grid_type,
row_labels=[], col_labels=header,
double=self.er_magic.double)
grid.do_event_bindings()
self.grid = grid
return grid | return grid | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/grid_frame2.py#L765-L837 |
PmagPy/PmagPy | dialogs/grid_frame2.py | GridBuilder.save_grid_data | def save_grid_data(self):
"""
Save grid data in the data object
"""
if not self.grid.changes:
print('-I- No changes to save')
return
if self.grid_type == 'age':
age_data_type = self.er_magic.age_type
self.er_magic.write_ages = True
starred_cols = self.grid.remove_starred_labels()
self.grid.SaveEditControlValue() # locks in value in cell currently edited
if self.grid.changes:
num_cols = self.grid.GetNumberCols()
for change in self.grid.changes:
if change == -1:
continue
else:
old_item = self.grid.row_items[change]
new_item_name = self.grid.GetCellValue(change, 0)
new_er_data = {}
new_pmag_data = {}
er_header = self.grid_headers[self.grid_type]['er'][0]
pmag_header = self.grid_headers[self.grid_type]['pmag'][0]
start_num = 2 if self.parent_type else 1
result_data = {}
for col in range(start_num, num_cols):
col_label = str(self.grid.GetColLabelValue(col))
value = str(self.grid.GetCellValue(change, col))
#new_data[col_label] = value
if value == '\t':
value = ''
if '++' in col_label:
col_name = col_label[:-2]
new_pmag_data[col_name] = value
continue
# pmag_* files are new interpretations, so should only have "This study"
# er_* files can have multiple citations
if col_label == 'er_citation_names':
new_pmag_data[col_label] = 'This study'
new_er_data[col_label] = value
continue
if er_header and (col_label in er_header):
new_er_data[col_label] = value
if self.grid_type in ('specimen', 'sample', 'site'):
if pmag_header and (col_label in pmag_header) and (col_label not in self.er_magic.double):
new_pmag_data[col_label] = value
else:
if pmag_header and (col_label in pmag_header):
new_pmag_data[col_label] = value
if col_label in ('er_specimen_names', 'er_sample_names',
'er_site_names', 'er_location_names'):
result_data[col_label] = value
# if there is an item in the data, get its name
if isinstance(old_item, str):
old_item_name = None
else:
old_item_name = self.grid.row_items[change].name
if self.parent_type:
new_parent_name = self.grid.GetCellValue(change, 1)
else:
new_parent_name = ''
# create a new item
if new_item_name and not old_item_name:
print('-I- make new item named', new_item_name)
if self.grid_type == 'result':
specs, samps, sites, locs = self.get_result_children(result_data)
item = self.er_magic.add_result(new_item_name, specs, samps, sites,
locs, new_pmag_data)
else:
item = self.er_magic.add_methods[self.grid_type](new_item_name, new_parent_name,
new_er_data, new_pmag_data)
# update an existing item
elif new_item_name and old_item_name:
print('-I- update existing {} formerly named {} to {}'.format(self.grid_type,
old_item_name,
new_item_name))
if self.grid_type == 'result':
specs, samps, sites, locs = self.get_result_children(result_data)
item = self.er_magic.update_methods['result'](old_item_name, new_item_name,
new_er_data=None,
new_pmag_data=new_pmag_data,
spec_names=specs,
samp_names=samps,
site_names=sites,
loc_names=locs,
replace_data=True)
elif self.grid_type == 'age':
item_type = age_data_type
item = self.er_magic.update_methods['age'](old_item_name, new_er_data,
item_type, replace_data=True)
else:
item = self.er_magic.update_methods[self.grid_type](old_item_name, new_item_name,
new_parent_name, new_er_data,
new_pmag_data, replace_data=True) | python | def save_grid_data(self):
"""
Save grid data in the data object
"""
if not self.grid.changes:
print('-I- No changes to save')
return
if self.grid_type == 'age':
age_data_type = self.er_magic.age_type
self.er_magic.write_ages = True
starred_cols = self.grid.remove_starred_labels()
self.grid.SaveEditControlValue() # locks in value in cell currently edited
if self.grid.changes:
num_cols = self.grid.GetNumberCols()
for change in self.grid.changes:
if change == -1:
continue
else:
old_item = self.grid.row_items[change]
new_item_name = self.grid.GetCellValue(change, 0)
new_er_data = {}
new_pmag_data = {}
er_header = self.grid_headers[self.grid_type]['er'][0]
pmag_header = self.grid_headers[self.grid_type]['pmag'][0]
start_num = 2 if self.parent_type else 1
result_data = {}
for col in range(start_num, num_cols):
col_label = str(self.grid.GetColLabelValue(col))
value = str(self.grid.GetCellValue(change, col))
#new_data[col_label] = value
if value == '\t':
value = ''
if '++' in col_label:
col_name = col_label[:-2]
new_pmag_data[col_name] = value
continue
# pmag_* files are new interpretations, so should only have "This study"
# er_* files can have multiple citations
if col_label == 'er_citation_names':
new_pmag_data[col_label] = 'This study'
new_er_data[col_label] = value
continue
if er_header and (col_label in er_header):
new_er_data[col_label] = value
if self.grid_type in ('specimen', 'sample', 'site'):
if pmag_header and (col_label in pmag_header) and (col_label not in self.er_magic.double):
new_pmag_data[col_label] = value
else:
if pmag_header and (col_label in pmag_header):
new_pmag_data[col_label] = value
if col_label in ('er_specimen_names', 'er_sample_names',
'er_site_names', 'er_location_names'):
result_data[col_label] = value
# if there is an item in the data, get its name
if isinstance(old_item, str):
old_item_name = None
else:
old_item_name = self.grid.row_items[change].name
if self.parent_type:
new_parent_name = self.grid.GetCellValue(change, 1)
else:
new_parent_name = ''
# create a new item
if new_item_name and not old_item_name:
print('-I- make new item named', new_item_name)
if self.grid_type == 'result':
specs, samps, sites, locs = self.get_result_children(result_data)
item = self.er_magic.add_result(new_item_name, specs, samps, sites,
locs, new_pmag_data)
else:
item = self.er_magic.add_methods[self.grid_type](new_item_name, new_parent_name,
new_er_data, new_pmag_data)
# update an existing item
elif new_item_name and old_item_name:
print('-I- update existing {} formerly named {} to {}'.format(self.grid_type,
old_item_name,
new_item_name))
if self.grid_type == 'result':
specs, samps, sites, locs = self.get_result_children(result_data)
item = self.er_magic.update_methods['result'](old_item_name, new_item_name,
new_er_data=None,
new_pmag_data=new_pmag_data,
spec_names=specs,
samp_names=samps,
site_names=sites,
loc_names=locs,
replace_data=True)
elif self.grid_type == 'age':
item_type = age_data_type
item = self.er_magic.update_methods['age'](old_item_name, new_er_data,
item_type, replace_data=True)
else:
item = self.er_magic.update_methods[self.grid_type](old_item_name, new_item_name,
new_parent_name, new_er_data,
new_pmag_data, replace_data=True) | Save grid data in the data object | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/grid_frame2.py#L890-L1000 |
PmagPy/PmagPy | programs/hysteresis_magic2.py | main | def main():
"""
NAME
hysteresis_magic.py
DESCRIPTION
calculates hystereis parameters and saves them in rmag_hystereis format file
makes plots if option selected
SYNTAX
hysteresis_magic.py [command line options]
OPTIONS
-h prints help message and quits
-usr USER: identify user, default is ""
-f: specify input file, default is agm_measurements.txt
-fh: specify rmag_hysteresis.txt input file
-F: specify output file, default is rmag_hysteresis.txt
-P: do not make the plots
-spc SPEC: specify specimen name to plot and quit
-sav save all plots and quit
-fmt [png,svg,eps,jpg]
"""
args = sys.argv
PLT = 1
plots = 0
user, meas_file, rmag_out, rmag_file = "", "agm_measurements.txt", "rmag_hysteresis.txt", ""
pltspec = ""
dir_path = '.'
fmt = 'svg'
verbose = pmagplotlib.verbose
version_num = pmag.get_version()
if '-WD' in args:
ind = args.index('-WD')
dir_path = args[ind+1]
if "-h" in args:
print(main.__doc__)
sys.exit()
if "-usr" in args:
ind = args.index("-usr")
user = args[ind+1]
if '-f' in args:
ind = args.index("-f")
meas_file = args[ind+1]
if '-F' in args:
ind = args.index("-F")
rmag_out = args[ind+1]
if '-fh' in args:
ind = args.index("-fh")
rmag_file = args[ind+1]
rmag_file = dir_path+'/'+rmag_file
if '-P' in args:
PLT = 0
irm_init, imag_init = -1, -1
if '-sav' in args:
verbose = 0
plots = 1
if '-spc' in args:
ind = args.index("-spc")
pltspec = args[ind+1]
verbose = 0
plots = 1
if '-fmt' in args:
ind = args.index("-fmt")
fmt = args[ind+1]
rmag_out = dir_path+'/'+rmag_out
meas_file = dir_path+'/'+meas_file
rmag_rem = dir_path+"/rmag_remanence.txt"
#
#
meas_data, file_type = pmag.magic_read(meas_file)
if file_type != 'magic_measurements':
print(main.__doc__)
print('bad file')
sys.exit()
#
# initialize some variables
# define figure numbers for hyst,deltaM,DdeltaM curves
HystRecs, RemRecs = [], []
HDD = {}
if verbose:
if verbose and PLT:
print("Plots may be on top of each other - use mouse to place ")
if PLT:
HDD['hyst'], HDD['deltaM'], HDD['DdeltaM'] = 1, 2, 3
pmagplotlib.plot_init(HDD['DdeltaM'], 5, 5)
pmagplotlib.plot_init(HDD['deltaM'], 5, 5)
pmagplotlib.plot_init(HDD['hyst'], 5, 5)
imag_init = 0
irm_init = 0
else:
HDD['hyst'], HDD['deltaM'], HDD['DdeltaM'], HDD['irm'], HDD['imag'] = 0, 0, 0, 0, 0
#
if rmag_file != "":
hyst_data, file_type = pmag.magic_read(rmag_file)
#
# get list of unique experiment names and specimen names
#
experiment_names, sids = [], []
for rec in meas_data:
meths = rec['magic_method_codes'].split(':')
methods = []
for meth in meths:
methods.append(meth.strip())
if 'LP-HYS' in methods:
if 'er_synthetic_name' in list(rec.keys()) and rec['er_synthetic_name'] != "":
rec['er_specimen_name'] = rec['er_synthetic_name']
if rec['magic_experiment_name'] not in experiment_names:
experiment_names.append(rec['magic_experiment_name'])
if rec['er_specimen_name'] not in sids:
sids.append(rec['er_specimen_name'])
#
k = 0
locname = ''
if pltspec != "":
k = sids.index(pltspec)
print(sids[k])
while k < len(sids):
s = sids[k]
if verbose and PLT:
print(s, k+1, 'out of ', len(sids))
#
#
# B,M for hysteresis, Bdcd,Mdcd for irm-dcd data
B, M, Bdcd, Mdcd = [], [], [], []
Bimag, Mimag = [], [] # Bimag,Mimag for initial magnetization curves
first_dcd_rec, first_rec, first_imag_rec = 1, 1, 1
for rec in meas_data:
methcodes = rec['magic_method_codes'].split(':')
meths = []
for meth in methcodes:
meths.append(meth.strip())
if rec['er_specimen_name'] == s and "LP-HYS" in meths:
B.append(float(rec['measurement_lab_field_dc']))
M.append(float(rec['measurement_magn_moment']))
if first_rec == 1:
e = rec['magic_experiment_name']
HystRec = {}
first_rec = 0
if "er_location_name" in list(rec.keys()):
HystRec["er_location_name"] = rec["er_location_name"]
locname = rec['er_location_name'].replace('/', '-')
if "er_sample_name" in list(rec.keys()):
HystRec["er_sample_name"] = rec["er_sample_name"]
if "er_site_name" in list(rec.keys()):
HystRec["er_site_name"] = rec["er_site_name"]
if "er_synthetic_name" in list(rec.keys()) and rec['er_synthetic_name'] != "":
HystRec["er_synthetic_name"] = rec["er_synthetic_name"]
else:
HystRec["er_specimen_name"] = rec["er_specimen_name"]
if rec['er_specimen_name'] == s and "LP-IRM-DCD" in meths:
Bdcd.append(float(rec['treatment_dc_field']))
Mdcd.append(float(rec['measurement_magn_moment']))
if first_dcd_rec == 1:
RemRec = {}
irm_exp = rec['magic_experiment_name']
first_dcd_rec = 0
if "er_location_name" in list(rec.keys()):
RemRec["er_location_name"] = rec["er_location_name"]
if "er_sample_name" in list(rec.keys()):
RemRec["er_sample_name"] = rec["er_sample_name"]
if "er_site_name" in list(rec.keys()):
RemRec["er_site_name"] = rec["er_site_name"]
if "er_synthetic_name" in list(rec.keys()) and rec['er_synthetic_name'] != "":
RemRec["er_synthetic_name"] = rec["er_synthetic_name"]
else:
RemRec["er_specimen_name"] = rec["er_specimen_name"]
if rec['er_specimen_name'] == s and "LP-IMAG" in meths:
if first_imag_rec == 1:
imag_exp = rec['magic_experiment_name']
first_imag_rec = 0
Bimag.append(float(rec['measurement_lab_field_dc']))
Mimag.append(float(rec['measurement_magn_moment']))
#
# now plot the hysteresis curve
#
if len(B) > 0:
hmeths = []
for meth in meths:
hmeths.append(meth)
hpars = pmagplotlib.plot_hdd(HDD, B, M, e)
if verbose and PLT:
pmagplotlib.draw_figs(HDD)
#
# get prior interpretations from hyst_data
if rmag_file != "":
hpars_prior = {}
for rec in hyst_data:
if rec['magic_experiment_names'] == e:
if rec['hysteresis_bcr'] != "" and rec['hysteresis_mr_moment'] != "":
hpars_prior['hysteresis_mr_moment'] = rec['hysteresis_mr_moment']
hpars_prior['hysteresis_ms_moment'] = rec['hysteresis_ms_moment']
hpars_prior['hysteresis_bc'] = rec['hysteresis_bc']
hpars_prior['hysteresis_bcr'] = rec['hysteresis_bcr']
break
if verbose:
pmagplotlib.plot_hpars(HDD, hpars_prior, 'ro')
else:
if verbose:
pmagplotlib.plot_hpars(HDD, hpars, 'bs')
HystRec['hysteresis_mr_moment'] = hpars['hysteresis_mr_moment']
HystRec['hysteresis_ms_moment'] = hpars['hysteresis_ms_moment']
HystRec['hysteresis_bc'] = hpars['hysteresis_bc']
HystRec['hysteresis_bcr'] = hpars['hysteresis_bcr']
HystRec['hysteresis_xhf'] = hpars['hysteresis_xhf']
HystRec['magic_experiment_names'] = e
HystRec['magic_software_packages'] = version_num
if hpars["magic_method_codes"] not in hmeths:
hmeths.append(hpars["magic_method_codes"])
methods = ""
for meth in hmeths:
methods = methods+meth.strip()+":"
HystRec["magic_method_codes"] = methods[:-1]
HystRec["er_citation_names"] = "This study"
HystRecs.append(HystRec)
#
if len(Bdcd) > 0:
rmeths = []
for meth in meths:
rmeths.append(meth)
if verbose and PLT:
print('plotting IRM')
if irm_init == 0:
HDD['irm'] = 5
pmagplotlib.plot_init(HDD['irm'], 5, 5)
irm_init = 1
rpars = pmagplotlib.plot_irm(HDD['irm'], Bdcd, Mdcd, irm_exp)
RemRec['remanence_mr_moment'] = rpars['remanence_mr_moment']
RemRec['remanence_bcr'] = rpars['remanence_bcr']
RemRec['magic_experiment_names'] = irm_exp
if rpars["magic_method_codes"] not in meths:
meths.append(rpars["magic_method_codes"])
methods = ""
for meth in rmeths:
methods = methods+meth.strip()+":"
RemRec["magic_method_codes"] = methods[:-1]
RemRec["er_citation_names"] = "This study"
RemRecs.append(RemRec)
else:
if irm_init:
pmagplotlib.clearFIG(HDD['irm'])
if len(Bimag) > 0:
if verbose:
print('plotting initial magnetization curve')
# first normalize by Ms
Mnorm = []
for m in Mimag:
Mnorm.append(m / float(hpars['hysteresis_ms_moment']))
if imag_init == 0:
HDD['imag'] = 4
pmagplotlib.plot_init(HDD['imag'], 5, 5)
imag_init = 1
pmagplotlib.plot_imag(HDD['imag'], Bimag, Mnorm, imag_exp)
else:
if imag_init:
pmagplotlib.clearFIG(HDD['imag'])
#
files = {}
if plots:
if pltspec != "":
s = pltspec
files = {}
for key in list(HDD.keys()):
files[key] = locname+'_'+s+'_'+key+'.'+fmt
pmagplotlib.save_plots(HDD, files)
if pltspec != "":
sys.exit()
if verbose and PLT:
pmagplotlib.draw_figs(HDD)
ans = input(
"S[a]ve plots, [s]pecimen name, [q]uit, <return> to continue\n ")
if ans == "a":
files = {}
for key in list(HDD.keys()):
files[key] = locname+'_'+s+'_'+key+'.'+fmt
pmagplotlib.save_plots(HDD, files)
if ans == '':
k += 1
if ans == "p":
del HystRecs[-1]
k -= 1
if ans == 'q':
print("Good bye")
sys.exit()
if ans == 's':
keepon = 1
specimen = input(
'Enter desired specimen name (or first part there of): ')
while keepon == 1:
try:
k = sids.index(specimen)
keepon = 0
except:
tmplist = []
for qq in range(len(sids)):
if specimen in sids[qq]:
tmplist.append(sids[qq])
print(specimen, " not found, but this was: ")
print(tmplist)
specimen = input('Select one or try again\n ')
k = sids.index(specimen)
else:
k += 1
if len(B) == 0 and len(Bdcd) == 0:
if verbose:
print('skipping this one - no hysteresis data')
k += 1
if rmag_out == "" and ans == 's' and verbose:
really = input(
" Do you want to overwrite the existing rmag_hystersis.txt file? 1/[0] ")
if really == "":
print('i thought not - goodbye')
sys.exit()
rmag_out = "rmag_hysteresis.txt"
if len(HystRecs) > 0:
pmag.magic_write(rmag_out, HystRecs, "rmag_hysteresis")
if verbose:
print("hysteresis parameters saved in ", rmag_out)
if len(RemRecs) > 0:
pmag.magic_write(rmag_rem, RemRecs, "rmag_remanence")
if verbose:
print("remanence parameters saved in ", rmag_rem) | python | def main():
"""
NAME
hysteresis_magic.py
DESCRIPTION
calculates hystereis parameters and saves them in rmag_hystereis format file
makes plots if option selected
SYNTAX
hysteresis_magic.py [command line options]
OPTIONS
-h prints help message and quits
-usr USER: identify user, default is ""
-f: specify input file, default is agm_measurements.txt
-fh: specify rmag_hysteresis.txt input file
-F: specify output file, default is rmag_hysteresis.txt
-P: do not make the plots
-spc SPEC: specify specimen name to plot and quit
-sav save all plots and quit
-fmt [png,svg,eps,jpg]
"""
args = sys.argv
PLT = 1
plots = 0
user, meas_file, rmag_out, rmag_file = "", "agm_measurements.txt", "rmag_hysteresis.txt", ""
pltspec = ""
dir_path = '.'
fmt = 'svg'
verbose = pmagplotlib.verbose
version_num = pmag.get_version()
if '-WD' in args:
ind = args.index('-WD')
dir_path = args[ind+1]
if "-h" in args:
print(main.__doc__)
sys.exit()
if "-usr" in args:
ind = args.index("-usr")
user = args[ind+1]
if '-f' in args:
ind = args.index("-f")
meas_file = args[ind+1]
if '-F' in args:
ind = args.index("-F")
rmag_out = args[ind+1]
if '-fh' in args:
ind = args.index("-fh")
rmag_file = args[ind+1]
rmag_file = dir_path+'/'+rmag_file
if '-P' in args:
PLT = 0
irm_init, imag_init = -1, -1
if '-sav' in args:
verbose = 0
plots = 1
if '-spc' in args:
ind = args.index("-spc")
pltspec = args[ind+1]
verbose = 0
plots = 1
if '-fmt' in args:
ind = args.index("-fmt")
fmt = args[ind+1]
rmag_out = dir_path+'/'+rmag_out
meas_file = dir_path+'/'+meas_file
rmag_rem = dir_path+"/rmag_remanence.txt"
#
#
meas_data, file_type = pmag.magic_read(meas_file)
if file_type != 'magic_measurements':
print(main.__doc__)
print('bad file')
sys.exit()
#
# initialize some variables
# define figure numbers for hyst,deltaM,DdeltaM curves
HystRecs, RemRecs = [], []
HDD = {}
if verbose:
if verbose and PLT:
print("Plots may be on top of each other - use mouse to place ")
if PLT:
HDD['hyst'], HDD['deltaM'], HDD['DdeltaM'] = 1, 2, 3
pmagplotlib.plot_init(HDD['DdeltaM'], 5, 5)
pmagplotlib.plot_init(HDD['deltaM'], 5, 5)
pmagplotlib.plot_init(HDD['hyst'], 5, 5)
imag_init = 0
irm_init = 0
else:
HDD['hyst'], HDD['deltaM'], HDD['DdeltaM'], HDD['irm'], HDD['imag'] = 0, 0, 0, 0, 0
#
if rmag_file != "":
hyst_data, file_type = pmag.magic_read(rmag_file)
#
# get list of unique experiment names and specimen names
#
experiment_names, sids = [], []
for rec in meas_data:
meths = rec['magic_method_codes'].split(':')
methods = []
for meth in meths:
methods.append(meth.strip())
if 'LP-HYS' in methods:
if 'er_synthetic_name' in list(rec.keys()) and rec['er_synthetic_name'] != "":
rec['er_specimen_name'] = rec['er_synthetic_name']
if rec['magic_experiment_name'] not in experiment_names:
experiment_names.append(rec['magic_experiment_name'])
if rec['er_specimen_name'] not in sids:
sids.append(rec['er_specimen_name'])
#
k = 0
locname = ''
if pltspec != "":
k = sids.index(pltspec)
print(sids[k])
while k < len(sids):
s = sids[k]
if verbose and PLT:
print(s, k+1, 'out of ', len(sids))
#
#
# B,M for hysteresis, Bdcd,Mdcd for irm-dcd data
B, M, Bdcd, Mdcd = [], [], [], []
Bimag, Mimag = [], [] # Bimag,Mimag for initial magnetization curves
first_dcd_rec, first_rec, first_imag_rec = 1, 1, 1
for rec in meas_data:
methcodes = rec['magic_method_codes'].split(':')
meths = []
for meth in methcodes:
meths.append(meth.strip())
if rec['er_specimen_name'] == s and "LP-HYS" in meths:
B.append(float(rec['measurement_lab_field_dc']))
M.append(float(rec['measurement_magn_moment']))
if first_rec == 1:
e = rec['magic_experiment_name']
HystRec = {}
first_rec = 0
if "er_location_name" in list(rec.keys()):
HystRec["er_location_name"] = rec["er_location_name"]
locname = rec['er_location_name'].replace('/', '-')
if "er_sample_name" in list(rec.keys()):
HystRec["er_sample_name"] = rec["er_sample_name"]
if "er_site_name" in list(rec.keys()):
HystRec["er_site_name"] = rec["er_site_name"]
if "er_synthetic_name" in list(rec.keys()) and rec['er_synthetic_name'] != "":
HystRec["er_synthetic_name"] = rec["er_synthetic_name"]
else:
HystRec["er_specimen_name"] = rec["er_specimen_name"]
if rec['er_specimen_name'] == s and "LP-IRM-DCD" in meths:
Bdcd.append(float(rec['treatment_dc_field']))
Mdcd.append(float(rec['measurement_magn_moment']))
if first_dcd_rec == 1:
RemRec = {}
irm_exp = rec['magic_experiment_name']
first_dcd_rec = 0
if "er_location_name" in list(rec.keys()):
RemRec["er_location_name"] = rec["er_location_name"]
if "er_sample_name" in list(rec.keys()):
RemRec["er_sample_name"] = rec["er_sample_name"]
if "er_site_name" in list(rec.keys()):
RemRec["er_site_name"] = rec["er_site_name"]
if "er_synthetic_name" in list(rec.keys()) and rec['er_synthetic_name'] != "":
RemRec["er_synthetic_name"] = rec["er_synthetic_name"]
else:
RemRec["er_specimen_name"] = rec["er_specimen_name"]
if rec['er_specimen_name'] == s and "LP-IMAG" in meths:
if first_imag_rec == 1:
imag_exp = rec['magic_experiment_name']
first_imag_rec = 0
Bimag.append(float(rec['measurement_lab_field_dc']))
Mimag.append(float(rec['measurement_magn_moment']))
#
# now plot the hysteresis curve
#
if len(B) > 0:
hmeths = []
for meth in meths:
hmeths.append(meth)
hpars = pmagplotlib.plot_hdd(HDD, B, M, e)
if verbose and PLT:
pmagplotlib.draw_figs(HDD)
#
# get prior interpretations from hyst_data
if rmag_file != "":
hpars_prior = {}
for rec in hyst_data:
if rec['magic_experiment_names'] == e:
if rec['hysteresis_bcr'] != "" and rec['hysteresis_mr_moment'] != "":
hpars_prior['hysteresis_mr_moment'] = rec['hysteresis_mr_moment']
hpars_prior['hysteresis_ms_moment'] = rec['hysteresis_ms_moment']
hpars_prior['hysteresis_bc'] = rec['hysteresis_bc']
hpars_prior['hysteresis_bcr'] = rec['hysteresis_bcr']
break
if verbose:
pmagplotlib.plot_hpars(HDD, hpars_prior, 'ro')
else:
if verbose:
pmagplotlib.plot_hpars(HDD, hpars, 'bs')
HystRec['hysteresis_mr_moment'] = hpars['hysteresis_mr_moment']
HystRec['hysteresis_ms_moment'] = hpars['hysteresis_ms_moment']
HystRec['hysteresis_bc'] = hpars['hysteresis_bc']
HystRec['hysteresis_bcr'] = hpars['hysteresis_bcr']
HystRec['hysteresis_xhf'] = hpars['hysteresis_xhf']
HystRec['magic_experiment_names'] = e
HystRec['magic_software_packages'] = version_num
if hpars["magic_method_codes"] not in hmeths:
hmeths.append(hpars["magic_method_codes"])
methods = ""
for meth in hmeths:
methods = methods+meth.strip()+":"
HystRec["magic_method_codes"] = methods[:-1]
HystRec["er_citation_names"] = "This study"
HystRecs.append(HystRec)
#
if len(Bdcd) > 0:
rmeths = []
for meth in meths:
rmeths.append(meth)
if verbose and PLT:
print('plotting IRM')
if irm_init == 0:
HDD['irm'] = 5
pmagplotlib.plot_init(HDD['irm'], 5, 5)
irm_init = 1
rpars = pmagplotlib.plot_irm(HDD['irm'], Bdcd, Mdcd, irm_exp)
RemRec['remanence_mr_moment'] = rpars['remanence_mr_moment']
RemRec['remanence_bcr'] = rpars['remanence_bcr']
RemRec['magic_experiment_names'] = irm_exp
if rpars["magic_method_codes"] not in meths:
meths.append(rpars["magic_method_codes"])
methods = ""
for meth in rmeths:
methods = methods+meth.strip()+":"
RemRec["magic_method_codes"] = methods[:-1]
RemRec["er_citation_names"] = "This study"
RemRecs.append(RemRec)
else:
if irm_init:
pmagplotlib.clearFIG(HDD['irm'])
if len(Bimag) > 0:
if verbose:
print('plotting initial magnetization curve')
# first normalize by Ms
Mnorm = []
for m in Mimag:
Mnorm.append(m / float(hpars['hysteresis_ms_moment']))
if imag_init == 0:
HDD['imag'] = 4
pmagplotlib.plot_init(HDD['imag'], 5, 5)
imag_init = 1
pmagplotlib.plot_imag(HDD['imag'], Bimag, Mnorm, imag_exp)
else:
if imag_init:
pmagplotlib.clearFIG(HDD['imag'])
#
files = {}
if plots:
if pltspec != "":
s = pltspec
files = {}
for key in list(HDD.keys()):
files[key] = locname+'_'+s+'_'+key+'.'+fmt
pmagplotlib.save_plots(HDD, files)
if pltspec != "":
sys.exit()
if verbose and PLT:
pmagplotlib.draw_figs(HDD)
ans = input(
"S[a]ve plots, [s]pecimen name, [q]uit, <return> to continue\n ")
if ans == "a":
files = {}
for key in list(HDD.keys()):
files[key] = locname+'_'+s+'_'+key+'.'+fmt
pmagplotlib.save_plots(HDD, files)
if ans == '':
k += 1
if ans == "p":
del HystRecs[-1]
k -= 1
if ans == 'q':
print("Good bye")
sys.exit()
if ans == 's':
keepon = 1
specimen = input(
'Enter desired specimen name (or first part there of): ')
while keepon == 1:
try:
k = sids.index(specimen)
keepon = 0
except:
tmplist = []
for qq in range(len(sids)):
if specimen in sids[qq]:
tmplist.append(sids[qq])
print(specimen, " not found, but this was: ")
print(tmplist)
specimen = input('Select one or try again\n ')
k = sids.index(specimen)
else:
k += 1
if len(B) == 0 and len(Bdcd) == 0:
if verbose:
print('skipping this one - no hysteresis data')
k += 1
if rmag_out == "" and ans == 's' and verbose:
really = input(
" Do you want to overwrite the existing rmag_hystersis.txt file? 1/[0] ")
if really == "":
print('i thought not - goodbye')
sys.exit()
rmag_out = "rmag_hysteresis.txt"
if len(HystRecs) > 0:
pmag.magic_write(rmag_out, HystRecs, "rmag_hysteresis")
if verbose:
print("hysteresis parameters saved in ", rmag_out)
if len(RemRecs) > 0:
pmag.magic_write(rmag_rem, RemRecs, "rmag_remanence")
if verbose:
print("remanence parameters saved in ", rmag_rem) | NAME
hysteresis_magic.py
DESCRIPTION
calculates hystereis parameters and saves them in rmag_hystereis format file
makes plots if option selected
SYNTAX
hysteresis_magic.py [command line options]
OPTIONS
-h prints help message and quits
-usr USER: identify user, default is ""
-f: specify input file, default is agm_measurements.txt
-fh: specify rmag_hysteresis.txt input file
-F: specify output file, default is rmag_hysteresis.txt
-P: do not make the plots
-spc SPEC: specify specimen name to plot and quit
-sav save all plots and quit
-fmt [png,svg,eps,jpg] | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/hysteresis_magic2.py#L11-L332 |
PmagPy/PmagPy | programs/mst_magic.py | main | def main():
"""
NAME
mst_magic.py
DESCRIPTION
converts MsT data (T,M) to measurements format files
SYNTAX
mst_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-usr USER: identify user, default is ""
-f FILE: specify T,M format input file, required
-spn SPEC: specimen name, required
-fsa SFILE: name with sample, site, location information
-F FILE: specify output file, default is measurements.txt
-dc H: specify applied field during measurement, default is 0.5 T
-DM NUM: output to MagIC data model 2.5 or 3, default 3
-syn : This is a synthetic specimen and has no sample/site/location information
-spc NUM : specify number of characters to designate a specimen, default = 0
-loc LOCNAME : specify location/study name, must have either LOCNAME or SAMPFILE or be a synthetic
-ncn NCON: specify naming convention: default is #1 below
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail [email protected] for help.
INPUT files:
T M: T is in Centigrade and M is uncalibrated magnitude
"""
#
# get command line arguments
#
args = sys.argv
if "-h" in args:
print(main.__doc__)
sys.exit()
dir_path = pmag.get_named_arg("-WD", ".")
user = pmag.get_named_arg("-usr", "")
labfield = pmag.get_named_arg("-dc", '0.5')
meas_file = pmag.get_named_arg("-F", "measurements.txt")
samp_file = pmag.get_named_arg("-fsa", "samples.txt")
try:
infile = pmag.get_named_arg("-f", reqd=True)
except pmag.MissingCommandLineArgException:
print(main.__doc__)
print("-f is required option")
sys.exit()
specnum = int(pmag.get_named_arg("-spc", 0))
location = pmag.get_named_arg("-loc", "")
specimen_name = pmag.get_named_arg("-spn", reqd=True)
syn = 0
if "-syn" in args:
syn = 1
samp_con = pmag.get_named_arg("-ncn", "1")
if "-ncn" in args:
ind = args.index("-ncn")
samp_con = sys.argv[ind+1]
data_model_num = int(pmag.get_named_arg("-DM", 3))
convert.mst(infile, specimen_name, dir_path, "", meas_file, samp_file,
user, specnum, samp_con, labfield, location, syn, data_model_num) | python | def main():
"""
NAME
mst_magic.py
DESCRIPTION
converts MsT data (T,M) to measurements format files
SYNTAX
mst_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-usr USER: identify user, default is ""
-f FILE: specify T,M format input file, required
-spn SPEC: specimen name, required
-fsa SFILE: name with sample, site, location information
-F FILE: specify output file, default is measurements.txt
-dc H: specify applied field during measurement, default is 0.5 T
-DM NUM: output to MagIC data model 2.5 or 3, default 3
-syn : This is a synthetic specimen and has no sample/site/location information
-spc NUM : specify number of characters to designate a specimen, default = 0
-loc LOCNAME : specify location/study name, must have either LOCNAME or SAMPFILE or be a synthetic
-ncn NCON: specify naming convention: default is #1 below
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail [email protected] for help.
INPUT files:
T M: T is in Centigrade and M is uncalibrated magnitude
"""
#
# get command line arguments
#
args = sys.argv
if "-h" in args:
print(main.__doc__)
sys.exit()
dir_path = pmag.get_named_arg("-WD", ".")
user = pmag.get_named_arg("-usr", "")
labfield = pmag.get_named_arg("-dc", '0.5')
meas_file = pmag.get_named_arg("-F", "measurements.txt")
samp_file = pmag.get_named_arg("-fsa", "samples.txt")
try:
infile = pmag.get_named_arg("-f", reqd=True)
except pmag.MissingCommandLineArgException:
print(main.__doc__)
print("-f is required option")
sys.exit()
specnum = int(pmag.get_named_arg("-spc", 0))
location = pmag.get_named_arg("-loc", "")
specimen_name = pmag.get_named_arg("-spn", reqd=True)
syn = 0
if "-syn" in args:
syn = 1
samp_con = pmag.get_named_arg("-ncn", "1")
if "-ncn" in args:
ind = args.index("-ncn")
samp_con = sys.argv[ind+1]
data_model_num = int(pmag.get_named_arg("-DM", 3))
convert.mst(infile, specimen_name, dir_path, "", meas_file, samp_file,
user, specnum, samp_con, labfield, location, syn, data_model_num) | NAME
mst_magic.py
DESCRIPTION
converts MsT data (T,M) to measurements format files
SYNTAX
mst_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-usr USER: identify user, default is ""
-f FILE: specify T,M format input file, required
-spn SPEC: specimen name, required
-fsa SFILE: name with sample, site, location information
-F FILE: specify output file, default is measurements.txt
-dc H: specify applied field during measurement, default is 0.5 T
-DM NUM: output to MagIC data model 2.5 or 3, default 3
-syn : This is a synthetic specimen and has no sample/site/location information
-spc NUM : specify number of characters to designate a specimen, default = 0
-loc LOCNAME : specify location/study name, must have either LOCNAME or SAMPFILE or be a synthetic
-ncn NCON: specify naming convention: default is #1 below
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail [email protected] for help.
INPUT files:
T M: T is in Centigrade and M is uncalibrated magnitude | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/mst_magic.py#L7-L78 |
PmagPy/PmagPy | programs/deprecated/parse_measurements.py | main | def main():
"""
NAME
parse_measurements.py
DESCRIPTION
takes measurments file and creates specimen and instrument files
SYNTAX
parse_measurements.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE magic_measurements input file, default is "magic_measurements.txt"
-fsi FILE er_sites input file, default is none
-Fsp FILE specimen output er_specimens format file, default is "er_specimens.txt"
-Fin FILE instrument output magic_instruments format file, default is "magic_instruments.txt"
OUPUT
writes er_specimens and magic_instruments formatted files
"""
infile = 'magic_measurements.txt'
sitefile = ""
specout = "er_specimens.txt"
instout = "magic_instruments.txt"
# get command line stuff
if "-h" in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind = sys.argv.index("-f")
infile = sys.argv[ind + 1]
if '-fsi' in sys.argv:
ind = sys.argv.index("-fsi")
sitefile = sys.argv[ind + 1]
if '-Fsp' in sys.argv:
ind = sys.argv.index("-Fsp")
specout = sys.argv[ind + 1]
if '-Fin' in sys.argv:
ind = sys.argv.index("-Fin")
instout = sys.argv[ind + 1]
if '-WD' in sys.argv:
ind = sys.argv.index("-WD")
dir_path = sys.argv[ind + 1]
infile = dir_path + '/' + infile
if sitefile != "":
sitefile = dir_path + '/' + sitefile
specout = dir_path + '/' + specout
instout = dir_path + '/' + instout
# now do re-ordering
pmag.ParseMeasFile(infile, sitefile, instout, specout) | python | def main():
"""
NAME
parse_measurements.py
DESCRIPTION
takes measurments file and creates specimen and instrument files
SYNTAX
parse_measurements.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE magic_measurements input file, default is "magic_measurements.txt"
-fsi FILE er_sites input file, default is none
-Fsp FILE specimen output er_specimens format file, default is "er_specimens.txt"
-Fin FILE instrument output magic_instruments format file, default is "magic_instruments.txt"
OUPUT
writes er_specimens and magic_instruments formatted files
"""
infile = 'magic_measurements.txt'
sitefile = ""
specout = "er_specimens.txt"
instout = "magic_instruments.txt"
# get command line stuff
if "-h" in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind = sys.argv.index("-f")
infile = sys.argv[ind + 1]
if '-fsi' in sys.argv:
ind = sys.argv.index("-fsi")
sitefile = sys.argv[ind + 1]
if '-Fsp' in sys.argv:
ind = sys.argv.index("-Fsp")
specout = sys.argv[ind + 1]
if '-Fin' in sys.argv:
ind = sys.argv.index("-Fin")
instout = sys.argv[ind + 1]
if '-WD' in sys.argv:
ind = sys.argv.index("-WD")
dir_path = sys.argv[ind + 1]
infile = dir_path + '/' + infile
if sitefile != "":
sitefile = dir_path + '/' + sitefile
specout = dir_path + '/' + specout
instout = dir_path + '/' + instout
# now do re-ordering
pmag.ParseMeasFile(infile, sitefile, instout, specout) | NAME
parse_measurements.py
DESCRIPTION
takes measurments file and creates specimen and instrument files
SYNTAX
parse_measurements.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE magic_measurements input file, default is "magic_measurements.txt"
-fsi FILE er_sites input file, default is none
-Fsp FILE specimen output er_specimens format file, default is "er_specimens.txt"
-Fin FILE instrument output magic_instruments format file, default is "magic_instruments.txt"
OUPUT
writes er_specimens and magic_instruments formatted files | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/deprecated/parse_measurements.py#L9-L59 |
PmagPy/PmagPy | programs/conversion_scripts2/utrecht_magic2.py | main | def main(command_line=True, **kwargs):
"""
NAME
utrecht_magic.py
DESCRIPTION
converts Utrecht magnetometer data files to magic_measurements files
SYNTAX
utrecht_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input file, or
-F FILE: specify output file, default is magic_measurements.txt
-Fsa: specify er_samples format file for appending, default is new er_samples.txt (Not working yet)
-WD: output directory for MagIC files
-ncn: Site Naming Convention
Site to Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2: default] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
-spc: number of characters to remove to generate sample names from specimen names
-dmy: European date format
-loc LOCNAME : specify location/study name
-lat latitude of samples
-lon longitude of samples
-A: don't average replicate measurements
-mcd: [SO-MAG,SO-SUN,SO-SIGHT...] supply how these samples were oriented
-dc: B PHI THETA: dc lab field (in microTesla), phi,and theta must be input as a tuple "(DC,PHI,THETA)". If not input user will be asked for values, this is advantagious if there are differing dc fields between steps or specimens. Note: this currently only works with the decimal IZZI naming convetion (XXX.0,1,2,3 where XXX is the treatment temperature and 0 is a zero field step, 1 is in field, and 2 is a pTRM check, 3 is a tail check). All other steps are hardcoded dc_field = 0.
INPUT
Utrecht magnetometer data file
"""
# initialize some stuff
sample_lat = 0.0
sample_lon = 0.0
noave = 0
er_location_name = "unknown"
args = sys.argv
meth_code = "LP-NO"
version_num = pmag.get_version()
site_num = 1
mag_file = ""
dir_path = '.'
MagRecs = []
SpecOuts = []
SampOuts = []
SiteOuts = []
meas_file='magic_measurements.txt'
spec_file='er_specimens.txt'
samp_file='er_samples.txt'
site_file='er_sites.txt'
meth_code = ""
#
# get command line arguments
#
if command_line:
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path = sys.argv[ind+1]
if '-ID' in sys.argv:
ind = sys.argv.index('-ID')
input_dir_path = sys.argv[ind+1]
else:
input_dir_path = dir_path
output_dir_path = dir_path
if "-h" in args:
print(main.__doc__)
return False
if '-F' in args:
ind = args.index("-F")
meas_file = args[ind+1]
if '-Fsp' in args:
ind=args.index("-Fsp")
spec_file=args[ind+1]
if '-Fsa' in args:
ind = args.index("-Fsa")
samp_file = args[ind+1]
if '-Fsi' in args: # LORI addition
ind=args.index("-Fsi")
site_file=args[ind+1]
#try:
# open(samp_file,'r')
# ErSamps,file_type=pmag.magic_read(samp_file)
# print 'sample information will be appended to ', samp_file
#except:
# print samp_file,' not found: sample information will be stored in new er_samples.txt file'
# samp_file = output_dir_path+'/er_samples.txt'
if '-f' in args:
ind = args.index("-f")
mag_file = args[ind+1]
if "-loc" in args:
ind = args.index("-loc")
er_location_name = args[ind+1]
if "-lat" in args:
ind = args.index("-lat")
site_lat = args[ind+1]
if "-lon" in args:
ind = args.index("-lon")
site_lon = args[ind+1]
if "-A" in args:
noave = 1
if "-mcd" in args:
ind = args.index("-mcd")
meth_code = args[ind+1]
#samp_con='5'
if "-ncn" in args:
ind=args.index("-ncn")
samp_con=sys.argv[ind+1]
if "4" in samp_con:
if "-" not in samp_con:
print("option [4] must be in form 4-Z where Z is an integer")
return False, "naming convention option [4] must be in form 4-Z where Z is an integer"
else:
site_num=samp_con.split("-")[1]
samp_con="4"
elif "7" in samp_con:
if "-" not in samp_con:
print("option [7] must be in form 7-Z where Z is an integer")
return False, "naming convention option [7] must be in form 7-Z where Z is an integer"
else:
site_num=samp_con.split("-")[1]
samp_con="7"
else: samp_con="1"
if '-dc' in args:
ind=args.index('-dc')
DC_FIELD,DC_PHI,DC_THETA=list(map(float,args[ind+1].strip('( ) [ ]').split(',')))
DC_FIELD *= 1e-6
yn=''
GET_DC_PARAMS=False
else: DC_FIELD,DC_PHI,DC_THETA=0,0,-90
if '-spc' in args:
ind=args.index("-spc")
specnum=-int(args[ind+1])
else: specnum = 0
if '-dmy' in args:
ind=args.index("-dmy")
dmy_flag=True
else: dmy_flag=False
if not command_line:
dir_path = kwargs.get('dir_path', '.')
input_dir_path = kwargs.get('input_dir_path', dir_path)
output_dir_path = dir_path
meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
mag_file = kwargs.get('mag_file')
spec_file = kwargs.get('spec_file', 'er_specimens.txt') # specimen outfile
samp_file = kwargs.get('samp_file', 'er_samples.txt')
site_file = kwargs.get('site_file', 'er_sites.txt') # site outfile
er_location_name = kwargs.get('location_name', '')
site_lat = kwargs.get('site_lat', '')
site_lon = kwargs.get('site_lon', '')
#oave = kwargs.get('noave', 0) # default (0) means DO average
meth_code = kwargs.get('meth_code', "LP-NO")
specnum = -int(kwargs.get('specnum', 0))
samp_con = kwargs.get('samp_con', '2')
if "4" in samp_con:
if "-" not in samp_con:
print("option [4] must be in form 4-Z where Z is an integer")
return False, "naming convention option [4] must be in form 4-Z where Z is an integer"
else:
site_num=samp_con.split("-")[1]
samp_con="4"
elif "7" in samp_con:
if "-" not in samp_con:
print("option [7] must be in form 7-Z where Z is an integer")
return False, "naming convention option [7] must be in form 7-Z where Z is an integer"
else:
site_num=samp_con.split("-")[1]
samp_con="7"
DC_FIELD,DC_PHI,DC_THETA = list(map(float, kwargs.get('dc_params', (0,0,-90))))
DC_FIELD *= 1e-6
noave = kwargs.get('avg', True)
dmy_flag = kwargs.get('dmy_flag', False)
# format variables
if not mag_file:
return False, 'You must provide a Utrecht formated file'
mag_file = os.path.join(input_dir_path, mag_file)
meas_file = os.path.join(output_dir_path, meas_file)
spec_file = os.path.join(output_dir_path, spec_file)
samp_file = os.path.join(output_dir_path, samp_file)
site_file = os.path.join(output_dir_path, site_file)
# parse data
# Open up the Utrecht file and read the header information
print('mag_file in utrecht_file', mag_file)
AF_or_T = mag_file.split('.')[-1]
data = open(mag_file, 'r')
line = data.readline()
line_items = line.split(',')
operator=line_items[0]
operator=operator.replace("\"","")
machine=line_items[1]
machine=machine.replace("\"","")
machine=machine.rstrip('\n')
print("operator=", operator)
print("machine=", machine)
#read in measurement data
line = data.readline()
while line != "END" and line != '"END"':
ErSpecRec,ErSampRec,ErSiteRec = {},{},{}
line_items = line.split(',')
spec_name=line_items[0]
spec_name=spec_name.replace("\"","")
print("spec_name=", spec_name)
free_string=line_items[1]
free_string=free_string.replace("\"","")
print("free_string=", free_string)
dec=line_items[2]
print("dec=", dec)
inc=line_items[3]
print("inc=", inc)
volume=float(line_items[4])
volume=volume * 1e-6 # enter volume in cm^3, convert to m^3
print("volume=", volume)
bed_plane=line_items[5]
print("bed_plane=", bed_plane)
bed_tilt=line_items[6]
print("bed_tilt=", bed_tilt)
# Configure et er_ tables
ErSpecRec['er_specimen_name'] = spec_name
if specnum==0: sample_name = spec_name
else: sample_name = spec_name[:specnum]
ErSampRec['er_sample_name'] = sample_name
ErSpecRec['er_sample_name'] = sample_name
er_site_name = pmag.parse_site(sample_name,samp_con,site_num)
ErSpecRec['er_site_name']=er_site_name
ErSpecRec['er_location_name']=er_location_name
ErSampRec['sample_azimuth'] = dec
ErSampRec['sample_dip'] = str(float(inc)-90)
ErSampRec['sample_bed_dip_direction'] = bed_plane
ErSampRec['sample_bed_tilt'] = bed_tilt
ErSiteRec['site_lat'] = site_lat
ErSiteRec['site_lon'] = site_lon
ErSpecRec['magic_method_codes'] = meth_code
ErSampRec['er_location_name'] = er_location_name
ErSiteRec['er_location_name'] = er_location_name
ErSiteRec['er_site_name'] = er_site_name
ErSampRec['er_site_name'] = er_site_name
ErSampRec['er_citation_names'] = 'This study'
SpecOuts.append(ErSpecRec)
SampOuts.append(ErSampRec)
SiteOuts.append(ErSiteRec)
#measurement data
line = data.readline()
line = line.rstrip("\n")
items = line.split(",")
while line != '9999':
print(line)
step=items[0]
step=step.split('.')
step_value=step[0]
step_type = ""
if len(step) == 2:
step_type=step[1]
if step_type=='5':
step_value = items[0]
A=float(items[1])
B=float(items[2])
C=float(items[3])
# convert to MagIC coordinates
Z=-A
X=-B
Y=C
cart = np.array([X, Y, Z]).transpose()
direction = pmag.cart2dir(cart).transpose()
measurement_dec = direction[0]
measurement_inc = direction[1]
measurement_magn_moment = direction[2] * 1.0e-12 # the data are in pico-Am^2 - this converts to Am^2
measurement_magn_volume = direction[2] * 1.0e-12 / volume # data volume normalized - converted to A/m
print("measurement_magn_moment=", measurement_magn_moment)
print("measurement_magn_volume=", measurement_magn_volume)
error = items[4]
date=items[5]
date=date.strip('"')
if date.count("-") > 0:
date=date.split("-")
elif date.count("/") > 0:
date=date.split("/")
else: print("date format seperator cannot be identified")
print(date)
time=items[6]
time=time.strip('"')
time=time.split(":")
print(time)
if dmy_flag:
date_time = date[1] + ":" + date[0] + ":" + date[2] + ":" + time[0] + ":" + time[1] + ":" + "0.0"
else:
date_time = date[0] + ":" + date[1] + ":" + date[2] + ":" + time[0] + ":" + time[1] + ":" + "0.0"
print(date_time)
MagRec = {}
MagRec["er_analyst_mail_names"] = operator
MagRec["magic_instrument_codes"] = "Utrecht_" + machine
MagRec["measurement_description"] = "free string = " + free_string
MagRec["measurement_date"] = date_time
MagRec["er_citation_names"] = "This study"
MagRec['er_location_name'] = er_location_name
MagRec['er_site_name'] = er_site_name
MagRec['er_sample_name'] = sample_name
MagRec['magic_software_packages'] = version_num
MagRec["measurement_temp"] = '%8.3e' % (273) # room temp in kelvin
MagRec["measurement_flag"] = 'g'
MagRec["measurement_standard"] = 'u'
MagRec["magic_experiment_name"] = er_location_name + er_site_name + spec_name
MagRec["measurement_number"] = er_location_name + er_site_name + spec_name + items[0]
MagRec["er_specimen_name"] = spec_name
# MagRec["treatment_ac_field"] = '0'
if AF_or_T.lower() == "th":
MagRec["treatment_temp"] = '%8.3e' % (float(step_value)+273.) # temp in kelvin
MagRec['treatment_ac_field']='0'
meas_type = "LP-DIR-T:LT-T-Z"
else:
MagRec['treatment_temp']='273'
MagRec['treatment_ac_field']='%10.3e'%(float(step_value)*1e-3)
meas_type = "LP-DIR-AF:LT-AF-Z"
MagRec['treatment_dc_field']='0'
if step_value == '0':
meas_type = "LT-NO"
print("step_type=", step_type)
if step_type == '0' and AF_or_T.lower() == 'th':
if meas_type == "":
meas_type = "LT-T-Z"
else:
meas_type = meas_type + ":" + "LT-T-Z"
elif step_type == '1':
if meas_type == "":
meas_type = "LT-T-I"
else:
meas_type = meas_type + ":" + "LT-T-I"
MagRec['treatment_dc_field']='%1.2e'%DC_FIELD
elif step_type == '2':
if meas_type == "":
meas_type = "LT-PTRM-I"
else:
meas_type = meas_type + ":" + "LT-PTRM-I"
MagRec['treatment_dc_field']='%1.2e'%DC_FIELD
elif step_type == '3':
if meas_type == "" :
meas_type = "LT-PTRM-Z"
else:
meas_type = meas_type + ":" + "LT-PTRM-Z"
print("meas_type=", meas_type)
MagRec['treatment_dc_field_phi'] = '%1.2f'%DC_PHI
MagRec['treatment_dc_field_theta'] = '%1.2f'%DC_THETA
MagRec['magic_method_codes'] = meas_type
MagRec["measurement_magn_moment"] = measurement_magn_moment
MagRec["measurement_magn_volume"] = measurement_magn_volume
MagRec["measurement_dec"] = measurement_dec
MagRec["measurement_inc"] = measurement_inc
MagRec['measurement_csd'] = error
# MagRec['measurement_positions'] = '1'
MagRecs.append(MagRec)
line = data.readline()
line = line.rstrip("\n")
items = line.split(",")
line = data.readline()
line = line.rstrip("\n")
items = line.split(",")
# write out the data to MagIC data files
pmag.magic_write(spec_file, SpecOuts, 'er_specimens')
pmag.magic_write(samp_file, SampOuts, 'er_samples')
pmag.magic_write(site_file, SiteOuts, 'er_sites')
# MagOuts = pmag.measurements_methods(MagRecs, noave)
# pmag.magic_write(meas_file, MagOuts, 'magic_measurements')
pmag.magic_write(meas_file, MagRecs, 'magic_measurements')
print("results put in ", meas_file)
print("exit!")
return True, meas_file | python | def main(command_line=True, **kwargs):
"""
NAME
utrecht_magic.py
DESCRIPTION
converts Utrecht magnetometer data files to magic_measurements files
SYNTAX
utrecht_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input file, or
-F FILE: specify output file, default is magic_measurements.txt
-Fsa: specify er_samples format file for appending, default is new er_samples.txt (Not working yet)
-WD: output directory for MagIC files
-ncn: Site Naming Convention
Site to Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2: default] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
-spc: number of characters to remove to generate sample names from specimen names
-dmy: European date format
-loc LOCNAME : specify location/study name
-lat latitude of samples
-lon longitude of samples
-A: don't average replicate measurements
-mcd: [SO-MAG,SO-SUN,SO-SIGHT...] supply how these samples were oriented
-dc: B PHI THETA: dc lab field (in microTesla), phi,and theta must be input as a tuple "(DC,PHI,THETA)". If not input user will be asked for values, this is advantagious if there are differing dc fields between steps or specimens. Note: this currently only works with the decimal IZZI naming convetion (XXX.0,1,2,3 where XXX is the treatment temperature and 0 is a zero field step, 1 is in field, and 2 is a pTRM check, 3 is a tail check). All other steps are hardcoded dc_field = 0.
INPUT
Utrecht magnetometer data file
"""
# initialize some stuff
sample_lat = 0.0
sample_lon = 0.0
noave = 0
er_location_name = "unknown"
args = sys.argv
meth_code = "LP-NO"
version_num = pmag.get_version()
site_num = 1
mag_file = ""
dir_path = '.'
MagRecs = []
SpecOuts = []
SampOuts = []
SiteOuts = []
meas_file='magic_measurements.txt'
spec_file='er_specimens.txt'
samp_file='er_samples.txt'
site_file='er_sites.txt'
meth_code = ""
#
# get command line arguments
#
if command_line:
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path = sys.argv[ind+1]
if '-ID' in sys.argv:
ind = sys.argv.index('-ID')
input_dir_path = sys.argv[ind+1]
else:
input_dir_path = dir_path
output_dir_path = dir_path
if "-h" in args:
print(main.__doc__)
return False
if '-F' in args:
ind = args.index("-F")
meas_file = args[ind+1]
if '-Fsp' in args:
ind=args.index("-Fsp")
spec_file=args[ind+1]
if '-Fsa' in args:
ind = args.index("-Fsa")
samp_file = args[ind+1]
if '-Fsi' in args: # LORI addition
ind=args.index("-Fsi")
site_file=args[ind+1]
#try:
# open(samp_file,'r')
# ErSamps,file_type=pmag.magic_read(samp_file)
# print 'sample information will be appended to ', samp_file
#except:
# print samp_file,' not found: sample information will be stored in new er_samples.txt file'
# samp_file = output_dir_path+'/er_samples.txt'
if '-f' in args:
ind = args.index("-f")
mag_file = args[ind+1]
if "-loc" in args:
ind = args.index("-loc")
er_location_name = args[ind+1]
if "-lat" in args:
ind = args.index("-lat")
site_lat = args[ind+1]
if "-lon" in args:
ind = args.index("-lon")
site_lon = args[ind+1]
if "-A" in args:
noave = 1
if "-mcd" in args:
ind = args.index("-mcd")
meth_code = args[ind+1]
#samp_con='5'
if "-ncn" in args:
ind=args.index("-ncn")
samp_con=sys.argv[ind+1]
if "4" in samp_con:
if "-" not in samp_con:
print("option [4] must be in form 4-Z where Z is an integer")
return False, "naming convention option [4] must be in form 4-Z where Z is an integer"
else:
site_num=samp_con.split("-")[1]
samp_con="4"
elif "7" in samp_con:
if "-" not in samp_con:
print("option [7] must be in form 7-Z where Z is an integer")
return False, "naming convention option [7] must be in form 7-Z where Z is an integer"
else:
site_num=samp_con.split("-")[1]
samp_con="7"
else: samp_con="1"
if '-dc' in args:
ind=args.index('-dc')
DC_FIELD,DC_PHI,DC_THETA=list(map(float,args[ind+1].strip('( ) [ ]').split(',')))
DC_FIELD *= 1e-6
yn=''
GET_DC_PARAMS=False
else: DC_FIELD,DC_PHI,DC_THETA=0,0,-90
if '-spc' in args:
ind=args.index("-spc")
specnum=-int(args[ind+1])
else: specnum = 0
if '-dmy' in args:
ind=args.index("-dmy")
dmy_flag=True
else: dmy_flag=False
if not command_line:
dir_path = kwargs.get('dir_path', '.')
input_dir_path = kwargs.get('input_dir_path', dir_path)
output_dir_path = dir_path
meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
mag_file = kwargs.get('mag_file')
spec_file = kwargs.get('spec_file', 'er_specimens.txt') # specimen outfile
samp_file = kwargs.get('samp_file', 'er_samples.txt')
site_file = kwargs.get('site_file', 'er_sites.txt') # site outfile
er_location_name = kwargs.get('location_name', '')
site_lat = kwargs.get('site_lat', '')
site_lon = kwargs.get('site_lon', '')
#oave = kwargs.get('noave', 0) # default (0) means DO average
meth_code = kwargs.get('meth_code', "LP-NO")
specnum = -int(kwargs.get('specnum', 0))
samp_con = kwargs.get('samp_con', '2')
if "4" in samp_con:
if "-" not in samp_con:
print("option [4] must be in form 4-Z where Z is an integer")
return False, "naming convention option [4] must be in form 4-Z where Z is an integer"
else:
site_num=samp_con.split("-")[1]
samp_con="4"
elif "7" in samp_con:
if "-" not in samp_con:
print("option [7] must be in form 7-Z where Z is an integer")
return False, "naming convention option [7] must be in form 7-Z where Z is an integer"
else:
site_num=samp_con.split("-")[1]
samp_con="7"
DC_FIELD,DC_PHI,DC_THETA = list(map(float, kwargs.get('dc_params', (0,0,-90))))
DC_FIELD *= 1e-6
noave = kwargs.get('avg', True)
dmy_flag = kwargs.get('dmy_flag', False)
# format variables
if not mag_file:
return False, 'You must provide a Utrecht formated file'
mag_file = os.path.join(input_dir_path, mag_file)
meas_file = os.path.join(output_dir_path, meas_file)
spec_file = os.path.join(output_dir_path, spec_file)
samp_file = os.path.join(output_dir_path, samp_file)
site_file = os.path.join(output_dir_path, site_file)
# parse data
# Open up the Utrecht file and read the header information
print('mag_file in utrecht_file', mag_file)
AF_or_T = mag_file.split('.')[-1]
data = open(mag_file, 'r')
line = data.readline()
line_items = line.split(',')
operator=line_items[0]
operator=operator.replace("\"","")
machine=line_items[1]
machine=machine.replace("\"","")
machine=machine.rstrip('\n')
print("operator=", operator)
print("machine=", machine)
#read in measurement data
line = data.readline()
while line != "END" and line != '"END"':
ErSpecRec,ErSampRec,ErSiteRec = {},{},{}
line_items = line.split(',')
spec_name=line_items[0]
spec_name=spec_name.replace("\"","")
print("spec_name=", spec_name)
free_string=line_items[1]
free_string=free_string.replace("\"","")
print("free_string=", free_string)
dec=line_items[2]
print("dec=", dec)
inc=line_items[3]
print("inc=", inc)
volume=float(line_items[4])
volume=volume * 1e-6 # enter volume in cm^3, convert to m^3
print("volume=", volume)
bed_plane=line_items[5]
print("bed_plane=", bed_plane)
bed_tilt=line_items[6]
print("bed_tilt=", bed_tilt)
# Configure et er_ tables
ErSpecRec['er_specimen_name'] = spec_name
if specnum==0: sample_name = spec_name
else: sample_name = spec_name[:specnum]
ErSampRec['er_sample_name'] = sample_name
ErSpecRec['er_sample_name'] = sample_name
er_site_name = pmag.parse_site(sample_name,samp_con,site_num)
ErSpecRec['er_site_name']=er_site_name
ErSpecRec['er_location_name']=er_location_name
ErSampRec['sample_azimuth'] = dec
ErSampRec['sample_dip'] = str(float(inc)-90)
ErSampRec['sample_bed_dip_direction'] = bed_plane
ErSampRec['sample_bed_tilt'] = bed_tilt
ErSiteRec['site_lat'] = site_lat
ErSiteRec['site_lon'] = site_lon
ErSpecRec['magic_method_codes'] = meth_code
ErSampRec['er_location_name'] = er_location_name
ErSiteRec['er_location_name'] = er_location_name
ErSiteRec['er_site_name'] = er_site_name
ErSampRec['er_site_name'] = er_site_name
ErSampRec['er_citation_names'] = 'This study'
SpecOuts.append(ErSpecRec)
SampOuts.append(ErSampRec)
SiteOuts.append(ErSiteRec)
#measurement data
line = data.readline()
line = line.rstrip("\n")
items = line.split(",")
while line != '9999':
print(line)
step=items[0]
step=step.split('.')
step_value=step[0]
step_type = ""
if len(step) == 2:
step_type=step[1]
if step_type=='5':
step_value = items[0]
A=float(items[1])
B=float(items[2])
C=float(items[3])
# convert to MagIC coordinates
Z=-A
X=-B
Y=C
cart = np.array([X, Y, Z]).transpose()
direction = pmag.cart2dir(cart).transpose()
measurement_dec = direction[0]
measurement_inc = direction[1]
measurement_magn_moment = direction[2] * 1.0e-12 # the data are in pico-Am^2 - this converts to Am^2
measurement_magn_volume = direction[2] * 1.0e-12 / volume # data volume normalized - converted to A/m
print("measurement_magn_moment=", measurement_magn_moment)
print("measurement_magn_volume=", measurement_magn_volume)
error = items[4]
date=items[5]
date=date.strip('"')
if date.count("-") > 0:
date=date.split("-")
elif date.count("/") > 0:
date=date.split("/")
else: print("date format seperator cannot be identified")
print(date)
time=items[6]
time=time.strip('"')
time=time.split(":")
print(time)
if dmy_flag:
date_time = date[1] + ":" + date[0] + ":" + date[2] + ":" + time[0] + ":" + time[1] + ":" + "0.0"
else:
date_time = date[0] + ":" + date[1] + ":" + date[2] + ":" + time[0] + ":" + time[1] + ":" + "0.0"
print(date_time)
MagRec = {}
MagRec["er_analyst_mail_names"] = operator
MagRec["magic_instrument_codes"] = "Utrecht_" + machine
MagRec["measurement_description"] = "free string = " + free_string
MagRec["measurement_date"] = date_time
MagRec["er_citation_names"] = "This study"
MagRec['er_location_name'] = er_location_name
MagRec['er_site_name'] = er_site_name
MagRec['er_sample_name'] = sample_name
MagRec['magic_software_packages'] = version_num
MagRec["measurement_temp"] = '%8.3e' % (273) # room temp in kelvin
MagRec["measurement_flag"] = 'g'
MagRec["measurement_standard"] = 'u'
MagRec["magic_experiment_name"] = er_location_name + er_site_name + spec_name
MagRec["measurement_number"] = er_location_name + er_site_name + spec_name + items[0]
MagRec["er_specimen_name"] = spec_name
# MagRec["treatment_ac_field"] = '0'
if AF_or_T.lower() == "th":
MagRec["treatment_temp"] = '%8.3e' % (float(step_value)+273.) # temp in kelvin
MagRec['treatment_ac_field']='0'
meas_type = "LP-DIR-T:LT-T-Z"
else:
MagRec['treatment_temp']='273'
MagRec['treatment_ac_field']='%10.3e'%(float(step_value)*1e-3)
meas_type = "LP-DIR-AF:LT-AF-Z"
MagRec['treatment_dc_field']='0'
if step_value == '0':
meas_type = "LT-NO"
print("step_type=", step_type)
if step_type == '0' and AF_or_T.lower() == 'th':
if meas_type == "":
meas_type = "LT-T-Z"
else:
meas_type = meas_type + ":" + "LT-T-Z"
elif step_type == '1':
if meas_type == "":
meas_type = "LT-T-I"
else:
meas_type = meas_type + ":" + "LT-T-I"
MagRec['treatment_dc_field']='%1.2e'%DC_FIELD
elif step_type == '2':
if meas_type == "":
meas_type = "LT-PTRM-I"
else:
meas_type = meas_type + ":" + "LT-PTRM-I"
MagRec['treatment_dc_field']='%1.2e'%DC_FIELD
elif step_type == '3':
if meas_type == "" :
meas_type = "LT-PTRM-Z"
else:
meas_type = meas_type + ":" + "LT-PTRM-Z"
print("meas_type=", meas_type)
MagRec['treatment_dc_field_phi'] = '%1.2f'%DC_PHI
MagRec['treatment_dc_field_theta'] = '%1.2f'%DC_THETA
MagRec['magic_method_codes'] = meas_type
MagRec["measurement_magn_moment"] = measurement_magn_moment
MagRec["measurement_magn_volume"] = measurement_magn_volume
MagRec["measurement_dec"] = measurement_dec
MagRec["measurement_inc"] = measurement_inc
MagRec['measurement_csd'] = error
# MagRec['measurement_positions'] = '1'
MagRecs.append(MagRec)
line = data.readline()
line = line.rstrip("\n")
items = line.split(",")
line = data.readline()
line = line.rstrip("\n")
items = line.split(",")
# write out the data to MagIC data files
pmag.magic_write(spec_file, SpecOuts, 'er_specimens')
pmag.magic_write(samp_file, SampOuts, 'er_samples')
pmag.magic_write(site_file, SiteOuts, 'er_sites')
# MagOuts = pmag.measurements_methods(MagRecs, noave)
# pmag.magic_write(meas_file, MagOuts, 'magic_measurements')
pmag.magic_write(meas_file, MagRecs, 'magic_measurements')
print("results put in ", meas_file)
print("exit!")
return True, meas_file | NAME
utrecht_magic.py
DESCRIPTION
converts Utrecht magnetometer data files to magic_measurements files
SYNTAX
utrecht_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input file, or
-F FILE: specify output file, default is magic_measurements.txt
-Fsa: specify er_samples format file for appending, default is new er_samples.txt (Not working yet)
-WD: output directory for MagIC files
-ncn: Site Naming Convention
Site to Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2: default] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
-spc: number of characters to remove to generate sample names from specimen names
-dmy: European date format
-loc LOCNAME : specify location/study name
-lat latitude of samples
-lon longitude of samples
-A: don't average replicate measurements
-mcd: [SO-MAG,SO-SUN,SO-SIGHT...] supply how these samples were oriented
-dc: B PHI THETA: dc lab field (in microTesla), phi, and theta must be input as a tuple "(DC,PHI,THETA)". If not input user will be asked for values, this is advantageous if there are differing dc fields between steps or specimens. Note: this currently only works with the decimal IZZI naming convention (XXX.0,1,2,3 where XXX is the treatment temperature and 0 is a zero field step, 1 is in field, and 2 is a pTRM check, 3 is a tail check). All other steps are hardcoded dc_field = 0.
INPUT
Utrecht magnetometer data file | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/conversion_scripts2/utrecht_magic2.py#L12-L397 |
def on_okButton(self, event):
    """
    Grab user input values, format them, and run huji_magic.py
    (or huji_magic_new.py for the new file format) with the
    appropriate flags.

    Returns False (after a warning dialog) if a required input is
    missing; otherwise runs the conversion and closes the window on
    success or shows the converter's error message on failure.
    """
    os.chdir(self.WD)
    options = {}
    # input HUJI-format file (required)
    HUJI_file = self.bSizer0.return_value()
    if not HUJI_file:
        pw.simple_warning("You must select a HUJI format file")
        return False
    options['magfile'] = HUJI_file
    # output measurement file: input basename + ".magic", in the working dir
    magicoutfile=os.path.split(HUJI_file)[1]+".magic"
    outfile=os.path.join(self.WD, magicoutfile)
    options['meas_file'] = outfile
    user = self.bSizer1.return_value()
    options['user'] = user
    if user:
        # command-line form, used only in the COMMAND string shown to the user
        user = '-usr ' + user
    # experiment type code (required)
    experiment_type = self.bSizer2.return_value()
    options['codelist'] = experiment_type
    if not experiment_type:
        pw.simple_warning("You must select an experiment type")
        return False
    cooling_rate = self.cooling_rate.GetValue() or 0
    if cooling_rate:
        # cooling rate is appended to the experiment-type code string
        experiment_type = experiment_type + " " + cooling_rate
    # lab field as "magnitude phi theta"; defaults to zero field
    lab_field = self.bSizer3.return_value()
    if not lab_field:
        lab_field = "0 0 0"
    lab_field_list = lab_field.split()
    options['labfield'] = lab_field_list[0]
    options['phi'] = lab_field_list[1]
    options['theta'] = lab_field_list[2]
    lab_field = '-dc ' + lab_field
    # number of characters to strip from specimen name to get sample name
    spc = self.bSizer4.return_value()
    options['specnum'] = spc or 0
    if not spc:
        spc = '-spc 0'
    else:
        spc = '-spc ' + spc
    # sample naming convention
    ncn = self.bSizer5.return_value()
    options['samp_con'] = ncn
    loc_name = self.bSizer6.return_value()
    options['er_location_name'] = loc_name
    if loc_name:
        loc_name = '-loc ' + loc_name
    peak_AF = self.bSizer7.return_value()
    options['peakfield'] = peak_AF
    # replicate checkbox: checked -> noave=0 (average replicates),
    # unchecked -> noave=1 and the '-A' (don't average) flag
    replicate = self.bSizer8.return_value()
    if replicate:
        options['noave'] = 0
        replicate = ''
    else:
        options['noave'] = 1
        replicate = '-A'
    old_format= self.bSizer0a.return_value()
    if old_format:
        # legacy HUJI format: convert with huji_magic
        COMMAND = "huji_magic.py -f {} -F {} {} -LP {} {} -ncn {} {} {} {} {}".format(HUJI_file, outfile, user, experiment_type, loc_name, ncn, lab_field, spc, peak_AF, replicate)
        program_ran, error_message = huji_magic.main(False, **options)
        if program_ran:
            pw.close_window(self, COMMAND, outfile)
        else:
            pw.simple_warning(error_message)
    else: # new format
        # NOTE(review): the displayed command omits the replicate/-A flag
        # here although options['noave'] is still passed — confirm intended
        COMMAND = "huji_magic_new.py -f {} -F {} {} -LP {} {} -ncn {} {} {} {}".format(HUJI_file, outfile, user, experiment_type, loc_name, ncn, lab_field, spc, peak_AF)
        program_ran, error_message = huji_magic_new.main(False, **options)
        if program_ran:
            pw.close_window(self, COMMAND, outfile)
        else:
            pw.simple_warning(error_message)
"""
grab user input values, format them, and run huji_magic.py with the appropriate flags
"""
os.chdir(self.WD)
options = {}
HUJI_file = self.bSizer0.return_value()
if not HUJI_file:
pw.simple_warning("You must select a HUJI format file")
return False
options['magfile'] = HUJI_file
magicoutfile=os.path.split(HUJI_file)[1]+".magic"
outfile=os.path.join(self.WD, magicoutfile)
options['meas_file'] = outfile
user = self.bSizer1.return_value()
options['user'] = user
if user:
user = '-usr ' + user
experiment_type = self.bSizer2.return_value()
options['codelist'] = experiment_type
if not experiment_type:
pw.simple_warning("You must select an experiment type")
return False
cooling_rate = self.cooling_rate.GetValue() or 0
if cooling_rate:
experiment_type = experiment_type + " " + cooling_rate
lab_field = self.bSizer3.return_value()
if not lab_field:
lab_field = "0 0 0"
lab_field_list = lab_field.split()
options['labfield'] = lab_field_list[0]
options['phi'] = lab_field_list[1]
options['theta'] = lab_field_list[2]
lab_field = '-dc ' + lab_field
spc = self.bSizer4.return_value()
options['specnum'] = spc or 0
if not spc:
spc = '-spc 0'
else:
spc = '-spc ' + spc
ncn = self.bSizer5.return_value()
options['samp_con'] = ncn
loc_name = self.bSizer6.return_value()
options['er_location_name'] = loc_name
if loc_name:
loc_name = '-loc ' + loc_name
peak_AF = self.bSizer7.return_value()
options['peakfield'] = peak_AF
replicate = self.bSizer8.return_value()
if replicate:
options['noave'] = 0
replicate = ''
else:
options['noave'] = 1
replicate = '-A'
old_format= self.bSizer0a.return_value()
if old_format:
COMMAND = "huji_magic.py -f {} -F {} {} -LP {} {} -ncn {} {} {} {} {}".format(HUJI_file, outfile, user, experiment_type, loc_name, ncn, lab_field, spc, peak_AF, replicate)
program_ran, error_message = huji_magic.main(False, **options)
if program_ran:
pw.close_window(self, COMMAND, outfile)
else:
pw.simple_warning(error_message)
else: # new format
COMMAND = "huji_magic_new.py -f {} -F {} {} -LP {} {} -ncn {} {} {} {}".format(HUJI_file, outfile, user, experiment_type, loc_name, ncn, lab_field, spc, peak_AF)
program_ran, error_message = huji_magic_new.main(False, **options)
if program_ran:
pw.close_window(self, COMMAND, outfile)
else:
pw.simple_warning(error_message) | grab user input values, format them, and run huji_magic.py with the appropriate flags | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_gui_dialogs2.py#L1158-L1231 |
def on_okButton(self, event):
    """
    Compile the information input in the GUI into a kwargs dictionary
    which can be passed into the utrecht_magic script and run to output
    magic files.

    Returns False (after a warning dialog) if no input file was chosen;
    otherwise runs utrecht_magic and closes the window on success or
    shows the converter's error message on failure.
    """
    os.chdir(self.WD)
    options_dict = {}
    wd = self.WD
    options_dict['dir_path'] = wd
    # input Utrecht-format file (required)
    full_file = self.bSizer0.return_value()
    if not full_file:
        pw.simple_warning('You must provide a Utrecht format file')
        return False
    input_directory, Utrecht_file = os.path.split(full_file)
    options_dict['mag_file'] = Utrecht_file
    options_dict['input_dir_path'] = input_directory
    if input_directory:
        ID = "-ID " + input_directory
    else:
        ID = ''
    # output file names are all derived from the input file name
    outfile = Utrecht_file + ".magic"
    options_dict['meas_file'] = outfile
    samp_outfile = Utrecht_file[:Utrecht_file.find('.')] + "_er_samples.txt"
    options_dict['samp_file'] = samp_outfile
    spec_outfile = Utrecht_file[:Utrecht_file.find('.')] + "_er_specimens.txt"
    options_dict['spec_file'] = spec_outfile
    site_outfile = Utrecht_file[:Utrecht_file.find('.')] + "_er_sites.txt"
    options_dict['site_file'] = site_outfile
    # optional DC lab-field parameters: "B phi theta"
    dc_flag,dc_params = '',''
    if self.bSizer6.return_value() != '':
        dc_params = list(map(float,self.bSizer6.return_value().split()))
        options_dict['dc_params'] = dc_params
        dc_flag = '-dc'
    # number of characters to strip from specimen name to get sample name
    spec_num = self.bSizer3.return_value()
    options_dict['specnum'] = spec_num
    if spec_num:
        spec_num = "-spc " + str(spec_num)
    else:
        spec_num = "-spc 0" # defaults to 0 if user doesn't choose number
    loc_name = self.bSizer4.return_value()
    options_dict['location_name'] = loc_name
    if loc_name:
        loc_name = "-loc " + loc_name
    # sample naming convention
    ncn = self.bSizer2.return_value()
    options_dict['samp_con'] = ncn
    # orientation method codes (e.g. SO-MAG, SO-SUN)
    particulars = self.bSizer1.return_value()
    options_dict['meth_code'] = particulars
    if particulars:
        particulars = "-mcd " + particulars
    # European (day/month/year) date format toggle
    euro_date = self.bSizer7.return_value()
    if euro_date: options_dict['dmy_flag'] = True; dmy_flag='-dmy'
    else: options_dict['dmy_flag'] = False; dmy_flag=''
    # site latitude/longitude entered as "lat lon"; blank if unparseable
    try: site_lat,site_lon = self.bSizer8.return_value().split()
    except ValueError: site_lat,site_lon = '',''
    options_dict['site_lat'] = site_lat
    options_dict['site_lon'] = site_lon
    # replicate checkbox: checked -> average replicates (avg=False means
    # "don't skip averaging" is off); unchecked -> '-A' (don't average)
    replicate = self.bSizer5.return_value()
    if replicate:
        options_dict['avg'] = False
        replicate = ''
    else:
        options_dict['avg'] = True
        replicate = '-A'
    # BUGFIX: the displayed command previously named "cit_magic.py" although
    # utrecht_magic is what actually runs, and it filled the -lat/-lon
    # placeholders with (site_lon, site_lat) swapped. Both are corrected.
    COMMAND = "utrecht_magic.py -WD {} -f {} -F {} {} {} {} -ncn {} {} -Fsp {} -Fsi {} -Fsa {} {} {} {} {} -lat {} -lon {}".format(wd, Utrecht_file, outfile, particulars, spec_num, loc_name, ncn, ID, spec_outfile, site_outfile, samp_outfile, replicate, dc_flag, dc_params, dmy_flag, site_lat, site_lon)
    # to run as module:
    program_ran, error_message = utrecht_magic.main(command_line=False, **options_dict)
    if program_ran:
        pw.close_window(self, COMMAND, outfile)
    else:
        pw.simple_warning(error_message)
"""
Complies information input in GUI into a kwargs dictionary which can
be passed into the utrecht_magic script and run to output magic files
"""
os.chdir(self.WD)
options_dict = {}
wd = self.WD
options_dict['dir_path'] = wd
full_file = self.bSizer0.return_value()
if not full_file:
pw.simple_warning('You must provide a Utrecht format file')
return False
input_directory, Utrecht_file = os.path.split(full_file)
options_dict['mag_file'] = Utrecht_file
options_dict['input_dir_path'] = input_directory
if input_directory:
ID = "-ID " + input_directory
else:
ID = ''
outfile = Utrecht_file + ".magic"
options_dict['meas_file'] = outfile
samp_outfile = Utrecht_file[:Utrecht_file.find('.')] + "_er_samples.txt"
options_dict['samp_file'] = samp_outfile
spec_outfile = Utrecht_file[:Utrecht_file.find('.')] + "_er_specimens.txt"
options_dict['spec_file'] = spec_outfile
site_outfile = Utrecht_file[:Utrecht_file.find('.')] + "_er_sites.txt"
options_dict['site_file'] = site_outfile
dc_flag,dc_params = '',''
if self.bSizer6.return_value() != '':
dc_params = list(map(float,self.bSizer6.return_value().split()))
options_dict['dc_params'] = dc_params
dc_flag = '-dc'
spec_num = self.bSizer3.return_value()
options_dict['specnum'] = spec_num
if spec_num:
spec_num = "-spc " + str(spec_num)
else:
spec_num = "-spc 0" # defaults to 0 if user doesn't choose number
loc_name = self.bSizer4.return_value()
options_dict['location_name'] = loc_name
if loc_name:
loc_name = "-loc " + loc_name
ncn = self.bSizer2.return_value()
options_dict['samp_con'] = ncn
particulars = self.bSizer1.return_value()
options_dict['meth_code'] = particulars
if particulars:
particulars = "-mcd " + particulars
euro_date = self.bSizer7.return_value()
if euro_date: options_dict['dmy_flag'] = True; dmy_flag='-dmy'
else: options_dict['dmy_flag'] = False; dmy_flag=''
try: site_lat,site_lon = self.bSizer8.return_value().split()
except ValueError: site_lat,site_lon = '',''
options_dict['site_lat'] = site_lat
options_dict['site_lon'] = site_lon
replicate = self.bSizer5.return_value()
if replicate:
options_dict['avg'] = False
replicate = ''
else:
options_dict['avg'] = True
replicate = '-A'
COMMAND = "cit_magic.py -WD {} -f {} -F {} {} {} {} -ncn {} {} -Fsp {} -Fsi {} -Fsa {} {} {} {} {} -lat {} -lon {}".format(wd, Utrecht_file, outfile, particulars, spec_num, loc_name, ncn, ID, spec_outfile, site_outfile, samp_outfile, replicate, dc_flag, dc_params, dmy_flag, site_lon, site_lat)
# to run as module:
program_ran, error_message = utrecht_magic.main(command_line=False, **options_dict)
if program_ran:
pw.close_window(self, COMMAND, outfile)
else:
pw.simple_warning(error_message) | Compiles information input in GUI into a kwargs dictionary which can
be passed into the utrecht_magic script and run to output magic files | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_gui_dialogs2.py#L2275-L2345 |
def on_m_calc_orient(self,event):
    '''
    This function does exactly what the 'import orientation' function
    does in MagIC.py: after some dialog boxes it calls
    ipmag.orientation_magic (the library form of orientation_magic.py)
    to build er_samples/er_sites files from demag_orient.txt.
    '''
    # first save the current grid contents to demag_orient.txt
    self.on_m_save_file(None)
    # ask the user for the orientation/declination/GMT conventions
    orient_convention_dia = orient_convention(None)
    orient_convention_dia.Center()
    #orient_convention_dia.ShowModal()
    if orient_convention_dia.ShowModal() == wx.ID_OK:
        ocn_flag = orient_convention_dia.ocn_flag
        dcn_flag = orient_convention_dia.dcn_flag
        gmt_flags = orient_convention_dia.gmt_flags
        orient_convention_dia.Destroy()
    else:
        # user cancelled the convention dialog
        return
    # NOTE(review): dialog attributes are read after Destroy(); this works
    # because wx only schedules deletion — confirm before reordering.
    or_con = orient_convention_dia.ocn
    dec_correction_con = int(orient_convention_dia.dcn)
    try:
        hours_from_gmt = float(orient_convention_dia.gmt)
    except:
        # non-numeric or missing entry: default to GMT itself
        hours_from_gmt = 0
    try:
        dec_correction = float(orient_convention_dia.correct_dec)
    except:
        # non-numeric or missing entry: no declination correction
        dec_correction = 0
    # ask the user for method codes / bedding options
    method_code_dia=method_code_dialog(None)
    method_code_dia.Center()
    if method_code_dia.ShowModal() == wx.ID_OK:
        bedding_codes_flags=method_code_dia.bedding_codes_flags
        methodcodes_flags=method_code_dia.methodcodes_flags
        method_code_dia.Destroy()
    else:
        print("-I- Canceling calculation")
        return
    method_codes = method_code_dia.methodcodes
    average_bedding = method_code_dia.average_bedding
    bed_correction = method_code_dia.bed_correction
    # assemble the equivalent command line (printed for the user's log only;
    # the actual work is done by the ipmag.orientation_magic call below)
    command_args=['orientation_magic.py']
    command_args.append("-WD %s"%self.WD)
    command_args.append("-Fsa er_samples_orient.txt")
    command_args.append("-Fsi er_sites_orient.txt ")
    command_args.append("-f %s"%"demag_orient.txt")
    command_args.append(ocn_flag)
    command_args.append(dcn_flag)
    command_args.append(gmt_flags)
    command_args.append(bedding_codes_flags)
    command_args.append(methodcodes_flags)
    commandline = " ".join(command_args)
    print("-I- executing command: %s" %commandline)
    os.chdir(self.WD)
    # append to existing er_samples/er_sites files rather than overwriting
    if os.path.exists(os.path.join(self.WD, 'er_samples.txt')) or os.path.exists(os.path.join(self.WD, 'er_sites.txt')):
        append = True
    else:
        append = False
    samp_file = "er_samples.txt"
    site_file = "er_sites.txt"
    success, error_message = ipmag.orientation_magic(or_con, dec_correction_con, dec_correction,
                                                     bed_correction, hours_from_gmt=hours_from_gmt,
                                                     method_codes=method_codes, average_bedding=average_bedding,
                                                     orient_file='demag_orient.txt', samp_file=samp_file,
                                                     site_file=site_file, input_dir_path=self.WD,
                                                     output_dir_path=self.WD, append=append, data_model=3)
    if not success:
        # report the failure both in a dialog and on the console
        dlg1 = wx.MessageDialog(None,caption="Message:", message="-E- ERROR: Error in running orientation_magic.py\n{}".format(error_message) ,style=wx.OK|wx.ICON_INFORMATION)
        dlg1.ShowModal()
        dlg1.Destroy()
        print("-E- ERROR: Error in running orientation_magic.py")
        return
    else:
        dlg2 = wx.MessageDialog(None,caption="Message:", message="-I- Successfully ran orientation_magic", style=wx.OK|wx.ICON_INFORMATION)
        dlg2.ShowModal()
        dlg2.Destroy()
        # close this frame, return focus to the parent, and reload the
        # freshly written samples table into the contribution
        self.Parent.Show()
        self.Parent.Raise()
        self.Destroy()
        self.contribution.add_magic_table('samples')
    return
'''
This fucntion does exactly what the 'import orientation' fuction does in MagIC.py
after some dialog boxes the function calls orientation_magic.py
'''
# first see if demag_orient.txt
self.on_m_save_file(None)
orient_convention_dia = orient_convention(None)
orient_convention_dia.Center()
#orient_convention_dia.ShowModal()
if orient_convention_dia.ShowModal() == wx.ID_OK:
ocn_flag = orient_convention_dia.ocn_flag
dcn_flag = orient_convention_dia.dcn_flag
gmt_flags = orient_convention_dia.gmt_flags
orient_convention_dia.Destroy()
else:
return
or_con = orient_convention_dia.ocn
dec_correction_con = int(orient_convention_dia.dcn)
try:
hours_from_gmt = float(orient_convention_dia.gmt)
except:
hours_from_gmt = 0
try:
dec_correction = float(orient_convention_dia.correct_dec)
except:
dec_correction = 0
method_code_dia=method_code_dialog(None)
method_code_dia.Center()
if method_code_dia.ShowModal() == wx.ID_OK:
bedding_codes_flags=method_code_dia.bedding_codes_flags
methodcodes_flags=method_code_dia.methodcodes_flags
method_code_dia.Destroy()
else:
print("-I- Canceling calculation")
return
method_codes = method_code_dia.methodcodes
average_bedding = method_code_dia.average_bedding
bed_correction = method_code_dia.bed_correction
command_args=['orientation_magic.py']
command_args.append("-WD %s"%self.WD)
command_args.append("-Fsa er_samples_orient.txt")
command_args.append("-Fsi er_sites_orient.txt ")
command_args.append("-f %s"%"demag_orient.txt")
command_args.append(ocn_flag)
command_args.append(dcn_flag)
command_args.append(gmt_flags)
command_args.append(bedding_codes_flags)
command_args.append(methodcodes_flags)
commandline = " ".join(command_args)
print("-I- executing command: %s" %commandline)
os.chdir(self.WD)
if os.path.exists(os.path.join(self.WD, 'er_samples.txt')) or os.path.exists(os.path.join(self.WD, 'er_sites.txt')):
append = True
else:
append = False
samp_file = "er_samples.txt"
site_file = "er_sites.txt"
success, error_message = ipmag.orientation_magic(or_con, dec_correction_con, dec_correction,
bed_correction, hours_from_gmt=hours_from_gmt,
method_codes=method_codes, average_bedding=average_bedding,
orient_file='demag_orient.txt', samp_file=samp_file,
site_file=site_file, input_dir_path=self.WD,
output_dir_path=self.WD, append=append, data_model=3)
if not success:
dlg1 = wx.MessageDialog(None,caption="Message:", message="-E- ERROR: Error in running orientation_magic.py\n{}".format(error_message) ,style=wx.OK|wx.ICON_INFORMATION)
dlg1.ShowModal()
dlg1.Destroy()
print("-E- ERROR: Error in running orientation_magic.py")
return
else:
dlg2 = wx.MessageDialog(None,caption="Message:", message="-I- Successfully ran orientation_magic", style=wx.OK|wx.ICON_INFORMATION)
dlg2.ShowModal()
dlg2.Destroy()
self.Parent.Show()
self.Parent.Raise()
self.Destroy()
self.contribution.add_magic_table('samples')
return | This function does exactly what the 'import orientation' function does in MagIC.py
after some dialog boxes the function calls orientation_magic.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_gui_dialogs2.py#L2697-L2782 |
def main():
    """
    NAME
        eigs_s.py
    DESCRIPTION
        converts eigenparameters format data to s format
    SYNTAX
        eigs_s.py [-h][-i][command line options][<filename]
    OPTIONS
        -h prints help message and quits
        -i allows interactive file name entry
        -f FILE, specifies input file name
        -F FILE, specifies output file name
        < filename, reads file from standard input (Unix-like operating systems only)
    INPUT
        tau_i, dec_i inc_i of eigenvectors
    OUTPUT
        x11,x22,x33,x12,x23,x13
    """
    file=""
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    elif '-i' in sys.argv:
        file=input("Enter eigenparameters data file name: ")
    elif '-f' in sys.argv:
        ind=sys.argv.index('-f')
        file=sys.argv[ind+1]
    if file!="":
        # read from the named file; otherwise fall back to stdin
        with open(file,'r') as f:
            data=f.readlines()
    else:
        data=sys.stdin.readlines()
    ofile=""
    out=None
    if '-F' in sys.argv:
        ind = sys.argv.index('-F')
        ofile= sys.argv[ind+1]
        # BUGFIX: the mode string was 'w + a', which is not a valid open()
        # mode and raised ValueError at runtime; write mode is intended.
        out = open(ofile, 'w')
    for line in data:
        tau,Vdirs=[],[]
        rec=line.split()
        # each record holds three (tau, dec, inc) triples
        for k in range(0,9,3):
            tau.append(float(rec[k]))
            Vdirs.append((float(rec[k+1]),float(rec[k+2])))
        srot=pmag.doeigs_s(tau,Vdirs)
        outstring=""
        for s in srot:
            outstring+='%10.8f '%(s)
        if out is None:
            print(outstring)
        else:
            out.write(outstring+'\n')
    # BUGFIX: the output file was never closed
    if out is not None:
        out.close()
"""
NAME
eigs_s.py
DESCRIPTION
converts eigenparamters format data to s format
SYNTAX
eigs_s.py [-h][-i][command line options][<filename]
OPTIONS
-h prints help message and quits
-i allows interactive file name entry
-f FILE, specifies input file name
-F FILE, specifies output file name
< filenmae, reads file from standard input (Unix-like operating systems only)
INPUT
tau_i, dec_i inc_i of eigenvectors
OUTPUT
x11,x22,x33,x12,x23,x13
"""
file=""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
elif '-i' in sys.argv:
file=input("Enter eigenparameters data file name: ")
elif '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
if file!="":
f=open(file,'r')
data=f.readlines()
f.close()
else:
data=sys.stdin.readlines()
ofile=""
if '-F' in sys.argv:
ind = sys.argv.index('-F')
ofile= sys.argv[ind+1]
out = open(ofile, 'w + a')
file_outstring = ""
for line in data:
tau,Vdirs=[],[]
rec=line.split()
for k in range(0,9,3):
tau.append(float(rec[k]))
Vdirs.append((float(rec[k+1]),float(rec[k+2])))
srot=pmag.doeigs_s(tau,Vdirs)
outstring=""
for s in srot:outstring+='%10.8f '%(s)
if ofile=="":
print(outstring)
else:
out.write(outstring+'\n') | NAME
eigs_s.py
DESCRIPTION
converts eigenparamters format data to s format
SYNTAX
eigs_s.py [-h][-i][command line options][<filename]
OPTIONS
-h prints help message and quits
-i allows interactive file name entry
-f FILE, specifies input file name
-F FILE, specifies output file name
< filenmae, reads file from standard input (Unix-like operating systems only)
INPUT
tau_i, dec_i inc_i of eigenvectors
OUTPUT
x11,x22,x33,x12,x23,x13 | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/eigs_s.py#L8-L66 |
def main():
    """
    NAME
        ani_depthplot.py
    DESCRIPTION
        plots tau, V3_inc, V1_dec, P and chi versus core_depth
    SYNTAX
        ani_depthplot.py [command line options]
        # or, for Anaconda users:
        ani_depthplot_anaconda [command line options]
    OPTIONS
        -h prints help message and quits
        -f FILE: specify input rmag_anisotropy format file from magic (MagIC 2 only)
        -fb FILE: specify input measurements format file from magic
        -fsa FILE: specify input sample format file from magic
        -fsp FILE: specify input specimen file (MagIC 3 only)
        -fsum FILE : specify input LIMS database (IODP) core summary csv file
                to print the core names, set lab to 1
        -fa FILE: specify input ages format file from magic
        -d min max [in m] depth range to plot
        -ds [mcd,mbsf], specify depth scale, default is mbsf (core depth)
        -sav save plot without review
        -fmt specify format for figures - default is svg
    DEFAULTS:
        Anisotropy file: specimens.txt
        Bulk susceptibility file: measurements.txt
        Samples file: samples.txt
    """
    args = sys.argv
    if '-h' in args:
        print(main.__doc__)
        sys.exit()
    # declare each flag with (name, required?, default value)
    dataframe = extractor.command_line_dataframe([['f', False, 'rmag_anisotropy.txt'],
                                                  ['fb', False, 'magic_measurements.txt'],
                                                  ['fsa', False, 'er_samples.txt'],
                                                  ['fa', False, None], ['fsum', False, None],
                                                  ['fmt', False, 'svg'], ['ds', False, 'mbsf'],
                                                  ['d', False, '-1 -1'], ['sav', False, False],
                                                  ['WD', False, '.' ], ['DM', False, 3],
                                                  ['fsp', False, 'specimens.txt']])
    #args = sys.argv
    checked_args = extractor.extract_and_check_args(args, dataframe)
    ani_file, meas_file, samp_file, age_file, sum_file, fmt, depth_scale, depth, save_quietly, dir_path, data_model, spec_file = extractor.get_vars(['f', 'fb', 'fsa', 'fa', 'fsum', 'fmt', 'ds', 'd', 'sav', 'WD', 'DM', 'fsp'], checked_args)
    # format min/max depth; -1 -1 means "plot all depths"
    try:
        dmin, dmax = depth.split()
        dmin, dmax = float(dmin), float(dmax)
    except:
        print('you must provide depth in this format: -d dmin dmax')
        print('could not parse "{}", defaulting to plotting all depths'.format('-d ' + str(depth)))
        dmin, dmax = -1, -1
    # map the user-facing depth-scale keyword onto the MagIC column name;
    # an age file overrides it and forces plotting by age
    if depth_scale:
        if depth_scale not in ['age', 'mbsf', 'mcd']:
            print('-W- Unrecognized option "{}" provided for depth scale.\n    Options for depth scale are mbsf (meters below sea floor) or mcd (meters composite depth).\n    Alternatively, if you provide an age file the depth scale will be automatically set to plot by age instead.\n    Using default "mbsf"'.format(depth_scale))
            depth_scale = 'sample_core_depth'
        if age_file:
            depth_scale = 'age'
        elif 'mbsf' in depth_scale:
            depth_scale = 'sample_core_depth'
        elif 'mcd' in depth_scale:
            depth_scale = 'sample_composite_depth'
    data_model = int(float(data_model))
    # MagIC 2
    if data_model == 2:
        fig, figname = ipmag.ani_depthplot2(ani_file, meas_file, samp_file, age_file, sum_file, fmt, dmin, dmax, depth_scale, dir_path)
    # MagIC 3
    else:
        # translate MagIC 2 default file names to their MagIC 3 equivalents
        if meas_file == "magic_measurements.txt":
            meas_file = 'measurements.txt'
        if samp_file in ['er_samples.txt', 'pmag_samples.txt']:
            samp_file = "samples.txt"
        site_file = 'sites.txt'
        fig, fignames = ipmag.ani_depthplot(spec_file, samp_file, meas_file, site_file, age_file, sum_file, fmt, dmin, dmax, depth_scale, dir_path)
        figname = fignames[0]
    # -sav: write the figure to disk without opening the review window
    if save_quietly:
        if dir_path == '.':
            dir_path = os.getcwd()
        plt.savefig(figname)
        plt.clf()
        print('Saved file: {}'.format(figname))
        return False
    # otherwise display the plot in an interactive wx frame
    app = wx.App(redirect=False)
    if not fig:
        pw.simple_warning('No plot was able to be created with the data you provided.\nMake sure you have given all the required information and try again')
        return False
    # size the frame to the figure's pixel dimensions (plus room for buttons)
    dpi = fig.get_dpi()
    pixel_width = dpi * fig.get_figwidth()
    pixel_height = dpi * fig.get_figheight()
    figname = os.path.join(dir_path, figname)
    plot_frame = pmag_menu_dialogs.PlotFrame((int(pixel_width), int(pixel_height + 50)),
                                             fig, figname, standalone=True)
    app.MainLoop()
"""
NAME
ani_depthplot.py
DESCRIPTION
plots tau, V3_inc, V1_dec, P and chi versus core_depth
SYNTAX
ani_depthplot.py [command line optins]
# or, for Anaconda users:
ani_depthplot_anaconda [command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input rmag_anisotropy format file from magic (MagIC 2 only)
-fb FILE: specify input measurements format file from magic
-fsa FILE: specify input sample format file from magic
-fsp FILE: specify input specimen file (MagIC 3 only)
-fsum FILE : specify input LIMS database (IODP) core summary csv file
to print the core names, set lab to 1
-fa FILE: specify input ages format file from magic
-d min max [in m] depth range to plot
-ds [mcd,mbsf], specify depth scale, default is mbsf (core depth)
-sav save plot without review
-fmt specfiy format for figures - default is svg
DEFAULTS:
Anisotropy file: specimens.txt
Bulk susceptibility file: measurements.txt
Samples file: samples.txt
"""
args = sys.argv
if '-h' in args:
print(main.__doc__)
sys.exit()
dataframe = extractor.command_line_dataframe([['f', False, 'rmag_anisotropy.txt'],
['fb', False, 'magic_measurements.txt'],
['fsa', False, 'er_samples.txt'],
['fa', False, None], ['fsum', False, None],
['fmt', False, 'svg'], ['ds', False, 'mbsf'],
['d', False, '-1 -1'], ['sav', False, False],
['WD', False, '.' ], ['DM', False, 3],
['fsp', False, 'specimens.txt']])
#args = sys.argv
checked_args = extractor.extract_and_check_args(args, dataframe)
ani_file, meas_file, samp_file, age_file, sum_file, fmt, depth_scale, depth, save_quietly, dir_path, data_model, spec_file = extractor.get_vars(['f', 'fb', 'fsa', 'fa', 'fsum', 'fmt', 'ds', 'd', 'sav', 'WD', 'DM', 'fsp'], checked_args)
# format min/max depth
try:
dmin, dmax = depth.split()
dmin, dmax = float(dmin), float(dmax)
except:
print('you must provide depth in this format: -d dmin dmax')
print('could not parse "{}", defaulting to plotting all depths'.format('-d ' + str(depth)))
dmin, dmax = -1, -1
if depth_scale:
if depth_scale not in ['age', 'mbsf', 'mcd']:
print('-W- Unrecognized option "{}" provided for depth scale.\n Options for depth scale are mbsf (meters below sea floor) or mcd (meters composite depth).\n Alternatively, if you provide an age file the depth scale will be automatically set to plot by age instead.\n Using default "mbsf"'.format(depth_scale))
depth_scale = 'sample_core_depth'
if age_file:
depth_scale = 'age'
elif 'mbsf' in depth_scale:
depth_scale = 'sample_core_depth'
elif 'mcd' in depth_scale:
depth_scale = 'sample_composite_depth'
data_model = int(float(data_model))
# MagIC 2
if data_model == 2:
fig, figname = ipmag.ani_depthplot2(ani_file, meas_file, samp_file, age_file, sum_file, fmt, dmin, dmax, depth_scale, dir_path)
# MagIC 3
else:
if meas_file == "magic_measurements.txt":
meas_file = 'measurements.txt'
if samp_file in ['er_samples.txt', 'pmag_samples.txt']:
samp_file = "samples.txt"
site_file = 'sites.txt'
fig, fignames = ipmag.ani_depthplot(spec_file, samp_file, meas_file, site_file, age_file, sum_file, fmt, dmin, dmax, depth_scale, dir_path)
figname = fignames[0]
if save_quietly:
if dir_path == '.':
dir_path = os.getcwd()
plt.savefig(figname)
plt.clf()
print('Saved file: {}'.format(figname))
return False
app = wx.App(redirect=False)
if not fig:
pw.simple_warning('No plot was able to be created with the data you provided.\nMake sure you have given all the required information and try again')
return False
dpi = fig.get_dpi()
pixel_width = dpi * fig.get_figwidth()
pixel_height = dpi * fig.get_figheight()
figname = os.path.join(dir_path, figname)
plot_frame = pmag_menu_dialogs.PlotFrame((int(pixel_width), int(pixel_height + 50)),
fig, figname, standalone=True)
app.MainLoop() | NAME
ani_depthplot.py
DESCRIPTION
plots tau, V3_inc, V1_dec, P and chi versus core_depth
SYNTAX
ani_depthplot.py [command line optins]
# or, for Anaconda users:
ani_depthplot_anaconda [command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input rmag_anisotropy format file from magic (MagIC 2 only)
-fb FILE: specify input measurements format file from magic
-fsa FILE: specify input sample format file from magic
-fsp FILE: specify input specimen file (MagIC 3 only)
-fsum FILE : specify input LIMS database (IODP) core summary csv file
to print the core names, set lab to 1
-fa FILE: specify input ages format file from magic
-d min max [in m] depth range to plot
-ds [mcd,mbsf], specify depth scale, default is mbsf (core depth)
-sav save plot without review
-fmt specfiy format for figures - default is svg
DEFAULTS:
Anisotropy file: specimens.txt
Bulk susceptibility file: measurements.txt
Samples file: samples.txt | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/ani_depthplot.py#L16-L119 |
spyder-ide/spyder-kernels | spyder_kernels/utils/nsview.py | get_numpy_dtype | def get_numpy_dtype(obj):
"""Return NumPy data type associated to obj
Return None if NumPy is not available
or if obj is not a NumPy array or scalar"""
if ndarray is not FakeObject:
# NumPy is available
import numpy as np
if isinstance(obj, np.generic) or isinstance(obj, np.ndarray):
# Numpy scalars all inherit from np.generic.
# Numpy arrays all inherit from np.ndarray.
# If we check that we are certain we have one of these
# types then we are less likely to generate an exception below.
try:
return obj.dtype.type
except (AttributeError, RuntimeError):
# AttributeError: some NumPy objects have no dtype attribute
# RuntimeError: happens with NetCDF objects (Issue 998)
return | python | def get_numpy_dtype(obj):
"""Return NumPy data type associated to obj
Return None if NumPy is not available
or if obj is not a NumPy array or scalar"""
if ndarray is not FakeObject:
# NumPy is available
import numpy as np
if isinstance(obj, np.generic) or isinstance(obj, np.ndarray):
# Numpy scalars all inherit from np.generic.
# Numpy arrays all inherit from np.ndarray.
# If we check that we are certain we have one of these
# types then we are less likely to generate an exception below.
try:
return obj.dtype.type
except (AttributeError, RuntimeError):
# AttributeError: some NumPy objects have no dtype attribute
# RuntimeError: happens with NetCDF objects (Issue 998)
return | Return NumPy data type associated to obj
Return None if NumPy is not available
or if obj is not a NumPy array or scalar | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/nsview.py#L50-L67 |
spyder-ide/spyder-kernels | spyder_kernels/utils/nsview.py | address | def address(obj):
"""Return object address as a string: '<classname @ address>'"""
return "<%s @ %s>" % (obj.__class__.__name__,
hex(id(obj)).upper().replace('X', 'x')) | python | def address(obj):
"""Return object address as a string: '<classname @ address>'"""
return "<%s @ %s>" % (obj.__class__.__name__,
hex(id(obj)).upper().replace('X', 'x')) | Return object address as a string: '<classname @ address> | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/nsview.py#L102-L105 |
spyder-ide/spyder-kernels | spyder_kernels/utils/nsview.py | get_size | def get_size(item):
"""Return size of an item of arbitrary type"""
if isinstance(item, (list, set, tuple, dict)):
return len(item)
elif isinstance(item, (ndarray, MaskedArray)):
return item.shape
elif isinstance(item, Image):
return item.size
if isinstance(item, (DataFrame, Index, Series)):
return item.shape
else:
return 1 | python | def get_size(item):
"""Return size of an item of arbitrary type"""
if isinstance(item, (list, set, tuple, dict)):
return len(item)
elif isinstance(item, (ndarray, MaskedArray)):
return item.shape
elif isinstance(item, Image):
return item.size
if isinstance(item, (DataFrame, Index, Series)):
return item.shape
else:
return 1 | Return size of an item of arbitrary type | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/nsview.py#L116-L127 |
spyder-ide/spyder-kernels | spyder_kernels/utils/nsview.py | get_object_attrs | def get_object_attrs(obj):
"""
Get the attributes of an object using dir.
This filters protected attributes
"""
attrs = [k for k in dir(obj) if not k.startswith('__')]
if not attrs:
attrs = dir(obj)
return attrs | python | def get_object_attrs(obj):
"""
Get the attributes of an object using dir.
This filters protected attributes
"""
attrs = [k for k in dir(obj) if not k.startswith('__')]
if not attrs:
attrs = dir(obj)
return attrs | Get the attributes of an object using dir.
This filters protected attributes | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/nsview.py#L130-L139 |
spyder-ide/spyder-kernels | spyder_kernels/utils/nsview.py | str_to_timedelta | def str_to_timedelta(value):
"""Convert a string to a datetime.timedelta value.
The following strings are accepted:
- 'datetime.timedelta(1, 5, 12345)'
- 'timedelta(1, 5, 12345)'
- '(1, 5, 12345)'
- '1, 5, 12345'
- '1'
if there are less then three parameters, the missing parameters are
assumed to be 0. Variations in the spacing of the parameters are allowed.
Raises:
ValueError for strings not matching the above criterion.
"""
m = re.match(r'^(?:(?:datetime\.)?timedelta)?'
r'\(?'
r'([^)]*)'
r'\)?$', value)
if not m:
raise ValueError('Invalid string for datetime.timedelta')
args = [int(a.strip()) for a in m.group(1).split(',')]
return datetime.timedelta(*args) | python | def str_to_timedelta(value):
"""Convert a string to a datetime.timedelta value.
The following strings are accepted:
- 'datetime.timedelta(1, 5, 12345)'
- 'timedelta(1, 5, 12345)'
- '(1, 5, 12345)'
- '1, 5, 12345'
- '1'
if there are less then three parameters, the missing parameters are
assumed to be 0. Variations in the spacing of the parameters are allowed.
Raises:
ValueError for strings not matching the above criterion.
"""
m = re.match(r'^(?:(?:datetime\.)?timedelta)?'
r'\(?'
r'([^)]*)'
r'\)?$', value)
if not m:
raise ValueError('Invalid string for datetime.timedelta')
args = [int(a.strip()) for a in m.group(1).split(',')]
return datetime.timedelta(*args) | Convert a string to a datetime.timedelta value.
The following strings are accepted:
- 'datetime.timedelta(1, 5, 12345)'
- 'timedelta(1, 5, 12345)'
- '(1, 5, 12345)'
- '1, 5, 12345'
- '1'
if there are less then three parameters, the missing parameters are
assumed to be 0. Variations in the spacing of the parameters are allowed.
Raises:
ValueError for strings not matching the above criterion. | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/nsview.py#L163-L188 |
spyder-ide/spyder-kernels | spyder_kernels/utils/nsview.py | get_color_name | def get_color_name(value):
"""Return color name depending on value type"""
if not is_known_type(value):
return CUSTOM_TYPE_COLOR
for typ, name in list(COLORS.items()):
if isinstance(value, typ):
return name
else:
np_dtype = get_numpy_dtype(value)
if np_dtype is None or not hasattr(value, 'size'):
return UNSUPPORTED_COLOR
elif value.size == 1:
return SCALAR_COLOR
else:
return ARRAY_COLOR | python | def get_color_name(value):
"""Return color name depending on value type"""
if not is_known_type(value):
return CUSTOM_TYPE_COLOR
for typ, name in list(COLORS.items()):
if isinstance(value, typ):
return name
else:
np_dtype = get_numpy_dtype(value)
if np_dtype is None or not hasattr(value, 'size'):
return UNSUPPORTED_COLOR
elif value.size == 1:
return SCALAR_COLOR
else:
return ARRAY_COLOR | Return color name depending on value type | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/nsview.py#L217-L231 |
spyder-ide/spyder-kernels | spyder_kernels/utils/nsview.py | sort_against | def sort_against(list1, list2, reverse=False):
"""
Arrange items of list1 in the same order as sorted(list2).
In other words, apply to list1 the permutation which takes list2
to sorted(list2, reverse).
"""
try:
return [item for _, item in
sorted(zip(list2, list1), key=lambda x: x[0], reverse=reverse)]
except:
return list1 | python | def sort_against(list1, list2, reverse=False):
"""
Arrange items of list1 in the same order as sorted(list2).
In other words, apply to list1 the permutation which takes list2
to sorted(list2, reverse).
"""
try:
return [item for _, item in
sorted(zip(list2, list1), key=lambda x: x[0], reverse=reverse)]
except:
return list1 | Arrange items of list1 in the same order as sorted(list2).
In other words, apply to list1 the permutation which takes list2
to sorted(list2, reverse). | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/nsview.py#L243-L254 |
spyder-ide/spyder-kernels | spyder_kernels/utils/nsview.py | default_display | def default_display(value, with_module=True):
"""Default display for unknown objects."""
object_type = type(value)
try:
name = object_type.__name__
module = object_type.__module__
if with_module:
return name + ' object of ' + module + ' module'
else:
return name
except:
type_str = to_text_string(object_type)
return type_str[1:-1] | python | def default_display(value, with_module=True):
"""Default display for unknown objects."""
object_type = type(value)
try:
name = object_type.__name__
module = object_type.__module__
if with_module:
return name + ' object of ' + module + ' module'
else:
return name
except:
type_str = to_text_string(object_type)
return type_str[1:-1] | Default display for unknown objects. | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/nsview.py#L265-L277 |
spyder-ide/spyder-kernels | spyder_kernels/utils/nsview.py | collections_display | def collections_display(value, level):
"""Display for collections (i.e. list, set, tuple and dict)."""
is_dict = isinstance(value, dict)
is_set = isinstance(value, set)
# Get elements
if is_dict:
elements = iteritems(value)
else:
elements = value
# Truncate values
truncate = False
if level == 1 and len(value) > 10:
elements = islice(elements, 10) if is_dict or is_set else value[:10]
truncate = True
elif level == 2 and len(value) > 5:
elements = islice(elements, 5) if is_dict or is_set else value[:5]
truncate = True
# Get display of each element
if level <= 2:
if is_dict:
displays = [value_to_display(k, level=level) + ':' +
value_to_display(v, level=level)
for (k, v) in list(elements)]
else:
displays = [value_to_display(e, level=level)
for e in elements]
if truncate:
displays.append('...')
display = ', '.join(displays)
else:
display = '...'
# Return display
if is_dict:
display = '{' + display + '}'
elif isinstance(value, list):
display = '[' + display + ']'
elif isinstance(value, set):
display = '{' + display + '}'
else:
display = '(' + display + ')'
return display | python | def collections_display(value, level):
"""Display for collections (i.e. list, set, tuple and dict)."""
is_dict = isinstance(value, dict)
is_set = isinstance(value, set)
# Get elements
if is_dict:
elements = iteritems(value)
else:
elements = value
# Truncate values
truncate = False
if level == 1 and len(value) > 10:
elements = islice(elements, 10) if is_dict or is_set else value[:10]
truncate = True
elif level == 2 and len(value) > 5:
elements = islice(elements, 5) if is_dict or is_set else value[:5]
truncate = True
# Get display of each element
if level <= 2:
if is_dict:
displays = [value_to_display(k, level=level) + ':' +
value_to_display(v, level=level)
for (k, v) in list(elements)]
else:
displays = [value_to_display(e, level=level)
for e in elements]
if truncate:
displays.append('...')
display = ', '.join(displays)
else:
display = '...'
# Return display
if is_dict:
display = '{' + display + '}'
elif isinstance(value, list):
display = '[' + display + ']'
elif isinstance(value, set):
display = '{' + display + '}'
else:
display = '(' + display + ')'
return display | Display for collections (i.e. list, set, tuple and dict). | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/nsview.py#L280-L325 |
spyder-ide/spyder-kernels | spyder_kernels/utils/nsview.py | value_to_display | def value_to_display(value, minmax=False, level=0):
"""Convert value for display purpose"""
# To save current Numpy printoptions
np_printoptions = FakeObject
try:
numeric_numpy_types = (int64, int32, int16, int8,
uint64, uint32, uint16, uint8,
float64, float32, float16,
complex128, complex64, bool_)
if ndarray is not FakeObject:
# Save printoptions
np_printoptions = get_printoptions()
# Set max number of elements to show for Numpy arrays
# in our display
set_printoptions(threshold=10)
if isinstance(value, recarray):
if level == 0:
fields = value.names
display = 'Field names: ' + ', '.join(fields)
else:
display = 'Recarray'
elif isinstance(value, MaskedArray):
display = 'Masked array'
elif isinstance(value, ndarray):
if level == 0:
if minmax:
try:
display = 'Min: %r\nMax: %r' % (value.min(), value.max())
except (TypeError, ValueError):
if value.dtype.type in numeric_numpy_types:
display = str(value)
else:
display = default_display(value)
elif value.dtype.type in numeric_numpy_types:
display = str(value)
else:
display = default_display(value)
else:
display = 'Numpy array'
elif any([type(value) == t for t in [list, set, tuple, dict]]):
display = collections_display(value, level+1)
elif isinstance(value, Image):
if level == 0:
display = '%s Mode: %s' % (address(value), value.mode)
else:
display = 'Image'
elif isinstance(value, DataFrame):
if level == 0:
cols = value.columns
if PY2 and len(cols) > 0:
# Get rid of possible BOM utf-8 data present at the
# beginning of a file, which gets attached to the first
# column header when headers are present in the first
# row.
# Fixes Issue 2514
try:
ini_col = to_text_string(cols[0], encoding='utf-8-sig')
except:
ini_col = to_text_string(cols[0])
cols = [ini_col] + [to_text_string(c) for c in cols[1:]]
else:
cols = [to_text_string(c) for c in cols]
display = 'Column names: ' + ', '.join(list(cols))
else:
display = 'Dataframe'
elif isinstance(value, NavigableString):
# Fixes Issue 2448
display = to_text_string(value)
if level > 0:
display = u"'" + display + u"'"
elif isinstance(value, Index):
if level == 0:
try:
display = value._summary()
except AttributeError:
display = value.summary()
else:
display = 'Index'
elif is_binary_string(value):
# We don't apply this to classes that extend string types
# See issue 5636
if is_type_text_string(value):
try:
display = to_text_string(value, 'utf8')
if level > 0:
display = u"'" + display + u"'"
except:
display = value
if level > 0:
display = b"'" + display + b"'"
else:
display = default_display(value)
elif is_text_string(value):
# We don't apply this to classes that extend string types
# See issue 5636
if is_type_text_string(value):
display = value
if level > 0:
display = u"'" + display + u"'"
else:
display = default_display(value)
elif (isinstance(value, datetime.date) or
isinstance(value, datetime.timedelta)):
display = str(value)
elif (isinstance(value, NUMERIC_TYPES) or
isinstance(value, bool) or
isinstance(value, numeric_numpy_types)):
display = repr(value)
else:
if level == 0:
display = default_display(value)
else:
display = default_display(value, with_module=False)
except:
display = default_display(value)
# Truncate display at 70 chars to avoid freezing Spyder
# because of large displays
if len(display) > 70:
if is_binary_string(display):
ellipses = b' ...'
else:
ellipses = u' ...'
display = display[:70].rstrip() + ellipses
# Restore Numpy printoptions
if np_printoptions is not FakeObject:
set_printoptions(**np_printoptions)
return display | python | def value_to_display(value, minmax=False, level=0):
"""Convert value for display purpose"""
# To save current Numpy printoptions
np_printoptions = FakeObject
try:
numeric_numpy_types = (int64, int32, int16, int8,
uint64, uint32, uint16, uint8,
float64, float32, float16,
complex128, complex64, bool_)
if ndarray is not FakeObject:
# Save printoptions
np_printoptions = get_printoptions()
# Set max number of elements to show for Numpy arrays
# in our display
set_printoptions(threshold=10)
if isinstance(value, recarray):
if level == 0:
fields = value.names
display = 'Field names: ' + ', '.join(fields)
else:
display = 'Recarray'
elif isinstance(value, MaskedArray):
display = 'Masked array'
elif isinstance(value, ndarray):
if level == 0:
if minmax:
try:
display = 'Min: %r\nMax: %r' % (value.min(), value.max())
except (TypeError, ValueError):
if value.dtype.type in numeric_numpy_types:
display = str(value)
else:
display = default_display(value)
elif value.dtype.type in numeric_numpy_types:
display = str(value)
else:
display = default_display(value)
else:
display = 'Numpy array'
elif any([type(value) == t for t in [list, set, tuple, dict]]):
display = collections_display(value, level+1)
elif isinstance(value, Image):
if level == 0:
display = '%s Mode: %s' % (address(value), value.mode)
else:
display = 'Image'
elif isinstance(value, DataFrame):
if level == 0:
cols = value.columns
if PY2 and len(cols) > 0:
# Get rid of possible BOM utf-8 data present at the
# beginning of a file, which gets attached to the first
# column header when headers are present in the first
# row.
# Fixes Issue 2514
try:
ini_col = to_text_string(cols[0], encoding='utf-8-sig')
except:
ini_col = to_text_string(cols[0])
cols = [ini_col] + [to_text_string(c) for c in cols[1:]]
else:
cols = [to_text_string(c) for c in cols]
display = 'Column names: ' + ', '.join(list(cols))
else:
display = 'Dataframe'
elif isinstance(value, NavigableString):
# Fixes Issue 2448
display = to_text_string(value)
if level > 0:
display = u"'" + display + u"'"
elif isinstance(value, Index):
if level == 0:
try:
display = value._summary()
except AttributeError:
display = value.summary()
else:
display = 'Index'
elif is_binary_string(value):
# We don't apply this to classes that extend string types
# See issue 5636
if is_type_text_string(value):
try:
display = to_text_string(value, 'utf8')
if level > 0:
display = u"'" + display + u"'"
except:
display = value
if level > 0:
display = b"'" + display + b"'"
else:
display = default_display(value)
elif is_text_string(value):
# We don't apply this to classes that extend string types
# See issue 5636
if is_type_text_string(value):
display = value
if level > 0:
display = u"'" + display + u"'"
else:
display = default_display(value)
elif (isinstance(value, datetime.date) or
isinstance(value, datetime.timedelta)):
display = str(value)
elif (isinstance(value, NUMERIC_TYPES) or
isinstance(value, bool) or
isinstance(value, numeric_numpy_types)):
display = repr(value)
else:
if level == 0:
display = default_display(value)
else:
display = default_display(value, with_module=False)
except:
display = default_display(value)
# Truncate display at 70 chars to avoid freezing Spyder
# because of large displays
if len(display) > 70:
if is_binary_string(display):
ellipses = b' ...'
else:
ellipses = u' ...'
display = display[:70].rstrip() + ellipses
# Restore Numpy printoptions
if np_printoptions is not FakeObject:
set_printoptions(**np_printoptions)
return display | Convert value for display purpose | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/nsview.py#L328-L458 |
spyder-ide/spyder-kernels | spyder_kernels/utils/nsview.py | display_to_value | def display_to_value(value, default_value, ignore_errors=True):
"""Convert back to value"""
from qtpy.compat import from_qvariant
value = from_qvariant(value, to_text_string)
try:
np_dtype = get_numpy_dtype(default_value)
if isinstance(default_value, bool):
# We must test for boolean before NumPy data types
# because `bool` class derives from `int` class
try:
value = bool(float(value))
except ValueError:
value = value.lower() == "true"
elif np_dtype is not None:
if 'complex' in str(type(default_value)):
value = np_dtype(complex(value))
else:
value = np_dtype(value)
elif is_binary_string(default_value):
value = to_binary_string(value, 'utf8')
elif is_text_string(default_value):
value = to_text_string(value)
elif isinstance(default_value, complex):
value = complex(value)
elif isinstance(default_value, float):
value = float(value)
elif isinstance(default_value, int):
try:
value = int(value)
except ValueError:
value = float(value)
elif isinstance(default_value, datetime.datetime):
value = datestr_to_datetime(value)
elif isinstance(default_value, datetime.date):
value = datestr_to_datetime(value).date()
elif isinstance(default_value, datetime.timedelta):
value = str_to_timedelta(value)
elif ignore_errors:
value = try_to_eval(value)
else:
value = eval(value)
except (ValueError, SyntaxError):
if ignore_errors:
value = try_to_eval(value)
else:
return default_value
return value | python | def display_to_value(value, default_value, ignore_errors=True):
"""Convert back to value"""
from qtpy.compat import from_qvariant
value = from_qvariant(value, to_text_string)
try:
np_dtype = get_numpy_dtype(default_value)
if isinstance(default_value, bool):
# We must test for boolean before NumPy data types
# because `bool` class derives from `int` class
try:
value = bool(float(value))
except ValueError:
value = value.lower() == "true"
elif np_dtype is not None:
if 'complex' in str(type(default_value)):
value = np_dtype(complex(value))
else:
value = np_dtype(value)
elif is_binary_string(default_value):
value = to_binary_string(value, 'utf8')
elif is_text_string(default_value):
value = to_text_string(value)
elif isinstance(default_value, complex):
value = complex(value)
elif isinstance(default_value, float):
value = float(value)
elif isinstance(default_value, int):
try:
value = int(value)
except ValueError:
value = float(value)
elif isinstance(default_value, datetime.datetime):
value = datestr_to_datetime(value)
elif isinstance(default_value, datetime.date):
value = datestr_to_datetime(value).date()
elif isinstance(default_value, datetime.timedelta):
value = str_to_timedelta(value)
elif ignore_errors:
value = try_to_eval(value)
else:
value = eval(value)
except (ValueError, SyntaxError):
if ignore_errors:
value = try_to_eval(value)
else:
return default_value
return value | Convert back to value | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/nsview.py#L461-L507 |
spyder-ide/spyder-kernels | spyder_kernels/utils/nsview.py | get_type_string | def get_type_string(item):
"""Return type string of an object."""
if isinstance(item, DataFrame):
return "DataFrame"
if isinstance(item, Index):
return type(item).__name__
if isinstance(item, Series):
return "Series"
found = re.findall(r"<(?:type|class) '(\S*)'>",
to_text_string(type(item)))
if found:
return found[0] | python | def get_type_string(item):
"""Return type string of an object."""
if isinstance(item, DataFrame):
return "DataFrame"
if isinstance(item, Index):
return type(item).__name__
if isinstance(item, Series):
return "Series"
found = re.findall(r"<(?:type|class) '(\S*)'>",
to_text_string(type(item)))
if found:
return found[0] | Return type string of an object. | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/nsview.py#L513-L524 |
spyder-ide/spyder-kernels | spyder_kernels/utils/nsview.py | get_human_readable_type | def get_human_readable_type(item):
"""Return human-readable type string of an item"""
if isinstance(item, (ndarray, MaskedArray)):
return item.dtype.name
elif isinstance(item, Image):
return "Image"
else:
text = get_type_string(item)
if text is None:
text = to_text_string('unknown')
else:
return text[text.find('.')+1:] | python | def get_human_readable_type(item):
"""Return human-readable type string of an item"""
if isinstance(item, (ndarray, MaskedArray)):
return item.dtype.name
elif isinstance(item, Image):
return "Image"
else:
text = get_type_string(item)
if text is None:
text = to_text_string('unknown')
else:
return text[text.find('.')+1:] | Return human-readable type string of an item | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/nsview.py#L533-L544 |
spyder-ide/spyder-kernels | spyder_kernels/utils/nsview.py | is_supported | def is_supported(value, check_all=False, filters=None, iterate=False):
"""Return True if the value is supported, False otherwise"""
assert filters is not None
if value is None:
return True
if not is_editable_type(value):
return False
elif not isinstance(value, filters):
return False
elif iterate:
if isinstance(value, (list, tuple, set)):
valid_count = 0
for val in value:
if is_supported(val, filters=filters, iterate=check_all):
valid_count += 1
if not check_all:
break
return valid_count > 0
elif isinstance(value, dict):
for key, val in list(value.items()):
if not is_supported(key, filters=filters, iterate=check_all) \
or not is_supported(val, filters=filters,
iterate=check_all):
return False
if not check_all:
break
return True | python | def is_supported(value, check_all=False, filters=None, iterate=False):
"""Return True if the value is supported, False otherwise"""
assert filters is not None
if value is None:
return True
if not is_editable_type(value):
return False
elif not isinstance(value, filters):
return False
elif iterate:
if isinstance(value, (list, tuple, set)):
valid_count = 0
for val in value:
if is_supported(val, filters=filters, iterate=check_all):
valid_count += 1
if not check_all:
break
return valid_count > 0
elif isinstance(value, dict):
for key, val in list(value.items()):
if not is_supported(key, filters=filters, iterate=check_all) \
or not is_supported(val, filters=filters,
iterate=check_all):
return False
if not check_all:
break
return True | Return True if the value is supported, False otherwise | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/nsview.py#L551-L577 |
spyder-ide/spyder-kernels | spyder_kernels/utils/nsview.py | globalsfilter | def globalsfilter(input_dict, check_all=False, filters=None,
exclude_private=None, exclude_capitalized=None,
exclude_uppercase=None, exclude_unsupported=None,
excluded_names=None):
"""Keep only objects that can be pickled"""
output_dict = {}
for key, value in list(input_dict.items()):
excluded = (exclude_private and key.startswith('_')) or \
(exclude_capitalized and key[0].isupper()) or \
(exclude_uppercase and key.isupper()
and len(key) > 1 and not key[1:].isdigit()) or \
(key in excluded_names) or \
(exclude_unsupported and \
not is_supported(value, check_all=check_all,
filters=filters))
if not excluded:
output_dict[key] = value
return output_dict | python | def globalsfilter(input_dict, check_all=False, filters=None,
exclude_private=None, exclude_capitalized=None,
exclude_uppercase=None, exclude_unsupported=None,
excluded_names=None):
"""Keep only objects that can be pickled"""
output_dict = {}
for key, value in list(input_dict.items()):
excluded = (exclude_private and key.startswith('_')) or \
(exclude_capitalized and key[0].isupper()) or \
(exclude_uppercase and key.isupper()
and len(key) > 1 and not key[1:].isdigit()) or \
(key in excluded_names) or \
(exclude_unsupported and \
not is_supported(value, check_all=check_all,
filters=filters))
if not excluded:
output_dict[key] = value
return output_dict | Keep only objects that can be pickled | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/nsview.py#L580-L597 |
spyder-ide/spyder-kernels | spyder_kernels/utils/nsview.py | get_supported_types | def get_supported_types():
"""
Return a dictionnary containing types lists supported by the
namespace browser.
Note:
If you update this list, don't forget to update variablexplorer.rst
in spyder-docs
"""
from datetime import date, timedelta
editable_types = [int, float, complex, list, set, dict, tuple, date,
timedelta] + list(TEXT_TYPES) + list(INT_TYPES)
try:
from numpy import ndarray, matrix, generic
editable_types += [ndarray, matrix, generic]
except:
pass
try:
from pandas import DataFrame, Series, DatetimeIndex
editable_types += [DataFrame, Series, Index]
except:
pass
picklable_types = editable_types[:]
try:
from spyder.pil_patch import Image
editable_types.append(Image.Image)
except:
pass
return dict(picklable=picklable_types, editable=editable_types) | python | def get_supported_types():
"""
Return a dictionnary containing types lists supported by the
namespace browser.
Note:
If you update this list, don't forget to update variablexplorer.rst
in spyder-docs
"""
from datetime import date, timedelta
editable_types = [int, float, complex, list, set, dict, tuple, date,
timedelta] + list(TEXT_TYPES) + list(INT_TYPES)
try:
from numpy import ndarray, matrix, generic
editable_types += [ndarray, matrix, generic]
except:
pass
try:
from pandas import DataFrame, Series, DatetimeIndex
editable_types += [DataFrame, Series, Index]
except:
pass
picklable_types = editable_types[:]
try:
from spyder.pil_patch import Image
editable_types.append(Image.Image)
except:
pass
return dict(picklable=picklable_types, editable=editable_types) | Return a dictionnary containing types lists supported by the
namespace browser.
Note:
If you update this list, don't forget to update variablexplorer.rst
in spyder-docs | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/nsview.py#L608-L636 |
spyder-ide/spyder-kernels | spyder_kernels/utils/nsview.py | get_remote_data | def get_remote_data(data, settings, mode, more_excluded_names=None):
"""
Return globals according to filter described in *settings*:
* data: data to be filtered (dictionary)
* settings: variable explorer settings (dictionary)
* mode (string): 'editable' or 'picklable'
* more_excluded_names: additional excluded names (list)
"""
supported_types = get_supported_types()
assert mode in list(supported_types.keys())
excluded_names = settings['excluded_names']
if more_excluded_names is not None:
excluded_names += more_excluded_names
return globalsfilter(data, check_all=settings['check_all'],
filters=tuple(supported_types[mode]),
exclude_private=settings['exclude_private'],
exclude_uppercase=settings['exclude_uppercase'],
exclude_capitalized=settings['exclude_capitalized'],
exclude_unsupported=settings['exclude_unsupported'],
excluded_names=excluded_names) | python | def get_remote_data(data, settings, mode, more_excluded_names=None):
"""
Return globals according to filter described in *settings*:
* data: data to be filtered (dictionary)
* settings: variable explorer settings (dictionary)
* mode (string): 'editable' or 'picklable'
* more_excluded_names: additional excluded names (list)
"""
supported_types = get_supported_types()
assert mode in list(supported_types.keys())
excluded_names = settings['excluded_names']
if more_excluded_names is not None:
excluded_names += more_excluded_names
return globalsfilter(data, check_all=settings['check_all'],
filters=tuple(supported_types[mode]),
exclude_private=settings['exclude_private'],
exclude_uppercase=settings['exclude_uppercase'],
exclude_capitalized=settings['exclude_capitalized'],
exclude_unsupported=settings['exclude_unsupported'],
excluded_names=excluded_names) | Return globals according to filter described in *settings*:
* data: data to be filtered (dictionary)
* settings: variable explorer settings (dictionary)
* mode (string): 'editable' or 'picklable'
* more_excluded_names: additional excluded names (list) | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/nsview.py#L639-L658 |
spyder-ide/spyder-kernels | spyder_kernels/utils/nsview.py | make_remote_view | def make_remote_view(data, settings, more_excluded_names=None):
"""
Make a remote view of dictionary *data*
-> globals explorer
"""
data = get_remote_data(data, settings, mode='editable',
more_excluded_names=more_excluded_names)
remote = {}
for key, value in list(data.items()):
view = value_to_display(value, minmax=settings['minmax'])
remote[key] = {'type': get_human_readable_type(value),
'size': get_size(value),
'color': get_color_name(value),
'view': view}
return remote | python | def make_remote_view(data, settings, more_excluded_names=None):
"""
Make a remote view of dictionary *data*
-> globals explorer
"""
data = get_remote_data(data, settings, mode='editable',
more_excluded_names=more_excluded_names)
remote = {}
for key, value in list(data.items()):
view = value_to_display(value, minmax=settings['minmax'])
remote[key] = {'type': get_human_readable_type(value),
'size': get_size(value),
'color': get_color_name(value),
'view': view}
return remote | Make a remote view of dictionary *data*
-> globals explorer | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/nsview.py#L661-L675 |
spyder-ide/spyder-kernels | spyder_kernels/console/kernel.py | SpyderKernel._pdb_frame | def _pdb_frame(self):
"""Return current Pdb frame if there is any"""
if self._pdb_obj is not None and self._pdb_obj.curframe is not None:
return self._pdb_obj.curframe | python | def _pdb_frame(self):
"""Return current Pdb frame if there is any"""
if self._pdb_obj is not None and self._pdb_obj.curframe is not None:
return self._pdb_obj.curframe | Return current Pdb frame if there is any | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L47-L50 |
spyder-ide/spyder-kernels | spyder_kernels/console/kernel.py | SpyderKernel.get_namespace_view | def get_namespace_view(self):
"""
Return the namespace view
This is a dictionary with the following structure
{'a': {'color': '#800000', 'size': 1, 'type': 'str', 'view': '1'}}
Here:
* 'a' is the variable name
* 'color' is the color used to show it
* 'size' and 'type' are self-evident
* and'view' is its value or the text shown in the last column
"""
from spyder_kernels.utils.nsview import make_remote_view
settings = self.namespace_view_settings
if settings:
ns = self._get_current_namespace()
view = repr(make_remote_view(ns, settings, EXCLUDED_NAMES))
return view
else:
return repr(None) | python | def get_namespace_view(self):
"""
Return the namespace view
This is a dictionary with the following structure
{'a': {'color': '#800000', 'size': 1, 'type': 'str', 'view': '1'}}
Here:
* 'a' is the variable name
* 'color' is the color used to show it
* 'size' and 'type' are self-evident
* and'view' is its value or the text shown in the last column
"""
from spyder_kernels.utils.nsview import make_remote_view
settings = self.namespace_view_settings
if settings:
ns = self._get_current_namespace()
view = repr(make_remote_view(ns, settings, EXCLUDED_NAMES))
return view
else:
return repr(None) | Return the namespace view
This is a dictionary with the following structure
{'a': {'color': '#800000', 'size': 1, 'type': 'str', 'view': '1'}}
Here:
* 'a' is the variable name
* 'color' is the color used to show it
* 'size' and 'type' are self-evident
* and'view' is its value or the text shown in the last column | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L65-L87 |
spyder-ide/spyder-kernels | spyder_kernels/console/kernel.py | SpyderKernel.get_var_properties | def get_var_properties(self):
"""
Get some properties of the variables in the current
namespace
"""
from spyder_kernels.utils.nsview import get_remote_data
settings = self.namespace_view_settings
if settings:
ns = self._get_current_namespace()
data = get_remote_data(ns, settings, mode='editable',
more_excluded_names=EXCLUDED_NAMES)
properties = {}
for name, value in list(data.items()):
properties[name] = {
'is_list': isinstance(value, (tuple, list)),
'is_dict': isinstance(value, dict),
'is_set': isinstance(value, set),
'len': self._get_len(value),
'is_array': self._is_array(value),
'is_image': self._is_image(value),
'is_data_frame': self._is_data_frame(value),
'is_series': self._is_series(value),
'array_shape': self._get_array_shape(value),
'array_ndim': self._get_array_ndim(value)
}
return repr(properties)
else:
return repr(None) | python | def get_var_properties(self):
"""
Get some properties of the variables in the current
namespace
"""
from spyder_kernels.utils.nsview import get_remote_data
settings = self.namespace_view_settings
if settings:
ns = self._get_current_namespace()
data = get_remote_data(ns, settings, mode='editable',
more_excluded_names=EXCLUDED_NAMES)
properties = {}
for name, value in list(data.items()):
properties[name] = {
'is_list': isinstance(value, (tuple, list)),
'is_dict': isinstance(value, dict),
'is_set': isinstance(value, set),
'len': self._get_len(value),
'is_array': self._is_array(value),
'is_image': self._is_image(value),
'is_data_frame': self._is_data_frame(value),
'is_series': self._is_series(value),
'array_shape': self._get_array_shape(value),
'array_ndim': self._get_array_ndim(value)
}
return repr(properties)
else:
return repr(None) | Get some properties of the variables in the current
namespace | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L89-L119 |
spyder-ide/spyder-kernels | spyder_kernels/console/kernel.py | SpyderKernel.send_spyder_msg | def send_spyder_msg(self, spyder_msg_type, content=None, data=None):
"""
Publish custom messages to the Spyder frontend.
Parameters
----------
spyder_msg_type: str
The spyder message type
content: dict
The (JSONable) content of the message
data: any
Any object that is serializable by cloudpickle (should be most
things). Will arrive as cloudpickled bytes in `.buffers[0]`.
"""
import cloudpickle
if content is None:
content = {}
content['spyder_msg_type'] = spyder_msg_type
msg = self.session.send(
self.iopub_socket,
'spyder_msg',
content=content,
buffers=[cloudpickle.dumps(data, protocol=PICKLE_PROTOCOL)],
parent=self._parent_header,
)
self.log.debug(msg) | python | def send_spyder_msg(self, spyder_msg_type, content=None, data=None):
"""
Publish custom messages to the Spyder frontend.
Parameters
----------
spyder_msg_type: str
The spyder message type
content: dict
The (JSONable) content of the message
data: any
Any object that is serializable by cloudpickle (should be most
things). Will arrive as cloudpickled bytes in `.buffers[0]`.
"""
import cloudpickle
if content is None:
content = {}
content['spyder_msg_type'] = spyder_msg_type
msg = self.session.send(
self.iopub_socket,
'spyder_msg',
content=content,
buffers=[cloudpickle.dumps(data, protocol=PICKLE_PROTOCOL)],
parent=self._parent_header,
)
self.log.debug(msg) | Publish custom messages to the Spyder frontend.
Parameters
----------
spyder_msg_type: str
The spyder message type
content: dict
The (JSONable) content of the message
data: any
Any object that is serializable by cloudpickle (should be most
things). Will arrive as cloudpickled bytes in `.buffers[0]`. | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L121-L148 |
spyder-ide/spyder-kernels | spyder_kernels/console/kernel.py | SpyderKernel.get_value | def get_value(self, name):
"""Get the value of a variable"""
ns = self._get_current_namespace()
value = ns[name]
try:
self.send_spyder_msg('data', data=value)
except:
# * There is no need to inform users about
# these errors.
# * value = None makes Spyder to ignore
# petitions to display a value
self.send_spyder_msg('data', data=None)
self._do_publish_pdb_state = False | python | def get_value(self, name):
"""Get the value of a variable"""
ns = self._get_current_namespace()
value = ns[name]
try:
self.send_spyder_msg('data', data=value)
except:
# * There is no need to inform users about
# these errors.
# * value = None makes Spyder to ignore
# petitions to display a value
self.send_spyder_msg('data', data=None)
self._do_publish_pdb_state = False | Get the value of a variable | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L150-L162 |
spyder-ide/spyder-kernels | spyder_kernels/console/kernel.py | SpyderKernel.set_value | def set_value(self, name, value, PY2_frontend):
"""Set the value of a variable"""
import cloudpickle
ns = self._get_reference_namespace(name)
# We send serialized values in a list of one element
# from Spyder to the kernel, to be able to send them
# at all in Python 2
svalue = value[0]
# We need to convert svalue to bytes if the frontend
# runs in Python 2 and the kernel runs in Python 3
if PY2_frontend and not PY2:
svalue = bytes(svalue, 'latin-1')
# Deserialize and set value in namespace
dvalue = cloudpickle.loads(svalue)
ns[name] = dvalue
self.log.debug(ns) | python | def set_value(self, name, value, PY2_frontend):
"""Set the value of a variable"""
import cloudpickle
ns = self._get_reference_namespace(name)
# We send serialized values in a list of one element
# from Spyder to the kernel, to be able to send them
# at all in Python 2
svalue = value[0]
# We need to convert svalue to bytes if the frontend
# runs in Python 2 and the kernel runs in Python 3
if PY2_frontend and not PY2:
svalue = bytes(svalue, 'latin-1')
# Deserialize and set value in namespace
dvalue = cloudpickle.loads(svalue)
ns[name] = dvalue
self.log.debug(ns) | Set the value of a variable | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L164-L182 |
spyder-ide/spyder-kernels | spyder_kernels/console/kernel.py | SpyderKernel.remove_value | def remove_value(self, name):
"""Remove a variable"""
ns = self._get_reference_namespace(name)
ns.pop(name) | python | def remove_value(self, name):
"""Remove a variable"""
ns = self._get_reference_namespace(name)
ns.pop(name) | Remove a variable | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L184-L187 |
spyder-ide/spyder-kernels | spyder_kernels/console/kernel.py | SpyderKernel.copy_value | def copy_value(self, orig_name, new_name):
"""Copy a variable"""
ns = self._get_reference_namespace(orig_name)
ns[new_name] = ns[orig_name] | python | def copy_value(self, orig_name, new_name):
"""Copy a variable"""
ns = self._get_reference_namespace(orig_name)
ns[new_name] = ns[orig_name] | Copy a variable | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L189-L192 |
spyder-ide/spyder-kernels | spyder_kernels/console/kernel.py | SpyderKernel.load_data | def load_data(self, filename, ext):
"""Load data from filename"""
from spyder_kernels.utils.iofuncs import iofunctions
from spyder_kernels.utils.misc import fix_reference_name
glbs = self._mglobals()
load_func = iofunctions.load_funcs[ext]
data, error_message = load_func(filename)
if error_message:
return error_message
for key in list(data.keys()):
new_key = fix_reference_name(key, blacklist=list(glbs.keys()))
if new_key != key:
data[new_key] = data.pop(key)
try:
glbs.update(data)
except Exception as error:
return str(error)
return None | python | def load_data(self, filename, ext):
"""Load data from filename"""
from spyder_kernels.utils.iofuncs import iofunctions
from spyder_kernels.utils.misc import fix_reference_name
glbs = self._mglobals()
load_func = iofunctions.load_funcs[ext]
data, error_message = load_func(filename)
if error_message:
return error_message
for key in list(data.keys()):
new_key = fix_reference_name(key, blacklist=list(glbs.keys()))
if new_key != key:
data[new_key] = data.pop(key)
try:
glbs.update(data)
except Exception as error:
return str(error)
return None | Load data from filename | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L194-L217 |
spyder-ide/spyder-kernels | spyder_kernels/console/kernel.py | SpyderKernel.save_namespace | def save_namespace(self, filename):
"""Save namespace into filename"""
from spyder_kernels.utils.nsview import get_remote_data
from spyder_kernels.utils.iofuncs import iofunctions
ns = self._get_current_namespace()
settings = self.namespace_view_settings
data = get_remote_data(ns, settings, mode='picklable',
more_excluded_names=EXCLUDED_NAMES).copy()
return iofunctions.save(data, filename) | python | def save_namespace(self, filename):
"""Save namespace into filename"""
from spyder_kernels.utils.nsview import get_remote_data
from spyder_kernels.utils.iofuncs import iofunctions
ns = self._get_current_namespace()
settings = self.namespace_view_settings
data = get_remote_data(ns, settings, mode='picklable',
more_excluded_names=EXCLUDED_NAMES).copy()
return iofunctions.save(data, filename) | Save namespace into filename | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L219-L228 |
spyder-ide/spyder-kernels | spyder_kernels/console/kernel.py | SpyderKernel.publish_pdb_state | def publish_pdb_state(self):
"""
Publish Variable Explorer state and Pdb step through
send_spyder_msg.
"""
if self._pdb_obj and self._do_publish_pdb_state:
state = dict(namespace_view = self.get_namespace_view(),
var_properties = self.get_var_properties(),
step = self._pdb_step)
self.send_spyder_msg('pdb_state', content={'pdb_state': state})
self._do_publish_pdb_state = True | python | def publish_pdb_state(self):
"""
Publish Variable Explorer state and Pdb step through
send_spyder_msg.
"""
if self._pdb_obj and self._do_publish_pdb_state:
state = dict(namespace_view = self.get_namespace_view(),
var_properties = self.get_var_properties(),
step = self._pdb_step)
self.send_spyder_msg('pdb_state', content={'pdb_state': state})
self._do_publish_pdb_state = True | Publish Variable Explorer state and Pdb step through
send_spyder_msg. | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L231-L241 |
spyder-ide/spyder-kernels | spyder_kernels/console/kernel.py | SpyderKernel.is_defined | def is_defined(self, obj, force_import=False):
"""Return True if object is defined in current namespace"""
from spyder_kernels.utils.dochelpers import isdefined
ns = self._get_current_namespace(with_magics=True)
return isdefined(obj, force_import=force_import, namespace=ns) | python | def is_defined(self, obj, force_import=False):
"""Return True if object is defined in current namespace"""
from spyder_kernels.utils.dochelpers import isdefined
ns = self._get_current_namespace(with_magics=True)
return isdefined(obj, force_import=force_import, namespace=ns) | Return True if object is defined in current namespace | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L254-L259 |
spyder-ide/spyder-kernels | spyder_kernels/console/kernel.py | SpyderKernel.get_doc | def get_doc(self, objtxt):
"""Get object documentation dictionary"""
try:
import matplotlib
matplotlib.rcParams['docstring.hardcopy'] = True
except:
pass
from spyder_kernels.utils.dochelpers import getdoc
obj, valid = self._eval(objtxt)
if valid:
return getdoc(obj) | python | def get_doc(self, objtxt):
"""Get object documentation dictionary"""
try:
import matplotlib
matplotlib.rcParams['docstring.hardcopy'] = True
except:
pass
from spyder_kernels.utils.dochelpers import getdoc
obj, valid = self._eval(objtxt)
if valid:
return getdoc(obj) | Get object documentation dictionary | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L261-L272 |
spyder-ide/spyder-kernels | spyder_kernels/console/kernel.py | SpyderKernel.get_source | def get_source(self, objtxt):
"""Get object source"""
from spyder_kernels.utils.dochelpers import getsource
obj, valid = self._eval(objtxt)
if valid:
return getsource(obj) | python | def get_source(self, objtxt):
"""Get object source"""
from spyder_kernels.utils.dochelpers import getsource
obj, valid = self._eval(objtxt)
if valid:
return getsource(obj) | Get object source | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L274-L280 |
spyder-ide/spyder-kernels | spyder_kernels/console/kernel.py | SpyderKernel._get_current_namespace | def _get_current_namespace(self, with_magics=False):
"""
Return current namespace
This is globals() if not debugging, or a dictionary containing
both locals() and globals() for current frame when debugging
"""
ns = {}
glbs = self._mglobals()
if self._pdb_frame is None:
ns.update(glbs)
else:
ns.update(glbs)
ns.update(self._pdb_locals)
# Add magics to ns so we can show help about them on the Help
# plugin
if with_magics:
line_magics = self.shell.magics_manager.magics['line']
cell_magics = self.shell.magics_manager.magics['cell']
ns.update(line_magics)
ns.update(cell_magics)
return ns | python | def _get_current_namespace(self, with_magics=False):
"""
Return current namespace
This is globals() if not debugging, or a dictionary containing
both locals() and globals() for current frame when debugging
"""
ns = {}
glbs = self._mglobals()
if self._pdb_frame is None:
ns.update(glbs)
else:
ns.update(glbs)
ns.update(self._pdb_locals)
# Add magics to ns so we can show help about them on the Help
# plugin
if with_magics:
line_magics = self.shell.magics_manager.magics['line']
cell_magics = self.shell.magics_manager.magics['cell']
ns.update(line_magics)
ns.update(cell_magics)
return ns | Return current namespace
This is globals() if not debugging, or a dictionary containing
both locals() and globals() for current frame when debugging | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L310-L334 |
spyder-ide/spyder-kernels | spyder_kernels/console/kernel.py | SpyderKernel._get_reference_namespace | def _get_reference_namespace(self, name):
"""
Return namespace where reference name is defined
It returns the globals() if reference has not yet been defined
"""
glbs = self._mglobals()
if self._pdb_frame is None:
return glbs
else:
lcls = self._pdb_locals
if name in lcls:
return lcls
else:
return glbs | python | def _get_reference_namespace(self, name):
"""
Return namespace where reference name is defined
It returns the globals() if reference has not yet been defined
"""
glbs = self._mglobals()
if self._pdb_frame is None:
return glbs
else:
lcls = self._pdb_locals
if name in lcls:
return lcls
else:
return glbs | Return namespace where reference name is defined
It returns the globals() if reference has not yet been defined | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L336-L350 |
spyder-ide/spyder-kernels | spyder_kernels/console/kernel.py | SpyderKernel._mglobals | def _mglobals(self):
"""Return current globals -- handles Pdb frames"""
if self._pdb_frame is not None:
return self._pdb_frame.f_globals
else:
return self.shell.user_ns | python | def _mglobals(self):
"""Return current globals -- handles Pdb frames"""
if self._pdb_frame is not None:
return self._pdb_frame.f_globals
else:
return self.shell.user_ns | Return current globals -- handles Pdb frames | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L352-L357 |
spyder-ide/spyder-kernels | spyder_kernels/console/kernel.py | SpyderKernel._is_image | def _is_image(self, var):
"""Return True if variable is a PIL.Image image"""
try:
from PIL import Image
return isinstance(var, Image.Image)
except:
return False | python | def _is_image(self, var):
"""Return True if variable is a PIL.Image image"""
try:
from PIL import Image
return isinstance(var, Image.Image)
except:
return False | Return True if variable is a PIL.Image image | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L374-L380 |
spyder-ide/spyder-kernels | spyder_kernels/console/kernel.py | SpyderKernel._set_spyder_breakpoints | def _set_spyder_breakpoints(self, breakpoints):
"""Set all Spyder breakpoints in an active pdb session"""
if not self._pdb_obj:
return
# Breakpoints come serialized from Spyder. We send them
# in a list of one element to be able to send them at all
# in Python 2
serialized_breakpoints = breakpoints[0]
breakpoints = pickle.loads(serialized_breakpoints)
self._pdb_obj.set_spyder_breakpoints(breakpoints) | python | def _set_spyder_breakpoints(self, breakpoints):
"""Set all Spyder breakpoints in an active pdb session"""
if not self._pdb_obj:
return
# Breakpoints come serialized from Spyder. We send them
# in a list of one element to be able to send them at all
# in Python 2
serialized_breakpoints = breakpoints[0]
breakpoints = pickle.loads(serialized_breakpoints)
self._pdb_obj.set_spyder_breakpoints(breakpoints) | Set all Spyder breakpoints in an active pdb session | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L423-L434 |
spyder-ide/spyder-kernels | spyder_kernels/console/kernel.py | SpyderKernel._eval | def _eval(self, text):
"""
Evaluate text and return (obj, valid)
where *obj* is the object represented by *text*
and *valid* is True if object evaluation did not raise any exception
"""
from spyder_kernels.py3compat import is_text_string
assert is_text_string(text)
ns = self._get_current_namespace(with_magics=True)
try:
return eval(text, ns), True
except:
return None, False | python | def _eval(self, text):
"""
Evaluate text and return (obj, valid)
where *obj* is the object represented by *text*
and *valid* is True if object evaluation did not raise any exception
"""
from spyder_kernels.py3compat import is_text_string
assert is_text_string(text)
ns = self._get_current_namespace(with_magics=True)
try:
return eval(text, ns), True
except:
return None, False | Evaluate text and return (obj, valid)
where *obj* is the object represented by *text*
and *valid* is True if object evaluation did not raise any exception | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L441-L454 |
spyder-ide/spyder-kernels | spyder_kernels/console/kernel.py | SpyderKernel._set_mpl_backend | def _set_mpl_backend(self, backend, pylab=False):
"""
Set a backend for Matplotlib.
backend: A parameter that can be passed to %matplotlib
(e.g. 'inline' or 'tk').
"""
import traceback
from IPython.core.getipython import get_ipython
generic_error = (
"\n" + "="*73 + "\n"
"NOTE: The following error appeared when setting "
"your Matplotlib backend!!\n" + "="*73 + "\n\n"
"{0}"
)
magic = 'pylab' if pylab else 'matplotlib'
error = None
try:
get_ipython().run_line_magic(magic, backend)
except RuntimeError as err:
# This catches errors generated by ipykernel when
# trying to set a backend. See issue 5541
if "GUI eventloops" in str(err):
import matplotlib
previous_backend = matplotlib.get_backend()
if not backend in previous_backend.lower():
# Only inform about an error if the user selected backend
# and the one set by Matplotlib are different. Else this
# message is very confusing.
error = (
"\n"
"NOTE: Spyder *can't* set your selected Matplotlib "
"backend because there is a previous backend already "
"in use.\n\n"
"Your backend will be {0}".format(previous_backend)
)
del matplotlib
# This covers other RuntimeError's
else:
error = generic_error.format(traceback.format_exc())
except Exception:
error = generic_error.format(traceback.format_exc())
self._mpl_backend_error = error | python | def _set_mpl_backend(self, backend, pylab=False):
"""
Set a backend for Matplotlib.
backend: A parameter that can be passed to %matplotlib
(e.g. 'inline' or 'tk').
"""
import traceback
from IPython.core.getipython import get_ipython
generic_error = (
"\n" + "="*73 + "\n"
"NOTE: The following error appeared when setting "
"your Matplotlib backend!!\n" + "="*73 + "\n\n"
"{0}"
)
magic = 'pylab' if pylab else 'matplotlib'
error = None
try:
get_ipython().run_line_magic(magic, backend)
except RuntimeError as err:
# This catches errors generated by ipykernel when
# trying to set a backend. See issue 5541
if "GUI eventloops" in str(err):
import matplotlib
previous_backend = matplotlib.get_backend()
if not backend in previous_backend.lower():
# Only inform about an error if the user selected backend
# and the one set by Matplotlib are different. Else this
# message is very confusing.
error = (
"\n"
"NOTE: Spyder *can't* set your selected Matplotlib "
"backend because there is a previous backend already "
"in use.\n\n"
"Your backend will be {0}".format(previous_backend)
)
del matplotlib
# This covers other RuntimeError's
else:
error = generic_error.format(traceback.format_exc())
except Exception:
error = generic_error.format(traceback.format_exc())
self._mpl_backend_error = error | Set a backend for Matplotlib.
backend: A parameter that can be passed to %matplotlib
(e.g. 'inline' or 'tk'). | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L457-L503 |
spyder-ide/spyder-kernels | spyder_kernels/console/kernel.py | SpyderKernel._load_autoreload_magic | def _load_autoreload_magic(self):
"""Load %autoreload magic."""
from IPython.core.getipython import get_ipython
try:
get_ipython().run_line_magic('reload_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
except Exception:
pass | python | def _load_autoreload_magic(self):
"""Load %autoreload magic."""
from IPython.core.getipython import get_ipython
try:
get_ipython().run_line_magic('reload_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
except Exception:
pass | Load %autoreload magic. | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L511-L518 |
spyder-ide/spyder-kernels | spyder_kernels/console/kernel.py | SpyderKernel._load_wurlitzer | def _load_wurlitzer(self):
"""Load wurlitzer extension."""
# Wurlitzer has no effect on Windows
if not os.name == 'nt':
from IPython.core.getipython import get_ipython
# Enclose this in a try/except because if it fails the
# console will be totally unusable.
# Fixes spyder-ide/spyder#8668
try:
get_ipython().run_line_magic('reload_ext', 'wurlitzer')
except Exception:
pass | python | def _load_wurlitzer(self):
"""Load wurlitzer extension."""
# Wurlitzer has no effect on Windows
if not os.name == 'nt':
from IPython.core.getipython import get_ipython
# Enclose this in a try/except because if it fails the
# console will be totally unusable.
# Fixes spyder-ide/spyder#8668
try:
get_ipython().run_line_magic('reload_ext', 'wurlitzer')
except Exception:
pass | Load wurlitzer extension. | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/kernel.py#L520-L531 |
spyder-ide/spyder-kernels | spyder_kernels/console/start.py | import_spydercustomize | def import_spydercustomize():
"""Import our customizations into the kernel."""
here = osp.dirname(__file__)
parent = osp.dirname(here)
customize_dir = osp.join(parent, 'customize')
# Remove current directory from sys.path to prevent kernel
# crashes when people name Python files or modules with
# the same name as standard library modules.
# See spyder-ide/spyder#8007
while '' in sys.path:
sys.path.remove('')
# Import our customizations
site.addsitedir(customize_dir)
import spydercustomize
# Remove our customize path from sys.path
try:
sys.path.remove(customize_dir)
except ValueError:
pass | python | def import_spydercustomize():
"""Import our customizations into the kernel."""
here = osp.dirname(__file__)
parent = osp.dirname(here)
customize_dir = osp.join(parent, 'customize')
# Remove current directory from sys.path to prevent kernel
# crashes when people name Python files or modules with
# the same name as standard library modules.
# See spyder-ide/spyder#8007
while '' in sys.path:
sys.path.remove('')
# Import our customizations
site.addsitedir(customize_dir)
import spydercustomize
# Remove our customize path from sys.path
try:
sys.path.remove(customize_dir)
except ValueError:
pass | Import our customizations into the kernel. | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/start.py#L24-L45 |
spyder-ide/spyder-kernels | spyder_kernels/console/start.py | kernel_config | def kernel_config():
"""Create a config object with IPython kernel options."""
import ipykernel
from IPython.core.application import get_ipython_dir
from traitlets.config.loader import Config, load_pyconfig_files
# ---- IPython config ----
try:
profile_path = osp.join(get_ipython_dir(), 'profile_default')
cfg = load_pyconfig_files(['ipython_config.py',
'ipython_kernel_config.py'],
profile_path)
except:
cfg = Config()
# ---- Spyder config ----
spy_cfg = Config()
# Enable/disable certain features for testing
testing = os.environ.get('SPY_TESTING') == 'True'
if testing:
# Don't load nor save history in our IPython consoles.
spy_cfg.HistoryAccessor.enabled = False
# Until we implement Issue 1052
spy_cfg.InteractiveShell.xmode = 'Plain'
# Jedi completer. It's only available in Python 3
jedi_o = os.environ.get('SPY_JEDI_O') == 'True'
if not PY2:
spy_cfg.IPCompleter.use_jedi = jedi_o
# Run lines of code at startup
run_lines_o = os.environ.get('SPY_RUN_LINES_O')
if run_lines_o is not None:
spy_cfg.IPKernelApp.exec_lines = [x.strip() for x in run_lines_o.split(';')]
else:
spy_cfg.IPKernelApp.exec_lines = []
# Clean terminal arguments input
clear_argv = "import sys;sys.argv = [''];del sys"
spy_cfg.IPKernelApp.exec_lines.append(clear_argv)
# Load %autoreload magic
spy_cfg.IPKernelApp.exec_lines.append(
"get_ipython().kernel._load_autoreload_magic()")
# Load wurlitzer extension
spy_cfg.IPKernelApp.exec_lines.append(
"get_ipython().kernel._load_wurlitzer()")
# Default inline backend configuration
    # This is useful to have when people don't
# use our config system to configure the
# inline backend but want to use
# '%matplotlib inline' at runtime
if LooseVersion(ipykernel.__version__) < LooseVersion('4.5'):
dpi_option = 'savefig.dpi'
else:
dpi_option = 'figure.dpi'
spy_cfg.InlineBackend.rc = {'figure.figsize': (6.0, 4.0),
dpi_option: 72,
'font.size': 10,
'figure.subplot.bottom': .125,
'figure.facecolor': 'white',
'figure.edgecolor': 'white'}
# Pylab configuration
mpl_backend = None
if is_module_installed('matplotlib'):
# Set Matplotlib backend with Spyder options
pylab_o = os.environ.get('SPY_PYLAB_O')
backend_o = os.environ.get('SPY_BACKEND_O')
if pylab_o == 'True' and backend_o is not None:
# Select the automatic backend
if backend_o == '1':
if is_module_installed('PyQt5'):
auto_backend = 'qt5'
elif is_module_installed('PyQt4'):
auto_backend = 'qt4'
elif is_module_installed('_tkinter'):
auto_backend = 'tk'
else:
auto_backend = 'inline'
else:
auto_backend = ''
# Mapping of Spyder options to backends
backends = {'0': 'inline',
'1': auto_backend,
'2': 'qt5',
'3': 'qt4',
'4': 'osx',
'5': 'gtk3',
'6': 'gtk',
'7': 'wx',
'8': 'tk'}
# Select backend
mpl_backend = backends[backend_o]
# Inline backend configuration
if mpl_backend == 'inline':
# Figure format
format_o = os.environ.get('SPY_FORMAT_O')
formats = {'0': 'png',
'1': 'svg'}
if format_o is not None:
spy_cfg.InlineBackend.figure_format = formats[format_o]
# Resolution
resolution_o = os.environ.get('SPY_RESOLUTION_O')
if resolution_o is not None:
spy_cfg.InlineBackend.rc[dpi_option] = float(resolution_o)
# Figure size
width_o = float(os.environ.get('SPY_WIDTH_O'))
height_o = float(os.environ.get('SPY_HEIGHT_O'))
if width_o is not None and height_o is not None:
spy_cfg.InlineBackend.rc['figure.figsize'] = (width_o,
height_o)
# Print figure kwargs
bbox_inches_o = os.environ.get('SPY_BBOX_INCHES_O')
bbox_inches = 'tight' if bbox_inches_o == 'True' else None
spy_cfg.InlineBackend.print_figure_kwargs.update(
{'bbox_inches': bbox_inches})
else:
# Set Matplotlib backend to inline for external kernels.
# Fixes issue 108
mpl_backend = 'inline'
# Automatically load Pylab and Numpy, or only set Matplotlib
# backend
autoload_pylab_o = os.environ.get('SPY_AUTOLOAD_PYLAB_O') == 'True'
command = "get_ipython().kernel._set_mpl_backend('{0}', {1})"
spy_cfg.IPKernelApp.exec_lines.append(
command.format(mpl_backend, autoload_pylab_o))
# Enable Cython magic
run_cython = os.environ.get('SPY_RUN_CYTHON') == 'True'
if run_cython and is_module_installed('Cython'):
spy_cfg.IPKernelApp.exec_lines.append('%reload_ext Cython')
# Run a file at startup
use_file_o = os.environ.get('SPY_USE_FILE_O')
run_file_o = os.environ.get('SPY_RUN_FILE_O')
if use_file_o == 'True' and run_file_o is not None:
spy_cfg.IPKernelApp.file_to_run = run_file_o
# Autocall
autocall_o = os.environ.get('SPY_AUTOCALL_O')
if autocall_o is not None:
spy_cfg.ZMQInteractiveShell.autocall = int(autocall_o)
# To handle the banner by ourselves in IPython 3+
spy_cfg.ZMQInteractiveShell.banner1 = ''
# Greedy completer
greedy_o = os.environ.get('SPY_GREEDY_O') == 'True'
spy_cfg.IPCompleter.greedy = greedy_o
# Sympy loading
sympy_o = os.environ.get('SPY_SYMPY_O') == 'True'
if sympy_o and is_module_installed('sympy'):
lines = sympy_config(mpl_backend)
spy_cfg.IPKernelApp.exec_lines.append(lines)
# Merge IPython and Spyder configs. Spyder prefs will have prevalence
# over IPython ones
cfg._merge(spy_cfg)
return cfg | python | def kernel_config():
"""Create a config object with IPython kernel options."""
import ipykernel
from IPython.core.application import get_ipython_dir
from traitlets.config.loader import Config, load_pyconfig_files
# ---- IPython config ----
try:
profile_path = osp.join(get_ipython_dir(), 'profile_default')
cfg = load_pyconfig_files(['ipython_config.py',
'ipython_kernel_config.py'],
profile_path)
except:
cfg = Config()
# ---- Spyder config ----
spy_cfg = Config()
# Enable/disable certain features for testing
testing = os.environ.get('SPY_TESTING') == 'True'
if testing:
# Don't load nor save history in our IPython consoles.
spy_cfg.HistoryAccessor.enabled = False
# Until we implement Issue 1052
spy_cfg.InteractiveShell.xmode = 'Plain'
# Jedi completer. It's only available in Python 3
jedi_o = os.environ.get('SPY_JEDI_O') == 'True'
if not PY2:
spy_cfg.IPCompleter.use_jedi = jedi_o
# Run lines of code at startup
run_lines_o = os.environ.get('SPY_RUN_LINES_O')
if run_lines_o is not None:
spy_cfg.IPKernelApp.exec_lines = [x.strip() for x in run_lines_o.split(';')]
else:
spy_cfg.IPKernelApp.exec_lines = []
# Clean terminal arguments input
clear_argv = "import sys;sys.argv = [''];del sys"
spy_cfg.IPKernelApp.exec_lines.append(clear_argv)
# Load %autoreload magic
spy_cfg.IPKernelApp.exec_lines.append(
"get_ipython().kernel._load_autoreload_magic()")
# Load wurlitzer extension
spy_cfg.IPKernelApp.exec_lines.append(
"get_ipython().kernel._load_wurlitzer()")
# Default inline backend configuration
    # This is useful to have when people don't
# use our config system to configure the
# inline backend but want to use
# '%matplotlib inline' at runtime
if LooseVersion(ipykernel.__version__) < LooseVersion('4.5'):
dpi_option = 'savefig.dpi'
else:
dpi_option = 'figure.dpi'
spy_cfg.InlineBackend.rc = {'figure.figsize': (6.0, 4.0),
dpi_option: 72,
'font.size': 10,
'figure.subplot.bottom': .125,
'figure.facecolor': 'white',
'figure.edgecolor': 'white'}
# Pylab configuration
mpl_backend = None
if is_module_installed('matplotlib'):
# Set Matplotlib backend with Spyder options
pylab_o = os.environ.get('SPY_PYLAB_O')
backend_o = os.environ.get('SPY_BACKEND_O')
if pylab_o == 'True' and backend_o is not None:
# Select the automatic backend
if backend_o == '1':
if is_module_installed('PyQt5'):
auto_backend = 'qt5'
elif is_module_installed('PyQt4'):
auto_backend = 'qt4'
elif is_module_installed('_tkinter'):
auto_backend = 'tk'
else:
auto_backend = 'inline'
else:
auto_backend = ''
# Mapping of Spyder options to backends
backends = {'0': 'inline',
'1': auto_backend,
'2': 'qt5',
'3': 'qt4',
'4': 'osx',
'5': 'gtk3',
'6': 'gtk',
'7': 'wx',
'8': 'tk'}
# Select backend
mpl_backend = backends[backend_o]
# Inline backend configuration
if mpl_backend == 'inline':
# Figure format
format_o = os.environ.get('SPY_FORMAT_O')
formats = {'0': 'png',
'1': 'svg'}
if format_o is not None:
spy_cfg.InlineBackend.figure_format = formats[format_o]
# Resolution
resolution_o = os.environ.get('SPY_RESOLUTION_O')
if resolution_o is not None:
spy_cfg.InlineBackend.rc[dpi_option] = float(resolution_o)
# Figure size
width_o = float(os.environ.get('SPY_WIDTH_O'))
height_o = float(os.environ.get('SPY_HEIGHT_O'))
if width_o is not None and height_o is not None:
spy_cfg.InlineBackend.rc['figure.figsize'] = (width_o,
height_o)
# Print figure kwargs
bbox_inches_o = os.environ.get('SPY_BBOX_INCHES_O')
bbox_inches = 'tight' if bbox_inches_o == 'True' else None
spy_cfg.InlineBackend.print_figure_kwargs.update(
{'bbox_inches': bbox_inches})
else:
# Set Matplotlib backend to inline for external kernels.
# Fixes issue 108
mpl_backend = 'inline'
# Automatically load Pylab and Numpy, or only set Matplotlib
# backend
autoload_pylab_o = os.environ.get('SPY_AUTOLOAD_PYLAB_O') == 'True'
command = "get_ipython().kernel._set_mpl_backend('{0}', {1})"
spy_cfg.IPKernelApp.exec_lines.append(
command.format(mpl_backend, autoload_pylab_o))
# Enable Cython magic
run_cython = os.environ.get('SPY_RUN_CYTHON') == 'True'
if run_cython and is_module_installed('Cython'):
spy_cfg.IPKernelApp.exec_lines.append('%reload_ext Cython')
# Run a file at startup
use_file_o = os.environ.get('SPY_USE_FILE_O')
run_file_o = os.environ.get('SPY_RUN_FILE_O')
if use_file_o == 'True' and run_file_o is not None:
spy_cfg.IPKernelApp.file_to_run = run_file_o
# Autocall
autocall_o = os.environ.get('SPY_AUTOCALL_O')
if autocall_o is not None:
spy_cfg.ZMQInteractiveShell.autocall = int(autocall_o)
# To handle the banner by ourselves in IPython 3+
spy_cfg.ZMQInteractiveShell.banner1 = ''
# Greedy completer
greedy_o = os.environ.get('SPY_GREEDY_O') == 'True'
spy_cfg.IPCompleter.greedy = greedy_o
# Sympy loading
sympy_o = os.environ.get('SPY_SYMPY_O') == 'True'
if sympy_o and is_module_installed('sympy'):
lines = sympy_config(mpl_backend)
spy_cfg.IPKernelApp.exec_lines.append(lines)
# Merge IPython and Spyder configs. Spyder prefs will have prevalence
# over IPython ones
cfg._merge(spy_cfg)
return cfg | Create a config object with IPython kernel options. | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/start.py#L78-L250 |
spyder-ide/spyder-kernels | spyder_kernels/console/start.py | varexp | def varexp(line):
"""
Spyder's variable explorer magic
Used to generate plots, histograms and images of the variables displayed
on it.
"""
ip = get_ipython() #analysis:ignore
funcname, name = line.split()
try:
import guiqwt.pyplot as pyplot
except:
import matplotlib.pyplot as pyplot
__fig__ = pyplot.figure();
__items__ = getattr(pyplot, funcname[2:])(ip.user_ns[name])
pyplot.show()
del __fig__, __items__ | python | def varexp(line):
"""
Spyder's variable explorer magic
Used to generate plots, histograms and images of the variables displayed
on it.
"""
ip = get_ipython() #analysis:ignore
funcname, name = line.split()
try:
import guiqwt.pyplot as pyplot
except:
import matplotlib.pyplot as pyplot
__fig__ = pyplot.figure();
__items__ = getattr(pyplot, funcname[2:])(ip.user_ns[name])
pyplot.show()
del __fig__, __items__ | Spyder's variable explorer magic
Used to generate plots, histograms and images of the variables displayed
on it. | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/console/start.py#L253-L269 |
spyder-ide/spyder-kernels | spyder_kernels/utils/misc.py | fix_reference_name | def fix_reference_name(name, blacklist=None):
"""Return a syntax-valid Python reference name from an arbitrary name"""
name = "".join(re.split(r'[^0-9a-zA-Z_]', name))
while name and not re.match(r'([a-zA-Z]+[0-9a-zA-Z_]*)$', name):
if not re.match(r'[a-zA-Z]', name[0]):
name = name[1:]
continue
name = str(name)
if not name:
name = "data"
if blacklist is not None and name in blacklist:
get_new_name = lambda index: name+('%03d' % index)
index = 0
while get_new_name(index) in blacklist:
index += 1
name = get_new_name(index)
return name | python | def fix_reference_name(name, blacklist=None):
"""Return a syntax-valid Python reference name from an arbitrary name"""
name = "".join(re.split(r'[^0-9a-zA-Z_]', name))
while name and not re.match(r'([a-zA-Z]+[0-9a-zA-Z_]*)$', name):
if not re.match(r'[a-zA-Z]', name[0]):
name = name[1:]
continue
name = str(name)
if not name:
name = "data"
if blacklist is not None and name in blacklist:
get_new_name = lambda index: name+('%03d' % index)
index = 0
while get_new_name(index) in blacklist:
index += 1
name = get_new_name(index)
return name | Return a syntax-valid Python reference name from an arbitrary name | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/misc.py#L13-L29 |
spyder-ide/spyder-kernels | spyder_kernels/customize/spydercustomize.py | user_return | def user_return(self, frame, return_value):
"""This function is called when a return trap is set here."""
# This is useful when debugging in an active interpreter (otherwise,
# the debugger will stop before reaching the target file)
if self._wait_for_mainpyfile:
if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
or frame.f_lineno<= 0):
return
self._wait_for_mainpyfile = 0
self._old_Pdb_user_return(frame, return_value) | python | def user_return(self, frame, return_value):
"""This function is called when a return trap is set here."""
# This is useful when debugging in an active interpreter (otherwise,
# the debugger will stop before reaching the target file)
if self._wait_for_mainpyfile:
if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
or frame.f_lineno<= 0):
return
self._wait_for_mainpyfile = 0
self._old_Pdb_user_return(frame, return_value) | This function is called when a return trap is set here. | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/customize/spydercustomize.py#L424-L433 |
spyder-ide/spyder-kernels | spyder_kernels/customize/spydercustomize.py | post_mortem_excepthook | def post_mortem_excepthook(type, value, tb):
"""
For post mortem exception handling, print a banner and enable post
mortem debugging.
"""
clear_post_mortem()
ipython_shell = get_ipython()
ipython_shell.showtraceback((type, value, tb))
p = pdb.Pdb(ipython_shell.colors)
if not type == SyntaxError:
# wait for stderr to print (stderr.flush does not work in this case)
time.sleep(0.1)
_print('*' * 40)
_print('Entering post mortem debugging...')
_print('*' * 40)
# add ability to move between frames
p.send_initial_notification = False
p.reset()
frame = tb.tb_frame
prev = frame
while frame.f_back:
prev = frame
frame = frame.f_back
frame = prev
# wait for stdout to print
time.sleep(0.1)
p.interaction(frame, tb) | python | def post_mortem_excepthook(type, value, tb):
"""
For post mortem exception handling, print a banner and enable post
mortem debugging.
"""
clear_post_mortem()
ipython_shell = get_ipython()
ipython_shell.showtraceback((type, value, tb))
p = pdb.Pdb(ipython_shell.colors)
if not type == SyntaxError:
# wait for stderr to print (stderr.flush does not work in this case)
time.sleep(0.1)
_print('*' * 40)
_print('Entering post mortem debugging...')
_print('*' * 40)
# add ability to move between frames
p.send_initial_notification = False
p.reset()
frame = tb.tb_frame
prev = frame
while frame.f_back:
prev = frame
frame = frame.f_back
frame = prev
# wait for stdout to print
time.sleep(0.1)
p.interaction(frame, tb) | For post mortem exception handling, print a banner and enable post
mortem debugging. | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/customize/spydercustomize.py#L731-L758 |
spyder-ide/spyder-kernels | spyder_kernels/customize/spydercustomize.py | set_post_mortem | def set_post_mortem():
"""
Enable the post mortem debugging excepthook.
"""
def ipython_post_mortem_debug(shell, etype, evalue, tb,
tb_offset=None):
post_mortem_excepthook(etype, evalue, tb)
ipython_shell = get_ipython()
ipython_shell.set_custom_exc((Exception,), ipython_post_mortem_debug) | python | def set_post_mortem():
"""
Enable the post mortem debugging excepthook.
"""
def ipython_post_mortem_debug(shell, etype, evalue, tb,
tb_offset=None):
post_mortem_excepthook(etype, evalue, tb)
ipython_shell = get_ipython()
ipython_shell.set_custom_exc((Exception,), ipython_post_mortem_debug) | Enable the post mortem debugging excepthook. | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/customize/spydercustomize.py#L761-L769 |
spyder-ide/spyder-kernels | spyder_kernels/customize/spydercustomize.py | runfile | def runfile(filename, args=None, wdir=None, namespace=None, post_mortem=False):
"""
Run filename
args: command line arguments (string)
wdir: working directory
post_mortem: boolean, whether to enter post-mortem mode on error
"""
try:
filename = filename.decode('utf-8')
except (UnicodeError, TypeError, AttributeError):
# UnicodeError, TypeError --> eventually raised in Python 2
# AttributeError --> systematically raised in Python 3
pass
if __umr__.enabled:
__umr__.run()
if args is not None and not isinstance(args, basestring):
raise TypeError("expected a character buffer object")
if namespace is None:
namespace = _get_globals()
namespace['__file__'] = filename
sys.argv = [filename]
if args is not None:
for arg in shlex.split(args):
sys.argv.append(arg)
if wdir is not None:
try:
wdir = wdir.decode('utf-8')
except (UnicodeError, TypeError, AttributeError):
# UnicodeError, TypeError --> eventually raised in Python 2
# AttributeError --> systematically raised in Python 3
pass
os.chdir(wdir)
if post_mortem:
set_post_mortem()
if __umr__.has_cython:
# Cython files
with io.open(filename, encoding='utf-8') as f:
ipython_shell = get_ipython()
ipython_shell.run_cell_magic('cython', '', f.read())
else:
execfile(filename, namespace)
clear_post_mortem()
sys.argv = ['']
# Avoid error when running `%reset -f` programmatically
# See issue spyder-ide/spyder-kernels#91
try:
namespace.pop('__file__')
except KeyError:
pass | python | def runfile(filename, args=None, wdir=None, namespace=None, post_mortem=False):
"""
Run filename
args: command line arguments (string)
wdir: working directory
post_mortem: boolean, whether to enter post-mortem mode on error
"""
try:
filename = filename.decode('utf-8')
except (UnicodeError, TypeError, AttributeError):
# UnicodeError, TypeError --> eventually raised in Python 2
# AttributeError --> systematically raised in Python 3
pass
if __umr__.enabled:
__umr__.run()
if args is not None and not isinstance(args, basestring):
raise TypeError("expected a character buffer object")
if namespace is None:
namespace = _get_globals()
namespace['__file__'] = filename
sys.argv = [filename]
if args is not None:
for arg in shlex.split(args):
sys.argv.append(arg)
if wdir is not None:
try:
wdir = wdir.decode('utf-8')
except (UnicodeError, TypeError, AttributeError):
# UnicodeError, TypeError --> eventually raised in Python 2
# AttributeError --> systematically raised in Python 3
pass
os.chdir(wdir)
if post_mortem:
set_post_mortem()
if __umr__.has_cython:
# Cython files
with io.open(filename, encoding='utf-8') as f:
ipython_shell = get_ipython()
ipython_shell.run_cell_magic('cython', '', f.read())
else:
execfile(filename, namespace)
clear_post_mortem()
sys.argv = ['']
# Avoid error when running `%reset -f` programmatically
# See issue spyder-ide/spyder-kernels#91
try:
namespace.pop('__file__')
except KeyError:
pass | Run filename
args: command line arguments (string)
wdir: working directory
post_mortem: boolean, whether to enter post-mortem mode on error | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/customize/spydercustomize.py#L786-L836 |
spyder-ide/spyder-kernels | spyder_kernels/customize/spydercustomize.py | runcell | def runcell(cellname, filename):
"""
Run a code cell from an editor as a file.
Currently looks for code in an `ipython` property called `cell_code`.
This property must be set by the editor prior to calling this function.
This function deletes the contents of `cell_code` upon completion.
Parameters
----------
cellname : str
Used as a reference in the history log of which
        cell was run with the function. This variable is not used.
filename : str
Needed to allow for proper traceback links.
"""
try:
filename = filename.decode('utf-8')
except (UnicodeError, TypeError, AttributeError):
# UnicodeError, TypeError --> eventually raised in Python 2
# AttributeError --> systematically raised in Python 3
pass
ipython_shell = get_ipython()
namespace = _get_globals()
namespace['__file__'] = filename
try:
cell_code = ipython_shell.cell_code
except AttributeError:
_print("--Run Cell Error--\n"
"Please use only through Spyder's Editor; "
"shouldn't be called manually from the console")
return
# Trigger `post_execute` to exit the additional pre-execution.
# See Spyder PR #7310.
ipython_shell.events.trigger('post_execute')
ipython_shell.run_cell(cell_code)
namespace.pop('__file__')
del ipython_shell.cell_code | python | def runcell(cellname, filename):
"""
Run a code cell from an editor as a file.
Currently looks for code in an `ipython` property called `cell_code`.
This property must be set by the editor prior to calling this function.
This function deletes the contents of `cell_code` upon completion.
Parameters
----------
cellname : str
Used as a reference in the history log of which
        cell was run with the function. This variable is not used.
filename : str
Needed to allow for proper traceback links.
"""
try:
filename = filename.decode('utf-8')
except (UnicodeError, TypeError, AttributeError):
# UnicodeError, TypeError --> eventually raised in Python 2
# AttributeError --> systematically raised in Python 3
pass
ipython_shell = get_ipython()
namespace = _get_globals()
namespace['__file__'] = filename
try:
cell_code = ipython_shell.cell_code
except AttributeError:
_print("--Run Cell Error--\n"
"Please use only through Spyder's Editor; "
"shouldn't be called manually from the console")
return
# Trigger `post_execute` to exit the additional pre-execution.
# See Spyder PR #7310.
ipython_shell.events.trigger('post_execute')
ipython_shell.run_cell(cell_code)
namespace.pop('__file__')
del ipython_shell.cell_code | Run a code cell from an editor as a file.
Currently looks for code in an `ipython` property called `cell_code`.
This property must be set by the editor prior to calling this function.
This function deletes the contents of `cell_code` upon completion.
Parameters
----------
cellname : str
Used as a reference in the history log of which
        cell was run with the function. This variable is not used.
filename : str
Needed to allow for proper traceback links. | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/customize/spydercustomize.py#L842-L881 |
spyder-ide/spyder-kernels | spyder_kernels/customize/spydercustomize.py | debugfile | def debugfile(filename, args=None, wdir=None, post_mortem=False):
"""
Debug filename
args: command line arguments (string)
wdir: working directory
    post_mortem: boolean, included for compatibility with runfile
"""
debugger = pdb.Pdb()
filename = debugger.canonic(filename)
debugger._wait_for_mainpyfile = 1
debugger.mainpyfile = filename
debugger._user_requested_quit = 0
if os.name == 'nt':
filename = filename.replace('\\', '/')
debugger.run("runfile(%r, args=%r, wdir=%r)" % (filename, args, wdir)) | python | def debugfile(filename, args=None, wdir=None, post_mortem=False):
"""
Debug filename
args: command line arguments (string)
wdir: working directory
    post_mortem: boolean, included for compatibility with runfile
"""
debugger = pdb.Pdb()
filename = debugger.canonic(filename)
debugger._wait_for_mainpyfile = 1
debugger.mainpyfile = filename
debugger._user_requested_quit = 0
if os.name == 'nt':
filename = filename.replace('\\', '/')
debugger.run("runfile(%r, args=%r, wdir=%r)" % (filename, args, wdir)) | Debug filename
args: command line arguments (string)
wdir: working directory
    post_mortem: boolean, included for compatibility with runfile
spyder-ide/spyder-kernels | spyder_kernels/customize/spydercustomize.py | UserModuleReloader.create_pathlist | def create_pathlist(self, initial_pathlist):
"""
Add to pathlist Python library paths to be skipped from module
reloading.
"""
# Get standard installation paths
try:
paths = sysconfig.get_paths()
standard_paths = [paths['stdlib'],
paths['purelib'],
paths['scripts'],
paths['data']]
except Exception:
standard_paths = []
# Get user installation path
# See Spyder issue 8776
try:
import site
if getattr(site, 'getusersitepackages', False):
# Virtualenvs don't have this function but
# conda envs do
user_path = [site.getusersitepackages()]
elif getattr(site, 'USER_SITE', False):
# However, it seems virtualenvs have this
# constant
user_path = [site.USER_SITE]
else:
user_path = []
except Exception:
user_path = []
return initial_pathlist + standard_paths + user_path | python | def create_pathlist(self, initial_pathlist):
"""
Add to pathlist Python library paths to be skipped from module
reloading.
"""
# Get standard installation paths
try:
paths = sysconfig.get_paths()
standard_paths = [paths['stdlib'],
paths['purelib'],
paths['scripts'],
paths['data']]
except Exception:
standard_paths = []
# Get user installation path
# See Spyder issue 8776
try:
import site
if getattr(site, 'getusersitepackages', False):
# Virtualenvs don't have this function but
# conda envs do
user_path = [site.getusersitepackages()]
elif getattr(site, 'USER_SITE', False):
# However, it seems virtualenvs have this
# constant
user_path = [site.USER_SITE]
else:
user_path = []
except Exception:
user_path = []
return initial_pathlist + standard_paths + user_path | Add to pathlist Python library paths to be skipped from module
reloading. | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/customize/spydercustomize.py#L573-L605 |
spyder-ide/spyder-kernels | spyder_kernels/customize/spydercustomize.py | UserModuleReloader.is_module_reloadable | def is_module_reloadable(self, module, modname):
"""Decide if a module is reloadable or not."""
if self.has_cython:
# Don't return cached inline compiled .PYX files
return False
else:
if (self.is_module_in_pathlist(module) or
self.is_module_in_namelist(modname)):
return False
else:
return True | python | def is_module_reloadable(self, module, modname):
"""Decide if a module is reloadable or not."""
if self.has_cython:
# Don't return cached inline compiled .PYX files
return False
else:
if (self.is_module_in_pathlist(module) or
self.is_module_in_namelist(modname)):
return False
else:
return True | Decide if a module is reloadable or not. | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/customize/spydercustomize.py#L607-L617 |
spyder-ide/spyder-kernels | spyder_kernels/customize/spydercustomize.py | UserModuleReloader.is_module_in_pathlist | def is_module_in_pathlist(self, module):
"""Decide if a module can be reloaded or not according to its path."""
modpath = getattr(module, '__file__', None)
# Skip module according to different criteria
if modpath is None:
# *module* is a C module that is statically linked into the
# interpreter. There is no way to know its path, so we
# choose to ignore it.
return True
elif any([p in modpath for p in self.pathlist]):
# We don't want to reload modules that belong to the
# standard library or installed to site-packages,
# just modules created by the user.
return True
elif not os.name == 'nt':
            # Module paths containing the strings below can be inherited
# from the default Linux installation, Homebrew or the user
# site-packages in a virtualenv.
patterns = [r'^/usr/lib.*',
r'^/usr/local/lib.*',
r'^/usr/.*/dist-packages/.*',
r'^/home/.*/.local/lib.*',
r'^/Library/.*',
r'^/Users/.*/Library/.*',
r'^/Users/.*/.local/.*',
]
if [p for p in patterns if re.search(p, modpath)]:
return True
else:
return False
else:
return False | python | def is_module_in_pathlist(self, module):
"""Decide if a module can be reloaded or not according to its path."""
modpath = getattr(module, '__file__', None)
# Skip module according to different criteria
if modpath is None:
# *module* is a C module that is statically linked into the
# interpreter. There is no way to know its path, so we
# choose to ignore it.
return True
elif any([p in modpath for p in self.pathlist]):
# We don't want to reload modules that belong to the
# standard library or installed to site-packages,
# just modules created by the user.
return True
elif not os.name == 'nt':
            # Module paths containing the strings below can be inherited
# from the default Linux installation, Homebrew or the user
# site-packages in a virtualenv.
patterns = [r'^/usr/lib.*',
r'^/usr/local/lib.*',
r'^/usr/.*/dist-packages/.*',
r'^/home/.*/.local/lib.*',
r'^/Library/.*',
r'^/Users/.*/Library/.*',
r'^/Users/.*/.local/.*',
]
if [p for p in patterns if re.search(p, modpath)]:
return True
else:
return False
else:
return False | Decide if a module can be reloaded or not according to its path. | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/customize/spydercustomize.py#L623-L656 |
spyder-ide/spyder-kernels | spyder_kernels/customize/spydercustomize.py | UserModuleReloader.activate_cython | def activate_cython(self):
"""
Activate Cython support.
We need to run this here because if the support is
        active, we don't need to run the UMR at all.
"""
run_cython = os.environ.get("SPY_RUN_CYTHON") == "True"
if run_cython:
try:
__import__('Cython')
self.has_cython = True
except Exception:
pass
if self.has_cython:
# Import pyximport to enable Cython files support for
# import statement
import pyximport
pyx_setup_args = {}
# Add Numpy include dir to pyximport/distutils
try:
import numpy
pyx_setup_args['include_dirs'] = numpy.get_include()
except Exception:
pass
# Setup pyximport and enable Cython files reload
pyximport.install(setup_args=pyx_setup_args,
reload_support=True) | python | def activate_cython(self):
"""
Activate Cython support.
We need to run this here because if the support is
        active, we don't need to run the UMR at all.
"""
run_cython = os.environ.get("SPY_RUN_CYTHON") == "True"
if run_cython:
try:
__import__('Cython')
self.has_cython = True
except Exception:
pass
if self.has_cython:
# Import pyximport to enable Cython files support for
# import statement
import pyximport
pyx_setup_args = {}
# Add Numpy include dir to pyximport/distutils
try:
import numpy
pyx_setup_args['include_dirs'] = numpy.get_include()
except Exception:
pass
# Setup pyximport and enable Cython files reload
pyximport.install(setup_args=pyx_setup_args,
reload_support=True) | Activate Cython support.
We need to run this here because if the support is
        active, we don't need to run the UMR at all.
spyder-ide/spyder-kernels | spyder_kernels/customize/spydercustomize.py | UserModuleReloader.run | def run(self):
"""
Delete user modules to force Python to deeply reload them
Do not del modules which are considered as system modules, i.e.
modules installed in subdirectories of Python interpreter's binary
Do not del C modules
"""
self.modnames_to_reload = []
for modname, module in list(sys.modules.items()):
if modname not in self.previous_modules:
# Decide if a module can be reloaded or not
if self.is_module_reloadable(module, modname):
self.modnames_to_reload.append(modname)
del sys.modules[modname]
else:
continue
# Report reloaded modules
if self.verbose and self.modnames_to_reload:
modnames = self.modnames_to_reload
_print("\x1b[4;33m%s\x1b[24m%s\x1b[0m"\
% ("Reloaded modules", ": "+", ".join(modnames))) | python | def run(self):
"""
Delete user modules to force Python to deeply reload them
Do not del modules which are considered as system modules, i.e.
modules installed in subdirectories of Python interpreter's binary
Do not del C modules
"""
self.modnames_to_reload = []
for modname, module in list(sys.modules.items()):
if modname not in self.previous_modules:
# Decide if a module can be reloaded or not
if self.is_module_reloadable(module, modname):
self.modnames_to_reload.append(modname)
del sys.modules[modname]
else:
continue
# Report reloaded modules
if self.verbose and self.modnames_to_reload:
modnames = self.modnames_to_reload
_print("\x1b[4;33m%s\x1b[24m%s\x1b[0m"\
% ("Reloaded modules", ": "+", ".join(modnames))) | Delete user modules to force Python to deeply reload them
Do not del modules which are considered as system modules, i.e.
modules installed in subdirectories of Python interpreter's binary
Do not del C modules | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/customize/spydercustomize.py#L691-L713 |
spyder-ide/spyder-kernels | spyder_kernels/utils/iofuncs.py | get_matlab_value | def get_matlab_value(val):
"""
Extract a value from a Matlab file
From the oct2py project, see
https://pythonhosted.org/oct2py/conversions.html
"""
import numpy as np
# Extract each item of a list.
if isinstance(val, list):
return [get_matlab_value(v) for v in val]
# Ignore leaf objects.
if not isinstance(val, np.ndarray):
return val
# Convert user defined classes.
if hasattr(val, 'classname'):
out = dict()
for name in val.dtype.names:
out[name] = get_matlab_value(val[name].squeeze().tolist())
cls = type(val.classname, (object,), out)
return cls()
# Extract struct data.
elif val.dtype.names:
out = MatlabStruct()
for name in val.dtype.names:
out[name] = get_matlab_value(val[name].squeeze().tolist())
val = out
# Extract cells.
elif val.dtype.kind == 'O':
val = val.squeeze().tolist()
if not isinstance(val, list):
val = [val]
val = get_matlab_value(val)
# Compress singleton values.
elif val.size == 1:
val = val.item()
# Compress empty values.
elif val.size == 0:
if val.dtype.kind in 'US':
val = ''
else:
val = []
return val | python | def get_matlab_value(val):
"""
Extract a value from a Matlab file
From the oct2py project, see
https://pythonhosted.org/oct2py/conversions.html
"""
import numpy as np
# Extract each item of a list.
if isinstance(val, list):
return [get_matlab_value(v) for v in val]
# Ignore leaf objects.
if not isinstance(val, np.ndarray):
return val
# Convert user defined classes.
if hasattr(val, 'classname'):
out = dict()
for name in val.dtype.names:
out[name] = get_matlab_value(val[name].squeeze().tolist())
cls = type(val.classname, (object,), out)
return cls()
# Extract struct data.
elif val.dtype.names:
out = MatlabStruct()
for name in val.dtype.names:
out[name] = get_matlab_value(val[name].squeeze().tolist())
val = out
# Extract cells.
elif val.dtype.kind == 'O':
val = val.squeeze().tolist()
if not isinstance(val, list):
val = [val]
val = get_matlab_value(val)
# Compress singleton values.
elif val.size == 1:
val = val.item()
# Compress empty values.
elif val.size == 0:
if val.dtype.kind in 'US':
val = ''
else:
val = []
return val | Extract a value from a Matlab file
From the oct2py project, see
https://pythonhosted.org/oct2py/conversions.html | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/iofuncs.py#L106-L156 |
spyder-ide/spyder-kernels | spyder_kernels/utils/iofuncs.py | load_pickle | def load_pickle(filename):
"""Load a pickle file as a dictionary"""
try:
if pd:
return pd.read_pickle(filename), None
else:
with open(filename, 'rb') as fid:
data = pickle.load(fid)
return data, None
except Exception as err:
return None, str(err) | python | def load_pickle(filename):
"""Load a pickle file as a dictionary"""
try:
if pd:
return pd.read_pickle(filename), None
else:
with open(filename, 'rb') as fid:
data = pickle.load(fid)
return data, None
except Exception as err:
return None, str(err) | Load a pickle file as a dictionary | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/iofuncs.py#L260-L270 |
spyder-ide/spyder-kernels | spyder_kernels/utils/iofuncs.py | load_json | def load_json(filename):
"""Load a json file as a dictionary"""
try:
if PY2:
args = 'rb'
else:
args = 'r'
with open(filename, args) as fid:
data = json.load(fid)
return data, None
except Exception as err:
return None, str(err) | python | def load_json(filename):
"""Load a json file as a dictionary"""
try:
if PY2:
args = 'rb'
else:
args = 'r'
with open(filename, args) as fid:
data = json.load(fid)
return data, None
except Exception as err:
return None, str(err) | Load a json file as a dictionary | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/iofuncs.py#L273-L284 |
spyder-ide/spyder-kernels | spyder_kernels/utils/iofuncs.py | save_dictionary | def save_dictionary(data, filename):
"""Save dictionary in a single file .spydata file"""
filename = osp.abspath(filename)
old_cwd = getcwd()
os.chdir(osp.dirname(filename))
error_message = None
skipped_keys = []
data_copy = {}
try:
# Copy dictionary before modifying it to fix #6689
for obj_name, obj_value in data.items():
# Skip modules, since they can't be pickled, users virtually never
# would want them to be and so they don't show up in the skip list.
# Skip callables, since they are only pickled by reference and thus
# must already be present in the user's environment anyway.
if not (callable(obj_value) or isinstance(obj_value,
types.ModuleType)):
# If an object cannot be deepcopied, then it cannot be pickled.
# Ergo, we skip it and list it later.
try:
data_copy[obj_name] = copy.deepcopy(obj_value)
except Exception:
skipped_keys.append(obj_name)
data = data_copy
if not data:
raise RuntimeError('No supported objects to save')
saved_arrays = {}
if load_array is not None:
# Saving numpy arrays with np.save
arr_fname = osp.splitext(filename)[0]
for name in list(data.keys()):
try:
if isinstance(data[name],
np.ndarray) and data[name].size > 0:
# Save arrays at data root
fname = __save_array(data[name], arr_fname,
len(saved_arrays))
saved_arrays[(name, None)] = osp.basename(fname)
data.pop(name)
elif isinstance(data[name], (list, dict)):
# Save arrays nested in lists or dictionaries
if isinstance(data[name], list):
iterator = enumerate(data[name])
else:
iterator = iter(list(data[name].items()))
to_remove = []
for index, value in iterator:
if isinstance(value,
np.ndarray) and value.size > 0:
fname = __save_array(value, arr_fname,
len(saved_arrays))
saved_arrays[(name, index)] = (
osp.basename(fname))
to_remove.append(index)
for index in sorted(to_remove, reverse=True):
data[name].pop(index)
except (RuntimeError, pickle.PicklingError, TypeError,
AttributeError, IndexError):
# If an array can't be saved with numpy for some reason,
# leave the object intact and try to save it normally.
pass
if saved_arrays:
data['__saved_arrays__'] = saved_arrays
pickle_filename = osp.splitext(filename)[0] + '.pickle'
# Attempt to pickle everything.
# If pickling fails, iterate through to eliminate problem objs & retry.
with open(pickle_filename, 'w+b') as fdesc:
try:
pickle.dump(data, fdesc, protocol=2)
except (pickle.PicklingError, AttributeError, TypeError,
ImportError, IndexError, RuntimeError):
data_filtered = {}
for obj_name, obj_value in data.items():
try:
pickle.dumps(obj_value, protocol=2)
except Exception:
skipped_keys.append(obj_name)
else:
data_filtered[obj_name] = obj_value
if not data_filtered:
raise RuntimeError('No supported objects to save')
pickle.dump(data_filtered, fdesc, protocol=2)
# Use PAX (POSIX.1-2001) format instead of default GNU.
# This improves interoperability and UTF-8/long variable name support.
with tarfile.open(filename, "w", format=tarfile.PAX_FORMAT) as tar:
for fname in ([pickle_filename]
+ [fn for fn in list(saved_arrays.values())]):
tar.add(osp.basename(fname))
os.remove(fname)
except (RuntimeError, pickle.PicklingError, TypeError) as error:
error_message = to_text_string(error)
else:
if skipped_keys:
skipped_keys.sort()
error_message = ('Some objects could not be saved: '
+ ', '.join(skipped_keys))
finally:
os.chdir(old_cwd)
return error_message | python | def save_dictionary(data, filename):
"""Save dictionary in a single file .spydata file"""
filename = osp.abspath(filename)
old_cwd = getcwd()
os.chdir(osp.dirname(filename))
error_message = None
skipped_keys = []
data_copy = {}
try:
# Copy dictionary before modifying it to fix #6689
for obj_name, obj_value in data.items():
# Skip modules, since they can't be pickled, users virtually never
# would want them to be and so they don't show up in the skip list.
# Skip callables, since they are only pickled by reference and thus
# must already be present in the user's environment anyway.
if not (callable(obj_value) or isinstance(obj_value,
types.ModuleType)):
# If an object cannot be deepcopied, then it cannot be pickled.
# Ergo, we skip it and list it later.
try:
data_copy[obj_name] = copy.deepcopy(obj_value)
except Exception:
skipped_keys.append(obj_name)
data = data_copy
if not data:
raise RuntimeError('No supported objects to save')
saved_arrays = {}
if load_array is not None:
# Saving numpy arrays with np.save
arr_fname = osp.splitext(filename)[0]
for name in list(data.keys()):
try:
if isinstance(data[name],
np.ndarray) and data[name].size > 0:
# Save arrays at data root
fname = __save_array(data[name], arr_fname,
len(saved_arrays))
saved_arrays[(name, None)] = osp.basename(fname)
data.pop(name)
elif isinstance(data[name], (list, dict)):
# Save arrays nested in lists or dictionaries
if isinstance(data[name], list):
iterator = enumerate(data[name])
else:
iterator = iter(list(data[name].items()))
to_remove = []
for index, value in iterator:
if isinstance(value,
np.ndarray) and value.size > 0:
fname = __save_array(value, arr_fname,
len(saved_arrays))
saved_arrays[(name, index)] = (
osp.basename(fname))
to_remove.append(index)
for index in sorted(to_remove, reverse=True):
data[name].pop(index)
except (RuntimeError, pickle.PicklingError, TypeError,
AttributeError, IndexError):
# If an array can't be saved with numpy for some reason,
# leave the object intact and try to save it normally.
pass
if saved_arrays:
data['__saved_arrays__'] = saved_arrays
pickle_filename = osp.splitext(filename)[0] + '.pickle'
# Attempt to pickle everything.
# If pickling fails, iterate through to eliminate problem objs & retry.
with open(pickle_filename, 'w+b') as fdesc:
try:
pickle.dump(data, fdesc, protocol=2)
except (pickle.PicklingError, AttributeError, TypeError,
ImportError, IndexError, RuntimeError):
data_filtered = {}
for obj_name, obj_value in data.items():
try:
pickle.dumps(obj_value, protocol=2)
except Exception:
skipped_keys.append(obj_name)
else:
data_filtered[obj_name] = obj_value
if not data_filtered:
raise RuntimeError('No supported objects to save')
pickle.dump(data_filtered, fdesc, protocol=2)
# Use PAX (POSIX.1-2001) format instead of default GNU.
# This improves interoperability and UTF-8/long variable name support.
with tarfile.open(filename, "w", format=tarfile.PAX_FORMAT) as tar:
for fname in ([pickle_filename]
+ [fn for fn in list(saved_arrays.values())]):
tar.add(osp.basename(fname))
os.remove(fname)
except (RuntimeError, pickle.PicklingError, TypeError) as error:
error_message = to_text_string(error)
else:
if skipped_keys:
skipped_keys.sort()
error_message = ('Some objects could not be saved: '
+ ', '.join(skipped_keys))
finally:
os.chdir(old_cwd)
return error_message | Save dictionary in a single file .spydata file | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/iofuncs.py#L287-L389 |
spyder-ide/spyder-kernels | spyder_kernels/utils/iofuncs.py | load_dictionary | def load_dictionary(filename):
"""Load dictionary from .spydata file"""
filename = osp.abspath(filename)
old_cwd = getcwd()
tmp_folder = tempfile.mkdtemp()
os.chdir(tmp_folder)
data = None
error_message = None
try:
with tarfile.open(filename, "r") as tar:
tar.extractall()
pickle_filename = glob.glob('*.pickle')[0]
# 'New' format (Spyder >=2.2 for Python 2 and Python 3)
with open(pickle_filename, 'rb') as fdesc:
data = pickle.loads(fdesc.read())
saved_arrays = {}
if load_array is not None:
# Loading numpy arrays saved with np.save
try:
saved_arrays = data.pop('__saved_arrays__')
for (name, index), fname in list(saved_arrays.items()):
arr = np.load( osp.join(tmp_folder, fname) )
if index is None:
data[name] = arr
elif isinstance(data[name], dict):
data[name][index] = arr
else:
data[name].insert(index, arr)
except KeyError:
pass
# Except AttributeError from e.g. trying to load function no longer present
except (AttributeError, EOFError, ValueError) as error:
error_message = to_text_string(error)
# To ensure working dir gets changed back and temp dir wiped no matter what
finally:
os.chdir(old_cwd)
try:
shutil.rmtree(tmp_folder)
except OSError as error:
error_message = to_text_string(error)
return data, error_message | python | def load_dictionary(filename):
"""Load dictionary from .spydata file"""
filename = osp.abspath(filename)
old_cwd = getcwd()
tmp_folder = tempfile.mkdtemp()
os.chdir(tmp_folder)
data = None
error_message = None
try:
with tarfile.open(filename, "r") as tar:
tar.extractall()
pickle_filename = glob.glob('*.pickle')[0]
# 'New' format (Spyder >=2.2 for Python 2 and Python 3)
with open(pickle_filename, 'rb') as fdesc:
data = pickle.loads(fdesc.read())
saved_arrays = {}
if load_array is not None:
# Loading numpy arrays saved with np.save
try:
saved_arrays = data.pop('__saved_arrays__')
for (name, index), fname in list(saved_arrays.items()):
arr = np.load( osp.join(tmp_folder, fname) )
if index is None:
data[name] = arr
elif isinstance(data[name], dict):
data[name][index] = arr
else:
data[name].insert(index, arr)
except KeyError:
pass
# Except AttributeError from e.g. trying to load function no longer present
except (AttributeError, EOFError, ValueError) as error:
error_message = to_text_string(error)
# To ensure working dir gets changed back and temp dir wiped no matter what
finally:
os.chdir(old_cwd)
try:
shutil.rmtree(tmp_folder)
except OSError as error:
error_message = to_text_string(error)
return data, error_message | Load dictionary from .spydata file | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/iofuncs.py#L392-L432 |
spyder-ide/spyder-kernels | spyder_kernels/utils/dochelpers.py | getobj | def getobj(txt, last=False):
"""Return the last valid object name in string"""
txt_end = ""
for startchar, endchar in ["[]", "()"]:
if txt.endswith(endchar):
pos = txt.rfind(startchar)
if pos:
txt_end = txt[pos:]
txt = txt[:pos]
tokens = re.split(SYMBOLS, txt)
token = None
try:
while token is None or re.match(SYMBOLS, token):
token = tokens.pop()
if token.endswith('.'):
token = token[:-1]
if token.startswith('.'):
# Invalid object name
return None
if last:
#XXX: remove this statement as well as the "last" argument
token += txt[ txt.rfind(token) + len(token) ]
token += txt_end
if token:
return token
except IndexError:
return None | python | def getobj(txt, last=False):
"""Return the last valid object name in string"""
txt_end = ""
for startchar, endchar in ["[]", "()"]:
if txt.endswith(endchar):
pos = txt.rfind(startchar)
if pos:
txt_end = txt[pos:]
txt = txt[:pos]
tokens = re.split(SYMBOLS, txt)
token = None
try:
while token is None or re.match(SYMBOLS, token):
token = tokens.pop()
if token.endswith('.'):
token = token[:-1]
if token.startswith('.'):
# Invalid object name
return None
if last:
#XXX: remove this statement as well as the "last" argument
token += txt[ txt.rfind(token) + len(token) ]
token += txt_end
if token:
return token
except IndexError:
return None | Return the last valid object name in string | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/dochelpers.py#L25-L51 |
spyder-ide/spyder-kernels | spyder_kernels/utils/dochelpers.py | getdoc | def getdoc(obj):
"""
Return text documentation from an object. This comes in a form of
dictionary with four keys:
name:
The name of the inspected object
argspec:
It's argspec
note:
A phrase describing the type of object (function or method) we are
inspecting, and the module it belongs to.
docstring:
It's docstring
"""
docstring = inspect.getdoc(obj) or inspect.getcomments(obj) or ''
# Most of the time doc will only contain ascii characters, but there are
# some docstrings that contain non-ascii characters. Not all source files
# declare their encoding in the first line, so querying for that might not
# yield anything, either. So assume the most commonly used
# multi-byte file encoding (which also covers ascii).
try:
docstring = to_text_string(docstring)
except:
pass
# Doc dict keys
doc = {'name': '',
'argspec': '',
'note': '',
'docstring': docstring}
if callable(obj):
try:
name = obj.__name__
except AttributeError:
doc['docstring'] = docstring
return doc
if inspect.ismethod(obj):
imclass = get_meth_class(obj)
if get_meth_class_inst(obj) is not None:
doc['note'] = 'Method of %s instance' \
% get_meth_class_inst(obj).__class__.__name__
else:
doc['note'] = 'Unbound %s method' % imclass.__name__
obj = get_meth_func(obj)
elif hasattr(obj, '__module__'):
doc['note'] = 'Function of %s module' % obj.__module__
else:
doc['note'] = 'Function'
doc['name'] = obj.__name__
if inspect.isfunction(obj):
if PY2:
args, varargs, varkw, defaults = inspect.getargspec(obj)
doc['argspec'] = inspect.formatargspec(
args, varargs, varkw, defaults,
formatvalue=lambda o:'='+repr(o))
else:
(args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults,
annotations) = inspect.getfullargspec(obj)
doc['argspec'] = inspect.formatargspec(
args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults,
annotations, formatvalue=lambda o:'='+repr(o))
if name == '<lambda>':
doc['name'] = name + ' lambda '
doc['argspec'] = doc['argspec'][1:-1] # remove parentheses
else:
argspec = getargspecfromtext(doc['docstring'])
if argspec:
doc['argspec'] = argspec
# Many scipy and numpy docstrings begin with a function
# signature on the first line. This ends up begin redundant
# when we are using title and argspec to create the
# rich text "Definition:" field. We'll carefully remove this
# redundancy but only under a strict set of conditions:
# Remove the starting charaters of the 'doc' portion *iff*
# the non-whitespace characters on the first line
# match *exactly* the combined function title
# and argspec we determined above.
signature = doc['name'] + doc['argspec']
docstring_blocks = doc['docstring'].split("\n\n")
first_block = docstring_blocks[0].strip()
if first_block == signature:
doc['docstring'] = doc['docstring'].replace(
signature, '', 1).lstrip()
else:
doc['argspec'] = '(...)'
# Remove self from argspec
argspec = doc['argspec']
doc['argspec'] = argspec.replace('(self)', '()').replace('(self, ', '(')
return doc | python | def getdoc(obj):
"""
Return text documentation from an object. This comes in a form of
dictionary with four keys:
name:
The name of the inspected object
argspec:
It's argspec
note:
A phrase describing the type of object (function or method) we are
inspecting, and the module it belongs to.
docstring:
It's docstring
"""
docstring = inspect.getdoc(obj) or inspect.getcomments(obj) or ''
# Most of the time doc will only contain ascii characters, but there are
# some docstrings that contain non-ascii characters. Not all source files
# declare their encoding in the first line, so querying for that might not
# yield anything, either. So assume the most commonly used
# multi-byte file encoding (which also covers ascii).
try:
docstring = to_text_string(docstring)
except:
pass
# Doc dict keys
doc = {'name': '',
'argspec': '',
'note': '',
'docstring': docstring}
if callable(obj):
try:
name = obj.__name__
except AttributeError:
doc['docstring'] = docstring
return doc
if inspect.ismethod(obj):
imclass = get_meth_class(obj)
if get_meth_class_inst(obj) is not None:
doc['note'] = 'Method of %s instance' \
% get_meth_class_inst(obj).__class__.__name__
else:
doc['note'] = 'Unbound %s method' % imclass.__name__
obj = get_meth_func(obj)
elif hasattr(obj, '__module__'):
doc['note'] = 'Function of %s module' % obj.__module__
else:
doc['note'] = 'Function'
doc['name'] = obj.__name__
if inspect.isfunction(obj):
if PY2:
args, varargs, varkw, defaults = inspect.getargspec(obj)
doc['argspec'] = inspect.formatargspec(
args, varargs, varkw, defaults,
formatvalue=lambda o:'='+repr(o))
else:
(args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults,
annotations) = inspect.getfullargspec(obj)
doc['argspec'] = inspect.formatargspec(
args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults,
annotations, formatvalue=lambda o:'='+repr(o))
if name == '<lambda>':
doc['name'] = name + ' lambda '
doc['argspec'] = doc['argspec'][1:-1] # remove parentheses
else:
argspec = getargspecfromtext(doc['docstring'])
if argspec:
doc['argspec'] = argspec
# Many scipy and numpy docstrings begin with a function
# signature on the first line. This ends up begin redundant
# when we are using title and argspec to create the
# rich text "Definition:" field. We'll carefully remove this
# redundancy but only under a strict set of conditions:
# Remove the starting charaters of the 'doc' portion *iff*
# the non-whitespace characters on the first line
# match *exactly* the combined function title
# and argspec we determined above.
signature = doc['name'] + doc['argspec']
docstring_blocks = doc['docstring'].split("\n\n")
first_block = docstring_blocks[0].strip()
if first_block == signature:
doc['docstring'] = doc['docstring'].replace(
signature, '', 1).lstrip()
else:
doc['argspec'] = '(...)'
# Remove self from argspec
argspec = doc['argspec']
doc['argspec'] = argspec.replace('(self)', '()').replace('(self, ', '(')
return doc | Return text documentation from an object. This comes in a form of
dictionary with four keys:
name:
The name of the inspected object
argspec:
It's argspec
note:
A phrase describing the type of object (function or method) we are
inspecting, and the module it belongs to.
docstring:
It's docstring | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/dochelpers.py#L63-L157 |
spyder-ide/spyder-kernels | spyder_kernels/utils/dochelpers.py | getsource | def getsource(obj):
"""Wrapper around inspect.getsource"""
try:
try:
src = to_text_string(inspect.getsource(obj))
except TypeError:
if hasattr(obj, '__class__'):
src = to_text_string(inspect.getsource(obj.__class__))
else:
# Bindings like VTK or ITK require this case
src = getdoc(obj)
return src
except (TypeError, IOError):
return | python | def getsource(obj):
"""Wrapper around inspect.getsource"""
try:
try:
src = to_text_string(inspect.getsource(obj))
except TypeError:
if hasattr(obj, '__class__'):
src = to_text_string(inspect.getsource(obj.__class__))
else:
# Bindings like VTK or ITK require this case
src = getdoc(obj)
return src
except (TypeError, IOError):
return | Wrapper around inspect.getsource | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/dochelpers.py#L160-L173 |
spyder-ide/spyder-kernels | spyder_kernels/utils/dochelpers.py | getsignaturefromtext | def getsignaturefromtext(text, objname):
"""Get object signatures from text (object documentation)
Return a list containing a single string in most cases
Example of multiple signatures: PyQt5 objects"""
if isinstance(text, dict):
text = text.get('docstring', '')
# Regexps
oneline_re = objname + r'\([^\)].+?(?<=[\w\]\}\'"])\)(?!,)'
multiline_re = objname + r'\([^\)]+(?<=[\w\]\}\'"])\)(?!,)'
multiline_end_parenleft_re = r'(%s\([^\)]+(\),\n.+)+(?<=[\w\]\}\'"])\))'
# Grabbing signatures
if not text:
text = ''
sigs_1 = re.findall(oneline_re + '|' + multiline_re, text)
sigs_2 = [g[0] for g in re.findall(multiline_end_parenleft_re % objname, text)]
all_sigs = sigs_1 + sigs_2
# The most relevant signature is usually the first one. There could be
# others in doctests but those are not so important
if all_sigs:
return all_sigs[0]
else:
return '' | python | def getsignaturefromtext(text, objname):
"""Get object signatures from text (object documentation)
Return a list containing a single string in most cases
Example of multiple signatures: PyQt5 objects"""
if isinstance(text, dict):
text = text.get('docstring', '')
# Regexps
oneline_re = objname + r'\([^\)].+?(?<=[\w\]\}\'"])\)(?!,)'
multiline_re = objname + r'\([^\)]+(?<=[\w\]\}\'"])\)(?!,)'
multiline_end_parenleft_re = r'(%s\([^\)]+(\),\n.+)+(?<=[\w\]\}\'"])\))'
# Grabbing signatures
if not text:
text = ''
sigs_1 = re.findall(oneline_re + '|' + multiline_re, text)
sigs_2 = [g[0] for g in re.findall(multiline_end_parenleft_re % objname, text)]
all_sigs = sigs_1 + sigs_2
# The most relevant signature is usually the first one. There could be
# others in doctests but those are not so important
if all_sigs:
return all_sigs[0]
else:
return '' | Get object signatures from text (object documentation)
Return a list containing a single string in most cases
Example of multiple signatures: PyQt5 objects | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/dochelpers.py#L176-L197 |
spyder-ide/spyder-kernels | spyder_kernels/utils/dochelpers.py | getargspecfromtext | def getargspecfromtext(text):
"""
Try to get the formatted argspec of a callable from the first block of its
docstring
This will return something like
'(foo, bar, k=1)'
"""
blocks = text.split("\n\n")
first_block = blocks[0].strip()
return getsignaturefromtext(first_block, '') | python | def getargspecfromtext(text):
"""
Try to get the formatted argspec of a callable from the first block of its
docstring
This will return something like
'(foo, bar, k=1)'
"""
blocks = text.split("\n\n")
first_block = blocks[0].strip()
return getsignaturefromtext(first_block, '') | Try to get the formatted argspec of a callable from the first block of its
docstring
This will return something like
'(foo, bar, k=1)' | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/dochelpers.py#L204-L214 |
spyder-ide/spyder-kernels | spyder_kernels/utils/dochelpers.py | getargsfromtext | def getargsfromtext(text, objname):
"""Get arguments from text (object documentation)"""
signature = getsignaturefromtext(text, objname)
if signature:
argtxt = signature[signature.find('(')+1:-1]
return argtxt.split(',') | python | def getargsfromtext(text, objname):
"""Get arguments from text (object documentation)"""
signature = getsignaturefromtext(text, objname)
if signature:
argtxt = signature[signature.find('(')+1:-1]
return argtxt.split(',') | Get arguments from text (object documentation) | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/dochelpers.py#L217-L222 |
spyder-ide/spyder-kernels | spyder_kernels/utils/dochelpers.py | getargs | def getargs(obj):
"""Get the names and default values of a function's arguments"""
if inspect.isfunction(obj) or inspect.isbuiltin(obj):
func_obj = obj
elif inspect.ismethod(obj):
func_obj = get_meth_func(obj)
elif inspect.isclass(obj) and hasattr(obj, '__init__'):
func_obj = getattr(obj, '__init__')
else:
return []
if not hasattr(func_obj, 'func_code'):
# Builtin: try to extract info from doc
args = getargsfromdoc(func_obj)
if args is not None:
return args
else:
# Example: PyQt5
return getargsfromdoc(obj)
args, _, _ = inspect.getargs(func_obj.func_code)
if not args:
return getargsfromdoc(obj)
# Supporting tuple arguments in def statement:
for i_arg, arg in enumerate(args):
if isinstance(arg, list):
args[i_arg] = "(%s)" % ", ".join(arg)
defaults = get_func_defaults(func_obj)
if defaults is not None:
for index, default in enumerate(defaults):
args[index+len(args)-len(defaults)] += '='+repr(default)
if inspect.isclass(obj) or inspect.ismethod(obj):
if len(args) == 1:
return None
if 'self' in args:
args.remove('self')
return args | python | def getargs(obj):
"""Get the names and default values of a function's arguments"""
if inspect.isfunction(obj) or inspect.isbuiltin(obj):
func_obj = obj
elif inspect.ismethod(obj):
func_obj = get_meth_func(obj)
elif inspect.isclass(obj) and hasattr(obj, '__init__'):
func_obj = getattr(obj, '__init__')
else:
return []
if not hasattr(func_obj, 'func_code'):
# Builtin: try to extract info from doc
args = getargsfromdoc(func_obj)
if args is not None:
return args
else:
# Example: PyQt5
return getargsfromdoc(obj)
args, _, _ = inspect.getargs(func_obj.func_code)
if not args:
return getargsfromdoc(obj)
# Supporting tuple arguments in def statement:
for i_arg, arg in enumerate(args):
if isinstance(arg, list):
args[i_arg] = "(%s)" % ", ".join(arg)
defaults = get_func_defaults(func_obj)
if defaults is not None:
for index, default in enumerate(defaults):
args[index+len(args)-len(defaults)] += '='+repr(default)
if inspect.isclass(obj) or inspect.ismethod(obj):
if len(args) == 1:
return None
if 'self' in args:
args.remove('self')
return args | Get the names and default values of a function's arguments | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/dochelpers.py#L231-L267 |
spyder-ide/spyder-kernels | spyder_kernels/utils/dochelpers.py | getargtxt | def getargtxt(obj, one_arg_per_line=True):
"""
Get the names and default values of a function's arguments
Return list with separators (', ') formatted for calltips
"""
args = getargs(obj)
if args:
sep = ', '
textlist = None
for i_arg, arg in enumerate(args):
if textlist is None:
textlist = ['']
textlist[-1] += arg
if i_arg < len(args)-1:
textlist[-1] += sep
if len(textlist[-1]) >= 32 or one_arg_per_line:
textlist.append('')
if inspect.isclass(obj) or inspect.ismethod(obj):
if len(textlist) == 1:
return None
if 'self'+sep in textlist:
textlist.remove('self'+sep)
return textlist | python | def getargtxt(obj, one_arg_per_line=True):
"""
Get the names and default values of a function's arguments
Return list with separators (', ') formatted for calltips
"""
args = getargs(obj)
if args:
sep = ', '
textlist = None
for i_arg, arg in enumerate(args):
if textlist is None:
textlist = ['']
textlist[-1] += arg
if i_arg < len(args)-1:
textlist[-1] += sep
if len(textlist[-1]) >= 32 or one_arg_per_line:
textlist.append('')
if inspect.isclass(obj) or inspect.ismethod(obj):
if len(textlist) == 1:
return None
if 'self'+sep in textlist:
textlist.remove('self'+sep)
return textlist | Get the names and default values of a function's arguments
Return list with separators (', ') formatted for calltips | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/dochelpers.py#L270-L292 |
spyder-ide/spyder-kernels | spyder_kernels/utils/dochelpers.py | isdefined | def isdefined(obj, force_import=False, namespace=None):
"""Return True if object is defined in namespace
If namespace is None --> namespace = locals()"""
if namespace is None:
namespace = locals()
attr_list = obj.split('.')
base = attr_list.pop(0)
if len(base) == 0:
return False
if base not in builtins.__dict__ and base not in namespace:
if force_import:
try:
module = __import__(base, globals(), namespace)
if base not in globals():
globals()[base] = module
namespace[base] = module
except Exception:
return False
else:
return False
for attr in attr_list:
try:
attr_not_found = not hasattr(eval(base, namespace), attr)
except (SyntaxError, AttributeError):
return False
if attr_not_found:
if force_import:
try:
__import__(base+'.'+attr, globals(), namespace)
except (ImportError, SyntaxError):
return False
else:
return False
base += '.'+attr
return True | python | def isdefined(obj, force_import=False, namespace=None):
"""Return True if object is defined in namespace
If namespace is None --> namespace = locals()"""
if namespace is None:
namespace = locals()
attr_list = obj.split('.')
base = attr_list.pop(0)
if len(base) == 0:
return False
if base not in builtins.__dict__ and base not in namespace:
if force_import:
try:
module = __import__(base, globals(), namespace)
if base not in globals():
globals()[base] = module
namespace[base] = module
except Exception:
return False
else:
return False
for attr in attr_list:
try:
attr_not_found = not hasattr(eval(base, namespace), attr)
except (SyntaxError, AttributeError):
return False
if attr_not_found:
if force_import:
try:
__import__(base+'.'+attr, globals(), namespace)
except (ImportError, SyntaxError):
return False
else:
return False
base += '.'+attr
return True | Return True if object is defined in namespace
If namespace is None --> namespace = locals() | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/dochelpers.py#L295-L329 |
swift-nav/libsbp | python/sbp/client/drivers/file_driver.py | FileDriver.read | def read(self, size):
"""
Read wrapper.
Parameters
----------
size : int
Number of bytes to read.
"""
return_val = self.handle.read(size)
if not return_val:
raise IOError
else:
return return_val | python | def read(self, size):
"""
Read wrapper.
Parameters
----------
size : int
Number of bytes to read.
"""
return_val = self.handle.read(size)
if not return_val:
raise IOError
else:
return return_val | Read wrapper.
Parameters
----------
size : int
Number of bytes to read. | https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/python/sbp/client/drivers/file_driver.py#L25-L38 |
swift-nav/libsbp | generator/sbpg/targets/latex.py | escape_tex | def escape_tex(value):
"""
Make text tex safe
"""
newval = value
for pattern, replacement in LATEX_SUBS:
newval = pattern.sub(replacement, newval)
return newval | python | def escape_tex(value):
"""
Make text tex safe
"""
newval = value
for pattern, replacement in LATEX_SUBS:
newval = pattern.sub(replacement, newval)
return newval | Make text tex safe | https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/latex.py#L55-L62 |
swift-nav/libsbp | generator/sbpg/targets/latex.py | classnameify | def classnameify(s):
"""
Makes a classname
"""
return ''.join(w if w in ACRONYMS else w.title() for w in s.split('_')) | python | def classnameify(s):
"""
Makes a classname
"""
return ''.join(w if w in ACRONYMS else w.title() for w in s.split('_')) | Makes a classname | https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/latex.py#L64-L68 |
swift-nav/libsbp | generator/sbpg/targets/latex.py | packagenameify | def packagenameify(s):
"""
Makes a package name
"""
return ''.join(w if w in ACRONYMS else w.title() for w in s.split('.')[-1:]) | python | def packagenameify(s):
"""
Makes a package name
"""
return ''.join(w if w in ACRONYMS else w.title() for w in s.split('.')[-1:]) | Makes a package name | https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/latex.py#L73-L77 |
swift-nav/libsbp | generator/sbpg/targets/latex.py | handle_fields | def handle_fields(definitions, fields, prefix, offset, multiplier):
"""
Helper for handling naming and sizing of fields. It's terrible.
"""
items = []
for f in fields:
if f.type_id == "array" and f.options['fill'].value in CONSTRUCT_CODE:
prefix_name = '.'.join([prefix, f.identifier]) if prefix else f.identifier
n_with_values = f.options['n_with_values'].value
bitfields = f.options['fields'].value if n_with_values > 0 else None
if 'size' in f.options:
name = "%s[%s]" % (f.options['fill'].value, str(f.options['size'].value))
size = field_sizes[f.options['fill'].value] * f.options['size'].value
item = FieldItem(prefix_name, name, offset, size,
str(f.units), f.desc, n_with_values, bitfields)
items.append(item)
offset += size
else:
name = "%s[%s]" % (f.options['fill'].value, "N")
multiplier = field_sizes[f.options['fill'].value]
size = field_sizes[f.options['fill'].value] * 1
item = FieldItem(prefix_name, name, offset, "N",
str(f.units), f.desc, n_with_values, bitfields)
items.append(item)
offset += size
elif f.type_id == "string":
prefix_name = '.'.join([prefix, f.identifier]) if prefix else f.identifier
n_with_values = f.options['n_with_values'].value
bitfields = f.options['fields'].value if n_with_values > 0 else None
if 'size' in f.options:
name = "string"
size = field_sizes['u8'] * f.options['size'].value
item = FieldItem(prefix_name, name, offset, size,
str(f.units), f.desc, n_with_values, bitfields)
items.append(item)
offset += size
else:
name = "string"
size = field_sizes['u8']
multiplier = 1
item = FieldItem(prefix_name, name, offset, "N",
str(f.units), f.desc, n_with_values, bitfields)
items.append(item)
offset += size
elif f.type_id == "array":
name = f.options['fill'].value
definition = next(d for d in definitions if name == d.identifier)
prefix_name = '.'.join([prefix, f.identifier]) if prefix else f.identifier
(new_items, new_offset, new_multiplier) \
= handle_fields(definitions,
definition.fields,
prefix_name + "[N]",
offset,
multiplier)
multiplier = new_offset - offset
(newer_items, newer_offset, newer_multiplier) \
= handle_fields(definitions,
definition.fields,
prefix_name + "[N]", offset,
multiplier)
items += newer_items
offset = newer_offset
elif f.type_id not in CONSTRUCT_CODE:
name = f.type_id
definition = next(d for d in definitions if name == d.identifier)
prefix_name = '.'.join([prefix, f.identifier]) if prefix else f.identifier
(new_items, new_offset, new_multiplier) \
= handle_fields(definitions,
definition.fields,
prefix_name,
offset,
multiplier)
items += new_items
offset = new_offset
multiplier = new_multiplier
else:
size = field_sizes[f.type_id]
name = f.type_id
adj_offset = "%dN+%d" % (multiplier, offset) if multiplier else offset
prefix_name = '.'.join([prefix, f.identifier]) if prefix else f.identifier
n_with_values = f.options['n_with_values'].value
bitfields = f.options['fields'].value if n_with_values > 0 else None
item = FieldItem(prefix_name, name, adj_offset, size, str(f.units), f.desc, n_with_values, bitfields)
items.append(item)
offset += size
return (items, offset, multiplier) | python | def handle_fields(definitions, fields, prefix, offset, multiplier):
"""
Helper for handling naming and sizing of fields. It's terrible.
"""
items = []
for f in fields:
if f.type_id == "array" and f.options['fill'].value in CONSTRUCT_CODE:
prefix_name = '.'.join([prefix, f.identifier]) if prefix else f.identifier
n_with_values = f.options['n_with_values'].value
bitfields = f.options['fields'].value if n_with_values > 0 else None
if 'size' in f.options:
name = "%s[%s]" % (f.options['fill'].value, str(f.options['size'].value))
size = field_sizes[f.options['fill'].value] * f.options['size'].value
item = FieldItem(prefix_name, name, offset, size,
str(f.units), f.desc, n_with_values, bitfields)
items.append(item)
offset += size
else:
name = "%s[%s]" % (f.options['fill'].value, "N")
multiplier = field_sizes[f.options['fill'].value]
size = field_sizes[f.options['fill'].value] * 1
item = FieldItem(prefix_name, name, offset, "N",
str(f.units), f.desc, n_with_values, bitfields)
items.append(item)
offset += size
elif f.type_id == "string":
prefix_name = '.'.join([prefix, f.identifier]) if prefix else f.identifier
n_with_values = f.options['n_with_values'].value
bitfields = f.options['fields'].value if n_with_values > 0 else None
if 'size' in f.options:
name = "string"
size = field_sizes['u8'] * f.options['size'].value
item = FieldItem(prefix_name, name, offset, size,
str(f.units), f.desc, n_with_values, bitfields)
items.append(item)
offset += size
else:
name = "string"
size = field_sizes['u8']
multiplier = 1
item = FieldItem(prefix_name, name, offset, "N",
str(f.units), f.desc, n_with_values, bitfields)
items.append(item)
offset += size
elif f.type_id == "array":
name = f.options['fill'].value
definition = next(d for d in definitions if name == d.identifier)
prefix_name = '.'.join([prefix, f.identifier]) if prefix else f.identifier
(new_items, new_offset, new_multiplier) \
= handle_fields(definitions,
definition.fields,
prefix_name + "[N]",
offset,
multiplier)
multiplier = new_offset - offset
(newer_items, newer_offset, newer_multiplier) \
= handle_fields(definitions,
definition.fields,
prefix_name + "[N]", offset,
multiplier)
items += newer_items
offset = newer_offset
elif f.type_id not in CONSTRUCT_CODE:
name = f.type_id
definition = next(d for d in definitions if name == d.identifier)
prefix_name = '.'.join([prefix, f.identifier]) if prefix else f.identifier
(new_items, new_offset, new_multiplier) \
= handle_fields(definitions,
definition.fields,
prefix_name,
offset,
multiplier)
items += new_items
offset = new_offset
multiplier = new_multiplier
else:
size = field_sizes[f.type_id]
name = f.type_id
adj_offset = "%dN+%d" % (multiplier, offset) if multiplier else offset
prefix_name = '.'.join([prefix, f.identifier]) if prefix else f.identifier
n_with_values = f.options['n_with_values'].value
bitfields = f.options['fields'].value if n_with_values > 0 else None
item = FieldItem(prefix_name, name, adj_offset, size, str(f.units), f.desc, n_with_values, bitfields)
items.append(item)
offset += size
return (items, offset, multiplier) | Helper for handling naming and sizing of fields. It's terrible. | https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/latex.py#L168-L253 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.