repository_name
stringlengths 5
67
| func_path_in_repository
stringlengths 4
234
| func_name
stringlengths 0
314
| whole_func_string
stringlengths 52
3.87M
| language
stringclasses 6
values | func_code_string
stringlengths 52
3.87M
| func_documentation_string
stringlengths 1
47.2k
| func_code_url
stringlengths 85
339
|
---|---|---|---|---|---|---|---|
PmagPy/PmagPy | pmagpy/ipmag.py | combine_magic | def combine_magic(filenames, outfile='measurements.txt', data_model=3, magic_table='measurements',
dir_path=".", input_dir_path=""):
"""
Takes a list of magic-formatted files, concatenates them, and creates a
single file. Returns output filename if the operation was successful.
Parameters
-----------
filenames : list of MagIC formatted files
outfile : name of output file [e.g., measurements.txt]
data_model : data model number (2.5 or 3), default 3
magic_table : name of magic table, default 'measurements'
dir_path : str
output directory, default "."
input_dir_path : str
input file directory (if different from dir_path), default ""
Returns
----------
outfile name if success, False if failure
"""
input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path)
if float(data_model) == 3.0:
outfile = pmag.resolve_file_name(outfile, output_dir_path)
output_dir_path, file_name = os.path.split(outfile)
con = cb.Contribution(output_dir_path, read_tables=[])
# make sure files actually exist
filenames = [pmag.resolve_file_name(f, input_dir_path) for f in filenames]
#filenames = [os.path.realpath(f) for f in filenames]
filenames = [f for f in filenames if os.path.exists(f)]
if not filenames:
print("You have provided no valid file paths, so nothing will be combined".format(
magic_table))
return False
# figure out file type from first of files to join
with open(filenames[0]) as f:
file_type = f.readline().split()[1]
if file_type in ['er_specimens', 'er_samples', 'er_sites',
'er_locations', 'er_ages', 'pmag_specimens',
'pmag_samples', 'pmag_sites', 'pmag_results',
'magic_measurements', 'rmag_anisotropy',
'rmag_results', 'rmag_specimens']:
print(
'-W- You are working in MagIC 3 but have provided a MagIC 2.5 file: {}'.format(file_type))
return False
if file_type not in con.table_names:
file_type = magic_table
infiles = [pd.read_csv(infile, sep='\t', header=1)
for infile in filenames]
df = pd.concat(infiles, ignore_index=True, sort=True)
# drop any fully duplicated rows
df.drop_duplicates(inplace=True)
con.add_magic_table(dtype=file_type, df=df)
# drop any mostly empty rows IF they have duplicate index
parent, child = con.get_parent_and_child(file_type)
ignore_cols = [col[:-1] for col in [file_type, parent] if col]
ignore_cols.extend(['software_packages', 'citations'])
con.tables[file_type].drop_duplicate_rows(ignore_cols)
# correctly handle measurements.sequence column
if 'sequence' in con.tables[file_type].df:
con.tables[file_type].df['sequence'] = range(1, len(con.tables[file_type].df) + 1)
# write table to file, use custom name
res = con.write_table_to_file(file_type, custom_name=file_name)
return res
else:
datasets = []
if not filenames:
print("You must provide at least one file")
return False
for infile in filenames:
if not os.path.isfile(infile):
print("{} is not a valid file name".format(infile))
return False
try:
dataset, file_type = pmag.magic_read(infile)
except IndexError:
print('-W- Could not get records from {}'.format(infile))
print(' Skipping...')
continue
print("File ", infile, " read in with ", len(dataset), " records")
for rec in dataset:
datasets.append(rec)
Recs, keys = pmag.fillkeys(datasets)
if Recs:
pmag.magic_write(outfile, Recs, file_type)
print("All records stored in ", outfile)
return outfile
print("No file could be created")
return False | python | def combine_magic(filenames, outfile='measurements.txt', data_model=3, magic_table='measurements',
dir_path=".", input_dir_path=""):
"""
Takes a list of magic-formatted files, concatenates them, and creates a
single file. Returns output filename if the operation was successful.
Parameters
-----------
filenames : list of MagIC formatted files
outfile : name of output file [e.g., measurements.txt]
data_model : data model number (2.5 or 3), default 3
magic_table : name of magic table, default 'measurements'
dir_path : str
output directory, default "."
input_dir_path : str
input file directory (if different from dir_path), default ""
Returns
----------
outfile name if success, False if failure
"""
input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path)
if float(data_model) == 3.0:
outfile = pmag.resolve_file_name(outfile, output_dir_path)
output_dir_path, file_name = os.path.split(outfile)
con = cb.Contribution(output_dir_path, read_tables=[])
# make sure files actually exist
filenames = [pmag.resolve_file_name(f, input_dir_path) for f in filenames]
#filenames = [os.path.realpath(f) for f in filenames]
filenames = [f for f in filenames if os.path.exists(f)]
if not filenames:
print("You have provided no valid file paths, so nothing will be combined".format(
magic_table))
return False
# figure out file type from first of files to join
with open(filenames[0]) as f:
file_type = f.readline().split()[1]
if file_type in ['er_specimens', 'er_samples', 'er_sites',
'er_locations', 'er_ages', 'pmag_specimens',
'pmag_samples', 'pmag_sites', 'pmag_results',
'magic_measurements', 'rmag_anisotropy',
'rmag_results', 'rmag_specimens']:
print(
'-W- You are working in MagIC 3 but have provided a MagIC 2.5 file: {}'.format(file_type))
return False
if file_type not in con.table_names:
file_type = magic_table
infiles = [pd.read_csv(infile, sep='\t', header=1)
for infile in filenames]
df = pd.concat(infiles, ignore_index=True, sort=True)
# drop any fully duplicated rows
df.drop_duplicates(inplace=True)
con.add_magic_table(dtype=file_type, df=df)
# drop any mostly empty rows IF they have duplicate index
parent, child = con.get_parent_and_child(file_type)
ignore_cols = [col[:-1] for col in [file_type, parent] if col]
ignore_cols.extend(['software_packages', 'citations'])
con.tables[file_type].drop_duplicate_rows(ignore_cols)
# correctly handle measurements.sequence column
if 'sequence' in con.tables[file_type].df:
con.tables[file_type].df['sequence'] = range(1, len(con.tables[file_type].df) + 1)
# write table to file, use custom name
res = con.write_table_to_file(file_type, custom_name=file_name)
return res
else:
datasets = []
if not filenames:
print("You must provide at least one file")
return False
for infile in filenames:
if not os.path.isfile(infile):
print("{} is not a valid file name".format(infile))
return False
try:
dataset, file_type = pmag.magic_read(infile)
except IndexError:
print('-W- Could not get records from {}'.format(infile))
print(' Skipping...')
continue
print("File ", infile, " read in with ", len(dataset), " records")
for rec in dataset:
datasets.append(rec)
Recs, keys = pmag.fillkeys(datasets)
if Recs:
pmag.magic_write(outfile, Recs, file_type)
print("All records stored in ", outfile)
return outfile
print("No file could be created")
return False | Takes a list of magic-formatted files, concatenates them, and creates a
single file. Returns output filename if the operation was successful.
Parameters
-----------
filenames : list of MagIC formatted files
outfile : name of output file [e.g., measurements.txt]
data_model : data model number (2.5 or 3), default 3
magic_table : name of magic table, default 'measurements'
dir_path : str
output directory, default "."
input_dir_path : str
input file directory (if different from dir_path), default ""
Returns
----------
outfile name if success, False if failure | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L2643-L2733 |
PmagPy/PmagPy | pmagpy/ipmag.py | ani_depthplot2 | def ani_depthplot2(ani_file='rmag_anisotropy.txt', meas_file='magic_measurements.txt', samp_file='er_samples.txt', age_file=None, sum_file=None, fmt='svg', dmin=-1, dmax=-1, depth_scale='sample_core_depth', dir_path='.'):
"""
returns matplotlib figure with anisotropy data plotted against depth
available depth scales: 'sample_composite_depth', 'sample_core_depth', or 'age' (you must provide an age file to use this option)
"""
pcol = 4
tint = 9
plots = 0
# format files to use full path
# os.path.join(dir_path, ani_file)
ani_file = pmag.resolve_file_name(ani_file, dir_path)
if not os.path.isfile(ani_file):
print("Could not find rmag_anisotropy type file: {}.\nPlease provide a valid file path and try again".format(ani_file))
return False, "Could not find rmag_anisotropy type file: {}.\nPlease provide a valid file path and try again".format(ani_file)
# os.path.join(dir_path, meas_file)
meas_file = pmag.resolve_file_name(meas_file, dir_path)
if age_file:
if not os.path.isfile(age_file):
print(
'Warning: you have provided an invalid age file. Attempting to use sample file instead')
age_file = None
depth_scale = 'sample_core_depth'
# os.path.join(dir_path, samp_file)
samp_file = pmag.resolve_file_name(samp_file, dir_path)
else:
# os.path.join(dir_path, age_file)
samp_file = pmag.resolve_file_name(samp_file, dir_path)
depth_scale = 'age'
print(
'Warning: you have provided an er_ages format file, which will take precedence over er_samples')
else:
samp_file = pmag.resolve_file_name(samp_file, dir_path)
label = 1
if sum_file:
sum_file = os.path.join(dir_path, sum_file)
dmin, dmax = float(dmin), float(dmax)
# get data read in
isbulk = 0 # tests if there are bulk susceptibility measurements
AniData, file_type = pmag.magic_read(ani_file) # read in tensor elements
if not age_file:
# read in sample depth info from er_sample.txt format file
Samps, file_type = pmag.magic_read(samp_file)
else:
# read in sample age info from er_ages.txt format file
Samps, file_type = pmag.magic_read(samp_file)
age_unit = Samps[0]['age_unit']
for s in Samps:
# change to upper case for every sample name
s['er_sample_name'] = s['er_sample_name'].upper()
Meas, file_type = pmag.magic_read(meas_file)
# print 'meas_file', meas_file
# print 'file_type', file_type
if file_type == 'magic_measurements':
isbulk = 1
Data = []
Bulks = []
BulkDepths = []
for rec in AniData:
# look for depth record for this sample
samprecs = pmag.get_dictitem(Samps, 'er_sample_name',
rec['er_sample_name'].upper(), 'T')
# see if there are non-blank depth data
sampdepths = pmag.get_dictitem(samprecs, depth_scale, '', 'F')
if dmax != -1:
# fishes out records within depth bounds
sampdepths = pmag.get_dictitem(
sampdepths, depth_scale, dmax, 'max')
sampdepths = pmag.get_dictitem(
sampdepths, depth_scale, dmin, 'min')
if len(sampdepths) > 0: # if there are any....
# set the core depth of this record
rec['core_depth'] = sampdepths[0][depth_scale]
Data.append(rec) # fish out data with core_depth
if isbulk: # if there are bulk data
chis = pmag.get_dictitem(
Meas, 'er_specimen_name', rec['er_specimen_name'], 'T')
# get the non-zero values for this specimen
chis = pmag.get_dictitem(
chis, 'measurement_chi_volume', '', 'F')
if len(chis) > 0: # if there are any....
# put in microSI
Bulks.append(
1e6 * float(chis[0]['measurement_chi_volume']))
BulkDepths.append(float(sampdepths[0][depth_scale]))
if len(Bulks) > 0: # set min and max bulk values
bmin = min(Bulks)
bmax = max(Bulks)
xlab = "Depth (m)"
if len(Data) > 0:
location = Data[0]['er_location_name']
else:
return False, 'no data to plot'
# collect the data for plotting tau V3_inc and V1_dec
Depths, Tau1, Tau2, Tau3, V3Incs, P, V1Decs = [], [], [], [], [], [], []
F23s = []
Axs = [] # collect the plot ids
# START HERE
if len(Bulks) > 0:
pcol += 1
# get all the s1 values from Data as floats
s1 = pmag.get_dictkey(Data, 'anisotropy_s1', 'f')
s2 = pmag.get_dictkey(Data, 'anisotropy_s2', 'f')
s3 = pmag.get_dictkey(Data, 'anisotropy_s3', 'f')
s4 = pmag.get_dictkey(Data, 'anisotropy_s4', 'f')
s5 = pmag.get_dictkey(Data, 'anisotropy_s5', 'f')
s6 = pmag.get_dictkey(Data, 'anisotropy_s6', 'f')
nmeas = pmag.get_dictkey(Data, 'anisotropy_n', 'int')
sigma = pmag.get_dictkey(Data, 'anisotropy_sigma', 'f')
Depths = pmag.get_dictkey(Data, 'core_depth', 'f')
# Ss=np.array([s1,s4,s5,s4,s2,s6,s5,s6,s3]).transpose() # make an array
Ss = np.array([s1, s2, s3, s4, s5, s6]).transpose() # make an array
# Ts=np.reshape(Ss,(len(Ss),3,-1)) # and re-shape to be n-length array of
# 3x3 sub-arrays
for k in range(len(Depths)):
# tau,Evecs= pmag.tauV(Ts[k]) # get the sorted eigenvalues and eigenvectors
# v3=pmag.cart2dir(Evecs[2])[1] # convert to inclination of the minimum
# eigenvector
fpars = pmag.dohext(nmeas[k] - 6, sigma[k], Ss[k])
V3Incs.append(fpars['v3_inc'])
V1Decs.append(fpars['v1_dec'])
Tau1.append(fpars['t1'])
Tau2.append(fpars['t2'])
Tau3.append(fpars['t3'])
P.append(old_div(Tau1[-1], Tau3[-1]))
F23s.append(fpars['F23'])
if len(Depths) > 0:
if dmax == -1:
dmax = max(Depths)
dmin = min(Depths)
tau_min = 1
for t in Tau3:
if t > 0 and t < tau_min:
tau_min = t
tau_max = max(Tau1)
# tau_min=min(Tau3)
P_max = max(P)
P_min = min(P)
# dmax=dmax+.05*dmax
# dmin=dmin-.05*dmax
main_plot = plt.figure(1, figsize=(10, 8)) # make the figure
version_num = pmag.get_version()
plt.figtext(.02, .01, version_num) # attach the pmagpy version number
ax = plt.subplot(1, pcol, 1) # make the first column
Axs.append(ax)
ax.plot(Tau1, Depths, 'rs')
ax.plot(Tau2, Depths, 'b^')
ax.plot(Tau3, Depths, 'ko')
if sum_file:
core_depth_key, core_label_key, Cores = read_core_csv_file(
sum_file)
for core in Cores:
depth = float(core[core_depth_key])
if depth > dmin and depth < dmax:
plt.plot([0, 90], [depth, depth], 'b--')
ax.axis([tau_min, tau_max, dmax, dmin])
ax.set_xlabel('Eigenvalues')
if depth_scale == 'sample_core_depth':
ax.set_ylabel('Depth (mbsf)')
elif depth_scale == 'age':
ax.set_ylabel('Age (' + age_unit + ')')
else:
ax.set_ylabel('Depth (mcd)')
ax2 = plt.subplot(1, pcol, 2) # make the second column
ax2.plot(P, Depths, 'rs')
ax2.axis([P_min, P_max, dmax, dmin])
ax2.set_xlabel('P')
ax2.set_title(location)
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if depth > dmin and depth < dmax:
plt.plot([0, 90], [depth, depth], 'b--')
Axs.append(ax2)
ax3 = plt.subplot(1, pcol, 3)
Axs.append(ax3)
ax3.plot(V3Incs, Depths, 'ko')
ax3.axis([0, 90, dmax, dmin])
ax3.set_xlabel('V3 Inclination')
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if depth > dmin and depth < dmax:
plt.plot([0, 90], [depth, depth], 'b--')
ax4 = plt.subplot(1, np.abs(pcol), 4)
Axs.append(ax4)
ax4.plot(V1Decs, Depths, 'rs')
ax4.axis([0, 360, dmax, dmin])
ax4.set_xlabel('V1 Declination')
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if depth >= dmin and depth <= dmax:
plt.plot([0, 360], [depth, depth], 'b--')
if pcol == 4 and label == 1:
plt.text(360, depth + tint, core[core_label_key])
# ax5=plt.subplot(1,np.abs(pcol),5)
# Axs.append(ax5)
# ax5.plot(F23s,Depths,'rs')
# bounds=ax5.axis()
# ax5.axis([bounds[0],bounds[1],dmax,dmin])
# ax5.set_xlabel('F_23')
# ax5.semilogx()
# if sum_file:
# for core in Cores:
# depth=float(core[core_depth_key])
# if depth>=dmin and depth<=dmax:
# plt.plot([bounds[0],bounds[1]],[depth,depth],'b--')
# if pcol==5 and label==1:plt.text(bounds[1],depth+tint,core[core_label_key])
# if pcol==6:
if pcol == 5:
# ax6=plt.subplot(1,pcol,6)
ax6 = plt.subplot(1, pcol, 5)
Axs.append(ax6)
ax6.plot(Bulks, BulkDepths, 'bo')
ax6.axis([bmin - 1, 1.1 * bmax, dmax, dmin])
ax6.set_xlabel('Bulk Susc. (uSI)')
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if depth >= dmin and depth <= dmax:
plt.plot([0, bmax], [depth, depth], 'b--')
if label == 1:
plt.text(1.1 * bmax, depth + tint,
core[core_label_key])
for x in Axs:
# this makes the x-tick labels more reasonable - they were
# overcrowded using the defaults
pmagplotlib.delticks(x)
fig_name = location + '_ani_depthplot.' + fmt
return main_plot, fig_name
else:
return False, "No data to plot" | python | def ani_depthplot2(ani_file='rmag_anisotropy.txt', meas_file='magic_measurements.txt', samp_file='er_samples.txt', age_file=None, sum_file=None, fmt='svg', dmin=-1, dmax=-1, depth_scale='sample_core_depth', dir_path='.'):
"""
returns matplotlib figure with anisotropy data plotted against depth
available depth scales: 'sample_composite_depth', 'sample_core_depth', or 'age' (you must provide an age file to use this option)
"""
pcol = 4
tint = 9
plots = 0
# format files to use full path
# os.path.join(dir_path, ani_file)
ani_file = pmag.resolve_file_name(ani_file, dir_path)
if not os.path.isfile(ani_file):
print("Could not find rmag_anisotropy type file: {}.\nPlease provide a valid file path and try again".format(ani_file))
return False, "Could not find rmag_anisotropy type file: {}.\nPlease provide a valid file path and try again".format(ani_file)
# os.path.join(dir_path, meas_file)
meas_file = pmag.resolve_file_name(meas_file, dir_path)
if age_file:
if not os.path.isfile(age_file):
print(
'Warning: you have provided an invalid age file. Attempting to use sample file instead')
age_file = None
depth_scale = 'sample_core_depth'
# os.path.join(dir_path, samp_file)
samp_file = pmag.resolve_file_name(samp_file, dir_path)
else:
# os.path.join(dir_path, age_file)
samp_file = pmag.resolve_file_name(samp_file, dir_path)
depth_scale = 'age'
print(
'Warning: you have provided an er_ages format file, which will take precedence over er_samples')
else:
samp_file = pmag.resolve_file_name(samp_file, dir_path)
label = 1
if sum_file:
sum_file = os.path.join(dir_path, sum_file)
dmin, dmax = float(dmin), float(dmax)
# get data read in
isbulk = 0 # tests if there are bulk susceptibility measurements
AniData, file_type = pmag.magic_read(ani_file) # read in tensor elements
if not age_file:
# read in sample depth info from er_sample.txt format file
Samps, file_type = pmag.magic_read(samp_file)
else:
# read in sample age info from er_ages.txt format file
Samps, file_type = pmag.magic_read(samp_file)
age_unit = Samps[0]['age_unit']
for s in Samps:
# change to upper case for every sample name
s['er_sample_name'] = s['er_sample_name'].upper()
Meas, file_type = pmag.magic_read(meas_file)
# print 'meas_file', meas_file
# print 'file_type', file_type
if file_type == 'magic_measurements':
isbulk = 1
Data = []
Bulks = []
BulkDepths = []
for rec in AniData:
# look for depth record for this sample
samprecs = pmag.get_dictitem(Samps, 'er_sample_name',
rec['er_sample_name'].upper(), 'T')
# see if there are non-blank depth data
sampdepths = pmag.get_dictitem(samprecs, depth_scale, '', 'F')
if dmax != -1:
# fishes out records within depth bounds
sampdepths = pmag.get_dictitem(
sampdepths, depth_scale, dmax, 'max')
sampdepths = pmag.get_dictitem(
sampdepths, depth_scale, dmin, 'min')
if len(sampdepths) > 0: # if there are any....
# set the core depth of this record
rec['core_depth'] = sampdepths[0][depth_scale]
Data.append(rec) # fish out data with core_depth
if isbulk: # if there are bulk data
chis = pmag.get_dictitem(
Meas, 'er_specimen_name', rec['er_specimen_name'], 'T')
# get the non-zero values for this specimen
chis = pmag.get_dictitem(
chis, 'measurement_chi_volume', '', 'F')
if len(chis) > 0: # if there are any....
# put in microSI
Bulks.append(
1e6 * float(chis[0]['measurement_chi_volume']))
BulkDepths.append(float(sampdepths[0][depth_scale]))
if len(Bulks) > 0: # set min and max bulk values
bmin = min(Bulks)
bmax = max(Bulks)
xlab = "Depth (m)"
if len(Data) > 0:
location = Data[0]['er_location_name']
else:
return False, 'no data to plot'
# collect the data for plotting tau V3_inc and V1_dec
Depths, Tau1, Tau2, Tau3, V3Incs, P, V1Decs = [], [], [], [], [], [], []
F23s = []
Axs = [] # collect the plot ids
# START HERE
if len(Bulks) > 0:
pcol += 1
# get all the s1 values from Data as floats
s1 = pmag.get_dictkey(Data, 'anisotropy_s1', 'f')
s2 = pmag.get_dictkey(Data, 'anisotropy_s2', 'f')
s3 = pmag.get_dictkey(Data, 'anisotropy_s3', 'f')
s4 = pmag.get_dictkey(Data, 'anisotropy_s4', 'f')
s5 = pmag.get_dictkey(Data, 'anisotropy_s5', 'f')
s6 = pmag.get_dictkey(Data, 'anisotropy_s6', 'f')
nmeas = pmag.get_dictkey(Data, 'anisotropy_n', 'int')
sigma = pmag.get_dictkey(Data, 'anisotropy_sigma', 'f')
Depths = pmag.get_dictkey(Data, 'core_depth', 'f')
# Ss=np.array([s1,s4,s5,s4,s2,s6,s5,s6,s3]).transpose() # make an array
Ss = np.array([s1, s2, s3, s4, s5, s6]).transpose() # make an array
# Ts=np.reshape(Ss,(len(Ss),3,-1)) # and re-shape to be n-length array of
# 3x3 sub-arrays
for k in range(len(Depths)):
# tau,Evecs= pmag.tauV(Ts[k]) # get the sorted eigenvalues and eigenvectors
# v3=pmag.cart2dir(Evecs[2])[1] # convert to inclination of the minimum
# eigenvector
fpars = pmag.dohext(nmeas[k] - 6, sigma[k], Ss[k])
V3Incs.append(fpars['v3_inc'])
V1Decs.append(fpars['v1_dec'])
Tau1.append(fpars['t1'])
Tau2.append(fpars['t2'])
Tau3.append(fpars['t3'])
P.append(old_div(Tau1[-1], Tau3[-1]))
F23s.append(fpars['F23'])
if len(Depths) > 0:
if dmax == -1:
dmax = max(Depths)
dmin = min(Depths)
tau_min = 1
for t in Tau3:
if t > 0 and t < tau_min:
tau_min = t
tau_max = max(Tau1)
# tau_min=min(Tau3)
P_max = max(P)
P_min = min(P)
# dmax=dmax+.05*dmax
# dmin=dmin-.05*dmax
main_plot = plt.figure(1, figsize=(10, 8)) # make the figure
version_num = pmag.get_version()
plt.figtext(.02, .01, version_num) # attach the pmagpy version number
ax = plt.subplot(1, pcol, 1) # make the first column
Axs.append(ax)
ax.plot(Tau1, Depths, 'rs')
ax.plot(Tau2, Depths, 'b^')
ax.plot(Tau3, Depths, 'ko')
if sum_file:
core_depth_key, core_label_key, Cores = read_core_csv_file(
sum_file)
for core in Cores:
depth = float(core[core_depth_key])
if depth > dmin and depth < dmax:
plt.plot([0, 90], [depth, depth], 'b--')
ax.axis([tau_min, tau_max, dmax, dmin])
ax.set_xlabel('Eigenvalues')
if depth_scale == 'sample_core_depth':
ax.set_ylabel('Depth (mbsf)')
elif depth_scale == 'age':
ax.set_ylabel('Age (' + age_unit + ')')
else:
ax.set_ylabel('Depth (mcd)')
ax2 = plt.subplot(1, pcol, 2) # make the second column
ax2.plot(P, Depths, 'rs')
ax2.axis([P_min, P_max, dmax, dmin])
ax2.set_xlabel('P')
ax2.set_title(location)
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if depth > dmin and depth < dmax:
plt.plot([0, 90], [depth, depth], 'b--')
Axs.append(ax2)
ax3 = plt.subplot(1, pcol, 3)
Axs.append(ax3)
ax3.plot(V3Incs, Depths, 'ko')
ax3.axis([0, 90, dmax, dmin])
ax3.set_xlabel('V3 Inclination')
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if depth > dmin and depth < dmax:
plt.plot([0, 90], [depth, depth], 'b--')
ax4 = plt.subplot(1, np.abs(pcol), 4)
Axs.append(ax4)
ax4.plot(V1Decs, Depths, 'rs')
ax4.axis([0, 360, dmax, dmin])
ax4.set_xlabel('V1 Declination')
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if depth >= dmin and depth <= dmax:
plt.plot([0, 360], [depth, depth], 'b--')
if pcol == 4 and label == 1:
plt.text(360, depth + tint, core[core_label_key])
# ax5=plt.subplot(1,np.abs(pcol),5)
# Axs.append(ax5)
# ax5.plot(F23s,Depths,'rs')
# bounds=ax5.axis()
# ax5.axis([bounds[0],bounds[1],dmax,dmin])
# ax5.set_xlabel('F_23')
# ax5.semilogx()
# if sum_file:
# for core in Cores:
# depth=float(core[core_depth_key])
# if depth>=dmin and depth<=dmax:
# plt.plot([bounds[0],bounds[1]],[depth,depth],'b--')
# if pcol==5 and label==1:plt.text(bounds[1],depth+tint,core[core_label_key])
# if pcol==6:
if pcol == 5:
# ax6=plt.subplot(1,pcol,6)
ax6 = plt.subplot(1, pcol, 5)
Axs.append(ax6)
ax6.plot(Bulks, BulkDepths, 'bo')
ax6.axis([bmin - 1, 1.1 * bmax, dmax, dmin])
ax6.set_xlabel('Bulk Susc. (uSI)')
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if depth >= dmin and depth <= dmax:
plt.plot([0, bmax], [depth, depth], 'b--')
if label == 1:
plt.text(1.1 * bmax, depth + tint,
core[core_label_key])
for x in Axs:
# this makes the x-tick labels more reasonable - they were
# overcrowded using the defaults
pmagplotlib.delticks(x)
fig_name = location + '_ani_depthplot.' + fmt
return main_plot, fig_name
else:
return False, "No data to plot" | returns matplotlib figure with anisotropy data plotted against depth
available depth scales: 'sample_composite_depth', 'sample_core_depth', or 'age' (you must provide an age file to use this option) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L2736-L2977 |
PmagPy/PmagPy | pmagpy/ipmag.py | ani_depthplot | def ani_depthplot(spec_file='specimens.txt', samp_file='samples.txt',
meas_file='measurements.txt', site_file='sites.txt',
age_file="", sum_file="", fmt='svg', dmin=-1, dmax=-1,
depth_scale='core_depth', dir_path='.', contribution=None):
"""
returns matplotlib figure with anisotropy data plotted against depth
available depth scales: 'composite_depth', 'core_depth' or 'age' (you must provide an age file to use this option).
You must provide valid specimens and sites files, and either a samples or an ages file.
You may additionally provide measurements and a summary file (csv).
Parameters
----------
spec_file : str, default "specimens.txt"
samp_file : str, default "samples.txt"
meas_file : str, default "measurements.txt"
site_file : str, default "sites.txt"
age_file : str, default ""
sum_file : str, default ""
fmt : str, default "svg"
format for figures, ["svg", "jpg", "pdf", "png"]
dmin : number, default -1
minimum depth to plot (if -1, default to plotting all)
dmax : number, default -1
maximum depth to plot (if -1, default to plotting all)
depth_scale : str, default "core_depth"
scale to plot, ['composite_depth', 'core_depth', 'age'].
if 'age' is selected, you must provide an ages file.
dir_path : str, default "."
directory for input files
contribution : cb.Contribution, default None
if provided, use Contribution object instead of reading in
data from files
Returns
---------
plot : matplotlib plot, or False if no plot could be created
name : figure name, or error message if no plot could be created
"""
if depth_scale == 'sample_core_depth':
depth_scale = 'core_depth'
if depth_scale == 'sample_composite_depth':
depth_scale = 'composite_depth'
pcol = 4
tint = 9
plots = 0
dmin, dmax = float(dmin), float(dmax)
# if contribution object is not provided, read in data from files
if isinstance(contribution, cb.Contribution):
con = contribution
else:
# format files to use full path
meas_file = pmag.resolve_file_name(meas_file, dir_path)
spec_file = pmag.resolve_file_name(spec_file, dir_path)
samp_file = pmag.resolve_file_name(samp_file, dir_path)
site_file = pmag.resolve_file_name(site_file, dir_path)
if age_file:
age_file = pmag.resolve_file_name(age_file, dir_path)
if not os.path.isfile(age_file):
print(
'Warning: you have provided an invalid age file. Attempting to use sample file instead')
age_file = None
depth_scale = 'core_depth'
else:
samp_file = age_file
depth_scale = 'age'
print(
'Warning: you have provided an ages format file, which will take precedence over samples')
samp_file = pmag.resolve_file_name(samp_file, dir_path)
label = 1
if sum_file:
sum_file = pmag.resolve_file_name(sum_file, dir_path)
core_df=pd.read_csv(sum_file)
depths=core_df['Top depth cored CSF (m)'].values
# contribution
dir_path = os.path.split(spec_file)[0]
tables = ['measurements', 'specimens', 'samples', 'sites']
con = cb.Contribution(dir_path, read_tables=tables,
custom_filenames={'measurements': meas_file, 'specimens': spec_file,
'samples': samp_file, 'sites': site_file})
for ftype in ['specimens', 'samples', 'sites']:
if not con.tables.get(ftype):
if ftype == 'samples':
if con.tables.get('ages'):
depth_scale = 'age'
continue
print("-W- This function requires a {} file to run.".format(ftype))
print(" Make sure you include one in your working directory")
return False, "missing required file type: {}".format(ftype)
# propagate needed values
con.propagate_cols(['core_depth'], 'samples', 'sites')
con.propagate_location_to_specimens()
# get data read in
isbulk = 0 # tests if there are bulk susceptibility measurements
ani_file = spec_file
SampData = con.tables['samples'].df
AniData = con.tables['specimens'].df
# add sample into specimens (AniData)
AniData = pd.merge(
AniData, SampData[['sample', depth_scale]], how='inner', on='sample')
# trim down AniData
cond = AniData[depth_scale].astype(bool)
AniData = AniData[cond]
if dmin != -1:
AniData = AniData[AniData[depth_scale] < dmax]
if dmax != -1:
AniData = AniData[AniData[depth_scale] > dmin]
AniData['core_depth'] = AniData[depth_scale]
if not age_file:
Samps = con.tables['samples'].convert_to_pmag_data_list()
else:
con.add_magic_table(dtype='ages', fname=age_file)
Samps = con.tables['ages'].convert_to_pmag_data_list()
# get age unit
age_unit = con.tables['ages'].df['age_unit'][0]
# propagate ages down to sample level
for s in Samps:
# change to upper case for every sample name
s['sample'] = s['sample'].upper()
if 'measurements' in con.tables:
isbulk = 1
Meas = con.tables['measurements'].df # convert_to_pmag_data_list()
if isbulk:
Meas = Meas[Meas['specimen'].astype('bool')]
Meas = Meas[Meas['susc_chi_volume'].astype(bool)]
# add core_depth into Measurements dataframe
Meas = pd.merge(Meas[['susc_chi_volume', 'specimen']], AniData[[
'specimen', 'core_depth']], how='inner', on='specimen')
Bulks = list(Meas['susc_chi_volume'] * 1e6)
BulkDepths = list(Meas['core_depth'])
else:
Bulks, BulkDepths = [], []
# now turn Data from pandas dataframe to a list of dicts
Data = list(AniData.T.apply(dict))
if len(Bulks) > 0: # set min and max bulk values
bmin = min(Bulks)
bmax = max(Bulks)
xlab = "Depth (m)"
#
if len(Data) > 0:
location = Data[0].get('location', 'unknown')
if cb.is_null(location):
location = 'unknown'
try:
location = con.tables['sites'].df['location'][0]
except KeyError:
pass
else:
return False, 'no data to plot'
# collect the data for plotting tau V3_inc and V1_dec
Depths, Tau1, Tau2, Tau3, V3Incs, P, V1Decs = [], [], [], [], [], [], []
F23s = []
Axs = [] # collect the plot ids
if len(Bulks) > 0:
pcol += 1
Data = pmag.get_dictitem(Data, 'aniso_s', '', 'not_null')
# get all the s1 values from Data as floats
aniso_s = pmag.get_dictkey(Data, 'aniso_s', '')
aniso_s = [a.split(':') for a in aniso_s if a is not None]
#print('aniso_s', aniso_s)
s1 = [float(a[0]) for a in aniso_s]
s2 = [float(a[1]) for a in aniso_s]
s3 = [float(a[2]) for a in aniso_s]
s4 = [float(a[3]) for a in aniso_s]
s5 = [float(a[4]) for a in aniso_s]
s6 = [float(a[5]) for a in aniso_s]
# we are good with s1 - s2
nmeas = pmag.get_dictkey(Data, 'aniso_s_n_measurements', 'int')
sigma = pmag.get_dictkey(Data, 'aniso_s_sigma', 'f')
Depths = pmag.get_dictkey(Data, 'core_depth', 'f')
# Ss=np.array([s1,s4,s5,s4,s2,s6,s5,s6,s3]).transpose() # make an array
Ss = np.array([s1, s2, s3, s4, s5, s6]).transpose() # make an array
# Ts=np.reshape(Ss,(len(Ss),3,-1)) # and re-shape to be n-length array of
# 3x3 sub-arrays
for k in range(len(Depths)):
# tau,Evecs= pmag.tauV(Ts[k]) # get the sorted eigenvalues and eigenvectors
# v3=pmag.cart2dir(Evecs[2])[1] # convert to inclination of the minimum
# eigenvector
fpars = pmag.dohext(nmeas[k] - 6, sigma[k], Ss[k])
V3Incs.append(fpars['v3_inc'])
V1Decs.append(fpars['v1_dec'])
Tau1.append(fpars['t1'])
Tau2.append(fpars['t2'])
Tau3.append(fpars['t3'])
P.append(old_div(Tau1[-1], Tau3[-1]))
F23s.append(fpars['F23'])
if len(Depths) > 0:
if dmax == -1:
dmax = max(Depths)
dmin = min(Depths)
tau_min = 1
for t in Tau3:
if t > 0 and t < tau_min:
tau_min = t
tau_max = max(Tau1)
# tau_min=min(Tau3)
P_max = max(P)
P_min = min(P)
# dmax=dmax+.05*dmax
# dmin=dmin-.05*dmax
main_plot = plt.figure(1, figsize=(11, 7)) # make the figure
# main_plot = plt.figure(1, figsize=(10, 8)) # make the figure
version_num = pmag.get_version()
plt.figtext(.02, .01, version_num) # attach the pmagpy version number
ax = plt.subplot(1, pcol, 1) # make the first column
Axs.append(ax)
ax.plot(Tau1, Depths, 'rs')
ax.plot(Tau2, Depths, 'b^')
ax.plot(Tau3, Depths, 'ko')
if sum_file:
for depth in depths:
if depth >= dmin and depth < dmax:
plt.axhline(depth,color='blue',linestyle='dotted')
if tau_min>.3: tau_min=.3
if tau_max<.36: tau_max=.36
ax.axis([tau_min, tau_max, dmax, dmin])
ax.set_xlabel('Eigenvalues')
if depth_scale == 'core_depth':
ax.set_ylabel('Depth (mbsf)')
elif depth_scale == 'age':
ax.set_ylabel('Age (' + age_unit + ')')
else:
ax.set_ylabel('Depth (mcd)')
ax2 = plt.subplot(1, pcol, 2) # make the second column
ax2.yaxis.set_major_locator(plt.NullLocator())
ax2.plot(P, Depths, 'rs')
ax2.axis([P_min, P_max, dmax, dmin])
ax2.set_xlabel('P')
ax2.set_title(location)
if sum_file:
for depth in depths:
if depth >= dmin and depth < dmax:
plt.axhline(depth,color='blue',linestyle='dotted')
Axs.append(ax2)
ax3 = plt.subplot(1, pcol, 3)
Axs.append(ax3)
ax3.plot(V3Incs, Depths, 'ko')
ax3.axis([0, 90, dmax, dmin])
ax3.set_xlabel('V3 Inclination')
ax3.yaxis.set_major_locator(plt.NullLocator())
if sum_file:
for depth in depths:
if depth >= dmin and depth < dmax:
plt.axhline(depth,color='blue',linestyle='dotted')
ax4 = plt.subplot(1, np.abs(pcol), 4)
Axs.append(ax4)
ax4.plot(V1Decs, Depths, 'rs')
ax4.axis([0, 360, dmax, dmin])
ax4.set_xlabel('V1 Declination')
ax4.yaxis.set_major_locator(plt.NullLocator())
if sum_file:
for depth in depths:
if depth >= dmin and depth < dmax:
plt.axhline(depth,color='blue',linestyle='dotted')
# ax5=plt.subplot(1,np.abs(pcol),5)
# Axs.append(ax5)
# ax5.plot(F23s,Depths,'rs')
# bounds=ax5.axis()
# ax5.axis([bounds[0],bounds[1],dmax,dmin])
# ax5.set_xlabel('F_23')
# ax5.semilogx()
# if sum_file:
# for core in Cores:
# depth=float(core[core_depth_key])
# if depth>=dmin and depth<=dmax:
# plt.plot([bounds[0],bounds[1]],[depth,depth],'b--')
# if pcol==5 and label==1:plt.text(bounds[1],depth+tint,core[core_label_key])
# if pcol==6:
if pcol == 5:
# ax6=plt.subplot(1,pcol,6)
ax6 = plt.subplot(1, pcol, 5)
Axs.append(ax6)
ax6.plot(Bulks, BulkDepths, 'bo')
ax6.axis([bmin - 1, 1.1 * bmax, dmax, dmin])
ax6.set_xlabel('Bulk Susc. (uSI)')
ax6.yaxis.set_major_locator(plt.NullLocator())
if sum_file:
for depth in depths:
if depth >= dmin and depth < dmax:
plt.axhline(depth,color='blue',linestyle='dotted')
for x in Axs:
# this makes the x-tick labels more reasonable - they were
# overcrowded using the defaults
pmagplotlib.delticks(x)
fig_name = location + '_ani_depthplot.' + fmt
return main_plot, [fig_name]
else:
def ani_depthplot(spec_file='specimens.txt', samp_file='samples.txt',
                  meas_file='measurements.txt', site_file='sites.txt',
                  age_file="", sum_file="", fmt='svg', dmin=-1, dmax=-1,
                  depth_scale='core_depth', dir_path='.', contribution=None):
    """
    returns matplotlib figure with anisotropy data plotted against depth
    available depth scales: 'composite_depth', 'core_depth' or 'age' (you must provide an age file to use this option).
    You must provide valid specimens and sites files, and either a samples or an ages file.
    You may additionally provide measurements and a summary file (csv).

    Parameters
    ----------
    spec_file : str, default "specimens.txt"
    samp_file : str, default "samples.txt"
    meas_file : str, default "measurements.txt"
    site_file : str, default "sites.txt"
    age_file : str, default ""
    sum_file : str, default ""
    fmt : str, default "svg"
        format for figures, ["svg", "jpg", "pdf", "png"]
    dmin : number, default -1
        minimum depth to plot (if -1, default to plotting all)
    dmax : number, default -1
        maximum depth to plot (if -1, default to plotting all)
    depth_scale : str, default "core_depth"
        scale to plot, ['composite_depth', 'core_depth', 'age'].
        if 'age' is selected, you must provide an ages file.
    dir_path : str, default "."
        directory for input files
    contribution : cb.Contribution, default None
        if provided, use Contribution object instead of reading in
        data from files

    Returns
    ---------
    plot : matplotlib plot, or False if no plot could be created
    name : figure name, or error message if no plot could be created
    """
    # translate MagIC 2.5-style depth scale names to data model 3 names
    if depth_scale == 'sample_core_depth':
        depth_scale = 'core_depth'
    if depth_scale == 'sample_composite_depth':
        depth_scale = 'composite_depth'
    pcol = 4  # number of plot columns (a 5th is added if bulk susc. exists)
    dmin, dmax = float(dmin), float(dmax)
    # if contribution object is not provided, read in data from files
    if isinstance(contribution, cb.Contribution):
        con = contribution
    else:
        # format files to use full path
        meas_file = pmag.resolve_file_name(meas_file, dir_path)
        spec_file = pmag.resolve_file_name(spec_file, dir_path)
        samp_file = pmag.resolve_file_name(samp_file, dir_path)
        site_file = pmag.resolve_file_name(site_file, dir_path)
        if age_file:
            age_file = pmag.resolve_file_name(age_file, dir_path)
            if not os.path.isfile(age_file):
                print(
                    'Warning: you have provided an invalid age file. Attempting to use sample file instead')
                age_file = None
                depth_scale = 'core_depth'
            else:
                # a valid ages file takes the place of the samples file
                samp_file = age_file
                depth_scale = 'age'
                print(
                    'Warning: you have provided an ages format file, which will take precedence over samples')
        samp_file = pmag.resolve_file_name(samp_file, dir_path)
        # contribution
        dir_path = os.path.split(spec_file)[0]
        tables = ['measurements', 'specimens', 'samples', 'sites']
        con = cb.Contribution(dir_path, read_tables=tables,
                              custom_filenames={'measurements': meas_file, 'specimens': spec_file,
                                                'samples': samp_file, 'sites': site_file})
    depths = []
    if sum_file:
        # core tops from an IODP-style core summary csv; read here (outside
        # the file-reading branch) so it also works with a passed contribution
        sum_file = pmag.resolve_file_name(sum_file, dir_path)
        core_df = pd.read_csv(sum_file)
        depths = core_df['Top depth cored CSF (m)'].values
    for ftype in ['specimens', 'samples', 'sites']:
        if not con.tables.get(ftype):
            if ftype == 'samples':
                if con.tables.get('ages'):
                    depth_scale = 'age'
                    continue
            print("-W- This function requires a {} file to run.".format(ftype))
            print(" Make sure you include one in your working directory")
            return False, "missing required file type: {}".format(ftype)
    # propagate needed values
    con.propagate_cols(['core_depth'], 'samples', 'sites')
    con.propagate_location_to_specimens()
    # get data read in
    SampData = con.tables['samples'].df
    AniData = con.tables['specimens'].df
    # add sample depth into specimens (AniData)
    AniData = pd.merge(
        AniData, SampData[['sample', depth_scale]], how='inner', on='sample')
    # trim down AniData to specimens with a truthy depth value
    # NOTE(review): astype(bool) also drops a legitimate depth of exactly 0 --
    # original behavior preserved; confirm whether 0 m should be kept
    cond = AniData[depth_scale].astype(bool)
    AniData = AniData[cond]
    # FIX: the original paired the dmin test with the dmax filter (and vice
    # versa), so supplying only one bound filtered against the unset -1
    if dmax != -1:
        AniData = AniData[AniData[depth_scale] < dmax]
    if dmin != -1:
        AniData = AniData[AniData[depth_scale] > dmin]
    AniData['core_depth'] = AniData[depth_scale]
    if not age_file:
        Samps = con.tables['samples'].convert_to_pmag_data_list()
    else:
        con.add_magic_table(dtype='ages', fname=age_file)
        Samps = con.tables['ages'].convert_to_pmag_data_list()
        # get age unit (assumes all age records share one unit)
        age_unit = con.tables['ages'].df['age_unit'][0]
    for s in Samps:
        # change to upper case for every sample name
        s['sample'] = s['sample'].upper()
    isbulk = 0  # set when bulk susceptibility measurements are available
    if 'measurements' in con.tables:
        isbulk = 1
        Meas = con.tables['measurements'].df
        # guard: measurements without volume susceptibility would raise
        # KeyError below
        if 'susc_chi_volume' not in Meas.columns:
            isbulk = 0
    if isbulk:
        Meas = Meas[Meas['specimen'].astype('bool')]
        Meas = Meas[Meas['susc_chi_volume'].astype(bool)]
        # add core_depth into Measurements dataframe
        Meas = pd.merge(Meas[['susc_chi_volume', 'specimen']], AniData[[
                        'specimen', 'core_depth']], how='inner', on='specimen')
        Bulks = list(Meas['susc_chi_volume'] * 1e6)  # scale for plotting
        BulkDepths = list(Meas['core_depth'])
    else:
        Bulks, BulkDepths = [], []
    # now turn Data from pandas dataframe to a list of dicts
    Data = list(AniData.T.apply(dict))
    if len(Bulks) > 0:  # set min and max bulk values
        bmin = min(Bulks)
        bmax = max(Bulks)
    if len(Data) > 0:
        location = Data[0].get('location', 'unknown')
        if cb.is_null(location):
            location = 'unknown'
            try:
                location = con.tables['sites'].df['location'][0]
            except KeyError:
                pass
    else:
        return False, 'no data to plot'

    def _core_lines():
        # dotted horizontal line at each core top within the plotted range
        if sum_file:
            for depth in depths:
                if depth >= dmin and depth < dmax:
                    plt.axhline(depth, color='blue', linestyle='dotted')

    # collect the data for plotting tau, V3_inc and V1_dec
    Depths, Tau1, Tau2, Tau3, V3Incs, P, V1Decs = [], [], [], [], [], [], []
    F23s = []
    Axs = []  # collect the plot ids
    if len(Bulks) > 0:
        pcol += 1
    Data = pmag.get_dictitem(Data, 'aniso_s', '', 'not_null')
    # unpack the six tensor elements (stored as one colon-delimited string)
    aniso_s = pmag.get_dictkey(Data, 'aniso_s', '')
    aniso_s = [a.split(':') for a in aniso_s if a is not None]
    s1 = [float(a[0]) for a in aniso_s]
    s2 = [float(a[1]) for a in aniso_s]
    s3 = [float(a[2]) for a in aniso_s]
    s4 = [float(a[3]) for a in aniso_s]
    s5 = [float(a[4]) for a in aniso_s]
    s6 = [float(a[5]) for a in aniso_s]
    nmeas = pmag.get_dictkey(Data, 'aniso_s_n_measurements', 'int')
    sigma = pmag.get_dictkey(Data, 'aniso_s_sigma', 'f')
    Depths = pmag.get_dictkey(Data, 'core_depth', 'f')
    Ss = np.array([s1, s2, s3, s4, s5, s6]).transpose()  # make an array
    for k in range(len(Depths)):
        # Hext statistics: eigenvalues and principal directions per specimen
        fpars = pmag.dohext(nmeas[k] - 6, sigma[k], Ss[k])
        V3Incs.append(fpars['v3_inc'])
        V1Decs.append(fpars['v1_dec'])
        Tau1.append(fpars['t1'])
        Tau2.append(fpars['t2'])
        Tau3.append(fpars['t3'])
        # anisotropy degree; operands are floats, true division is safe
        P.append(Tau1[-1] / Tau3[-1])
        F23s.append(fpars['F23'])
    if len(Depths) > 0:
        if dmax == -1:
            dmax = max(Depths)
            dmin = min(Depths)
        # smallest positive tau3 sets the left edge of the eigenvalue axis
        tau_min = 1
        for t in Tau3:
            if t > 0 and t < tau_min:
                tau_min = t
        tau_max = max(Tau1)
        P_max = max(P)
        P_min = min(P)
        main_plot = plt.figure(1, figsize=(11, 7))  # make the figure
        version_num = pmag.get_version()
        plt.figtext(.02, .01, version_num)  # attach the pmagpy version number
        ax = plt.subplot(1, pcol, 1)  # eigenvalue column
        Axs.append(ax)
        ax.plot(Tau1, Depths, 'rs')
        ax.plot(Tau2, Depths, 'b^')
        ax.plot(Tau3, Depths, 'ko')
        _core_lines()
        if tau_min > .3:
            tau_min = .3
        if tau_max < .36:
            tau_max = .36
        ax.axis([tau_min, tau_max, dmax, dmin])
        ax.set_xlabel('Eigenvalues')
        if depth_scale == 'core_depth':
            ax.set_ylabel('Depth (mbsf)')
        elif depth_scale == 'age':
            ax.set_ylabel('Age (' + age_unit + ')')
        else:
            ax.set_ylabel('Depth (mcd)')
        ax2 = plt.subplot(1, pcol, 2)  # anisotropy degree (P) column
        ax2.yaxis.set_major_locator(plt.NullLocator())
        ax2.plot(P, Depths, 'rs')
        ax2.axis([P_min, P_max, dmax, dmin])
        ax2.set_xlabel('P')
        ax2.set_title(location)
        _core_lines()
        Axs.append(ax2)
        ax3 = plt.subplot(1, pcol, 3)  # V3 inclination column
        Axs.append(ax3)
        ax3.plot(V3Incs, Depths, 'ko')
        ax3.axis([0, 90, dmax, dmin])
        ax3.set_xlabel('V3 Inclination')
        ax3.yaxis.set_major_locator(plt.NullLocator())
        _core_lines()
        ax4 = plt.subplot(1, pcol, 4)  # V1 declination column
        Axs.append(ax4)
        ax4.plot(V1Decs, Depths, 'rs')
        ax4.axis([0, 360, dmax, dmin])
        ax4.set_xlabel('V1 Declination')
        ax4.yaxis.set_major_locator(plt.NullLocator())
        _core_lines()
        if pcol == 5:  # bulk susceptibility column, when measurements exist
            ax6 = plt.subplot(1, pcol, 5)
            Axs.append(ax6)
            ax6.plot(Bulks, BulkDepths, 'bo')
            ax6.axis([bmin - 1, 1.1 * bmax, dmax, dmin])
            ax6.set_xlabel('Bulk Susc. (uSI)')
            ax6.yaxis.set_major_locator(plt.NullLocator())
            _core_lines()
        for x in Axs:
            # default x-tick labels are overcrowded in multi-column figures
            pmagplotlib.delticks(x)
        fig_name = location + '_ani_depthplot.' + fmt
        return main_plot, [fig_name]
    else:
        return False, "No data to plot"
available depth scales: 'composite_depth', 'core_depth' or 'age' (you must provide an age file to use this option).
You must provide valid specimens and sites files, and either a samples or an ages file.
You may additionally provide measurements and a summary file (csv).
Parameters
----------
spec_file : str, default "specimens.txt"
samp_file : str, default "samples.txt"
meas_file : str, default "measurements.txt"
site_file : str, default "sites.txt"
age_file : str, default ""
sum_file : str, default ""
fmt : str, default "svg"
format for figures, ["svg", "jpg", "pdf", "png"]
dmin : number, default -1
minimum depth to plot (if -1, default to plotting all)
dmax : number, default -1
maximum depth to plot (if -1, default to plotting all)
depth_scale : str, default "core_depth"
scale to plot, ['composite_depth', 'core_depth', 'age'].
if 'age' is selected, you must provide an ages file.
dir_path : str, default "."
directory for input files
contribution : cb.Contribution, default None
if provided, use Contribution object instead of reading in
data from files
Returns
---------
plot : matplotlib plot, or False if no plot could be created
name : figure name, or error message if no plot could be created | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L2980-L3293 |
PmagPy/PmagPy | pmagpy/ipmag.py | core_depthplot | def core_depthplot(input_dir_path='.', meas_file='measurements.txt', spc_file='',
samp_file='samples.txt', age_file='', sum_file='', wt_file='',
depth_scale='core_depth', dmin=-1, dmax=-1, sym='bo',
size=5, spc_sym='ro', spc_size=5, meth='', step=0, fmt='svg',
pltDec=True, pltInc=True, pltMag=True, pltLine=True, pltSus=True,
logit=False, pltTime=False, timescale=None, amin=-1, amax=-1,
norm=False, data_model_num=3,location=""):
"""
depth scale can be 'core_depth' or 'composite_depth' (for data model=3)
if age file is provided, depth_scale will be set to 'age' by default.
You must provide at least a measurements,specimens and sample file to plot.
Parameters
----------
input_dir_path : str, default "."
file input directory
meas_file : str, default "measurements.txt"
input measurements file
spc_file : str, default ""
input specimens file
samp_file : str, default ""
input samples file
age_file : str, default ""
input ages file
sum_file : str, default ""
input csv summary file
wt_file : str, default ""
input file with weights
depth_scale : str, default "core_depth"
['core_depth', 'composite_depth']
dmin : number, default -1
minimum depth to plot (if -1, default to plotting all)
dmax : number, default -1
maximum depth to plot (if -1, default to plotting all)
sym : str, default "bo"
symbol color and shape, default blue circles
(see matplotlib documentaiton for more options)
size : int, defualt 5
symbol size
spc_sym : str, default 'ro'
specimen symbol color and shape, default red circles
(see matplotlib documentation for more options)
meth : str, default ""
method codes, ["LT-NO", "AF", "T", "ARM", "IRM", "X"]
step : int, default 0
treatment step for plotting:
for AF, in mT, for T, in C
fmt : str, default "svg"
format for figures, [svg,jpg,png,pdf]
pltDec : bool, default True
plot declination
pltInc : bool, default True
plot inclination
pltMag : bool, default True
plot magnetization
pltLine : bool, default True
connect dots with a line
pltSus : bool, default True
plot blanket treatment
logit : bool, default False
plot magnetization on a log scale
amin : int, default -1
minimum time to plot (if -1, default to plotting all)
amax : int, default -1
maximum time to plot (if -1, default to plotting all)
norm : bool, default False
normalize by weight
data_model_num : int, default 3
MagIC data model (please, use data model 3)
"""
data_model_num = int(data_model_num)
# replace MagIC 3 defaults with MagIC 2.5 defaults if needed
if data_model_num == 2 and meas_file == 'measurements.txt':
meas_file = 'magic_measurements.txt'
if data_model_num == 2 and samp_file == 'samples.txt':
samp_file = 'er_samples.txt'
if data_model_num == 2 and age_file == 'ages.txt':
age_file = 'er_ages.txt'
if data_model_num == 2 and depth_scale == "core_depth":
depth_scale = "sample_core_depth"
# initialize MagIC 3.0 vs 2.5 column names
loc_col_name = "location" if data_model_num == 3 else "er_location_name"
site_col_name = "site" if data_model_num == 3 else "er_site_name"
samp_col_name = "sample" if data_model_num == 3 else "er_sample_name"
spec_col_name = "specimen" if data_model_num == 3 else "er_specimen_name"
meth_col_name = "method_codes" if data_model_num == 3 else "magic_method_codes"
spec_dec_col_name = "dir_dec" if data_model_num == 3 else "specimen_dec"
spec_inc_col_name = "dir_inc" if data_model_num == 3 else "specimen_inc"
avg_weight_col_name = "weight" if data_model_num == 3 else "average_weight"
spec_weight_col_name = "weight" if data_model_num == 3 else "specimen_weight"
age_col_name = "age" if data_model_num == 3 else "average_age"
height_col_name = "height" if data_model_num == 3 else "average_height"
average_dec_col_name = "dir_dec" if data_model_num == 3 else "average_dec"
average_inc_col_name = "dir_inc" if data_model_num == 3 else "average_inc"
# initialize other variables
width = 10
Ssym, Ssize = 'cs', 5
pcol = 3
pel = 3
maxInt = -1000
minInt = 1e10
maxSuc = -1000
minSuc = 10000
main_plot = None
if size:
size = int(size)
if spc_size:
spc_size = int(spc_size)
title = ""
if location:title=location
# file formats not supported for the moment
ngr_file = "" # nothing needed, not implemented fully in original script
suc_file = "" # nothing else needed, also was not implemented in original script
res_file = "" # need also res_sym, res_size
wig_file = "" # if wig_file: pcol+=1; width+=2
# which plots to make
if not pltDec:
pcol -= 1
pel -= 1
width -= 2
if not pltInc:
pcol -= 1
pel -= 1
width -= 2
if not pltMag:
pcol -= 1
pel -= 1
width -= 2
# method and step
if not step or meth == 'LT-NO':
step = 0
method = 'LT-NO'
elif meth == "AF":
step = round(float(step) * 1e-3, 6)
method = 'LT-AF-Z'
elif meth == 'T':
step = round(float(step) + 273, 6)
method = 'LT-T-Z'
elif meth == 'ARM':
method = 'LT-AF-I'
step = round(float(step) * 1e-3, 6)
elif meth == 'IRM':
method = 'LT-IRM'
step = round(float(step) * 1e-3, 6)
# not supporting susceptibility at the moment LJ
elif meth == 'X':
method = 'LP-X'
pcol += 1
ind = sys.argv.index('-LP')
if sys.argv[ind+2] == 'mass':
if data_model_num != 3:
suc_key = 'measurement_chi_mass'
else:
suc_key = 'susc_chi_mass'
elif sys.argv[ind+2] == 'vol':
if data_model_num != 3:
suc_key = 'measurement_chi_volume'
else:
suc_key = 'susc_chi_volume'
else:
print('error in susceptibility units')
return False, 'error in susceptibility units'
else:
print('method: {} not supported'.format(meth))
return False, 'method: "{}" not supported'.format(meth)
if wt_file:
norm = True
if dmin and dmax:
dmin, dmax = float(dmin), float(dmax)
else:
dmin, dmax = -1, -1
if pltTime:
amin = float(amin)
amax = float(amax)
pcol += 1
width += 2
if not (amax and timescale):
return False, "To plot time, you must provide amin, amax, and timescale"
#
#
# read in 3.0 data and translate to 2.5
if meas_file:
meas_file = pmag.resolve_file_name(meas_file, input_dir_path)
if spc_file:
spc_file = pmag.resolve_file_name(spc_file, input_dir_path)
if samp_file:
samp_file = pmag.resolve_file_name(samp_file, input_dir_path)
if age_file:
age_file = pmag.resolve_file_name(age_file, input_dir_path)
if data_model_num == 3:
fnames = {'specimens': spc_file, 'samples': samp_file,
'ages': age_file, 'measurements': meas_file}
fnames = {k: v for (k, v) in fnames.items() if v}
con = cb.Contribution(input_dir_path, custom_filenames=fnames)
for dtype in ['measurements', 'specimens']:
if dtype not in con.tables:
print(
'-E- You must have a {} file in your input directory ({}) to run core_depthplot'.format(dtype, input_dir_path))
print(' If needed, you can specify your input directory on the command line with "core_depthplot.py -ID dirname ... "')
print(' Or with ipmag.core_depthplot(input_dir_path=dirname, ...)')
# return False, '-E- You must have a {} file in your input directory ({}) to run core_depthplot'.format(dtype, input_dir_path)
# propagate data to measurements
con.propagate_name_down('sample', 'measurements')
con.propagate_name_down('site', 'measurements')
# propagate depth info from sites --> samples
con.propagate_cols(
['core_depth', 'composite_depth'], 'samples', 'sites')
if age_file == "":
# get sample data straight from the contribution
Samps = []
if 'samples' in con.tables:
Samps = con.tables['samples'].convert_to_pmag_data_list()
else:
depth_scale = 'age'
Samps = []
# get age data from contribution
if 'ages' in con.tables:
# we need to get sample in here
# this doesn't do the trick by itself
con.propagate_ages()
con.propagate_cols(['age', 'age_unit'], 'samples', 'sites')
Samps = con.tables['samples'].convert_to_pmag_data_list()
age_unit = ""
if spc_file:
Specs3 = []
# get specimen data from contribution
Specs = []
if 'specimens' in con.tables:
Specs = con.tables['specimens'].convert_to_pmag_data_list()
if res_file:
warn = '-W- result file option is not currently available for MagIC data model 3'
print(warn)
return False, warn
#Results, file_type = pmag.magic_read(res_file)
if norm:
#warn = '-W- norm option is not currently available for MagIC data model 3'
# print(warn)
# return False, warn
Specs3, file_type = pmag.magic_read(wt_file)
# translate specimen records to 2.5
ErSpecs = []
# for spec in Specs3:
# ErSpecs.append(map_magic.mapping(spec, spec_magic3_2_magic2_map))
ErSpecs = Specs3
print(len(ErSpecs), ' specimens read in from ', wt_file)
if not os.path.isfile(spc_file):
if not os.path.isfile(meas_file):
return False, "You must provide either a magic_measurements file or a pmag_specimens file"
if not age_file and not samp_file:
print('-W- You must provide either an age file or a sample file')
return False, '-W- You must provide either an age file or a sample file'
# read in 2.5 data
elif data_model_num == 2:
if age_file == "":
if samp_file:
samp_file = os.path.join(input_dir_path, samp_file)
Samps, file_type = pmag.magic_read(samp_file)
else:
depth_scale = 'age'
if age_file:
age_file = os.path.join(input_dir_path, age_file)
Samps, file_type = pmag.magic_read(age_file)
age_unit = ""
if spc_file:
Specs, file_type = pmag.magic_read(spc_file)
if res_file:
Results, file_type = pmag.magic_read(res_file)
if norm:
ErSpecs, file_type = pmag.magic_read(wt_file)
print(len(ErSpecs), ' specimens read in from ', wt_file)
if not os.path.isfile(spc_file):
if not os.path.isfile(meas_file):
return False, "You must provide either a magic_measurements file or a pmag_specimens file"
else:
return False, "Invalid data model number: {}".format(str(data_model_num))
Cores = []
core_depth_key = "Top depth cored CSF (m)"
if sum_file:
# os.path.join(input_dir_path, sum_file)
sum_file = pmag.resolve_file_name(sum_file, input_dir_path)
with open(sum_file, 'r') as fin:
indat = fin.readlines()
if "Core Summary" in indat[0]:
headline = 1
else:
headline = 0
keys = indat[headline].replace('\n', '').split(',')
if "Core Top (m)" in keys:
core_depth_key = "Core Top (m)"
if "Top depth cored CSF (m)" in keys:
core_dpeth_key = "Top depth cored CSF (m)"
if "Core Label" in keys:
core_label_key = "Core Label"
if "Core label" in keys:
core_label_key = "Core label"
for line in indat[2:]:
if 'TOTALS' not in line:
CoreRec = {}
for k in range(len(keys)):
CoreRec[keys[k]] = line.split(',')[k]
Cores.append(CoreRec)
if len(Cores) == 0:
print('no Core depth information available: import core summary file')
sum_file = ""
Data = []
if 'core_depth' in depth_scale or depth_scale == 'mbsf':
ylab = "Depth (mbsf)"
depth_scale = 'core_depth'
elif depth_scale == 'age':
ylab = "Age"
elif 'composite_depth' in depth_scale or depth_scale == 'mcd':
ylab = "Depth (mcd)"
depth_scale = 'composite_depth'
else:
print('Warning: You have provided unsupported depth scale: {}.\nUsing default (mbsf) instead.'.format(
depth_scale))
depth_scale = 'core_depth'
ylab = "Depth (mbsf)"
# fix depth scale for data model 2 if needed
if data_model_num == 2 and not depth_scale.startswith('sample_'):
if depth_scale != "age":
depth_scale = "sample_" + depth_scale
# collect the data for plotting declination
Depths, Decs, Incs, Ints = [], [], [], []
SDepths, SDecs, SIncs, SInts = [], [], [], []
SSucs = []
samples = []
methods, steps, m2 = [], [], []
if os.path.isfile(meas_file): # plot the bulk measurement data
if data_model_num == 3:
Meas = []
if 'measurements' in con.tables:
Meas = con.tables['measurements'].convert_to_pmag_data_list()
# has measurement_magn_mass ....
dec_key, inc_key = 'dir_dec', 'dir_inc'
meth_key, temp_key, ac_key, dc_key = 'method_codes', 'treat_temp', 'treat_ac_field', 'treat_dc_field'
intlist = ['magnitude', 'magn_moment',
'magn_volume', 'magn_mass']
meas_key = "magn_moment"
elif data_model_num == 2:
intlist = ['measurement_magnitude', 'measurement_magn_moment',
'measurement_magn_volume', 'measurement_magn_mass']
temp_key, ac_key, dc_key = 'treatment_temp', 'treatment_ac_field', 'treatment_dc_field'
dec_key, inc_key = 'measurement_dec', 'measurement_inc'
Meas, file_type = pmag.magic_read(meas_file)
meas_key = 'measurement_magn_moment'
#
print(len(Meas), ' measurements read in from ', meas_file)
#
for m in intlist: # find the intensity key with data
# get all non-blank data for this specimen
meas_data = pmag.get_dictitem(Meas, m, '', 'F')
if len(meas_data) > 0:
print('using intensity key:', m)
meas_key = m
break
# fish out the desired method code
m1 = pmag.get_dictitem(Meas, meth_col_name, method, 'has')
if method == 'LT-T-Z':
m2 = pmag.get_dictitem(m1, temp_key, str(
step), 'eval') # fish out the desired step
elif 'LT-AF' in method:
m2 = pmag.get_dictitem(m1, ac_key, str(step), 'eval')
elif 'LT-IRM' in method:
m2 = pmag.get_dictitem(m1, dc_key, str(step), 'eval')
elif 'LP-X' in method:
m2 = pmag.get_dictitem(m1, suc_key, '', 'F')
if len(m2) > 0:
for rec in m2: # fish out depths and weights
D = pmag.get_dictitem(
Samps, samp_col_name, rec[samp_col_name], 'T')
if not D: # if using an age_file, you may need to sort by site
D = pmag.get_dictitem(
Samps, site_col_name, rec[site_col_name], 'T')
depth = pmag.get_dictitem(D, depth_scale, '', 'F')
if len(depth) > 0:
if ylab == 'Age':
# get units of ages - assume they are all the same!
ylab = ylab + ' (' + depth[0]['age_unit'] + ')'
rec[depth_scale] = float(depth[0][depth_scale])
rec[meth_col_name] = rec[meth_col_name] + \
':' + depth[0][meth_col_name]
if norm:
specrecs = pmag.get_dictitem(
ErSpecs, spec_col_name, rec[spec_col_name], 'T')
specwts = pmag.get_dictitem(
specrecs, spec_weight_col_name, "", 'F')
if len(specwts) > 0:
rec[weight_col_name] = specwts[0][spec_weight_col_name]
# fish out data with core_depth and (if needed)
# weights
Data.append(rec)
else:
# fish out data with core_depth and (if needed) weights
Data.append(rec)
if title == "":
pieces = rec[samp_col_name].split('-')
location = rec.get(loc_col_name, '')
title = location
SData = pmag.sort_diclist(Data, depth_scale)
for rec in SData: # fish out bulk measurement data from desired depths
if dmax == -1 or float(rec[depth_scale]) < dmax and float(rec[depth_scale]) > dmin:
Depths.append((rec[depth_scale]))
if method == "LP-X":
SSucs.append(float(rec[suc_key]))
else:
if pltDec:
Decs.append(float(rec[dec_key]))
if pltInc:
Incs.append(float(rec[inc_key]))
if not norm and pltMag:
Ints.append(float(rec[meas_key]))
if norm and pltMag:
Ints.append(
float(rec[meas_key]) / float(rec[spec_weight_col_name]))
if len(SSucs) > 0:
maxSuc = max(SSucs)
minSuc = min(SSucs)
if len(Ints) > 1:
maxInt = max(Ints)
minInt = min(Ints)
if len(Depths) == 0:
print('no bulk measurement data matched your request')
else:
print(len(Depths), "depths found")
SpecDepths, SpecDecs, SpecIncs = [], [], []
FDepths, FDecs, FIncs = [], [], []
if spc_file: # add depths to spec data
# get all the discrete data with best fit lines
BFLs = pmag.get_dictitem(Specs, meth_col_name, 'DE-BFL', 'has')
for spec in BFLs:
if location == "":
location = spec.get(loc_col_name, "")
samp = pmag.get_dictitem(
Samps, samp_col_name, spec[samp_col_name], 'T')
if len(samp) > 0 and depth_scale in list(samp[0].keys()) and samp[0][depth_scale] != "":
if ylab == 'Age':
# get units of ages - assume they are all the same!
ylab = ylab + ' (' + samp[0]['age_unit'] + ')'
# filter for depth
if dmax == -1 or float(samp[0][depth_scale]) < dmax and float(samp[0][depth_scale]) > dmin:
# fish out data with core_depth
SpecDepths.append(float(samp[0][depth_scale]))
# fish out data with core_depth
SpecDecs.append(float(spec[spec_dec_col_name]))
# fish out data with core_depth
SpecIncs.append(float(spec[spec_inc_col_name]))
else:
print('no core_depth found for: ', spec[spec_col_name])
# get all the discrete data with best fit lines
FMs = pmag.get_dictitem(Specs, meth_col_name, 'DE-FM', 'has')
for spec in FMs:
if location == "":
location = spec.get(loc_col_name, "")
samp = pmag.get_dictitem(
Samps, samp_col_name, spec[samp_col_name], 'T')
if len(samp) > 0 and depth_scale in list(samp[0].keys()) and samp[0][depth_scale] != "":
if ylab == 'Age':
# get units of ages - assume they are all the same!
ylab = ylab + ' (' + samp[0]['age_unit'] + ')'
# filter for depth
if dmax == -1 or float(samp[0][depth_scale]) < dmax and float(samp[0][depth_scale]) > dmin:
# fish out data with core_depth
FDepths.append(float(samp[0][depth_scale]))
# fish out data with core_depth
FDecs.append(float(spec[spec_dec_col]))
# fish out data with core_depth
FIncs.append(float(spec[spec_inc_col]))
else:
print('no core_depth found for: ', spec[spec_col_name])
ResDepths, ResDecs, ResIncs = [], [], []
if 'age' in depth_scale: # set y-key
res_scale = age_col_name
else:
res_scale = height_col_name
if res_file: # creates lists of Result Data
for res in Results:
meths = res[meth_col_name].split(":")
if 'DE-FM' in meths:
# filter for depth
if dmax == -1 or float(res[res_scale]) < dmax and float(res[res_scale]) > dmin:
# fish out data with core_depth
ResDepths.append(float(res[res_scale]))
# fish out data with core_depth
ResDecs.append(float(res['average_dec']))
# fish out data with core_depth
ResIncs.append(float(res['average_inc']))
Susc, Sus_depths = [], []
if dmin == -1:
if len(Depths) > 0:
dmin, dmax = Depths[0], Depths[-1]
if len(FDepths) > 0:
dmin, dmax = FDepths[0], FDepths[-1]
if pltSus and len(SDepths) > 0:
if SDepths[0] < dmin:
dmin = SDepths[0]
if SDepths[-1] > dmax:
dmax = SDepths[-1]
if len(SpecDepths) > 0:
if min(SpecDepths) < dmin:
dmin = min(SpecDepths)
if max(SpecDepths) > dmax:
dmax = max(SpecDepths)
if len(ResDepths) > 0:
if min(ResDepths) < dmin:
dmin = min(ResDepths)
if max(ResDepths) > dmax:
dmax = max(ResDepths)
# wig_file and suc_file not currently supported options
# if suc_file:
# with open(suc_file, 'r') as s_file:
# sucdat = s_file.readlines()
# keys = sucdat[0].replace('\n', '').split(',') # splits on underscores
# for line in sucdat[1:]:
# SucRec = {}
# for k in range(len(keys)):
# SucRec[keys[k]] = line.split(',')[k]
# if float(SucRec['Top Depth (m)']) < dmax and float(SucRec['Top Depth (m)']) > dmin and SucRec['Magnetic Susceptibility (80 mm)'] != "":
# Susc.append(float(SucRec['Magnetic Susceptibility (80 mm)']))
# if Susc[-1] > maxSuc:
# maxSuc = Susc[-1]
# if Susc[-1] < minSuc:
# minSuc = Susc[-1]
# Sus_depths.append(float(SucRec['Top Depth (m)']))
#WIG, WIG_depths = [], []
# if wig_file:
# wigdat, file_type = pmag.magic_read(wig_file)
# swigdat = pmag.sort_diclist(wigdat, depth_scale)
# keys = list(wigdat[0].keys())
# for key in keys:
# if key != depth_scale:
# plt_key = key
# break
# for wig in swigdat:
# if float(wig[depth_scale]) < dmax and float(wig[depth_scale]) > dmin:
# WIG.append(float(wig[plt_key]))
# WIG_depths.append(float(wig[depth_scale]))
tint = 4.5
plot = 1
#print('Decs', len(Decs))
#print('Depths', len(Depths), 'SpecDecs', len(SpecDecs))
#print('SpecDepths', len(SpecDepths), 'ResDecs', len(ResDecs))
#print('ResDepths', len(ResDepths), 'SDecs', len(SDecs))
#print('SDepths', len(SDepths), 'SIincs', len(SIncs))
#print('Incs', len(Incs))
if (Decs and Depths) or (SpecDecs and SpecDepths) or (ResDecs and ResDepths) or (SDecs and SDepths) or (SInts and SDepths) or (SIncs and SDepths) or (Incs and Depths):
main_plot = plt.figure(1, figsize=(width, 8)) # this works
# pylab.figure(1,figsize=(width,8))
version_num = pmag.get_version()
plt.figtext(.02, .01, version_num)
if pltDec:
ax = plt.subplot(1, pcol, plot)
if pltLine:
plt.plot(Decs, Depths, 'k')
if len(Decs) > 0:
plt.plot(Decs, Depths, sym, markersize=size)
if len(Decs) == 0 and pltLine and len(SDecs) > 0:
plt.plot(SDecs, SDepths, 'k')
if len(SDecs) > 0:
plt.plot(SDecs, SDepths, Ssym, markersize=Ssize)
if spc_file:
plt.plot(SpecDecs, SpecDepths, spc_sym, markersize=spc_size)
if spc_file and len(FDepths) > 0:
plt.scatter(
FDecs, FDepths, marker=spc_sym[-1], edgecolor=spc_sym[0], facecolor='white', s=spc_size**2)
if res_file:
plt.plot(ResDecs, ResDepths, res_sym, markersize=res_size)
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if depth > dmin and depth < dmax:
plt.plot([0, 360.], [depth, depth], 'b--')
if pel == plt:
plt.text(360, depth + tint, core[core_label_key])
if pel == plot:
plt.axis([0, 400, dmax, dmin])
else:
plt.axis([0, 360., dmax, dmin])
plt.xlabel('Declination')
plt.ylabel(ylab)
plot += 1
pmagplotlib.delticks(ax) # dec xticks are too crowded otherwise
else:
print('no data!')
return False, 'No data found to plot\nTry again with different parameters'
if pltInc:
plt.subplot(1, pcol, plot)
if pltLine:
plt.plot(Incs, Depths, 'k')
if len(Incs) > 0:
plt.plot(Incs, Depths, sym, markersize=size)
if len(Incs) == 0 and pltLine and len(SIncs) > 0:
plt.plot(SIncs, SDepths, 'k')
if len(SIncs) > 0:
plt.plot(SIncs, SDepths, Ssym, markersize=Ssize)
if spc_file and len(SpecDepths) > 0:
plt.plot(SpecIncs, SpecDepths, spc_sym, markersize=spc_size)
if spc_file and len(FDepths) > 0:
plt.scatter(
FIncs, FDepths, marker=spc_sym[-1], edgecolor=spc_sym[0], facecolor='white', s=spc_size**2)
if res_file:
plt.plot(ResIncs, ResDepths, res_sym, markersize=res_size)
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if depth > dmin and depth < dmax:
if pel == plot:
plt.text(90, depth + tint, core[core_label_key])
plt.plot([-90, 90], [depth, depth], 'b--')
plt.plot([0, 0], [dmax, dmin], 'k-')
if pel == plot:
plt.axis([-90, 110, dmax, dmin])
else:
plt.axis([-90, 90, dmax, dmin])
plt.xlabel('Inclination')
plt.ylabel('')
plot += 1
if pltMag and len(Ints) > 0 or len(SInts) > 0:
plt.subplot(1, pcol, plot)
for pow in range(-10, 10):
if maxInt * 10**pow > 1:
break
if not logit:
for k in range(len(Ints)):
Ints[k] = Ints[k] * 10**pow
for k in range(len(SInts)):
SInts[k] = SInts[k] * 10**pow
if pltLine and len(Ints) > 0:
plt.plot(Ints, Depths, 'k')
if len(Ints) > 0:
plt.plot(Ints, Depths, sym, markersize=size)
if len(Ints) == 0 and pltLine and len(SInts) > 0:
plt.plot(SInts, SDepths, 'k-')
if len(SInts) > 0:
plt.plot(SInts, SDepths, Ssym, markersize=Ssize)
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
plt.plot([0, maxInt * 10**pow + .1], [depth, depth], 'b--')
if depth > dmin and depth < dmax:
plt.text(maxInt * 10**pow - .2 * maxInt * 10 **
pow, depth + tint, core[core_label_key])
plt.axis([0, maxInt * 10**pow + .1, dmax, dmin])
if not norm:
plt.xlabel('%s %i %s' % ('Intensity (10^-', pow, ' Am^2)'))
else:
plt.xlabel('%s %i %s' % ('Intensity (10^-', pow, ' Am^2/kg)'))
else:
if pltLine:
plt.semilogx(Ints, Depths, 'k')
if len(Ints) > 0:
plt.semilogx(Ints, Depths, sym, markersize=size)
if len(Ints) == 0 and pltLine and len(SInts) > 0:
plt.semilogx(SInts, SDepths, 'k')
if len(Ints) == 0 and pltLine == 1 and len(SInts) > 0:
plt.semilogx(SInts, SDepths, 'k')
if len(SInts) > 0:
plt.semilogx(SInts, SDepths, Ssym, markersize=Ssize)
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
plt.semilogx([minInt, maxInt], [depth, depth], 'b--')
if depth > dmin and depth < dmax:
plt.text(maxInt - .2 * maxInt, depth +
tint, core[core_label_key])
minInt = plt.axis()[0]
plt.axis([minInt, maxInt, dmax, dmin])
if not norm:
plt.xlabel('Intensity (Am^2)')
else:
plt.xlabel('Intensity (Am^2/kg)')
plot += 1
if suc_file or len(SSucs) > 0:
plt.subplot(1, pcol, plot)
if len(Susc) > 0:
if pltLine:
plt.plot(Susc, Sus_depths, 'k')
if not logit:
plt.plot(Susc, Sus_depths, sym, markersize=size)
if logit:
plt.semilogx(Susc, Sus_depths, sym, markersize=size)
if len(SSucs) > 0:
if not logit:
plt.plot(SSucs, SDepths, sym, markersize=size)
if logit:
plt.semilogx(SSucs, SDepths, sym, markersize=size)
if sum_file:
for core in Cores:
depth = float(core[core_depth_key])
if not logit:
plt.plot([minSuc, maxSuc], [depth, depth], 'b--')
if logit:
plt.semilogx([minSuc, maxSuc], [depth, depth], 'b--')
plt.axis([minSuc, maxSuc, dmax, dmin])
plt.xlabel('Susceptibility')
plot += 1
# if wig_file:
# plt.subplot(1, pcol, plot)
# plt.plot(WIG, WIG_depths, 'k')
# if sum_file:
# for core in Cores:
# depth = float(core[core_depth_key])
# plt.plot([WIG[0], WIG[-1]], [depth, depth], 'b--')
# plt.axis([min(WIG), max(WIG), dmax, dmin])
# plt.xlabel(plt_key)
# plot += 1
if pltTime:
ax1 = plt.subplot(1, pcol, plot)
ax1.axis([-.25, 1.5, amax, amin])
plot += 1
TS, Chrons = pmag.get_ts(timescale)
X, Y, Y2 = [0, 1], [], []
cnt = 0
if amin < TS[1]: # in the Brunhes
Y = [amin, amin] # minimum age
Y1 = [TS[1], TS[1]] # age of the B/M boundary
# color in Brunhes, black
ax1.fill_between(X, Y, Y1, facecolor='black')
for d in TS[1:]:
pol = cnt % 2
cnt += 1
if d <= amax and d >= amin:
ind = TS.index(d)
Y = [TS[ind], TS[ind]]
Y1 = [TS[ind + 1], TS[ind + 1]]
if pol:
# fill in every other time
ax1.fill_between(X, Y, Y1, facecolor='black')
ax1.plot([0, 1, 1, 0, 0], [amin, amin, amax, amax, amin], 'k-')
ax2 = ax1.twinx()
plt.ylabel("Age (Ma): " + timescale)
for k in range(len(Chrons) - 1):
c = Chrons[k]
cnext = Chrons[k + 1]
d = cnext[1] - old_div((cnext[1] - c[1]), 3.)
if d >= amin and d < amax:
# make the Chron boundary tick
ax2.plot([1, 1.5], [c[1], c[1]], 'k-')
ax2.text(1.05, d, c[0])
ax2.axis([-.25, 1.5, amax, amin])
figname = location + '_m:_' + method + '_core-depthplot.' + fmt
plt.title(location)
def core_depthplot(input_dir_path='.', meas_file='measurements.txt', spc_file='',
                   samp_file='samples.txt', age_file='', sum_file='', wt_file='',
                   depth_scale='core_depth', dmin=-1, dmax=-1, sym='bo',
                   size=5, spc_sym='ro', spc_size=5, meth='', step=0, fmt='svg',
                   pltDec=True, pltInc=True, pltMag=True, pltLine=True, pltSus=True,
                   logit=False, pltTime=False, timescale=None, amin=-1, amax=-1,
                   norm=False, data_model_num=3, location=""):
    """
    Plot measurement, specimen and (optionally) result data against core
    depth or age, with an optional geomagnetic polarity timescale column.

    depth scale can be 'core_depth' or 'composite_depth' (for data model=3)
    if age file is provided, depth_scale will be set to 'age' by default.
    You must provide at least a measurements, specimens and samples file to plot.

    Parameters
    ----------
    input_dir_path : str, default "."
        file input directory
    meas_file : str, default "measurements.txt"
        input measurements file
    spc_file : str, default ""
        input specimens file
    samp_file : str, default ""
        input samples file
    age_file : str, default ""
        input ages file
    sum_file : str, default ""
        input csv summary file
    wt_file : str, default ""
        input file with weights
    depth_scale : str, default "core_depth"
        ['core_depth', 'composite_depth']
    dmin : number, default -1
        minimum depth to plot (if -1, default to plotting all)
    dmax : number, default -1
        maximum depth to plot (if -1, default to plotting all)
    sym : str, default "bo"
        symbol color and shape, default blue circles
        (see matplotlib documentation for more options)
    size : int, default 5
        symbol size
    spc_sym : str, default 'ro'
        specimen symbol color and shape, default red circles
        (see matplotlib documentation for more options)
    spc_size : int, default 5
        specimen symbol size
    meth : str, default ""
        method codes, ["LT-NO", "AF", "T", "ARM", "IRM", "X"]
    step : int, default 0
        treatment step for plotting:
        for AF, in mT, for T, in C
    fmt : str, default "svg"
        format for figures, [svg,jpg,png,pdf]
    pltDec : bool, default True
        plot declination
    pltInc : bool, default True
        plot inclination
    pltMag : bool, default True
        plot magnetization
    pltLine : bool, default True
        connect dots with a line
    pltSus : bool, default True
        plot blanket treatment
    logit : bool, default False
        plot magnetization on a log scale
    pltTime : bool, default False
        add a geomagnetic polarity timescale column to the figure
    timescale : str, default None
        name of the timescale passed to pmag.get_ts (required if pltTime)
    amin : int, default -1
        minimum time to plot (if -1, default to plotting all)
    amax : int, default -1
        maximum time to plot (if -1, default to plotting all)
    norm : bool, default False
        normalize by weight
    data_model_num : int, default 3
        MagIC data model (please, use data model 3)
    location : str, default ""
        location name used for the plot title and output figure name

    Returns
    -------
    (main_plot, figname) on success, (False, error message string) on failure.
    """
    data_model_num = int(data_model_num)
    # replace MagIC 3 defaults with MagIC 2.5 defaults if needed
    if data_model_num == 2 and meas_file == 'measurements.txt':
        meas_file = 'magic_measurements.txt'
    if data_model_num == 2 and samp_file == 'samples.txt':
        samp_file = 'er_samples.txt'
    if data_model_num == 2 and age_file == 'ages.txt':
        age_file = 'er_ages.txt'
    if data_model_num == 2 and depth_scale == "core_depth":
        depth_scale = "sample_core_depth"
    # initialize MagIC 3.0 vs 2.5 column names
    loc_col_name = "location" if data_model_num == 3 else "er_location_name"
    site_col_name = "site" if data_model_num == 3 else "er_site_name"
    samp_col_name = "sample" if data_model_num == 3 else "er_sample_name"
    spec_col_name = "specimen" if data_model_num == 3 else "er_specimen_name"
    meth_col_name = "method_codes" if data_model_num == 3 else "magic_method_codes"
    spec_dec_col_name = "dir_dec" if data_model_num == 3 else "specimen_dec"
    spec_inc_col_name = "dir_inc" if data_model_num == 3 else "specimen_inc"
    avg_weight_col_name = "weight" if data_model_num == 3 else "average_weight"
    spec_weight_col_name = "weight" if data_model_num == 3 else "specimen_weight"
    age_col_name = "age" if data_model_num == 3 else "average_age"
    height_col_name = "height" if data_model_num == 3 else "average_height"
    average_dec_col_name = "dir_dec" if data_model_num == 3 else "average_dec"
    average_inc_col_name = "dir_inc" if data_model_num == 3 else "average_inc"
    # initialize other variables
    width = 10
    Ssym, Ssize = 'cs', 5           # symbol/size for blanket treatment data
    pcol = 3                        # number of plot columns
    pel = 3                         # panel that gets the core labels / wider axis
    maxInt = -1000
    minInt = 1e10
    maxSuc = -1000
    minSuc = 10000
    main_plot = None
    if size:
        size = int(size)
    if spc_size:
        spc_size = int(spc_size)
    title = ""
    if location:
        title = location
    # file formats not supported for the moment (were never fully
    # implemented in the original script)
    ngr_file = ""
    suc_file = ""
    res_file = ""  # would also need res_sym, res_size
    wig_file = ""  # if wig_file: pcol+=1; width+=2
    # which plots to make
    if not pltDec:
        pcol -= 1
        pel -= 1
        width -= 2
    if not pltInc:
        pcol -= 1
        pel -= 1
        width -= 2
    if not pltMag:
        pcol -= 1
        pel -= 1
        width -= 2
    # translate the user-facing method name into a MagIC method code and
    # convert the treatment step into SI units (mT -> T, C -> K)
    if not step or meth == 'LT-NO':
        step = 0
        method = 'LT-NO'
    elif meth == "AF":
        step = round(float(step) * 1e-3, 6)
        method = 'LT-AF-Z'
    elif meth == 'T':
        step = round(float(step) + 273, 6)
        method = 'LT-T-Z'
    elif meth == 'ARM':
        method = 'LT-AF-I'
        step = round(float(step) * 1e-3, 6)
    elif meth == 'IRM':
        method = 'LT-IRM'
        step = round(float(step) * 1e-3, 6)
    # not supporting susceptibility at the moment LJ
    elif meth == 'X':
        method = 'LP-X'
        pcol += 1
        # NOTE(review): susceptibility units are still read from the command
        # line ('-LP' flag); fixed to fail gracefully instead of raising
        # ValueError from sys.argv.index when called as a library function.
        if '-LP' in sys.argv:
            ind = sys.argv.index('-LP')
            if sys.argv[ind + 2] == 'mass':
                if data_model_num != 3:
                    suc_key = 'measurement_chi_mass'
                else:
                    suc_key = 'susc_chi_mass'
            elif sys.argv[ind + 2] == 'vol':
                if data_model_num != 3:
                    suc_key = 'measurement_chi_volume'
                else:
                    suc_key = 'susc_chi_volume'
            else:
                print('error in susceptibility units')
                return False, 'error in susceptibility units'
        else:
            print('error in susceptibility units')
            return False, 'error in susceptibility units'
    else:
        print('method: {} not supported'.format(meth))
        return False, 'method: "{}" not supported'.format(meth)
    if wt_file:
        norm = True
    if dmin and dmax:
        dmin, dmax = float(dmin), float(dmax)
    else:
        dmin, dmax = -1, -1
    if pltTime:
        amin = float(amin)
        amax = float(amax)
        pcol += 1
        width += 2
        if not (amax and timescale):
            return False, "To plot time, you must provide amin, amax, and timescale"
    #
    # read in 3.0 data and translate to 2.5
    if meas_file:
        meas_file = pmag.resolve_file_name(meas_file, input_dir_path)
    if spc_file:
        spc_file = pmag.resolve_file_name(spc_file, input_dir_path)
    if samp_file:
        samp_file = pmag.resolve_file_name(samp_file, input_dir_path)
    if age_file:
        age_file = pmag.resolve_file_name(age_file, input_dir_path)
    if data_model_num == 3:
        fnames = {'specimens': spc_file, 'samples': samp_file,
                  'ages': age_file, 'measurements': meas_file}
        fnames = {k: v for (k, v) in fnames.items() if v}
        con = cb.Contribution(input_dir_path, custom_filenames=fnames)
        for dtype in ['measurements', 'specimens']:
            if dtype not in con.tables:
                print(
                    '-E- You must have a {} file in your input directory ({}) to run core_depthplot'.format(dtype, input_dir_path))
                print(' If needed, you can specify your input directory on the command line with "core_depthplot.py -ID dirname ... "')
                print(' Or with ipmag.core_depthplot(input_dir_path=dirname, ...)')
        # propagate data to measurements
        con.propagate_name_down('sample', 'measurements')
        con.propagate_name_down('site', 'measurements')
        # propagate depth info from sites --> samples
        con.propagate_cols(
            ['core_depth', 'composite_depth'], 'samples', 'sites')
        if age_file == "":
            # get sample data straight from the contribution
            Samps = []
            if 'samples' in con.tables:
                Samps = con.tables['samples'].convert_to_pmag_data_list()
        else:
            depth_scale = 'age'
            Samps = []
            # get age data from contribution
            if 'ages' in con.tables:
                # we need to get sample in here
                # this doesn't do the trick by itself
                con.propagate_ages()
                con.propagate_cols(['age', 'age_unit'], 'samples', 'sites')
                Samps = con.tables['samples'].convert_to_pmag_data_list()
        age_unit = ""
        if spc_file:
            Specs3 = []
            # get specimen data from contribution
            Specs = []
            if 'specimens' in con.tables:
                Specs = con.tables['specimens'].convert_to_pmag_data_list()
        if res_file:
            warn = '-W- result file option is not currently available for MagIC data model 3'
            print(warn)
            return False, warn
        if norm:
            # weights are read straight from the 3.0 file; the 2.5 mapping
            # step was never enabled
            Specs3, file_type = pmag.magic_read(wt_file)
            ErSpecs = Specs3
            print(len(ErSpecs), ' specimens read in from ', wt_file)
        if not os.path.isfile(spc_file):
            if not os.path.isfile(meas_file):
                return False, "You must provide either a magic_measurements file or a pmag_specimens file"
        if not age_file and not samp_file:
            print('-W- You must provide either an age file or a sample file')
            return False, '-W- You must provide either an age file or a sample file'
    # read in 2.5 data
    elif data_model_num == 2:
        if age_file == "":
            if samp_file:
                samp_file = os.path.join(input_dir_path, samp_file)
                Samps, file_type = pmag.magic_read(samp_file)
        else:
            depth_scale = 'age'
            if age_file:
                age_file = os.path.join(input_dir_path, age_file)
                Samps, file_type = pmag.magic_read(age_file)
            age_unit = ""
        if spc_file:
            Specs, file_type = pmag.magic_read(spc_file)
        if res_file:
            Results, file_type = pmag.magic_read(res_file)
        if norm:
            ErSpecs, file_type = pmag.magic_read(wt_file)
            print(len(ErSpecs), ' specimens read in from ', wt_file)
        if not os.path.isfile(spc_file):
            if not os.path.isfile(meas_file):
                return False, "You must provide either a magic_measurements file or a pmag_specimens file"
    else:
        return False, "Invalid data model number: {}".format(str(data_model_num))
    # parse the optional core summary csv
    Cores = []
    core_depth_key = "Top depth cored CSF (m)"
    if sum_file:
        sum_file = pmag.resolve_file_name(sum_file, input_dir_path)
        with open(sum_file, 'r') as fin:
            indat = fin.readlines()
        if "Core Summary" in indat[0]:
            headline = 1
        else:
            headline = 0
        keys = indat[headline].replace('\n', '').split(',')
        if "Core Top (m)" in keys:
            core_depth_key = "Core Top (m)"
        if "Top depth cored CSF (m)" in keys:
            # fixed: was assigned to the misspelled (and never read)
            # name 'core_dpeth_key'
            core_depth_key = "Top depth cored CSF (m)"
        if "Core Label" in keys:
            core_label_key = "Core Label"
        if "Core label" in keys:
            core_label_key = "Core label"
        # NOTE(review): data rows are assumed to start at indat[2]; when
        # there is no "Core Summary" title row this skips one record --
        # confirm against the expected csv layout
        for line in indat[2:]:
            if 'TOTALS' not in line:
                CoreRec = {}
                for k in range(len(keys)):
                    CoreRec[keys[k]] = line.split(',')[k]
                Cores.append(CoreRec)
        if len(Cores) == 0:
            print('no Core depth information available: import core summary file')
            sum_file = ""
    Data = []
    # normalize the depth scale name and pick the y-axis label
    if 'core_depth' in depth_scale or depth_scale == 'mbsf':
        ylab = "Depth (mbsf)"
        depth_scale = 'core_depth'
    elif depth_scale == 'age':
        ylab = "Age"
    elif 'composite_depth' in depth_scale or depth_scale == 'mcd':
        ylab = "Depth (mcd)"
        depth_scale = 'composite_depth'
    else:
        print('Warning: You have provided unsupported depth scale: {}.\nUsing default (mbsf) instead.'.format(
            depth_scale))
        depth_scale = 'core_depth'
        ylab = "Depth (mbsf)"
    # fix depth scale for data model 2 if needed
    if data_model_num == 2 and not depth_scale.startswith('sample_'):
        if depth_scale != "age":
            depth_scale = "sample_" + depth_scale
    # collect the data for plotting declination
    Depths, Decs, Incs, Ints = [], [], [], []
    SDepths, SDecs, SIncs, SInts = [], [], [], []
    SSucs = []
    samples = []
    methods, steps, m2 = [], [], []
    if os.path.isfile(meas_file):  # plot the bulk measurement data
        if data_model_num == 3:
            Meas = []
            if 'measurements' in con.tables:
                Meas = con.tables['measurements'].convert_to_pmag_data_list()
            dec_key, inc_key = 'dir_dec', 'dir_inc'
            meth_key, temp_key, ac_key, dc_key = 'method_codes', 'treat_temp', 'treat_ac_field', 'treat_dc_field'
            intlist = ['magnitude', 'magn_moment',
                       'magn_volume', 'magn_mass']
            meas_key = "magn_moment"
        elif data_model_num == 2:
            intlist = ['measurement_magnitude', 'measurement_magn_moment',
                       'measurement_magn_volume', 'measurement_magn_mass']
            temp_key, ac_key, dc_key = 'treatment_temp', 'treatment_ac_field', 'treatment_dc_field'
            dec_key, inc_key = 'measurement_dec', 'measurement_inc'
            Meas, file_type = pmag.magic_read(meas_file)
            meas_key = 'measurement_magn_moment'
        print(len(Meas), ' measurements read in from ', meas_file)
        for m in intlist:  # find the intensity key with data
            # get all non-blank data for this intensity column
            meas_data = pmag.get_dictitem(Meas, m, '', 'F')
            if len(meas_data) > 0:
                print('using intensity key:', m)
                meas_key = m
                break
        # fish out the desired method code
        m1 = pmag.get_dictitem(Meas, meth_col_name, method, 'has')
        if method == 'LT-T-Z':
            # fish out the desired treatment step
            m2 = pmag.get_dictitem(m1, temp_key, str(step), 'eval')
        elif 'LT-AF' in method:
            m2 = pmag.get_dictitem(m1, ac_key, str(step), 'eval')
        elif 'LT-IRM' in method:
            m2 = pmag.get_dictitem(m1, dc_key, str(step), 'eval')
        elif 'LP-X' in method:
            m2 = pmag.get_dictitem(m1, suc_key, '', 'F')
        if len(m2) > 0:
            for rec in m2:  # fish out depths and weights
                D = pmag.get_dictitem(
                    Samps, samp_col_name, rec[samp_col_name], 'T')
                if not D:  # if using an age_file, you may need to sort by site
                    D = pmag.get_dictitem(
                        Samps, site_col_name, rec[site_col_name], 'T')
                depth = pmag.get_dictitem(D, depth_scale, '', 'F')
                if len(depth) > 0:
                    if ylab == 'Age':
                        # get units of ages - assume they are all the same!
                        ylab = ylab + ' (' + depth[0]['age_unit'] + ')'
                    rec[depth_scale] = float(depth[0][depth_scale])
                    rec[meth_col_name] = rec[meth_col_name] + \
                        ':' + depth[0][meth_col_name]
                    if norm:
                        specrecs = pmag.get_dictitem(
                            ErSpecs, spec_col_name, rec[spec_col_name], 'T')
                        specwts = pmag.get_dictitem(
                            specrecs, spec_weight_col_name, "", 'F')
                        if len(specwts) > 0:
                            # fixed: was the undefined name 'weight_col_name'
                            rec[spec_weight_col_name] = specwts[0][spec_weight_col_name]
                        # fish out data with core_depth and (if needed) weights
                        Data.append(rec)
                    else:
                        # fish out data with core_depth and (if needed) weights
                        Data.append(rec)
                    if title == "":
                        pieces = rec[samp_col_name].split('-')
                        location = rec.get(loc_col_name, '')
                        title = location
        SData = pmag.sort_diclist(Data, depth_scale)
        for rec in SData:  # fish out bulk measurement data from desired depths
            if dmax == -1 or float(rec[depth_scale]) < dmax and float(rec[depth_scale]) > dmin:
                Depths.append((rec[depth_scale]))
                if method == "LP-X":
                    SSucs.append(float(rec[suc_key]))
                else:
                    if pltDec:
                        Decs.append(float(rec[dec_key]))
                    if pltInc:
                        Incs.append(float(rec[inc_key]))
                    if not norm and pltMag:
                        Ints.append(float(rec[meas_key]))
                    if norm and pltMag:
                        Ints.append(
                            float(rec[meas_key]) / float(rec[spec_weight_col_name]))
        if len(SSucs) > 0:
            maxSuc = max(SSucs)
            minSuc = min(SSucs)
        if len(Ints) > 1:
            maxInt = max(Ints)
            minInt = min(Ints)
    if len(Depths) == 0:
        print('no bulk measurement data matched your request')
    else:
        print(len(Depths), "depths found")
    SpecDepths, SpecDecs, SpecIncs = [], [], []
    FDepths, FDecs, FIncs = [], [], []
    if spc_file:  # add depths to spec data
        # get all the discrete data with best fit lines
        BFLs = pmag.get_dictitem(Specs, meth_col_name, 'DE-BFL', 'has')
        for spec in BFLs:
            if location == "":
                location = spec.get(loc_col_name, "")
            samp = pmag.get_dictitem(
                Samps, samp_col_name, spec[samp_col_name], 'T')
            if len(samp) > 0 and depth_scale in list(samp[0].keys()) and samp[0][depth_scale] != "":
                if ylab == 'Age':
                    # get units of ages - assume they are all the same!
                    ylab = ylab + ' (' + samp[0]['age_unit'] + ')'
                # filter for depth
                if dmax == -1 or float(samp[0][depth_scale]) < dmax and float(samp[0][depth_scale]) > dmin:
                    SpecDepths.append(float(samp[0][depth_scale]))
                    SpecDecs.append(float(spec[spec_dec_col_name]))
                    SpecIncs.append(float(spec[spec_inc_col_name]))
            else:
                print('no core_depth found for: ', spec[spec_col_name])
        # get all the discrete data with Fisher means
        FMs = pmag.get_dictitem(Specs, meth_col_name, 'DE-FM', 'has')
        for spec in FMs:
            if location == "":
                location = spec.get(loc_col_name, "")
            samp = pmag.get_dictitem(
                Samps, samp_col_name, spec[samp_col_name], 'T')
            if len(samp) > 0 and depth_scale in list(samp[0].keys()) and samp[0][depth_scale] != "":
                if ylab == 'Age':
                    # get units of ages - assume they are all the same!
                    ylab = ylab + ' (' + samp[0]['age_unit'] + ')'
                # filter for depth
                if dmax == -1 or float(samp[0][depth_scale]) < dmax and float(samp[0][depth_scale]) > dmin:
                    FDepths.append(float(samp[0][depth_scale]))
                    # fixed: were the undefined names spec_dec_col /
                    # spec_inc_col (NameError at runtime)
                    FDecs.append(float(spec[spec_dec_col_name]))
                    FIncs.append(float(spec[spec_inc_col_name]))
            else:
                print('no core_depth found for: ', spec[spec_col_name])
    ResDepths, ResDecs, ResIncs = [], [], []
    if 'age' in depth_scale:  # set y-key
        res_scale = age_col_name
    else:
        res_scale = height_col_name
    if res_file:  # creates lists of Result Data
        for res in Results:
            meths = res[meth_col_name].split(":")
            if 'DE-FM' in meths:
                # filter for depth
                if dmax == -1 or float(res[res_scale]) < dmax and float(res[res_scale]) > dmin:
                    ResDepths.append(float(res[res_scale]))
                    ResDecs.append(float(res['average_dec']))
                    ResIncs.append(float(res['average_inc']))
    Susc, Sus_depths = [], []
    # if no explicit depth bounds were given, derive them from the data
    if dmin == -1:
        if len(Depths) > 0:
            dmin, dmax = Depths[0], Depths[-1]
        if len(FDepths) > 0:
            dmin, dmax = FDepths[0], FDepths[-1]
        if pltSus and len(SDepths) > 0:
            if SDepths[0] < dmin:
                dmin = SDepths[0]
            if SDepths[-1] > dmax:
                dmax = SDepths[-1]
        if len(SpecDepths) > 0:
            if min(SpecDepths) < dmin:
                dmin = min(SpecDepths)
            if max(SpecDepths) > dmax:
                dmax = max(SpecDepths)
        if len(ResDepths) > 0:
            if min(ResDepths) < dmin:
                dmin = min(ResDepths)
            if max(ResDepths) > dmax:
                dmax = max(ResDepths)
    # wig_file and suc_file are not currently supported options
    tint = 4.5
    plot = 1
    if (Decs and Depths) or (SpecDecs and SpecDepths) or (ResDecs and ResDepths) or (SDecs and SDepths) or (SInts and SDepths) or (SIncs and SDepths) or (Incs and Depths):
        main_plot = plt.figure(1, figsize=(width, 8))  # this works
        version_num = pmag.get_version()
        plt.figtext(.02, .01, version_num)
        if pltDec:
            ax = plt.subplot(1, pcol, plot)
            if pltLine:
                plt.plot(Decs, Depths, 'k')
            if len(Decs) > 0:
                plt.plot(Decs, Depths, sym, markersize=size)
            if len(Decs) == 0 and pltLine and len(SDecs) > 0:
                plt.plot(SDecs, SDepths, 'k')
            if len(SDecs) > 0:
                plt.plot(SDecs, SDepths, Ssym, markersize=Ssize)
            if spc_file:
                plt.plot(SpecDecs, SpecDepths, spc_sym, markersize=spc_size)
            if spc_file and len(FDepths) > 0:
                plt.scatter(
                    FDecs, FDepths, marker=spc_sym[-1], edgecolor=spc_sym[0], facecolor='white', s=spc_size**2)
            if res_file:
                plt.plot(ResDecs, ResDepths, res_sym, markersize=res_size)
            if sum_file:
                for core in Cores:
                    depth = float(core[core_depth_key])
                    if depth > dmin and depth < dmax:
                        plt.plot([0, 360.], [depth, depth], 'b--')
                        # fixed: was 'if pel == plt' (compared against the
                        # matplotlib module, so labels never plotted)
                        if pel == plot:
                            plt.text(360, depth + tint, core[core_label_key])
            if pel == plot:
                plt.axis([0, 400, dmax, dmin])
            else:
                plt.axis([0, 360., dmax, dmin])
            plt.xlabel('Declination')
            plt.ylabel(ylab)
            plot += 1
            pmagplotlib.delticks(ax)  # dec xticks are too crowded otherwise
    else:
        print('no data!')
        return False, 'No data found to plot\nTry again with different parameters'
    if pltInc:
        plt.subplot(1, pcol, plot)
        if pltLine:
            plt.plot(Incs, Depths, 'k')
        if len(Incs) > 0:
            plt.plot(Incs, Depths, sym, markersize=size)
        if len(Incs) == 0 and pltLine and len(SIncs) > 0:
            plt.plot(SIncs, SDepths, 'k')
        if len(SIncs) > 0:
            plt.plot(SIncs, SDepths, Ssym, markersize=Ssize)
        if spc_file and len(SpecDepths) > 0:
            plt.plot(SpecIncs, SpecDepths, spc_sym, markersize=spc_size)
        if spc_file and len(FDepths) > 0:
            plt.scatter(
                FIncs, FDepths, marker=spc_sym[-1], edgecolor=spc_sym[0], facecolor='white', s=spc_size**2)
        if res_file:
            plt.plot(ResIncs, ResDepths, res_sym, markersize=res_size)
        if sum_file:
            for core in Cores:
                depth = float(core[core_depth_key])
                if depth > dmin and depth < dmax:
                    if pel == plot:
                        plt.text(90, depth + tint, core[core_label_key])
                    plt.plot([-90, 90], [depth, depth], 'b--')
        plt.plot([0, 0], [dmax, dmin], 'k-')
        if pel == plot:
            plt.axis([-90, 110, dmax, dmin])
        else:
            plt.axis([-90, 90, dmax, dmin])
        plt.xlabel('Inclination')
        plt.ylabel('')
        plot += 1
    if pltMag and len(Ints) > 0 or len(SInts) > 0:
        plt.subplot(1, pcol, plot)
        # find the power of ten that scales the intensities above 1
        for pow in range(-10, 10):
            if maxInt * 10**pow > 1:
                break
        if not logit:
            for k in range(len(Ints)):
                Ints[k] = Ints[k] * 10**pow
            for k in range(len(SInts)):
                SInts[k] = SInts[k] * 10**pow
            if pltLine and len(Ints) > 0:
                plt.plot(Ints, Depths, 'k')
            if len(Ints) > 0:
                plt.plot(Ints, Depths, sym, markersize=size)
            if len(Ints) == 0 and pltLine and len(SInts) > 0:
                plt.plot(SInts, SDepths, 'k-')
            if len(SInts) > 0:
                plt.plot(SInts, SDepths, Ssym, markersize=Ssize)
            if sum_file:
                for core in Cores:
                    depth = float(core[core_depth_key])
                    plt.plot([0, maxInt * 10**pow + .1], [depth, depth], 'b--')
                    if depth > dmin and depth < dmax:
                        plt.text(maxInt * 10**pow - .2 * maxInt * 10 **
                                 pow, depth + tint, core[core_label_key])
            plt.axis([0, maxInt * 10**pow + .1, dmax, dmin])
            if not norm:
                plt.xlabel('%s %i %s' % ('Intensity (10^-', pow, ' Am^2)'))
            else:
                plt.xlabel('%s %i %s' % ('Intensity (10^-', pow, ' Am^2/kg)'))
        else:
            if pltLine:
                plt.semilogx(Ints, Depths, 'k')
            if len(Ints) > 0:
                plt.semilogx(Ints, Depths, sym, markersize=size)
            # (the original repeated this check twice; the duplicate was a
            # no-op and has been removed)
            if len(Ints) == 0 and pltLine and len(SInts) > 0:
                plt.semilogx(SInts, SDepths, 'k')
            if len(SInts) > 0:
                plt.semilogx(SInts, SDepths, Ssym, markersize=Ssize)
            if sum_file:
                for core in Cores:
                    depth = float(core[core_depth_key])
                    plt.semilogx([minInt, maxInt], [depth, depth], 'b--')
                    if depth > dmin and depth < dmax:
                        plt.text(maxInt - .2 * maxInt, depth +
                                 tint, core[core_label_key])
            minInt = plt.axis()[0]
            plt.axis([minInt, maxInt, dmax, dmin])
            if not norm:
                plt.xlabel('Intensity (Am^2)')
            else:
                plt.xlabel('Intensity (Am^2/kg)')
        plot += 1
    if suc_file or len(SSucs) > 0:
        plt.subplot(1, pcol, plot)
        if len(Susc) > 0:
            if pltLine:
                plt.plot(Susc, Sus_depths, 'k')
            if not logit:
                plt.plot(Susc, Sus_depths, sym, markersize=size)
            if logit:
                plt.semilogx(Susc, Sus_depths, sym, markersize=size)
        if len(SSucs) > 0:
            if not logit:
                plt.plot(SSucs, SDepths, sym, markersize=size)
            if logit:
                plt.semilogx(SSucs, SDepths, sym, markersize=size)
        if sum_file:
            for core in Cores:
                depth = float(core[core_depth_key])
                if not logit:
                    plt.plot([minSuc, maxSuc], [depth, depth], 'b--')
                if logit:
                    plt.semilogx([minSuc, maxSuc], [depth, depth], 'b--')
        plt.axis([minSuc, maxSuc, dmax, dmin])
        plt.xlabel('Susceptibility')
        plot += 1
    if pltTime:
        ax1 = plt.subplot(1, pcol, plot)
        ax1.axis([-.25, 1.5, amax, amin])
        plot += 1
        TS, Chrons = pmag.get_ts(timescale)
        X, Y, Y2 = [0, 1], [], []
        cnt = 0
        if amin < TS[1]:  # in the Brunhes
            Y = [amin, amin]  # minimum age
            Y1 = [TS[1], TS[1]]  # age of the B/M boundary
            # color in Brunhes, black
            ax1.fill_between(X, Y, Y1, facecolor='black')
        for d in TS[1:]:
            pol = cnt % 2
            cnt += 1
            if d <= amax and d >= amin:
                ind = TS.index(d)
                Y = [TS[ind], TS[ind]]
                Y1 = [TS[ind + 1], TS[ind + 1]]
                if pol:
                    # fill in every other time
                    ax1.fill_between(X, Y, Y1, facecolor='black')
        ax1.plot([0, 1, 1, 0, 0], [amin, amin, amax, amax, amin], 'k-')
        ax2 = ax1.twinx()
        plt.ylabel("Age (Ma): " + timescale)
        for k in range(len(Chrons) - 1):
            c = Chrons[k]
            cnext = Chrons[k + 1]
            d = cnext[1] - old_div((cnext[1] - c[1]), 3.)
            if d >= amin and d < amax:
                # make the Chron boundary tick
                ax2.plot([1, 1.5], [c[1], c[1]], 'k-')
                ax2.text(1.05, d, c[0])
        ax2.axis([-.25, 1.5, amax, amin])
    figname = location + '_m:_' + method + '_core-depthplot.' + fmt
    plt.title(location)
    return main_plot, figname
if age file is provided, depth_scale will be set to 'age' by default.
You must provide at least a measurements, specimens and samples file to plot.
Parameters
----------
input_dir_path : str, default "."
file input directory
meas_file : str, default "measurements.txt"
input measurements file
spc_file : str, default ""
input specimens file
samp_file : str, default ""
input samples file
age_file : str, default ""
input ages file
sum_file : str, default ""
input csv summary file
wt_file : str, default ""
input file with weights
depth_scale : str, default "core_depth"
['core_depth', 'composite_depth']
dmin : number, default -1
minimum depth to plot (if -1, default to plotting all)
dmax : number, default -1
maximum depth to plot (if -1, default to plotting all)
sym : str, default "bo"
symbol color and shape, default blue circles
        (see matplotlib documentation for more options)
    size : int, default 5
symbol size
spc_sym : str, default 'ro'
specimen symbol color and shape, default red circles
(see matplotlib documentation for more options)
meth : str, default ""
method codes, ["LT-NO", "AF", "T", "ARM", "IRM", "X"]
step : int, default 0
treatment step for plotting:
for AF, in mT, for T, in C
fmt : str, default "svg"
format for figures, [svg,jpg,png,pdf]
pltDec : bool, default True
plot declination
pltInc : bool, default True
plot inclination
pltMag : bool, default True
plot magnetization
pltLine : bool, default True
connect dots with a line
pltSus : bool, default True
plot blanket treatment
logit : bool, default False
plot magnetization on a log scale
amin : int, default -1
minimum time to plot (if -1, default to plotting all)
amax : int, default -1
maximum time to plot (if -1, default to plotting all)
norm : bool, default False
normalize by weight
data_model_num : int, default 3
MagIC data model (please, use data model 3) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L3295-L4056 |
PmagPy/PmagPy | pmagpy/ipmag.py | download_magic | def download_magic(infile, dir_path='.', input_dir_path='',
                   overwrite=False, print_progress=True,
                   data_model=3., separate_locs=False):
    """
    takes the name of a text file downloaded from the MagIC database and
    unpacks it into magic-formatted files. by default, download_magic assumes
    that you are doing everything in your current directory. if not, you may
    provide optional arguments dir_path (where you want the results to go) and
    input_dir_path (where the downloaded file is IF that location is different from
    dir_path).

    Parameters
    ----------
    infile : str
        MagIC-format file to unpack
    dir_path : str
        output directory (default ".")
    input_dir_path : str, default ""
        path for input file if different from output_dir_path (default is same)
    overwrite: bool
        overwrite current directory (default False)
    print_progress: bool
        verbose output (default True)
    data_model : float
        MagIC data model 2.5 or 3 (default 3)
    separate_locs : bool
        create a separate directory for each location (Location_*)
        (default False)

    Returns
    -------
    bool
        True on success; False if separate_locs finds a pre-existing
        Location_* directory and overwrite is False
    """
    # the method-code column was renamed between data models 2.5 and 3
    if data_model == 2.5:
        method_col = "magic_method_codes"
    else:
        method_col = "method_codes"
    input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
    infile = pmag.resolve_file_name(infile, input_dir_path)
    # try to deal reasonably with unicode errors (utf-8 first, then Latin-1)
    try:
        f = codecs.open(infile, 'r', "utf-8")
        infile = f.readlines()
    except UnicodeDecodeError:
        f = codecs.open(infile, 'r', "Latin-1")
        infile = f.readlines()
    f.close()
    File = []  # will contain all non-blank lines from downloaded file
    for line in infile:
        line = line.replace('\n', '')
        if line[0:4] == '>>>>' or len(line.strip()) > 0:  # skip blank lines
            File.append(line)
    LN = 0  # tracks our progress iterating through File
    type_list = []  # table names seen so far
    filenum = 0  # suffix appended when the same table name appears again
    # outer loop: one pass per table in the downloaded file
    while LN < len(File) - 1:
        line = File[LN]
        if ">>>>" in line:  # table separator
            LN += 1
            continue
        # the table name is the second tab-delimited field of the header line
        file_type = line.split('\t')[1]
        file_type = file_type.lower()
        if file_type[-1] == "\n":
            file_type = file_type[:-1]
        if print_progress == True:
            print('working on: ', repr(file_type))
        if file_type not in type_list:
            type_list.append(file_type)
        else:
            # duplicate table name: write to <name>_<filenum>.txt instead
            filenum += 1
        LN += 1
        line = File[LN]
        # skip empty tables
        if line == ">>>>>>>>>>":
            LN += 1
            continue
        # this line carries the column names for the current table
        keys = line.replace('\n', '').split('\t')
        if keys[0][0] == '.':
            keys = line.replace('\n', '').replace('.', '').split('\t')
            keys.append('RecNo')  # kludge for new MagIC download format
        LN += 1
        Recs = []
        # inner loop: collect rows until the next '>>>>' separator or EOF
        while LN < len(File):
            line = File[LN]
            # finish up one file type and then break
            if ">>>>" in line and len(Recs) > 0:
                if filenum == 0:
                    outfile = os.path.join(dir_path, file_type.strip() + '.txt')
                else:
                    outfile = os.path.join(dir_path, file_type.strip() + '_' + str(filenum) + '.txt')
                NewRecs = []
                for rec in Recs:
                    if method_col in list(rec.keys()):
                        meths = rec[method_col].split(":")
                        if len(meths) > 0:
                            methods = ""
                            for meth in meths:
                                methods = methods + meth.strip() + ":"  # get rid of nasty spaces!!!!!!
                            rec[method_col] = methods[:-1]
                    NewRecs.append(rec)
                # NOTE(review): Recs (not NewRecs) is written; equivalent here
                # because the recs were cleaned in place just above
                pmag.magic_write(outfile, Recs, file_type)
                if print_progress == True:
                    print(file_type, " data put in ", outfile)
                Recs = []
                LN += 1
                break
            # keep adding records of the same file type
            else:
                rec = line.split('\t')
                Rec = {}
                if len(rec) == len(keys):
                    for k in range(len(rec)):
                        Rec[keys[k]] = rec[k]
                    Recs.append(Rec)
                # in case of magic_search_results.txt, which has an extra
                # column:
                elif len(rec) - len(keys) == 1:
                    for k in range(len(rec))[:-1]:
                        Rec[keys[k]] = rec[k]
                    Recs.append(Rec)
                elif len(rec) < len(keys):
                    # pad short rows with empty strings
                    for k in range(len(rec)):
                        Rec[keys[k]] = rec[k]
                    for k in range(len(rec), len(keys)):
                        Rec[keys[k]] = ""
                    Recs.append(Rec)
                else:
                    print('WARNING: problem in file with line: ')
                    print(line)
                    print('skipping....')
                LN += 1
        # flush rows left over when the file ends without a separator
        if len(Recs) > 0:
            if filenum == 0:
                outfile = os.path.join(dir_path, file_type.strip() + '.txt')
            else:
                outfile = os.path.join(dir_path, file_type.strip() + '_' + str(filenum) + '.txt')
            NewRecs = []
            for rec in Recs:
                if method_col in list(rec.keys()):
                    meths = rec[method_col].split(":")
                    if len(meths) > 0:
                        methods = ""
                        for meth in meths:
                            methods = methods + meth.strip() + ":"  # get rid of nasty spaces!!!!!!
                        rec[method_col] = methods[:-1]
                NewRecs.append(rec)
            pmag.magic_write(outfile, Recs, file_type)
            if print_progress == True:
                print(file_type, " data put in ", outfile)
    # look through locations table and create separate directories for each
    # location
    if separate_locs:
        con = cb.Contribution(dir_path)
        con.propagate_location_to_measurements()
        con.propagate_name_down('location', 'samples')
        for dtype in con.tables:
            con.write_table_to_file(dtype)
        locs, locnum = [], 1
        if 'locations' in type_list:
            locs, file_type = pmag.magic_read(
                os.path.join(dir_path, 'locations.txt'))
        if len(locs) > 0:  # at least one location
            # go through unique location names
            for loc_name in set([loc.get('location') for loc in locs]):
                if print_progress == True:
                    print('location_' + str(locnum) + ": ", loc_name)
                lpath = os.path.join(dir_path, 'Location_' + str(locnum))
                locnum += 1
                try:
                    os.mkdir(lpath)
                except:
                    print('directory ', lpath,
                          ' already exists - overwriting everything: {}'.format(overwrite))
                    if not overwrite:
                        print("-W- download_magic encountered a duplicate subdirectory ({}) and could not finish.\nRerun with overwrite=True, or unpack this file in a different directory.".format(lpath))
                        return False
                # copy each table's location-matching records into lpath
                for f in type_list:
                    fname = os.path.join(dir_path, f + '.txt')
                    if print_progress == True:
                        print('unpacking: ', fname)
                    recs, file_type = pmag.magic_read(fname)
                    if print_progress == True:
                        print(len(recs), ' read in')
                    lrecs = pmag.get_dictitem(recs, 'location', loc_name, 'T')
                    if len(lrecs) > 0:
                        outfile_name = os.path.join(lpath, f + ".txt")
                        pmag.magic_write(outfile_name, lrecs, file_type)
                        if print_progress == True:
                            print(len(lrecs), ' stored in ', outfile_name)
    return True | python | def download_magic(infile, dir_path='.', input_dir_path='',
def download_magic(infile, dir_path='.', input_dir_path='',
                   overwrite=False, print_progress=True,
                   data_model=3., separate_locs=False):
    """
    Take the name of a text file downloaded from the MagIC database and
    unpack it into magic-formatted files. By default, download_magic assumes
    that you are doing everything in your current directory. If not, you may
    provide optional arguments dir_path (where you want the results to go) and
    input_dir_path (where the downloaded file is IF that location is different
    from dir_path).

    Parameters
    ----------
    infile : str
        MagIC-format file to unpack
    dir_path : str
        output directory (default ".")
    input_dir_path : str, default ""
        path for input file if different from output_dir_path (default is same)
    overwrite: bool
        overwrite current directory (default False)
    print_progress: bool
        verbose output (default True)
    data_model : float
        MagIC data model 2.5 or 3 (default 3)
    separate_locs : bool
        create a separate directory for each location (Location_*)
        (default False)

    Returns
    -------
    bool
        True on success; False if separate_locs finds a pre-existing
        Location_* directory and overwrite is False
    """
    # the method-code column was renamed between data models 2.5 and 3
    if data_model == 2.5:
        method_col = "magic_method_codes"
    else:
        method_col = "method_codes"
    input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
    infile = pmag.resolve_file_name(infile, input_dir_path)
    # try to deal reasonably with unicode errors (utf-8 first, then Latin-1);
    # 'with' guarantees the handle is closed on either path
    try:
        with codecs.open(infile, 'r', "utf-8") as f:
            raw_lines = f.readlines()
    except UnicodeDecodeError:
        with codecs.open(infile, 'r', "Latin-1") as f:
            raw_lines = f.readlines()
    # keep every non-blank line (table separators '>>>>' included)
    File = [line.replace('\n', '') for line in raw_lines
            if line[0:4] == '>>>>' or len(line.strip()) > 0]

    def _flush(Recs, file_type, filenum):
        """Strip stray whitespace from method codes (in place) and write
        the collected records for one table to its own .txt file."""
        if filenum == 0:
            outfile = os.path.join(dir_path, file_type.strip() + '.txt')
        else:
            outfile = os.path.join(
                dir_path, file_type.strip() + '_' + str(filenum) + '.txt')
        for rec in Recs:
            if method_col in rec:
                # get rid of nasty spaces!!!!!!
                rec[method_col] = ":".join(
                    meth.strip() for meth in rec[method_col].split(":"))
        pmag.magic_write(outfile, Recs, file_type)
        if print_progress:
            print(file_type, " data put in ", outfile)

    LN = 0  # tracks our progress iterating through File
    type_list = []  # table names seen so far
    filenum = 0  # suffix appended when the same table name appears again
    # outer loop: one pass per table in the downloaded file
    while LN < len(File) - 1:
        line = File[LN]
        if ">>>>" in line:  # table separator
            LN += 1
            continue
        # the table name is the second tab-delimited field of the header line
        file_type = line.split('\t')[1].lower()
        if file_type[-1] == "\n":
            file_type = file_type[:-1]
        if print_progress:
            print('working on: ', repr(file_type))
        if file_type not in type_list:
            type_list.append(file_type)
        else:
            # duplicate table name: write to <name>_<filenum>.txt instead
            filenum += 1
        LN += 1
        line = File[LN]
        # skip empty tables
        if line == ">>>>>>>>>>":
            LN += 1
            continue
        # this line carries the column names for the current table
        keys = line.replace('\n', '').split('\t')
        if keys[0][0] == '.':
            keys = line.replace('\n', '').replace('.', '').split('\t')
            keys.append('RecNo')  # kludge for new MagIC download format
        LN += 1
        Recs = []
        # inner loop: collect rows until the next '>>>>' separator or EOF
        while LN < len(File):
            line = File[LN]
            # finish up one file type and then break
            if ">>>>" in line and len(Recs) > 0:
                _flush(Recs, file_type, filenum)
                Recs = []
                LN += 1
                break
            # keep adding records of the same file type
            rec = line.split('\t')
            Rec = {}
            if len(rec) == len(keys):
                for k in range(len(rec)):
                    Rec[keys[k]] = rec[k]
                Recs.append(Rec)
            # in case of magic_search_results.txt, which has an extra column:
            elif len(rec) - len(keys) == 1:
                for k in range(len(rec) - 1):
                    Rec[keys[k]] = rec[k]
                Recs.append(Rec)
            elif len(rec) < len(keys):
                # pad short rows with empty strings
                for k in range(len(rec)):
                    Rec[keys[k]] = rec[k]
                for k in range(len(rec), len(keys)):
                    Rec[keys[k]] = ""
                Recs.append(Rec)
            else:
                print('WARNING: problem in file with line: ')
                print(line)
                print('skipping....')
            LN += 1
        # flush rows left over when the file ends without a separator
        if len(Recs) > 0:
            _flush(Recs, file_type, filenum)
    # look through locations table and create separate directories for each
    # location
    if separate_locs:
        con = cb.Contribution(dir_path)
        con.propagate_location_to_measurements()
        con.propagate_name_down('location', 'samples')
        for dtype in con.tables:
            con.write_table_to_file(dtype)
        locs, locnum = [], 1
        if 'locations' in type_list:
            locs, file_type = pmag.magic_read(
                os.path.join(dir_path, 'locations.txt'))
        if len(locs) > 0:  # at least one location
            # go through unique location names
            for loc_name in set([loc.get('location') for loc in locs]):
                if print_progress:
                    print('location_' + str(locnum) + ": ", loc_name)
                lpath = os.path.join(dir_path, 'Location_' + str(locnum))
                locnum += 1
                try:
                    os.mkdir(lpath)
                except OSError:  # narrowed from bare except: dir exists etc.
                    print('directory ', lpath,
                          ' already exists - overwriting everything: {}'.format(overwrite))
                    if not overwrite:
                        print("-W- download_magic encountered a duplicate subdirectory ({}) and could not finish.\nRerun with overwrite=True, or unpack this file in a different directory.".format(lpath))
                        return False
                # copy each table's location-matching records into lpath
                for ftype in type_list:
                    fname = os.path.join(dir_path, ftype + '.txt')
                    if print_progress:
                        print('unpacking: ', fname)
                    recs, file_type = pmag.magic_read(fname)
                    if print_progress:
                        print(len(recs), ' read in')
                    lrecs = pmag.get_dictitem(recs, 'location', loc_name, 'T')
                    if len(lrecs) > 0:
                        outfile_name = os.path.join(lpath, ftype + ".txt")
                        pmag.magic_write(outfile_name, lrecs, file_type)
                        if print_progress:
                            print(len(lrecs), ' stored in ', outfile_name)
    return True
return True | takes the name of a text file downloaded from the MagIC database and
unpacks it into magic-formatted files. by default, download_magic assumes
that you are doing everything in your current directory. if not, you may
provide optional arguments dir_path (where you want the results to go) and
input_dir_path (where the downloaded file is IF that location is different from
dir_path).
Parameters
----------
infile : str
MagIC-format file to unpack
dir_path : str
output directory (default ".")
input_dir_path : str, default ""
        path for input file if different from output_dir_path (default is same)
overwrite: bool
overwrite current directory (default False)
print_progress: bool
verbose output (default True)
data_model : float
MagIC data model 2.5 or 3 (default 3)
separate_locs : bool
create a separate directory for each location (Location_*)
(default False) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L4059-L4245 |
PmagPy/PmagPy | pmagpy/ipmag.py | upload_magic2 | def upload_magic2(concat=0, dir_path='.', data_model=None):
    """
    Finds all magic files in a given directory, and compiles them into an
    upload.txt file which can be uploaded into the MagIC database. Returns a
    tuple of either: (False, error_message, errors) if there was a problem
    creating/validating the upload file or: (filename, '', None) if the upload
    was fully successful.

    Parameters
    ----------
    concat : int or bool
        if 1/True, leave a trailing table separator on the upload file so
        more tables can be concatenated later (default 0)
    dir_path : str
        directory searched for MagIC 2.5 files and used for output
        (default ".")
    data_model : optional
        data model object passed to the upload validator (default None)
    """
    SpecDone = []  # specimen names that have a valid parent sample record
    locations = []  # location names, used to name the final upload file
    concat = int(concat)
    # the MagIC 2.5 tables searched for, in processing order
    files_list = ["er_expeditions.txt", "er_locations.txt", "er_samples.txt", "er_specimens.txt", "er_sites.txt", "er_ages.txt", "er_citations.txt", "er_mailinglist.txt", "magic_measurements.txt",
                  "rmag_hysteresis.txt", "rmag_anisotropy.txt", "rmag_remanence.txt", "rmag_results.txt", "pmag_specimens.txt", "pmag_samples.txt", "pmag_sites.txt", "pmag_results.txt", "pmag_criteria.txt", "magic_instruments.txt"]
    file_names = [os.path.join(dir_path, f) for f in files_list]
    # begin the upload process
    up = os.path.join(dir_path, "upload.txt")
    if os.path.exists(up):
        os.remove(up)
    # columns stripped out before upload (not part of the data model)
    RmKeys = ['citation_label', 'compilation', 'calculation_type', 'average_n_lines', 'average_n_planes',
              'specimen_grade', 'site_vgp_lat', 'site_vgp_lon', 'direction_type', 'specimen_Z',
              'magic_instrument_codes', 'cooling_rate_corr', 'cooling_rate_mcd', 'anisotropy_atrm_alt',
              'anisotropy_apar_perc', 'anisotropy_F', 'anisotropy_F_crit', 'specimen_scat',
              'specimen_gmax', 'specimen_frac', 'site_vadm', 'site_lon', 'site_vdm', 'site_lat',
              'measurement_chi', 'specimen_k_prime', 'specimen_k_prime_sse', 'external_database_names',
              'external_database_ids', 'Further Notes', 'Typology', 'Notes (Year/Area/Locus/Level)',
              'Site', 'Object Number', 'dir_n_specimens']
    print("-I- Removing: ", RmKeys)
    # NOTE(review): CheckDec, CheckSign and last are never used below
    CheckDec = ['_dec', '_lon', '_azimuth', 'dip_direction']
    CheckSign = ['specimen_b_beta']
    last = file_names[-1]
    # methods: every method code seen; first_file: flag for first table written
    methods, first_file = [], 1
    for File in file_names:
        # read in the data
        Data, file_type = pmag.magic_read(File)
        if (file_type != "bad_file") and (file_type != "empty_file"):
            print("-I- file", File, " successfully read in")
            if len(RmKeys) > 0:
                for rec in Data:
                    # remove unwanted keys
                    for key in RmKeys:
                        if key == 'specimen_Z' and key in list(rec.keys()):
                            # change # change this to lower case
                            rec[key] = 'specimen_z'
                        if key in list(rec.keys()):
                            del rec[key]  # get rid of unwanted keys
                    # make sure b_beta is positive
                    # ignore blanks
                    if 'specimen_b_beta' in list(rec.keys()) and rec['specimen_b_beta'] != "":
                        if float(rec['specimen_b_beta']) < 0:
                            # make sure value is positive
                            rec['specimen_b_beta'] = str(
                                -float(rec['specimen_b_beta']))
                            print('-I- adjusted to positive: ',
                                  'specimen_b_beta', rec['specimen_b_beta'])
                    # make all declinations/azimuths/longitudes in range
                    # 0=>360.
                    rec = pmag.adjust_all_to_360(rec)
            if file_type == 'er_locations':
                for rec in Data:
                    locations.append(rec['er_location_name'])
            if file_type in ['pmag_samples', 'pmag_sites', 'pmag_specimens']:
                # if there is NO pmag data for specimens (samples/sites),
                # do not try to write it to file
                # (this causes validation errors, elsewise)
                ignore = True
                for rec in Data:
                    if ignore == False:
                        break
                    keys = list(rec.keys())
                    exclude_keys = ['er_citation_names', 'er_site_name', 'er_sample_name',
                                    'er_location_name', 'er_specimen_names', 'er_sample_names']
                    for key in exclude_keys:
                        if key in keys:
                            keys.remove(key)
                    # any remaining non-blank value means there is real data
                    for key in keys:
                        if rec[key]:
                            ignore = False
                            break
                if ignore:
                    continue
            if file_type == 'er_samples':  # check to only upload top priority orientation record!
                NewSamps, Done = [], []
                for rec in Data:
                    if rec['er_sample_name'] not in Done:
                        orient, az_type = pmag.get_orient(
                            Data, rec['er_sample_name'])
                        NewSamps.append(orient)
                        Done.append(rec['er_sample_name'])
                Data = NewSamps
                print(
                    'only highest priority orientation record from er_samples.txt read in ')
            if file_type == 'er_specimens':  # only specimens that have sample names
                # NOTE(review): Done is only defined in the er_samples branch
                # above; if er_samples.txt is missing this raises NameError
                NewData, SpecDone = [], []
                for rec in Data:
                    if rec['er_sample_name'] in Done:
                        NewData.append(rec)
                        SpecDone.append(rec['er_specimen_name'])
                    else:
                        print('no valid sample record found for: ')
                        print(rec)
                Data = NewData
                # print 'only measurements that have specimen/sample info'
            if file_type == 'magic_measurements':  # only measurements that have specimen names
                no_specs = []
                NewData = []
                for rec in Data:
                    if rec['er_specimen_name'] in SpecDone:
                        NewData.append(rec)
                    else:
                        print('no valid specimen record found for: ')
                        print(rec)
                        no_specs.append(rec)
                # print set([record['er_specimen_name'] for record in
                # no_specs])
                Data = NewData
            # write out the data
            if len(Data) > 0:
                if first_file == 1:
                    keystring = pmag.first_rec(up, Data[0], file_type)
                    first_file = 0
                else:
                    keystring = pmag.first_up(up, Data[0], file_type)
                for rec in Data:
                    # collect the method codes
                    if "magic_method_codes" in list(rec.keys()):
                        meths = rec["magic_method_codes"].split(':')
                        for meth in meths:
                            if meth.strip() not in methods:
                                if meth.strip() != "LP-DIR-":
                                    methods.append(meth.strip())
                    # retry once on transient file-system errors
                    try:
                        pmag.putout(up, keystring, rec)
                    except IOError:
                        print('-W- File input error: slowing down')
                        time.sleep(1)
                        pmag.putout(up, keystring, rec)
                # write out the file separator
                f = open(up, 'a')
                f.write('>>>>>>>>>>\n')
                f.close()
                print(file_type, 'written to ', up)
        else:
            print('File:', File)
            print(file_type, 'is bad or non-existent - skipping ')
    # write out the methods table
    first_rec, MethRec = 1, {}
    for meth in methods:
        MethRec["magic_method_code"] = meth
        if first_rec == 1:
            meth_keys = pmag.first_up(up, MethRec, "magic_methods")
            first_rec = 0
        try:
            pmag.putout(up, meth_keys, MethRec)
        except IOError:
            print('-W- File input error: slowing down')
            time.sleep(1)
            pmag.putout(up, meth_keys, MethRec)
    if concat == 1:
        # leave a trailing separator so more tables can be appended later
        f = open(up, 'a')
        f.write('>>>>>>>>>>\n')
        f.close()
    if os.path.isfile(up):
        from . import validate_upload2 as validate_upload
        validated = False
        validated, errors = validate_upload.read_upload(up, data_model)
    else:
        print("no data found, upload file not created")
        return False, "no data found, upload file not created", None
    # rename upload.txt according to location + timestamp
    format_string = "%d.%b.%Y"
    if locations:
        location = locations[0].replace(' ', '_')
        new_up = location + '_' + time.strftime(format_string) + '.txt'
    else:
        new_up = 'unknown_location_' + time.strftime(format_string) + '.txt'
    new_up = os.path.join(dir_path, new_up)
    # avoid clobbering an existing upload file: append _1, _2, ... up to _99
    if os.path.isfile(new_up):
        fname, extension = os.path.splitext(new_up)
        for i in range(1, 100):
            if os.path.isfile(fname + "_" + str(i) + extension):
                continue
            else:
                new_up = fname + "_" + str(i) + extension
                break
    os.rename(up, new_up)
    print("Finished preparing upload file: {} ".format(new_up))
    if not validated:
        print("-W- validation of upload file has failed.\nYou can still upload {} to MagIC,\nbut you will need to fix the above errors before your contribution can be activated.".format(new_up))
        return False, "Validation of your upload file has failed.\nYou can still upload {} to MagIC,\nbut you will need to fix the above errors before your contribution can be activated.".format(new_up), errors
    return new_up, '', None | python | def upload_magic2(concat=0, dir_path='.', data_model=None):
"""
Finds all magic files in a given directory, and compiles them into an
upload.txt file which can be uploaded into the MagIC database. Returns a
tuple of either: (False, error_message, errors) if there was a problem
creating/validating the upload file or: (filename, '', None) if the upload
was fully successful.
"""
SpecDone = []
locations = []
concat = int(concat)
files_list = ["er_expeditions.txt", "er_locations.txt", "er_samples.txt", "er_specimens.txt", "er_sites.txt", "er_ages.txt", "er_citations.txt", "er_mailinglist.txt", "magic_measurements.txt",
"rmag_hysteresis.txt", "rmag_anisotropy.txt", "rmag_remanence.txt", "rmag_results.txt", "pmag_specimens.txt", "pmag_samples.txt", "pmag_sites.txt", "pmag_results.txt", "pmag_criteria.txt", "magic_instruments.txt"]
file_names = [os.path.join(dir_path, f) for f in files_list]
# begin the upload process
up = os.path.join(dir_path, "upload.txt")
if os.path.exists(up):
os.remove(up)
RmKeys = ['citation_label', 'compilation', 'calculation_type', 'average_n_lines', 'average_n_planes',
'specimen_grade', 'site_vgp_lat', 'site_vgp_lon', 'direction_type', 'specimen_Z',
'magic_instrument_codes', 'cooling_rate_corr', 'cooling_rate_mcd', 'anisotropy_atrm_alt',
'anisotropy_apar_perc', 'anisotropy_F', 'anisotropy_F_crit', 'specimen_scat',
'specimen_gmax', 'specimen_frac', 'site_vadm', 'site_lon', 'site_vdm', 'site_lat',
'measurement_chi', 'specimen_k_prime', 'specimen_k_prime_sse', 'external_database_names',
'external_database_ids', 'Further Notes', 'Typology', 'Notes (Year/Area/Locus/Level)',
'Site', 'Object Number', 'dir_n_specimens']
print("-I- Removing: ", RmKeys)
CheckDec = ['_dec', '_lon', '_azimuth', 'dip_direction']
CheckSign = ['specimen_b_beta']
last = file_names[-1]
methods, first_file = [], 1
for File in file_names:
# read in the data
Data, file_type = pmag.magic_read(File)
if (file_type != "bad_file") and (file_type != "empty_file"):
print("-I- file", File, " successfully read in")
if len(RmKeys) > 0:
for rec in Data:
# remove unwanted keys
for key in RmKeys:
if key == 'specimen_Z' and key in list(rec.keys()):
# change # change this to lower case
rec[key] = 'specimen_z'
if key in list(rec.keys()):
del rec[key] # get rid of unwanted keys
# make sure b_beta is positive
# ignore blanks
if 'specimen_b_beta' in list(rec.keys()) and rec['specimen_b_beta'] != "":
if float(rec['specimen_b_beta']) < 0:
# make sure value is positive
rec['specimen_b_beta'] = str(
-float(rec['specimen_b_beta']))
print('-I- adjusted to positive: ',
'specimen_b_beta', rec['specimen_b_beta'])
# make all declinations/azimuths/longitudes in range
# 0=>360.
rec = pmag.adjust_all_to_360(rec)
if file_type == 'er_locations':
for rec in Data:
locations.append(rec['er_location_name'])
if file_type in ['pmag_samples', 'pmag_sites', 'pmag_specimens']:
# if there is NO pmag data for specimens (samples/sites),
# do not try to write it to file
# (this causes validation errors, elsewise)
ignore = True
for rec in Data:
if ignore == False:
break
keys = list(rec.keys())
exclude_keys = ['er_citation_names', 'er_site_name', 'er_sample_name',
'er_location_name', 'er_specimen_names', 'er_sample_names']
for key in exclude_keys:
if key in keys:
keys.remove(key)
for key in keys:
if rec[key]:
ignore = False
break
if ignore:
continue
if file_type == 'er_samples': # check to only upload top priority orientation record!
NewSamps, Done = [], []
for rec in Data:
if rec['er_sample_name'] not in Done:
orient, az_type = pmag.get_orient(
Data, rec['er_sample_name'])
NewSamps.append(orient)
Done.append(rec['er_sample_name'])
Data = NewSamps
print(
'only highest priority orientation record from er_samples.txt read in ')
if file_type == 'er_specimens': # only specimens that have sample names
NewData, SpecDone = [], []
for rec in Data:
if rec['er_sample_name'] in Done:
NewData.append(rec)
SpecDone.append(rec['er_specimen_name'])
else:
print('no valid sample record found for: ')
print(rec)
Data = NewData
# print 'only measurements that have specimen/sample info'
if file_type == 'magic_measurements': # only measurements that have specimen names
no_specs = []
NewData = []
for rec in Data:
if rec['er_specimen_name'] in SpecDone:
NewData.append(rec)
else:
print('no valid specimen record found for: ')
print(rec)
no_specs.append(rec)
# print set([record['er_specimen_name'] for record in
# no_specs])
Data = NewData
# write out the data
if len(Data) > 0:
if first_file == 1:
keystring = pmag.first_rec(up, Data[0], file_type)
first_file = 0
else:
keystring = pmag.first_up(up, Data[0], file_type)
for rec in Data:
# collect the method codes
if "magic_method_codes" in list(rec.keys()):
meths = rec["magic_method_codes"].split(':')
for meth in meths:
if meth.strip() not in methods:
if meth.strip() != "LP-DIR-":
methods.append(meth.strip())
try:
pmag.putout(up, keystring, rec)
except IOError:
print('-W- File input error: slowing down')
time.sleep(1)
pmag.putout(up, keystring, rec)
# write out the file separator
f = open(up, 'a')
f.write('>>>>>>>>>>\n')
f.close()
print(file_type, 'written to ', up)
else:
print('File:', File)
print(file_type, 'is bad or non-existent - skipping ')
# write out the methods table
first_rec, MethRec = 1, {}
for meth in methods:
MethRec["magic_method_code"] = meth
if first_rec == 1:
meth_keys = pmag.first_up(up, MethRec, "magic_methods")
first_rec = 0
try:
pmag.putout(up, meth_keys, MethRec)
except IOError:
print('-W- File input error: slowing down')
time.sleep(1)
pmag.putout(up, meth_keys, MethRec)
if concat == 1:
f = open(up, 'a')
f.write('>>>>>>>>>>\n')
f.close()
if os.path.isfile(up):
from . import validate_upload2 as validate_upload
validated = False
validated, errors = validate_upload.read_upload(up, data_model)
else:
print("no data found, upload file not created")
return False, "no data found, upload file not created", None
# rename upload.txt according to location + timestamp
format_string = "%d.%b.%Y"
if locations:
location = locations[0].replace(' ', '_')
new_up = location + '_' + time.strftime(format_string) + '.txt'
else:
new_up = 'unknown_location_' + time.strftime(format_string) + '.txt'
new_up = os.path.join(dir_path, new_up)
if os.path.isfile(new_up):
fname, extension = os.path.splitext(new_up)
for i in range(1, 100):
if os.path.isfile(fname + "_" + str(i) + extension):
continue
else:
new_up = fname + "_" + str(i) + extension
break
os.rename(up, new_up)
print("Finished preparing upload file: {} ".format(new_up))
if not validated:
print("-W- validation of upload file has failed.\nYou can still upload {} to MagIC,\nbut you will need to fix the above errors before your contribution can be activated.".format(new_up))
return False, "Validation of your upload file has failed.\nYou can still upload {} to MagIC,\nbut you will need to fix the above errors before your contribution can be activated.".format(new_up), errors
return new_up, '', None | Finds all magic files in a given directory, and compiles them into an
upload.txt file which can be uploaded into the MagIC database. Returns a
tuple of either: (False, error_message, errors) if there was a problem
creating/validating the upload file or: (filename, '', None) if the upload
was fully successful. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L4248-L4445 |
PmagPy/PmagPy | pmagpy/ipmag.py | upload_magic | def upload_magic(concat=False, dir_path='.', dmodel=None, vocab="", contribution=None,
input_dir_path=""):
"""
Finds all magic files in a given directory, and compiles them into an
upload.txt file which can be uploaded into the MagIC database.
Parameters
----------
concat : boolean where True means do concatenate to upload.txt file in dir_path,
False means write a new file (default is False)
dir_path : string for input/output directory (default ".")
dmodel : pmagpy data_model.DataModel object,
if not provided will be created (default None)
vocab : pmagpy controlled_vocabularies3.Vocabulary object,
if not provided will be created (default None)
contribution : pmagpy contribution_builder.Contribution object, if not provided will be created
in directory (default None)
input_dir_path : str, default ""
path for intput files if different from output dir_path (default is same)
Returns
----------
tuple of either: (False, error_message, errors, all_failing_items)
if there was a problem creating/validating the upload file
or: (filename, '', None, None) if the file creation was fully successful.
"""
input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
locations = []
concat = int(concat)
dtypes = ["locations", "samples", "specimens", "sites", "ages", "measurements",
"criteria", "contribution", "images"]
fnames = [os.path.join(input_dir_path, dtype + ".txt") for dtype in dtypes]
file_names = [fname for fname in fnames if os.path.exists(fname)]
error_fnames = [dtype + "_errors.txt" for dtype in dtypes]
error_full_fnames = [os.path.join(
dir_path, fname) for fname in error_fnames if os.path.exists(os.path.join(dir_path, fname))]
print('-I- Removing old error files from {}: {}'.format(dir_path,
", ".join(error_fnames)))
for error in error_full_fnames:
os.remove(error)
if isinstance(contribution, cb.Contribution):
# if contribution object provided, use it
con = contribution
for table_name in con.tables:
con.tables[table_name].write_magic_file()
elif file_names:
# otherwise create a new Contribution in dir_path
con = Contribution(input_dir_path, vocabulary=vocab)
else:
# if no contribution is provided and no contribution could be created,
# you are out of luck
print("-W- No 3.0 files found in your directory: {}, upload file not created".format(input_dir_path))
return False, "no 3.0 files found, upload file not created", None, None
# if the contribution has no tables, you can't make an upload file
if not con.tables.keys():
print("-W- No tables found in your contribution in directory {}, file not created".format(input_dir_path))
return False, "-W- No tables found in your contribution, file not created", None, None
con.propagate_cols(['core_depth', 'composite_depth'],
'sites', 'samples', down=False)
# take out any extra added columns
# con.remove_non_magic_cols()
# begin the upload process
up = os.path.join(dir_path, "upload.txt")
if os.path.exists(up):
os.remove(up)
RmKeys = ('citation_label', 'compilation', 'calculation_type', 'average_n_lines', 'average_n_planes',
'specimen_grade', 'site_vgp_lat', 'site_vgp_lon', 'direction_type', 'specimen_Z',
'magic_instrument_codes', 'cooling_rate_corr', 'cooling_rate_mcd', 'anisotropy_atrm_alt',
'anisotropy_apar_perc', 'anisotropy_F', 'anisotropy_F_crit', 'specimen_scat',
'specimen_gmax', 'specimen_frac', 'site_vadm', 'site_lon', 'site_vdm', 'site_lat',
'measurement_chi', 'specimen_k_prime', 'specimen_k_prime_sse', 'external_database_names',
'external_database_ids', 'Further Notes', 'Typology', 'Notes (Year/Area/Locus/Level)',
'Site', 'Object Number', 'version', 'site_definition')
#print("-I- Removing: ", RmKeys)
extra_RmKeys = {'measurements': ['sample', 'site', 'location'],
'specimens': ['site', 'location', 'age', 'age_unit', 'age_high',
'age_low', 'age_sigma', 'specimen_core_depth'],
'samples': ['location', 'age', 'age_unit', 'age_high', 'age_low',
'age_sigma', 'core_depth', 'composite_depth'],
'sites': ['texture', 'azimuth', 'azimuth_dec_correction', 'dip',
'orientation_quality', 'sample_alternatives', 'timestamp'],
'ages': ['level']}
failing = []
all_failing_items = {}
if not dmodel:
dmodel = data_model.DataModel()
last_file_type = sorted(con.tables.keys())[-1]
for file_type in sorted(con.tables.keys()):
container = con.tables[file_type]
# format all float values to have correct number of decimals
container.all_to_str()
# make sure all nans and Nones are changed to ''
container.df.fillna('')
df = container.df
if len(df):
print("-I- {} file successfully read in".format(file_type))
# make some adjustments to clean up data
# drop non MagIC keys
DropKeys = list(RmKeys) + extra_RmKeys.get(file_type, [])
DropKeys = set(DropKeys).intersection(df.columns)
if DropKeys:
print(
'-I- dropping these columns: {} from the {} table'.format(', '.join(DropKeys), file_type))
df.drop(DropKeys, axis=1, inplace=True)
container.df = df
unrecognized_cols = container.get_non_magic_cols()
if unrecognized_cols:
print('-W- {} table still has some unrecognized columns: {}'.format(file_type.title(),
", ".join(unrecognized_cols)))
# make sure int_b_beta is positive
if 'int_b_beta' in df.columns:
# get rid of empty strings
df = df.replace(r'\s+( +\.)|#', np.nan,
regex=True).replace('', np.nan)
try:
df['int_b_beta'] = df['int_b_beta'].astype(
float).apply(abs)
except ValueError:
"-W- Non numeric values found in int_b_beta column.\n Could not apply absolute value."
# make all declinations/azimuths/longitudes in range 0=>360.
relevant_cols = val_up3.get_degree_cols(df)
for col in relevant_cols:
df[col] = df[col].apply(pmag.adjust_val_to_360)
# get list of location names
if file_type == 'locations':
locations = sorted(df['location'].unique())
# LJ: need to deal with this
# use only highest priority orientation -- not sure how this works
elif file_type == 'samples':
# orient,az_type=pmag.get_orient(Data,rec['sample'])
pass
# include only specimen records with samples
elif file_type == 'specimens':
df = df[df['sample'].notnull()]
if 'samples' in con.tables:
samp_df = con.tables['samples'].df
df = df[df['sample'].isin(samp_df.index.unique())]
# include only measurements with specmiens
elif file_type == 'measurements':
df = df[df['specimen'].notnull()]
if 'specimens' in con.tables:
spec_df = con.tables['specimens'].df
df = df[df['specimen'].isin(spec_df.index.unique())]
# run validations
res = val_up3.validate_table(
con, file_type, output_dir=dir_path) # , verbose=True)
if res:
dtype, bad_rows, bad_cols, missing_cols, missing_groups, failing_items = res
if dtype not in all_failing_items:
all_failing_items[dtype] = {}
all_failing_items[dtype]["rows"] = failing_items
all_failing_items[dtype]["missing_columns"] = missing_cols
all_failing_items[dtype]["missing_groups"] = missing_groups
failing.append(dtype)
# write out the data
if len(df):
container.write_magic_file(up, append=True, multi_type=True)
# write out the file separator
if last_file_type != file_type:
f = open(up, 'a')
f.write('>>>>>>>>>>\n')
f.close()
print("-I-", file_type, 'written to ', up)
else: # last file, no newline at end of file
#f = open(up, 'a')
# f.write('>>>>>>>>>>')
# f.close()
print("-I-", file_type, 'written to ', up)
# if there was no understandable data
else:
print(file_type, 'is bad or non-existent - skipping ')
# add to existing file
if concat:
f = open(up, 'a')
f.write('>>>>>>>>>>\n')
f.close()
if not os.path.isfile(up):
print("no data found, upload file not created")
return False, "no data found, upload file not created", None, None
# rename upload.txt according to location + timestamp
format_string = "%d.%b.%Y"
if locations:
locs = set(locations)
locs = sorted(locs)[:3]
#location = locations[0].replace(' ', '_')
try:
locs = [loc.replace(' ', '-') for loc in locs]
except AttributeError:
locs = ["unknown_location"]
location = "_".join(locs)
new_up = location + '_' + time.strftime(format_string) + '.txt'
else:
new_up = 'unknown_location_' + time.strftime(format_string) + '.txt'
new_up = os.path.join(dir_path, new_up)
if os.path.isfile(new_up):
fname, extension = os.path.splitext(new_up)
for i in range(1, 100):
if os.path.isfile(fname + "_" + str(i) + extension):
continue
else:
new_up = fname + "_" + str(i) + extension
break
if not up:
print("-W- Could not create an upload file")
return False, "Could not create an upload file", None, None
os.rename(up, new_up)
print("Finished preparing upload file: {} ".format(new_up))
if failing:
print("-W- These tables have errors: {}".format(", ".join(failing)))
print("-W- validation of upload file has failed.\nYou can still upload {} to MagIC,\nbut you will need to fix the above errors before your contribution can be activated.".format(new_up))
return False, "Validation of your upload file has failed.\nYou can still upload {} to MagIC,\nbut you will need to fix the above errors before your contribution can be activated.".format(new_up), failing, all_failing_items
else:
print("-I- Your file has passed validation. You should be able to upload it to the MagIC database without trouble!")
return new_up, '', None, None | python | def upload_magic(concat=False, dir_path='.', dmodel=None, vocab="", contribution=None,
input_dir_path=""):
"""
Finds all magic files in a given directory, and compiles them into an
upload.txt file which can be uploaded into the MagIC database.
Parameters
----------
concat : boolean where True means do concatenate to upload.txt file in dir_path,
False means write a new file (default is False)
dir_path : string for input/output directory (default ".")
dmodel : pmagpy data_model.DataModel object,
if not provided will be created (default None)
vocab : pmagpy controlled_vocabularies3.Vocabulary object,
if not provided will be created (default None)
contribution : pmagpy contribution_builder.Contribution object, if not provided will be created
in directory (default None)
input_dir_path : str, default ""
path for intput files if different from output dir_path (default is same)
Returns
----------
tuple of either: (False, error_message, errors, all_failing_items)
if there was a problem creating/validating the upload file
or: (filename, '', None, None) if the file creation was fully successful.
"""
input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
locations = []
concat = int(concat)
dtypes = ["locations", "samples", "specimens", "sites", "ages", "measurements",
"criteria", "contribution", "images"]
fnames = [os.path.join(input_dir_path, dtype + ".txt") for dtype in dtypes]
file_names = [fname for fname in fnames if os.path.exists(fname)]
error_fnames = [dtype + "_errors.txt" for dtype in dtypes]
error_full_fnames = [os.path.join(
dir_path, fname) for fname in error_fnames if os.path.exists(os.path.join(dir_path, fname))]
print('-I- Removing old error files from {}: {}'.format(dir_path,
", ".join(error_fnames)))
for error in error_full_fnames:
os.remove(error)
if isinstance(contribution, cb.Contribution):
# if contribution object provided, use it
con = contribution
for table_name in con.tables:
con.tables[table_name].write_magic_file()
elif file_names:
# otherwise create a new Contribution in dir_path
con = Contribution(input_dir_path, vocabulary=vocab)
else:
# if no contribution is provided and no contribution could be created,
# you are out of luck
print("-W- No 3.0 files found in your directory: {}, upload file not created".format(input_dir_path))
return False, "no 3.0 files found, upload file not created", None, None
# if the contribution has no tables, you can't make an upload file
if not con.tables.keys():
print("-W- No tables found in your contribution in directory {}, file not created".format(input_dir_path))
return False, "-W- No tables found in your contribution, file not created", None, None
con.propagate_cols(['core_depth', 'composite_depth'],
'sites', 'samples', down=False)
# take out any extra added columns
# con.remove_non_magic_cols()
# begin the upload process
up = os.path.join(dir_path, "upload.txt")
if os.path.exists(up):
os.remove(up)
RmKeys = ('citation_label', 'compilation', 'calculation_type', 'average_n_lines', 'average_n_planes',
'specimen_grade', 'site_vgp_lat', 'site_vgp_lon', 'direction_type', 'specimen_Z',
'magic_instrument_codes', 'cooling_rate_corr', 'cooling_rate_mcd', 'anisotropy_atrm_alt',
'anisotropy_apar_perc', 'anisotropy_F', 'anisotropy_F_crit', 'specimen_scat',
'specimen_gmax', 'specimen_frac', 'site_vadm', 'site_lon', 'site_vdm', 'site_lat',
'measurement_chi', 'specimen_k_prime', 'specimen_k_prime_sse', 'external_database_names',
'external_database_ids', 'Further Notes', 'Typology', 'Notes (Year/Area/Locus/Level)',
'Site', 'Object Number', 'version', 'site_definition')
#print("-I- Removing: ", RmKeys)
extra_RmKeys = {'measurements': ['sample', 'site', 'location'],
'specimens': ['site', 'location', 'age', 'age_unit', 'age_high',
'age_low', 'age_sigma', 'specimen_core_depth'],
'samples': ['location', 'age', 'age_unit', 'age_high', 'age_low',
'age_sigma', 'core_depth', 'composite_depth'],
'sites': ['texture', 'azimuth', 'azimuth_dec_correction', 'dip',
'orientation_quality', 'sample_alternatives', 'timestamp'],
'ages': ['level']}
failing = []
all_failing_items = {}
if not dmodel:
dmodel = data_model.DataModel()
last_file_type = sorted(con.tables.keys())[-1]
for file_type in sorted(con.tables.keys()):
container = con.tables[file_type]
# format all float values to have correct number of decimals
container.all_to_str()
# make sure all nans and Nones are changed to ''
container.df.fillna('')
df = container.df
if len(df):
print("-I- {} file successfully read in".format(file_type))
# make some adjustments to clean up data
# drop non MagIC keys
DropKeys = list(RmKeys) + extra_RmKeys.get(file_type, [])
DropKeys = set(DropKeys).intersection(df.columns)
if DropKeys:
print(
'-I- dropping these columns: {} from the {} table'.format(', '.join(DropKeys), file_type))
df.drop(DropKeys, axis=1, inplace=True)
container.df = df
unrecognized_cols = container.get_non_magic_cols()
if unrecognized_cols:
print('-W- {} table still has some unrecognized columns: {}'.format(file_type.title(),
", ".join(unrecognized_cols)))
# make sure int_b_beta is positive
if 'int_b_beta' in df.columns:
# get rid of empty strings
df = df.replace(r'\s+( +\.)|#', np.nan,
regex=True).replace('', np.nan)
try:
df['int_b_beta'] = df['int_b_beta'].astype(
float).apply(abs)
except ValueError:
"-W- Non numeric values found in int_b_beta column.\n Could not apply absolute value."
# make all declinations/azimuths/longitudes in range 0=>360.
relevant_cols = val_up3.get_degree_cols(df)
for col in relevant_cols:
df[col] = df[col].apply(pmag.adjust_val_to_360)
# get list of location names
if file_type == 'locations':
locations = sorted(df['location'].unique())
# LJ: need to deal with this
# use only highest priority orientation -- not sure how this works
elif file_type == 'samples':
# orient,az_type=pmag.get_orient(Data,rec['sample'])
pass
# include only specimen records with samples
elif file_type == 'specimens':
df = df[df['sample'].notnull()]
if 'samples' in con.tables:
samp_df = con.tables['samples'].df
df = df[df['sample'].isin(samp_df.index.unique())]
# include only measurements with specmiens
elif file_type == 'measurements':
df = df[df['specimen'].notnull()]
if 'specimens' in con.tables:
spec_df = con.tables['specimens'].df
df = df[df['specimen'].isin(spec_df.index.unique())]
# run validations
res = val_up3.validate_table(
con, file_type, output_dir=dir_path) # , verbose=True)
if res:
dtype, bad_rows, bad_cols, missing_cols, missing_groups, failing_items = res
if dtype not in all_failing_items:
all_failing_items[dtype] = {}
all_failing_items[dtype]["rows"] = failing_items
all_failing_items[dtype]["missing_columns"] = missing_cols
all_failing_items[dtype]["missing_groups"] = missing_groups
failing.append(dtype)
# write out the data
if len(df):
container.write_magic_file(up, append=True, multi_type=True)
# write out the file separator
if last_file_type != file_type:
f = open(up, 'a')
f.write('>>>>>>>>>>\n')
f.close()
print("-I-", file_type, 'written to ', up)
else: # last file, no newline at end of file
#f = open(up, 'a')
# f.write('>>>>>>>>>>')
# f.close()
print("-I-", file_type, 'written to ', up)
# if there was no understandable data
else:
print(file_type, 'is bad or non-existent - skipping ')
# add to existing file
if concat:
f = open(up, 'a')
f.write('>>>>>>>>>>\n')
f.close()
if not os.path.isfile(up):
print("no data found, upload file not created")
return False, "no data found, upload file not created", None, None
# rename upload.txt according to location + timestamp
format_string = "%d.%b.%Y"
if locations:
locs = set(locations)
locs = sorted(locs)[:3]
#location = locations[0].replace(' ', '_')
try:
locs = [loc.replace(' ', '-') for loc in locs]
except AttributeError:
locs = ["unknown_location"]
location = "_".join(locs)
new_up = location + '_' + time.strftime(format_string) + '.txt'
else:
new_up = 'unknown_location_' + time.strftime(format_string) + '.txt'
new_up = os.path.join(dir_path, new_up)
if os.path.isfile(new_up):
fname, extension = os.path.splitext(new_up)
for i in range(1, 100):
if os.path.isfile(fname + "_" + str(i) + extension):
continue
else:
new_up = fname + "_" + str(i) + extension
break
if not up:
print("-W- Could not create an upload file")
return False, "Could not create an upload file", None, None
os.rename(up, new_up)
print("Finished preparing upload file: {} ".format(new_up))
if failing:
print("-W- These tables have errors: {}".format(", ".join(failing)))
print("-W- validation of upload file has failed.\nYou can still upload {} to MagIC,\nbut you will need to fix the above errors before your contribution can be activated.".format(new_up))
return False, "Validation of your upload file has failed.\nYou can still upload {} to MagIC,\nbut you will need to fix the above errors before your contribution can be activated.".format(new_up), failing, all_failing_items
else:
print("-I- Your file has passed validation. You should be able to upload it to the MagIC database without trouble!")
return new_up, '', None, None | Finds all magic files in a given directory, and compiles them into an
upload.txt file which can be uploaded into the MagIC database.
Parameters
----------
concat : boolean where True means do concatenate to upload.txt file in dir_path,
False means write a new file (default is False)
dir_path : string for input/output directory (default ".")
dmodel : pmagpy data_model.DataModel object,
if not provided will be created (default None)
vocab : pmagpy controlled_vocabularies3.Vocabulary object,
if not provided will be created (default None)
contribution : pmagpy contribution_builder.Contribution object, if not provided will be created
in directory (default None)
input_dir_path : str, default ""
path for intput files if different from output dir_path (default is same)
Returns
----------
tuple of either: (False, error_message, errors, all_failing_items)
if there was a problem creating/validating the upload file
or: (filename, '', None, None) if the file creation was fully successful. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L4453-L4673 |
PmagPy/PmagPy | pmagpy/ipmag.py | specimens_results_magic | def specimens_results_magic(infile='pmag_specimens.txt', measfile='magic_measurements.txt', sampfile='er_samples.txt', sitefile='er_sites.txt', agefile='er_ages.txt', specout='er_specimens.txt', sampout='pmag_samples.txt', siteout='pmag_sites.txt', resout='pmag_results.txt', critout='pmag_criteria.txt', instout='magic_instruments.txt', plotsites=False, fmt='svg', dir_path='.', cors=[], priorities=['DA-AC-ARM', 'DA-AC-TRM'], coord='g', user='', vgps_level='site', do_site_intensity=True, DefaultAge=["none"], avg_directions_by_sample=False, avg_intensities_by_sample=False, avg_all_components=False, avg_by_polarity=False, skip_directions=False, skip_intensities=False, use_sample_latitude=False, use_paleolatitude=False, use_criteria='default'):
"""
Writes magic_instruments, er_specimens, pmag_samples, pmag_sites, pmag_criteria, and pmag_results. The data used to write this is obtained by reading a pmag_speciemns, a magic_measurements, a er_samples, a er_sites, a er_ages.
@param -> infile: path from the WD to the pmag speciemns table
@param -> measfile: path from the WD to the magic measurement file
@param -> sampfile: path from the WD to the er sample file
@param -> sitefile: path from the WD to the er sites data file
@param -> agefile: path from the WD to the er ages data file
@param -> specout: path from the WD to the place to write the er specimens data file
@param -> sampout: path from the WD to the place to write the pmag samples data file
@param -> siteout: path from the WD to the place to write the pmag sites data file
@param -> resout: path from the WD to the place to write the pmag results data file
@param -> critout: path from the WD to the place to write the pmag criteria file
@param -> instout: path from th WD to the place to write the magic instruments file
@param -> documentation incomplete if you know more about the purpose of the parameters in this function and it's side effects please extend and complete this string
"""
# initialize some variables
plotsites = False # cannot use draw_figs from within ipmag
Comps = [] # list of components
version_num = pmag.get_version()
args = sys.argv
model_lat_file = ""
Dcrit, Icrit, nocrit = 0, 0, 0
corrections = []
nocorrection = ['DA-NL', 'DA-AC', 'DA-CR']
# do some data adjustments
for cor in cors:
nocorrection.remove('DA-' + cor)
corrections.append('DA-' + cor)
for p in priorities:
if not p.startswith('DA-AC-'):
p = 'DA-AC-' + p
# translate coord into coords
if coord == 's':
coords = ['-1']
if coord == 'g':
coords = ['0']
if coord == 't':
coords = ['100']
if coord == 'b':
coords = ['0', '100']
if vgps_level == 'sample':
vgps = 1 # save sample level VGPS/VADMs
else:
vgps = 0 # site level
if do_site_intensity:
nositeints = 0
else:
nositeints = 1
# chagne these all to True/False instead of 1/0
if not skip_intensities:
# set model lat and
if use_sample_latitude and use_paleolatitude:
print("you should set a paleolatitude file OR use present day lat - not both")
return False
elif use_sample_latitude:
get_model_lat = 1
elif use_paleolatitude:
get_model_lat = 2
try:
model_lat_file = dir_path + '/' + args[ind + 1]
get_model_lat = 2
mlat = open(model_lat_file, 'r')
ModelLats = []
for line in mlat.readlines():
ModelLat = {}
tmp = line.split()
ModelLat["er_site_name"] = tmp[0]
ModelLat["site_model_lat"] = tmp[1]
ModelLat["er_sample_name"] = tmp[0]
ModelLat["sample_lat"] = tmp[1]
ModelLats.append(ModelLat)
mlat.clos()
except:
print("use_paleolatitude option requires a valid paleolatitude file")
else:
get_model_lat = 0 # skips VADM calculation entirely
if plotsites and not skip_directions: # plot by site - set up plot window
EQ = {}
EQ['eqarea'] = 1
# define figure 1 as equal area projection
pmagplotlib.plot_init(EQ['eqarea'], 5, 5)
# I don't know why this has to be here, but otherwise the first plot
# never plots...
pmagplotlib.plot_net(EQ['eqarea'])
pmagplotlib.draw_figs(EQ)
infile = os.path.join(dir_path, infile)
measfile = os.path.join(dir_path, measfile)
instout = os.path.join(dir_path, instout)
sampfile = os.path.join(dir_path, sampfile)
sitefile = os.path.join(dir_path, sitefile)
agefile = os.path.join(dir_path, agefile)
specout = os.path.join(dir_path, specout)
sampout = os.path.join(dir_path, sampout)
siteout = os.path.join(dir_path, siteout)
resout = os.path.join(dir_path, resout)
critout = os.path.join(dir_path, critout)
if use_criteria == 'none':
Dcrit, Icrit, nocrit = 1, 1, 1 # no selection criteria
crit_data = pmag.default_criteria(nocrit)
elif use_criteria == 'default':
crit_data = pmag.default_criteria(nocrit) # use default criteria
elif use_criteria == 'existing':
crit_data, file_type = pmag.magic_read(
critout) # use pmag_criteria file
print("Acceptance criteria read in from ", critout)
accept = {}
for critrec in crit_data:
for key in list(critrec.keys()):
# need to migrate specimen_dang to specimen_int_dang for intensity
# data using old format
if 'IE-SPEC' in list(critrec.keys()) and 'specimen_dang' in list(critrec.keys()) and 'specimen_int_dang' not in list(critrec.keys()):
critrec['specimen_int_dang'] = critrec['specimen_dang']
del critrec['specimen_dang']
# need to get rid of ron shaars sample_int_sigma_uT
if 'sample_int_sigma_uT' in list(critrec.keys()):
critrec['sample_int_sigma'] = '%10.3e' % (
eval(critrec['sample_int_sigma_uT']) * 1e-6)
if key not in list(accept.keys()) and critrec[key] != '':
accept[key] = critrec[key]
if use_criteria == 'default':
pmag.magic_write(critout, [accept], 'pmag_criteria')
print("\n Pmag Criteria stored in ", critout, '\n')
# now we're done slow dancing
# read in site data - has the lats and lons
SiteNFO, file_type = pmag.magic_read(sitefile)
# read in site data - has the lats and lons
SampNFO, file_type = pmag.magic_read(sampfile)
# find all the sites with height info.
height_nfo = pmag.get_dictitem(SiteNFO, 'site_height', '', 'F')
if agefile:
AgeNFO, file_type = pmag.magic_read(
agefile) # read in the age information
# read in specimen interpretations
Data, file_type = pmag.magic_read(infile)
# retrieve specimens with intensity data
IntData = pmag.get_dictitem(Data, 'specimen_int', '', 'F')
comment, orient = "", []
samples, sites = [], []
for rec in Data: # run through the data filling in missing keys and finding all components, coordinates available
# fill in missing fields, collect unique sample and site names
if 'er_sample_name' not in list(rec.keys()):
rec['er_sample_name'] = ""
elif rec['er_sample_name'] not in samples:
samples.append(rec['er_sample_name'])
if 'er_site_name' not in list(rec.keys()):
rec['er_site_name'] = ""
elif rec['er_site_name'] not in sites:
sites.append(rec['er_site_name'])
if 'specimen_int' not in list(rec.keys()):
rec['specimen_int'] = ''
if 'specimen_comp_name' not in list(rec.keys()) or rec['specimen_comp_name'] == "":
rec['specimen_comp_name'] = 'A'
if rec['specimen_comp_name'] not in Comps:
Comps.append(rec['specimen_comp_name'])
rec['specimen_tilt_correction'] = rec['specimen_tilt_correction'].strip(
'\n')
if "specimen_tilt_correction" not in list(rec.keys()):
rec["specimen_tilt_correction"] = "-1" # assume sample coordinates
if rec["specimen_tilt_correction"] not in orient:
# collect available coordinate systems
orient.append(rec["specimen_tilt_correction"])
if "specimen_direction_type" not in list(rec.keys()):
# assume direction is line - not plane
rec["specimen_direction_type"] = 'l'
if "specimen_dec" not in list(rec.keys()):
# if no declination, set direction type to blank
rec["specimen_direction_type"] = ''
if "specimen_n" not in list(rec.keys()):
rec["specimen_n"] = '' # put in n
if "specimen_alpha95" not in list(rec.keys()):
rec["specimen_alpha95"] = '' # put in alpha95
if "magic_method_codes" not in list(rec.keys()):
rec["magic_method_codes"] = ''
# start parsing data into SpecDirs, SpecPlanes, SpecInts
SpecInts, SpecDirs, SpecPlanes = [], [], []
samples.sort() # get sorted list of samples and sites
sites.sort()
if not skip_intensities: # don't skip intensities
# retrieve specimens with intensity data
IntData = pmag.get_dictitem(Data, 'specimen_int', '', 'F')
if nocrit == 0: # use selection criteria
for rec in IntData: # do selection criteria
kill = pmag.grade(rec, accept, 'specimen_int')
if len(kill) == 0:
# intensity record to be included in sample, site
# calculations
SpecInts.append(rec)
else:
SpecInts = IntData[:] # take everything - no selection criteria
# check for required data adjustments
if len(corrections) > 0 and len(SpecInts) > 0:
for cor in corrections:
# only take specimens with the required corrections
SpecInts = pmag.get_dictitem(
SpecInts, 'magic_method_codes', cor, 'has')
if len(nocorrection) > 0 and len(SpecInts) > 0:
for cor in nocorrection:
# exclude the corrections not specified for inclusion
SpecInts = pmag.get_dictitem(
SpecInts, 'magic_method_codes', cor, 'not')
# take top priority specimen of its name in remaining specimens (only one
# per customer)
PrioritySpecInts = []
specimens = pmag.get_specs(SpecInts) # get list of uniq specimen names
for spec in specimens:
# all the records for this specimen
ThisSpecRecs = pmag.get_dictitem(
SpecInts, 'er_specimen_name', spec, 'T')
if len(ThisSpecRecs) == 1:
PrioritySpecInts.append(ThisSpecRecs[0])
elif len(ThisSpecRecs) > 1: # more than one
prec = []
for p in priorities:
# all the records for this specimen
ThisSpecRecs = pmag.get_dictitem(
SpecInts, 'magic_method_codes', p, 'has')
if len(ThisSpecRecs) > 0:
prec.append(ThisSpecRecs[0])
PrioritySpecInts.append(prec[0]) # take the best one
SpecInts = PrioritySpecInts # this has the first specimen record
if not skip_directions: # don't skip directions
# retrieve specimens with directed lines and planes
AllDirs = pmag.get_dictitem(Data, 'specimen_direction_type', '', 'F')
# get all specimens with specimen_n information
Ns = pmag.get_dictitem(AllDirs, 'specimen_n', '', 'F')
if nocrit != 1: # use selection criteria
for rec in Ns: # look through everything with specimen_n for "good" data
kill = pmag.grade(rec, accept, 'specimen_dir')
if len(kill) == 0: # nothing killed it
SpecDirs.append(rec)
else: # no criteria
SpecDirs = AllDirs[:] # take them all
# SpecDirs is now the list of all specimen directions (lines and planes)
# that pass muster
# list of all sample data and list of those that pass the DE-SAMP criteria
PmagSamps, SampDirs = [], []
PmagSites, PmagResults = [], [] # list of all site data and selected results
SampInts = []
for samp in samples: # run through the sample names
if avg_directions_by_sample: # average by sample if desired
# get all the directional data for this sample
SampDir = pmag.get_dictitem(SpecDirs, 'er_sample_name', samp, 'T')
if len(SampDir) > 0: # there are some directions
for coord in coords: # step through desired coordinate systems
# get all the directions for this sample
CoordDir = pmag.get_dictitem(
SampDir, 'specimen_tilt_correction', coord, 'T')
if len(CoordDir) > 0: # there are some with this coordinate system
if not avg_all_components: # look component by component
for comp in Comps:
# get all directions from this component
CompDir = pmag.get_dictitem(
CoordDir, 'specimen_comp_name', comp, 'T')
if len(CompDir) > 0: # there are some
# get a sample average from all specimens
PmagSampRec = pmag.lnpbykey(
CompDir, 'sample', 'specimen')
# decorate the sample record
PmagSampRec["er_location_name"] = CompDir[0]['er_location_name']
PmagSampRec["er_site_name"] = CompDir[0]['er_site_name']
PmagSampRec["er_sample_name"] = samp
PmagSampRec["er_citation_names"] = "This study"
PmagSampRec["er_analyst_mail_names"] = user
PmagSampRec['magic_software_packages'] = version_num
if CompDir[0]['specimen_flag'] == 'g':
PmagSampRec['sample_flag'] = 'g'
else:
PmagSampRec['sample_flag'] = 'b'
if nocrit != 1:
PmagSampRec['pmag_criteria_codes'] = "ACCEPT"
if agefile != "":
PmagSampRec = pmag.get_age(
PmagSampRec, "er_site_name", "sample_inferred_", AgeNFO, DefaultAge)
site_height = pmag.get_dictitem(
height_nfo, 'er_site_name', PmagSampRec['er_site_name'], 'T')
if len(site_height) > 0:
# add in height if available
PmagSampRec["sample_height"] = site_height[0]['site_height']
PmagSampRec['sample_comp_name'] = comp
PmagSampRec['sample_tilt_correction'] = coord
PmagSampRec['er_specimen_names'] = pmag.get_list(
CompDir, 'er_specimen_name') # get a list of the specimen names used
PmagSampRec['magic_method_codes'] = pmag.get_list(
CompDir, 'magic_method_codes') # get a list of the methods used
if nocrit != 1: # apply selection criteria
kill = pmag.grade(
PmagSampRec, accept, 'sample_dir')
else:
kill = []
if len(kill) == 0:
SampDirs.append(PmagSampRec)
if vgps == 1: # if sample level VGP info desired, do that now
PmagResRec = pmag.getsampVGP(
PmagSampRec, SiteNFO)
if PmagResRec != "":
PmagResults.append(PmagResRec)
# print(PmagSampRec)
PmagSamps.append(PmagSampRec)
if avg_all_components: # average all components together basically same as above
PmagSampRec = pmag.lnpbykey(
CoordDir, 'sample', 'specimen')
PmagSampRec["er_location_name"] = CoordDir[0]['er_location_name']
PmagSampRec["er_site_name"] = CoordDir[0]['er_site_name']
PmagSampRec["er_sample_name"] = samp
PmagSampRec["er_citation_names"] = "This study"
PmagSampRec["er_analyst_mail_names"] = user
PmagSampRec['magic_software_packages'] = version_num
if all(i['specimen_flag'] == 'g' for i in CoordDir):
PmagSampRec['sample_flag'] = 'g'
else:
PmagSampRec['sample_flag'] = 'b'
if nocrit != 1:
PmagSampRec['pmag_criteria_codes'] = ""
if agefile != "":
PmagSampRec = pmag.get_age(
PmagSampRec, "er_site_name", "sample_inferred_", AgeNFO, DefaultAge)
site_height = pmag.get_dictitem(
height_nfo, 'er_site_name', site, 'T')
if len(site_height) > 0:
# add in height if available
PmagSampRec["sample_height"] = site_height[0]['site_height']
PmagSampRec['sample_tilt_correction'] = coord
PmagSampRec['sample_comp_name'] = pmag.get_list(
CoordDir, 'specimen_comp_name') # get components used
PmagSampRec['er_specimen_names'] = pmag.get_list(
CoordDir, 'er_specimen_name') # get specimne names averaged
PmagSampRec['magic_method_codes'] = pmag.get_list(
CoordDir, 'magic_method_codes') # assemble method codes
if nocrit != 1: # apply selection criteria
kill = pmag.grade(
PmagSampRec, accept, 'sample_dir')
if len(kill) == 0: # passes the mustard
SampDirs.append(PmagSampRec)
if vgps == 1:
PmagResRec = pmag.getsampVGP(
PmagSampRec, SiteNFO)
if PmagResRec != "":
PmagResults.append(PmagResRec)
else: # take everything
SampDirs.append(PmagSampRec)
if vgps == 1:
PmagResRec = pmag.getsampVGP(
PmagSampRec, SiteNFO)
if PmagResRec != "":
PmagResults.append(PmagResRec)
PmagSamps.append(PmagSampRec)
if avg_intensities_by_sample: # average by sample if desired
# get all the intensity data for this sample
SampI = pmag.get_dictitem(SpecInts, 'er_sample_name', samp, 'T')
if len(SampI) > 0: # there are some
# get average intensity stuff
PmagSampRec = pmag.average_int(SampI, 'specimen', 'sample')
# decorate sample record
PmagSampRec["sample_description"] = "sample intensity"
PmagSampRec["sample_direction_type"] = ""
PmagSampRec['er_site_name'] = SampI[0]["er_site_name"]
PmagSampRec['er_sample_name'] = samp
PmagSampRec['er_location_name'] = SampI[0]["er_location_name"]
PmagSampRec["er_citation_names"] = "This study"
PmagSampRec["er_analyst_mail_names"] = user
if agefile != "":
PmagSampRec = pmag.get_age(
PmagSampRec, "er_site_name", "sample_inferred_", AgeNFO, DefaultAge)
site_height = pmag.get_dictitem(
height_nfo, 'er_site_name', PmagSampRec['er_site_name'], 'T')
if len(site_height) > 0:
# add in height if available
PmagSampRec["sample_height"] = site_height[0]['site_height']
PmagSampRec['er_specimen_names'] = pmag.get_list(
SampI, 'er_specimen_name')
PmagSampRec['magic_method_codes'] = pmag.get_list(
SampI, 'magic_method_codes')
if nocrit != 1: # apply criteria!
kill = pmag.grade(PmagSampRec, accept, 'sample_int')
if len(kill) == 0:
PmagSampRec['pmag_criteria_codes'] = "ACCEPT"
SampInts.append(PmagSampRec)
PmagSamps.append(PmagSampRec)
else:
PmagSampRec = {} # sample rejected
else: # no criteria
SampInts.append(PmagSampRec)
PmagSamps.append(PmagSampRec)
PmagSampRec['pmag_criteria_codes'] = ""
if vgps == 1 and get_model_lat != 0 and PmagSampRec != {}:
if get_model_lat == 1: # use sample latitude
PmagResRec = pmag.getsampVDM(PmagSampRec, SampNFO)
# get rid of the model lat key
del(PmagResRec['model_lat'])
elif get_model_lat == 2: # use model latitude
PmagResRec = pmag.getsampVDM(PmagSampRec, ModelLats)
if PmagResRec != {}:
PmagResRec['magic_method_codes'] = PmagResRec['magic_method_codes'] + ":IE-MLAT"
if PmagResRec != {}:
PmagResRec['er_specimen_names'] = PmagSampRec['er_specimen_names']
PmagResRec['er_sample_names'] = PmagSampRec['er_sample_name']
PmagResRec['pmag_criteria_codes'] = 'ACCEPT'
PmagResRec['average_int_sigma_perc'] = PmagSampRec['sample_int_sigma_perc']
PmagResRec['average_int_sigma'] = PmagSampRec['sample_int_sigma']
PmagResRec['average_int_n'] = PmagSampRec['sample_int_n']
PmagResRec['vadm_n'] = PmagSampRec['sample_int_n']
PmagResRec['data_type'] = 'i'
PmagResults.append(PmagResRec)
if len(PmagSamps) > 0:
# fill in missing keys from different types of records
TmpSamps, keylist = pmag.fillkeys(PmagSamps)
# save in sample output file
pmag.magic_write(sampout, TmpSamps, 'pmag_samples')
print(' sample averages written to ', sampout)
#
# create site averages from specimens or samples as specified
#
for site in sites:
for coord in coords:
if not avg_directions_by_sample:
key, dirlist = 'specimen', SpecDirs # if specimen averages at site level desired
if avg_directions_by_sample:
key, dirlist = 'sample', SampDirs # if sample averages at site level desired
# get all the sites with directions
tmp = pmag.get_dictitem(dirlist, 'er_site_name', site, 'T')
# use only the last coordinate if avg_all_components==False
tmp1 = pmag.get_dictitem(tmp, key + '_tilt_correction', coord, 'T')
# fish out site information (lat/lon, etc.)
sd = pmag.get_dictitem(SiteNFO, 'er_site_name', site, 'T')
if len(sd) > 0:
sitedat = sd[0]
if not avg_all_components: # do component wise averaging
for comp in Comps:
# get all components comp
siteD = pmag.get_dictitem(
tmp1, key + '_comp_name', comp, 'T')
# remove bad data from means
quality_siteD = []
# remove any records for which specimen_flag or sample_flag are 'b'
# assume 'g' if flag is not provided
for rec in siteD:
spec_quality = rec.get('specimen_flag', 'g')
samp_quality = rec.get('sample_flag', 'g')
if (spec_quality == 'g') and (samp_quality == 'g'):
quality_siteD.append(rec)
siteD = quality_siteD
if len(siteD) > 0: # there are some for this site and component name
# get an average for this site
PmagSiteRec = pmag.lnpbykey(siteD, 'site', key)
# decorate the site record
PmagSiteRec['site_comp_name'] = comp
PmagSiteRec["er_location_name"] = siteD[0]['er_location_name']
PmagSiteRec["er_site_name"] = siteD[0]['er_site_name']
PmagSiteRec['site_tilt_correction'] = coord
PmagSiteRec['site_comp_name'] = pmag.get_list(
siteD, key + '_comp_name')
if avg_directions_by_sample:
PmagSiteRec['er_sample_names'] = pmag.get_list(
siteD, 'er_sample_name')
else:
PmagSiteRec['er_specimen_names'] = pmag.get_list(
siteD, 'er_specimen_name')
# determine the demagnetization code (DC3,4 or 5) for this site
AFnum = len(pmag.get_dictitem(
siteD, 'magic_method_codes', 'LP-DIR-AF', 'has'))
Tnum = len(pmag.get_dictitem(
siteD, 'magic_method_codes', 'LP-DIR-T', 'has'))
DC = 3
if AFnum > 0:
DC += 1
if Tnum > 0:
DC += 1
PmagSiteRec['magic_method_codes'] = pmag.get_list(
siteD, 'magic_method_codes') + ':' + 'LP-DC' + str(DC)
PmagSiteRec['magic_method_codes'].strip(":")
if plotsites:
print(PmagSiteRec['er_site_name'])
# plot and list the data
pmagplotlib.plot_site(
EQ['eqarea'], PmagSiteRec, siteD, key)
pmagplotlib.draw_figs(EQ)
PmagSites.append(PmagSiteRec)
else: # last component only
# get the last orientation system specified
siteD = tmp1[:]
if len(siteD) > 0: # there are some
# get the average for this site
PmagSiteRec = pmag.lnpbykey(siteD, 'site', key)
# decorate the record
PmagSiteRec["er_location_name"] = siteD[0]['er_location_name']
PmagSiteRec["er_site_name"] = siteD[0]['er_site_name']
PmagSiteRec['site_comp_name'] = comp
PmagSiteRec['site_tilt_correction'] = coord
PmagSiteRec['site_comp_name'] = pmag.get_list(
siteD, key + '_comp_name')
PmagSiteRec['er_specimen_names'] = pmag.get_list(
siteD, 'er_specimen_name')
PmagSiteRec['er_sample_names'] = pmag.get_list(
siteD, 'er_sample_name')
AFnum = len(pmag.get_dictitem(
siteD, 'magic_method_codes', 'LP-DIR-AF', 'has'))
Tnum = len(pmag.get_dictitem(
siteD, 'magic_method_codes', 'LP-DIR-T', 'has'))
DC = 3
if AFnum > 0:
DC += 1
if Tnum > 0:
DC += 1
PmagSiteRec['magic_method_codes'] = pmag.get_list(
siteD, 'magic_method_codes') + ':' + 'LP-DC' + str(DC)
PmagSiteRec['magic_method_codes'].strip(":")
if not avg_directions_by_sample:
PmagSiteRec['site_comp_name'] = pmag.get_list(
siteD, key + '_comp_name')
if plotsites:
pmagplotlib.plot_site(
EQ['eqarea'], PmagSiteRec, siteD, key)
pmagplotlib.draw_figs(EQ)
PmagSites.append(PmagSiteRec)
else:
print('site information not found in er_sites for site, ',
site, ' site will be skipped')
for PmagSiteRec in PmagSites: # now decorate each dictionary some more, and calculate VGPs etc. for results table
PmagSiteRec["er_citation_names"] = "This study"
PmagSiteRec["er_analyst_mail_names"] = user
PmagSiteRec['magic_software_packages'] = version_num
if agefile != "":
PmagSiteRec = pmag.get_age(
PmagSiteRec, "er_site_name", "site_inferred_", AgeNFO, DefaultAge)
PmagSiteRec['pmag_criteria_codes'] = 'ACCEPT'
if 'site_n_lines' in list(PmagSiteRec.keys()) and 'site_n_planes' in list(PmagSiteRec.keys()) and PmagSiteRec['site_n_lines'] != "" and PmagSiteRec['site_n_planes'] != "":
if int(PmagSiteRec["site_n_planes"]) > 0:
PmagSiteRec["magic_method_codes"] = PmagSiteRec['magic_method_codes'] + ":DE-FM-LP"
elif int(PmagSiteRec["site_n_lines"]) > 2:
PmagSiteRec["magic_method_codes"] = PmagSiteRec['magic_method_codes'] + ":DE-FM"
kill = pmag.grade(PmagSiteRec, accept, 'site_dir')
if len(kill) == 0:
PmagResRec = {} # set up dictionary for the pmag_results table entry
PmagResRec['data_type'] = 'i' # decorate it a bit
PmagResRec['magic_software_packages'] = version_num
PmagSiteRec['site_description'] = 'Site direction included in results table'
PmagResRec['pmag_criteria_codes'] = 'ACCEPT'
dec = float(PmagSiteRec["site_dec"])
inc = float(PmagSiteRec["site_inc"])
if 'site_alpha95' in list(PmagSiteRec.keys()) and PmagSiteRec['site_alpha95'] != "":
a95 = float(PmagSiteRec["site_alpha95"])
else:
a95 = 180.
sitedat = pmag.get_dictitem(SiteNFO, 'er_site_name', PmagSiteRec['er_site_name'], 'T')[
0] # fish out site information (lat/lon, etc.)
lat = float(sitedat['site_lat'])
lon = float(sitedat['site_lon'])
plon, plat, dp, dm = pmag.dia_vgp(
dec, inc, a95, lat, lon) # get the VGP for this site
if PmagSiteRec['site_tilt_correction'] == '-1':
C = ' (spec coord) '
if PmagSiteRec['site_tilt_correction'] == '0':
C = ' (geog. coord) '
if PmagSiteRec['site_tilt_correction'] == '100':
C = ' (strat. coord) '
PmagResRec["pmag_result_name"] = "VGP Site: " + \
PmagSiteRec["er_site_name"] # decorate some more
PmagResRec["result_description"] = "Site VGP, coord system = " + \
str(coord) + ' component: ' + comp
PmagResRec['er_site_names'] = PmagSiteRec['er_site_name']
PmagResRec['pmag_criteria_codes'] = 'ACCEPT'
PmagResRec['er_citation_names'] = 'This study'
PmagResRec['er_analyst_mail_names'] = user
PmagResRec["er_location_names"] = PmagSiteRec["er_location_name"]
if avg_directions_by_sample:
PmagResRec["er_sample_names"] = PmagSiteRec["er_sample_names"]
else:
PmagResRec["er_specimen_names"] = PmagSiteRec["er_specimen_names"]
PmagResRec["tilt_correction"] = PmagSiteRec['site_tilt_correction']
PmagResRec["pole_comp_name"] = PmagSiteRec['site_comp_name']
PmagResRec["average_dec"] = PmagSiteRec["site_dec"]
PmagResRec["average_inc"] = PmagSiteRec["site_inc"]
PmagResRec["average_alpha95"] = PmagSiteRec["site_alpha95"]
PmagResRec["average_n"] = PmagSiteRec["site_n"]
PmagResRec["average_n_lines"] = PmagSiteRec["site_n_lines"]
PmagResRec["average_n_planes"] = PmagSiteRec["site_n_planes"]
PmagResRec["vgp_n"] = PmagSiteRec["site_n"]
PmagResRec["average_k"] = PmagSiteRec["site_k"]
PmagResRec["average_r"] = PmagSiteRec["site_r"]
PmagResRec["average_lat"] = '%10.4f ' % (lat)
PmagResRec["average_lon"] = '%10.4f ' % (lon)
if agefile != "":
PmagResRec = pmag.get_age(
PmagResRec, "er_site_names", "average_", AgeNFO, DefaultAge)
site_height = pmag.get_dictitem(
height_nfo, 'er_site_name', site, 'T')
if len(site_height) > 0:
PmagResRec["average_height"] = site_height[0]['site_height']
PmagResRec["vgp_lat"] = '%7.1f ' % (plat)
PmagResRec["vgp_lon"] = '%7.1f ' % (plon)
PmagResRec["vgp_dp"] = '%7.1f ' % (dp)
PmagResRec["vgp_dm"] = '%7.1f ' % (dm)
PmagResRec["magic_method_codes"] = PmagSiteRec["magic_method_codes"]
if '0' in PmagSiteRec['site_tilt_correction'] and "DA-DIR-GEO" not in PmagSiteRec['magic_method_codes']:
PmagSiteRec['magic_method_codes'] = PmagSiteRec['magic_method_codes'] + ":DA-DIR-GEO"
if '100' in PmagSiteRec['site_tilt_correction'] and "DA-DIR-TILT" not in PmagSiteRec['magic_method_codes']:
PmagSiteRec['magic_method_codes'] = PmagSiteRec['magic_method_codes'] + ":DA-DIR-TILT"
PmagSiteRec['site_polarity'] = ""
if avg_by_polarity: # assign polarity based on angle of pole lat to spin axis - may want to re-think this sometime
angle = pmag.angle([0, 0], [0, (90 - plat)])
if angle <= 55.:
PmagSiteRec["site_polarity"] = 'n'
if angle > 55. and angle < 125.:
PmagSiteRec["site_polarity"] = 't'
if angle >= 125.:
PmagSiteRec["site_polarity"] = 'r'
PmagResults.append(PmagResRec)
if avg_by_polarity:
# find the tilt corrected data
crecs = pmag.get_dictitem(
PmagSites, 'site_tilt_correction', '100', 'T')
if len(crecs) < 2:
# if there aren't any, find the geographic corrected data
crecs = pmag.get_dictitem(
PmagSites, 'site_tilt_correction', '0', 'T')
if len(crecs) > 2: # if there are some,
comp = pmag.get_list(crecs, 'site_comp_name').split(':')[
0] # find the first component
# fish out all of the first component
crecs = pmag.get_dictitem(crecs, 'site_comp_name', comp, 'T')
precs = []
for rec in crecs:
precs.append({'dec': rec['site_dec'], 'inc': rec['site_inc'],
'name': rec['er_site_name'], 'loc': rec['er_location_name']})
# calculate average by polarity
polpars = pmag.fisher_by_pol(precs)
# hunt through all the modes (normal=A, reverse=B, all=ALL)
for mode in list(polpars.keys()):
PolRes = {}
PolRes['er_citation_names'] = 'This study'
PolRes["pmag_result_name"] = "Polarity Average: Polarity " + mode
PolRes["data_type"] = "a"
PolRes["average_dec"] = '%7.1f' % (polpars[mode]['dec'])
PolRes["average_inc"] = '%7.1f' % (polpars[mode]['inc'])
PolRes["average_n"] = '%i' % (polpars[mode]['n'])
PolRes["average_r"] = '%5.4f' % (polpars[mode]['r'])
PolRes["average_k"] = '%6.0f' % (polpars[mode]['k'])
PolRes["average_alpha95"] = '%7.1f' % (
polpars[mode]['alpha95'])
PolRes['er_site_names'] = polpars[mode]['sites']
PolRes['er_location_names'] = polpars[mode]['locs']
PolRes['magic_software_packages'] = version_num
PmagResults.append(PolRes)
if not skip_intensities and nositeints != 1:
for site in sites: # now do intensities for each site
if plotsites:
print(site)
if not avg_intensities_by_sample:
key, intlist = 'specimen', SpecInts # if using specimen level data
if avg_intensities_by_sample:
key, intlist = 'sample', PmagSamps # if using sample level data
# get all the intensities for this site
Ints = pmag.get_dictitem(intlist, 'er_site_name', site, 'T')
if len(Ints) > 0: # there are some
# get average intensity stuff for site table
PmagSiteRec = pmag.average_int(Ints, key, 'site')
# get average intensity stuff for results table
PmagResRec = pmag.average_int(Ints, key, 'average')
if plotsites: # if site by site examination requested - print this site out to the screen
for rec in Ints:
print(rec['er_' + key + '_name'], ' %7.1f' %
(1e6 * float(rec[key + '_int'])))
if len(Ints) > 1:
print('Average: ', '%7.1f' % (
1e6 * float(PmagResRec['average_int'])), 'N: ', len(Ints))
print('Sigma: ', '%7.1f' % (
1e6 * float(PmagResRec['average_int_sigma'])), 'Sigma %: ', PmagResRec['average_int_sigma_perc'])
input('Press any key to continue\n')
er_location_name = Ints[0]["er_location_name"]
# decorate the records
PmagSiteRec["er_location_name"] = er_location_name
PmagSiteRec["er_citation_names"] = "This study"
PmagResRec["er_location_names"] = er_location_name
PmagResRec["er_citation_names"] = "This study"
PmagSiteRec["er_analyst_mail_names"] = user
PmagResRec["er_analyst_mail_names"] = user
PmagResRec["data_type"] = 'i'
if not avg_intensities_by_sample:
PmagSiteRec['er_specimen_names'] = pmag.get_list(
Ints, 'er_specimen_name') # list of all specimens used
PmagResRec['er_specimen_names'] = pmag.get_list(
Ints, 'er_specimen_name')
PmagSiteRec['er_sample_names'] = pmag.get_list(
Ints, 'er_sample_name') # list of all samples used
PmagResRec['er_sample_names'] = pmag.get_list(
Ints, 'er_sample_name')
PmagSiteRec['er_site_name'] = site
PmagResRec['er_site_names'] = site
PmagSiteRec['magic_method_codes'] = pmag.get_list(
Ints, 'magic_method_codes')
PmagResRec['magic_method_codes'] = pmag.get_list(
Ints, 'magic_method_codes')
kill = pmag.grade(PmagSiteRec, accept, 'site_int')
if nocrit == 1 or len(kill) == 0:
b, sig = float(PmagResRec['average_int']), ""
if(PmagResRec['average_int_sigma']) != "":
sig = float(PmagResRec['average_int_sigma'])
# fish out site direction
sdir = pmag.get_dictitem(
PmagResults, 'er_site_names', site, 'T')
# get the VDM for this record using last average
# inclination (hope it is the right one!)
if len(sdir) > 0 and sdir[-1]['average_inc'] != "":
inc = float(sdir[0]['average_inc'])
# get magnetic latitude using dipole formula
mlat = pmag.magnetic_lat(inc)
# get VDM with magnetic latitude
PmagResRec["vdm"] = '%8.3e ' % (pmag.b_vdm(b, mlat))
PmagResRec["vdm_n"] = PmagResRec['average_int_n']
if 'average_int_sigma' in list(PmagResRec.keys()) and PmagResRec['average_int_sigma'] != "":
vdm_sig = pmag.b_vdm(
float(PmagResRec['average_int_sigma']), mlat)
PmagResRec["vdm_sigma"] = '%8.3e ' % (vdm_sig)
else:
PmagResRec["vdm_sigma"] = ""
mlat = "" # define a model latitude
if get_model_lat == 1: # use present site latitude
mlats = pmag.get_dictitem(
SiteNFO, 'er_site_name', site, 'T')
if len(mlats) > 0:
mlat = mlats[0]['site_lat']
# use a model latitude from some plate reconstruction model
# (or something)
elif get_model_lat == 2:
mlats = pmag.get_dictitem(
ModelLats, 'er_site_name', site, 'T')
if len(mlats) > 0:
PmagResRec['model_lat'] = mlats[0]['site_model_lat']
mlat = PmagResRec['model_lat']
if mlat != "":
# get the VADM using the desired latitude
PmagResRec["vadm"] = '%8.3e ' % (
pmag.b_vdm(b, float(mlat)))
if sig != "":
vdm_sig = pmag.b_vdm(
float(PmagResRec['average_int_sigma']), float(mlat))
PmagResRec["vadm_sigma"] = '%8.3e ' % (vdm_sig)
PmagResRec["vadm_n"] = PmagResRec['average_int_n']
else:
PmagResRec["vadm_sigma"] = ""
# fish out site information (lat/lon, etc.)
sitedat = pmag.get_dictitem(
SiteNFO, 'er_site_name', PmagSiteRec['er_site_name'], 'T')
if len(sitedat) > 0:
sitedat = sitedat[0]
PmagResRec['average_lat'] = sitedat['site_lat']
PmagResRec['average_lon'] = sitedat['site_lon']
else:
PmagResRec['average_lon'] = 'UNKNOWN'
PmagResRec['average_lon'] = 'UNKNOWN'
PmagResRec['magic_software_packages'] = version_num
PmagResRec["pmag_result_name"] = "V[A]DM: Site " + site
PmagResRec["result_description"] = "V[A]DM of site"
PmagResRec["pmag_criteria_codes"] = "ACCEPT"
if agefile != "":
PmagResRec = pmag.get_age(
PmagResRec, "er_site_names", "average_", AgeNFO, DefaultAge)
site_height = pmag.get_dictitem(
height_nfo, 'er_site_name', site, 'T')
if len(site_height) > 0:
PmagResRec["average_height"] = site_height[0]['site_height']
PmagSites.append(PmagSiteRec)
PmagResults.append(PmagResRec)
if len(PmagSites) > 0:
Tmp, keylist = pmag.fillkeys(PmagSites)
pmag.magic_write(siteout, Tmp, 'pmag_sites')
print(' sites written to ', siteout)
else:
print("No Site level table")
if len(PmagResults) > 0:
TmpRes, keylist = pmag.fillkeys(PmagResults)
pmag.magic_write(resout, TmpRes, 'pmag_results')
print(' results written to ', resout)
else:
print("No Results level table") | python | def specimens_results_magic(infile='pmag_specimens.txt', measfile='magic_measurements.txt', sampfile='er_samples.txt', sitefile='er_sites.txt', agefile='er_ages.txt', specout='er_specimens.txt', sampout='pmag_samples.txt', siteout='pmag_sites.txt', resout='pmag_results.txt', critout='pmag_criteria.txt', instout='magic_instruments.txt', plotsites=False, fmt='svg', dir_path='.', cors=[], priorities=['DA-AC-ARM', 'DA-AC-TRM'], coord='g', user='', vgps_level='site', do_site_intensity=True, DefaultAge=["none"], avg_directions_by_sample=False, avg_intensities_by_sample=False, avg_all_components=False, avg_by_polarity=False, skip_directions=False, skip_intensities=False, use_sample_latitude=False, use_paleolatitude=False, use_criteria='default'):
"""
Writes magic_instruments, er_specimens, pmag_samples, pmag_sites, pmag_criteria, and pmag_results. The data used to write this is obtained by reading a pmag_speciemns, a magic_measurements, a er_samples, a er_sites, a er_ages.
@param -> infile: path from the WD to the pmag speciemns table
@param -> measfile: path from the WD to the magic measurement file
@param -> sampfile: path from the WD to the er sample file
@param -> sitefile: path from the WD to the er sites data file
@param -> agefile: path from the WD to the er ages data file
@param -> specout: path from the WD to the place to write the er specimens data file
@param -> sampout: path from the WD to the place to write the pmag samples data file
@param -> siteout: path from the WD to the place to write the pmag sites data file
@param -> resout: path from the WD to the place to write the pmag results data file
@param -> critout: path from the WD to the place to write the pmag criteria file
@param -> instout: path from th WD to the place to write the magic instruments file
@param -> documentation incomplete if you know more about the purpose of the parameters in this function and it's side effects please extend and complete this string
"""
# initialize some variables
plotsites = False # cannot use draw_figs from within ipmag
Comps = [] # list of components
version_num = pmag.get_version()
args = sys.argv
model_lat_file = ""
Dcrit, Icrit, nocrit = 0, 0, 0
corrections = []
nocorrection = ['DA-NL', 'DA-AC', 'DA-CR']
# do some data adjustments
for cor in cors:
nocorrection.remove('DA-' + cor)
corrections.append('DA-' + cor)
for p in priorities:
if not p.startswith('DA-AC-'):
p = 'DA-AC-' + p
# translate coord into coords
if coord == 's':
coords = ['-1']
if coord == 'g':
coords = ['0']
if coord == 't':
coords = ['100']
if coord == 'b':
coords = ['0', '100']
if vgps_level == 'sample':
vgps = 1 # save sample level VGPS/VADMs
else:
vgps = 0 # site level
if do_site_intensity:
nositeints = 0
else:
nositeints = 1
# chagne these all to True/False instead of 1/0
if not skip_intensities:
# set model lat and
if use_sample_latitude and use_paleolatitude:
print("you should set a paleolatitude file OR use present day lat - not both")
return False
elif use_sample_latitude:
get_model_lat = 1
elif use_paleolatitude:
get_model_lat = 2
try:
model_lat_file = dir_path + '/' + args[ind + 1]
get_model_lat = 2
mlat = open(model_lat_file, 'r')
ModelLats = []
for line in mlat.readlines():
ModelLat = {}
tmp = line.split()
ModelLat["er_site_name"] = tmp[0]
ModelLat["site_model_lat"] = tmp[1]
ModelLat["er_sample_name"] = tmp[0]
ModelLat["sample_lat"] = tmp[1]
ModelLats.append(ModelLat)
mlat.clos()
except:
print("use_paleolatitude option requires a valid paleolatitude file")
else:
get_model_lat = 0 # skips VADM calculation entirely
if plotsites and not skip_directions: # plot by site - set up plot window
EQ = {}
EQ['eqarea'] = 1
# define figure 1 as equal area projection
pmagplotlib.plot_init(EQ['eqarea'], 5, 5)
# I don't know why this has to be here, but otherwise the first plot
# never plots...
pmagplotlib.plot_net(EQ['eqarea'])
pmagplotlib.draw_figs(EQ)
infile = os.path.join(dir_path, infile)
measfile = os.path.join(dir_path, measfile)
instout = os.path.join(dir_path, instout)
sampfile = os.path.join(dir_path, sampfile)
sitefile = os.path.join(dir_path, sitefile)
agefile = os.path.join(dir_path, agefile)
specout = os.path.join(dir_path, specout)
sampout = os.path.join(dir_path, sampout)
siteout = os.path.join(dir_path, siteout)
resout = os.path.join(dir_path, resout)
critout = os.path.join(dir_path, critout)
if use_criteria == 'none':
Dcrit, Icrit, nocrit = 1, 1, 1 # no selection criteria
crit_data = pmag.default_criteria(nocrit)
elif use_criteria == 'default':
crit_data = pmag.default_criteria(nocrit) # use default criteria
elif use_criteria == 'existing':
crit_data, file_type = pmag.magic_read(
critout) # use pmag_criteria file
print("Acceptance criteria read in from ", critout)
accept = {}
for critrec in crit_data:
for key in list(critrec.keys()):
# need to migrate specimen_dang to specimen_int_dang for intensity
# data using old format
if 'IE-SPEC' in list(critrec.keys()) and 'specimen_dang' in list(critrec.keys()) and 'specimen_int_dang' not in list(critrec.keys()):
critrec['specimen_int_dang'] = critrec['specimen_dang']
del critrec['specimen_dang']
# need to get rid of ron shaars sample_int_sigma_uT
if 'sample_int_sigma_uT' in list(critrec.keys()):
critrec['sample_int_sigma'] = '%10.3e' % (
eval(critrec['sample_int_sigma_uT']) * 1e-6)
if key not in list(accept.keys()) and critrec[key] != '':
accept[key] = critrec[key]
if use_criteria == 'default':
pmag.magic_write(critout, [accept], 'pmag_criteria')
print("\n Pmag Criteria stored in ", critout, '\n')
# now we're done slow dancing
# read in site data - has the lats and lons
SiteNFO, file_type = pmag.magic_read(sitefile)
# read in site data - has the lats and lons
SampNFO, file_type = pmag.magic_read(sampfile)
# find all the sites with height info.
height_nfo = pmag.get_dictitem(SiteNFO, 'site_height', '', 'F')
if agefile:
AgeNFO, file_type = pmag.magic_read(
agefile) # read in the age information
# read in specimen interpretations
Data, file_type = pmag.magic_read(infile)
# retrieve specimens with intensity data
IntData = pmag.get_dictitem(Data, 'specimen_int', '', 'F')
comment, orient = "", []
samples, sites = [], []
for rec in Data: # run through the data filling in missing keys and finding all components, coordinates available
# fill in missing fields, collect unique sample and site names
if 'er_sample_name' not in list(rec.keys()):
rec['er_sample_name'] = ""
elif rec['er_sample_name'] not in samples:
samples.append(rec['er_sample_name'])
if 'er_site_name' not in list(rec.keys()):
rec['er_site_name'] = ""
elif rec['er_site_name'] not in sites:
sites.append(rec['er_site_name'])
if 'specimen_int' not in list(rec.keys()):
rec['specimen_int'] = ''
if 'specimen_comp_name' not in list(rec.keys()) or rec['specimen_comp_name'] == "":
rec['specimen_comp_name'] = 'A'
if rec['specimen_comp_name'] not in Comps:
Comps.append(rec['specimen_comp_name'])
rec['specimen_tilt_correction'] = rec['specimen_tilt_correction'].strip(
'\n')
if "specimen_tilt_correction" not in list(rec.keys()):
rec["specimen_tilt_correction"] = "-1" # assume sample coordinates
if rec["specimen_tilt_correction"] not in orient:
# collect available coordinate systems
orient.append(rec["specimen_tilt_correction"])
if "specimen_direction_type" not in list(rec.keys()):
# assume direction is line - not plane
rec["specimen_direction_type"] = 'l'
if "specimen_dec" not in list(rec.keys()):
# if no declination, set direction type to blank
rec["specimen_direction_type"] = ''
if "specimen_n" not in list(rec.keys()):
rec["specimen_n"] = '' # put in n
if "specimen_alpha95" not in list(rec.keys()):
rec["specimen_alpha95"] = '' # put in alpha95
if "magic_method_codes" not in list(rec.keys()):
rec["magic_method_codes"] = ''
# start parsing data into SpecDirs, SpecPlanes, SpecInts
SpecInts, SpecDirs, SpecPlanes = [], [], []
samples.sort() # get sorted list of samples and sites
sites.sort()
if not skip_intensities: # don't skip intensities
# retrieve specimens with intensity data
IntData = pmag.get_dictitem(Data, 'specimen_int', '', 'F')
if nocrit == 0: # use selection criteria
for rec in IntData: # do selection criteria
kill = pmag.grade(rec, accept, 'specimen_int')
if len(kill) == 0:
# intensity record to be included in sample, site
# calculations
SpecInts.append(rec)
else:
SpecInts = IntData[:] # take everything - no selection criteria
# check for required data adjustments
if len(corrections) > 0 and len(SpecInts) > 0:
for cor in corrections:
# only take specimens with the required corrections
SpecInts = pmag.get_dictitem(
SpecInts, 'magic_method_codes', cor, 'has')
if len(nocorrection) > 0 and len(SpecInts) > 0:
for cor in nocorrection:
# exclude the corrections not specified for inclusion
SpecInts = pmag.get_dictitem(
SpecInts, 'magic_method_codes', cor, 'not')
# take top priority specimen of its name in remaining specimens (only one
# per customer)
PrioritySpecInts = []
specimens = pmag.get_specs(SpecInts) # get list of uniq specimen names
for spec in specimens:
# all the records for this specimen
ThisSpecRecs = pmag.get_dictitem(
SpecInts, 'er_specimen_name', spec, 'T')
if len(ThisSpecRecs) == 1:
PrioritySpecInts.append(ThisSpecRecs[0])
elif len(ThisSpecRecs) > 1: # more than one
prec = []
for p in priorities:
# all the records for this specimen
ThisSpecRecs = pmag.get_dictitem(
SpecInts, 'magic_method_codes', p, 'has')
if len(ThisSpecRecs) > 0:
prec.append(ThisSpecRecs[0])
PrioritySpecInts.append(prec[0]) # take the best one
SpecInts = PrioritySpecInts # this has the first specimen record
if not skip_directions: # don't skip directions
# retrieve specimens with directed lines and planes
AllDirs = pmag.get_dictitem(Data, 'specimen_direction_type', '', 'F')
# get all specimens with specimen_n information
Ns = pmag.get_dictitem(AllDirs, 'specimen_n', '', 'F')
if nocrit != 1: # use selection criteria
for rec in Ns: # look through everything with specimen_n for "good" data
kill = pmag.grade(rec, accept, 'specimen_dir')
if len(kill) == 0: # nothing killed it
SpecDirs.append(rec)
else: # no criteria
SpecDirs = AllDirs[:] # take them all
# SpecDirs is now the list of all specimen directions (lines and planes)
# that pass muster
# list of all sample data and list of those that pass the DE-SAMP criteria
PmagSamps, SampDirs = [], []
PmagSites, PmagResults = [], [] # list of all site data and selected results
SampInts = []
for samp in samples: # run through the sample names
if avg_directions_by_sample: # average by sample if desired
# get all the directional data for this sample
SampDir = pmag.get_dictitem(SpecDirs, 'er_sample_name', samp, 'T')
if len(SampDir) > 0: # there are some directions
for coord in coords: # step through desired coordinate systems
# get all the directions for this sample
CoordDir = pmag.get_dictitem(
SampDir, 'specimen_tilt_correction', coord, 'T')
if len(CoordDir) > 0: # there are some with this coordinate system
if not avg_all_components: # look component by component
for comp in Comps:
# get all directions from this component
CompDir = pmag.get_dictitem(
CoordDir, 'specimen_comp_name', comp, 'T')
if len(CompDir) > 0: # there are some
# get a sample average from all specimens
PmagSampRec = pmag.lnpbykey(
CompDir, 'sample', 'specimen')
# decorate the sample record
PmagSampRec["er_location_name"] = CompDir[0]['er_location_name']
PmagSampRec["er_site_name"] = CompDir[0]['er_site_name']
PmagSampRec["er_sample_name"] = samp
PmagSampRec["er_citation_names"] = "This study"
PmagSampRec["er_analyst_mail_names"] = user
PmagSampRec['magic_software_packages'] = version_num
if CompDir[0]['specimen_flag'] == 'g':
PmagSampRec['sample_flag'] = 'g'
else:
PmagSampRec['sample_flag'] = 'b'
if nocrit != 1:
PmagSampRec['pmag_criteria_codes'] = "ACCEPT"
if agefile != "":
PmagSampRec = pmag.get_age(
PmagSampRec, "er_site_name", "sample_inferred_", AgeNFO, DefaultAge)
site_height = pmag.get_dictitem(
height_nfo, 'er_site_name', PmagSampRec['er_site_name'], 'T')
if len(site_height) > 0:
# add in height if available
PmagSampRec["sample_height"] = site_height[0]['site_height']
PmagSampRec['sample_comp_name'] = comp
PmagSampRec['sample_tilt_correction'] = coord
PmagSampRec['er_specimen_names'] = pmag.get_list(
CompDir, 'er_specimen_name') # get a list of the specimen names used
PmagSampRec['magic_method_codes'] = pmag.get_list(
CompDir, 'magic_method_codes') # get a list of the methods used
if nocrit != 1: # apply selection criteria
kill = pmag.grade(
PmagSampRec, accept, 'sample_dir')
else:
kill = []
if len(kill) == 0:
SampDirs.append(PmagSampRec)
if vgps == 1: # if sample level VGP info desired, do that now
PmagResRec = pmag.getsampVGP(
PmagSampRec, SiteNFO)
if PmagResRec != "":
PmagResults.append(PmagResRec)
# print(PmagSampRec)
PmagSamps.append(PmagSampRec)
if avg_all_components: # average all components together basically same as above
PmagSampRec = pmag.lnpbykey(
CoordDir, 'sample', 'specimen')
PmagSampRec["er_location_name"] = CoordDir[0]['er_location_name']
PmagSampRec["er_site_name"] = CoordDir[0]['er_site_name']
PmagSampRec["er_sample_name"] = samp
PmagSampRec["er_citation_names"] = "This study"
PmagSampRec["er_analyst_mail_names"] = user
PmagSampRec['magic_software_packages'] = version_num
if all(i['specimen_flag'] == 'g' for i in CoordDir):
PmagSampRec['sample_flag'] = 'g'
else:
PmagSampRec['sample_flag'] = 'b'
if nocrit != 1:
PmagSampRec['pmag_criteria_codes'] = ""
if agefile != "":
PmagSampRec = pmag.get_age(
PmagSampRec, "er_site_name", "sample_inferred_", AgeNFO, DefaultAge)
site_height = pmag.get_dictitem(
height_nfo, 'er_site_name', site, 'T')
if len(site_height) > 0:
# add in height if available
PmagSampRec["sample_height"] = site_height[0]['site_height']
PmagSampRec['sample_tilt_correction'] = coord
PmagSampRec['sample_comp_name'] = pmag.get_list(
CoordDir, 'specimen_comp_name') # get components used
PmagSampRec['er_specimen_names'] = pmag.get_list(
CoordDir, 'er_specimen_name') # get specimne names averaged
PmagSampRec['magic_method_codes'] = pmag.get_list(
CoordDir, 'magic_method_codes') # assemble method codes
if nocrit != 1: # apply selection criteria
kill = pmag.grade(
PmagSampRec, accept, 'sample_dir')
if len(kill) == 0: # passes the mustard
SampDirs.append(PmagSampRec)
if vgps == 1:
PmagResRec = pmag.getsampVGP(
PmagSampRec, SiteNFO)
if PmagResRec != "":
PmagResults.append(PmagResRec)
else: # take everything
SampDirs.append(PmagSampRec)
if vgps == 1:
PmagResRec = pmag.getsampVGP(
PmagSampRec, SiteNFO)
if PmagResRec != "":
PmagResults.append(PmagResRec)
PmagSamps.append(PmagSampRec)
if avg_intensities_by_sample: # average by sample if desired
# get all the intensity data for this sample
SampI = pmag.get_dictitem(SpecInts, 'er_sample_name', samp, 'T')
if len(SampI) > 0: # there are some
# get average intensity stuff
PmagSampRec = pmag.average_int(SampI, 'specimen', 'sample')
# decorate sample record
PmagSampRec["sample_description"] = "sample intensity"
PmagSampRec["sample_direction_type"] = ""
PmagSampRec['er_site_name'] = SampI[0]["er_site_name"]
PmagSampRec['er_sample_name'] = samp
PmagSampRec['er_location_name'] = SampI[0]["er_location_name"]
PmagSampRec["er_citation_names"] = "This study"
PmagSampRec["er_analyst_mail_names"] = user
if agefile != "":
PmagSampRec = pmag.get_age(
PmagSampRec, "er_site_name", "sample_inferred_", AgeNFO, DefaultAge)
site_height = pmag.get_dictitem(
height_nfo, 'er_site_name', PmagSampRec['er_site_name'], 'T')
if len(site_height) > 0:
# add in height if available
PmagSampRec["sample_height"] = site_height[0]['site_height']
PmagSampRec['er_specimen_names'] = pmag.get_list(
SampI, 'er_specimen_name')
PmagSampRec['magic_method_codes'] = pmag.get_list(
SampI, 'magic_method_codes')
if nocrit != 1: # apply criteria!
kill = pmag.grade(PmagSampRec, accept, 'sample_int')
if len(kill) == 0:
PmagSampRec['pmag_criteria_codes'] = "ACCEPT"
SampInts.append(PmagSampRec)
PmagSamps.append(PmagSampRec)
else:
PmagSampRec = {} # sample rejected
else: # no criteria
SampInts.append(PmagSampRec)
PmagSamps.append(PmagSampRec)
PmagSampRec['pmag_criteria_codes'] = ""
if vgps == 1 and get_model_lat != 0 and PmagSampRec != {}:
if get_model_lat == 1: # use sample latitude
PmagResRec = pmag.getsampVDM(PmagSampRec, SampNFO)
# get rid of the model lat key
del(PmagResRec['model_lat'])
elif get_model_lat == 2: # use model latitude
PmagResRec = pmag.getsampVDM(PmagSampRec, ModelLats)
if PmagResRec != {}:
PmagResRec['magic_method_codes'] = PmagResRec['magic_method_codes'] + ":IE-MLAT"
if PmagResRec != {}:
PmagResRec['er_specimen_names'] = PmagSampRec['er_specimen_names']
PmagResRec['er_sample_names'] = PmagSampRec['er_sample_name']
PmagResRec['pmag_criteria_codes'] = 'ACCEPT'
PmagResRec['average_int_sigma_perc'] = PmagSampRec['sample_int_sigma_perc']
PmagResRec['average_int_sigma'] = PmagSampRec['sample_int_sigma']
PmagResRec['average_int_n'] = PmagSampRec['sample_int_n']
PmagResRec['vadm_n'] = PmagSampRec['sample_int_n']
PmagResRec['data_type'] = 'i'
PmagResults.append(PmagResRec)
if len(PmagSamps) > 0:
# fill in missing keys from different types of records
TmpSamps, keylist = pmag.fillkeys(PmagSamps)
# save in sample output file
pmag.magic_write(sampout, TmpSamps, 'pmag_samples')
print(' sample averages written to ', sampout)
#
# create site averages from specimens or samples as specified
#
for site in sites:
for coord in coords:
if not avg_directions_by_sample:
key, dirlist = 'specimen', SpecDirs # if specimen averages at site level desired
if avg_directions_by_sample:
key, dirlist = 'sample', SampDirs # if sample averages at site level desired
# get all the sites with directions
tmp = pmag.get_dictitem(dirlist, 'er_site_name', site, 'T')
# use only the last coordinate if avg_all_components==False
tmp1 = pmag.get_dictitem(tmp, key + '_tilt_correction', coord, 'T')
# fish out site information (lat/lon, etc.)
sd = pmag.get_dictitem(SiteNFO, 'er_site_name', site, 'T')
if len(sd) > 0:
sitedat = sd[0]
if not avg_all_components: # do component wise averaging
for comp in Comps:
# get all components comp
siteD = pmag.get_dictitem(
tmp1, key + '_comp_name', comp, 'T')
# remove bad data from means
quality_siteD = []
# remove any records for which specimen_flag or sample_flag are 'b'
# assume 'g' if flag is not provided
for rec in siteD:
spec_quality = rec.get('specimen_flag', 'g')
samp_quality = rec.get('sample_flag', 'g')
if (spec_quality == 'g') and (samp_quality == 'g'):
quality_siteD.append(rec)
siteD = quality_siteD
if len(siteD) > 0: # there are some for this site and component name
# get an average for this site
PmagSiteRec = pmag.lnpbykey(siteD, 'site', key)
# decorate the site record
PmagSiteRec['site_comp_name'] = comp
PmagSiteRec["er_location_name"] = siteD[0]['er_location_name']
PmagSiteRec["er_site_name"] = siteD[0]['er_site_name']
PmagSiteRec['site_tilt_correction'] = coord
PmagSiteRec['site_comp_name'] = pmag.get_list(
siteD, key + '_comp_name')
if avg_directions_by_sample:
PmagSiteRec['er_sample_names'] = pmag.get_list(
siteD, 'er_sample_name')
else:
PmagSiteRec['er_specimen_names'] = pmag.get_list(
siteD, 'er_specimen_name')
# determine the demagnetization code (DC3,4 or 5) for this site
AFnum = len(pmag.get_dictitem(
siteD, 'magic_method_codes', 'LP-DIR-AF', 'has'))
Tnum = len(pmag.get_dictitem(
siteD, 'magic_method_codes', 'LP-DIR-T', 'has'))
DC = 3
if AFnum > 0:
DC += 1
if Tnum > 0:
DC += 1
PmagSiteRec['magic_method_codes'] = pmag.get_list(
siteD, 'magic_method_codes') + ':' + 'LP-DC' + str(DC)
PmagSiteRec['magic_method_codes'].strip(":")
if plotsites:
print(PmagSiteRec['er_site_name'])
# plot and list the data
pmagplotlib.plot_site(
EQ['eqarea'], PmagSiteRec, siteD, key)
pmagplotlib.draw_figs(EQ)
PmagSites.append(PmagSiteRec)
else: # last component only
# get the last orientation system specified
siteD = tmp1[:]
if len(siteD) > 0: # there are some
# get the average for this site
PmagSiteRec = pmag.lnpbykey(siteD, 'site', key)
# decorate the record
PmagSiteRec["er_location_name"] = siteD[0]['er_location_name']
PmagSiteRec["er_site_name"] = siteD[0]['er_site_name']
PmagSiteRec['site_comp_name'] = comp
PmagSiteRec['site_tilt_correction'] = coord
PmagSiteRec['site_comp_name'] = pmag.get_list(
siteD, key + '_comp_name')
PmagSiteRec['er_specimen_names'] = pmag.get_list(
siteD, 'er_specimen_name')
PmagSiteRec['er_sample_names'] = pmag.get_list(
siteD, 'er_sample_name')
AFnum = len(pmag.get_dictitem(
siteD, 'magic_method_codes', 'LP-DIR-AF', 'has'))
Tnum = len(pmag.get_dictitem(
siteD, 'magic_method_codes', 'LP-DIR-T', 'has'))
DC = 3
if AFnum > 0:
DC += 1
if Tnum > 0:
DC += 1
PmagSiteRec['magic_method_codes'] = pmag.get_list(
siteD, 'magic_method_codes') + ':' + 'LP-DC' + str(DC)
PmagSiteRec['magic_method_codes'].strip(":")
if not avg_directions_by_sample:
PmagSiteRec['site_comp_name'] = pmag.get_list(
siteD, key + '_comp_name')
if plotsites:
pmagplotlib.plot_site(
EQ['eqarea'], PmagSiteRec, siteD, key)
pmagplotlib.draw_figs(EQ)
PmagSites.append(PmagSiteRec)
else:
print('site information not found in er_sites for site, ',
site, ' site will be skipped')
for PmagSiteRec in PmagSites: # now decorate each dictionary some more, and calculate VGPs etc. for results table
PmagSiteRec["er_citation_names"] = "This study"
PmagSiteRec["er_analyst_mail_names"] = user
PmagSiteRec['magic_software_packages'] = version_num
if agefile != "":
PmagSiteRec = pmag.get_age(
PmagSiteRec, "er_site_name", "site_inferred_", AgeNFO, DefaultAge)
PmagSiteRec['pmag_criteria_codes'] = 'ACCEPT'
if 'site_n_lines' in list(PmagSiteRec.keys()) and 'site_n_planes' in list(PmagSiteRec.keys()) and PmagSiteRec['site_n_lines'] != "" and PmagSiteRec['site_n_planes'] != "":
if int(PmagSiteRec["site_n_planes"]) > 0:
PmagSiteRec["magic_method_codes"] = PmagSiteRec['magic_method_codes'] + ":DE-FM-LP"
elif int(PmagSiteRec["site_n_lines"]) > 2:
PmagSiteRec["magic_method_codes"] = PmagSiteRec['magic_method_codes'] + ":DE-FM"
kill = pmag.grade(PmagSiteRec, accept, 'site_dir')
if len(kill) == 0:
PmagResRec = {} # set up dictionary for the pmag_results table entry
PmagResRec['data_type'] = 'i' # decorate it a bit
PmagResRec['magic_software_packages'] = version_num
PmagSiteRec['site_description'] = 'Site direction included in results table'
PmagResRec['pmag_criteria_codes'] = 'ACCEPT'
dec = float(PmagSiteRec["site_dec"])
inc = float(PmagSiteRec["site_inc"])
if 'site_alpha95' in list(PmagSiteRec.keys()) and PmagSiteRec['site_alpha95'] != "":
a95 = float(PmagSiteRec["site_alpha95"])
else:
a95 = 180.
sitedat = pmag.get_dictitem(SiteNFO, 'er_site_name', PmagSiteRec['er_site_name'], 'T')[
0] # fish out site information (lat/lon, etc.)
lat = float(sitedat['site_lat'])
lon = float(sitedat['site_lon'])
plon, plat, dp, dm = pmag.dia_vgp(
dec, inc, a95, lat, lon) # get the VGP for this site
if PmagSiteRec['site_tilt_correction'] == '-1':
C = ' (spec coord) '
if PmagSiteRec['site_tilt_correction'] == '0':
C = ' (geog. coord) '
if PmagSiteRec['site_tilt_correction'] == '100':
C = ' (strat. coord) '
PmagResRec["pmag_result_name"] = "VGP Site: " + \
PmagSiteRec["er_site_name"] # decorate some more
PmagResRec["result_description"] = "Site VGP, coord system = " + \
str(coord) + ' component: ' + comp
PmagResRec['er_site_names'] = PmagSiteRec['er_site_name']
PmagResRec['pmag_criteria_codes'] = 'ACCEPT'
PmagResRec['er_citation_names'] = 'This study'
PmagResRec['er_analyst_mail_names'] = user
PmagResRec["er_location_names"] = PmagSiteRec["er_location_name"]
if avg_directions_by_sample:
PmagResRec["er_sample_names"] = PmagSiteRec["er_sample_names"]
else:
PmagResRec["er_specimen_names"] = PmagSiteRec["er_specimen_names"]
PmagResRec["tilt_correction"] = PmagSiteRec['site_tilt_correction']
PmagResRec["pole_comp_name"] = PmagSiteRec['site_comp_name']
PmagResRec["average_dec"] = PmagSiteRec["site_dec"]
PmagResRec["average_inc"] = PmagSiteRec["site_inc"]
PmagResRec["average_alpha95"] = PmagSiteRec["site_alpha95"]
PmagResRec["average_n"] = PmagSiteRec["site_n"]
PmagResRec["average_n_lines"] = PmagSiteRec["site_n_lines"]
PmagResRec["average_n_planes"] = PmagSiteRec["site_n_planes"]
PmagResRec["vgp_n"] = PmagSiteRec["site_n"]
PmagResRec["average_k"] = PmagSiteRec["site_k"]
PmagResRec["average_r"] = PmagSiteRec["site_r"]
PmagResRec["average_lat"] = '%10.4f ' % (lat)
PmagResRec["average_lon"] = '%10.4f ' % (lon)
if agefile != "":
PmagResRec = pmag.get_age(
PmagResRec, "er_site_names", "average_", AgeNFO, DefaultAge)
site_height = pmag.get_dictitem(
height_nfo, 'er_site_name', site, 'T')
if len(site_height) > 0:
PmagResRec["average_height"] = site_height[0]['site_height']
PmagResRec["vgp_lat"] = '%7.1f ' % (plat)
PmagResRec["vgp_lon"] = '%7.1f ' % (plon)
PmagResRec["vgp_dp"] = '%7.1f ' % (dp)
PmagResRec["vgp_dm"] = '%7.1f ' % (dm)
PmagResRec["magic_method_codes"] = PmagSiteRec["magic_method_codes"]
if '0' in PmagSiteRec['site_tilt_correction'] and "DA-DIR-GEO" not in PmagSiteRec['magic_method_codes']:
PmagSiteRec['magic_method_codes'] = PmagSiteRec['magic_method_codes'] + ":DA-DIR-GEO"
if '100' in PmagSiteRec['site_tilt_correction'] and "DA-DIR-TILT" not in PmagSiteRec['magic_method_codes']:
PmagSiteRec['magic_method_codes'] = PmagSiteRec['magic_method_codes'] + ":DA-DIR-TILT"
PmagSiteRec['site_polarity'] = ""
if avg_by_polarity: # assign polarity based on angle of pole lat to spin axis - may want to re-think this sometime
angle = pmag.angle([0, 0], [0, (90 - plat)])
if angle <= 55.:
PmagSiteRec["site_polarity"] = 'n'
if angle > 55. and angle < 125.:
PmagSiteRec["site_polarity"] = 't'
if angle >= 125.:
PmagSiteRec["site_polarity"] = 'r'
PmagResults.append(PmagResRec)
if avg_by_polarity:
# find the tilt corrected data
crecs = pmag.get_dictitem(
PmagSites, 'site_tilt_correction', '100', 'T')
if len(crecs) < 2:
# if there aren't any, find the geographic corrected data
crecs = pmag.get_dictitem(
PmagSites, 'site_tilt_correction', '0', 'T')
if len(crecs) > 2: # if there are some,
comp = pmag.get_list(crecs, 'site_comp_name').split(':')[
0] # find the first component
# fish out all of the first component
crecs = pmag.get_dictitem(crecs, 'site_comp_name', comp, 'T')
precs = []
for rec in crecs:
precs.append({'dec': rec['site_dec'], 'inc': rec['site_inc'],
'name': rec['er_site_name'], 'loc': rec['er_location_name']})
# calculate average by polarity
polpars = pmag.fisher_by_pol(precs)
# hunt through all the modes (normal=A, reverse=B, all=ALL)
for mode in list(polpars.keys()):
PolRes = {}
PolRes['er_citation_names'] = 'This study'
PolRes["pmag_result_name"] = "Polarity Average: Polarity " + mode
PolRes["data_type"] = "a"
PolRes["average_dec"] = '%7.1f' % (polpars[mode]['dec'])
PolRes["average_inc"] = '%7.1f' % (polpars[mode]['inc'])
PolRes["average_n"] = '%i' % (polpars[mode]['n'])
PolRes["average_r"] = '%5.4f' % (polpars[mode]['r'])
PolRes["average_k"] = '%6.0f' % (polpars[mode]['k'])
PolRes["average_alpha95"] = '%7.1f' % (
polpars[mode]['alpha95'])
PolRes['er_site_names'] = polpars[mode]['sites']
PolRes['er_location_names'] = polpars[mode]['locs']
PolRes['magic_software_packages'] = version_num
PmagResults.append(PolRes)
if not skip_intensities and nositeints != 1:
for site in sites: # now do intensities for each site
if plotsites:
print(site)
if not avg_intensities_by_sample:
key, intlist = 'specimen', SpecInts # if using specimen level data
if avg_intensities_by_sample:
key, intlist = 'sample', PmagSamps # if using sample level data
# get all the intensities for this site
Ints = pmag.get_dictitem(intlist, 'er_site_name', site, 'T')
if len(Ints) > 0: # there are some
# get average intensity stuff for site table
PmagSiteRec = pmag.average_int(Ints, key, 'site')
# get average intensity stuff for results table
PmagResRec = pmag.average_int(Ints, key, 'average')
if plotsites: # if site by site examination requested - print this site out to the screen
for rec in Ints:
print(rec['er_' + key + '_name'], ' %7.1f' %
(1e6 * float(rec[key + '_int'])))
if len(Ints) > 1:
print('Average: ', '%7.1f' % (
1e6 * float(PmagResRec['average_int'])), 'N: ', len(Ints))
print('Sigma: ', '%7.1f' % (
1e6 * float(PmagResRec['average_int_sigma'])), 'Sigma %: ', PmagResRec['average_int_sigma_perc'])
input('Press any key to continue\n')
er_location_name = Ints[0]["er_location_name"]
# decorate the records
PmagSiteRec["er_location_name"] = er_location_name
PmagSiteRec["er_citation_names"] = "This study"
PmagResRec["er_location_names"] = er_location_name
PmagResRec["er_citation_names"] = "This study"
PmagSiteRec["er_analyst_mail_names"] = user
PmagResRec["er_analyst_mail_names"] = user
PmagResRec["data_type"] = 'i'
if not avg_intensities_by_sample:
PmagSiteRec['er_specimen_names'] = pmag.get_list(
Ints, 'er_specimen_name') # list of all specimens used
PmagResRec['er_specimen_names'] = pmag.get_list(
Ints, 'er_specimen_name')
PmagSiteRec['er_sample_names'] = pmag.get_list(
Ints, 'er_sample_name') # list of all samples used
PmagResRec['er_sample_names'] = pmag.get_list(
Ints, 'er_sample_name')
PmagSiteRec['er_site_name'] = site
PmagResRec['er_site_names'] = site
PmagSiteRec['magic_method_codes'] = pmag.get_list(
Ints, 'magic_method_codes')
PmagResRec['magic_method_codes'] = pmag.get_list(
Ints, 'magic_method_codes')
kill = pmag.grade(PmagSiteRec, accept, 'site_int')
if nocrit == 1 or len(kill) == 0:
b, sig = float(PmagResRec['average_int']), ""
if(PmagResRec['average_int_sigma']) != "":
sig = float(PmagResRec['average_int_sigma'])
# fish out site direction
sdir = pmag.get_dictitem(
PmagResults, 'er_site_names', site, 'T')
# get the VDM for this record using last average
# inclination (hope it is the right one!)
if len(sdir) > 0 and sdir[-1]['average_inc'] != "":
inc = float(sdir[0]['average_inc'])
# get magnetic latitude using dipole formula
mlat = pmag.magnetic_lat(inc)
# get VDM with magnetic latitude
PmagResRec["vdm"] = '%8.3e ' % (pmag.b_vdm(b, mlat))
PmagResRec["vdm_n"] = PmagResRec['average_int_n']
if 'average_int_sigma' in list(PmagResRec.keys()) and PmagResRec['average_int_sigma'] != "":
vdm_sig = pmag.b_vdm(
float(PmagResRec['average_int_sigma']), mlat)
PmagResRec["vdm_sigma"] = '%8.3e ' % (vdm_sig)
else:
PmagResRec["vdm_sigma"] = ""
mlat = "" # define a model latitude
if get_model_lat == 1: # use present site latitude
mlats = pmag.get_dictitem(
SiteNFO, 'er_site_name', site, 'T')
if len(mlats) > 0:
mlat = mlats[0]['site_lat']
# use a model latitude from some plate reconstruction model
# (or something)
elif get_model_lat == 2:
mlats = pmag.get_dictitem(
ModelLats, 'er_site_name', site, 'T')
if len(mlats) > 0:
PmagResRec['model_lat'] = mlats[0]['site_model_lat']
mlat = PmagResRec['model_lat']
if mlat != "":
# get the VADM using the desired latitude
PmagResRec["vadm"] = '%8.3e ' % (
pmag.b_vdm(b, float(mlat)))
if sig != "":
vdm_sig = pmag.b_vdm(
float(PmagResRec['average_int_sigma']), float(mlat))
PmagResRec["vadm_sigma"] = '%8.3e ' % (vdm_sig)
PmagResRec["vadm_n"] = PmagResRec['average_int_n']
else:
PmagResRec["vadm_sigma"] = ""
# fish out site information (lat/lon, etc.)
sitedat = pmag.get_dictitem(
SiteNFO, 'er_site_name', PmagSiteRec['er_site_name'], 'T')
if len(sitedat) > 0:
sitedat = sitedat[0]
PmagResRec['average_lat'] = sitedat['site_lat']
PmagResRec['average_lon'] = sitedat['site_lon']
else:
PmagResRec['average_lon'] = 'UNKNOWN'
PmagResRec['average_lon'] = 'UNKNOWN'
PmagResRec['magic_software_packages'] = version_num
PmagResRec["pmag_result_name"] = "V[A]DM: Site " + site
PmagResRec["result_description"] = "V[A]DM of site"
PmagResRec["pmag_criteria_codes"] = "ACCEPT"
if agefile != "":
PmagResRec = pmag.get_age(
PmagResRec, "er_site_names", "average_", AgeNFO, DefaultAge)
site_height = pmag.get_dictitem(
height_nfo, 'er_site_name', site, 'T')
if len(site_height) > 0:
PmagResRec["average_height"] = site_height[0]['site_height']
PmagSites.append(PmagSiteRec)
PmagResults.append(PmagResRec)
if len(PmagSites) > 0:
Tmp, keylist = pmag.fillkeys(PmagSites)
pmag.magic_write(siteout, Tmp, 'pmag_sites')
print(' sites written to ', siteout)
else:
print("No Site level table")
if len(PmagResults) > 0:
TmpRes, keylist = pmag.fillkeys(PmagResults)
pmag.magic_write(resout, TmpRes, 'pmag_results')
print(' results written to ', resout)
else:
print("No Results level table") | Writes magic_instruments, er_specimens, pmag_samples, pmag_sites, pmag_criteria, and pmag_results. The data used to write this is obtained by reading a pmag_specimens, a magic_measurements, an er_samples, an er_sites, and an er_ages file.
@param -> infile: path from the WD to the pmag specimens table
@param -> measfile: path from the WD to the magic measurement file
@param -> sampfile: path from the WD to the er sample file
@param -> sitefile: path from the WD to the er sites data file
@param -> agefile: path from the WD to the er ages data file
@param -> specout: path from the WD to the place to write the er specimens data file
@param -> sampout: path from the WD to the place to write the pmag samples data file
@param -> siteout: path from the WD to the place to write the pmag sites data file
@param -> resout: path from the WD to the place to write the pmag results data file
@param -> critout: path from the WD to the place to write the pmag criteria file
@param -> instout: path from the WD to the place to write the magic instruments file
@param -> documentation incomplete: if you know more about the purpose of the parameters in this function and its side effects, please extend and complete this string | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L4676-L5466 |
PmagPy/PmagPy | pmagpy/ipmag.py | orientation_magic | def orientation_magic(or_con=1, dec_correction_con=1, dec_correction=0, bed_correction=True,
samp_con='1', hours_from_gmt=0, method_codes='', average_bedding=False,
orient_file='orient.txt', samp_file='samples.txt', site_file='sites.txt',
output_dir_path='.', input_dir_path='', append=False, data_model=3):
"""
use this function to convert tab delimited field notebook information to MagIC formatted tables (er_samples and er_sites)
INPUT FORMAT
Input files must be tab delimited and have in the first line:
tab location_name
Note: The "location_name" will facilitate searching in the MagIC database. Data from different
"locations" should be put in separate files. The definition of a "location" is rather loose.
Also this is the word 'tab' not a tab, which will be indicated by '\t'.
The second line has the names of the columns (tab delimited), e.g.:
site_name sample_name mag_azimuth field_dip date lat long sample_lithology sample_type sample_class shadow_angle hhmm stratigraphic_height bedding_dip_direction bedding_dip GPS_baseline image_name image_look image_photographer participants method_codes site_description sample_description GPS_Az, sample_igsn, sample_texture, sample_cooling_rate, cooling_rate_corr, cooling_rate_mcd
Notes:
1) column order doesn't matter but the NAMES do.
2) sample_name, sample_lithology, sample_type, sample_class, lat and long are required. all others are optional.
3) If subsequent data are the same (e.g., date, bedding orientation, participants, stratigraphic_height),
you can leave the field blank and the program will fill in the last recorded information. BUT if you really want a blank stratigraphic_height, enter a '-1'. These will not be inherited and must be specified for each entry: image_name, look, photographer or method_codes
4) hhmm must be in the format: hh:mm and the hh must be in 24 hour time.
date must be mm/dd/yy (years < 50 will be converted to 20yy and >50 will be assumed 19yy). hours_from_gmt is the number of hours to SUBTRACT from hh to get to GMT.
5) image_name, image_look and image_photographer are colon delimited lists of file name (e.g., IMG_001.jpg) image look direction and the name of the photographer respectively. If all images had same look and photographer, just enter info once. The images will be assigned to the site for which they were taken - not at the sample level.
6) participants: Names of who helped take the samples. These must be a colon delimited list.
7) method_codes: Special method codes on a sample level, e.g., SO-GT5 which means the orientation is has an uncertainty of >5 degrees
for example if it broke off before orienting....
8) GPS_Az is the place to put directly determined GPS Azimuths, using, e.g., points along the drill direction.
9) sample_cooling_rate is the cooling rate in K per Ma
10) int_corr_cooling_rate
11) cooling_rate_mcd: data adjustment method code for cooling rate correction; DA-CR-EG is educated guess; DA-CR-PS is percent estimated from pilot samples; DA-CR-TRM is comparison between 2 TRMs acquired with slow and rapid cooling rates.
is the percent cooling rate factor to apply to specimens from this sample, DA-CR-XX is the method code
defaults:
orientation_magic(or_con=1, dec_correction_con=1, dec_correction=0, bed_correction=True, samp_con='1', hours_from_gmt=0, method_codes='', average_bedding=False, orient_file='orient.txt', samp_file='er_samples.txt', site_file='er_sites.txt', output_dir_path='.', input_dir_path='', append=False):
orientation conventions:
[1] Standard Pomeroy convention of azimuth and hade (degrees from vertical down)
of the drill direction (field arrow). lab arrow azimuth= sample_azimuth = mag_azimuth;
lab arrow dip = sample_dip =-field_dip. i.e. the lab arrow dip is minus the hade.
[2] Field arrow is the strike of the plane orthogonal to the drill direction,
Field dip is the hade of the drill direction. Lab arrow azimuth = mag_azimuth-90
Lab arrow dip = -field_dip
[3] Lab arrow is the same as the drill direction;
hade was measured in the field.
Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
[4] lab azimuth and dip are same as mag_azimuth, field_dip : use this for unoriented samples too
[5] Same as AZDIP convention explained below -
azimuth and inclination of the drill direction are mag_azimuth and field_dip;
lab arrow is as in [1] above.
lab azimuth is same as mag_azimuth,lab arrow dip=field_dip-90
[6] Lab arrow azimuth = mag_azimuth-90; Lab arrow dip = 90-field_dip
[7] see http://earthref.org/PmagPy/cookbook/#field_info for more information. You can customize other format yourself, or email [email protected] for help.
Magnetic declination convention:
[1] Use the IGRF value at the lat/long and date supplied [default]
[2] Will supply declination correction
[3] mag_az is already corrected in file
[4] Correct mag_az but not bedding_dip_dir
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
NB: all others you will have to either customize your
self or e-mail [email protected] for help.
"""
# initialize some variables
# bed_correction used to be BedCorr
# dec_correction_con used to be corr
# dec_correction used to be DecCorr
# meths is now method_codes
# delta_u is now hours_from_gmt
input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, output_dir_path)
or_con, dec_correction_con, dec_correction = int(
or_con), int(dec_correction_con), float(dec_correction)
hours_from_gmt = float(hours_from_gmt)
stratpos = ""
# date of sampling, latitude (pos North), longitude (pos East)
date, lat, lon = "", "", ""
bed_dip, bed_dip_dir = "", ""
Lats, Lons = [], [] # list of latitudes and longitudes
# lists of Sample records and Site records
SampOuts, SiteOuts, ImageOuts = [], [], []
samplelist, sitelist, imagelist = [], [], []
Z = 1
newbaseline, newbeddir, newbeddip = "", "", ""
fpars = []
sclass, lithology, sample_type = "", "", ""
newclass, newlith, newtype = '', '', ''
BPs = [] # bedding pole declinations, bedding pole inclinations
image_file = "er_images.txt"
#
# use 3.0. default filenames when in 3.0.
# but, still allow for custom names
data_model = int(data_model)
if data_model == 3:
if samp_file == "er_samples.txt":
samp_file = "samples.txt"
if site_file == "er_sites.txt":
site_file = "sites.txt"
image_file = "images.txt"
orient_file = pmag.resolve_file_name(orient_file, input_dir_path)
if not os.path.exists(orient_file):
return False, "No such file: {}. If the orientation file is not in your current working directory, make sure you have specified the correct input directory.".format(orient_file)
samp_file = os.path.join(output_dir_path, samp_file)
site_file = os.path.join(output_dir_path, site_file)
image_file = os.path.join(output_dir_path, image_file)
# validate input
if '4' in samp_con[0]:
pattern = re.compile('[4][-]\d')
result = pattern.match(samp_con)
if not result:
raise Exception(
"If using sample naming convention 4, you must provide the number of characters with which to distinguish sample from site. [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX)")
if '7' in samp_con[0]:
pattern = re.compile('[7][-]\d')
result = pattern.match(samp_con)
if not result:
raise Exception(
"If using sample naming convention 7, you must provide the number of characters with which to distinguish sample from site. [7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY")
if dec_correction_con == 2 and not dec_correction:
raise Exception(
"If using magnetic declination convention 2, you must also provide a declincation correction in degrees")
SampRecs, SiteRecs, ImageRecs = [], [], []
SampRecs_sorted, SiteRecs_sorted = {}, {}
if append:
try:
SampRecs, file_type = pmag.magic_read(samp_file)
# convert 3.0. sample file to 2.5 format
if data_model == 3:
SampRecs3 = SampRecs
SampRecs = []
for samp_rec in SampRecs3:
rec = map_magic.mapping(
samp_rec, map_magic.samp_magic3_2_magic2_map)
SampRecs.append(rec)
# magic_data dictionary sorted by sample_name
SampRecs_sorted = pmag.sort_magic_data(SampRecs, 'er_sample_name')
print('sample data to be appended to: ', samp_file)
except Exception as ex:
print(ex)
print('problem with existing file: ',
samp_file, ' will create new.')
try:
SiteRecs, file_type = pmag.magic_read(site_file)
# convert 3.0. site file to 2.5 format
if data_model == 3:
SiteRecs3 = SiteRecs
SiteRecs = []
for site_rec in SiteRecs3:
SiteRecs.append(map_magic.mapping(
site_rec, map_magic.site_magic3_2_magic2_map))
# magic_data dictionary sorted by site_name
SiteRecs_sorted = pmag.sort_magic_data(SiteRecs, 'er_site_name')
print('site data to be appended to: ', site_file)
except Exception as ex:
print(ex)
print('problem with existing file: ',
site_file, ' will create new.')
try:
ImageRecs, file_type = pmag.magic_read(image_file)
# convert from 3.0. --> 2.5
if data_model == 3:
ImageRecs3 = ImageRecs
ImageRecs = []
for image_rec in ImageRecs3:
ImageRecs.append(map_magic.mapping(
image_rec, map_magic.image_magic3_2_magic2_map))
print('image data to be appended to: ', image_file)
except:
print('problem with existing file: ',
image_file, ' will create new.')
#
# read in file to convert
#
OrData, location_name = pmag.magic_read(orient_file)
if location_name == "demag_orient":
location_name = ""
#
# step through the data sample by sample
#
# use map_magic in here...
for OrRec in OrData:
if 'mag_azimuth' not in list(OrRec.keys()):
OrRec['mag_azimuth'] = ""
if 'field_dip' not in list(OrRec.keys()):
OrRec['field_dip'] = ""
if OrRec['mag_azimuth'] == " ":
OrRec["mag_azimuth"] = ""
if OrRec['field_dip'] == " ":
OrRec["field_dip"] = ""
if 'sample_description' in list(OrRec.keys()):
sample_description = OrRec['sample_description']
else:
sample_description = ""
if 'cooling_rate_corr' in list(OrRec.keys()):
if 'cooling_rate_mcd' not in list(OrRec.keys()):
OrRec['cooling_rate_mcd'] = 'DA-CR'
sample_orientation_flag = 'g'
if 'sample_orientation_flag' in list(OrRec.keys()):
if OrRec['sample_orientation_flag'] == 'b' or OrRec["mag_azimuth"] == "":
sample_orientation_flag = 'b'
methcodes = method_codes # initialize method codes
if methcodes:
if 'method_codes' in list(OrRec.keys()) and OrRec['method_codes'].strip() != "":
methcodes = methcodes + ":" + \
OrRec['method_codes'] # add notes
else:
if 'method_codes' in list(OrRec.keys()) and OrRec['method_codes'].strip() != "":
methcodes = OrRec['method_codes'] # add notes
codes = methcodes.replace(" ", "").split(":")
sample_name = OrRec["sample_name"]
# patch added by rshaar 7/2016
# if sample_name already exists in er_samples.txt:
# merge the new data colmuns calculated by orientation_magic with the existing data colmuns
# this is done to make sure no previous data in er_samples.txt and
# er_sites.txt is lost.
if sample_name in list(SampRecs_sorted.keys()):
Prev_MagRec = SampRecs_sorted[sample_name][-1]
MagRec = Prev_MagRec
else:
Prev_MagRec = {}
MagRec = {}
MagRec["er_citation_names"] = "This study"
# the following keys were calculated or defined in the code above:
for key in ['sample_igsn', 'sample_texture', 'sample_cooling_rate',
'cooling_rate_corr', 'cooling_rate_mcd']:
val = OrRec.get(key, '')
if val:
MagRec[key] = val
elif key in list(Prev_MagRec.keys()):
MagRec[key] = Prev_MagRec[key]
else:
MagRec[key] = ""
if location_name != "":
MagRec["er_location_name"] = location_name
elif "er_location_name" in list(Prev_MagRec.keys()):
MagRec["er_location_name"] = Prev_MagRec["er_location_name"]
else:
MagRec["er_location_name"] = ""
# the following keys are taken directly from OrRec dictionary:
for key in ["sample_height", "er_sample_alternatives", "sample_orientation_flag"]:
if key in list(OrRec.keys()) and OrRec[key] != "":
MagRec[key] = OrRec[key]
elif key in list(Prev_MagRec.keys()):
MagRec[key] = Prev_MagRec[key]
else:
MagRec[key] = ""
# the following keys, if blank, used to be defined here as "Not Specified" :
for key in ["sample_class", "sample_lithology", "sample_type"]:
if key in list(OrRec.keys()) and OrRec[key] != "" and OrRec[key] != "Not Specified":
MagRec[key] = OrRec[key]
elif key in list(Prev_MagRec.keys()) and Prev_MagRec[key] != "" and Prev_MagRec[key] != "Not Specified":
MagRec[key] = Prev_MagRec[key]
else:
MagRec[key] = "" # "Not Specified"
# (rshaar) From here parse new information and replace previous, if exists:
#
# parse information common to all orientation methods
#
MagRec["er_sample_name"] = OrRec["sample_name"]
if "IGSN" in list(OrRec.keys()):
MagRec["sample_igsn"] = OrRec["IGSN"]
else:
MagRec["sample_igsn"] = ""
# MagRec["sample_height"],MagRec["sample_bed_dip_direction"],MagRec["sample_bed_dip"]="","",""
MagRec["sample_bed_dip_direction"], MagRec["sample_bed_dip"] = "", ""
# if "er_sample_alternatives" in OrRec.keys():
# MagRec["er_sample_alternatives"]=OrRec["sample_alternatives"]
sample = OrRec["sample_name"]
if OrRec['mag_azimuth'] == "" and OrRec['field_dip'] != "":
OrRec['mag_azimuth'] = '999'
if OrRec["mag_azimuth"] != "":
labaz, labdip = pmag.orient(
float(OrRec["mag_azimuth"]), float(OrRec["field_dip"]), or_con)
if labaz < 0:
labaz += 360.
else:
labaz, labdip = "", ""
if OrRec['mag_azimuth'] == '999':
labaz = ""
if "GPS_baseline" in list(OrRec.keys()) and OrRec['GPS_baseline'] != "":
newbaseline = OrRec["GPS_baseline"]
if newbaseline != "":
baseline = float(newbaseline)
MagRec['er_scientist_mail_names'] = OrRec.get('participants', '')
newlat = OrRec["lat"]
if newlat != "":
lat = float(newlat)
if lat == "":
print("No latitude specified for ! ", sample,
". Latitude is required for all samples.")
return False, "No latitude specified for ! " + sample + ". Latitude is required for all samples."
MagRec["sample_lat"] = '%11.5f' % (lat)
newlon = OrRec["long"]
if newlon != "":
lon = float(newlon)
if lon == "":
print("No longitude specified for ! ", sample,
". Longitude is required for all samples.")
return False, str("No longitude specified for ! " + sample + ". Longitude is required for all samples.")
MagRec["sample_lon"] = '%11.5f' % (lon)
if 'bedding_dip_direction' in list(OrRec.keys()):
newbeddir = OrRec["bedding_dip_direction"]
if newbeddir != "":
bed_dip_dir = OrRec['bedding_dip_direction']
if 'bedding_dip' in list(OrRec.keys()):
newbeddip = OrRec["bedding_dip"]
if newbeddip != "":
bed_dip = OrRec['bedding_dip']
MagRec["sample_bed_dip"] = bed_dip
MagRec["sample_bed_dip_direction"] = bed_dip_dir
# MagRec["sample_type"]=sample_type
if labdip != "":
MagRec["sample_dip"] = '%7.1f' % labdip
else:
MagRec["sample_dip"] = ""
if "date" in list(OrRec.keys()) and OrRec["date"] != "":
newdate = OrRec["date"]
if newdate != "":
date = newdate
mmddyy = date.split('/')
yy = int(mmddyy[2])
if yy > 50:
yy = 1900 + yy
else:
yy = 2000 + yy
decimal_year = yy + old_div(float(mmddyy[0]), 12)
sample_date = '%i:%s:%s' % (yy, mmddyy[0], mmddyy[1])
time = OrRec['hhmm']
if time:
sample_date += (':' + time)
MagRec["sample_date"] = sample_date.strip(':')
if labaz != "":
MagRec["sample_azimuth"] = '%7.1f' % (labaz)
else:
MagRec["sample_azimuth"] = ""
if "stratigraphic_height" in list(OrRec.keys()):
if OrRec["stratigraphic_height"] != "":
MagRec["sample_height"] = OrRec["stratigraphic_height"]
stratpos = OrRec["stratigraphic_height"]
elif OrRec["stratigraphic_height"] == '-1':
MagRec["sample_height"] = "" # make empty
elif stratpos != "":
# keep last record if blank
MagRec["sample_height"] = stratpos
#
# get magnetic declination (corrected with igrf value)
if dec_correction_con == 1 and MagRec['sample_azimuth'] != "":
x, y, z, f = pmag.doigrf(lon, lat, 0, decimal_year)
Dir = pmag.cart2dir((x, y, z))
dec_correction = Dir[0]
if "bedding_dip" in list(OrRec.keys()):
if OrRec["bedding_dip"] != "":
MagRec["sample_bed_dip"] = OrRec["bedding_dip"]
bed_dip = OrRec["bedding_dip"]
else:
MagRec["sample_bed_dip"] = bed_dip
else:
MagRec["sample_bed_dip"] = '0'
if "bedding_dip_direction" in list(OrRec.keys()):
if OrRec["bedding_dip_direction"] != "" and bed_correction == 1:
dd = float(OrRec["bedding_dip_direction"]) + dec_correction
if dd > 360.:
dd = dd - 360.
MagRec["sample_bed_dip_direction"] = '%7.1f' % (dd)
dip_dir = MagRec["sample_bed_dip_direction"]
else:
MagRec["sample_bed_dip_direction"] = OrRec['bedding_dip_direction']
else:
MagRec["sample_bed_dip_direction"] = '0'
if average_bedding:
if str(MagRec["sample_bed_dip_direction"]) and str(MagRec["sample_bed_dip"]):
BPs.append([float(MagRec["sample_bed_dip_direction"]),
float(MagRec["sample_bed_dip"]) - 90., 1.])
if MagRec['sample_azimuth'] == "" and MagRec['sample_dip'] == "":
MagRec["sample_declination_correction"] = ''
methcodes = methcodes + ':SO-NO'
MagRec["magic_method_codes"] = methcodes
MagRec['sample_description'] = sample_description
#
# work on the site stuff too
if 'site_name' in list(OrRec.keys()) and OrRec['site_name'] != "":
site = OrRec['site_name']
elif 'site_name' in list(Prev_MagRec.keys()) and Prev_MagRec['site_name'] != "":
site = Prev_MagRec['site_name']
else:
# parse out the site name
site = pmag.parse_site(OrRec["sample_name"], samp_con, Z)
MagRec["er_site_name"] = site
site_description = "" # overwrite any prior description
if 'site_description' in list(OrRec.keys()) and OrRec['site_description'] != "":
site_description = OrRec['site_description'].replace(",", ";")
if "image_name" in list(OrRec.keys()):
images = OrRec["image_name"].split(":")
if "image_look" in list(OrRec.keys()):
looks = OrRec['image_look'].split(":")
else:
looks = []
if "image_photographer" in list(OrRec.keys()):
photographers = OrRec['image_photographer'].split(":")
else:
photographers = []
for image in images:
if image != "" and image not in imagelist:
imagelist.append(image)
ImageRec = {}
ImageRec['er_image_name'] = image
ImageRec['image_type'] = "outcrop"
ImageRec['image_date'] = sample_date
ImageRec['er_citation_names'] = "This study"
ImageRec['er_location_name'] = location_name
ImageRec['er_site_name'] = MagRec['er_site_name']
k = images.index(image)
if len(looks) > k:
ImageRec['er_image_description'] = "Look direction: " + looks[k]
elif len(looks) >= 1:
ImageRec['er_image_description'] = "Look direction: " + looks[-1]
else:
ImageRec['er_image_description'] = "Look direction: unknown"
if len(photographers) > k:
ImageRec['er_photographer_mail_names'] = photographers[k]
elif len(photographers) >= 1:
ImageRec['er_photographer_mail_names'] = photographers[-1]
else:
ImageRec['er_photographer_mail_names'] = "unknown"
ImageOuts.append(ImageRec)
if site not in sitelist:
sitelist.append(site) # collect unique site names
# patch added by rshaar 7/2016
# if sample_name already exists in er_samples.txt:
# merge the new data colmuns calculated by orientation_magic with the existing data colmuns
# this is done to make sure no previous data in er_samples.txt and
# er_sites.txt is lost.
if site in list(SiteRecs_sorted.keys()):
Prev_MagRec = SiteRecs_sorted[site][-1]
SiteRec = Prev_MagRec
else:
Prev_MagRec = {}
SiteRec = {}
SiteRec["er_citation_names"] = "This study"
SiteRec["er_site_name"] = site
SiteRec["site_definition"] = "s"
if "er_location_name" in SiteRec and SiteRec.get("er_location_name"):
pass
elif key in list(Prev_MagRec.keys()) and Prev_MagRec[key] != "":
SiteRec[key] = Prev_MagRec[key]
else:
print('setting location name to ""')
SiteRec[key] = ""
for key in ["lat", "lon", "height"]:
if "site_" + key in list(Prev_MagRec.keys()) and Prev_MagRec["site_" + key] != "":
SiteRec["site_" + key] = Prev_MagRec["site_" + key]
else:
SiteRec["site_" + key] = MagRec["sample_" + key]
# SiteRec["site_lat"]=MagRec["sample_lat"]
# SiteRec["site_lon"]=MagRec["sample_lon"]
# SiteRec["site_height"]=MagRec["sample_height"]
for key in ["class", "lithology", "type"]:
if "site_" + key in list(Prev_MagRec.keys()) and Prev_MagRec["site_" + key] != "Not Specified":
SiteRec["site_" + key] = Prev_MagRec["site_" + key]
else:
SiteRec["site_" + key] = MagRec["sample_" + key]
# SiteRec["site_class"]=MagRec["sample_class"]
# SiteRec["site_lithology"]=MagRec["sample_lithology"]
# SiteRec["site_type"]=MagRec["sample_type"]
if site_description != "": # overwrite only if site_description has something
SiteRec["site_description"] = site_description
SiteOuts.append(SiteRec)
if sample not in samplelist:
samplelist.append(sample)
if MagRec['sample_azimuth'] != "": # assume magnetic compass only
MagRec['magic_method_codes'] = MagRec['magic_method_codes'] + ':SO-MAG'
MagRec['magic_method_codes'] = MagRec['magic_method_codes'].strip(
":")
SampOuts.append(MagRec)
if MagRec['sample_azimuth'] != "" and dec_correction_con != 3:
az = labaz + dec_correction
if az > 360.:
az = az - 360.
CMDRec = {}
for key in list(MagRec.keys()):
CMDRec[key] = MagRec[key] # make a copy of MagRec
CMDRec["sample_azimuth"] = '%7.1f' % (az)
CMDRec["magic_method_codes"] = methcodes + ':SO-CMD-NORTH'
CMDRec["magic_method_codes"] = CMDRec['magic_method_codes'].strip(
':')
CMDRec["sample_declination_correction"] = '%7.1f' % (
dec_correction)
if dec_correction_con == 1:
CMDRec['sample_description'] = sample_description + \
':Declination correction calculated from IGRF'
else:
CMDRec['sample_description'] = sample_description + \
':Declination correction supplied by user'
CMDRec["sample_description"] = CMDRec['sample_description'].strip(
':')
SampOuts.append(CMDRec)
if "mag_az_bs" in list(OrRec.keys()) and OrRec["mag_az_bs"] != "" and OrRec["mag_az_bs"] != " ":
SRec = {}
for key in list(MagRec.keys()):
SRec[key] = MagRec[key] # make a copy of MagRec
labaz = float(OrRec["mag_az_bs"])
az = labaz + dec_correction
if az > 360.:
az = az - 360.
SRec["sample_azimuth"] = '%7.1f' % (az)
SRec["sample_declination_correction"] = '%7.1f' % (
dec_correction)
SRec["magic_method_codes"] = methcodes + \
':SO-SIGHT-BACK:SO-CMD-NORTH'
SampOuts.append(SRec)
#
# check for suncompass data
#
# there are sun compass data
if "shadow_angle" in list(OrRec.keys()) and OrRec["shadow_angle"] != "":
if hours_from_gmt == "":
#hours_from_gmt=raw_input("Enter hours to subtract from time for GMT: [0] ")
hours_from_gmt = 0
SunRec, sundata = {}, {}
shad_az = float(OrRec["shadow_angle"])
if not OrRec["hhmm"]:
print('If using the column shadow_angle for sun compass data, you must also provide the time for each sample. Sample ',
sample, ' has shadow_angle but is missing the "hh:mm" column.')
else: # calculate sun declination
sundata["date"] = '%i:%s:%s:%s' % (
yy, mmddyy[0], mmddyy[1], OrRec["hhmm"])
sundata["delta_u"] = hours_from_gmt
sundata["lon"] = lon # do not truncate!
sundata["lat"] = lat # do not truncate!
sundata["shadow_angle"] = OrRec["shadow_angle"]
# now you can truncate
sundec = '%7.1f' % (pmag.dosundec(sundata))
for key in list(MagRec.keys()):
SunRec[key] = MagRec[key] # make a copy of MagRec
SunRec["sample_azimuth"] = sundec # do not truncate!
SunRec["sample_declination_correction"] = ''
SunRec["magic_method_codes"] = methcodes + ':SO-SUN'
SunRec["magic_method_codes"] = SunRec['magic_method_codes'].strip(
':')
SampOuts.append(SunRec)
#
# check for differential GPS data
#
# there are diff GPS data
if "prism_angle" in list(OrRec.keys()) and OrRec["prism_angle"] != "":
GPSRec = {}
for key in list(MagRec.keys()):
GPSRec[key] = MagRec[key] # make a copy of MagRec
prism_angle = float(OrRec["prism_angle"])
sundata["shadow_angle"] = OrRec["shadow_angle"]
sundec = pmag.dosundec(sundata)
for key in list(MagRec.keys()):
SunRec[key] = MagRec[key] # make a copy of MagRec
SunRec["sample_azimuth"] = '%7.1f' % (sundec)
SunRec["sample_declination_correction"] = ''
SunRec["magic_method_codes"] = methcodes + ':SO-SUN'
SunRec["magic_method_codes"] = SunRec['magic_method_codes'].strip(
':')
SampOuts.append(SunRec)
#
# check for differential GPS data
#
# there are diff GPS data
if "prism_angle" in list(OrRec.keys()) and OrRec["prism_angle"] != "":
GPSRec = {}
for key in list(MagRec.keys()):
GPSRec[key] = MagRec[key] # make a copy of MagRec
prism_angle = float(OrRec["prism_angle"])
laser_angle = float(OrRec["laser_angle"])
if OrRec["GPS_baseline"] != "":
baseline = float(OrRec["GPS_baseline"]) # new baseline
gps_dec = baseline + laser_angle + prism_angle - 90.
while gps_dec > 360.:
gps_dec = gps_dec - 360.
while gps_dec < 0:
gps_dec = gps_dec + 360.
for key in list(MagRec.keys()):
GPSRec[key] = MagRec[key] # make a copy of MagRec
GPSRec["sample_azimuth"] = '%7.1f' % (gps_dec)
GPSRec["sample_declination_correction"] = ''
GPSRec["magic_method_codes"] = methcodes + ':SO-GPS-DIFF'
SampOuts.append(GPSRec)
# there are differential GPS Azimuth data
if "GPS_Az" in list(OrRec.keys()) and OrRec["GPS_Az"] != "":
GPSRec = {}
for key in list(MagRec.keys()):
GPSRec[key] = MagRec[key] # make a copy of MagRec
GPSRec["sample_azimuth"] = '%7.1f' % (float(OrRec["GPS_Az"]))
GPSRec["sample_declination_correction"] = ''
GPSRec["magic_method_codes"] = methcodes + ':SO-GPS-DIFF'
SampOuts.append(GPSRec)
if average_bedding != "0" and fpars:
fpars = pmag.fisher_mean(BPs)
print('over-writing all bedding with average ')
Samps = []
for rec in SampOuts:
if average_bedding != "0" and fpars:
rec['sample_bed_dip_direction'] = '%7.1f' % (fpars['dec'])
rec['sample_bed_dip'] = '%7.1f' % (fpars['inc'] + 90.)
Samps.append(rec)
else:
Samps.append(rec)
for rec in SampRecs:
if rec['er_sample_name'] not in samplelist: # overwrite prior for this sample
Samps.append(rec)
for rec in SiteRecs:
if rec['er_site_name'] not in sitelist: # overwrite prior for this sample
SiteOuts.append(rec)
for rec in ImageRecs:
if rec['er_image_name'] not in imagelist: # overwrite prior for this sample
ImageOuts.append(rec)
print('saving data...')
SampsOut, keys = pmag.fillkeys(Samps)
Sites, keys = pmag.fillkeys(SiteOuts)
if data_model == 3:
SampsOut3 = []
Sites3 = []
for samp_rec in SampsOut:
new_rec = map_magic.mapping(
samp_rec, map_magic.samp_magic2_2_magic3_map)
SampsOut3.append(new_rec)
for site_rec in Sites:
new_rec = map_magic.mapping(
site_rec, map_magic.site_magic2_2_magic3_map)
Sites3.append(new_rec)
wrote_samps = pmag.magic_write(samp_file, SampsOut3, "samples")
wrote_sites = pmag.magic_write(site_file, Sites3, "sites")
else:
wrote_samps = pmag.magic_write(samp_file, SampsOut, "er_samples")
wrote_sites = pmag.magic_write(site_file, Sites, "er_sites")
if wrote_samps:
print("Data saved in ", samp_file, ' and ', site_file)
else:
print("No data found")
if len(ImageOuts) > 0:
# need to do conversion here 3.0. --> 2.5
Images, keys = pmag.fillkeys(ImageOuts)
image_type = "er_images"
if data_model == 3:
# convert 2.5 --> 3.0.
image_type = "images"
Images2 = Images
Images = []
for image_rec in Images2:
Images.append(map_magic.mapping(
image_rec, map_magic.image_magic2_2_magic3_map))
pmag.magic_write(image_file, Images, image_type)
print("Image info saved in ", image_file)
    return True, None


def orientation_magic(or_con=1, dec_correction_con=1, dec_correction=0, bed_correction=True,
samp_con='1', hours_from_gmt=0, method_codes='', average_bedding=False,
orient_file='orient.txt', samp_file='samples.txt', site_file='sites.txt',
output_dir_path='.', input_dir_path='', append=False, data_model=3):
"""
use this function to convert tab delimited field notebook information to MagIC formatted tables (er_samples and er_sites)
INPUT FORMAT
Input files must be tab delimited and have in the first line:
tab location_name
Note: The "location_name" will facilitate searching in the MagIC database. Data from different
"locations" should be put in separate files. The definition of a "location" is rather loose.
Also this is the word 'tab' not a tab, which will be indicated by '\t'.
The second line has the names of the columns (tab delimited), e.g.:
site_name sample_name mag_azimuth field_dip date lat long sample_lithology sample_type sample_class shadow_angle hhmm stratigraphic_height bedding_dip_direction bedding_dip GPS_baseline image_name image_look image_photographer participants method_codes site_description sample_description GPS_Az, sample_igsn, sample_texture, sample_cooling_rate, cooling_rate_corr, cooling_rate_mcd
Notes:
1) column order doesn't matter but the NAMES do.
2) sample_name, sample_lithology, sample_type, sample_class, lat and long are required. all others are optional.
3) If subsequent data are the same (e.g., date, bedding orientation, participants, stratigraphic_height),
you can leave the field blank and the program will fill in the last recorded information. BUT if you really want a blank stratigraphic_height, enter a '-1'. These will not be inherited and must be specified for each entry: image_name, look, photographer or method_codes
4) hhmm must be in the format: hh:mm and the hh must be in 24 hour time.
date must be mm/dd/yy (years < 50 will be converted to 20yy and >50 will be assumed 19yy). hours_from_gmt is the number of hours to SUBTRACT from hh to get to GMT.
5) image_name, image_look and image_photographer are colon delimited lists of file name (e.g., IMG_001.jpg) image look direction and the name of the photographer respectively. If all images had same look and photographer, just enter info once. The images will be assigned to the site for which they were taken - not at the sample level.
6) participants: Names of who helped take the samples. These must be a colon delimited list.
7) method_codes: Special method codes on a sample level, e.g., SO-GT5 which means the orientation is has an uncertainty of >5 degrees
for example if it broke off before orienting....
8) GPS_Az is the place to put directly determined GPS Azimuths, using, e.g., points along the drill direction.
9) sample_cooling_rate is the cooling rate in K per Ma
10) int_corr_cooling_rate
11) cooling_rate_mcd: data adjustment method code for cooling rate correction; DA-CR-EG is educated guess; DA-CR-PS is percent estimated from pilot samples; DA-CR-TRM is comparison between 2 TRMs acquired with slow and rapid cooling rates.
is the percent cooling rate factor to apply to specimens from this sample, DA-CR-XX is the method code
defaults:
orientation_magic(or_con=1, dec_correction_con=1, dec_correction=0, bed_correction=True, samp_con='1', hours_from_gmt=0, method_codes='', average_bedding=False, orient_file='orient.txt', samp_file='er_samples.txt', site_file='er_sites.txt', output_dir_path='.', input_dir_path='', append=False):
orientation conventions:
[1] Standard Pomeroy convention of azimuth and hade (degrees from vertical down)
of the drill direction (field arrow). lab arrow azimuth= sample_azimuth = mag_azimuth;
lab arrow dip = sample_dip =-field_dip. i.e. the lab arrow dip is minus the hade.
[2] Field arrow is the strike of the plane orthogonal to the drill direction,
Field dip is the hade of the drill direction. Lab arrow azimuth = mag_azimuth-90
Lab arrow dip = -field_dip
[3] Lab arrow is the same as the drill direction;
hade was measured in the field.
Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
[4] lab azimuth and dip are same as mag_azimuth, field_dip : use this for unoriented samples too
[5] Same as AZDIP convention explained below -
azimuth and inclination of the drill direction are mag_azimuth and field_dip;
lab arrow is as in [1] above.
lab azimuth is same as mag_azimuth,lab arrow dip=field_dip-90
[6] Lab arrow azimuth = mag_azimuth-90; Lab arrow dip = 90-field_dip
[7] see http://earthref.org/PmagPy/cookbook/#field_info for more information. You can customize other format yourself, or email [email protected] for help.
Magnetic declination convention:
[1] Use the IGRF value at the lat/long and date supplied [default]
[2] Will supply declination correction
[3] mag_az is already corrected in file
[4] Correct mag_az but not bedding_dip_dir
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
    [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
    [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
    NB: all others you will have to either customize
    yourself or e-mail [email protected] for help.
"""
# initialize some variables
# bed_correction used to be BedCorr
# dec_correction_con used to be corr
# dec_correction used to be DecCorr
# meths is now method_codes
# delta_u is now hours_from_gmt
input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, output_dir_path)
or_con, dec_correction_con, dec_correction = int(
or_con), int(dec_correction_con), float(dec_correction)
hours_from_gmt = float(hours_from_gmt)
stratpos = ""
# date of sampling, latitude (pos North), longitude (pos East)
date, lat, lon = "", "", ""
bed_dip, bed_dip_dir = "", ""
Lats, Lons = [], [] # list of latitudes and longitudes
# lists of Sample records and Site records
SampOuts, SiteOuts, ImageOuts = [], [], []
samplelist, sitelist, imagelist = [], [], []
Z = 1
newbaseline, newbeddir, newbeddip = "", "", ""
fpars = []
sclass, lithology, sample_type = "", "", ""
newclass, newlith, newtype = '', '', ''
BPs = [] # bedding pole declinations, bedding pole inclinations
image_file = "er_images.txt"
#
# use 3.0. default filenames when in 3.0.
# but, still allow for custom names
data_model = int(data_model)
if data_model == 3:
if samp_file == "er_samples.txt":
samp_file = "samples.txt"
if site_file == "er_sites.txt":
site_file = "sites.txt"
image_file = "images.txt"
orient_file = pmag.resolve_file_name(orient_file, input_dir_path)
if not os.path.exists(orient_file):
return False, "No such file: {}. If the orientation file is not in your current working directory, make sure you have specified the correct input directory.".format(orient_file)
samp_file = os.path.join(output_dir_path, samp_file)
site_file = os.path.join(output_dir_path, site_file)
image_file = os.path.join(output_dir_path, image_file)
# validate input
if '4' in samp_con[0]:
pattern = re.compile('[4][-]\d')
result = pattern.match(samp_con)
if not result:
raise Exception(
"If using sample naming convention 4, you must provide the number of characters with which to distinguish sample from site. [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX)")
if '7' in samp_con[0]:
pattern = re.compile('[7][-]\d')
result = pattern.match(samp_con)
if not result:
raise Exception(
"If using sample naming convention 7, you must provide the number of characters with which to distinguish sample from site. [7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY")
if dec_correction_con == 2 and not dec_correction:
raise Exception(
"If using magnetic declination convention 2, you must also provide a declincation correction in degrees")
SampRecs, SiteRecs, ImageRecs = [], [], []
SampRecs_sorted, SiteRecs_sorted = {}, {}
if append:
try:
SampRecs, file_type = pmag.magic_read(samp_file)
# convert 3.0. sample file to 2.5 format
if data_model == 3:
SampRecs3 = SampRecs
SampRecs = []
for samp_rec in SampRecs3:
rec = map_magic.mapping(
samp_rec, map_magic.samp_magic3_2_magic2_map)
SampRecs.append(rec)
# magic_data dictionary sorted by sample_name
SampRecs_sorted = pmag.sort_magic_data(SampRecs, 'er_sample_name')
print('sample data to be appended to: ', samp_file)
except Exception as ex:
print(ex)
print('problem with existing file: ',
samp_file, ' will create new.')
try:
SiteRecs, file_type = pmag.magic_read(site_file)
# convert 3.0. site file to 2.5 format
if data_model == 3:
SiteRecs3 = SiteRecs
SiteRecs = []
for site_rec in SiteRecs3:
SiteRecs.append(map_magic.mapping(
site_rec, map_magic.site_magic3_2_magic2_map))
# magic_data dictionary sorted by site_name
SiteRecs_sorted = pmag.sort_magic_data(SiteRecs, 'er_site_name')
print('site data to be appended to: ', site_file)
except Exception as ex:
print(ex)
print('problem with existing file: ',
site_file, ' will create new.')
try:
ImageRecs, file_type = pmag.magic_read(image_file)
# convert from 3.0. --> 2.5
if data_model == 3:
ImageRecs3 = ImageRecs
ImageRecs = []
for image_rec in ImageRecs3:
ImageRecs.append(map_magic.mapping(
image_rec, map_magic.image_magic3_2_magic2_map))
print('image data to be appended to: ', image_file)
except:
print('problem with existing file: ',
image_file, ' will create new.')
#
# read in file to convert
#
OrData, location_name = pmag.magic_read(orient_file)
if location_name == "demag_orient":
location_name = ""
#
# step through the data sample by sample
#
# use map_magic in here...
for OrRec in OrData:
if 'mag_azimuth' not in list(OrRec.keys()):
OrRec['mag_azimuth'] = ""
if 'field_dip' not in list(OrRec.keys()):
OrRec['field_dip'] = ""
if OrRec['mag_azimuth'] == " ":
OrRec["mag_azimuth"] = ""
if OrRec['field_dip'] == " ":
OrRec["field_dip"] = ""
if 'sample_description' in list(OrRec.keys()):
sample_description = OrRec['sample_description']
else:
sample_description = ""
if 'cooling_rate_corr' in list(OrRec.keys()):
if 'cooling_rate_mcd' not in list(OrRec.keys()):
OrRec['cooling_rate_mcd'] = 'DA-CR'
sample_orientation_flag = 'g'
if 'sample_orientation_flag' in list(OrRec.keys()):
if OrRec['sample_orientation_flag'] == 'b' or OrRec["mag_azimuth"] == "":
sample_orientation_flag = 'b'
methcodes = method_codes # initialize method codes
if methcodes:
if 'method_codes' in list(OrRec.keys()) and OrRec['method_codes'].strip() != "":
methcodes = methcodes + ":" + \
OrRec['method_codes'] # add notes
else:
if 'method_codes' in list(OrRec.keys()) and OrRec['method_codes'].strip() != "":
methcodes = OrRec['method_codes'] # add notes
codes = methcodes.replace(" ", "").split(":")
sample_name = OrRec["sample_name"]
# patch added by rshaar 7/2016
# if sample_name already exists in er_samples.txt:
# merge the new data colmuns calculated by orientation_magic with the existing data colmuns
# this is done to make sure no previous data in er_samples.txt and
# er_sites.txt is lost.
if sample_name in list(SampRecs_sorted.keys()):
Prev_MagRec = SampRecs_sorted[sample_name][-1]
MagRec = Prev_MagRec
else:
Prev_MagRec = {}
MagRec = {}
MagRec["er_citation_names"] = "This study"
# the following keys were calculated or defined in the code above:
for key in ['sample_igsn', 'sample_texture', 'sample_cooling_rate',
'cooling_rate_corr', 'cooling_rate_mcd']:
val = OrRec.get(key, '')
if val:
MagRec[key] = val
elif key in list(Prev_MagRec.keys()):
MagRec[key] = Prev_MagRec[key]
else:
MagRec[key] = ""
if location_name != "":
MagRec["er_location_name"] = location_name
elif "er_location_name" in list(Prev_MagRec.keys()):
MagRec["er_location_name"] = Prev_MagRec["er_location_name"]
else:
MagRec["er_location_name"] = ""
# the following keys are taken directly from OrRec dictionary:
for key in ["sample_height", "er_sample_alternatives", "sample_orientation_flag"]:
if key in list(OrRec.keys()) and OrRec[key] != "":
MagRec[key] = OrRec[key]
elif key in list(Prev_MagRec.keys()):
MagRec[key] = Prev_MagRec[key]
else:
MagRec[key] = ""
# the following keys, if blank, used to be defined here as "Not Specified" :
for key in ["sample_class", "sample_lithology", "sample_type"]:
if key in list(OrRec.keys()) and OrRec[key] != "" and OrRec[key] != "Not Specified":
MagRec[key] = OrRec[key]
elif key in list(Prev_MagRec.keys()) and Prev_MagRec[key] != "" and Prev_MagRec[key] != "Not Specified":
MagRec[key] = Prev_MagRec[key]
else:
MagRec[key] = "" # "Not Specified"
# (rshaar) From here parse new information and replace previous, if exists:
#
# parse information common to all orientation methods
#
MagRec["er_sample_name"] = OrRec["sample_name"]
if "IGSN" in list(OrRec.keys()):
MagRec["sample_igsn"] = OrRec["IGSN"]
else:
MagRec["sample_igsn"] = ""
# MagRec["sample_height"],MagRec["sample_bed_dip_direction"],MagRec["sample_bed_dip"]="","",""
MagRec["sample_bed_dip_direction"], MagRec["sample_bed_dip"] = "", ""
# if "er_sample_alternatives" in OrRec.keys():
# MagRec["er_sample_alternatives"]=OrRec["sample_alternatives"]
sample = OrRec["sample_name"]
if OrRec['mag_azimuth'] == "" and OrRec['field_dip'] != "":
OrRec['mag_azimuth'] = '999'
if OrRec["mag_azimuth"] != "":
labaz, labdip = pmag.orient(
float(OrRec["mag_azimuth"]), float(OrRec["field_dip"]), or_con)
if labaz < 0:
labaz += 360.
else:
labaz, labdip = "", ""
if OrRec['mag_azimuth'] == '999':
labaz = ""
if "GPS_baseline" in list(OrRec.keys()) and OrRec['GPS_baseline'] != "":
newbaseline = OrRec["GPS_baseline"]
if newbaseline != "":
baseline = float(newbaseline)
MagRec['er_scientist_mail_names'] = OrRec.get('participants', '')
newlat = OrRec["lat"]
if newlat != "":
lat = float(newlat)
if lat == "":
print("No latitude specified for ! ", sample,
". Latitude is required for all samples.")
return False, "No latitude specified for ! " + sample + ". Latitude is required for all samples."
MagRec["sample_lat"] = '%11.5f' % (lat)
newlon = OrRec["long"]
if newlon != "":
lon = float(newlon)
if lon == "":
print("No longitude specified for ! ", sample,
". Longitude is required for all samples.")
return False, str("No longitude specified for ! " + sample + ". Longitude is required for all samples.")
MagRec["sample_lon"] = '%11.5f' % (lon)
if 'bedding_dip_direction' in list(OrRec.keys()):
newbeddir = OrRec["bedding_dip_direction"]
if newbeddir != "":
bed_dip_dir = OrRec['bedding_dip_direction']
if 'bedding_dip' in list(OrRec.keys()):
newbeddip = OrRec["bedding_dip"]
if newbeddip != "":
bed_dip = OrRec['bedding_dip']
MagRec["sample_bed_dip"] = bed_dip
MagRec["sample_bed_dip_direction"] = bed_dip_dir
# MagRec["sample_type"]=sample_type
if labdip != "":
MagRec["sample_dip"] = '%7.1f' % labdip
else:
MagRec["sample_dip"] = ""
if "date" in list(OrRec.keys()) and OrRec["date"] != "":
newdate = OrRec["date"]
if newdate != "":
date = newdate
mmddyy = date.split('/')
yy = int(mmddyy[2])
if yy > 50:
yy = 1900 + yy
else:
yy = 2000 + yy
decimal_year = yy + old_div(float(mmddyy[0]), 12)
sample_date = '%i:%s:%s' % (yy, mmddyy[0], mmddyy[1])
time = OrRec['hhmm']
if time:
sample_date += (':' + time)
MagRec["sample_date"] = sample_date.strip(':')
if labaz != "":
MagRec["sample_azimuth"] = '%7.1f' % (labaz)
else:
MagRec["sample_azimuth"] = ""
if "stratigraphic_height" in list(OrRec.keys()):
if OrRec["stratigraphic_height"] != "":
MagRec["sample_height"] = OrRec["stratigraphic_height"]
stratpos = OrRec["stratigraphic_height"]
elif OrRec["stratigraphic_height"] == '-1':
MagRec["sample_height"] = "" # make empty
elif stratpos != "":
# keep last record if blank
MagRec["sample_height"] = stratpos
#
# get magnetic declination (corrected with igrf value)
if dec_correction_con == 1 and MagRec['sample_azimuth'] != "":
x, y, z, f = pmag.doigrf(lon, lat, 0, decimal_year)
Dir = pmag.cart2dir((x, y, z))
dec_correction = Dir[0]
if "bedding_dip" in list(OrRec.keys()):
if OrRec["bedding_dip"] != "":
MagRec["sample_bed_dip"] = OrRec["bedding_dip"]
bed_dip = OrRec["bedding_dip"]
else:
MagRec["sample_bed_dip"] = bed_dip
else:
MagRec["sample_bed_dip"] = '0'
if "bedding_dip_direction" in list(OrRec.keys()):
if OrRec["bedding_dip_direction"] != "" and bed_correction == 1:
dd = float(OrRec["bedding_dip_direction"]) + dec_correction
if dd > 360.:
dd = dd - 360.
MagRec["sample_bed_dip_direction"] = '%7.1f' % (dd)
dip_dir = MagRec["sample_bed_dip_direction"]
else:
MagRec["sample_bed_dip_direction"] = OrRec['bedding_dip_direction']
else:
MagRec["sample_bed_dip_direction"] = '0'
if average_bedding:
if str(MagRec["sample_bed_dip_direction"]) and str(MagRec["sample_bed_dip"]):
BPs.append([float(MagRec["sample_bed_dip_direction"]),
float(MagRec["sample_bed_dip"]) - 90., 1.])
if MagRec['sample_azimuth'] == "" and MagRec['sample_dip'] == "":
MagRec["sample_declination_correction"] = ''
methcodes = methcodes + ':SO-NO'
MagRec["magic_method_codes"] = methcodes
MagRec['sample_description'] = sample_description
#
# work on the site stuff too
if 'site_name' in list(OrRec.keys()) and OrRec['site_name'] != "":
site = OrRec['site_name']
elif 'site_name' in list(Prev_MagRec.keys()) and Prev_MagRec['site_name'] != "":
site = Prev_MagRec['site_name']
else:
# parse out the site name
site = pmag.parse_site(OrRec["sample_name"], samp_con, Z)
MagRec["er_site_name"] = site
site_description = "" # overwrite any prior description
if 'site_description' in list(OrRec.keys()) and OrRec['site_description'] != "":
site_description = OrRec['site_description'].replace(",", ";")
if "image_name" in list(OrRec.keys()):
images = OrRec["image_name"].split(":")
if "image_look" in list(OrRec.keys()):
looks = OrRec['image_look'].split(":")
else:
looks = []
if "image_photographer" in list(OrRec.keys()):
photographers = OrRec['image_photographer'].split(":")
else:
photographers = []
for image in images:
if image != "" and image not in imagelist:
imagelist.append(image)
ImageRec = {}
ImageRec['er_image_name'] = image
ImageRec['image_type'] = "outcrop"
ImageRec['image_date'] = sample_date
ImageRec['er_citation_names'] = "This study"
ImageRec['er_location_name'] = location_name
ImageRec['er_site_name'] = MagRec['er_site_name']
k = images.index(image)
if len(looks) > k:
ImageRec['er_image_description'] = "Look direction: " + looks[k]
elif len(looks) >= 1:
ImageRec['er_image_description'] = "Look direction: " + looks[-1]
else:
ImageRec['er_image_description'] = "Look direction: unknown"
if len(photographers) > k:
ImageRec['er_photographer_mail_names'] = photographers[k]
elif len(photographers) >= 1:
ImageRec['er_photographer_mail_names'] = photographers[-1]
else:
ImageRec['er_photographer_mail_names'] = "unknown"
ImageOuts.append(ImageRec)
if site not in sitelist:
sitelist.append(site) # collect unique site names
# patch added by rshaar 7/2016
# if sample_name already exists in er_samples.txt:
# merge the new data colmuns calculated by orientation_magic with the existing data colmuns
# this is done to make sure no previous data in er_samples.txt and
# er_sites.txt is lost.
if site in list(SiteRecs_sorted.keys()):
Prev_MagRec = SiteRecs_sorted[site][-1]
SiteRec = Prev_MagRec
else:
Prev_MagRec = {}
SiteRec = {}
SiteRec["er_citation_names"] = "This study"
SiteRec["er_site_name"] = site
SiteRec["site_definition"] = "s"
if "er_location_name" in SiteRec and SiteRec.get("er_location_name"):
pass
elif key in list(Prev_MagRec.keys()) and Prev_MagRec[key] != "":
SiteRec[key] = Prev_MagRec[key]
else:
print('setting location name to ""')
SiteRec[key] = ""
for key in ["lat", "lon", "height"]:
if "site_" + key in list(Prev_MagRec.keys()) and Prev_MagRec["site_" + key] != "":
SiteRec["site_" + key] = Prev_MagRec["site_" + key]
else:
SiteRec["site_" + key] = MagRec["sample_" + key]
# SiteRec["site_lat"]=MagRec["sample_lat"]
# SiteRec["site_lon"]=MagRec["sample_lon"]
# SiteRec["site_height"]=MagRec["sample_height"]
for key in ["class", "lithology", "type"]:
if "site_" + key in list(Prev_MagRec.keys()) and Prev_MagRec["site_" + key] != "Not Specified":
SiteRec["site_" + key] = Prev_MagRec["site_" + key]
else:
SiteRec["site_" + key] = MagRec["sample_" + key]
# SiteRec["site_class"]=MagRec["sample_class"]
# SiteRec["site_lithology"]=MagRec["sample_lithology"]
# SiteRec["site_type"]=MagRec["sample_type"]
if site_description != "": # overwrite only if site_description has something
SiteRec["site_description"] = site_description
SiteOuts.append(SiteRec)
if sample not in samplelist:
samplelist.append(sample)
if MagRec['sample_azimuth'] != "": # assume magnetic compass only
MagRec['magic_method_codes'] = MagRec['magic_method_codes'] + ':SO-MAG'
MagRec['magic_method_codes'] = MagRec['magic_method_codes'].strip(
":")
SampOuts.append(MagRec)
if MagRec['sample_azimuth'] != "" and dec_correction_con != 3:
az = labaz + dec_correction
if az > 360.:
az = az - 360.
CMDRec = {}
for key in list(MagRec.keys()):
CMDRec[key] = MagRec[key] # make a copy of MagRec
CMDRec["sample_azimuth"] = '%7.1f' % (az)
CMDRec["magic_method_codes"] = methcodes + ':SO-CMD-NORTH'
CMDRec["magic_method_codes"] = CMDRec['magic_method_codes'].strip(
':')
CMDRec["sample_declination_correction"] = '%7.1f' % (
dec_correction)
if dec_correction_con == 1:
CMDRec['sample_description'] = sample_description + \
':Declination correction calculated from IGRF'
else:
CMDRec['sample_description'] = sample_description + \
':Declination correction supplied by user'
CMDRec["sample_description"] = CMDRec['sample_description'].strip(
':')
SampOuts.append(CMDRec)
if "mag_az_bs" in list(OrRec.keys()) and OrRec["mag_az_bs"] != "" and OrRec["mag_az_bs"] != " ":
SRec = {}
for key in list(MagRec.keys()):
SRec[key] = MagRec[key] # make a copy of MagRec
labaz = float(OrRec["mag_az_bs"])
az = labaz + dec_correction
if az > 360.:
az = az - 360.
SRec["sample_azimuth"] = '%7.1f' % (az)
SRec["sample_declination_correction"] = '%7.1f' % (
dec_correction)
SRec["magic_method_codes"] = methcodes + \
':SO-SIGHT-BACK:SO-CMD-NORTH'
SampOuts.append(SRec)
#
# check for suncompass data
#
# there are sun compass data
if "shadow_angle" in list(OrRec.keys()) and OrRec["shadow_angle"] != "":
if hours_from_gmt == "":
#hours_from_gmt=raw_input("Enter hours to subtract from time for GMT: [0] ")
hours_from_gmt = 0
SunRec, sundata = {}, {}
shad_az = float(OrRec["shadow_angle"])
if not OrRec["hhmm"]:
print('If using the column shadow_angle for sun compass data, you must also provide the time for each sample. Sample ',
sample, ' has shadow_angle but is missing the "hh:mm" column.')
else: # calculate sun declination
sundata["date"] = '%i:%s:%s:%s' % (
yy, mmddyy[0], mmddyy[1], OrRec["hhmm"])
sundata["delta_u"] = hours_from_gmt
sundata["lon"] = lon # do not truncate!
sundata["lat"] = lat # do not truncate!
sundata["shadow_angle"] = OrRec["shadow_angle"]
# now you can truncate
sundec = '%7.1f' % (pmag.dosundec(sundata))
for key in list(MagRec.keys()):
SunRec[key] = MagRec[key] # make a copy of MagRec
SunRec["sample_azimuth"] = sundec # do not truncate!
SunRec["sample_declination_correction"] = ''
SunRec["magic_method_codes"] = methcodes + ':SO-SUN'
SunRec["magic_method_codes"] = SunRec['magic_method_codes'].strip(
':')
SampOuts.append(SunRec)
#
# check for differential GPS data
#
# there are diff GPS data
if "prism_angle" in list(OrRec.keys()) and OrRec["prism_angle"] != "":
GPSRec = {}
for key in list(MagRec.keys()):
GPSRec[key] = MagRec[key] # make a copy of MagRec
prism_angle = float(OrRec["prism_angle"])
sundata["shadow_angle"] = OrRec["shadow_angle"]
sundec = pmag.dosundec(sundata)
for key in list(MagRec.keys()):
SunRec[key] = MagRec[key] # make a copy of MagRec
SunRec["sample_azimuth"] = '%7.1f' % (sundec)
SunRec["sample_declination_correction"] = ''
SunRec["magic_method_codes"] = methcodes + ':SO-SUN'
SunRec["magic_method_codes"] = SunRec['magic_method_codes'].strip(
':')
SampOuts.append(SunRec)
#
# check for differential GPS data
#
# there are diff GPS data
if "prism_angle" in list(OrRec.keys()) and OrRec["prism_angle"] != "":
GPSRec = {}
for key in list(MagRec.keys()):
GPSRec[key] = MagRec[key] # make a copy of MagRec
prism_angle = float(OrRec["prism_angle"])
laser_angle = float(OrRec["laser_angle"])
if OrRec["GPS_baseline"] != "":
baseline = float(OrRec["GPS_baseline"]) # new baseline
gps_dec = baseline + laser_angle + prism_angle - 90.
while gps_dec > 360.:
gps_dec = gps_dec - 360.
while gps_dec < 0:
gps_dec = gps_dec + 360.
for key in list(MagRec.keys()):
GPSRec[key] = MagRec[key] # make a copy of MagRec
GPSRec["sample_azimuth"] = '%7.1f' % (gps_dec)
GPSRec["sample_declination_correction"] = ''
GPSRec["magic_method_codes"] = methcodes + ':SO-GPS-DIFF'
SampOuts.append(GPSRec)
# there are differential GPS Azimuth data
if "GPS_Az" in list(OrRec.keys()) and OrRec["GPS_Az"] != "":
GPSRec = {}
for key in list(MagRec.keys()):
GPSRec[key] = MagRec[key] # make a copy of MagRec
GPSRec["sample_azimuth"] = '%7.1f' % (float(OrRec["GPS_Az"]))
GPSRec["sample_declination_correction"] = ''
GPSRec["magic_method_codes"] = methcodes + ':SO-GPS-DIFF'
SampOuts.append(GPSRec)
if average_bedding != "0" and fpars:
fpars = pmag.fisher_mean(BPs)
print('over-writing all bedding with average ')
Samps = []
for rec in SampOuts:
if average_bedding != "0" and fpars:
rec['sample_bed_dip_direction'] = '%7.1f' % (fpars['dec'])
rec['sample_bed_dip'] = '%7.1f' % (fpars['inc'] + 90.)
Samps.append(rec)
else:
Samps.append(rec)
for rec in SampRecs:
if rec['er_sample_name'] not in samplelist: # overwrite prior for this sample
Samps.append(rec)
for rec in SiteRecs:
if rec['er_site_name'] not in sitelist: # overwrite prior for this sample
SiteOuts.append(rec)
for rec in ImageRecs:
if rec['er_image_name'] not in imagelist: # overwrite prior for this sample
ImageOuts.append(rec)
print('saving data...')
SampsOut, keys = pmag.fillkeys(Samps)
Sites, keys = pmag.fillkeys(SiteOuts)
if data_model == 3:
SampsOut3 = []
Sites3 = []
for samp_rec in SampsOut:
new_rec = map_magic.mapping(
samp_rec, map_magic.samp_magic2_2_magic3_map)
SampsOut3.append(new_rec)
for site_rec in Sites:
new_rec = map_magic.mapping(
site_rec, map_magic.site_magic2_2_magic3_map)
Sites3.append(new_rec)
wrote_samps = pmag.magic_write(samp_file, SampsOut3, "samples")
wrote_sites = pmag.magic_write(site_file, Sites3, "sites")
else:
wrote_samps = pmag.magic_write(samp_file, SampsOut, "er_samples")
wrote_sites = pmag.magic_write(site_file, Sites, "er_sites")
if wrote_samps:
print("Data saved in ", samp_file, ' and ', site_file)
else:
print("No data found")
if len(ImageOuts) > 0:
# need to do conversion here 3.0. --> 2.5
Images, keys = pmag.fillkeys(ImageOuts)
image_type = "er_images"
if data_model == 3:
# convert 2.5 --> 3.0.
image_type = "images"
Images2 = Images
Images = []
for image_rec in Images2:
Images.append(map_magic.mapping(
image_rec, map_magic.image_magic2_2_magic3_map))
pmag.magic_write(image_file, Images, image_type)
print("Image info saved in ", image_file)
return True, None | use this function to convert tab delimited field notebook information to MagIC formatted tables (er_samples and er_sites)
INPUT FORMAT
Input files must be tab delimited and have in the first line:
tab location_name
Note: The "location_name" will facilitate searching in the MagIC database. Data from different
"locations" should be put in separate files. The definition of a "location" is rather loose.
Also this is the word 'tab' not a tab, which will be indicated by '\t'.
The second line has the names of the columns (tab delimited), e.g.:
site_name sample_name mag_azimuth field_dip date lat long sample_lithology sample_type sample_class shadow_angle hhmm stratigraphic_height bedding_dip_direction bedding_dip GPS_baseline image_name image_look image_photographer participants method_codes site_description sample_description GPS_Az, sample_igsn, sample_texture, sample_cooling_rate, cooling_rate_corr, cooling_rate_mcd
Notes:
1) column order doesn't matter but the NAMES do.
2) sample_name, sample_lithology, sample_type, sample_class, lat and long are required. all others are optional.
3) If subsequent data are the same (e.g., date, bedding orientation, participants, stratigraphic_height),
you can leave the field blank and the program will fill in the last recorded information. BUT if you really want a blank stratigraphic_height, enter a '-1'. These will not be inherited and must be specified for each entry: image_name, look, photographer or method_codes
4) hhmm must be in the format: hh:mm and the hh must be in 24 hour time.
date must be mm/dd/yy (years < 50 will be converted to 20yy and >50 will be assumed 19yy). hours_from_gmt is the number of hours to SUBTRACT from hh to get to GMT.
5) image_name, image_look and image_photographer are colon delimited lists of file name (e.g., IMG_001.jpg) image look direction and the name of the photographer respectively. If all images had same look and photographer, just enter info once. The images will be assigned to the site for which they were taken - not at the sample level.
6) participants: Names of who helped take the samples. These must be a colon delimited list.
7) method_codes: Special method codes on a sample level, e.g., SO-GT5 which means the orientation is has an uncertainty of >5 degrees
for example if it broke off before orienting....
8) GPS_Az is the place to put directly determined GPS Azimuths, using, e.g., points along the drill direction.
9) sample_cooling_rate is the cooling rate in K per Ma
10) int_corr_cooling_rate
11) cooling_rate_mcd: data adjustment method code for cooling rate correction; DA-CR-EG is educated guess; DA-CR-PS is percent estimated from pilot samples; DA-CR-TRM is comparison between 2 TRMs acquired with slow and rapid cooling rates.
is the percent cooling rate factor to apply to specimens from this sample, DA-CR-XX is the method code
defaults:
orientation_magic(or_con=1, dec_correction_con=1, dec_correction=0, bed_correction=True, samp_con='1', hours_from_gmt=0, method_codes='', average_bedding=False, orient_file='orient.txt', samp_file='er_samples.txt', site_file='er_sites.txt', output_dir_path='.', input_dir_path='', append=False):
orientation conventions:
[1] Standard Pomeroy convention of azimuth and hade (degrees from vertical down)
of the drill direction (field arrow). lab arrow azimuth= sample_azimuth = mag_azimuth;
lab arrow dip = sample_dip =-field_dip. i.e. the lab arrow dip is minus the hade.
[2] Field arrow is the strike of the plane orthogonal to the drill direction,
Field dip is the hade of the drill direction. Lab arrow azimuth = mag_azimuth-90
Lab arrow dip = -field_dip
[3] Lab arrow is the same as the drill direction;
hade was measured in the field.
Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
[4] lab azimuth and dip are same as mag_azimuth, field_dip : use this for unoriented samples too
[5] Same as AZDIP convention explained below -
azimuth and inclination of the drill direction are mag_azimuth and field_dip;
lab arrow is as in [1] above.
lab azimuth is same as mag_azimuth,lab arrow dip=field_dip-90
[6] Lab arrow azimuth = mag_azimuth-90; Lab arrow dip = 90-field_dip
[7] see http://earthref.org/PmagPy/cookbook/#field_info for more information. You can customize other format yourself, or email [email protected] for help.
Magnetic declination convention:
[1] Use the IGRF value at the lat/long and date supplied [default]
[2] Will supply declination correction
[3] mag_az is already corrected in file
[4] Correct mag_az but not bedding_dip_dir
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
NB: all others you will have to either customize your
self or e-mail [email protected] for help. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L5469-L6152 |
def azdip_magic(orient_file='orient.txt', samp_file="samples.txt", samp_con="1", Z=1, method_codes='FS-FD', location_name='unknown', append=False, output_dir='.', input_dir='.', data_model=3):
    """
    Takes a space delimited AzDip file and converts it to MagIC formatted tables.

    Parameters
    __________
    orient_file : name of azdip formatted input file
    samp_file : name of samples.txt formatted output file
    samp_con : integer of sample orientation convention
        [1] XXXXY: where XXXX is an arbitrary length site designation and Y
            is the single character sample designation.  e.g., TG001a is the
            first sample from site TG001.    [default]
        [2] XXXX-YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
        [4-Z] XXXX[YYY]:  YYY is sample designation with Z characters from site XXX
        [5] site name same as sample
        [6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
        [7-Z] [XXXX]YYY:  XXXX is site designation with Z characters with sample name XXXXYYYY
    method_codes :  colon delimited string with the following as desired
         FS-FD field sampling done with a drill
         FS-H field sampling done with hand samples
         FS-LOC-GPS  field location done with GPS
         FS-LOC-MAP  field location done with map
         SO-POM   a Pomeroy orientation device was used
         SO-ASC   an ASC orientation device was used
         SO-MAG   orientation with magnetic compass
    location_name : location of samples
    append : boolean.  if True, append to the output file
    output_dir : path to output file directory
    input_dir : path to input file directory
    data_model : MagIC data model version (2 or 3)

    Returns
    _______
    (True, None) on success,
    (False, error message) if the orientation file contains no data.

    INPUT FORMAT
        Input files must be space delimited:
            Samp  Az Dip Strike Dip
        Orientation convention:
            Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
            e.g. field_dip is degrees from horizontal of drill direction
        Magnetic declination convention:
            Az is already corrected in file
    """
    data_model = int(data_model)
    # map the default output file name onto the one that matches the data
    # model: MagIC 2.5 writes er_samples.txt, MagIC 3 writes samples.txt
    if (data_model != 3) and (samp_file == "samples.txt"):
        samp_file = "er_samples.txt"
    if (data_model == 3) and (samp_file == "er_samples.txt"):
        # NOTE(review): the original condition tested data_model == 2, which
        # immediately undid the rename above and left 2.5 output in samples.txt
        samp_file = "samples.txt"
    version_num = pmag.get_version()
    or_con = "3"   # AzDip files always use orientation convention 3
    SampRecs = []  # previously saved sample records (used when appending)
    samp_file = pmag.resolve_file_name(samp_file, output_dir)
    orient_file = pmag.resolve_file_name(orient_file, input_dir)
    input_dir = os.path.split(orient_file)[0]
    output_dir = os.path.split(samp_file)[0]
    if append:
        try:
            SampRecs, file_type = pmag.magic_read(samp_file)
            print("sample data to be appended to: ", samp_file)
        except Exception:
            print('problem with existing samp file: ',
                  samp_file, ' will create new')
    #
    # read in the file to convert; the context manager guarantees the handle
    # is closed (the original code closed it twice by hand, once after a
    # potential early return)
    #
    with open(orient_file, 'r') as azfile:
        AzDipDat = azfile.readlines()
    if not AzDipDat:
        return False, 'No data in orientation file, please try again'
    SampOut, samplist = [], []
    for line in AzDipDat:
        orec = line.split()
        if len(orec) > 2:
            labaz, labdip = pmag.orient(float(orec[1]), float(orec[2]), or_con)
            bed_dip = float(orec[4])
            if bed_dip != 0:
                # assume dip to right of strike
                bed_dip_dir = float(orec[3]) - 90.
            else:
                bed_dip_dir = float(orec[3])
            MagRec = {}
            MagRec["er_location_name"] = location_name
            MagRec["er_citation_names"] = "This study"
            # parse information common to all orientation methods
            MagRec["er_sample_name"] = orec[0]
            MagRec["sample_bed_dip"] = '%7.1f' % (bed_dip)
            MagRec["sample_bed_dip_direction"] = '%7.1f' % (bed_dip_dir)
            MagRec["sample_dip"] = '%7.1f' % (labdip)
            MagRec["sample_azimuth"] = '%7.1f' % (labaz)
            methods = method_codes.replace(" ", "").split(":")
            # flag records that carry no sample-orientation (SO-xxx) method
            if not any("SO" in meth.split("-") for meth in methods):
                method_codes = method_codes + ":SO-NO"
            MagRec["magic_method_codes"] = method_codes
            # parse out the site name
            site = pmag.parse_site(orec[0], samp_con, Z)
            MagRec["er_site_name"] = site
            MagRec['magic_software_packages'] = version_num
            SampOut.append(MagRec)
            if MagRec['er_sample_name'] not in samplist:
                samplist.append(MagRec['er_sample_name'])
    # keep previously saved records only for samples that were not re-read;
    # the original compared each record *dict* against a list of sample
    # *names*, so on append every prior record was duplicated
    for samp in SampRecs:
        prior_name = samp.get('er_sample_name', samp.get('sample'))
        if prior_name not in samplist:
            SampOut.append(samp)
    Samps, keys = pmag.fillkeys(SampOut)
    if data_model == 2:
        # write to file
        pmag.magic_write(samp_file, Samps, "er_samples")
    else:
        # translate sample records to MagIC 3
        Samps3 = []
        for samp in Samps:
            Samps3.append(map_magic.mapping(
                samp, map_magic.samp_magic2_2_magic3_map))
        # write to file
        pmag.magic_write(samp_file, Samps3, "samples")
    print("Data saved in ", samp_file)
    return True, None
return True, None | python | def azdip_magic(orient_file='orient.txt', samp_file="samples.txt", samp_con="1", Z=1, method_codes='FS-FD', location_name='unknown', append=False, output_dir='.', input_dir='.', data_model=3):
"""
takes space delimited AzDip file and converts to MagIC formatted tables
Parameters
__________
orient_file : name of azdip formatted input file
samp_file : name of samples.txt formatted output file
samp_con : integer of sample orientation convention
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
method_codes : colon delimited string with the following as desired
FS-FD field sampling done with a drill
FS-H field sampling done with hand samples
FS-LOC-GPS field location done with GPS
FS-LOC-MAP field location done with map
SO-POM a Pomeroy orientation device was used
SO-ASC an ASC orientation device was used
SO-MAG orientation with magnetic compass
location_name : location of samples
append : boolean. if True, append to the output file
output_dir : path to output file directory
input_dir : path to input file directory
data_model : MagIC data model.
INPUT FORMAT
Input files must be space delimited:
Samp Az Dip Strike Dip
Orientation convention:
Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
e.g. field_dip is degrees from horizontal of drill direction
Magnetic declination convention:
Az is already corrected in file
"""
#
# initialize variables
#
data_model = int(data_model)
if (data_model != 3) and (samp_file == "samples.txt"):
samp_file = "er_samples.txt"
if (data_model == 2) and (samp_file == "er_samples.txt"):
samp_file = "samples.txt"
DEBUG = 0
version_num = pmag.get_version()
or_con, corr = "3", "1"
# date of sampling, latitude (pos North), longitude (pos East)
date, lat, lon = "", "", ""
bed_dip, bed_dip_dir = "", ""
participantlist = ""
sites = [] # list of site names
Lats, Lons = [], [] # list of latitudes and longitudes
# lists of Sample records and Site records
SampRecs, SiteRecs, ImageRecs, imagelist = [], [], [], []
average_bedding = "1", 1, "0"
newbaseline, newbeddir, newbeddip = "", "", ""
delta_u = "0"
sclass, lithology, type = "", "", ""
newclass, newlith, newtype = '', '', ''
user = ""
corr == "3"
DecCorr = 0.
samp_file = pmag.resolve_file_name(samp_file, output_dir)
orient_file = pmag.resolve_file_name(orient_file, input_dir)
input_dir = os.path.split(orient_file)[0]
output_dir = os.path.split(samp_file)[0]
#
#
if append:
try:
SampRecs, file_type = pmag.magic_read(samp_file)
print("sample data to be appended to: ", samp_file)
except:
print('problem with existing samp file: ',
samp_file, ' will create new')
#
# read in file to convert
#
azfile = open(orient_file, 'r')
AzDipDat = azfile.readlines()
azfile.close()
if not AzDipDat:
return False, 'No data in orientation file, please try again'
azfile.close()
SampOut, samplist = [], []
for line in AzDipDat:
orec = line.split()
if len(orec) > 2:
labaz, labdip = pmag.orient(float(orec[1]), float(orec[2]), or_con)
bed_dip = float(orec[4])
if bed_dip != 0:
bed_dip_dir = float(orec[3]) - \
90. # assume dip to right of strike
else:
bed_dip_dir = float(orec[3]) # assume dip to right of strike
MagRec = {}
MagRec["er_location_name"] = location_name
MagRec["er_citation_names"] = "This study"
#
# parse information common to all orientation methods
#
MagRec["er_sample_name"] = orec[0]
MagRec["sample_bed_dip"] = '%7.1f' % (bed_dip)
MagRec["sample_bed_dip_direction"] = '%7.1f' % (bed_dip_dir)
MagRec["sample_dip"] = '%7.1f' % (labdip)
MagRec["sample_azimuth"] = '%7.1f' % (labaz)
methods = method_codes.replace(" ", "").split(":")
OR = 0
for method in methods:
method_type = method.split("-")
if "SO" in method_type:
OR = 1
if OR == 0:
method_codes = method_codes + ":SO-NO"
MagRec["magic_method_codes"] = method_codes
# parse out the site name
site = pmag.parse_site(orec[0], samp_con, Z)
MagRec["er_site_name"] = site
MagRec['magic_software_packages'] = version_num
SampOut.append(MagRec)
if MagRec['er_sample_name'] not in samplist:
samplist.append(MagRec['er_sample_name'])
for samp in SampRecs:
if samp not in samplist:
SampOut.append(samp)
Samps, keys = pmag.fillkeys(SampOut)
if data_model == 2:
# write to file
pmag.magic_write(samp_file, Samps, "er_samples")
else:
# translate sample records to MagIC 3
Samps3 = []
for samp in Samps:
Samps3.append(map_magic.mapping(
samp, map_magic.samp_magic2_2_magic3_map))
# write to file
pmag.magic_write(samp_file, Samps3, "samples")
print("Data saved in ", samp_file)
return True, None | takes space delimited AzDip file and converts to MagIC formatted tables
Parameters
__________
orient_file : name of azdip formatted input file
samp_file : name of samples.txt formatted output file
samp_con : integer of sample orientation convention
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
method_codes : colon delimited string with the following as desired
FS-FD field sampling done with a drill
FS-H field sampling done with hand samples
FS-LOC-GPS field location done with GPS
FS-LOC-MAP field location done with map
SO-POM a Pomeroy orientation device was used
SO-ASC an ASC orientation device was used
SO-MAG orientation with magnetic compass
location_name : location of samples
append : boolean. if True, append to the output file
output_dir : path to output file directory
input_dir : path to input file directory
data_model : MagIC data model.
INPUT FORMAT
Input files must be space delimited:
Samp Az Dip Strike Dip
Orientation convention:
Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
e.g. field_dip is degrees from horizontal of drill direction
Magnetic declination convention:
Az is already corrected in file | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L6155-L6301 |
def dayplot_magic(path_to_file='.', hyst_file="specimens.txt", rem_file='',
                  save=True, save_folder='.', fmt='svg', data_model=3,
                  interactive=False, contribution=None):
    """
    Makes 'day plots' (Day et al. 1977) and squareness/coercivity plots
    (Neel, 1955; plots after Tauxe et al., 2002); plots 'linear mixing'
    curve from Dunlop and Carter-Stiglitz (2006).

    Parameters (defaults are used if not specified)
    ----------
    path_to_file : path to directory that contains files (default is current directory, '.')
        the default input file is 'specimens.txt' (data_model=3);
        if data_model = 2, these are the expected inputs:
        hyst_file : hysteresis file (e.g., 'rmag_hysteresis.txt')
        rem_file : remanence file (e.g., 'rmag_remanence.txt')
    save : boolean argument to save plots (default is True)
    save_folder : relative directory where plots will be saved (default is current directory, '.')
    fmt : format of saved figures (default is 'svg')
    data_model : MagIC data model version, 2 or 3 (default 3)
    interactive : if True, draw the figures and ask before saving
    contribution : optional cb.Contribution to use instead of reading files (data model 3 only)

    Returns
    -------
    (status, file_names) : status is True unless no specimen data could be
    read; file_names lists the plots written (empty when nothing was saved).
    """
    hyst_path = os.path.join(path_to_file, hyst_file)
    if data_model == 2 and rem_file != '':
        rem_path = os.path.join(path_to_file, rem_file)
    dir_path = path_to_file
    verbose = pmagplotlib.verbose
    # define figure numbers for the Day, S-Bc, S-Bcr and Bcr1-Bcr2 plots
    DSC = {}
    DSC['day'], DSC['S-Bc'], DSC['S-Bcr'], DSC['bcr1-bcr2'] = 1, 2, 3, 4
    hyst_data, file_type = pmag.magic_read(hyst_path)
    rem_data = []
    if data_model == 2 and rem_file != "":
        rem_data, file_type = pmag.magic_read(rem_path)
    S, BcrBc, Bcr2, Bc, hsids, Bcr = [], [], [], [], [], []
    Bcr1, Bcr1Bc, S1 = [], [], []
    # locations seen in the data -- used for naming the output plots.
    # NOTE(review): the original initialized this only in the data-model-3
    # branch (data model 2 raised NameError when building the file names)
    # and collected data-model-2 locations into a concatenated string with a
    # substring membership test.
    loc_list = []
    if data_model == 2:
        for rec in hyst_data:
            if 'er_location_name' in list(rec.keys()) and rec['er_location_name'] not in loc_list:
                loc_list.append(rec['er_location_name'])
            if rec['hysteresis_bcr'] != "" and rec['hysteresis_mr_moment'] != "":
                S.append(old_div(float(rec['hysteresis_mr_moment']), float(
                    rec['hysteresis_ms_moment'])))
                Bcr.append(float(rec['hysteresis_bcr']))
                Bc.append(float(rec['hysteresis_bc']))
                BcrBc.append(old_div(Bcr[-1], Bc[-1]))
                # synthetic specimens are keyed by their synthetic name
                if 'er_synthetic_name' in list(rec.keys()) and rec['er_synthetic_name'] != "":
                    rec['er_specimen_name'] = rec['er_synthetic_name']
                hsids.append(rec['er_specimen_name'])
        if len(rem_data) > 0:
            for rec in rem_data:
                if rec['remanence_bcr'] != "" and float(rec['remanence_bcr']) > 0:
                    try:
                        ind = hsids.index(rec['er_specimen_name'])
                        Bcr1.append(float(rec['remanence_bcr']))
                        Bcr1Bc.append(old_div(Bcr1[-1], Bc[ind]))
                        S1.append(S[ind])
                        Bcr2.append(Bcr[ind])
                    except ValueError:
                        if verbose:
                            print('hysteresis data for ',
                                  rec['er_specimen_name'], ' not found')
    else:
        fnames = {'specimens': hyst_file}
        if contribution:
            con = contribution
        else:
            con = cb.Contribution(dir_path, read_tables=['specimens'],
                                  custom_filenames=fnames)
        if 'specimens' not in con.tables:
            print('-E- No specimen file found in {}'.format(os.path.realpath(dir_path)))
            return False, []
        spec_container = con.tables['specimens']
        spec_df = spec_container.df
        # get as much data as possible for naming plots
        con.propagate_location_to_specimens()
        if 'location' in spec_df.columns:
            loc_list = spec_df['location'].unique()
        do_rem = bool('rem_bcr' in spec_df.columns)
        for ind, row in spec_df.iterrows():
            if row['hyst_bcr'] and row['hyst_mr_moment']:
                S.append(
                    old_div(float(row['hyst_mr_moment']), float(row['hyst_ms_moment'])))
                Bcr.append(float(row['hyst_bcr']))
                Bc.append(float(row['hyst_bc']))
                BcrBc.append(old_div(Bcr[-1], Bc[-1]))
                hsids.append(row['specimen'])
            if do_rem:
                if row['rem_bcr'] and float(row['rem_bcr']) > 0:
                    try:
                        Bcr1.append(float(row['rem_bcr']))
                        Bcr1Bc.append(old_div(Bcr1[-1], Bc[-1]))
                        S1.append(S[-1])
                        Bcr2.append(Bcr[-1])
                    except ValueError:
                        if verbose:
                            print('hysteresis data for ',
                                  row['specimen'], end=' ')
                            print(' not found')
    #
    # now plot the day and S-Bc, S-Bcr plots
    #
    fnames = {'day': os.path.join(save_folder, "_".join(loc_list) + '_Day.' + fmt),
              'S-Bcr': os.path.join(save_folder, "_".join(loc_list) + '_S-Bcr.' + fmt),
              'S-Bc': os.path.join(save_folder, "_".join(loc_list) + '_S-Bc.' + fmt)}
    if len(Bcr1) > 0:
        # remanence-based coercivities are available: plot the Day, S-Bcr
        # and Bcr1-Bcr2 comparison plots instead of S-Bc
        plt.figure(num=DSC['day'], figsize=(5, 5))
        plt.figure(num=DSC['S-Bcr'], figsize=(5, 5))
        plt.figure(num=DSC['bcr1-bcr2'], figsize=(5, 5))
        pmagplotlib.plot_day(DSC['day'], Bcr1Bc, S1, 'ro')
        pmagplotlib.plot_s_bcr(DSC['S-Bcr'], Bcr1, S1, 'ro')
        pmagplotlib.plot_bcr(DSC['bcr1-bcr2'], Bcr1, Bcr2)
        fnames.pop('S-Bc')
        fnames['bcr1-bcr2'] = os.path.join(save_folder, 'bcr1-bcr2.png')
        DSC.pop('S-Bc')
        if pmagplotlib.isServer:
            for key in list(DSC.keys()):
                fnames[key] = 'LO:_' + ":".join(set(loc_list)) + '_' + 'SI:__SA:__SP:__TY:_' + key + '_.' + fmt
        if save:
            pmagplotlib.save_plots(DSC, fnames, incl_directory=True)
            return True, fnames.values()
        if interactive:
            pmagplotlib.draw_figs(DSC)
            ans = pmagplotlib.save_or_quit()
            if ans == 'a':
                pmagplotlib.save_plots(DSC, fnames, incl_directory=True)
                return True, fnames.values()
    else:
        plt.figure(num=DSC['day'], figsize=(5, 5))
        plt.figure(num=DSC['S-Bc'], figsize=(5, 5))
        plt.figure(num=DSC['S-Bcr'], figsize=(5, 5))
        del DSC['bcr1-bcr2']  # no remanence data: skip the Bcr1-Bcr2 plot
        pmagplotlib.plot_day(DSC['day'], BcrBc, S, 'bs')
        pmagplotlib.plot_s_bcr(DSC['S-Bcr'], Bcr, S, 'bs')
        pmagplotlib.plot_s_bc(DSC['S-Bc'], Bc, S, 'bs')
        if pmagplotlib.isServer:
            for key in list(DSC.keys()):
                fnames[key] = 'LO:_' + ":".join(set(loc_list)) + '_' + 'SI:__SA:__SP:__TY:_' + key + '_.' + fmt
        if save:
            pmagplotlib.save_plots(DSC, fnames, incl_directory=True)
            return True, fnames.values()
        elif interactive:
            pmagplotlib.draw_figs(DSC)
            ans = pmagplotlib.save_or_quit()
            if ans == 'a':
                pmagplotlib.save_plots(DSC, fnames, incl_directory=True)
                return True, fnames.values()
    return True, []
return True, [] | python | def dayplot_magic(path_to_file='.', hyst_file="specimens.txt", rem_file='',
save=True, save_folder='.', fmt='svg', data_model=3,
interactive=False, contribution=None):
"""
Makes 'day plots' (Day et al. 1977) and squareness/coercivity plots
(Neel, 1955; plots after Tauxe et al., 2002); plots 'linear mixing'
curve from Dunlop and Carter-Stiglitz (2006).
Optional Parameters (defaults are used if not specified)
----------
path_to_file : path to directory that contains files (default is current directory, '.')
the default input file is 'specimens.txt' (data_model=3
if data_model = 2, then must these are the defaults:
hyst_file : hysteresis file (default is 'rmag_hysteresis.txt')
rem_file : remanence file (default is 'rmag_remanence.txt')
save : boolean argument to save plots (default is True)
save_folder : relative directory where plots will be saved (default is current directory, '.')
fmt : format of saved figures (default is 'svg')
"""
args = sys.argv
hyst_path = os.path.join(path_to_file, hyst_file)
if data_model == 2 and rem_file != '':
rem_path = os.path.join(path_to_file, rem_file)
# hyst_file,rem_file="rmag_hysteresis.txt","rmag_remanence.txt"
dir_path = path_to_file
verbose = pmagplotlib.verbose
# initialize some variables
# define figure numbers for Day,S-Bc,S-Bcr
DSC = {}
DSC['day'], DSC['S-Bc'], DSC['S-Bcr'], DSC['bcr1-bcr2'] = 1, 2, 3, 4
hyst_data, file_type = pmag.magic_read(hyst_path)
rem_data = []
if data_model == 2 and rem_file != "":
rem_data, file_type = pmag.magic_read(rem_path)
S, BcrBc, Bcr2, Bc, hsids, Bcr = [], [], [], [], [], []
Ms, Bcr1, Bcr1Bc, S1 = [], [], [], []
locations = ''
if data_model == 2:
for rec in hyst_data:
if 'er_location_name' in list(rec.keys()) and rec['er_location_name'] not in locations:
locations = locations + rec['er_location_name'] + '_'
if rec['hysteresis_bcr'] != "" and rec['hysteresis_mr_moment'] != "":
S.append(old_div(float(rec['hysteresis_mr_moment']), float(
rec['hysteresis_ms_moment'])))
Bcr.append(float(rec['hysteresis_bcr']))
Bc.append(float(rec['hysteresis_bc']))
BcrBc.append(old_div(Bcr[-1], Bc[-1]))
if 'er_synthetic_name' in list(rec.keys()) and rec['er_synthetic_name'] != "":
rec['er_specimen_name'] = rec['er_synthetic_name']
hsids.append(rec['er_specimen_name'])
if len(rem_data) > 0:
for rec in rem_data:
if rec['remanence_bcr'] != "" and float(rec['remanence_bcr']) > 0:
try:
ind = hsids.index(rec['er_specimen_name'])
Bcr1.append(float(rec['remanence_bcr']))
Bcr1Bc.append(old_div(Bcr1[-1], Bc[ind]))
S1.append(S[ind])
Bcr2.append(Bcr[ind])
except ValueError:
if verbose:
print('hysteresis data for ',
rec['er_specimen_name'], ' not found')
else:
fnames = {'specimens': hyst_file}
if contribution:
con = contribution
else:
con = cb.Contribution(dir_path, read_tables=['specimens'],
custom_filenames=fnames)
if 'specimens' not in con.tables:
print('-E- No specimen file found in {}'.format(os.path.realpath(dir_path)))
return False, []
spec_container = con.tables['specimens']
spec_df = spec_container.df
# get as much data as possible for naming plots
#if pmagplotlib.isServer:
con.propagate_location_to_specimens()
loc_list = []
if 'location' in spec_df.columns:
loc_list = spec_df['location'].unique()
do_rem = bool('rem_bcr' in spec_df.columns)
for ind, row in spec_df.iterrows():
if row['hyst_bcr'] and row['hyst_mr_moment']:
S.append(
old_div(float(row['hyst_mr_moment']), float(row['hyst_ms_moment'])))
Bcr.append(float(row['hyst_bcr']))
Bc.append(float(row['hyst_bc']))
BcrBc.append(old_div(Bcr[-1], Bc[-1]))
hsids.append(row['specimen'])
if do_rem:
if row['rem_bcr'] and float(row['rem_bcr']) > 0:
try:
Bcr1.append(float(row['rem_bcr']))
Bcr1Bc.append(old_div(Bcr1[-1], Bc[-1]))
S1.append(S[-1])
Bcr2.append(Bcr[-1])
except ValueError:
if verbose:
print('hysteresis data for ',
row['specimen'], end=' ')
print(' not found')
#
# now plot the day and S-Bc, S-Bcr plots
#
fnames = {'day': os.path.join(save_folder, "_".join(loc_list) + '_Day.' + fmt),
'S-Bcr': os.path.join(save_folder, "_".join(loc_list) + '_S-Bcr.' + fmt),
'S-Bc': os.path.join(save_folder, "_".join(loc_list) + '_S-Bc.' + fmt)}
if len(Bcr1) > 0:
plt.figure(num=DSC['day'], figsize=(5, 5))
#plt.figure(num=DSC['S-Bc'], figsize=(5, 5))
plt.figure(num=DSC['S-Bcr'], figsize=(5, 5))
plt.figure(num=DSC['bcr1-bcr2'], figsize=(5, 5))
pmagplotlib.plot_day(DSC['day'], Bcr1Bc, S1, 'ro')
pmagplotlib.plot_s_bcr(DSC['S-Bcr'], Bcr1, S1, 'ro')
#pmagplotlib.plot_init(DSC['bcr1-bcr2'], 5, 5)
pmagplotlib.plot_bcr(DSC['bcr1-bcr2'], Bcr1, Bcr2)
fnames.pop('S-Bc')
fnames['bcr1-bcr2'] = os.path.join(save_folder, 'bcr1-bcr2.png')
DSC.pop('S-Bc')
if pmagplotlib.isServer:
for key in list(DSC.keys()):
fnames[key] = 'LO:_' + ":".join(set(loc_list)) + '_' + 'SI:__SA:__SP:__TY:_' + key + '_.' + fmt
if save:
pmagplotlib.save_plots(DSC, fnames, incl_directory=True)
return True, fnames.values()
if interactive:
pmagplotlib.draw_figs(DSC)
ans = pmagplotlib.save_or_quit()
if ans == 'a':
pmagplotlib.save_plots(DSC, fnames, incl_directory=True)
return True, fnames.values()
else:
plt.figure(num=DSC['day'], figsize=(5, 5))
plt.figure(num=DSC['S-Bc'], figsize=(5, 5))
plt.figure(num=DSC['S-Bcr'], figsize=(5, 5))
#plt.figure(num=DSC['bcr1-bcr2'], figsize=(5, 5))
del DSC['bcr1-bcr2']
# do other plots instead
pmagplotlib.plot_day(DSC['day'], BcrBc, S, 'bs')
pmagplotlib.plot_s_bcr(DSC['S-Bcr'], Bcr, S, 'bs')
pmagplotlib.plot_s_bc(DSC['S-Bc'], Bc, S, 'bs')
if pmagplotlib.isServer:
for key in list(DSC.keys()):
fnames[key] = 'LO:_' + ":".join(set(loc_list)) + '_' + 'SI:__SA:__SP:__TY:_' + key + '_.' + fmt
if save:
pmagplotlib.save_plots(DSC, fnames, incl_directory=True)
return True, fnames.values()
elif interactive:
pmagplotlib.draw_figs(DSC)
ans = pmagplotlib.save_or_quit()
if ans == 'a':
pmagplotlib.save_plots(DSC, fnames, incl_directory=True)
return True, fnames.values()
return True, [] | Makes 'day plots' (Day et al. 1977) and squareness/coercivity plots
(Neel, 1955; plots after Tauxe et al., 2002); plots 'linear mixing'
curve from Dunlop and Carter-Stiglitz (2006).
Optional Parameters (defaults are used if not specified)
----------
path_to_file : path to directory that contains files (default is current directory, '.')
the default input file is 'specimens.txt' (data_model=3
if data_model = 2, then these are the defaults:
hyst_file : hysteresis file (default is 'rmag_hysteresis.txt')
rem_file : remanence file (default is 'rmag_remanence.txt')
save : boolean argument to save plots (default is True)
save_folder : relative directory where plots will be saved (default is current directory, '.')
fmt : format of saved figures (default is 'pdf') | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L6547-L6710 |
def curie(path_to_file='.', file_name='', magic=False,
          window_length=3, save=False, save_folder='.', fmt='svg', t_begin="", t_end=""):
    """
    Plots and interprets curie temperature data.
    ***
    The 1st derivative is calculated from the smoothed M-T curve (convolution
    with a triangular window of width <window_length> degrees).
    ***
    The 2nd derivative is calculated from the smoothed 1st derivative curve
    (using the same sliding window width).
    ***
    The estimated Curie temperature is the maximum of the 2nd derivative.
    Temperature steps should be in multiples of 1.0 degrees.

    Parameters
    __________
    file_name : name of file to be opened

    Optional Parameters (defaults are used if not specified)
    ----------
    path_to_file : path to directory that contains file (default is current directory, '.')
    window_length : dimension of smoothing window (input to smooth() function)
    save : boolean argument to save plots (default is False)
    save_folder : relative directory where plots will be saved (default is current directory, '.')
    fmt : format of saved figures
    t_begin : start of truncated temperature window for the search
    t_end : end of truncated temperature window for the search
    magic : True if MagIC formatted measurements.txt file
    """
    window_len = window_length
    # read data from file
    complete_path = os.path.join(path_to_file, file_name)
    if magic:
        data_df = pd.read_csv(complete_path, sep='\t', header=1)
        T = data_df['meas_temp'].values-273
        magn_key = cb.get_intensity_col(data_df)
        M = data_df[magn_key].values
    else:
        # np.float was removed in NumPy 1.24; the builtin float is equivalent
        Data = np.loadtxt(complete_path, dtype=float)
        T = Data.transpose()[0]
        M = Data.transpose()[1]
    T = list(T)
    M = list(M)
    # truncate the measurement window if requested; t_begin and t_end are
    # handled independently (previously t_end was only honored when t_begin
    # was also given, and an empty t_end raised a TypeError on Python 3)
    if t_begin != "":
        while T[0] < t_begin:
            M.pop(0)
            T.pop(0)
    if t_end != "":
        while T[-1] > t_end:
            M.pop(-1)
            T.pop(-1)
    # prepare the signal:
    # from M(T) array with unequal deltaT
    # to M(T) array with deltaT=(1 degree).
    # if deltaT is larger, points are added using a linear fit between
    # consecutive data points; duplicate temperatures are averaged and
    # descending steps are dropped; exit if deltaT is not an integer
    i = 0
    while i < (len(T) - 1):
        if (T[i + 1] - T[i]) % 1 > 0.001:
            print("delta T should be integer, this program will not work!")
            print("temperature range:", T[i], T[i + 1])
            sys.exit()
        if (T[i + 1] - T[i]) == 0.:
            M[i] = np.average([M[i], M[i + 1]])
            M.pop(i + 1)
            T.pop(i + 1)
        elif (T[i + 1] - T[i]) < 0.:
            M.pop(i + 1)
            T.pop(i + 1)
            print("check data in T=%.0f ,M[T] is ignored" % (T[i]))
        elif (T[i + 1] - T[i]) > 1.:
            slope, b = np.polyfit([T[i], T[i + 1]], [M[i], M[i + 1]], 1)
            for j in range(int(T[i + 1]) - int(T[i]) - 1):
                M.insert(i + 1, slope * (T[i] + 1.) + b)
                T.insert(i + 1, (T[i] + 1.))
            i = i + 1
        i = i + 1
    # calculate the smoothed signal
    M = np.array(M, 'f')
    T = np.array(T, 'f')
    M_smooth = smooth(M, window_len)
    # plot the original data and the smooth data
    PLT = {'M_T': 1, 'der1': 2, 'der2': 3, 'Curie': 4}
    plt.figure(num=PLT['M_T'], figsize=(5, 5))
    string = 'M-T (sliding window=%i)' % int(window_len)
    pmagplotlib.plot_xy(PLT['M_T'], T, M_smooth, sym='-')
    pmagplotlib.plot_xy(PLT['M_T'], T, M, sym='--',
                        xlab='Temperature C', ylab='Magnetization', title=string)
    # calculate first derivative (central difference)
    # NOTE(review): at i=0 this reads M_smooth[-1] (wrap-around), so the
    # first derivative point is suspect -- confirm intended behavior
    d1, T_d1 = [], []
    for i in range(len(M_smooth) - 1):
        Dy = M_smooth[i - 1] - M_smooth[i + 1]
        Dx = T[i - 1] - T[i + 1]
        d1.append(old_div(Dy, Dx))
    # original wrote T[1:len(T - 1)]; for an ndarray len(T - 1) == len(T),
    # so this is exactly T[1:]
    T_d1 = T[1:]
    d1 = np.array(d1, 'f')
    d1_smooth = smooth(d1, window_len)
    # plot the first derivative
    plt.figure(num=PLT['der1'], figsize=(5, 5))
    string = '1st derivative (sliding window=%i)' % int(window_len)
    pmagplotlib.plot_xy(PLT['der1'], T_d1, d1_smooth,
                        sym='-', xlab='Temperature C', title=string)
    pmagplotlib.plot_xy(PLT['der1'], T_d1, d1, sym='b--')
    # calculate second derivative
    d2, T_d2 = [], []
    for i in range(len(d1_smooth) - 1):
        Dy = d1_smooth[i - 1] - d1_smooth[i + 1]
        Dx = T[i - 1] - T[i + 1]
        d2.append(old_div(Dy, Dx))
    T_d2 = T[2:]  # equivalent to original T[2:len(T - 2)]
    d2 = np.array(d2, 'f')
    d2_smooth = smooth(d2, window_len)
    # plot the second derivative
    plt.figure(num=PLT['der2'], figsize=(5, 5))
    string = '2nd derivative (sliding window=%i)' % int(window_len)
    pmagplotlib.plot_xy(PLT['der2'], T_d2, d2, sym='-',
                        xlab='Temperature C', title=string)
    d2 = list(d2)
    print('second derivative maximum is at T=%i' %
          int(T_d2[d2.index(max(d2))]))
    # estimate the Curie temperature for a range of sliding-window widths
    # (locals renamed from 'curie'/'curie_1' so they no longer shadow this
    # function's own name)
    curie_temps, curie_temps_smooth = [], []
    wn = list(range(5, 50, 1))
    for win in wn:
        # calculate the smoothed signal
        M_smooth = smooth(M, win)
        # calculate first derivative
        d1 = []
        for i in range(len(M_smooth) - 1):
            Dy = M_smooth[i - 1] - M_smooth[i + 1]
            Dx = T[i - 1] - T[i + 1]
            d1.append(old_div(Dy, Dx))
        d1 = np.array(d1, 'f')
        d1_smooth = smooth(d1, win)
        # calculate second derivative
        d2 = []
        for i in range(len(d1_smooth) - 1):
            Dy = d1_smooth[i - 1] - d1_smooth[i + 1]
            Dx = T[i - 1] - T[i + 1]
            d2.append(old_div(Dy, Dx))
        T_d2 = T[2:]  # see note above on the original slice expression
        d2 = np.array(d2, 'f')
        d2_smooth = smooth(d2, win)
        d2 = list(d2)
        d2_smooth = list(d2_smooth)
        curie_temps.append(T_d2[d2.index(max(d2))])
        curie_temps_smooth.append(T_d2[d2_smooth.index(max(d2_smooth))])
    # plot Curie temp for different sliding window lengths
    plt.figure(num=PLT['Curie'], figsize=(5, 5))
    pmagplotlib.plot_xy(PLT['Curie'], wn, curie_temps, sym='.',
                        xlab='Sliding Window Width (degrees)', ylab='Curie Temp', title='Curie Statistics')
    files = {key: str(key) + '.' + fmt for key in PLT}
    if save:
        for key in PLT:
            try:
                plt.figure(num=PLT[key])
                plt.savefig(save_folder + '/' + files[key].replace('/', '-'))
            except Exception:
                # best-effort: report the failure and continue saving others
                print('could not save: ', PLT[key], files[key])
                print("output file format not supported ")
plt.show() | python | def curie(path_to_file='.', file_name='', magic=False,
window_length=3, save=False, save_folder='.', fmt='svg', t_begin="", t_end=""):
"""
Plots and interprets curie temperature data.
***
The 1st derivative is calculated from smoothed M-T curve (convolution
with triangular window with width= <-w> degrees)
***
The 2nd derivative is calculated from smoothed 1st derivative curve
(using the same sliding window width)
***
The estimated curie temp. is the maximum of the 2nd derivative.
Temperature steps should be in multiples of 1.0 degrees.
Parameters
__________
file_name : name of file to be opened
Optional Parameters (defaults are used if not specified)
----------
path_to_file : path to directory that contains file (default is current directory, '.')
window_length : dimension of smoothing window (input to smooth() function)
save : boolean argument to save plots (default is False)
save_folder : relative directory where plots will be saved (default is current directory, '.')
fmt : format of saved figures
t_begin: start of truncated window for search
t_end: end of truncated window for search
magic : True if MagIC formatted measurements.txt file
"""
plot = 0
window_len = window_length
# read data from file
complete_path = os.path.join(path_to_file, file_name)
if magic:
data_df = pd.read_csv(complete_path, sep='\t', header=1)
T = data_df['meas_temp'].values-273
magn_key = cb.get_intensity_col(data_df)
M = data_df[magn_key].values
else:
Data = np.loadtxt(complete_path, dtype=np.float)
T = Data.transpose()[0]
M = Data.transpose()[1]
T = list(T)
M = list(M)
# cut the data if -t is one of the flags
if t_begin != "":
while T[0] < t_begin:
M.pop(0)
T.pop(0)
while T[-1] > t_end:
M.pop(-1)
T.pop(-1)
# prepare the signal:
# from M(T) array with unequal deltaT
# to M(T) array with deltaT=(1 degree).
# if delataT is larger, then points are added using linear fit between
# consecutive data points.
# exit if deltaT is not integer
i = 0
while i < (len(T) - 1):
if (T[i + 1] - T[i]) % 1 > 0.001:
print("delta T should be integer, this program will not work!")
print("temperature range:", T[i], T[i + 1])
sys.exit()
if (T[i + 1] - T[i]) == 0.:
M[i] = np.average([M[i], M[i + 1]])
M.pop(i + 1)
T.pop(i + 1)
elif (T[i + 1] - T[i]) < 0.:
M.pop(i + 1)
T.pop(i + 1)
print("check data in T=%.0f ,M[T] is ignored" % (T[i]))
elif (T[i + 1] - T[i]) > 1.:
slope, b = np.polyfit([T[i], T[i + 1]], [M[i], M[i + 1]], 1)
for j in range(int(T[i + 1]) - int(T[i]) - 1):
M.insert(i + 1, slope * (T[i] + 1.) + b)
T.insert(i + 1, (T[i] + 1.))
i = i + 1
i = i + 1
# calculate the smoothed signal
M = np.array(M, 'f')
T = np.array(T, 'f')
M_smooth = []
M_smooth = smooth(M, window_len)
# plot the original data and the smooth data
PLT = {'M_T': 1, 'der1': 2, 'der2': 3, 'Curie': 4}
plt.figure(num=PLT['M_T'], figsize=(5, 5))
string = 'M-T (sliding window=%i)' % int(window_len)
pmagplotlib.plot_xy(PLT['M_T'], T, M_smooth, sym='-')
pmagplotlib.plot_xy(PLT['M_T'], T, M, sym='--',
xlab='Temperature C', ylab='Magnetization', title=string)
# calculate first derivative
d1, T_d1 = [], []
for i in range(len(M_smooth) - 1):
Dy = M_smooth[i - 1] - M_smooth[i + 1]
Dx = T[i - 1] - T[i + 1]
d1.append(old_div(Dy, Dx))
T_d1 = T[1:len(T - 1)]
d1 = np.array(d1, 'f')
d1_smooth = smooth(d1, window_len)
# plot the first derivative
plt.figure(num=PLT['der1'], figsize=(5, 5))
string = '1st derivative (sliding window=%i)' % int(window_len)
pmagplotlib.plot_xy(PLT['der1'], T_d1, d1_smooth,
sym='-', xlab='Temperature C', title=string)
pmagplotlib.plot_xy(PLT['der1'], T_d1, d1, sym='b--')
# calculate second derivative
d2, T_d2 = [], []
for i in range(len(d1_smooth) - 1):
Dy = d1_smooth[i - 1] - d1_smooth[i + 1]
Dx = T[i - 1] - T[i + 1]
# print Dy/Dx
d2.append(old_div(Dy, Dx))
T_d2 = T[2:len(T - 2)]
d2 = np.array(d2, 'f')
d2_smooth = smooth(d2, window_len)
# plot the second derivative
plt.figure(num=PLT['der2'], figsize=(5, 5))
string = '2nd derivative (sliding window=%i)' % int(window_len)
pmagplotlib.plot_xy(PLT['der2'], T_d2, d2, sym='-',
xlab='Temperature C', title=string)
d2 = list(d2)
print('second derivative maximum is at T=%i' %
int(T_d2[d2.index(max(d2))]))
# calculate Curie temperature for different width of sliding windows
curie, curie_1 = [], []
wn = list(range(5, 50, 1))
for win in wn:
# calculate the smoothed signal
M_smooth = []
M_smooth = smooth(M, win)
# calculate first derivative
d1, T_d1 = [], []
for i in range(len(M_smooth) - 1):
Dy = M_smooth[i - 1] - M_smooth[i + 1]
Dx = T[i - 1] - T[i + 1]
d1.append(old_div(Dy, Dx))
T_d1 = T[1:len(T - 1)]
d1 = np.array(d1, 'f')
d1_smooth = smooth(d1, win)
# calculate second derivative
d2, T_d2 = [], []
for i in range(len(d1_smooth) - 1):
Dy = d1_smooth[i - 1] - d1_smooth[i + 1]
Dx = T[i - 1] - T[i + 1]
d2.append(old_div(Dy, Dx))
T_d2 = T[2:len(T - 2)]
d2 = np.array(d2, 'f')
d2_smooth = smooth(d2, win)
d2 = list(d2)
d2_smooth = list(d2_smooth)
curie.append(T_d2[d2.index(max(d2))])
curie_1.append(T_d2[d2_smooth.index(max(d2_smooth))])
# plot Curie temp for different sliding window length
plt.figure(num=PLT['Curie'], figsize=(5, 5))
pmagplotlib.plot_xy(PLT['Curie'], wn, curie, sym='.',
xlab='Sliding Window Width (degrees)', ylab='Curie Temp', title='Curie Statistics')
files = {}
for key in list(PLT.keys()):
files[key] = str(key) + '.' + fmt
if save == True:
for key in list(PLT.keys()):
try:
plt.figure(num=PLT[key])
plt.savefig(save_folder + '/' + files[key].replace('/', '-'))
except:
print('could not save: ', PLT[key], files[key])
print("output file format not supported ")
plt.show() | Plots and interprets curie temperature data.
***
The 1st derivative is calculated from smoothed M-T curve (convolution
with triangular window with width= <-w> degrees)
***
The 2nd derivative is calculated from smoothed 1st derivative curve
(using the same sliding window width)
***
The estimated curie temp. is the maximum of the 2nd derivative.
Temperature steps should be in multiples of 1.0 degrees.
Parameters
__________
file_name : name of file to be opened
Optional Parameters (defaults are used if not specified)
----------
path_to_file : path to directory that contains file (default is current directory, '.')
window_length : dimension of smoothing window (input to smooth() function)
save : boolean argument to save plots (default is False)
save_folder : relative directory where plots will be saved (default is current directory, '.')
fmt : format of saved figures
t_begin: start of truncated window for search
t_end: end of truncated window for search
magic : True if MagIC formatted measurements.txt file | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L6790-L6968 |
def chi_magic2(path_to_file='.', file_name='magic_measurements.txt',
               save=False, save_folder='.', fmt='svg'):
    """
    Generates plots that compare susceptibility to temperature at different
    frequencies.

    Optional Parameters (defaults are used if not specified)
    ----------
    path_to_file : path to directory that contains file (default is current directory, '.')
    file_name : name of file to be opened (default is 'magic_measurements.txt')
    save : boolean argument to save plots (default is False)
    save_folder : relative directory where plots will be saved (default is current directory, '.')
    """
    complete_path = os.path.join(path_to_file, file_name)
    EXP = ""  # placeholder: optionally restrict processing to one experiment
    meas_data, file_type = pmag.magic_read(complete_path)
    # get list of unique experiment names
    experiment_names = []
    for rec in meas_data:
        if rec['magic_experiment_name'] not in experiment_names:
            experiment_names.append(rec['magic_experiment_name'])
    # hunt through by experiment name
    k = 0  # experiment counter
    if EXP != "":
        try:
            k = experiment_names.index(EXP)
        except ValueError:
            print("Bad experiment name")
            sys.exit()
    while k < len(experiment_names):
        e = experiment_names[k]
        if EXP == "":
            print(e, k + 1, 'out of ', len(experiment_names))
        # collect susceptibility, temperature, frequency and field values
        # for this experiment
        X, T, F, B = [], [], [], []
        for rec in meas_data:
            methcodes = rec['magic_method_codes']
            meths = methcodes.strip().split(':')
            if rec['magic_experiment_name'] == e and "LP-X" in meths:  # looking for chi measurement
                if 'measurement_temp' not in list(rec.keys()):
                    rec['measurement_temp'] = '300'  # set defaults
                if 'measurement_freq' not in list(rec.keys()):
                    rec['measurement_freq'] = '0'  # set defaults
                if 'measurement_lab_field_ac' not in list(rec.keys()):
                    rec['measurement_lab_field_ac'] = '0'  # set default
                X.append(float(rec['measurement_x']))
                T.append(float(rec['measurement_temp']))
                F.append(float(rec['measurement_freq']))
                B.append(float(rec['measurement_lab_field_ac']))
        # get unique, sorted lists of Ts, Fs, and Bs
        # BUGFIX: the original reused 'k' as this loop variable, clobbering
        # the experiment counter so later experiments were skipped and the
        # outer while loop could terminate early
        Ts, Fs, Bs = [], [], []
        for kk in range(len(X)):  # hunt through all the measurements
            if T[kk] not in Ts:
                Ts.append(T[kk])  # append if not in list
            if F[kk] not in Fs:
                Fs.append(F[kk])
            if B[kk] not in Bs:
                Bs.append(B[kk])
        Ts.sort()  # sort list of temperatures, frequencies and fields
        Fs.sort()
        Bs.sort()
        # NOTE(review): a command-line flag leaks into this library function
        if '-x' in sys.argv:
            k = len(experiment_names) + 1  # just plot the one
        else:
            k += 1  # increment experiment number
        #
        # plot chi versus T and F holding B constant
        #
        plotnum = 1  # initialize plot number to 1
        if len(X) > 2:  # if there are any data to plot, continue
            b = Bs[-1]  # keeping field constant and at maximum
            XTF = []  # initialize list of chi versus Temp and freq
            for f in Fs:  # step through frequencies sequentially
                XT = []  # initialize list of chi versus temp
                for kk in range(len(X)):  # hunt through all the data
                    if F[kk] == f and B[kk] == b:  # select data with given freq and field
                        XT.append([X[kk], T[kk]])  # append to list
                XTF.append(XT)  # append list to list of frequencies
            if len(XT) > 1:  # if there are any temperature dependent data
                plt.figure(num=plotnum, figsize=(5, 5))  # initialize plot
                # call the plotting function
                pmagplotlib.plot_xtf(plotnum, XTF, Fs, e, b)
                pmagplotlib.show_fig(plotnum)
                plotnum += 1  # increment plot number
            f = Fs[0]  # set frequency to minimum
            XTB = []  # initialize list of chi versus Temp and field
            for b in Bs:  # step through field values
                XT = []  # initial chi versus temp list for this field
                for kk in range(len(X)):  # hunt through all the data
                    if F[kk] == f and B[kk] == b:  # select data with given freq and field
                        XT.append([X[kk], T[kk]])  # append to list
                XTB.append(XT)
            if len(XT) > 1:  # if there are any temperature dependent data
                plt.figure(num=plotnum, figsize=(5, 5))  # set up plot
                # call the plotting function
                pmagplotlib.plot_xtb(plotnum, XTB, Bs, e, f)
                pmagplotlib.show_fig(plotnum)
                plotnum += 1  # increment plot number
            if save:
                files = {}
                PLTS = {}
                for p in range(1, plotnum):
                    key = str(p)
                    files[key] = e + '_' + key + '.' + fmt
                    PLTS[key] = p
                for key in list(PLTS.keys()):
                    try:
                        plt.figure(num=PLTS[key])
                        plt.savefig(save_folder + '/' +
                                    files[key].replace('/', '-'))
                    except Exception:
                        # best-effort: report and continue with other plots
                        print('could not save: ', PLTS[key], files[key])
print("output file format not supported ") | python | def chi_magic2(path_to_file='.', file_name='magic_measurements.txt',
save=False, save_folder='.', fmt='svg'):
"""
Generates plots that compare susceptibility to temperature at different
frequencies.
Optional Parameters (defaults are used if not specified)
----------
path_to_file : path to directory that contains file (default is current directory, '.')
file_name : name of file to be opened (default is 'magic_measurements.txt')
save : boolean argument to save plots (default is False)
save_folder : relative directory where plots will be saved (default is current directory, '.')
"""
cont, FTinit, BTinit, k = "", 0, 0, 0
complete_path = os.path.join(path_to_file, file_name)
Tind, cont = 0, ""
EXP = ""
#
meas_data, file_type = pmag.magic_read(complete_path)
#
# get list of unique experiment names
#
# initialize some variables (a continuation flag, plot initialization
# flags and the experiment counter
experiment_names = []
for rec in meas_data:
if rec['magic_experiment_name'] not in experiment_names:
experiment_names.append(rec['magic_experiment_name'])
#
# hunt through by experiment name
if EXP != "":
try:
k = experiment_names.index(EXP)
except:
print("Bad experiment name")
sys.exit()
while k < len(experiment_names):
e = experiment_names[k]
if EXP == "":
print(e, k + 1, 'out of ', len(experiment_names))
#
# initialize lists of data, susceptibility, temperature, frequency and
# field
X, T, F, B = [], [], [], []
for rec in meas_data:
methcodes = rec['magic_method_codes']
meths = methcodes.strip().split(':')
if rec['magic_experiment_name'] == e and "LP-X" in meths: # looking for chi measurement
if 'measurement_temp' not in list(rec.keys()):
rec['measurement_temp'] = '300' # set defaults
if 'measurement_freq' not in list(rec.keys()):
rec['measurement_freq'] = '0' # set defaults
if 'measurement_lab_field_ac' not in list(rec.keys()):
rec['measurement_lab_field_ac'] = '0' # set default
X.append(float(rec['measurement_x']))
T.append(float(rec['measurement_temp']))
F.append(float(rec['measurement_freq']))
B.append(float(rec['measurement_lab_field_ac']))
#
# get unique list of Ts,Fs, and Bs
#
Ts, Fs, Bs = [], [], []
for k in range(len(X)): # hunt through all the measurements
if T[k] not in Ts:
Ts.append(T[k]) # append if not in list
if F[k] not in Fs:
Fs.append(F[k])
if B[k] not in Bs:
Bs.append(B[k])
Ts.sort() # sort list of temperatures, frequencies and fields
Fs.sort()
Bs.sort()
if '-x' in sys.argv:
k = len(experiment_names) + 1 # just plot the one
else:
k += 1 # increment experiment number
#
# plot chi versus T and F holding B constant
#
plotnum = 1 # initialize plot number to 1
if len(X) > 2: # if there are any data to plot, continue
b = Bs[-1] # keeping field constant and at maximum
XTF = [] # initialize list of chi versus Temp and freq
for f in Fs: # step through frequencies sequentially
XT = [] # initialize list of chi versus temp
for kk in range(len(X)): # hunt through all the data
if F[kk] == f and B[kk] == b: # select data with given freq and field
XT.append([X[kk], T[kk]]) # append to list
XTF.append(XT) # append list to list of frequencies
if len(XT) > 1: # if there are any temperature dependent data
plt.figure(num=plotnum, figsize=(5, 5)) # initialize plot
# call the plotting function
pmagplotlib.plot_xtf(plotnum, XTF, Fs, e, b)
pmagplotlib.show_fig(plotnum)
plotnum += 1 # increment plot number
f = Fs[0] # set frequency to minimum
XTB = [] # initialize list if chi versus Temp and field
for b in Bs: # step through field values
XT = [] # initial chi versus temp list for this field
for kk in range(len(X)): # hunt through all the data
if F[kk] == f and B[kk] == b: # select data with given freq and field
XT.append([X[kk], T[kk]]) # append to list
XTB.append(XT)
if len(XT) > 1: # if there are any temperature dependent data
plt.figure(num=plotnum, figsize=(5, 5)) # set up plot
# call the plotting function
pmagplotlib.plot_xtb(plotnum, XTB, Bs, e, f)
pmagplotlib.show_fig(plotnum)
plotnum += 1 # increment plot number
if save == True:
files = {}
PLTS = {}
for p in range(1, plotnum):
key = str(p)
files[key] = e + '_' + key + '.' + fmt
PLTS[key] = p
for key in list(PLTS.keys()):
try:
plt.figure(num=PLTS[key])
plt.savefig(save_folder + '/' +
files[key].replace('/', '-'))
except:
print('could not save: ', PLTS[key], files[key])
print("output file format not supported ") | Generates plots that compare susceptibility to temperature at different
frequencies.
Optional Parameters (defaults are used if not specified)
----------
path_to_file : path to directory that contains file (default is current directory, '.')
file_name : name of file to be opened (default is 'magic_measurements.txt')
save : boolean argument to save plots (default is False)
save_folder : relative directory where plots will be saved (default is current directory, '.') | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L6971-L7094 |
PmagPy/PmagPy | pmagpy/ipmag.py | pmag_results_extract | def pmag_results_extract(res_file="pmag_results.txt", crit_file="", spec_file="",
age_file="", latex=False, grade=False, WD="."):
"""
Generate tab delimited output file(s) with result data.
Save output files and return True if successful.
Possible output files: Directions, Intensities, SiteNfo, Criteria,
Specimens
Optional Parameters (defaults are used if not specified)
----------
res_file : name of pmag_results file (default is "pmag_results.txt")
crit_file : name of criteria file (default is "pmag_criteria.txt")
spec_file : name of specimen file (default is "pmag_specimens.txt")
age_file : name of age file (default is "er_ages.txt")
latex : boolean argument to output in LaTeX (default is False)
WD : path to directory that contains input files and takes output (default is current directory, '.')
"""
# format outfiles
if latex:
latex = 1
file_type = '.tex'
else:
latex = 0
file_type = '.txt'
dir_path = os.path.realpath(WD)
outfile = os.path.join(dir_path, 'Directions' + file_type)
Ioutfile = os.path.join(dir_path, 'Intensities' + file_type)
Soutfile = os.path.join(dir_path, 'SiteNfo' + file_type)
Specout = os.path.join(dir_path, 'Specimens' + file_type)
Critout = os.path.join(dir_path, 'Criteria' + file_type)
# format infiles
res_file = os.path.join(dir_path, res_file)
if crit_file:
crit_file = os.path.join(dir_path, crit_file)
if spec_file:
spec_file = os.path.join(dir_path, spec_file)
else:
grade = False
# open output files
f = open(outfile, 'w')
sf = open(Soutfile, 'w')
fI = open(Ioutfile, 'w')
if crit_file:
cr = open(Critout, 'w')
# set up column headers
Sites, file_type = pmag.magic_read(res_file)
if crit_file:
Crits, file_type = pmag.magic_read(crit_file)
else:
Crits = []
SiteCols = ["Site", "Location",
"Lat. (N)", "Long. (E)", "Age ", "Age sigma", "Units"]
SiteKeys = ["er_site_names", "average_lat", "average_lon", "average_age",
"average_age_sigma", "average_age_unit"]
DirCols = ["Site", 'Comp.', "perc TC", "Dec.", "Inc.", "Nl", "Np", "k ", "R", "a95",
"PLat", "PLong"]
DirKeys = ["er_site_names", "pole_comp_name", "tilt_correction", "average_dec", "average_inc",
"average_n_lines", "average_n_planes", "average_k", "average_r", "average_alpha95",
"vgp_lat", "vgp_lon"]
IntCols = ["Site", "N", "B (uT)", "sigma",
"sigma perc", "VADM", "VADM sigma"]
IntKeys = ["er_site_names", "average_int_n", "average_int", "average_int_sigma",
'average_int_sigma_perc', "vadm", "vadm_sigma"]
AllowedKeys = ['specimen_frac', 'specimen_scat', 'specimen_gap_max', 'measurement_step_min',
'measurement_step_max', 'measurement_step_unit', 'specimen_polarity',
'specimen_nrm', 'specimen_direction_type', 'specimen_comp_nmb', 'specimen_mad',
'specimen_alpha95', 'specimen_n', 'specimen_int_sigma',
'specimen_int_sigma_perc', 'specimen_int_rel_sigma',
'specimen_int_rel_sigma_perc', 'specimen_int_mad', 'specimen_int_n',
'specimen_w', 'specimen_q', 'specimen_f', 'specimen_fvds', 'specimen_b_sigma',
'specimen_b_beta', 'specimen_g', 'specimen_dang', 'specimen_md',
'specimen_ptrm', 'specimen_drat', 'specimen_drats', 'specimen_rsc',
'specimen_viscosity_index', 'specimen_magn_moment', 'specimen_magn_volume',
'specimen_magn_mass', 'specimen_int_ptrm_n', 'specimen_delta', 'specimen_theta',
'specimen_gamma', 'sample_polarity', 'sample_nrm', 'sample_direction_type',
'sample_comp_nmb', 'sample_sigma', 'sample_alpha95', 'sample_n',
'sample_n_lines', 'sample_n_planes', 'sample_k', 'sample_r',
'sample_tilt_correction', 'sample_int_sigma', 'sample_int_sigma_perc',
'sample_int_rel_sigma', 'sample_int_rel_sigma_perc', 'sample_int_n',
'sample_magn_moment', 'sample_magn_volume', 'sample_magn_mass', 'site_polarity',
'site_nrm', 'site_direction_type', 'site_comp_nmb', 'site_sigma',
'site_alpha95', 'site_n', 'site_n_lines', 'site_n_planes', 'site_k', 'site_r',
'site_tilt_correction', 'site_int_sigma', 'site_int_sigma_perc',
'site_int_rel_sigma', 'site_int_rel_sigma_perc', 'site_int_n',
'site_magn_moment', 'site_magn_volume', 'site_magn_mass', 'average_age_min',
'average_age_max', 'average_age_sigma', 'average_age_unit', 'average_sigma',
'average_alpha95', 'average_n', 'average_nn', 'average_k', 'average_r',
'average_int_sigma', 'average_int_rel_sigma', 'average_int_rel_sigma_perc',
'average_int_n', 'average_int_nn', 'vgp_dp', 'vgp_dm', 'vgp_sigma',
'vgp_alpha95', 'vgp_n', 'vdm_sigma', 'vdm_n', 'vadm_sigma', 'vadm_n']
if crit_file:
crit = Crits[0] # get a list of useful keys
for key in list(crit.keys()):
if key not in AllowedKeys:
del(crit[key])
for key in list(crit.keys()):
if (not crit[key]) or (eval(crit[key]) > 1000) or (eval(crit[key]) == 0):
# get rid of all blank or too big ones or too little ones
del(crit[key])
CritKeys = list(crit.keys())
if spec_file:
Specs, file_type = pmag.magic_read(spec_file)
fsp = open(Specout, 'w') # including specimen intensities if desired
SpecCols = ["Site", "Specimen", "B (uT)", "MAD", "Beta", "N", "Q", "DANG", "f-vds",
"DRATS", "T (C)"]
SpecKeys = ['er_site_name', 'er_specimen_name', 'specimen_int', 'specimen_int_mad',
'specimen_b_beta', 'specimen_int_n', 'specimen_q', 'specimen_dang',
'specimen_fvds', 'specimen_drats', 'trange']
Xtra = ['specimen_frac', 'specimen_scat', 'specimen_gmax']
if grade:
SpecCols.append('Grade')
SpecKeys.append('specimen_grade')
for x in Xtra: # put in the new intensity keys if present
if x in list(Specs[0].keys()):
SpecKeys.append(x)
newkey = ""
for k in x.split('_')[1:]:
newkey = newkey + k + '_'
SpecCols.append(newkey.strip('_'))
SpecCols.append('Corrections')
SpecKeys.append('corrections')
# these should be multiplied by 1e6
Micro = ['specimen_int', 'average_int', 'average_int_sigma']
Zeta = ['vadm', 'vadm_sigma'] # these should be multiplied by 1e21
# write out the header information for each output file
if latex: # write out the latex header stuff
sep = ' & '
end = '\\\\'
f.write('\\documentclass{article}\n')
f.write('\\usepackage[margin=1in]{geometry}\n')
f.write('\\usepackage{longtable}\n')
f.write('\\begin{document}\n')
sf.write('\\documentclass{article}\n')
sf.write('\\usepackage[margin=1in]{geometry}\n')
sf.write('\\usepackage{longtable}\n')
sf.write('\\begin{document}\n')
fI.write('\\documentclass{article}\n')
fI.write('\\usepackage[margin=1in]{geometry}\n')
fI.write('\\usepackage{longtable}\n')
fI.write('\\begin{document}\n')
if crit_file:
cr.write('\\documentclass{article}\n')
cr.write('\\usepackage[margin=1in]{geometry}\n')
cr.write('\\usepackage{longtable}\n')
cr.write('\\begin{document}\n')
if spec_file:
fsp.write('\\documentclass{article}\n')
fsp.write('\\usepackage[margin=1in]{geometry}\n')
fsp.write('\\usepackage{longtable}\n')
fsp.write('\\begin{document}\n')
tabstring = '\\begin{longtable}{'
fstring = tabstring
for k in range(len(SiteCols)):
fstring = fstring + 'r'
sf.write(fstring + '}\n')
sf.write('\hline\n')
fstring = tabstring
for k in range(len(DirCols)):
fstring = fstring + 'r'
f.write(fstring + '}\n')
f.write('\hline\n')
fstring = tabstring
for k in range(len(IntCols)):
fstring = fstring + 'r'
fI.write(fstring + '}\n')
fI.write('\hline\n')
fstring = tabstring
if crit_file:
for k in range(len(CritKeys)):
fstring = fstring + 'r'
cr.write(fstring + '}\n')
cr.write('\hline\n')
if spec_file:
fstring = tabstring
for k in range(len(SpecCols)):
fstring = fstring + 'r'
fsp.write(fstring + '}\n')
fsp.write('\hline\n')
else: # just set the tab and line endings for tab delimited
sep = ' \t '
end = ''
# now write out the actual column headers
Soutstring, Doutstring, Ioutstring, Spoutstring, Croutstring = "", "", "", "", ""
for k in range(len(SiteCols)):
Soutstring = Soutstring + SiteCols[k] + sep
Soutstring = Soutstring.strip(sep)
Soutstring = Soutstring + end + '\n'
sf.write(Soutstring)
for k in range(len(DirCols)):
Doutstring = Doutstring + DirCols[k] + sep
Doutstring = Doutstring.strip(sep)
Doutstring = Doutstring + end + '\n'
f.write(Doutstring)
for k in range(len(IntCols)):
Ioutstring = Ioutstring + IntCols[k] + sep
Ioutstring = Ioutstring.strip(sep)
Ioutstring = Ioutstring + end + '\n'
fI.write(Ioutstring)
if crit_file:
for k in range(len(CritKeys)):
Croutstring = Croutstring + CritKeys[k] + sep
Croutstring = Croutstring.strip(sep)
Croutstring = Croutstring + end + '\n'
cr.write(Croutstring)
if spec_file:
for k in range(len(SpecCols)):
Spoutstring = Spoutstring + SpecCols[k] + sep
Spoutstring = Spoutstring.strip(sep)
Spoutstring = Spoutstring + end + "\n"
fsp.write(Spoutstring)
if latex: # put in a horizontal line in latex file
f.write('\hline\n')
sf.write('\hline\n')
fI.write('\hline\n')
if crit_file:
cr.write('\hline\n')
if spec_file:
fsp.write('\hline\n')
# do criteria
if crit_file:
for crit in Crits:
Croutstring = ""
for key in CritKeys:
Croutstring = Croutstring + crit[key] + sep
Croutstring = Croutstring.strip(sep) + end
cr.write(Croutstring + '\n')
# do directions
# get all results with VGPs
VGPs = pmag.get_dictitem(Sites, 'vgp_lat', '', 'F')
VGPs = pmag.get_dictitem(VGPs, 'data_type', 'i',
'T') # get site level stuff
for site in VGPs:
if len(site['er_site_names'].split(":")) == 1:
if 'er_sample_names' not in list(site.keys()):
site['er_sample_names'] = ''
if 'pole_comp_name' not in list(site.keys()):
site['pole_comp_name'] = "A"
if 'average_nn' not in list(site.keys()) and 'average_n' in list(site.keys()):
site['average_nn'] = site['average_n']
if 'average_n_lines' not in list(site.keys()):
site['average_n_lines'] = site['average_nn']
if 'average_n_planes' not in list(site.keys()):
site['average_n_planes'] = ""
Soutstring, Doutstring = "", ""
for key in SiteKeys:
if key in list(site.keys()):
Soutstring = Soutstring + site[key] + sep
Soutstring = Soutstring.strip(sep) + end
sf.write(Soutstring + '\n')
for key in DirKeys:
if key in list(site.keys()):
Doutstring = Doutstring + site[key] + sep
Doutstring = Doutstring.strip(sep) + end
f.write(Doutstring + '\n')
# now do intensities
VADMs = pmag.get_dictitem(Sites, 'vadm', '', 'F')
VADMs = pmag.get_dictitem(VADMs, 'data_type', 'i', 'T')
for site in VADMs: # do results level stuff
if site not in VGPs:
Soutstring = ""
for key in SiteKeys:
if key in list(site.keys()):
Soutstring = Soutstring + site[key] + sep
else:
Soutstring = Soutstring + " " + sep
Soutstring = Soutstring.strip(sep) + end
sf.write(Soutstring + '\n')
if len(site['er_site_names'].split(":")) == 1 and site['data_type'] == 'i':
if 'average_int_sigma_perc' not in list(site.keys()):
site['average_int_sigma_perc'] = "0"
if site["average_int_sigma"] == "":
site["average_int_sigma"] = "0"
if site["average_int_sigma_perc"] == "":
site["average_int_sigma_perc"] = "0"
if site["vadm"] == "":
site["vadm"] = "0"
if site["vadm_sigma"] == "":
site["vadm_sigma"] = "0"
for key in list(site.keys()): # reformat vadms, intensities
if key in Micro:
site[key] = '%7.1f' % (float(site[key]) * 1e6)
if key in Zeta:
site[key] = '%7.1f' % (float(site[key]) * 1e-21)
outstring = ""
for key in IntKeys:
if key not in list(site.keys()):
site[key] = ""
outstring = outstring + site[key] + sep
outstring = outstring.strip(sep) + end + '\n'
fI.write(outstring)
# VDMs=pmag.get_dictitem(Sites,'vdm','','F') # get non-blank VDMs
# for site in VDMs: # do results level stuff
# if len(site['er_site_names'].split(":"))==1:
# if 'average_int_sigma_perc' not in site.keys():site['average_int_sigma_perc']="0"
# if site["average_int_sigma"]=="":site["average_int_sigma"]="0"
# if site["average_int_sigma_perc"]=="":site["average_int_sigma_perc"]="0"
# if site["vadm"]=="":site["vadm"]="0"
# if site["vadm_sigma"]=="":site["vadm_sigma"]="0"
# for key in site.keys(): # reformat vadms, intensities
# if key in Micro: site[key]='%7.1f'%(float(site[key])*1e6)
# if key in Zeta: site[key]='%7.1f'%(float(site[key])*1e-21)
# outstring=""
# for key in IntKeys:
# outstring=outstring+site[key]+sep
# fI.write(outstring.strip(sep)+'\n')
if spec_file:
SpecsInts = pmag.get_dictitem(Specs, 'specimen_int', '', 'F')
for spec in SpecsInts:
spec['trange'] = '%i' % (int(float(spec['measurement_step_min']) - 273)) + \
'-' + '%i' % (int(float(spec['measurement_step_max']) - 273))
meths = spec['magic_method_codes'].split(':')
corrections = ''
for meth in meths:
if 'DA' in meth:
corrections = corrections + meth[3:] + ':'
corrections = corrections.strip(':')
if corrections.strip() == "":
corrections = "None"
spec['corrections'] = corrections
outstring = ""
for key in SpecKeys:
if key in Micro:
spec[key] = '%7.1f' % (float(spec[key]) * 1e6)
if key in Zeta:
spec[key] = '%7.1f' % (float(spec[key]) * 1e-21)
outstring = outstring + spec[key] + sep
fsp.write(outstring.strip(sep) + end + '\n')
#
if latex: # write out the tail stuff
f.write('\hline\n')
sf.write('\hline\n')
fI.write('\hline\n')
f.write('\end{longtable}\n')
sf.write('\end{longtable}\n')
fI.write('\end{longtable}\n')
f.write('\end{document}\n')
sf.write('\end{document}\n')
fI.write('\end{document}\n')
if spec_file:
fsp.write('\hline\n')
fsp.write('\end{longtable}\n')
fsp.write('\end{document}\n')
if crit_file:
cr.write('\hline\n')
cr.write('\end{longtable}\n')
cr.write('\end{document}\n')
f.close()
sf.close()
fI.close()
print('data saved in: ', outfile, Ioutfile, Soutfile)
outfiles = [outfile, Ioutfile, Soutfile]
if spec_file:
fsp.close()
print('specimen data saved in: ', Specout)
outfiles.append(Specout)
if crit_file:
cr.close()
print('Selection criteria saved in: ', Critout)
outfiles.append(Critout)
return True, outfiles | python | def pmag_results_extract(res_file="pmag_results.txt", crit_file="", spec_file="",
age_file="", latex=False, grade=False, WD="."):
"""
Generate tab delimited output file(s) with result data.
Save output files and return True if successful.
Possible output files: Directions, Intensities, SiteNfo, Criteria,
Specimens
Optional Parameters (defaults are used if not specified)
----------
res_file : name of pmag_results file (default is "pmag_results.txt")
crit_file : name of criteria file (default is "pmag_criteria.txt")
spec_file : name of specimen file (default is "pmag_specimens.txt")
age_file : name of age file (default is "er_ages.txt")
latex : boolean argument to output in LaTeX (default is False)
WD : path to directory that contains input files and takes output (default is current directory, '.')
"""
# format outfiles
if latex:
latex = 1
file_type = '.tex'
else:
latex = 0
file_type = '.txt'
dir_path = os.path.realpath(WD)
outfile = os.path.join(dir_path, 'Directions' + file_type)
Ioutfile = os.path.join(dir_path, 'Intensities' + file_type)
Soutfile = os.path.join(dir_path, 'SiteNfo' + file_type)
Specout = os.path.join(dir_path, 'Specimens' + file_type)
Critout = os.path.join(dir_path, 'Criteria' + file_type)
# format infiles
res_file = os.path.join(dir_path, res_file)
if crit_file:
crit_file = os.path.join(dir_path, crit_file)
if spec_file:
spec_file = os.path.join(dir_path, spec_file)
else:
grade = False
# open output files
f = open(outfile, 'w')
sf = open(Soutfile, 'w')
fI = open(Ioutfile, 'w')
if crit_file:
cr = open(Critout, 'w')
# set up column headers
Sites, file_type = pmag.magic_read(res_file)
if crit_file:
Crits, file_type = pmag.magic_read(crit_file)
else:
Crits = []
SiteCols = ["Site", "Location",
"Lat. (N)", "Long. (E)", "Age ", "Age sigma", "Units"]
SiteKeys = ["er_site_names", "average_lat", "average_lon", "average_age",
"average_age_sigma", "average_age_unit"]
DirCols = ["Site", 'Comp.', "perc TC", "Dec.", "Inc.", "Nl", "Np", "k ", "R", "a95",
"PLat", "PLong"]
DirKeys = ["er_site_names", "pole_comp_name", "tilt_correction", "average_dec", "average_inc",
"average_n_lines", "average_n_planes", "average_k", "average_r", "average_alpha95",
"vgp_lat", "vgp_lon"]
IntCols = ["Site", "N", "B (uT)", "sigma",
"sigma perc", "VADM", "VADM sigma"]
IntKeys = ["er_site_names", "average_int_n", "average_int", "average_int_sigma",
'average_int_sigma_perc', "vadm", "vadm_sigma"]
AllowedKeys = ['specimen_frac', 'specimen_scat', 'specimen_gap_max', 'measurement_step_min',
'measurement_step_max', 'measurement_step_unit', 'specimen_polarity',
'specimen_nrm', 'specimen_direction_type', 'specimen_comp_nmb', 'specimen_mad',
'specimen_alpha95', 'specimen_n', 'specimen_int_sigma',
'specimen_int_sigma_perc', 'specimen_int_rel_sigma',
'specimen_int_rel_sigma_perc', 'specimen_int_mad', 'specimen_int_n',
'specimen_w', 'specimen_q', 'specimen_f', 'specimen_fvds', 'specimen_b_sigma',
'specimen_b_beta', 'specimen_g', 'specimen_dang', 'specimen_md',
'specimen_ptrm', 'specimen_drat', 'specimen_drats', 'specimen_rsc',
'specimen_viscosity_index', 'specimen_magn_moment', 'specimen_magn_volume',
'specimen_magn_mass', 'specimen_int_ptrm_n', 'specimen_delta', 'specimen_theta',
'specimen_gamma', 'sample_polarity', 'sample_nrm', 'sample_direction_type',
'sample_comp_nmb', 'sample_sigma', 'sample_alpha95', 'sample_n',
'sample_n_lines', 'sample_n_planes', 'sample_k', 'sample_r',
'sample_tilt_correction', 'sample_int_sigma', 'sample_int_sigma_perc',
'sample_int_rel_sigma', 'sample_int_rel_sigma_perc', 'sample_int_n',
'sample_magn_moment', 'sample_magn_volume', 'sample_magn_mass', 'site_polarity',
'site_nrm', 'site_direction_type', 'site_comp_nmb', 'site_sigma',
'site_alpha95', 'site_n', 'site_n_lines', 'site_n_planes', 'site_k', 'site_r',
'site_tilt_correction', 'site_int_sigma', 'site_int_sigma_perc',
'site_int_rel_sigma', 'site_int_rel_sigma_perc', 'site_int_n',
'site_magn_moment', 'site_magn_volume', 'site_magn_mass', 'average_age_min',
'average_age_max', 'average_age_sigma', 'average_age_unit', 'average_sigma',
'average_alpha95', 'average_n', 'average_nn', 'average_k', 'average_r',
'average_int_sigma', 'average_int_rel_sigma', 'average_int_rel_sigma_perc',
'average_int_n', 'average_int_nn', 'vgp_dp', 'vgp_dm', 'vgp_sigma',
'vgp_alpha95', 'vgp_n', 'vdm_sigma', 'vdm_n', 'vadm_sigma', 'vadm_n']
if crit_file:
crit = Crits[0] # get a list of useful keys
for key in list(crit.keys()):
if key not in AllowedKeys:
del(crit[key])
for key in list(crit.keys()):
if (not crit[key]) or (eval(crit[key]) > 1000) or (eval(crit[key]) == 0):
# get rid of all blank or too big ones or too little ones
del(crit[key])
CritKeys = list(crit.keys())
if spec_file:
Specs, file_type = pmag.magic_read(spec_file)
fsp = open(Specout, 'w') # including specimen intensities if desired
SpecCols = ["Site", "Specimen", "B (uT)", "MAD", "Beta", "N", "Q", "DANG", "f-vds",
"DRATS", "T (C)"]
SpecKeys = ['er_site_name', 'er_specimen_name', 'specimen_int', 'specimen_int_mad',
'specimen_b_beta', 'specimen_int_n', 'specimen_q', 'specimen_dang',
'specimen_fvds', 'specimen_drats', 'trange']
Xtra = ['specimen_frac', 'specimen_scat', 'specimen_gmax']
if grade:
SpecCols.append('Grade')
SpecKeys.append('specimen_grade')
for x in Xtra: # put in the new intensity keys if present
if x in list(Specs[0].keys()):
SpecKeys.append(x)
newkey = ""
for k in x.split('_')[1:]:
newkey = newkey + k + '_'
SpecCols.append(newkey.strip('_'))
SpecCols.append('Corrections')
SpecKeys.append('corrections')
# these should be multiplied by 1e6
Micro = ['specimen_int', 'average_int', 'average_int_sigma']
Zeta = ['vadm', 'vadm_sigma'] # these should be multiplied by 1e21
# write out the header information for each output file
if latex: # write out the latex header stuff
sep = ' & '
end = '\\\\'
f.write('\\documentclass{article}\n')
f.write('\\usepackage[margin=1in]{geometry}\n')
f.write('\\usepackage{longtable}\n')
f.write('\\begin{document}\n')
sf.write('\\documentclass{article}\n')
sf.write('\\usepackage[margin=1in]{geometry}\n')
sf.write('\\usepackage{longtable}\n')
sf.write('\\begin{document}\n')
fI.write('\\documentclass{article}\n')
fI.write('\\usepackage[margin=1in]{geometry}\n')
fI.write('\\usepackage{longtable}\n')
fI.write('\\begin{document}\n')
if crit_file:
cr.write('\\documentclass{article}\n')
cr.write('\\usepackage[margin=1in]{geometry}\n')
cr.write('\\usepackage{longtable}\n')
cr.write('\\begin{document}\n')
if spec_file:
fsp.write('\\documentclass{article}\n')
fsp.write('\\usepackage[margin=1in]{geometry}\n')
fsp.write('\\usepackage{longtable}\n')
fsp.write('\\begin{document}\n')
tabstring = '\\begin{longtable}{'
fstring = tabstring
for k in range(len(SiteCols)):
fstring = fstring + 'r'
sf.write(fstring + '}\n')
sf.write('\hline\n')
fstring = tabstring
for k in range(len(DirCols)):
fstring = fstring + 'r'
f.write(fstring + '}\n')
f.write('\hline\n')
fstring = tabstring
for k in range(len(IntCols)):
fstring = fstring + 'r'
fI.write(fstring + '}\n')
fI.write('\hline\n')
fstring = tabstring
if crit_file:
for k in range(len(CritKeys)):
fstring = fstring + 'r'
cr.write(fstring + '}\n')
cr.write('\hline\n')
if spec_file:
fstring = tabstring
for k in range(len(SpecCols)):
fstring = fstring + 'r'
fsp.write(fstring + '}\n')
fsp.write('\hline\n')
else: # just set the tab and line endings for tab delimited
sep = ' \t '
end = ''
# now write out the actual column headers
Soutstring, Doutstring, Ioutstring, Spoutstring, Croutstring = "", "", "", "", ""
for k in range(len(SiteCols)):
Soutstring = Soutstring + SiteCols[k] + sep
Soutstring = Soutstring.strip(sep)
Soutstring = Soutstring + end + '\n'
sf.write(Soutstring)
for k in range(len(DirCols)):
Doutstring = Doutstring + DirCols[k] + sep
Doutstring = Doutstring.strip(sep)
Doutstring = Doutstring + end + '\n'
f.write(Doutstring)
for k in range(len(IntCols)):
Ioutstring = Ioutstring + IntCols[k] + sep
Ioutstring = Ioutstring.strip(sep)
Ioutstring = Ioutstring + end + '\n'
fI.write(Ioutstring)
if crit_file:
for k in range(len(CritKeys)):
Croutstring = Croutstring + CritKeys[k] + sep
Croutstring = Croutstring.strip(sep)
Croutstring = Croutstring + end + '\n'
cr.write(Croutstring)
if spec_file:
for k in range(len(SpecCols)):
Spoutstring = Spoutstring + SpecCols[k] + sep
Spoutstring = Spoutstring.strip(sep)
Spoutstring = Spoutstring + end + "\n"
fsp.write(Spoutstring)
if latex: # put in a horizontal line in latex file
f.write('\hline\n')
sf.write('\hline\n')
fI.write('\hline\n')
if crit_file:
cr.write('\hline\n')
if spec_file:
fsp.write('\hline\n')
# do criteria
if crit_file:
for crit in Crits:
Croutstring = ""
for key in CritKeys:
Croutstring = Croutstring + crit[key] + sep
Croutstring = Croutstring.strip(sep) + end
cr.write(Croutstring + '\n')
# do directions
# get all results with VGPs
VGPs = pmag.get_dictitem(Sites, 'vgp_lat', '', 'F')
VGPs = pmag.get_dictitem(VGPs, 'data_type', 'i',
'T') # get site level stuff
for site in VGPs:
if len(site['er_site_names'].split(":")) == 1:
if 'er_sample_names' not in list(site.keys()):
site['er_sample_names'] = ''
if 'pole_comp_name' not in list(site.keys()):
site['pole_comp_name'] = "A"
if 'average_nn' not in list(site.keys()) and 'average_n' in list(site.keys()):
site['average_nn'] = site['average_n']
if 'average_n_lines' not in list(site.keys()):
site['average_n_lines'] = site['average_nn']
if 'average_n_planes' not in list(site.keys()):
site['average_n_planes'] = ""
Soutstring, Doutstring = "", ""
for key in SiteKeys:
if key in list(site.keys()):
Soutstring = Soutstring + site[key] + sep
Soutstring = Soutstring.strip(sep) + end
sf.write(Soutstring + '\n')
for key in DirKeys:
if key in list(site.keys()):
Doutstring = Doutstring + site[key] + sep
Doutstring = Doutstring.strip(sep) + end
f.write(Doutstring + '\n')
# now do intensities
VADMs = pmag.get_dictitem(Sites, 'vadm', '', 'F')
VADMs = pmag.get_dictitem(VADMs, 'data_type', 'i', 'T')
for site in VADMs: # do results level stuff
if site not in VGPs:
Soutstring = ""
for key in SiteKeys:
if key in list(site.keys()):
Soutstring = Soutstring + site[key] + sep
else:
Soutstring = Soutstring + " " + sep
Soutstring = Soutstring.strip(sep) + end
sf.write(Soutstring + '\n')
if len(site['er_site_names'].split(":")) == 1 and site['data_type'] == 'i':
if 'average_int_sigma_perc' not in list(site.keys()):
site['average_int_sigma_perc'] = "0"
if site["average_int_sigma"] == "":
site["average_int_sigma"] = "0"
if site["average_int_sigma_perc"] == "":
site["average_int_sigma_perc"] = "0"
if site["vadm"] == "":
site["vadm"] = "0"
if site["vadm_sigma"] == "":
site["vadm_sigma"] = "0"
for key in list(site.keys()): # reformat vadms, intensities
if key in Micro:
site[key] = '%7.1f' % (float(site[key]) * 1e6)
if key in Zeta:
site[key] = '%7.1f' % (float(site[key]) * 1e-21)
outstring = ""
for key in IntKeys:
if key not in list(site.keys()):
site[key] = ""
outstring = outstring + site[key] + sep
outstring = outstring.strip(sep) + end + '\n'
fI.write(outstring)
# VDMs=pmag.get_dictitem(Sites,'vdm','','F') # get non-blank VDMs
# for site in VDMs: # do results level stuff
# if len(site['er_site_names'].split(":"))==1:
# if 'average_int_sigma_perc' not in site.keys():site['average_int_sigma_perc']="0"
# if site["average_int_sigma"]=="":site["average_int_sigma"]="0"
# if site["average_int_sigma_perc"]=="":site["average_int_sigma_perc"]="0"
# if site["vadm"]=="":site["vadm"]="0"
# if site["vadm_sigma"]=="":site["vadm_sigma"]="0"
# for key in site.keys(): # reformat vadms, intensities
# if key in Micro: site[key]='%7.1f'%(float(site[key])*1e6)
# if key in Zeta: site[key]='%7.1f'%(float(site[key])*1e-21)
# outstring=""
# for key in IntKeys:
# outstring=outstring+site[key]+sep
# fI.write(outstring.strip(sep)+'\n')
if spec_file:
SpecsInts = pmag.get_dictitem(Specs, 'specimen_int', '', 'F')
for spec in SpecsInts:
spec['trange'] = '%i' % (int(float(spec['measurement_step_min']) - 273)) + \
'-' + '%i' % (int(float(spec['measurement_step_max']) - 273))
meths = spec['magic_method_codes'].split(':')
corrections = ''
for meth in meths:
if 'DA' in meth:
corrections = corrections + meth[3:] + ':'
corrections = corrections.strip(':')
if corrections.strip() == "":
corrections = "None"
spec['corrections'] = corrections
outstring = ""
for key in SpecKeys:
if key in Micro:
spec[key] = '%7.1f' % (float(spec[key]) * 1e6)
if key in Zeta:
spec[key] = '%7.1f' % (float(spec[key]) * 1e-21)
outstring = outstring + spec[key] + sep
fsp.write(outstring.strip(sep) + end + '\n')
#
if latex: # write out the tail stuff
f.write('\hline\n')
sf.write('\hline\n')
fI.write('\hline\n')
f.write('\end{longtable}\n')
sf.write('\end{longtable}\n')
fI.write('\end{longtable}\n')
f.write('\end{document}\n')
sf.write('\end{document}\n')
fI.write('\end{document}\n')
if spec_file:
fsp.write('\hline\n')
fsp.write('\end{longtable}\n')
fsp.write('\end{document}\n')
if crit_file:
cr.write('\hline\n')
cr.write('\end{longtable}\n')
cr.write('\end{document}\n')
f.close()
sf.close()
fI.close()
print('data saved in: ', outfile, Ioutfile, Soutfile)
outfiles = [outfile, Ioutfile, Soutfile]
if spec_file:
fsp.close()
print('specimen data saved in: ', Specout)
outfiles.append(Specout)
if crit_file:
cr.close()
print('Selection criteria saved in: ', Critout)
outfiles.append(Critout)
return True, outfiles | Generate tab delimited output file(s) with result data.
Save output files and return True if successful.
Possible output files: Directions, Intensities, SiteNfo, Criteria,
Specimens
Optional Parameters (defaults are used if not specified)
----------
res_file : name of pmag_results file (default is "pmag_results.txt")
crit_file : name of criteria file (default is "pmag_criteria.txt")
spec_file : name of specimen file (default is "pmag_specimens.txt")
age_file : name of age file (default is "er_ages.txt")
latex : boolean argument to output in LaTeX (default is False)
WD : path to directory that contains input files and takes output (default is current directory, '.') | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L7097-L7456 |
PmagPy/PmagPy | pmagpy/ipmag.py | demag_magic | def demag_magic(path_to_file='.', file_name='magic_measurements.txt',
save=False, save_folder='.', fmt='svg', plot_by='loc',
treat=None, XLP="", individual=None, average_measurements=False,
single_plot=False):
'''
Takes demagnetization data (from magic_measurements file) and outputs
intensity plots (with optional save).
Parameters
-----------
path_to_file : path to directory that contains files (default is current directory, '.')
file_name : name of measurements file (default is 'magic_measurements.txt')
save : boolean argument to save plots (default is False)
save_folder : relative directory where plots will be saved (default is current directory, '.')
fmt : format of saved figures (default is 'svg')
plot_by : specifies what sampling level you wish to plot the data at
('loc' -- plots all samples of the same location on the same plot
'exp' -- plots all samples of the same expedition on the same plot
'site' -- plots all samples of the same site on the same plot
'sample' -- plots all measurements of the same sample on the same plot
'spc' -- plots each specimen individually)
treat : treatment step
'T' = thermal demagnetization
'AF' = alternating field demagnetization
'M' = microwave radiation demagnetization
(default is 'AF')
XLP : filter data by a particular method
individual : This function outputs all plots by default. If plotting by sample
or specimen, you may not wish to see (or wait for) every single plot. You can
therefore specify a particular plot by setting this keyword argument to
a string of the site/sample/specimen name.
average_measurements : Option to average demagnetization measurements by
the grouping specified with the 'plot_by' keyword argument (default is False)
single_plot : Option to output a single plot with all measurements (default is False)
'''
FIG = {} # plot dictionary
FIG['demag'] = 1 # demag is figure 1
in_file, plot_key, LT = os.path.join(
path_to_file, file_name), 'er_location_name', "LT-AF-Z"
XLP = ""
norm = 1
units, dmag_key = 'T', 'treatment_ac_field'
plot_num = 0
if plot_by == 'loc':
plot_key = 'er_location_name'
elif plot_by == 'exp':
plot_key = 'er_expedition_name'
elif plot_by == 'site':
plot_key = 'er_site_name'
elif plot_by == 'sam':
plot_key = 'er_sample_name'
elif plot_by == 'spc':
plot_key = 'er_specimen_name'
if treat != None:
LT = 'LT-' + treat + '-Z' # get lab treatment for plotting
if LT == 'LT-T-Z':
units, dmag_key = 'K', 'treatment_temp'
elif LT == 'LT-AF-Z':
units, dmag_key = 'T', 'treatment_ac_field'
elif LT == 'LT-M-Z':
units, dmag_key = 'J', 'treatment_mw_energy'
else:
units = 'U'
else:
LT = 'LT-AF-Z'
plot_dict = {}
data, file_type = pmag.magic_read(in_file)
sids = pmag.get_specs(data)
plt.figure(num=FIG['demag'], figsize=(5, 5))
print(len(data), ' records read from ', in_file)
#
#
# find desired intensity data
#
# get plotlist
#
plotlist, intlist = [], ['measurement_magnitude', 'measurement_magn_moment',
'measurement_magn_volume', 'measurement_magn_mass']
IntMeths = []
FixData = []
for rec in data:
meths = []
methcodes = rec['magic_method_codes'].split(':')
for meth in methcodes:
meths.append(meth.strip())
for key in list(rec.keys()):
if key in intlist and rec[key] != "":
if key not in IntMeths:
IntMeths.append(key)
if rec[plot_key] not in plotlist and LT in meths:
plotlist.append(rec[plot_key])
if 'measurement_flag' not in list(rec.keys()):
rec['measurement_flag'] = 'g'
FixData.append(rec)
plotlist.sort()
if len(IntMeths) == 0:
print('No intensity information found')
data = FixData
# plot first intensity method found - normalized to initial value anyway -
# doesn't matter which used
int_key = IntMeths[0]
# print plotlist
if individual is not None:
if type(individual) == list or type(individual) == tuple:
plotlist = list(individual)
else:
plotlist = []
plotlist.append(individual)
for plot in plotlist:
print(plot, 'plotting by: ', plot_key)
# fish out all the data for this type of plot
PLTblock = pmag.get_dictitem(data, plot_key, plot, 'T')
# fish out all the dmag for this experiment type
PLTblock = pmag.get_dictitem(PLTblock, 'magic_method_codes', LT, 'has')
# get all with this intensity key non-blank
PLTblock = pmag.get_dictitem(PLTblock, int_key, '', 'F')
if XLP != "":
# reject data with XLP in method_code
PLTblock = pmag.get_dictitem(
PLTblock, 'magic_method_codes', XLP, 'not')
# for plot in plotlist:
if len(PLTblock) > 2:
title = PLTblock[0][plot_key]
spcs = []
for rec in PLTblock:
if rec['er_specimen_name'] not in spcs:
spcs.append(rec['er_specimen_name'])
if average_measurements is False:
for spc in spcs:
# plot specimen by specimen
SPCblock = pmag.get_dictitem(
PLTblock, 'er_specimen_name', spc, 'T')
INTblock = []
for rec in SPCblock:
INTblock.append([float(rec[dmag_key]), 0, 0, float(
rec[int_key]), 1, rec['measurement_flag']])
if len(INTblock) > 2:
pmagplotlib.plot_mag(
FIG['demag'], INTblock, title, 0, units, norm)
else:
AVGblock = {}
for spc in spcs:
# plot specimen by specimen
SPCblock = pmag.get_dictitem(
PLTblock, 'er_specimen_name', spc, 'T')
for rec in SPCblock:
if rec['measurement_flag'] == 'g':
if float(rec[dmag_key]) not in list(AVGblock.keys()):
AVGblock[float(rec[dmag_key])] = [
float(rec[int_key])]
else:
AVGblock[float(rec[dmag_key])].append(
float(rec[int_key]))
INTblock = []
for step in sorted(AVGblock.keys()):
INTblock.append([float(step), 0, 0, old_div(
float(sum(AVGblock[step])), float(len(AVGblock[step]))), 1, 'g'])
pmagplotlib.plot_mag(FIG['demag'], INTblock,
title, 0, units, norm)
if save == True:
plt.savefig(os.path.join(save_folder, title) + '.' + fmt)
if single_plot is False:
plt.show()
if single_plot is True:
plt.show() | python | def demag_magic(path_to_file='.', file_name='magic_measurements.txt',
save=False, save_folder='.', fmt='svg', plot_by='loc',
treat=None, XLP="", individual=None, average_measurements=False,
single_plot=False):
'''
Takes demagnetization data (from magic_measurements file) and outputs
intensity plots (with optional save).
Parameters
-----------
path_to_file : path to directory that contains files (default is current directory, '.')
file_name : name of measurements file (default is 'magic_measurements.txt')
save : boolean argument to save plots (default is False)
save_folder : relative directory where plots will be saved (default is current directory, '.')
fmt : format of saved figures (default is 'svg')
plot_by : specifies what sampling level you wish to plot the data at
('loc' -- plots all samples of the same location on the same plot
'exp' -- plots all samples of the same expedition on the same plot
'site' -- plots all samples of the same site on the same plot
'sample' -- plots all measurements of the same sample on the same plot
'spc' -- plots each specimen individually)
treat : treatment step
'T' = thermal demagnetization
'AF' = alternating field demagnetization
'M' = microwave radiation demagnetization
(default is 'AF')
XLP : filter data by a particular method
individual : This function outputs all plots by default. If plotting by sample
or specimen, you may not wish to see (or wait for) every single plot. You can
therefore specify a particular plot by setting this keyword argument to
a string of the site/sample/specimen name.
average_measurements : Option to average demagnetization measurements by
the grouping specified with the 'plot_by' keyword argument (default is False)
single_plot : Option to output a single plot with all measurements (default is False)
'''
FIG = {} # plot dictionary
FIG['demag'] = 1 # demag is figure 1
in_file, plot_key, LT = os.path.join(
path_to_file, file_name), 'er_location_name', "LT-AF-Z"
XLP = ""
norm = 1
units, dmag_key = 'T', 'treatment_ac_field'
plot_num = 0
if plot_by == 'loc':
plot_key = 'er_location_name'
elif plot_by == 'exp':
plot_key = 'er_expedition_name'
elif plot_by == 'site':
plot_key = 'er_site_name'
elif plot_by == 'sam':
plot_key = 'er_sample_name'
elif plot_by == 'spc':
plot_key = 'er_specimen_name'
if treat != None:
LT = 'LT-' + treat + '-Z' # get lab treatment for plotting
if LT == 'LT-T-Z':
units, dmag_key = 'K', 'treatment_temp'
elif LT == 'LT-AF-Z':
units, dmag_key = 'T', 'treatment_ac_field'
elif LT == 'LT-M-Z':
units, dmag_key = 'J', 'treatment_mw_energy'
else:
units = 'U'
else:
LT = 'LT-AF-Z'
plot_dict = {}
data, file_type = pmag.magic_read(in_file)
sids = pmag.get_specs(data)
plt.figure(num=FIG['demag'], figsize=(5, 5))
print(len(data), ' records read from ', in_file)
#
#
# find desired intensity data
#
# get plotlist
#
plotlist, intlist = [], ['measurement_magnitude', 'measurement_magn_moment',
'measurement_magn_volume', 'measurement_magn_mass']
IntMeths = []
FixData = []
for rec in data:
meths = []
methcodes = rec['magic_method_codes'].split(':')
for meth in methcodes:
meths.append(meth.strip())
for key in list(rec.keys()):
if key in intlist and rec[key] != "":
if key not in IntMeths:
IntMeths.append(key)
if rec[plot_key] not in plotlist and LT in meths:
plotlist.append(rec[plot_key])
if 'measurement_flag' not in list(rec.keys()):
rec['measurement_flag'] = 'g'
FixData.append(rec)
plotlist.sort()
if len(IntMeths) == 0:
print('No intensity information found')
data = FixData
# plot first intensity method found - normalized to initial value anyway -
# doesn't matter which used
int_key = IntMeths[0]
# print plotlist
if individual is not None:
if type(individual) == list or type(individual) == tuple:
plotlist = list(individual)
else:
plotlist = []
plotlist.append(individual)
for plot in plotlist:
print(plot, 'plotting by: ', plot_key)
# fish out all the data for this type of plot
PLTblock = pmag.get_dictitem(data, plot_key, plot, 'T')
# fish out all the dmag for this experiment type
PLTblock = pmag.get_dictitem(PLTblock, 'magic_method_codes', LT, 'has')
# get all with this intensity key non-blank
PLTblock = pmag.get_dictitem(PLTblock, int_key, '', 'F')
if XLP != "":
# reject data with XLP in method_code
PLTblock = pmag.get_dictitem(
PLTblock, 'magic_method_codes', XLP, 'not')
# for plot in plotlist:
if len(PLTblock) > 2:
title = PLTblock[0][plot_key]
spcs = []
for rec in PLTblock:
if rec['er_specimen_name'] not in spcs:
spcs.append(rec['er_specimen_name'])
if average_measurements is False:
for spc in spcs:
# plot specimen by specimen
SPCblock = pmag.get_dictitem(
PLTblock, 'er_specimen_name', spc, 'T')
INTblock = []
for rec in SPCblock:
INTblock.append([float(rec[dmag_key]), 0, 0, float(
rec[int_key]), 1, rec['measurement_flag']])
if len(INTblock) > 2:
pmagplotlib.plot_mag(
FIG['demag'], INTblock, title, 0, units, norm)
else:
AVGblock = {}
for spc in spcs:
# plot specimen by specimen
SPCblock = pmag.get_dictitem(
PLTblock, 'er_specimen_name', spc, 'T')
for rec in SPCblock:
if rec['measurement_flag'] == 'g':
if float(rec[dmag_key]) not in list(AVGblock.keys()):
AVGblock[float(rec[dmag_key])] = [
float(rec[int_key])]
else:
AVGblock[float(rec[dmag_key])].append(
float(rec[int_key]))
INTblock = []
for step in sorted(AVGblock.keys()):
INTblock.append([float(step), 0, 0, old_div(
float(sum(AVGblock[step])), float(len(AVGblock[step]))), 1, 'g'])
pmagplotlib.plot_mag(FIG['demag'], INTblock,
title, 0, units, norm)
if save == True:
plt.savefig(os.path.join(save_folder, title) + '.' + fmt)
if single_plot is False:
plt.show()
if single_plot is True:
plt.show() | Takes demagnetization data (from magic_measurements file) and outputs
intensity plots (with optional save).
Parameters
-----------
path_to_file : path to directory that contains files (default is current directory, '.')
file_name : name of measurements file (default is 'magic_measurements.txt')
save : boolean argument to save plots (default is False)
save_folder : relative directory where plots will be saved (default is current directory, '.')
fmt : format of saved figures (default is 'svg')
plot_by : specifies what sampling level you wish to plot the data at
('loc' -- plots all samples of the same location on the same plot
'exp' -- plots all samples of the same expedition on the same plot
'site' -- plots all samples of the same site on the same plot
'sample' -- plots all measurements of the same sample on the same plot
'spc' -- plots each specimen individually)
treat : treatment step
'T' = thermal demagnetization
'AF' = alternating field demagnetization
'M' = microwave radiation demagnetization
(default is 'AF')
XLP : filter data by a particular method
individual : This function outputs all plots by default. If plotting by sample
or specimen, you may not wish to see (or wait for) every single plot. You can
therefore specify a particular plot by setting this keyword argument to
a string of the site/sample/specimen name.
average_measurements : Option to average demagnetization measurements by
the grouping specified with the 'plot_by' keyword argument (default is False)
single_plot : Option to output a single plot with all measurements (default is False) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L7459-L7627 |
def iplot_hys(fignum, B, M, s):
    """
    Plot hysteresis data and compute basic hysteresis loop parameters.

    This function has been adapted from pmagplotlib.iplot_hys for specific use
    within a Jupyter notebook.

    Parameters
    -----------
    fignum : reference number for matplotlib figure being created
        (pass 0 to skip creating/clearing a figure)
    B : list of B (flux density) values of hysteresis experiment
    M : list of M (magnetization) values of hysteresis experiment
    s : specimen name (currently unused; kept for interface compatibility)

    Returns
    -----------
    hpars : dict with keys 'hysteresis_xhf', 'hysteresis_ms_moment',
        'hysteresis_mr_moment' and 'hysteresis_bc' (values as formatted strings)
    deltaM : list of delta-M values (difference of upper and lower branches)
    Bdm : list of fields at which deltaM was evaluated
    B : the input field list, returned unchanged
    Mnorm : measured moments normalized by Ms
    MadjN : slope- and offset-adjusted moments normalized by Ms
    """
    if fignum != 0:
        plt.figure(num=fignum)
        plt.clf()
    hpars = {}
    # close up loop
    Npts = len(M)
    B70 = 0.7 * B[0]  # 70 percent of maximum field
    for b in B:
        if b < B70:
            break
    Nint = B.index(b) - 1
    # clamp the number of high-field points used for the slope fits to 10..30
    if Nint > 30:
        Nint = 30
    if Nint < 10:
        Nint = 10
    Bzero, Mzero, Mfix, Mnorm, Madj, MadjN = "", "", [], [], [], []
    Mazero = ""
    m_init = 0.5 * (M[0] + M[1])
    m_fin = 0.5 * (M[-1] + M[-2])
    diff = m_fin - m_init  # instrument drift between start and end of loop
    Bmin = 0.
    for k in range(Npts):
        frac = old_div(float(k), float(Npts - 1))
        Mfix.append((M[k] - diff * frac))  # remove drift linearly in index
        if Bzero == "" and B[k] < 0:
            Bzero = k  # first index where the field goes negative
        if B[k] < Bmin:
            Bmin = B[k]
            kmin = k  # index of the minimum (most negative) field
    # adjust slope with first 30 data points (throwing out first 3)
    Bslop = B[2:Nint + 2]
    Mslop = Mfix[2:Nint + 2]
    polyU = polyfit(Bslop, Mslop, 1)  # best fit line to high field points
    # adjust slope with first 30 points of ascending branch
    Bslop = B[kmin:kmin + (Nint + 1)]
    Mslop = Mfix[kmin:kmin + (Nint + 1)]
    polyL = polyfit(Bslop, Mslop, 1)  # best fit line to high field points
    xhf = 0.5 * (polyU[0] + polyL[0])  # mean of two slopes
    # convert B to A/m, high field slope in m^3
    hpars['hysteresis_xhf'] = '%8.2e' % (xhf * 4 * np.pi * 1e-7)
    meanint = 0.5 * (polyU[1] + polyL[1])  # mean of two intercepts
    Msat = 0.5 * (polyU[1] - polyL[1])  # mean of saturation remanence
    Moff = []
    for k in range(Npts):
        # take out linear slope and offset (makes symmetric about origin)
        Moff.append((Mfix[k] - xhf * B[k] - meanint))
        if Mzero == "" and Moff[k] < 0:
            Mzero = k
        if Mzero != "" and Mazero == "" and Moff[k] > 0:
            Mazero = k
    hpars['hysteresis_ms_moment'] = '%8.3e' % (Msat)  # Ms in Am^2
    #
    # split into upper and lower loops for splining
    Mupper, Bupper, Mlower, Blower = [], [], [], []
    deltaM, Bdm = [], []  # diff between upper and lower curves at Bdm
    for k in range(kmin - 2, 0, -2):
        Mupper.append(old_div(Moff[k], Msat))
        Bupper.append(B[k])
    for k in range(kmin + 2, len(B) - 1):
        Mlower.append(Moff[k] / Msat)
        Blower.append(B[k])
    Iupper = spline.Spline(Bupper, Mupper)  # get splines for upper up and down
    Ilower = spline.Spline(Blower, Mlower)  # get splines for lower
    # NOTE(review): np.arange(B[0]) steps by 1 from 0 up to the maximum field,
    # so for fields below 1 T this evaluates at B=0 only -- confirm the
    # intended field grid for the deltaM curve
    for b in np.arange(B[0]):  # get range of field values
        Mpos = ((Iupper(b) - Ilower(b)))  # evaluate on both sides of B
        Mneg = ((Iupper(-b) - Ilower(-b)))
        Bdm.append(b)
        deltaM.append(0.5 * (Mpos + Mneg))  # take average delta M
    for k in range(Npts):
        MadjN.append(old_div(Moff[k], Msat))
        Mnorm.append(old_div(M[k], Msat))
    # find Mr : average of two spline fits evaluated at B=0 (times Msat)
    Mr = Msat * 0.5 * (Iupper(0.) - Ilower(0.))
    hpars['hysteresis_mr_moment'] = '%8.3e' % (Mr)
    # find Bc (x intercept), interpolate between two bounding points
    Bz = B[Mzero - 1:Mzero + 1]
    Mz = Moff[Mzero - 1:Mzero + 1]
    Baz = B[Mazero - 1:Mazero + 1]
    Maz = Moff[Mazero - 1:Mazero + 1]
    try:
        poly = polyfit(Bz, Mz, 1)  # best fit line through two bounding points
        Bc = old_div(-poly[1], poly[0])  # x intercept
        # best fit line through two bounding points
        poly = polyfit(Baz, Maz, 1)
        Bac = old_div(-poly[1], poly[0])  # x intercept
        hpars['hysteresis_bc'] = '%8.3e' % (0.5 * (abs(Bc) + abs(Bac)))
    except Exception:
        # the fit or the division failed (e.g. degenerate bounding points);
        # report Bc as indeterminate rather than crashing
        hpars['hysteresis_bc'] = '0'
    return hpars, deltaM, Bdm, B, Mnorm, MadjN
This function has been adapted from pmagplotlib.iplot_hys for specific use
within a Jupyter notebook.
Parameters
-----------
fignum : reference number for matplotlib figure being created
B : list of B (flux density) values of hysteresis experiment
M : list of M (magnetization) values of hysteresis experiment
s : specimen name | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L7630-L7733 |
def hysteresis_magic2(path_to_file='.', hyst_file="rmag_hysteresis.txt",
                      save=False, save_folder='.',
                      fmt="svg", plots=True):
    """
    Calculates hysteresis parameters, saves them in rmag_hysteresis format file.
    If selected, this function also plots hysteresis loops, delta M curves,
    d (Delta M)/dB curves, and IRM backfield curves.

    Parameters (defaults are used if not specified)
    ----------
    path_to_file : path to directory that contains files (default is current directory, '.')
    hyst_file : hysteresis file (default is 'rmag_hysteresis.txt')
    save : boolean argument to save plots (default is False)
    save_folder : relative directory where plots will be saved (default is current directory, '.')
    fmt : format of saved figures (default is 'svg')
    plots : whether or not to display the plots (default is True)
    """
    user, meas_file, rmag_out, rmag_file = "", "agm_measurements.txt", "rmag_hysteresis.txt", ""
    pltspec = ""
    dir_path = save_folder
    verbose = pmagplotlib.verbose
    version_num = pmag.get_version()
    rmag_out = save_folder + '/' + rmag_out
    meas_file = path_to_file + '/' + hyst_file
    rmag_rem = save_folder + "/rmag_remanence.txt"
    #
    #
    meas_data, file_type = pmag.magic_read(meas_file)
    if file_type != 'magic_measurements':
        # print this function's own usage; the original referenced the
        # undefined name 'hysteresis_magic', which raised a NameError here
        print(hysteresis_magic2.__doc__)
        print('bad file')
        return
    # initialize some variables
    # define figure numbers for hyst,deltaM,DdeltaM curves
    HystRecs, RemRecs = [], []
    HDD = {}
    HDD['hyst'], HDD['deltaM'], HDD['DdeltaM'] = 1, 2, 3
    experiment_names, sids = [], []
    for rec in meas_data:
        meths = rec['magic_method_codes'].split(':')
        methods = []
        for meth in meths:
            methods.append(meth.strip())
        if 'LP-HYS' in methods:
            # synthetic specimens are keyed by their synthetic name
            if 'er_synthetic_name' in list(rec.keys()) and rec['er_synthetic_name'] != "":
                rec['er_specimen_name'] = rec['er_synthetic_name']
            if rec['magic_experiment_name'] not in experiment_names:
                experiment_names.append(rec['magic_experiment_name'])
            if rec['er_specimen_name'] not in sids:
                sids.append(rec['er_specimen_name'])
    #
    fignum = 1
    sample_num = 0
    # initialize variables to record some bulk info in first loop
    first_dcd_rec, first_rec, first_imag_rec = 1, 1, 1
    while sample_num < len(sids):
        sample = sids[sample_num]
        print(sample, sample_num + 1, 'out of ', len(sids))
        # B,M for hysteresis, Bdcd,Mdcd for irm-dcd data
        B, M, Bdcd, Mdcd = [], [], [], []
        Bimag, Mimag = [], []  # Bimag,Mimag for initial magnetization curves
        for rec in meas_data:
            methcodes = rec['magic_method_codes'].split(':')
            meths = []
            for meth in methcodes:
                meths.append(meth.strip())
            if rec['er_specimen_name'] == sample and "LP-HYS" in meths:
                B.append(float(rec['measurement_lab_field_dc']))
                M.append(float(rec['measurement_magn_moment']))
                if first_rec == 1:
                    e = rec['magic_experiment_name']
                    HystRec = {}
                    first_rec = 0
                    if "er_location_name" in list(rec.keys()):
                        HystRec["er_location_name"] = rec["er_location_name"]
                        locname = rec['er_location_name'].replace('/', '-')
                    if "er_sample_name" in list(rec.keys()):
                        HystRec["er_sample_name"] = rec["er_sample_name"]
                    if "er_site_name" in list(rec.keys()):
                        HystRec["er_site_name"] = rec["er_site_name"]
                    if "er_synthetic_name" in list(rec.keys()) and rec['er_synthetic_name'] != "":
                        HystRec["er_synthetic_name"] = rec["er_synthetic_name"]
                    else:
                        HystRec["er_specimen_name"] = rec["er_specimen_name"]
            if rec['er_specimen_name'] == sample and "LP-IRM-DCD" in meths:
                Bdcd.append(float(rec['treatment_dc_field']))
                Mdcd.append(float(rec['measurement_magn_moment']))
                if first_dcd_rec == 1:
                    RemRec = {}
                    irm_exp = rec['magic_experiment_name']
                    first_dcd_rec = 0
                    if "er_location_name" in list(rec.keys()):
                        RemRec["er_location_name"] = rec["er_location_name"]
                    if "er_sample_name" in list(rec.keys()):
                        RemRec["er_sample_name"] = rec["er_sample_name"]
                    if "er_site_name" in list(rec.keys()):
                        RemRec["er_site_name"] = rec["er_site_name"]
                    if "er_synthetic_name" in list(rec.keys()) and rec['er_synthetic_name'] != "":
                        RemRec["er_synthetic_name"] = rec["er_synthetic_name"]
                    else:
                        RemRec["er_specimen_name"] = rec["er_specimen_name"]
            if rec['er_specimen_name'] == sample and "LP-IMAG" in meths:
                if first_imag_rec == 1:
                    imag_exp = rec['magic_experiment_name']
                    first_imag_rec = 0
                Bimag.append(float(rec['measurement_lab_field_dc']))
                Mimag.append(float(rec['measurement_magn_moment']))
        if len(B) > 0:
            hmeths = []
            for meth in meths:
                hmeths.append(meth)
            # fignum = 1
            fig = plt.figure(figsize=(8, 8))
            hpars, deltaM, Bdm, B, Mnorm, MadjN = iplot_hys(1, B, M, sample)
            ax1 = fig.add_subplot(2, 2, 1)
            ax1.axhline(0, color='k')
            ax1.axvline(0, color='k')
            ax1.plot(B, Mnorm, 'r')
            ax1.plot(B, MadjN, 'b')
            ax1.set_xlabel('B (T)')
            ax1.set_ylabel("M/Msat")
            # ax1.set_title(sample)
            ax1.set_xlim(-1, 1)
            ax1.set_ylim(-1, 1)
            bounds = ax1.axis()
            n4 = 'Ms: ' + \
                '%8.2e' % (float(hpars['hysteresis_ms_moment'])) + ' Am^2'
            ax1.text(bounds[1] - .9 * bounds[1], -.9, n4, fontsize=9)
            n1 = 'Mr: ' + \
                '%8.2e' % (float(hpars['hysteresis_mr_moment'])) + ' Am^2'
            ax1.text(bounds[1] - .9 * bounds[1], -.7, n1, fontsize=9)
            n2 = 'Bc: ' + '%8.2e' % (float(hpars['hysteresis_bc'])) + ' T'
            ax1.text(bounds[1] - .9 * bounds[1], -.5, n2, fontsize=9)
            if 'hysteresis_xhf' in list(hpars.keys()):
                n3 = r'Xhf: ' + \
                    '%8.2e' % (float(hpars['hysteresis_xhf'])) + ' m^3'
                ax1.text(bounds[1] - .9 * bounds[1], -.3, n3, fontsize=9)
            # plt.subplot(1,2,2)
            # plt.subplot(1,3,3)
            DdeltaM = []
            Mhalf = ""
            for k in range(2, len(Bdm)):
                # differential of the deltaM curve
                DdeltaM.append(
                    old_div(abs(deltaM[k] - deltaM[k - 2]), (Bdm[k] - Bdm[k - 2])))
            for k in range(len(deltaM)):
                if old_div(deltaM[k], deltaM[0]) < 0.5:
                    Mhalf = k
                    break
            try:
                Bhf = Bdm[Mhalf - 1:Mhalf + 1]
                Mhf = deltaM[Mhalf - 1:Mhalf + 1]
                # best fit line through two bounding points
                poly = polyfit(Bhf, Mhf, 1)
                Bcr = old_div((.5 * deltaM[0] - poly[1]), poly[0])
                hpars['hysteresis_bcr'] = '%8.3e' % (Bcr)
                hpars['magic_method_codes'] = "LP-BCR-HDM"
                if HDD['deltaM'] != 0:
                    ax2 = fig.add_subplot(2, 2, 2)
                    ax2.plot(Bdm, deltaM, 'b')
                    ax2.set_xlabel('B (T)')
                    ax2.set_ylabel('Delta M')
                    linex = [0, Bcr, Bcr]
                    liney = [old_div(deltaM[0], 2.), old_div(deltaM[0], 2.), 0]
                    ax2.plot(linex, liney, 'r')
                    # ax2.set_title(sample)
                    ax3 = fig.add_subplot(2, 2, 3)
                    ax3.plot(Bdm[(len(Bdm) - len(DdeltaM)):], DdeltaM, 'b')
                    ax3.set_xlabel('B (T)')
                    ax3.set_ylabel('d (Delta M)/dB')
                    # ax3.set_title(sample)
                    ax4 = fig.add_subplot(2, 2, 4)
                    ax4.plot(Bdcd, Mdcd)
                    ax4.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
                    ax4.axhline(0, color='k')
                    ax4.axvline(0, color='k')
                    ax4.set_xlabel('B (T)')
                    ax4.set_ylabel('M/Mr')
            except Exception:
                # Bcr could not be determined (e.g. deltaM never fell below
                # half its initial value, leaving Mhalf as ""); record zeros
                print("not doing it")
                hpars['hysteresis_bcr'] = '0'
                hpars['magic_method_codes'] = ""
            plt.gcf()
            plt.gca()
            plt.tight_layout()
            if save:
                plt.savefig(save_folder + '/' + sample + '_hysteresis.' + fmt)
            if plots:
                # honor the 'plots' flag documented above (previously ignored)
                plt.show()
        sample_num += 1
If selected, this function also plots hysteresis loops, delta M curves,
d (Delta M)/dB curves, and IRM backfield curves.
Parameters (defaults are used if not specified)
----------
path_to_file : path to directory that contains files (default is current directory, '.')
hyst_file : hysteresis file (default is 'rmag_hysteresis.txt')
save : boolean argument to save plots (default is False)
save_folder : relative directory where plots will be saved (default is current directory, '.')
fmt : format of saved figures (default is 'pdf')
plots: whether or not to display the plots (default is true) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L7736-L7926 |
def find_ei(data, nb=1000, save=False, save_folder='.', fmt='svg',
            site_correction=False, return_new_dirs=False):
    """
    Applies series of assumed flattening factor and "unsquishes" inclinations assuming tangent function.
    Finds flattening factor that gives elongation/inclination pair consistent with TK03;
    or, if correcting by site instead of for study-level secular variation,
    finds flattening factor that minimizes elongation and most resembles a
    Fisherian distribution.
    Finds bootstrap confidence bounds

    Required Parameter
    -----------
    data: a nested list of dec/inc pairs

    Optional Parameters (defaults are used unless specified)
    -----------
    nb: number of bootstrapped pseudo-samples (default is 1000)
    save: Boolean argument to save plots (default is False)
    save_folder: path to folder in which plots should be saved (default is current directory)
    fmt: specify format of saved plots (default is 'svg')
    site_correction: Boolean argument to specify whether to "unsquish" data to
    1) the elongation/inclination pair consistent with TK03 secular variation model
    (site_correction = False)
    or
    2) a Fisherian distribution (site_correction = True). Default is FALSE.
    Note that many directions (~ 100) are needed for this correction to be reliable.
    return_new_dirs: optional return of newly "unflattened" directions (default is False)

    Returns
    -----------
    four plots: 1) equal area plot of original directions
    2) Elongation/inclination pairs as a function of f, data plus 25 bootstrap samples
    3) Cumulative distribution of bootstrapped optimal inclinations plus uncertainties.
    Estimate from original data set plotted as solid line
    4) Orientation of principal direction through unflattening

    NOTE: If distribution does not have a solution, plot labeled: Pathological. Some bootstrap samples may have
    valid solutions and those are plotted in the CDFs and E/I plot.
    """
    print("Bootstrapping.... be patient")
    print("")
    sys.stdout.flush()
    upper, lower = int(round(.975 * nb)), int(round(.025 * nb))
    E, I = [], []
    plt.figure(num=1, figsize=(4, 4))
    plot_net(1)
    plot_di(di_block=data)
    plt.title('Original')
    ppars = pmag.doprinc(data)
    Io = ppars['inc']
    n = ppars["N"]
    Es, Is, Fs, V2s = pmag.find_f(data)
    if site_correction == True:
        # pick the f that minimizes elongation
        Inc, Elong = Is[Es.index(min(Es))], Es[Es.index(min(Es))]
        flat_f = Fs[Es.index(min(Es))]
    else:
        # pick the f where the E/I path crosses the TK03 curve
        Inc, Elong = Is[-1], Es[-1]
        flat_f = Fs[-1]
    plt.figure(num=2, figsize=(4, 4))
    plt.plot(Is, Es, 'r')
    plt.xlabel("Inclination")
    plt.ylabel("Elongation")
    plt.text(Inc, Elong, ' %3.1f' % (flat_f))
    plt.text(Is[0] - 2, Es[0], ' %s' % ('f=1'))
    b = 0
    while b < nb:
        bdata = pmag.pseudo(data)
        Esb, Isb, Fsb, V2sb = pmag.find_f(bdata)
        if b < 25:
            plt.plot(Isb, Esb, 'y')  # show the first 25 bootstrap paths
        if Esb[-1] != 0:
            ppars = pmag.doprinc(bdata)
            if site_correction == True:
                I.append(abs(Isb[Esb.index(min(Esb))]))
                E.append(Esb[Esb.index(min(Esb))])
            else:
                I.append(abs(Isb[-1]))
                E.append(Esb[-1])
        b += 1
    I.sort()
    E.sort()
    Eexp = []
    for i in I:
        Eexp.append(pmag.EI(i))
    plt.plot(I, Eexp, 'g-')
    if Inc == 0:
        title = 'Pathological Distribution: ' + \
            '[%7.1f, %7.1f]' % (I[lower], I[upper])
    else:
        title = '%7.1f [%7.1f, %7.1f]' % (Inc, I[lower], I[upper])
    cdf_fig_num = 3
    plt.figure(num=cdf_fig_num, figsize=(4, 4))
    pmagplotlib.plot_cdf(cdf_fig_num, I, 'Inclinations', 'r', title)
    pmagplotlib.plot_vs(cdf_fig_num, [I[lower], I[upper]], 'b', '--')
    pmagplotlib.plot_vs(cdf_fig_num, [Inc], 'g', '-')
    pmagplotlib.plot_vs(cdf_fig_num, [Io], 'k', '-')
    # plot corrected directional data
    di_lists = unpack_di_block(data)
    if len(di_lists) == 3:
        decs, incs, intensity = di_lists
    if len(di_lists) == 2:
        decs, incs = di_lists
    if flat_f:
        unsquished_incs = unsquish(incs, flat_f)
        plt.figure(num=4, figsize=(4, 4))
        plot_net(4)
        plot_di(decs, unsquished_incs)
        plt.title('Corrected for flattening')
    else:
        # pathological case (f = 0): no unflattening is possible, so plot and
        # (optionally) return the directions unchanged instead of raising a
        # NameError on 'unsquished_incs' below
        unsquished_incs = incs
        plt.figure(num=4, figsize=(4, 4))
        plot_net(4)
        plot_di(decs, incs)
        plt.title('Corrected for flattening')
    if save:
        # honor the save/save_folder/fmt arguments promised in the docstring
        # (previously accepted but never used)
        figure_names = {1: 'original_directions', 2: 'EI_curve',
                        3: 'inclination_CDF', 4: 'corrected_directions'}
        for fig_num, fig_name in figure_names.items():
            plt.figure(num=fig_num)
            plt.savefig(os.path.join(save_folder, fig_name + '.' + fmt))
    if (Inc, Elong, flat_f) == (0, 0, 0):
        print("PATHOLOGICAL DISTRIBUTION")
    print("The original inclination was: " + str(Io))
    print("")
    print("The corrected inclination is: " + str(Inc))
    print("with bootstrapped confidence bounds of: " +
          str(I[lower]) + ' to ' + str(I[upper]))
    print("and elongation parameter of: " + str(Elong))
    print("The flattening factor is: " + str(flat_f))
    if return_new_dirs is True:
        return make_di_block(decs, unsquished_incs)
Finds flattening factor that gives elongation/inclination pair consistent with TK03;
or, if correcting by site instead of for study-level secular variation,
finds flattening factor that minimizes elongation and most resembles a
Fisherian distribution.
Finds bootstrap confidence bounds
Required Parameter
-----------
data: a nested list of dec/inc pairs
Optional Parameters (defaults are used unless specified)
-----------
nb: number of bootstrapped pseudo-samples (default is 1000)
save: Boolean argument to save plots (default is False)
save_folder: path to folder in which plots should be saved (default is current directory)
fmt: specify format of saved plots (default is 'svg')
site_correction: Boolean argument to specify whether to "unsquish" data to
1) the elongation/inclination pair consistent with TK03 secular variation model
(site_correction = False)
or
2) a Fisherian distribution (site_correction = True). Default is FALSE.
Note that many directions (~ 100) are needed for this correction to be reliable.
return_new_dirs: optional return of newly "unflattened" directions (default is False)
Returns
-----------
four plots: 1) equal area plot of original directions
2) Elongation/inclination pairs as a function of f, data plus 25 bootstrap samples
3) Cumulative distribution of bootstrapped optimal inclinations plus uncertainties.
Estimate from original data set plotted as solid line
4) Orientation of principal direction through unflattening
NOTE: If distribution does not have a solution, plot labeled: Pathological. Some bootstrap samples may have
valid solutions and those are plotted in the CDFs and E/I plot. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L7929-L8062 |
PmagPy/PmagPy | pmagpy/ipmag.py | plate_rate_mc | def plate_rate_mc(pole1_plon, pole1_plat, pole1_kappa, pole1_N, pole1_age, pole1_age_error,
pole2_plon, pole2_plat, pole2_kappa, pole2_N, pole2_age, pole2_age_error,
ref_loc_lon, ref_loc_lat, samplesize=10000, random_seed=None, plot=True,
savefig=True, save_directory='./', figure_name=''):
"""
Determine the latitudinal motion implied by a pair of poles and utilize
the Monte Carlo sampling method of Swanson-Hysell (2014) to determine the
associated uncertainty.
Parameters:
------------
plon : longitude of pole
plat : latitude of pole
kappa : Fisher precision parameter for VPGs in pole
N : number of VGPs in pole
age : age assigned to pole in Ma
age_error : 1 sigma age uncertainty in million years
ref_loc_lon : longitude of reference location
ref_loc_lat : latitude of reference location
samplesize : number of draws from pole and age distributions (default set to 10000)
random_seed : set random seed for reproducible number generation (default is None)
plot : whether to make figures (default is True, optional)
savefig : whether to save figures (default is True, optional)
save_directory = default is local directory (optional)
figure_name = prefix for file names (optional)
Returns
--------
rate : rate of latitudinal motion in cm/yr along with estimated 2.5 and 97.5
percentile rate estimates
"""
ref_loc = [ref_loc_lon, ref_loc_lat]
pole1 = (pole1_plon, pole1_plat)
pole1_paleolat = 90 - pmag.angle(pole1, ref_loc)
pole2 = (pole2_plon, pole2_plat)
pole2_paleolat = 90 - pmag.angle(pole2, ref_loc)
print("The paleolatitude for ref_loc resulting from pole 1 is:" +
str(pole1_paleolat))
print("The paleolatitude for ref_loc resulting from pole 2 is:" +
str(pole2_paleolat))
rate = old_div(((pole1_paleolat - pole2_paleolat) * 111 *
100000), ((pole1_age - pole2_age) * 1000000))
print("The rate of paleolatitudinal change implied by the poles pairs in cm/yr is:" + str(rate))
if random_seed != None:
np.random.seed(random_seed)
pole1_MCages = np.random.normal(pole1_age, pole1_age_error, samplesize)
pole2_MCages = np.random.normal(pole2_age, pole2_age_error, samplesize)
plt.hist(pole1_MCages, 100, histtype='stepfilled',
color='darkred', label='Pole 1 ages')
plt.hist(pole2_MCages, 100, histtype='stepfilled',
color='darkblue', label='Pole 2 ages')
plt.xlabel('Age (Ma)')
plt.ylabel('n')
plt.legend(loc=3)
if savefig == True:
plot_extension = '_1.svg'
plt.savefig(save_directory + figure_name + plot_extension)
plt.show()
pole1_MCpoles = []
pole1_MCpole_lat = []
pole1_MCpole_long = []
pole1_MCpaleolat = []
for n in range(samplesize):
vgp_samples = []
for vgp in range(pole1_N):
# pmag.dev returns a direction from a fisher distribution with
# specified kappa
direction_atN = pmag.fshdev(pole1_kappa)
# this direction is centered at latitude of 90 degrees and needs to be rotated
# to be centered on the mean pole position
tilt_direction = pole1_plon
tilt_amount = 90 - pole1_plat
direction = pmag.dotilt(
direction_atN[0], direction_atN[1], tilt_direction, tilt_amount)
vgp_samples.append([direction[0], direction[1], 1.])
mean = pmag.fisher_mean(vgp_samples)
mean_pole_position = (mean['dec'], mean['inc'])
pole1_MCpoles.append([mean['dec'], mean['inc'], 1.])
pole1_MCpole_lat.append(mean['inc'])
pole1_MCpole_long.append(mean['dec'])
paleolat = 90 - pmag.angle(mean_pole_position, ref_loc)
pole1_MCpaleolat.append(paleolat[0])
pole2_MCpoles = []
pole2_MCpole_lat = []
pole2_MCpole_long = []
pole2_MCpaleolat = []
for n in range(samplesize):
vgp_samples = []
for vgp in range(pole2_N):
# pmag.dev returns a direction from a fisher distribution with
# specified kappa
direction_atN = pmag.fshdev(pole2_kappa)
# this direction is centered at latitude of 90 degrees and needs to be rotated
# to be centered on the mean pole position
tilt_direction = pole2_plon
tilt_amount = 90 - pole2_plat
direction = pmag.dotilt(
direction_atN[0], direction_atN[1], tilt_direction, tilt_amount)
vgp_samples.append([direction[0], direction[1], 1.])
mean = pmag.fisher_mean(vgp_samples)
mean_pole_position = (mean['dec'], mean['inc'])
pole2_MCpoles.append([mean['dec'], mean['inc'], 1.])
pole2_MCpole_lat.append(mean['inc'])
pole2_MCpole_long.append(mean['dec'])
paleolat = 90 - pmag.angle(mean_pole_position, ref_loc)
pole2_MCpaleolat.append(paleolat[0])
if plot is True:
plt.figure(figsize=(5, 5))
map_axis = make_mollweide_map()
plot_vgp(map_axis, pole1_MCpole_long, pole1_MCpole_lat, color='b')
plot_vgp(map_axis, pole2_MCpole_long, pole2_MCpole_lat, color='g')
if savefig == True:
plot_extension = '_2.svg'
plt.savefig(save_directory + figure_name + plot_extension)
plt.show()
# calculating the change in paleolatitude between the Monte Carlo pairs
pole1_pole2_Delta_degrees = []
pole1_pole2_Delta_kilometers = []
pole1_pole2_Delta_myr = []
pole1_pole2_degrees_per_myr = []
pole1_pole2_cm_per_yr = []
for n in range(samplesize):
Delta_degrees = pole1_MCpaleolat[n] - pole2_MCpaleolat[n]
Delta_Myr = pole1_MCages[n] - pole2_MCages[n]
pole1_pole2_Delta_degrees.append(Delta_degrees)
degrees_per_myr = old_div(Delta_degrees, Delta_Myr)
cm_per_yr = old_div(((Delta_degrees * 111) * 100000),
(Delta_Myr * 1000000))
pole1_pole2_degrees_per_myr.append(degrees_per_myr)
pole1_pole2_cm_per_yr.append(cm_per_yr)
if plot is True:
plotnumber = 100
plt.figure(num=None, figsize=(10, 4))
plt.subplot(1, 2, 1)
for n in range(plotnumber):
plt.plot([pole1_MCpaleolat[n], pole2_MCpaleolat[n]],
[pole1_MCages[n], pole2_MCages[n]], 'k-', linewidth=0.1, alpha=0.3)
plt.scatter(pole1_MCpaleolat[:plotnumber],
pole1_MCages[:plotnumber], color='b', s=3)
plt.scatter(pole1_paleolat, pole1_age, color='lightblue',
s=100, edgecolor='w', zorder=10000)
plt.scatter(pole2_MCpaleolat[:plotnumber],
pole2_MCages[:plotnumber], color='g', s=3)
plt.scatter(pole2_paleolat, pole2_age, color='lightgreen',
s=100, edgecolor='w', zorder=10000)
plt.plot([pole1_paleolat, pole2_paleolat], [
pole1_age, pole2_age], 'w-', linewidth=2)
plt.gca().invert_yaxis()
plt.xlabel('paleolatitude (degrees)', size=14)
plt.ylabel('time (Ma)', size=14)
plt.subplot(1, 2, 2)
plt.hist(pole1_pole2_cm_per_yr, bins=600)
plt.ylabel('n', size=14)
plt.xlabel('latitudinal drift rate (cm/yr)', size=14)
# plt.xlim([0,90])
if savefig == True:
plot_extension = '_3.svg'
plt.savefig(save_directory + figure_name + plot_extension)
plt.show()
twopointfive_percentile = stats.scoreatpercentile(
pole1_pole2_cm_per_yr, 2.5)
fifty_percentile = stats.scoreatpercentile(pole1_pole2_cm_per_yr, 50)
ninetysevenpointfive_percentile = stats.scoreatpercentile(
pole1_pole2_cm_per_yr, 97.5)
print("2.5th percentile is: " +
str(round(twopointfive_percentile, 2)) + " cm/yr")
print("50th percentile is: " + str(round(fifty_percentile, 2)) + " cm/yr")
print("97.5th percentile is: " +
str(round(ninetysevenpointfive_percentile, 2)) + " cm/yr")
return rate[0], twopointfive_percentile, ninetysevenpointfive_percentile | python | def plate_rate_mc(pole1_plon, pole1_plat, pole1_kappa, pole1_N, pole1_age, pole1_age_error,
pole2_plon, pole2_plat, pole2_kappa, pole2_N, pole2_age, pole2_age_error,
ref_loc_lon, ref_loc_lat, samplesize=10000, random_seed=None, plot=True,
savefig=True, save_directory='./', figure_name=''):
"""
Determine the latitudinal motion implied by a pair of poles and utilize
the Monte Carlo sampling method of Swanson-Hysell (2014) to determine the
associated uncertainty.
Parameters:
------------
plon : longitude of pole
plat : latitude of pole
kappa : Fisher precision parameter for VPGs in pole
N : number of VGPs in pole
age : age assigned to pole in Ma
age_error : 1 sigma age uncertainty in million years
ref_loc_lon : longitude of reference location
ref_loc_lat : latitude of reference location
samplesize : number of draws from pole and age distributions (default set to 10000)
random_seed : set random seed for reproducible number generation (default is None)
plot : whether to make figures (default is True, optional)
savefig : whether to save figures (default is True, optional)
save_directory = default is local directory (optional)
figure_name = prefix for file names (optional)
Returns
--------
rate : rate of latitudinal motion in cm/yr along with estimated 2.5 and 97.5
percentile rate estimates
"""
ref_loc = [ref_loc_lon, ref_loc_lat]
pole1 = (pole1_plon, pole1_plat)
pole1_paleolat = 90 - pmag.angle(pole1, ref_loc)
pole2 = (pole2_plon, pole2_plat)
pole2_paleolat = 90 - pmag.angle(pole2, ref_loc)
print("The paleolatitude for ref_loc resulting from pole 1 is:" +
str(pole1_paleolat))
print("The paleolatitude for ref_loc resulting from pole 2 is:" +
str(pole2_paleolat))
rate = old_div(((pole1_paleolat - pole2_paleolat) * 111 *
100000), ((pole1_age - pole2_age) * 1000000))
print("The rate of paleolatitudinal change implied by the poles pairs in cm/yr is:" + str(rate))
if random_seed != None:
np.random.seed(random_seed)
pole1_MCages = np.random.normal(pole1_age, pole1_age_error, samplesize)
pole2_MCages = np.random.normal(pole2_age, pole2_age_error, samplesize)
plt.hist(pole1_MCages, 100, histtype='stepfilled',
color='darkred', label='Pole 1 ages')
plt.hist(pole2_MCages, 100, histtype='stepfilled',
color='darkblue', label='Pole 2 ages')
plt.xlabel('Age (Ma)')
plt.ylabel('n')
plt.legend(loc=3)
if savefig == True:
plot_extension = '_1.svg'
plt.savefig(save_directory + figure_name + plot_extension)
plt.show()
pole1_MCpoles = []
pole1_MCpole_lat = []
pole1_MCpole_long = []
pole1_MCpaleolat = []
for n in range(samplesize):
vgp_samples = []
for vgp in range(pole1_N):
# pmag.dev returns a direction from a fisher distribution with
# specified kappa
direction_atN = pmag.fshdev(pole1_kappa)
# this direction is centered at latitude of 90 degrees and needs to be rotated
# to be centered on the mean pole position
tilt_direction = pole1_plon
tilt_amount = 90 - pole1_plat
direction = pmag.dotilt(
direction_atN[0], direction_atN[1], tilt_direction, tilt_amount)
vgp_samples.append([direction[0], direction[1], 1.])
mean = pmag.fisher_mean(vgp_samples)
mean_pole_position = (mean['dec'], mean['inc'])
pole1_MCpoles.append([mean['dec'], mean['inc'], 1.])
pole1_MCpole_lat.append(mean['inc'])
pole1_MCpole_long.append(mean['dec'])
paleolat = 90 - pmag.angle(mean_pole_position, ref_loc)
pole1_MCpaleolat.append(paleolat[0])
pole2_MCpoles = []
pole2_MCpole_lat = []
pole2_MCpole_long = []
pole2_MCpaleolat = []
for n in range(samplesize):
vgp_samples = []
for vgp in range(pole2_N):
# pmag.dev returns a direction from a fisher distribution with
# specified kappa
direction_atN = pmag.fshdev(pole2_kappa)
# this direction is centered at latitude of 90 degrees and needs to be rotated
# to be centered on the mean pole position
tilt_direction = pole2_plon
tilt_amount = 90 - pole2_plat
direction = pmag.dotilt(
direction_atN[0], direction_atN[1], tilt_direction, tilt_amount)
vgp_samples.append([direction[0], direction[1], 1.])
mean = pmag.fisher_mean(vgp_samples)
mean_pole_position = (mean['dec'], mean['inc'])
pole2_MCpoles.append([mean['dec'], mean['inc'], 1.])
pole2_MCpole_lat.append(mean['inc'])
pole2_MCpole_long.append(mean['dec'])
paleolat = 90 - pmag.angle(mean_pole_position, ref_loc)
pole2_MCpaleolat.append(paleolat[0])
if plot is True:
plt.figure(figsize=(5, 5))
map_axis = make_mollweide_map()
plot_vgp(map_axis, pole1_MCpole_long, pole1_MCpole_lat, color='b')
plot_vgp(map_axis, pole2_MCpole_long, pole2_MCpole_lat, color='g')
if savefig == True:
plot_extension = '_2.svg'
plt.savefig(save_directory + figure_name + plot_extension)
plt.show()
# calculating the change in paleolatitude between the Monte Carlo pairs
pole1_pole2_Delta_degrees = []
pole1_pole2_Delta_kilometers = []
pole1_pole2_Delta_myr = []
pole1_pole2_degrees_per_myr = []
pole1_pole2_cm_per_yr = []
for n in range(samplesize):
Delta_degrees = pole1_MCpaleolat[n] - pole2_MCpaleolat[n]
Delta_Myr = pole1_MCages[n] - pole2_MCages[n]
pole1_pole2_Delta_degrees.append(Delta_degrees)
degrees_per_myr = old_div(Delta_degrees, Delta_Myr)
cm_per_yr = old_div(((Delta_degrees * 111) * 100000),
(Delta_Myr * 1000000))
pole1_pole2_degrees_per_myr.append(degrees_per_myr)
pole1_pole2_cm_per_yr.append(cm_per_yr)
if plot is True:
plotnumber = 100
plt.figure(num=None, figsize=(10, 4))
plt.subplot(1, 2, 1)
for n in range(plotnumber):
plt.plot([pole1_MCpaleolat[n], pole2_MCpaleolat[n]],
[pole1_MCages[n], pole2_MCages[n]], 'k-', linewidth=0.1, alpha=0.3)
plt.scatter(pole1_MCpaleolat[:plotnumber],
pole1_MCages[:plotnumber], color='b', s=3)
plt.scatter(pole1_paleolat, pole1_age, color='lightblue',
s=100, edgecolor='w', zorder=10000)
plt.scatter(pole2_MCpaleolat[:plotnumber],
pole2_MCages[:plotnumber], color='g', s=3)
plt.scatter(pole2_paleolat, pole2_age, color='lightgreen',
s=100, edgecolor='w', zorder=10000)
plt.plot([pole1_paleolat, pole2_paleolat], [
pole1_age, pole2_age], 'w-', linewidth=2)
plt.gca().invert_yaxis()
plt.xlabel('paleolatitude (degrees)', size=14)
plt.ylabel('time (Ma)', size=14)
plt.subplot(1, 2, 2)
plt.hist(pole1_pole2_cm_per_yr, bins=600)
plt.ylabel('n', size=14)
plt.xlabel('latitudinal drift rate (cm/yr)', size=14)
# plt.xlim([0,90])
if savefig == True:
plot_extension = '_3.svg'
plt.savefig(save_directory + figure_name + plot_extension)
plt.show()
twopointfive_percentile = stats.scoreatpercentile(
pole1_pole2_cm_per_yr, 2.5)
fifty_percentile = stats.scoreatpercentile(pole1_pole2_cm_per_yr, 50)
ninetysevenpointfive_percentile = stats.scoreatpercentile(
pole1_pole2_cm_per_yr, 97.5)
print("2.5th percentile is: " +
str(round(twopointfive_percentile, 2)) + " cm/yr")
print("50th percentile is: " + str(round(fifty_percentile, 2)) + " cm/yr")
print("97.5th percentile is: " +
str(round(ninetysevenpointfive_percentile, 2)) + " cm/yr")
return rate[0], twopointfive_percentile, ninetysevenpointfive_percentile | Determine the latitudinal motion implied by a pair of poles and utilize
the Monte Carlo sampling method of Swanson-Hysell (2014) to determine the
associated uncertainty.
Parameters:
------------
plon : longitude of pole
plat : latitude of pole
kappa : Fisher precision parameter for VGPs in pole
N : number of VGPs in pole
age : age assigned to pole in Ma
age_error : 1 sigma age uncertainty in million years
ref_loc_lon : longitude of reference location
ref_loc_lat : latitude of reference location
samplesize : number of draws from pole and age distributions (default set to 10000)
random_seed : set random seed for reproducible number generation (default is None)
plot : whether to make figures (default is True, optional)
savefig : whether to save figures (default is True, optional)
save_directory = default is local directory (optional)
figure_name = prefix for file names (optional)
Returns
--------
rate : rate of latitudinal motion in cm/yr along with estimated 2.5 and 97.5
percentile rate estimates | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L8065-L8245 |
def zeq(path_to_file='.', file='', data="", units='U', calculation_type="DE-BFL",
        save=False, save_folder='.', fmt='svg', begin_pca="", end_pca="", angle=0):
    """
    NAME
       zeq.py

    DESCRIPTION
       Plots demagnetization data for a single specimen:
       - The solid (open) symbols in the Zijderveld diagram are X,Y (X,Z) pairs.
         If the principal direction is desired, specify begin_pca and end_pca
         steps as bounds for the calculation.
       - The equal area projection has the X direction (usually North in
         geographic coordinates) to the top. The red line is the X axis of the
         Zijderveld diagram. Solid symbols are lower hemisphere.
       - In the demagnetization diagram, red dots and the blue line are the
         remanence remaining after each step; the green line is the fraction of
         the total remanence removed in each interval.

    INPUT FORMAT
       Reads from file or takes a pandas DataFrame via `data`
       (NOTE(review): when `data` is supplied directly, its columns are assumed
       to already be ordered treatment, declination, inclination, intensity,
       type, quality — confirm against callers).

    Keywords:
        file : a space- or tab-delimited file with columns
            specimen treatment intensity declination inclination
        units : [mT, C] specify units of mT OR C; default is unscaled
        save : [True, False] save figure and quit; default is False
        save_folder : directory in which saved plots are written (default '.')
        fmt : [svg, jpg, png, pdf] figure format (default svg)
        begin_pca : [step number] treatment step for beginning of PCA calculation
        end_pca : [step number] treatment step for end of PCA calculation;
            last step is default
        calculation_type : [DE-BFL, DE-BFP, DE-FM] best-fit line, plane or
            Fisher mean; line is default
        angle : [0-360] angle to subtract from declination to rotate in the
            horizontal plane; default is 0
    """
    # map the demag units to the SI label used on the plots; unknown units
    # fall back to "U" (unscaled) instead of leaving SIunits unbound
    SIunits = {"C": "K", "mT": "T"}.get(units, "U")
    s = ""  # specimen name; only known when reading from a file
    if file != "":
        f = pd.read_csv(os.path.join(path_to_file, file),
                        delim_whitespace=True, header=None)
        f.columns = ['specimen', 'treatment',
                     'intensity', 'declination', 'inclination']
        # adjust for angle rotation
        f['declination'] = (f['declination']-angle) % 360
        f['quality'] = 'g'
        f['type'] = ''
        s = f['specimen'].tolist()[0]
        # convert treatment steps to SI (T or K)
        if units == 'mT':
            f['treatment'] = f['treatment']*1e-3
        if units == 'C':
            f['treatment'] = f['treatment']+273
        data = f[['treatment', 'declination',
                  'inclination', 'intensity', 'type', 'quality']]
        print(s)
    datablock = data.values.tolist()
    # define figure numbers in a dictionary for equal area, zijderveld,
    # and intensity vs. demagnetization step respectively
    ZED = {}
    ZED['eqarea'], ZED['zijd'], ZED['demag'] = 2, 1, 3
    plt.figure(num=ZED['zijd'], figsize=(5, 5))
    plt.figure(num=ZED['eqarea'], figsize=(5, 5))
    plt.figure(num=ZED['demag'], figsize=(5, 5))
    pmagplotlib.plot_zed(ZED, datablock, angle, s, SIunits)  # plot the data
    #
    # print out data for this sample to screen
    #
    recnum = 0
    print('step treat intensity dec inc')
    for plotrec in datablock:
        if units == 'mT':
            print('%i %7.1f %8.3e %7.1f %7.1f ' %
                  (recnum, plotrec[0]*1e3, plotrec[3], plotrec[1], plotrec[2]))
        if units == 'C':
            print('%i %7.1f %8.3e %7.1f %7.1f ' %
                  (recnum, plotrec[0]-273., plotrec[3], plotrec[1], plotrec[2]))
        if units == 'U':
            print('%i %7.1f %8.3e %7.1f %7.1f ' %
                  (recnum, plotrec[0], plotrec[3], plotrec[1], plotrec[2]))
        recnum += 1
    pmagplotlib.draw_figs(ZED)
    if begin_pca != "" and end_pca != "" and calculation_type != "":
        pmagplotlib.plot_zed(ZED, datablock, angle, s,
                             SIunits)  # plot the data
        # get best-fit direction/great circle
        mpars = pmag.domean(datablock, begin_pca, end_pca, calculation_type)
        # plot the best-fit direction/great circle
        pmagplotlib.plot_dir(ZED, mpars, datablock, angle)
        print('Specimen, calc_type, N, min, max, MAD, dec, inc')
        if units == 'mT':
            print('%s %s %i %6.2f %6.2f %6.1f %7.1f %7.1f' % (s, calculation_type,
                                                              mpars["specimen_n"], mpars["measurement_step_min"]*1e3, mpars["measurement_step_max"]*1e3, mpars["specimen_mad"], mpars["specimen_dec"], mpars["specimen_inc"]))
        if units == 'C':
            print('%s %s %i %6.2f %6.2f %6.1f %7.1f %7.1f' % (s, calculation_type,
                                                              mpars["specimen_n"], mpars["measurement_step_min"]-273, mpars["measurement_step_max"]-273, mpars["specimen_mad"], mpars["specimen_dec"], mpars["specimen_inc"]))
        if units == 'U':
            print('%s %s %i %6.2f %6.2f %6.1f %7.1f %7.1f' % (s, calculation_type,
                                                              mpars["specimen_n"], mpars["measurement_step_min"], mpars["measurement_step_max"], mpars["specimen_mad"], mpars["specimen_dec"], mpars["specimen_inc"]))
    if save:
        files = {}
        for key in list(ZED.keys()):
            # BUGFIX: honor the save_folder keyword (plots were previously
            # always written to the current working directory)
            files[key] = os.path.join(save_folder, s + '_' + key + '.' + fmt)
        pmagplotlib.save_plots(ZED, files)
save=False, save_folder='.', fmt='svg', begin_pca="", end_pca="", angle=0):
"""
NAME
zeq.py
DESCRIPTION
plots demagnetization data for a single specimen:
- The solid (open) symbols in the Zijderveld diagram are X,Y (X,Z) pairs. The demagnetization diagram plots the
fractional remanence remaining after each step. The green line is the fraction of the total remaence removed
between each step. If the principle direction is desired, specify begin_pca and end_pca steps as bounds for calculation.
-The equal area projection has the X direction (usually North in geographic coordinates)
to the top. The red line is the X axis of the Zijderveld diagram. Solid symbols are lower hemisphere.
- red dots and blue line is the remanence remaining after each step. The green line is the partial TRM removed in each interval
INPUT FORMAT
reads from file_name or takes a Pandas DataFrame data with specimen treatment intensity declination inclination as columns
Keywords:
file= FILE a space or tab delimited file with
specimen treatment declination inclination intensity
units= [mT,C] specify units of mT OR C, default is unscaled
save=[True,False] save figure and quit, default is False
fmt [svg,jpg,png,pdf] set figure format [default is svg]
begin_pca [step number] treatment step for beginning of PCA calculation, default
end_pca [step number] treatment step for end of PCA calculation, last step is default
calculation_type [DE-BFL,DE-BFP,DE-FM] Calculation Type: best-fit line, plane or fisher mean; line is default
angle=[0-360]: angle to subtract from declination to rotate in horizontal plane, default is 0
"""
if units == "C":
SIunits = "K"
if units == "mT":
SIunits = "T"
if units == "U":
SIunits = "U"
if file != "":
f = pd.read_csv(os.path.join(path_to_file, file),
delim_whitespace=True, header=None)
f.columns = ['specimen', 'treatment',
'intensity', 'declination', 'inclination']
# adjust for angle rotation
f['declination'] = (f['declination']-angle) % 360
f['quality'] = 'g'
f['type'] = ''
#
s = f['specimen'].tolist()[0]
if units == 'mT':
f['treatment'] = f['treatment']*1e-3
if units == 'C':
f['treatment'] = f['treatment']+273
data = f[['treatment', 'declination',
'inclination', 'intensity', 'type', 'quality']]
print(s)
datablock = data.values.tolist()
# define figure numbers in a dictionary for equal area, zijderveld,
# and intensity vs. demagnetiztion step respectively
ZED = {}
ZED['eqarea'], ZED['zijd'], ZED['demag'] = 2, 1, 3
plt.figure(num=ZED['zijd'], figsize=(5, 5))
plt.figure(num=ZED['eqarea'], figsize=(5, 5))
plt.figure(num=ZED['demag'], figsize=(5, 5))
#
#
pmagplotlib.plot_zed(ZED, datablock, angle, s, SIunits) # plot the data
#
# print out data for this sample to screen
#
recnum = 0
print('step treat intensity dec inc')
for plotrec in datablock:
if units == 'mT':
print('%i %7.1f %8.3e %7.1f %7.1f ' %
(recnum, plotrec[0]*1e3, plotrec[3], plotrec[1], plotrec[2]))
if units == 'C':
print('%i %7.1f %8.3e %7.1f %7.1f ' %
(recnum, plotrec[0]-273., plotrec[3], plotrec[1], plotrec[2]))
if units == 'U':
print('%i %7.1f %8.3e %7.1f %7.1f ' %
(recnum, plotrec[0], plotrec[3], plotrec[1], plotrec[2]))
recnum += 1
pmagplotlib.draw_figs(ZED)
if begin_pca != "" and end_pca != "" and calculation_type != "":
pmagplotlib.plot_zed(ZED, datablock, angle, s,
SIunits) # plot the data
# get best-fit direction/great circle
mpars = pmag.domean(datablock, begin_pca, end_pca, calculation_type)
# plot the best-fit direction/great circle
pmagplotlib.plot_dir(ZED, mpars, datablock, angle)
print('Specimen, calc_type, N, min, max, MAD, dec, inc')
if units == 'mT':
print('%s %s %i %6.2f %6.2f %6.1f %7.1f %7.1f' % (s, calculation_type,
mpars["specimen_n"], mpars["measurement_step_min"]*1e3, mpars["measurement_step_max"]*1e3, mpars["specimen_mad"], mpars["specimen_dec"], mpars["specimen_inc"]))
if units == 'C':
print('%s %s %i %6.2f %6.2f %6.1f %7.1f %7.1f' % (s, calculation_type,
mpars["specimen_n"], mpars["measurement_step_min"]-273, mpars["measurement_step_max"]-273, mpars["specimen_mad"], mpars["specimen_dec"], mpars["specimen_inc"]))
if units == 'U':
print('%s %s %i %6.2f %6.2f %6.1f %7.1f %7.1f' % (s, calculation_type,
mpars["specimen_n"], mpars["measurement_step_min"], mpars["measurement_step_max"], mpars["specimen_mad"], mpars["specimen_dec"], mpars["specimen_inc"]))
if save:
files = {}
for key in list(ZED.keys()):
files[key] = s+'_'+key+'.'+fmt
pmagplotlib.save_plots(ZED, files) | NAME
zeq.py
DESCRIPTION
plots demagnetization data for a single specimen:
- The solid (open) symbols in the Zijderveld diagram are X,Y (X,Z) pairs. The demagnetization diagram plots the
fractional remanence remaining after each step. The green line is the fraction of the total remanence removed
between each step. If the principal direction is desired, specify begin_pca and end_pca steps as bounds for calculation.
-The equal area projection has the X direction (usually North in geographic coordinates)
to the top. The red line is the X axis of the Zijderveld diagram. Solid symbols are lower hemisphere.
- red dots and blue line is the remanence remaining after each step. The green line is the partial TRM removed in each interval
INPUT FORMAT
reads from file_name or takes a Pandas DataFrame data with specimen treatment intensity declination inclination as columns
Keywords:
file= FILE a space or tab delimited file with
specimen treatment declination inclination intensity
units= [mT,C] specify units of mT OR C, default is unscaled
save=[True,False] save figure and quit, default is False
fmt [svg,jpg,png,pdf] set figure format [default is svg]
begin_pca [step number] treatment step for beginning of PCA calculation, default
end_pca [step number] treatment step for end of PCA calculation, last step is default
calculation_type [DE-BFL,DE-BFP,DE-FM] Calculation Type: best-fit line, plane or fisher mean; line is default
angle=[0-360]: angle to subtract from declination to rotate in horizontal plane, default is 0 | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L8248-L8353 |
def _aniso_figs_output(figs, files, con_id, save_plots, interactive, saved):
    """
    Decorate anisotropy figures with standard titles/borders, then save them
    (save_plots) and/or display them interactively. Extends `saved` in place
    with the names of any files written.
    """
    titles = {'data': "Eigenvectors",
              'tcdf': "Eigenvalue Confidence",
              'conf': "Confidence Ellipses"}
    for key in figs:
        titles.setdefault(key, key)
    pmagplotlib.add_borders(figs, titles, con_id=con_id)
    if save_plots:
        saved.extend(pmagplotlib.save_plots(figs, files))
    elif interactive:
        pmagplotlib.draw_figs(figs)
        if pmagplotlib.save_or_quit() == 'a':
            saved.extend(pmagplotlib.save_plots(figs, files))


def aniso_magic_nb(infile='specimens.txt', samp_file='samples.txt', site_file='sites.txt', verbose=True,
                   ipar=False, ihext=True, ivec=False, isite=False, iloc=False, iboot=False, vec=0,
                   Dir=None, PDir=None, crd="s", num_bootstraps=1000, dir_path=".", fignum=1,
                   save_plots=True, interactive=False, fmt="png"):
    """
    Makes plots of anisotropy eigenvectors, eigenvalues and confidence bounds.
    All directions are on the lower hemisphere.

    Parameters
    __________
    infile : specimens formatted file with aniso_s data
    samp_file : samples formatted file with sample => site relationship
    site_file : sites formatted file with site => location relationship
    verbose : if True, print messages to output
    confidence bounds options:
        ipar : if True - perform parametric bootstrap - requires non-blank aniso_s_sigma
        ihext : if True - Hext ellipses
        ivec : if True - plot bootstrapped eigenvectors instead of ellipses
        isite : if True plot by site, requires non-blank samp_file
        #iloc : if True plot by location, requires non-blank samp_file, and site_file NOT IMPLEMENTED
        iboot : if True - bootstrap ellipses
    vec : eigenvector for comparison with Dir
    Dir : [Dec,Inc] list for comparison direction (default None -> no comparison)
    PDir : [Pole_dec, Pole_Inc] for pole to plane for comparison
        green dots are on the lower hemisphere, cyan are on the upper hemisphere
    crd : ['s','g','t'], coordinate system for plotting whereby:
        s : specimen coordinates, aniso_tilt_correction = -1, or unspecified
        g : geographic coordinates, aniso_tilt_correction = 0
        t : tilt corrected coordinates, aniso_tilt_correction = 100
    num_bootstraps : how many bootstraps to do, default 1000
    dir_path : directory path
    fignum : matplotlib figure number, default 1
    save_plots : bool, default True
        if True, create and save all requested plots
    interactive : bool, default False
        interactively plot and display for each specimen
        (this is best used on the command line only)
    fmt : str, default "png"
        format for figures, [svg, jpg, pdf, png]

    Returns
    -------
    (True, saved) where saved is the list of figure files written
    """
    # avoid shared mutable defaults; None means "no comparison direction"
    if Dir is None:
        Dir = []
    if PDir is None:
        PDir = []
    figs = {}
    saved = []
    # make sure boolean values are in integer form
    # for backwards compatibility
    ipar = int(ipar)
    ihext = int(ihext)
    ivec = int(ivec)
    isite = int(isite)
    # iloc = int(iloc)  # NOT USED
    iboot = int(iboot)
    input_dir_path = os.path.realpath(dir_path)
    # map coordinate system to the MagIC aniso_tilt_correction code
    CS = -1  # specimen
    if crd == 'g':
        CS = 0
    if crd == 't':
        CS = 100
    # read in the data
    fnames = {'specimens': infile, 'samples': samp_file, 'sites': site_file}
    con = cb.Contribution(input_dir_path, read_tables=['specimens', 'samples', 'sites', 'contribution'],
                          custom_filenames=fnames)
    # get contribution id if available
    con_id = ""
    if 'contribution' in con.tables:
        if 'id' in con.tables['contribution'].df.columns:
            con_id = str(con.tables['contribution'].df['id'].values[0])
    # get other data
    con.propagate_location_to_specimens()
    spec_df = con.tables['specimens'].df
    # use only anisotropy records
    spec_df = spec_df.dropna(subset=['aniso_s']).copy()
    if 'aniso_tilt_correction' not in spec_df.columns:
        spec_df['aniso_tilt_correction'] = -1  # assume specimen coordinates
    if "aniso_s_n_measurements" not in spec_df.columns:
        spec_df["aniso_s_n_measurements"] = "6"
    if "aniso_s_sigma" not in spec_df.columns:
        # BUGFIX: previously assigned to "aniso_sigma", so the column being
        # checked was never given its default
        spec_df["aniso_s_sigma"] = "0"
    # fall back to an available coordinate system if the requested one is absent
    orlist = spec_df['aniso_tilt_correction'].dropna().unique()
    if CS not in orlist:
        if len(orlist) > 0:
            CS = orlist[0]
        else:
            CS = -1
        if CS == -1:
            crd = 's'
        if CS == 0:
            crd = 'g'
        if CS == 100:
            crd = 't'
        if verbose:
            print("desired coordinate system not available, using available: ", crd)
    cs_df = spec_df[spec_df['aniso_tilt_correction'] == CS]
    if isite:
        sites = cs_df['site'].unique()
        for site in list(sites):
            site_df = cs_df[cs_df.site == site]
            loc = ""
            if 'sites' in con.tables:
                if 'location' in con.tables['sites'].df.columns:
                    locs = con.tables['sites'].df.loc[site, 'location'].dropna()
                    if any(locs):
                        loc = locs.iloc[0]
            figs = plot_aniso(fignum, site_df, Dir=Dir, PDir=PDir, ipar=ipar,
                              ihext=ihext, ivec=ivec, iboot=iboot,
                              vec=vec, num_bootstraps=num_bootstraps, title=site)
            # BUGFIX: honor the fmt keyword (non-server filenames previously
            # hard-coded the .png extension)
            files = {key: loc + "_" + site + "_" + crd + "_aniso-" + key + "." + fmt
                     for key in figs}
            if pmagplotlib.isServer:
                for key in figs.keys():
                    files[key] = "LO:_" + loc + "_SI:_" + site + '_TY:_aniso_' + key + '_.' + fmt
            _aniso_figs_output(figs, files, con_id, save_plots, interactive, saved)
            if not save_plots and not interactive:
                # figures are left open; keep the manual figure numbering
                # moving past this site's figures
                fignum += 2
                if iboot:
                    fignum += 1
                if len(Dir) > 0:
                    fignum += 1
    else:
        figs = plot_aniso(fignum, cs_df, Dir=Dir, PDir=PDir, ipar=ipar, ihext=ihext,
                          ivec=ivec, iboot=iboot, vec=vec, num_bootstraps=num_bootstraps)
        try:
            locs = cs_df['location'].unique()
        except Exception:
            locs = [""]
        locs = "-".join(locs)
        files = {key: locs + "_" + crd + "_aniso-" + key + "." + fmt for key in figs}
        if pmagplotlib.isServer:
            for key in figs.keys():
                files[key] = 'MC:_' + con_id + '_TY:_aniso_' + key + '_.' + fmt
        _aniso_figs_output(figs, files, con_id, save_plots, interactive, saved)
    return True, saved
ipar=False, ihext=True, ivec=False, isite=False, iloc=False, iboot=False, vec=0,
Dir=[], PDir=[], crd="s", num_bootstraps=1000, dir_path=".", fignum=1,
save_plots=True, interactive=False, fmt="png"):
"""
Makes plots of anisotropy eigenvectors, eigenvalues and confidence bounds
All directions are on the lower hemisphere.
Parameters
__________
infile : specimens formatted file with aniso_s data
samp_file : samples formatted file with sample => site relationship
site_file : sites formatted file with site => location relationship
verbose : if True, print messages to output
confidence bounds options:
ipar : if True - perform parametric bootstrap - requires non-blank aniso_s_sigma
ihext : if True - Hext ellipses
ivec : if True - plot bootstrapped eigenvectors instead of ellipses
isite : if True plot by site, requires non-blank samp_file
#iloc : if True plot by location, requires non-blank samp_file, and site_file NOT IMPLEMENTED
iboot : if True - bootstrap ellipses
vec : eigenvector for comparison with Dir
Dir : [Dec,Inc] list for comparison direction
PDir : [Pole_dec, Pole_Inc] for pole to plane for comparison
green dots are on the lower hemisphere, cyan are on the upper hemisphere
crd : ['s','g','t'], coordinate system for plotting whereby:
s : specimen coordinates, aniso_tile_correction = -1, or unspecified
g : geographic coordinates, aniso_tile_correction = 0
t : tilt corrected coordinates, aniso_tile_correction = 100
num_bootstraps : how many bootstraps to do, default 1000
dir_path : directory path
fignum : matplotlib figure number, default 1
save_plots : bool, default True
if True, create and save all requested plots
interactive : bool, default False
interactively plot and display for each specimen
(this is best used on the command line only)
fmt : str, default "svg"
format for figures, [svg, jpg, pdf, png]
"""
figs = {}
saved = []
# make sure boolean values are in integer form
# for backwards compatibility
ipar = int(ipar)
ihext = int(ihext)
ivec = int(ivec)
isite = int(isite)
#iloc = int(iloc) # NOT USED
iboot = int(iboot)
# fix directory
input_dir_path = os.path.realpath(dir_path)
# initialize some variables
version_num = pmag.get_version()
hpars, bpars = [], []
# set aniso_tilt_correction value
CS = -1 # specimen
if crd == 'g':
CS = 0
if crd == 't':
CS = 100
#
#
# read in the data
fnames = {'specimens': infile, 'samples': samp_file, 'sites': site_file}
con = cb.Contribution(input_dir_path, read_tables=['specimens', 'samples', 'sites', 'contribution'],
custom_filenames=fnames)
# get contribution id if available
con_id = ""
if 'contribution' in con.tables:
if 'id' in con.tables['contribution'].df.columns:
con_id = str(con.tables['contribution'].df['id'].values[0])
# get other data
con.propagate_location_to_specimens()
spec_container = con.tables['specimens']
spec_df = spec_container.df
# use only anisotropy records
spec_df = spec_df.dropna(subset=['aniso_s']).copy()
if 'aniso_tilt_correction' not in spec_df.columns:
spec_df['aniso_tilt_correction'] = -1 # assume specimen coordinates
if "aniso_s_n_measurements" not in spec_df.columns:
spec_df["aniso_s_n_measurements"] = "6"
if "aniso_s_sigma" not in spec_df.columns:
spec_df["aniso_sigma"] = "0"
orlist = spec_df['aniso_tilt_correction'].dropna().unique()
if CS not in orlist:
if len(orlist) > 0:
CS = orlist[0]
else:
CS = -1
if CS == -1:
crd = 's'
if CS == 0:
crd = 'g'
if CS == 100:
crd = 't'
if verbose:
print("desired coordinate system not available, using available: ", crd)
cs_df = spec_df[spec_df['aniso_tilt_correction'] == CS]
if isite:
sites = cs_df['site'].unique()
for site in list(sites):
site_df = cs_df[cs_df.site == site]
loc = ""
if 'sites' in con.tables:
if 'location' in con.tables['sites'].df.columns:
locs = con.tables['sites'].df.loc[site, 'location'].dropna()
if any(locs):
loc = locs.iloc[0]
figs = plot_aniso(fignum, site_df, Dir=Dir, PDir=PDir, ipar=ipar,
ihext=ihext, ivec=ivec, iboot=iboot,
vec=vec, num_bootstraps=num_bootstraps, title=site)
files = {key: loc + "_" + site +"_" + crd + "_aniso-" + key + ".png" for (key, value) in figs.items()}
if pmagplotlib.isServer:
for key in figs.keys():
files[key] = "LO:_" + loc + "_SI:_" + site + '_TY:_aniso_' + key + '_.' + fmt
titles = {}
titles['data'] = "Eigenvectors"
titles['tcdf'] = "Eigenvalue Confidence"
titles['conf'] = "Confidence Ellipses"
for key in figs:
if key not in titles:
titles[key] = key
pmagplotlib.add_borders(figs, titles, con_id=con_id)
if save_plots:
saved.extend(pmagplotlib.save_plots(figs, files))
elif interactive:
pmagplotlib.draw_figs(figs)
ans = pmagplotlib.save_or_quit()
if ans == 'a':
saved.extend(pmagplotlib.save_plots(figs, files))
else:
continue
else:
fignum += 2
if iboot:
fignum += 1
if len(Dir) > 0:
fignum += 1
else:
figs = plot_aniso(fignum, cs_df, Dir=Dir, PDir=PDir, ipar=ipar, ihext=ihext,
ivec=ivec, iboot=iboot, vec=vec, num_bootstraps=num_bootstraps)
try:
locs = cs_df['location'].unique()
except:
locs = [""]
locs = "-".join(locs)
files = {key: locs + "_" + crd + "_aniso-" + key + ".png" for (key, value) in figs.items()}
if pmagplotlib.isServer:
for key in figs.keys():
files[key] = 'MC:_' + con_id + '_TY:_aniso_' + key + '_.' + fmt
titles = {}
titles['data'] = "Eigenvectors"
titles['tcdf'] = "Eigenvalue Confidence"
titles['conf'] = "Confidence Ellipses"
for key in figs:
if key not in titles:
titles[key] = key
pmagplotlib.add_borders(figs, titles, con_id=con_id)
if save_plots:
saved.extend(pmagplotlib.save_plots(figs, files))
elif interactive:
pmagplotlib.draw_figs(figs)
ans = pmagplotlib.save_or_quit()
if ans == 'a':
saved.extend(pmagplotlib.save_plots(figs, files))
return True, saved | Makes plots of anisotropy eigenvectors, eigenvalues and confidence bounds
All directions are on the lower hemisphere.
Parameters
__________
infile : specimens formatted file with aniso_s data
samp_file : samples formatted file with sample => site relationship
site_file : sites formatted file with site => location relationship
verbose : if True, print messages to output
confidence bounds options:
ipar : if True - perform parametric bootstrap - requires non-blank aniso_s_sigma
ihext : if True - Hext ellipses
ivec : if True - plot bootstrapped eigenvectors instead of ellipses
isite : if True plot by site, requires non-blank samp_file
#iloc : if True plot by location, requires non-blank samp_file, and site_file NOT IMPLEMENTED
iboot : if True - bootstrap ellipses
vec : eigenvector for comparison with Dir
Dir : [Dec,Inc] list for comparison direction
PDir : [Pole_dec, Pole_Inc] for pole to plane for comparison
green dots are on the lower hemisphere, cyan are on the upper hemisphere
crd : ['s','g','t'], coordinate system for plotting whereby:
s : specimen coordinates, aniso_tile_correction = -1, or unspecified
g : geographic coordinates, aniso_tile_correction = 0
t : tilt corrected coordinates, aniso_tile_correction = 100
num_bootstraps : how many bootstraps to do, default 1000
dir_path : directory path
fignum : matplotlib figure number, default 1
save_plots : bool, default True
if True, create and save all requested plots
interactive : bool, default False
interactively plot and display for each specimen
(this is best used on the command line only)
fmt : str, default "svg"
format for figures, [svg, jpg, pdf, png] | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L8995-L9164 |
PmagPy/PmagPy | pmagpy/ipmag.py | plot_dmag | def plot_dmag(data="", title="", fignum=1, norm=1,dmag_key='treat_ac_field',intensity='',
quality=False):
"""
plots demagenetization data versus step for all specimens in pandas dataframe datablock
Parameters
______________
data : Pandas dataframe with MagIC data model 3 columns:
fignum : figure number
specimen : specimen name
dmag_key : one of these: ['treat_temp','treat_ac_field','treat_mw_energy']
selected using method_codes : ['LT_T-Z','LT-AF-Z','LT-M-Z'] respectively
intensity : if blank will choose one of these: ['magn_moment', 'magn_volume', 'magn_mass']
quality : if True use the quality column of the DataFrame
title : title for plot
norm : if True, normalize data to first step
Output :
matptlotlib plot
"""
plt.figure(num=fignum, figsize=(5, 5))
if intensity:
int_key=intensity
else:
intlist = ['magn_moment', 'magn_volume', 'magn_mass']
# get which key we have
IntMeths = [col_name for col_name in data.columns if col_name in intlist]
int_key = IntMeths[0]
data = data[data[int_key].notnull()] # fish out all data with this key
units = "U" # this sets the units for plotting to undefined
if not dmag_key:
if 'treat_temp' in data.columns: units = "K" # kelvin
elif 'treat_ac_field' in data.columns: units = "T" # tesla
elif 'treat_mw_energy' in data.columns: units = "J" # joules
if dmag_key=='treat_temp': units='K'
if dmag_key=='treat_ac_field': units='T'
if dmag_key=='treat_mw_energy': units='J'
spcs = data.specimen.unique() # get a list of all specimens in DataFrame data
if len(spcs)==0:
print('no data for plotting')
return
# step through specimens to put on plot
for spc in spcs:
spec_data = data[data.specimen.str.contains(spc)]
INTblock = []
for ind, rec in spec_data.iterrows():
INTblock.append([float(rec[dmag_key]), 0, 0,
float(rec[int_key]), 1, rec['quality']])
if len(INTblock) > 2:
pmagplotlib.plot_mag(fignum, INTblock, title, 0, units, norm) | python | def plot_dmag(data="", title="", fignum=1, norm=1,dmag_key='treat_ac_field',intensity='',
quality=False):
"""
plots demagenetization data versus step for all specimens in pandas dataframe datablock
Parameters
______________
data : Pandas dataframe with MagIC data model 3 columns:
fignum : figure number
specimen : specimen name
dmag_key : one of these: ['treat_temp','treat_ac_field','treat_mw_energy']
selected using method_codes : ['LT_T-Z','LT-AF-Z','LT-M-Z'] respectively
intensity : if blank will choose one of these: ['magn_moment', 'magn_volume', 'magn_mass']
quality : if True use the quality column of the DataFrame
title : title for plot
norm : if True, normalize data to first step
Output :
matptlotlib plot
"""
plt.figure(num=fignum, figsize=(5, 5))
if intensity:
int_key=intensity
else:
intlist = ['magn_moment', 'magn_volume', 'magn_mass']
# get which key we have
IntMeths = [col_name for col_name in data.columns if col_name in intlist]
int_key = IntMeths[0]
data = data[data[int_key].notnull()] # fish out all data with this key
units = "U" # this sets the units for plotting to undefined
if not dmag_key:
if 'treat_temp' in data.columns: units = "K" # kelvin
elif 'treat_ac_field' in data.columns: units = "T" # tesla
elif 'treat_mw_energy' in data.columns: units = "J" # joules
if dmag_key=='treat_temp': units='K'
if dmag_key=='treat_ac_field': units='T'
if dmag_key=='treat_mw_energy': units='J'
spcs = data.specimen.unique() # get a list of all specimens in DataFrame data
if len(spcs)==0:
print('no data for plotting')
return
# step through specimens to put on plot
for spc in spcs:
spec_data = data[data.specimen.str.contains(spc)]
INTblock = []
for ind, rec in spec_data.iterrows():
INTblock.append([float(rec[dmag_key]), 0, 0,
float(rec[int_key]), 1, rec['quality']])
if len(INTblock) > 2:
pmagplotlib.plot_mag(fignum, INTblock, title, 0, units, norm) | plots demagenetization data versus step for all specimens in pandas dataframe datablock
Parameters
______________
data : Pandas dataframe with MagIC data model 3 columns:
fignum : figure number
specimen : specimen name
dmag_key : one of these: ['treat_temp','treat_ac_field','treat_mw_energy']
selected using method_codes : ['LT_T-Z','LT-AF-Z','LT-M-Z'] respectively
intensity : if blank will choose one of these: ['magn_moment', 'magn_volume', 'magn_mass']
quality : if True use the quality column of the DataFrame
title : title for plot
norm : if True, normalize data to first step
Output :
matptlotlib plot | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L9167-L9215 |
PmagPy/PmagPy | pmagpy/ipmag.py | eigs_s | def eigs_s(infile="", dir_path='.'):
"""
Converts eigenparamters format data to s format
Parameters
___________________
Input:
file : input file name with eigenvalues (tau) and eigenvectors (V) with format:
tau_1 V1_dec V1_inc tau_2 V2_dec V2_inc tau_3 V3_dec V3_inc
Output
the six tensor elements as a nested array
[[x11,x22,x33,x12,x23,x13],....]
"""
file = os.path.join(dir_path, infile)
eigs_data = np.loadtxt(file)
Ss = []
for ind in range(eigs_data.shape[0]):
tau, Vdirs = [], []
for k in range(0, 9, 3):
tau.append(eigs_data[ind][k])
Vdirs.append([eigs_data[ind][k+1], eigs_data[ind][k+2]])
s = list(pmag.doeigs_s(tau, Vdirs))
Ss.append(s)
return Ss | python | def eigs_s(infile="", dir_path='.'):
"""
Converts eigenparamters format data to s format
Parameters
___________________
Input:
file : input file name with eigenvalues (tau) and eigenvectors (V) with format:
tau_1 V1_dec V1_inc tau_2 V2_dec V2_inc tau_3 V3_dec V3_inc
Output
the six tensor elements as a nested array
[[x11,x22,x33,x12,x23,x13],....]
"""
file = os.path.join(dir_path, infile)
eigs_data = np.loadtxt(file)
Ss = []
for ind in range(eigs_data.shape[0]):
tau, Vdirs = [], []
for k in range(0, 9, 3):
tau.append(eigs_data[ind][k])
Vdirs.append([eigs_data[ind][k+1], eigs_data[ind][k+2]])
s = list(pmag.doeigs_s(tau, Vdirs))
Ss.append(s)
return Ss | Converts eigenparamters format data to s format
Parameters
___________________
Input:
file : input file name with eigenvalues (tau) and eigenvectors (V) with format:
tau_1 V1_dec V1_inc tau_2 V2_dec V2_inc tau_3 V3_dec V3_inc
Output
the six tensor elements as a nested array
[[x11,x22,x33,x12,x23,x13],....] | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L9218-L9242 |
PmagPy/PmagPy | pmagpy/ipmag.py | plot_gc | def plot_gc(poles, color='g', fignum=1):
"""
plots a great circle on an equal area projection
Parameters
____________________
Input
fignum : number of matplotlib object
poles : nested list of [Dec,Inc] pairs of poles
color : color of lower hemisphere dots for great circle - must be in form: 'g','r','y','k',etc.
upper hemisphere is always cyan
"""
for pole in poles:
pmagplotlib.plot_circ(fignum, pole, 90., color) | python | def plot_gc(poles, color='g', fignum=1):
"""
plots a great circle on an equal area projection
Parameters
____________________
Input
fignum : number of matplotlib object
poles : nested list of [Dec,Inc] pairs of poles
color : color of lower hemisphere dots for great circle - must be in form: 'g','r','y','k',etc.
upper hemisphere is always cyan
"""
for pole in poles:
pmagplotlib.plot_circ(fignum, pole, 90., color) | plots a great circle on an equal area projection
Parameters
____________________
Input
fignum : number of matplotlib object
poles : nested list of [Dec,Inc] pairs of poles
color : color of lower hemisphere dots for great circle - must be in form: 'g','r','y','k',etc.
upper hemisphere is always cyan | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L9245-L9257 |
PmagPy/PmagPy | pmagpy/ipmag.py | aarm_magic | def aarm_magic(infile, dir_path=".", input_dir_path="",
spec_file='specimens.txt', samp_file="samples.txt", data_model_num=3,
coord='s'):
"""
Converts AARM data to best-fit tensor (6 elements plus sigma)
Parameters
----------
infile : str
input measurement file
dir_path : str
output directory, default "."
input_dir_path : str
input file directory IF different from dir_path, default ""
spec_file : str
input/output specimen file name, default "specimens.txt"
samp_file : str
input sample file name, default "samples.txt"
data_model_num : number
MagIC data model [2, 3], default 3
coord : str
coordinate system specimen/geographic/tilt-corrected,
['s', 'g', 't'], default 's'
Returns
---------
Tuple : (True or False indicating if conversion was sucessful, output file name written)
Info
---------
Input for is a series of baseline, ARM pairs.
The baseline should be the AF demagnetized state (3 axis demag is
preferable) for the following ARM acquisition. The order of the
measurements is:
positions 1,2,3, 6,7,8, 11,12,13 (for 9 positions)
positions 1,2,3,4, 6,7,8,9, 11,12,13,14 (for 12 positions)
positions 1-15 (for 15 positions)
"""
data_model_num = int(float(data_model_num))
input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
# get full file names
meas_file = pmag.resolve_file_name(infile, input_dir_path)
spec_file = pmag.resolve_file_name(spec_file, input_dir_path)
samp_file = pmag.resolve_file_name(samp_file, input_dir_path)
output_spec_file = os.path.join(dir_path, os.path.split(spec_file)[1])
# get coordinate system
coords = {'s': '-1', 'g': '0', 't': '100'}
if coord not in coords.values():
coord = coords.get(str(coord), '-1')
if data_model_num == 3:
meas_data = []
meas_data3, file_type = pmag.magic_read(meas_file)
if file_type != 'measurements':
print(file_type, "This is not a valid MagIC 3.0. measurements file ")
return False, "{} is not a valid MagIC 3.0. measurements file ".format(meas_file)
# convert meas_data to 2.5
for rec in meas_data3:
meas_map = map_magic.meas_magic3_2_magic2_map
meas_data.append(map_magic.mapping(rec, meas_map))
spec_data = []
spec_data3, file_type = pmag.magic_read(spec_file)
for rec in spec_data3:
spec_map = map_magic.spec_magic3_2_magic2_map
spec_data.append(map_magic.mapping(rec, spec_map))
else: # data model 2
rmag_anis = "rmag_anisotropy.txt"
rmag_res = "rmag_results.txt"
rmag_anis = pmag.resolve_file_name(rmag_anis, input_dir_path)
rmag_res = pmag.resolve_file_name(rmag_res, input_dir_path)
meas_data, file_type = pmag.magic_read(meas_file)
if file_type != 'magic_measurements':
print(file_type, "This is not a valid MagIC 2.5 magic_measurements file ")
return False, "{} is not a valid MagIC 2.5. measurements file ".format(meas_file)
# fish out relevant data
meas_data = pmag.get_dictitem(
meas_data, 'magic_method_codes', 'LP-AN-ARM', 'has')
if coord != '-1': # need to read in sample data
if data_model_num == 3:
samp_data3, file_type = pmag.magic_read(samp_file)
if file_type != 'samples':
print(file_type, "This is not a valid samples file ")
print("Only specimen coordinates will be calculated")
coord = '-1'
else:
# translate to 2
samp_data = []
samp_map = map_magic.samp_magic3_2_magic2_map
for rec in samp_data3:
samp_data.append(map_magic.mapping(rec, samp_map))
else:
samp_data, file_type = pmag.magic_read(samp_file)
if file_type != 'er_samples':
print(file_type, "This is not a valid er_samples file ")
print("Only specimen coordinates will be calculated")
coord = '-1'
#
# sort the specimen names
#
ssort = []
for rec in meas_data:
spec = rec["er_specimen_name"]
if spec not in ssort:
ssort.append(spec)
if len(ssort) > 1:
sids = sorted(ssort)
else:
sids = ssort
#
# work on each specimen
#
specimen = 0
RmagSpecRecs, RmagResRecs = [], []
SpecRecs, SpecRecs3 = [], []
while specimen < len(sids):
s = sids[specimen]
RmagSpecRec = {}
RmagResRec = {}
# get old specrec here if applicable
if data_model_num == 3:
if spec_data:
try:
RmagResRec = pmag.get_dictitem(
spec_data, 'er_specimen_name', s, 'T')[0]
RmagSpecRec = pmag.get_dictitem(
spec_data, 'er_specimen_name', s, 'T')[0]
except IndexError:
pass
data = []
method_codes = []
#
# find the data from the meas_data file for this sample
#
data = pmag.get_dictitem(meas_data, 'er_specimen_name', s, 'T')
#
# find out the number of measurements (9, 12 or 15)
#
npos = int(len(data) / 2)
if npos == 9:
#
# get dec, inc, int and convert to x,y,z
#
# B matrix made from design matrix for positions
B, H, tmpH = pmag.designAARM(npos)
X = []
for rec in data:
Dir = []
Dir.append(float(rec["measurement_dec"]))
Dir.append(float(rec["measurement_inc"]))
Dir.append(float(rec["measurement_magn_moment"]))
X.append(pmag.dir2cart(Dir))
#
# subtract baseline and put in a work array
#
work = np.zeros((npos, 3), 'f')
for i in range(npos):
for j in range(3):
work[i][j] = X[2 * i + 1][j] - X[2 * i][j]
#
# calculate tensor elements
# first put ARM components in w vector
#
w = np.zeros((npos * 3), 'f')
index = 0
for i in range(npos):
for j in range(3):
w[index] = work[i][j]
index += 1
s = np.zeros((6), 'f') # initialize the s matrix
for i in range(6):
for j in range(len(w)):
s[i] += B[i][j] * w[j]
trace = s[0] + s[1] + s[2] # normalize by the trace
for i in range(6):
s[i] = s[i] / trace
a = pmag.s2a(s)
# ------------------------------------------------------------
# Calculating dels is different than in the Kappabridge
# routine. Use trace normalized tensor (a) and the applied
# unit field directions (tmpH) to generate model X,Y,Z
# components. Then compare these with the measured values.
# ------------------------------------------------------------
S = 0.
comp = np.zeros((npos * 3), 'f')
for i in range(npos):
for j in range(3):
index = i * 3 + j
compare = a[j][0] * tmpH[i][0] + a[j][1] * \
tmpH[i][1] + a[j][2] * tmpH[i][2]
comp[index] = compare
for i in range(npos * 3):
d = (w[i] / trace) - comp[i] # del values
S += d * d
nf = float(npos * 3 - 6) # number of degrees of freedom
if S > 0:
sigma = np.sqrt(S / nf)
else:
sigma = 0
RmagSpecRec["rmag_anisotropy_name"] = data[0]["er_specimen_name"]
RmagSpecRec["er_location_name"] = data[0].get(
"er_location_name", "")
RmagSpecRec["er_specimen_name"] = data[0]["er_specimen_name"]
if not "er_sample_name" in RmagSpecRec:
RmagSpecRec["er_sample_name"] = data[0].get(
"er_sample_name", "")
RmagSpecRec["er_site_name"] = data[0].get("er_site_name", "")
RmagSpecRec["magic_experiment_names"] = RmagSpecRec["rmag_anisotropy_name"] + ":AARM"
RmagSpecRec["er_citation_names"] = "This study"
RmagResRec["rmag_result_name"] = data[0]["er_specimen_name"] + ":AARM"
RmagResRec["er_location_names"] = data[0].get(
"er_location_name", "")
RmagResRec["er_specimen_names"] = data[0]["er_specimen_name"]
if not "er_sample_name" not in RmagResRec:
RmagResRec["er_sample_names"] = data[0].get(
"er_sample_name", "")
RmagResRec["er_site_names"] = data[0].get("er_site_name", "")
RmagResRec["magic_experiment_names"] = RmagSpecRec["rmag_anisotropy_name"] + ":AARM"
RmagResRec["er_citation_names"] = "This study"
if "magic_instrument_codes" in list(data[0].keys()):
RmagSpecRec["magic_instrument_codes"] = data[0]["magic_instrument_codes"]
else:
RmagSpecRec["magic_instrument_codes"] = ""
RmagSpecRec["anisotropy_type"] = "AARM"
RmagSpecRec["anisotropy_description"] = "Hext statistics adapted to AARM"
if coord != '-1': # need to rotate s
# set orientation priorities
SO_methods = []
for rec in samp_data:
if "magic_method_codes" not in rec:
rec['magic_method_codes'] = 'SO-NO'
if "magic_method_codes" in rec:
methlist = rec["magic_method_codes"]
for meth in methlist.split(":"):
if "SO" in meth and "SO-POM" not in meth.strip():
if meth.strip() not in SO_methods:
SO_methods.append(meth.strip())
SO_priorities = pmag.set_priorities(SO_methods, 0)
# continue here
redo, p = 1, 0
if len(SO_methods) <= 1:
az_type = SO_methods[0]
orient = pmag.find_samp_rec(
RmagSpecRec["er_sample_name"], samp_data, az_type)
if orient["sample_azimuth"] != "":
method_codes.append(az_type)
redo = 0
while redo == 1:
if p >= len(SO_priorities):
print("no orientation data for ", s)
orient["sample_azimuth"] = ""
orient["sample_dip"] = ""
method_codes.append("SO-NO")
redo = 0
else:
az_type = SO_methods[SO_methods.index(
SO_priorities[p])]
orient = pmag.find_samp_rec(
RmagSpecRec["er_sample_name"], samp_data, az_type)
if orient["sample_azimuth"] != "":
method_codes.append(az_type)
redo = 0
p += 1
az, pl = orient['sample_azimuth'], orient['sample_dip']
s = pmag.dosgeo(s, az, pl) # rotate to geographic coordinates
if coord == '100':
sample_bed_dir, sample_bed_dip = orient['sample_bed_dip_direction'], orient['sample_bed_dip']
# rotate to geographic coordinates
s = pmag.dostilt(s, sample_bed_dir, sample_bed_dip)
hpars = pmag.dohext(nf, sigma, s)
#
# prepare for output
#
RmagSpecRec["anisotropy_s1"] = '%8.6f' % (s[0])
RmagSpecRec["anisotropy_s2"] = '%8.6f' % (s[1])
RmagSpecRec["anisotropy_s3"] = '%8.6f' % (s[2])
RmagSpecRec["anisotropy_s4"] = '%8.6f' % (s[3])
RmagSpecRec["anisotropy_s5"] = '%8.6f' % (s[4])
RmagSpecRec["anisotropy_s6"] = '%8.6f' % (s[5])
RmagSpecRec["anisotropy_mean"] = '%8.3e' % (trace / 3)
RmagSpecRec["anisotropy_sigma"] = '%8.6f' % (sigma)
RmagSpecRec["anisotropy_unit"] = "Am^2"
RmagSpecRec["anisotropy_n"] = '%i' % (npos)
RmagSpecRec["anisotropy_tilt_correction"] = coord
# used by thellier_gui - must be taken out for uploading
RmagSpecRec["anisotropy_F"] = '%7.1f ' % (hpars["F"])
# used by thellier_gui - must be taken out for uploading
RmagSpecRec["anisotropy_F_crit"] = hpars["F_crit"]
RmagResRec["anisotropy_t1"] = '%8.6f ' % (hpars["t1"])
RmagResRec["anisotropy_t2"] = '%8.6f ' % (hpars["t2"])
RmagResRec["anisotropy_t3"] = '%8.6f ' % (hpars["t3"])
RmagResRec["anisotropy_v1_dec"] = '%7.1f ' % (hpars["v1_dec"])
RmagResRec["anisotropy_v2_dec"] = '%7.1f ' % (hpars["v2_dec"])
RmagResRec["anisotropy_v3_dec"] = '%7.1f ' % (hpars["v3_dec"])
RmagResRec["anisotropy_v1_inc"] = '%7.1f ' % (hpars["v1_inc"])
RmagResRec["anisotropy_v2_inc"] = '%7.1f ' % (hpars["v2_inc"])
RmagResRec["anisotropy_v3_inc"] = '%7.1f ' % (hpars["v3_inc"])
RmagResRec["anisotropy_ftest"] = '%7.1f ' % (hpars["F"])
RmagResRec["anisotropy_ftest12"] = '%7.1f ' % (hpars["F12"])
RmagResRec["anisotropy_ftest23"] = '%7.1f ' % (hpars["F23"])
RmagResRec["result_description"] = 'Critical F: ' + \
hpars["F_crit"] + ';Critical F12/F13: ' + hpars["F12_crit"]
if hpars["e12"] > hpars["e13"]:
RmagResRec["anisotropy_v1_zeta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v1_zeta_dec"] = '%7.1f ' % (
hpars['v2_dec'])
RmagResRec["anisotropy_v1_zeta_inc"] = '%7.1f ' % (
hpars['v2_inc'])
RmagResRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v2_zeta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v2_zeta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
RmagResRec["anisotropy_v1_eta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v1_eta_dec"] = '%7.1f ' % (
hpars['v3_dec'])
RmagResRec["anisotropy_v1_eta_inc"] = '%7.1f ' % (
hpars['v3_inc'])
RmagResRec["anisotropy_v3_eta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v3_eta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v3_eta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
else:
RmagResRec["anisotropy_v1_zeta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v1_zeta_dec"] = '%7.1f ' % (
hpars['v3_dec'])
RmagResRec["anisotropy_v1_zeta_inc"] = '%7.1f ' % (
hpars['v3_inc'])
RmagResRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v3_zeta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v3_zeta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
RmagResRec["anisotropy_v1_eta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v1_eta_dec"] = '%7.1f ' % (
hpars['v2_dec'])
RmagResRec["anisotropy_v1_eta_inc"] = '%7.1f ' % (
hpars['v2_inc'])
RmagResRec["anisotropy_v2_eta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v2_eta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v2_eta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
if hpars["e23"] > hpars['e12']:
RmagResRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f ' % (
hpars['e23'])
RmagResRec["anisotropy_v2_zeta_dec"] = '%7.1f ' % (
hpars['v3_dec'])
RmagResRec["anisotropy_v2_zeta_inc"] = '%7.1f ' % (
hpars['v3_inc'])
RmagResRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f ' % (
hpars['e23'])
RmagResRec["anisotropy_v3_zeta_dec"] = '%7.1f ' % (
hpars['v2_dec'])
RmagResRec["anisotropy_v3_zeta_inc"] = '%7.1f ' % (
hpars['v2_inc'])
RmagResRec["anisotropy_v3_eta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v3_eta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v3_eta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
RmagResRec["anisotropy_v2_eta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v2_eta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v2_eta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
else:
RmagResRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v2_zeta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v2_zeta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
RmagResRec["anisotropy_v3_eta_semi_angle"] = '%7.1f ' % (
hpars['e23'])
RmagResRec["anisotropy_v3_eta_dec"] = '%7.1f ' % (
hpars['v2_dec'])
RmagResRec["anisotropy_v3_eta_inc"] = '%7.1f ' % (
hpars['v2_inc'])
RmagResRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v3_zeta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v3_zeta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
RmagResRec["anisotropy_v2_eta_semi_angle"] = '%7.1f ' % (
hpars['e23'])
RmagResRec["anisotropy_v2_eta_dec"] = '%7.1f ' % (
hpars['v3_dec'])
RmagResRec["anisotropy_v2_eta_inc"] = '%7.1f ' % (
hpars['v3_inc'])
RmagResRec["tilt_correction"] = '-1'
RmagResRec["anisotropy_type"] = 'AARM'
RmagResRec["magic_method_codes"] = 'LP-AN-ARM:AE-H'
RmagSpecRec["magic_method_codes"] = 'LP-AN-ARM:AE-H'
RmagResRec["magic_software_packages"] = pmag.get_version()
RmagSpecRec["magic_software_packages"] = pmag.get_version()
specimen += 1
RmagSpecRecs.append(RmagSpecRec)
RmagResRecs.append(RmagResRec)
if data_model_num == 3:
SpecRec = RmagResRec.copy()
SpecRec.update(RmagSpecRec)
SpecRecs.append(SpecRec)
else:
print('skipping specimen ', s,
' only 9 positions supported', '; this has ', npos)
specimen += 1
if data_model_num == 3:
# translate records
for rec in SpecRecs:
rec3 = map_magic.convert_aniso('magic3', rec)
SpecRecs3.append(rec3)
# write output to 3.0 specimens file
res, ofile = pmag.magic_write(output_spec_file, SpecRecs3, 'specimens')
print("specimen data stored in {}".format(output_spec_file))
if not res:
return False, "Something went wrong and no records were created. Are you sure your measurement file has the method code 'LP-AN-ARM'?"
return True, output_spec_file
else:
if rmag_anis == "":
rmag_anis = "rmag_anisotropy.txt"
pmag.magic_write(rmag_anis, RmagSpecRecs, 'rmag_anisotropy')
print("specimen tensor elements stored in ", rmag_anis)
if rmag_res == "":
rmag_res = "rmag_results.txt"
pmag.magic_write(rmag_res, RmagResRecs, 'rmag_results')
print("specimen statistics and eigenparameters stored in ", rmag_res)
return True, rmag_anis | python | def aarm_magic(infile, dir_path=".", input_dir_path="",
spec_file='specimens.txt', samp_file="samples.txt", data_model_num=3,
coord='s'):
"""
Converts AARM data to best-fit tensor (6 elements plus sigma)
Parameters
----------
infile : str
input measurement file
dir_path : str
output directory, default "."
input_dir_path : str
input file directory IF different from dir_path, default ""
spec_file : str
input/output specimen file name, default "specimens.txt"
samp_file : str
input sample file name, default "samples.txt"
data_model_num : number
MagIC data model [2, 3], default 3
coord : str
coordinate system specimen/geographic/tilt-corrected,
['s', 'g', 't'], default 's'
Returns
---------
Tuple : (True or False indicating if conversion was sucessful, output file name written)
Info
---------
Input for is a series of baseline, ARM pairs.
The baseline should be the AF demagnetized state (3 axis demag is
preferable) for the following ARM acquisition. The order of the
measurements is:
positions 1,2,3, 6,7,8, 11,12,13 (for 9 positions)
positions 1,2,3,4, 6,7,8,9, 11,12,13,14 (for 12 positions)
positions 1-15 (for 15 positions)
"""
data_model_num = int(float(data_model_num))
input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
# get full file names
meas_file = pmag.resolve_file_name(infile, input_dir_path)
spec_file = pmag.resolve_file_name(spec_file, input_dir_path)
samp_file = pmag.resolve_file_name(samp_file, input_dir_path)
output_spec_file = os.path.join(dir_path, os.path.split(spec_file)[1])
# get coordinate system
coords = {'s': '-1', 'g': '0', 't': '100'}
if coord not in coords.values():
coord = coords.get(str(coord), '-1')
if data_model_num == 3:
meas_data = []
meas_data3, file_type = pmag.magic_read(meas_file)
if file_type != 'measurements':
print(file_type, "This is not a valid MagIC 3.0. measurements file ")
return False, "{} is not a valid MagIC 3.0. measurements file ".format(meas_file)
# convert meas_data to 2.5
for rec in meas_data3:
meas_map = map_magic.meas_magic3_2_magic2_map
meas_data.append(map_magic.mapping(rec, meas_map))
spec_data = []
spec_data3, file_type = pmag.magic_read(spec_file)
for rec in spec_data3:
spec_map = map_magic.spec_magic3_2_magic2_map
spec_data.append(map_magic.mapping(rec, spec_map))
else: # data model 2
rmag_anis = "rmag_anisotropy.txt"
rmag_res = "rmag_results.txt"
rmag_anis = pmag.resolve_file_name(rmag_anis, input_dir_path)
rmag_res = pmag.resolve_file_name(rmag_res, input_dir_path)
meas_data, file_type = pmag.magic_read(meas_file)
if file_type != 'magic_measurements':
print(file_type, "This is not a valid MagIC 2.5 magic_measurements file ")
return False, "{} is not a valid MagIC 2.5. measurements file ".format(meas_file)
# fish out relevant data
meas_data = pmag.get_dictitem(
meas_data, 'magic_method_codes', 'LP-AN-ARM', 'has')
if coord != '-1': # need to read in sample data
if data_model_num == 3:
samp_data3, file_type = pmag.magic_read(samp_file)
if file_type != 'samples':
print(file_type, "This is not a valid samples file ")
print("Only specimen coordinates will be calculated")
coord = '-1'
else:
# translate to 2
samp_data = []
samp_map = map_magic.samp_magic3_2_magic2_map
for rec in samp_data3:
samp_data.append(map_magic.mapping(rec, samp_map))
else:
samp_data, file_type = pmag.magic_read(samp_file)
if file_type != 'er_samples':
print(file_type, "This is not a valid er_samples file ")
print("Only specimen coordinates will be calculated")
coord = '-1'
#
# sort the specimen names
#
ssort = []
for rec in meas_data:
spec = rec["er_specimen_name"]
if spec not in ssort:
ssort.append(spec)
if len(ssort) > 1:
sids = sorted(ssort)
else:
sids = ssort
#
# work on each specimen
#
specimen = 0
RmagSpecRecs, RmagResRecs = [], []
SpecRecs, SpecRecs3 = [], []
while specimen < len(sids):
s = sids[specimen]
RmagSpecRec = {}
RmagResRec = {}
# get old specrec here if applicable
if data_model_num == 3:
if spec_data:
try:
RmagResRec = pmag.get_dictitem(
spec_data, 'er_specimen_name', s, 'T')[0]
RmagSpecRec = pmag.get_dictitem(
spec_data, 'er_specimen_name', s, 'T')[0]
except IndexError:
pass
data = []
method_codes = []
#
# find the data from the meas_data file for this sample
#
data = pmag.get_dictitem(meas_data, 'er_specimen_name', s, 'T')
#
# find out the number of measurements (9, 12 or 15)
#
npos = int(len(data) / 2)
if npos == 9:
#
# get dec, inc, int and convert to x,y,z
#
# B matrix made from design matrix for positions
B, H, tmpH = pmag.designAARM(npos)
X = []
for rec in data:
Dir = []
Dir.append(float(rec["measurement_dec"]))
Dir.append(float(rec["measurement_inc"]))
Dir.append(float(rec["measurement_magn_moment"]))
X.append(pmag.dir2cart(Dir))
#
# subtract baseline and put in a work array
#
work = np.zeros((npos, 3), 'f')
for i in range(npos):
for j in range(3):
work[i][j] = X[2 * i + 1][j] - X[2 * i][j]
#
# calculate tensor elements
# first put ARM components in w vector
#
w = np.zeros((npos * 3), 'f')
index = 0
for i in range(npos):
for j in range(3):
w[index] = work[i][j]
index += 1
s = np.zeros((6), 'f') # initialize the s matrix
for i in range(6):
for j in range(len(w)):
s[i] += B[i][j] * w[j]
trace = s[0] + s[1] + s[2] # normalize by the trace
for i in range(6):
s[i] = s[i] / trace
a = pmag.s2a(s)
# ------------------------------------------------------------
# Calculating dels is different than in the Kappabridge
# routine. Use trace normalized tensor (a) and the applied
# unit field directions (tmpH) to generate model X,Y,Z
# components. Then compare these with the measured values.
# ------------------------------------------------------------
S = 0.
comp = np.zeros((npos * 3), 'f')
for i in range(npos):
for j in range(3):
index = i * 3 + j
compare = a[j][0] * tmpH[i][0] + a[j][1] * \
tmpH[i][1] + a[j][2] * tmpH[i][2]
comp[index] = compare
for i in range(npos * 3):
d = (w[i] / trace) - comp[i] # del values
S += d * d
nf = float(npos * 3 - 6) # number of degrees of freedom
if S > 0:
sigma = np.sqrt(S / nf)
else:
sigma = 0
RmagSpecRec["rmag_anisotropy_name"] = data[0]["er_specimen_name"]
RmagSpecRec["er_location_name"] = data[0].get(
"er_location_name", "")
RmagSpecRec["er_specimen_name"] = data[0]["er_specimen_name"]
if not "er_sample_name" in RmagSpecRec:
RmagSpecRec["er_sample_name"] = data[0].get(
"er_sample_name", "")
RmagSpecRec["er_site_name"] = data[0].get("er_site_name", "")
RmagSpecRec["magic_experiment_names"] = RmagSpecRec["rmag_anisotropy_name"] + ":AARM"
RmagSpecRec["er_citation_names"] = "This study"
RmagResRec["rmag_result_name"] = data[0]["er_specimen_name"] + ":AARM"
RmagResRec["er_location_names"] = data[0].get(
"er_location_name", "")
RmagResRec["er_specimen_names"] = data[0]["er_specimen_name"]
if not "er_sample_name" not in RmagResRec:
RmagResRec["er_sample_names"] = data[0].get(
"er_sample_name", "")
RmagResRec["er_site_names"] = data[0].get("er_site_name", "")
RmagResRec["magic_experiment_names"] = RmagSpecRec["rmag_anisotropy_name"] + ":AARM"
RmagResRec["er_citation_names"] = "This study"
if "magic_instrument_codes" in list(data[0].keys()):
RmagSpecRec["magic_instrument_codes"] = data[0]["magic_instrument_codes"]
else:
RmagSpecRec["magic_instrument_codes"] = ""
RmagSpecRec["anisotropy_type"] = "AARM"
RmagSpecRec["anisotropy_description"] = "Hext statistics adapted to AARM"
if coord != '-1': # need to rotate s
# set orientation priorities
SO_methods = []
for rec in samp_data:
if "magic_method_codes" not in rec:
rec['magic_method_codes'] = 'SO-NO'
if "magic_method_codes" in rec:
methlist = rec["magic_method_codes"]
for meth in methlist.split(":"):
if "SO" in meth and "SO-POM" not in meth.strip():
if meth.strip() not in SO_methods:
SO_methods.append(meth.strip())
SO_priorities = pmag.set_priorities(SO_methods, 0)
# continue here
redo, p = 1, 0
if len(SO_methods) <= 1:
az_type = SO_methods[0]
orient = pmag.find_samp_rec(
RmagSpecRec["er_sample_name"], samp_data, az_type)
if orient["sample_azimuth"] != "":
method_codes.append(az_type)
redo = 0
while redo == 1:
if p >= len(SO_priorities):
print("no orientation data for ", s)
orient["sample_azimuth"] = ""
orient["sample_dip"] = ""
method_codes.append("SO-NO")
redo = 0
else:
az_type = SO_methods[SO_methods.index(
SO_priorities[p])]
orient = pmag.find_samp_rec(
RmagSpecRec["er_sample_name"], samp_data, az_type)
if orient["sample_azimuth"] != "":
method_codes.append(az_type)
redo = 0
p += 1
az, pl = orient['sample_azimuth'], orient['sample_dip']
s = pmag.dosgeo(s, az, pl) # rotate to geographic coordinates
if coord == '100':
sample_bed_dir, sample_bed_dip = orient['sample_bed_dip_direction'], orient['sample_bed_dip']
# rotate to geographic coordinates
s = pmag.dostilt(s, sample_bed_dir, sample_bed_dip)
hpars = pmag.dohext(nf, sigma, s)
#
# prepare for output
#
RmagSpecRec["anisotropy_s1"] = '%8.6f' % (s[0])
RmagSpecRec["anisotropy_s2"] = '%8.6f' % (s[1])
RmagSpecRec["anisotropy_s3"] = '%8.6f' % (s[2])
RmagSpecRec["anisotropy_s4"] = '%8.6f' % (s[3])
RmagSpecRec["anisotropy_s5"] = '%8.6f' % (s[4])
RmagSpecRec["anisotropy_s6"] = '%8.6f' % (s[5])
RmagSpecRec["anisotropy_mean"] = '%8.3e' % (trace / 3)
RmagSpecRec["anisotropy_sigma"] = '%8.6f' % (sigma)
RmagSpecRec["anisotropy_unit"] = "Am^2"
RmagSpecRec["anisotropy_n"] = '%i' % (npos)
RmagSpecRec["anisotropy_tilt_correction"] = coord
# used by thellier_gui - must be taken out for uploading
RmagSpecRec["anisotropy_F"] = '%7.1f ' % (hpars["F"])
# used by thellier_gui - must be taken out for uploading
RmagSpecRec["anisotropy_F_crit"] = hpars["F_crit"]
RmagResRec["anisotropy_t1"] = '%8.6f ' % (hpars["t1"])
RmagResRec["anisotropy_t2"] = '%8.6f ' % (hpars["t2"])
RmagResRec["anisotropy_t3"] = '%8.6f ' % (hpars["t3"])
RmagResRec["anisotropy_v1_dec"] = '%7.1f ' % (hpars["v1_dec"])
RmagResRec["anisotropy_v2_dec"] = '%7.1f ' % (hpars["v2_dec"])
RmagResRec["anisotropy_v3_dec"] = '%7.1f ' % (hpars["v3_dec"])
RmagResRec["anisotropy_v1_inc"] = '%7.1f ' % (hpars["v1_inc"])
RmagResRec["anisotropy_v2_inc"] = '%7.1f ' % (hpars["v2_inc"])
RmagResRec["anisotropy_v3_inc"] = '%7.1f ' % (hpars["v3_inc"])
RmagResRec["anisotropy_ftest"] = '%7.1f ' % (hpars["F"])
RmagResRec["anisotropy_ftest12"] = '%7.1f ' % (hpars["F12"])
RmagResRec["anisotropy_ftest23"] = '%7.1f ' % (hpars["F23"])
RmagResRec["result_description"] = 'Critical F: ' + \
hpars["F_crit"] + ';Critical F12/F13: ' + hpars["F12_crit"]
if hpars["e12"] > hpars["e13"]:
RmagResRec["anisotropy_v1_zeta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v1_zeta_dec"] = '%7.1f ' % (
hpars['v2_dec'])
RmagResRec["anisotropy_v1_zeta_inc"] = '%7.1f ' % (
hpars['v2_inc'])
RmagResRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v2_zeta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v2_zeta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
RmagResRec["anisotropy_v1_eta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v1_eta_dec"] = '%7.1f ' % (
hpars['v3_dec'])
RmagResRec["anisotropy_v1_eta_inc"] = '%7.1f ' % (
hpars['v3_inc'])
RmagResRec["anisotropy_v3_eta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v3_eta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v3_eta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
else:
RmagResRec["anisotropy_v1_zeta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v1_zeta_dec"] = '%7.1f ' % (
hpars['v3_dec'])
RmagResRec["anisotropy_v1_zeta_inc"] = '%7.1f ' % (
hpars['v3_inc'])
RmagResRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v3_zeta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v3_zeta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
RmagResRec["anisotropy_v1_eta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v1_eta_dec"] = '%7.1f ' % (
hpars['v2_dec'])
RmagResRec["anisotropy_v1_eta_inc"] = '%7.1f ' % (
hpars['v2_inc'])
RmagResRec["anisotropy_v2_eta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v2_eta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v2_eta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
if hpars["e23"] > hpars['e12']:
RmagResRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f ' % (
hpars['e23'])
RmagResRec["anisotropy_v2_zeta_dec"] = '%7.1f ' % (
hpars['v3_dec'])
RmagResRec["anisotropy_v2_zeta_inc"] = '%7.1f ' % (
hpars['v3_inc'])
RmagResRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f ' % (
hpars['e23'])
RmagResRec["anisotropy_v3_zeta_dec"] = '%7.1f ' % (
hpars['v2_dec'])
RmagResRec["anisotropy_v3_zeta_inc"] = '%7.1f ' % (
hpars['v2_inc'])
RmagResRec["anisotropy_v3_eta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v3_eta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v3_eta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
RmagResRec["anisotropy_v2_eta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v2_eta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v2_eta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
else:
RmagResRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v2_zeta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v2_zeta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
RmagResRec["anisotropy_v3_eta_semi_angle"] = '%7.1f ' % (
hpars['e23'])
RmagResRec["anisotropy_v3_eta_dec"] = '%7.1f ' % (
hpars['v2_dec'])
RmagResRec["anisotropy_v3_eta_inc"] = '%7.1f ' % (
hpars['v2_inc'])
RmagResRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v3_zeta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v3_zeta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
RmagResRec["anisotropy_v2_eta_semi_angle"] = '%7.1f ' % (
hpars['e23'])
RmagResRec["anisotropy_v2_eta_dec"] = '%7.1f ' % (
hpars['v3_dec'])
RmagResRec["anisotropy_v2_eta_inc"] = '%7.1f ' % (
hpars['v3_inc'])
RmagResRec["tilt_correction"] = '-1'
RmagResRec["anisotropy_type"] = 'AARM'
RmagResRec["magic_method_codes"] = 'LP-AN-ARM:AE-H'
RmagSpecRec["magic_method_codes"] = 'LP-AN-ARM:AE-H'
RmagResRec["magic_software_packages"] = pmag.get_version()
RmagSpecRec["magic_software_packages"] = pmag.get_version()
specimen += 1
RmagSpecRecs.append(RmagSpecRec)
RmagResRecs.append(RmagResRec)
if data_model_num == 3:
SpecRec = RmagResRec.copy()
SpecRec.update(RmagSpecRec)
SpecRecs.append(SpecRec)
else:
print('skipping specimen ', s,
' only 9 positions supported', '; this has ', npos)
specimen += 1
if data_model_num == 3:
# translate records
for rec in SpecRecs:
rec3 = map_magic.convert_aniso('magic3', rec)
SpecRecs3.append(rec3)
# write output to 3.0 specimens file
res, ofile = pmag.magic_write(output_spec_file, SpecRecs3, 'specimens')
print("specimen data stored in {}".format(output_spec_file))
if not res:
return False, "Something went wrong and no records were created. Are you sure your measurement file has the method code 'LP-AN-ARM'?"
return True, output_spec_file
else:
if rmag_anis == "":
rmag_anis = "rmag_anisotropy.txt"
pmag.magic_write(rmag_anis, RmagSpecRecs, 'rmag_anisotropy')
print("specimen tensor elements stored in ", rmag_anis)
if rmag_res == "":
rmag_res = "rmag_results.txt"
pmag.magic_write(rmag_res, RmagResRecs, 'rmag_results')
print("specimen statistics and eigenparameters stored in ", rmag_res)
return True, rmag_anis | Converts AARM data to best-fit tensor (6 elements plus sigma)
Parameters
----------
infile : str
input measurement file
dir_path : str
output directory, default "."
input_dir_path : str
input file directory IF different from dir_path, default ""
spec_file : str
input/output specimen file name, default "specimens.txt"
samp_file : str
input sample file name, default "samples.txt"
data_model_num : number
MagIC data model [2, 3], default 3
coord : str
coordinate system specimen/geographic/tilt-corrected,
['s', 'g', 't'], default 's'
Returns
---------
    Tuple : (True or False indicating if conversion was successful, output file name written)
Info
---------
    Input is a series of baseline, ARM pairs.
The baseline should be the AF demagnetized state (3 axis demag is
preferable) for the following ARM acquisition. The order of the
measurements is:
positions 1,2,3, 6,7,8, 11,12,13 (for 9 positions)
positions 1,2,3,4, 6,7,8,9, 11,12,13,14 (for 12 positions)
positions 1-15 (for 15 positions) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L9387-L9833 |
def atrm_magic(meas_file, dir_path=".", input_dir_path="",
               input_spec_file='specimens.txt', output_spec_file='specimens.txt',
               data_model_num=3):
    """
    Converts ATRM data to best-fit tensor (6 elements plus sigma)

    Parameters
    ----------
    meas_file : str
        input measurement file
    dir_path : str
        output directory, default "."
    input_dir_path : str
        input file directory IF different from dir_path, default ""
    input_spec_file : str
        input specimen file name, default "specimens.txt"
    output_spec_file : str
        output specimen file name, default "specimens.txt"
    data_model_num : number
        MagIC data model [2, 3], default 3

    Returns
    ---------
    Tuple : (True or False indicating if conversion was successful, output file name written)
    """
    # allow "3", 3.0, etc. to be passed in (consistent with aarm_magic)
    data_model_num = int(float(data_model_num))
    # fix up file names
    input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
    meas_file = pmag.resolve_file_name(meas_file, input_dir_path)
    rmag_anis = os.path.join(dir_path, 'rmag_anisotropy.txt')
    rmag_res = os.path.join(dir_path, 'rmag_results.txt')
    input_spec_file = pmag.resolve_file_name(input_spec_file, input_dir_path)
    output_spec_file = pmag.resolve_file_name(output_spec_file, dir_path)
    # read in data
    if data_model_num == 3:
        meas_data = []
        meas_data3, file_type = pmag.magic_read(meas_file)
        if file_type != 'measurements':
            print(
                "-E- {} is not a valid measurements file, {}".format(meas_file, file_type))
            # return a (False, message) tuple like every other exit path
            return False, "{} is not a valid measurements file, {}".format(meas_file, file_type)
        # convert meas_data to 2.5
        for rec in meas_data3:
            meas_map = map_magic.meas_magic3_2_magic2_map
            meas_data.append(map_magic.mapping(rec, meas_map))
        old_spec_recs, file_type = pmag.magic_read(input_spec_file)
        if file_type != 'specimens':
            print("-W- {} is not a valid specimens file ".format(input_spec_file))
            old_spec_recs = []
        spec_recs = []
        for rec in old_spec_recs:
            spec_map = map_magic.spec_magic3_2_magic2_map
            spec_recs.append(map_magic.mapping(rec, spec_map))
    else:
        meas_data, file_type = pmag.magic_read(meas_file)
        if file_type != 'magic_measurements':
            print("-E- {} is not a valid magic_measurements file, {}".format(meas_file, file_type))
            return False, "{} is not a valid magic_measurements file, {}".format(meas_file, file_type)
    # keep only ATRM experiment records
    meas_data = pmag.get_dictitem(
        meas_data, 'magic_method_codes', 'LP-AN-TRM', 'has')
    if not len(meas_data):
        print("-E- No measurement records found with code LP-AN-TRM")
        return False, "No measurement records found with code LP-AN-TRM"
    #
    # get sorted list of unique specimen names
    ssort = []
    for rec in meas_data:
        spec = rec["er_specimen_name"]
        if spec not in ssort:
            ssort.append(spec)
    sids = sorted(ssort)
    #
    # work on each specimen
    #
    specimen, npos = 0, 6
    RmagSpecRecs, RmagResRecs = [], []
    SpecRecs, SpecRecs3 = [], []
    while specimen < len(sids):
        nmeas = 0
        s = sids[specimen]
        RmagSpecRec = {}
        RmagResRec = {}
        # get old specrec here if applicable
        if data_model_num == 3:
            if spec_recs:
                try:
                    RmagResRec = pmag.get_dictitem(
                        spec_recs, 'er_specimen_name', s, 'T')[0]
                    RmagSpecRec = pmag.get_dictitem(
                        spec_recs, 'er_specimen_name', s, 'T')[0]
                except IndexError:
                    pass
        BX, X = [], []
        #
        # find the data from the meas_data file for this specimen
        # and get dec, inc, int and convert to x,y,z
        #
        data = pmag.get_dictitem(meas_data, 'er_specimen_name', s, 'T')
        if len(data) > 5:
            RmagSpecRec["rmag_anisotropy_name"] = data[0]["er_specimen_name"]
            RmagSpecRec["er_location_name"] = data[0].get(
                "er_location_name", "")
            RmagSpecRec["er_specimen_name"] = data[0]["er_specimen_name"]
            if "er_sample_name" not in RmagSpecRec:
                RmagSpecRec["er_sample_name"] = data[0].get(
                    "er_sample_name", "")
            RmagSpecRec["er_site_name"] = data[0].get("er_site_name", "")
            RmagSpecRec["magic_experiment_names"] = RmagSpecRec["rmag_anisotropy_name"] + ":ATRM"
            RmagSpecRec["er_citation_names"] = "This study"
            RmagResRec["rmag_result_name"] = data[0]["er_specimen_name"] + ":ATRM"
            # measurement records carry the singular 'er_location_name' key;
            # the old plural lookup ('er_location_names') always returned ''
            RmagResRec["er_location_names"] = data[0].get(
                "er_location_name", "")
            RmagResRec["er_specimen_names"] = data[0]["er_specimen_name"]
            if data_model_num == 2:
                RmagResRec["er_sample_names"] = data[0].get(
                    "er_sample_name", "")
            RmagResRec["er_site_names"] = data[0].get("er_site_name", "")
            RmagResRec["magic_experiment_names"] = RmagSpecRec["rmag_anisotropy_name"] + ":ATRM"
            RmagResRec["er_citation_names"] = "This study"
            RmagSpecRec["anisotropy_type"] = "ATRM"
            if "magic_instrument_codes" in list(data[0].keys()):
                RmagSpecRec["magic_instrument_codes"] = data[0]["magic_instrument_codes"]
            else:
                RmagSpecRec["magic_instrument_codes"] = ""
            RmagSpecRec["anisotropy_description"] = "Hext statistics adapted to ATRM"
            for rec in data:
                meths = rec['magic_method_codes'].strip().split(':')
                Dir = []
                Dir.append(float(rec["measurement_dec"]))
                Dir.append(float(rec["measurement_inc"]))
                Dir.append(float(rec["measurement_magn_moment"]))
                if "LT-T-Z" in meths:
                    BX.append(pmag.dir2cart(Dir))  # append baseline steps
                elif "LT-T-I" in meths:
                    X.append(pmag.dir2cart(Dir))
                    # count only in-field steps: counting baselines too made
                    # nmeas exceed len(X) and crashed the work-array loop below
                    nmeas += 1
            #
            if len(BX) == 1:
                for i in range(len(X) - 1):
                    BX.append(BX[0])  # assume first 0 field step as baseline
            elif len(BX) == 0:  # assume baseline is zero
                for i in range(len(X)):
                    BX.append([0., 0., 0.])  # assume baseline of 0
            elif len(BX) != len(X):  # if BX isn't just one measurement or one in between every infield step, just assume it is zero
                print('something odd about the baselines - just assuming zero')
                # discard the partial baselines before padding, so BX really
                # is all zeros (appending to the old list left stale entries)
                BX = []
                for i in range(len(X)):
                    BX.append([0., 0., 0.])  # assume baseline of 0
            if nmeas < 6:  # must have at least 6 measurements right now -
                print('skipping specimen ', s, ' too few measurements')
                specimen += 1
            else:
                # B matrix made from design matrix for positions
                B, H, tmpH = pmag.designATRM(npos)
                #
                # subtract optional baseline and put in a work array
                #
                work = np.zeros((nmeas, 3), 'f')
                for i in range(nmeas):
                    for j in range(3):
                        # subtract baseline, if available
                        work[i][j] = X[i][j] - BX[i][j]
                #
                # calculate tensor elements
                # first put ARM components in w vector
                #
                w = np.zeros((npos * 3), 'f')
                index = 0
                for i in range(npos):
                    for j in range(3):
                        w[index] = work[i][j]
                        index += 1
                s = np.zeros((6), 'f')  # initialize the s matrix
                for i in range(6):
                    for j in range(len(w)):
                        s[i] += B[i][j] * w[j]
                trace = s[0] + s[1] + s[2]  # normalize by the trace
                for i in range(6):
                    s[i] = s[i] / trace
                a = pmag.s2a(s)
                # ------------------------------------------------------------
                # Calculating dels is different than in the Kappabridge
                # routine. Use trace normalized tensor (a) and the applied
                # unit field directions (tmpH) to generate model X,Y,Z
                # components. Then compare these with the measured values.
                # ------------------------------------------------------------
                S = 0.
                comp = np.zeros((npos * 3), 'f')
                for i in range(npos):
                    for j in range(3):
                        index = i * 3 + j
                        compare = a[j][0] * tmpH[i][0] + a[j][1] * \
                            tmpH[i][1] + a[j][2] * tmpH[i][2]
                        comp[index] = compare
                for i in range(npos * 3):
                    d = (w[i] / trace) - comp[i]  # del values
                    S += d * d
                nf = float(npos * 3. - 6.)  # number of degrees of freedom
                if S > 0:
                    sigma = np.sqrt(S / nf)
                else:
                    sigma = 0
                hpars = pmag.dohext(nf, sigma, s)
                #
                # prepare for output
                #
                RmagSpecRec["anisotropy_s1"] = '%8.6f' % (s[0])
                RmagSpecRec["anisotropy_s2"] = '%8.6f' % (s[1])
                RmagSpecRec["anisotropy_s3"] = '%8.6f' % (s[2])
                RmagSpecRec["anisotropy_s4"] = '%8.6f' % (s[3])
                RmagSpecRec["anisotropy_s5"] = '%8.6f' % (s[4])
                RmagSpecRec["anisotropy_s6"] = '%8.6f' % (s[5])
                RmagSpecRec["anisotropy_mean"] = '%8.3e' % (trace / 3)
                RmagSpecRec["anisotropy_sigma"] = '%8.6f' % (sigma)
                RmagSpecRec["anisotropy_unit"] = "Am^2"
                RmagSpecRec["anisotropy_n"] = '%i' % (npos)
                RmagSpecRec["anisotropy_tilt_correction"] = '-1'
                # used by thellier_gui - must be taken out for uploading
                RmagSpecRec["anisotropy_F"] = '%7.1f ' % (hpars["F"])
                # used by thellier_gui - must be taken out for uploading
                RmagSpecRec["anisotropy_F_crit"] = hpars["F_crit"]
                RmagResRec["anisotropy_t1"] = '%8.6f ' % (hpars["t1"])
                RmagResRec["anisotropy_t2"] = '%8.6f ' % (hpars["t2"])
                RmagResRec["anisotropy_t3"] = '%8.6f ' % (hpars["t3"])
                RmagResRec["anisotropy_v1_dec"] = '%7.1f ' % (hpars["v1_dec"])
                RmagResRec["anisotropy_v2_dec"] = '%7.1f ' % (hpars["v2_dec"])
                RmagResRec["anisotropy_v3_dec"] = '%7.1f ' % (hpars["v3_dec"])
                RmagResRec["anisotropy_v1_inc"] = '%7.1f ' % (hpars["v1_inc"])
                RmagResRec["anisotropy_v2_inc"] = '%7.1f ' % (hpars["v2_inc"])
                RmagResRec["anisotropy_v3_inc"] = '%7.1f ' % (hpars["v3_inc"])
                RmagResRec["anisotropy_ftest"] = '%7.1f ' % (hpars["F"])
                RmagResRec["anisotropy_ftest12"] = '%7.1f ' % (hpars["F12"])
                RmagResRec["anisotropy_ftest23"] = '%7.1f ' % (hpars["F23"])
                RmagResRec["result_description"] = 'Critical F: ' + \
                    hpars["F_crit"] + ';Critical F12/F13: ' + hpars["F12_crit"]
                # assign zeta (larger) and eta (smaller) confidence semi-angles
                # to each eigenvector, depending on which error ellipse axis
                # dominates (same logic as aarm_magic)
                if hpars["e12"] > hpars["e13"]:
                    RmagResRec["anisotropy_v1_zeta_semi_angle"] = '%7.1f ' % (
                        hpars['e12'])
                    RmagResRec["anisotropy_v1_zeta_dec"] = '%7.1f ' % (
                        hpars['v2_dec'])
                    RmagResRec["anisotropy_v1_zeta_inc"] = '%7.1f ' % (
                        hpars['v2_inc'])
                    RmagResRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f ' % (
                        hpars['e12'])
                    RmagResRec["anisotropy_v2_zeta_dec"] = '%7.1f ' % (
                        hpars['v1_dec'])
                    RmagResRec["anisotropy_v2_zeta_inc"] = '%7.1f ' % (
                        hpars['v1_inc'])
                    RmagResRec["anisotropy_v1_eta_semi_angle"] = '%7.1f ' % (
                        hpars['e13'])
                    RmagResRec["anisotropy_v1_eta_dec"] = '%7.1f ' % (
                        hpars['v3_dec'])
                    RmagResRec["anisotropy_v1_eta_inc"] = '%7.1f ' % (
                        hpars['v3_inc'])
                    RmagResRec["anisotropy_v3_eta_semi_angle"] = '%7.1f ' % (
                        hpars['e13'])
                    RmagResRec["anisotropy_v3_eta_dec"] = '%7.1f ' % (
                        hpars['v1_dec'])
                    RmagResRec["anisotropy_v3_eta_inc"] = '%7.1f ' % (
                        hpars['v1_inc'])
                else:
                    RmagResRec["anisotropy_v1_zeta_semi_angle"] = '%7.1f ' % (
                        hpars['e13'])
                    RmagResRec["anisotropy_v1_zeta_dec"] = '%7.1f ' % (
                        hpars['v3_dec'])
                    RmagResRec["anisotropy_v1_zeta_inc"] = '%7.1f ' % (
                        hpars['v3_inc'])
                    RmagResRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f ' % (
                        hpars['e13'])
                    RmagResRec["anisotropy_v3_zeta_dec"] = '%7.1f ' % (
                        hpars['v1_dec'])
                    RmagResRec["anisotropy_v3_zeta_inc"] = '%7.1f ' % (
                        hpars['v1_inc'])
                    RmagResRec["anisotropy_v1_eta_semi_angle"] = '%7.1f ' % (
                        hpars['e12'])
                    RmagResRec["anisotropy_v1_eta_dec"] = '%7.1f ' % (
                        hpars['v2_dec'])
                    RmagResRec["anisotropy_v1_eta_inc"] = '%7.1f ' % (
                        hpars['v2_inc'])
                    RmagResRec["anisotropy_v2_eta_semi_angle"] = '%7.1f ' % (
                        hpars['e12'])
                    RmagResRec["anisotropy_v2_eta_dec"] = '%7.1f ' % (
                        hpars['v1_dec'])
                    RmagResRec["anisotropy_v2_eta_inc"] = '%7.1f ' % (
                        hpars['v1_inc'])
                if hpars["e23"] > hpars['e12']:
                    RmagResRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f ' % (
                        hpars['e23'])
                    RmagResRec["anisotropy_v2_zeta_dec"] = '%7.1f ' % (
                        hpars['v3_dec'])
                    RmagResRec["anisotropy_v2_zeta_inc"] = '%7.1f ' % (
                        hpars['v3_inc'])
                    RmagResRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f ' % (
                        hpars['e23'])
                    RmagResRec["anisotropy_v3_zeta_dec"] = '%7.1f ' % (
                        hpars['v2_dec'])
                    RmagResRec["anisotropy_v3_zeta_inc"] = '%7.1f ' % (
                        hpars['v2_inc'])
                    RmagResRec["anisotropy_v3_eta_semi_angle"] = '%7.1f ' % (
                        hpars['e13'])
                    RmagResRec["anisotropy_v3_eta_dec"] = '%7.1f ' % (
                        hpars['v1_dec'])
                    RmagResRec["anisotropy_v3_eta_inc"] = '%7.1f ' % (
                        hpars['v1_inc'])
                    RmagResRec["anisotropy_v2_eta_semi_angle"] = '%7.1f ' % (
                        hpars['e12'])
                    RmagResRec["anisotropy_v2_eta_dec"] = '%7.1f ' % (
                        hpars['v1_dec'])
                    RmagResRec["anisotropy_v2_eta_inc"] = '%7.1f ' % (
                        hpars['v1_inc'])
                else:
                    RmagResRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f ' % (
                        hpars['e12'])
                    RmagResRec["anisotropy_v2_zeta_dec"] = '%7.1f ' % (
                        hpars['v1_dec'])
                    RmagResRec["anisotropy_v2_zeta_inc"] = '%7.1f ' % (
                        hpars['v1_inc'])
                    RmagResRec["anisotropy_v3_eta_semi_angle"] = '%7.1f ' % (
                        hpars['e23'])
                    RmagResRec["anisotropy_v3_eta_dec"] = '%7.1f ' % (
                        hpars['v2_dec'])
                    RmagResRec["anisotropy_v3_eta_inc"] = '%7.1f ' % (
                        hpars['v2_inc'])
                    RmagResRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f ' % (
                        hpars['e13'])
                    RmagResRec["anisotropy_v3_zeta_dec"] = '%7.1f ' % (
                        hpars['v1_dec'])
                    RmagResRec["anisotropy_v3_zeta_inc"] = '%7.1f ' % (
                        hpars['v1_inc'])
                    RmagResRec["anisotropy_v2_eta_semi_angle"] = '%7.1f ' % (
                        hpars['e23'])
                    RmagResRec["anisotropy_v2_eta_dec"] = '%7.1f ' % (
                        hpars['v3_dec'])
                    RmagResRec["anisotropy_v2_eta_inc"] = '%7.1f ' % (
                        hpars['v3_inc'])
                RmagResRec["tilt_correction"] = '-1'
                RmagResRec["anisotropy_type"] = 'ATRM'
                RmagResRec["magic_method_codes"] = 'LP-AN-TRM:AE-H'
                RmagSpecRec["magic_method_codes"] = 'LP-AN-TRM:AE-H'
                RmagResRec["magic_software_packages"] = pmag.get_version()
                RmagSpecRec["magic_software_packages"] = pmag.get_version()
                # only append fully-computed records; previously the appends
                # (and a second specimen increment) also ran on the
                # too-few-measurements skip path, emitting half-filled
                # records and silently skipping the next specimen
                RmagSpecRecs.append(RmagSpecRec)
                RmagResRecs.append(RmagResRec)
                specimen += 1
                if data_model_num == 3:
                    SpecRec = RmagResRec.copy()
                    SpecRec.update(RmagSpecRec)
                    SpecRecs.append(SpecRec)
        else:
            # without this branch a specimen with <= 5 records never
            # advanced the counter, looping forever
            print('skipping specimen ', s, ' too few measurements')
            specimen += 1
    # finished iterating through specimens,
    # now we need to write out the data to files
    if data_model_num == 3:
        # translate records
        for rec in SpecRecs:
            rec3 = map_magic.convert_aniso('magic3', rec)
            SpecRecs3.append(rec3)
        # write output to 3.0 specimens file
        pmag.magic_write(output_spec_file, SpecRecs3, 'specimens')
        print("specimen data stored in {}".format(output_spec_file))
        return True, output_spec_file
    else:
        # write output to 2.5 rmag_ files
        pmag.magic_write(rmag_anis, RmagSpecRecs, 'rmag_anisotropy')
        print("specimen tensor elements stored in ", rmag_anis)
        pmag.magic_write(rmag_res, RmagResRecs, 'rmag_results')
        print("specimen statistics and eigenparameters stored in ", rmag_res)
        return True, rmag_anis
input_spec_file='specimens.txt', output_spec_file='specimens.txt',
data_model_num=3):
"""
Converts ATRM data to best-fit tensor (6 elements plus sigma)
Parameters
----------
meas_file : str
input measurement file
dir_path : str
output directory, default "."
input_dir_path : str
input file directory IF different from dir_path, default ""
input_spec_file : str
input specimen file name, default "specimens.txt"
output_spec_file : str
output specimen file name, default "specimens.txt"
data_model_num : number
MagIC data model [2, 3], default 3
Returns
---------
Tuple : (True or False indicating if conversion was sucessful, output file name written)
"""
# fix up file names
input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
meas_file = pmag.resolve_file_name(meas_file, input_dir_path)
rmag_anis = os.path.join(dir_path, 'rmag_anisotropy.txt')
rmag_res = os.path.join(dir_path, 'rmag_results.txt')
input_spec_file = pmag.resolve_file_name(input_spec_file, input_dir_path)
output_spec_file = pmag.resolve_file_name(output_spec_file, dir_path)
# read in data
if data_model_num == 3:
meas_data = []
meas_data3, file_type = pmag.magic_read(meas_file)
if file_type != 'measurements':
print(
"-E- {} is not a valid measurements file, {}".format(meas_file, file_type))
return False
# convert meas_data to 2.5
for rec in meas_data3:
meas_map = map_magic.meas_magic3_2_magic2_map
meas_data.append(map_magic.mapping(rec, meas_map))
old_spec_recs, file_type = pmag.magic_read(input_spec_file)
if file_type != 'specimens':
print("-W- {} is not a valid specimens file ".format(input_spec_file))
old_spec_recs = []
spec_recs = []
for rec in old_spec_recs:
spec_map = map_magic.spec_magic3_2_magic2_map
spec_recs.append(map_magic.mapping(rec, spec_map))
else:
meas_data, file_type = pmag.magic_read(meas_file)
if file_type != 'magic_measurements':
print("-E- {} is is not a valid magic_measurements file ".format(file_type))
return False, "{} is not a valid magic_measurements file, {}".format(meas_file, file_type)
meas_data = pmag.get_dictitem(
meas_data, 'magic_method_codes', 'LP-AN-TRM', 'has')
if not len(meas_data):
print("-E- No measurement records found with code LP-AN-TRM")
return False, "No measurement records found with code LP-AN-TRM"
#
#
# get sorted list of unique specimen names
ssort = []
for rec in meas_data:
spec = rec["er_specimen_name"]
if spec not in ssort:
ssort.append(spec)
sids = sorted(ssort)
#
#
# work on each specimen
#
specimen, npos = 0, 6
RmagSpecRecs, RmagResRecs = [], []
SpecRecs, SpecRecs3 = [], []
while specimen < len(sids):
nmeas = 0
s = sids[specimen]
RmagSpecRec = {}
RmagResRec = {}
# get old specrec here if applicable
if data_model_num == 3:
if spec_recs:
try:
RmagResRec = pmag.get_dictitem(
spec_recs, 'er_specimen_name', s, 'T')[0]
RmagSpecRec = pmag.get_dictitem(
spec_recs, 'er_specimen_name', s, 'T')[0]
except IndexError:
pass
BX, X = [], []
method_codes = []
Spec0 = ""
#
# find the data from the meas_data file for this sample
# and get dec, inc, int and convert to x,y,z
#
# fish out data for this specimen name
data = pmag.get_dictitem(meas_data, 'er_specimen_name', s, 'T')
if len(data) > 5:
RmagSpecRec["rmag_anisotropy_name"] = data[0]["er_specimen_name"]
RmagSpecRec["er_location_name"] = data[0].get(
"er_location_name", "")
RmagSpecRec["er_specimen_name"] = data[0]["er_specimen_name"]
if not "er_sample_name" in RmagSpecRec:
RmagSpecRec["er_sample_name"] = data[0].get(
"er_sample_name", "")
RmagSpecRec["er_site_name"] = data[0].get("er_site_name", "")
RmagSpecRec["magic_experiment_names"] = RmagSpecRec["rmag_anisotropy_name"] + ":ATRM"
RmagSpecRec["er_citation_names"] = "This study"
RmagResRec["rmag_result_name"] = data[0]["er_specimen_name"] + ":ATRM"
RmagResRec["er_location_names"] = data[0].get(
"er_location_names", "")
RmagResRec["er_specimen_names"] = data[0]["er_specimen_name"]
if data_model_num == 2:
RmagResRec["er_sample_names"] = data[0].get(
"er_sample_name", "")
RmagResRec["er_site_names"] = data[0].get("er_site_name", "")
RmagResRec["magic_experiment_names"] = RmagSpecRec["rmag_anisotropy_name"] + ":ATRM"
RmagResRec["er_citation_names"] = "This study"
RmagSpecRec["anisotropy_type"] = "ATRM"
if "magic_instrument_codes" in list(data[0].keys()):
RmagSpecRec["magic_instrument_codes"] = data[0]["magic_instrument_codes"]
else:
RmagSpecRec["magic_instrument_codes"] = ""
RmagSpecRec["anisotropy_description"] = "Hext statistics adapted to ATRM"
for rec in data:
meths = rec['magic_method_codes'].strip().split(':')
Dir = []
Dir.append(float(rec["measurement_dec"]))
Dir.append(float(rec["measurement_inc"]))
Dir.append(float(rec["measurement_magn_moment"]))
if "LT-T-Z" in meths:
BX.append(pmag.dir2cart(Dir)) # append baseline steps
elif "LT-T-I" in meths:
X.append(pmag.dir2cart(Dir))
nmeas += 1
#
if len(BX) == 1:
for i in range(len(X) - 1):
BX.append(BX[0]) # assume first 0 field step as baseline
elif len(BX) == 0: # assume baseline is zero
for i in range(len(X)):
BX.append([0., 0., 0.]) # assume baseline of 0
elif len(BX) != len(X): # if BX isn't just one measurement or one in between every infield step, just assume it is zero
print('something odd about the baselines - just assuming zero')
for i in range(len(X)):
BX.append([0., 0., 0.]) # assume baseline of 0
if nmeas < 6: # must have at least 6 measurements right now -
print('skipping specimen ', s, ' too few measurements')
specimen += 1
else:
# B matrix made from design matrix for positions
B, H, tmpH = pmag.designATRM(npos)
#
# subtract optional baseline and put in a work array
#
work = np.zeros((nmeas, 3), 'f')
for i in range(nmeas):
for j in range(3):
# subtract baseline, if available
work[i][j] = X[i][j] - BX[i][j]
#
# calculate tensor elements
# first put ARM components in w vector
#
w = np.zeros((npos * 3), 'f')
index = 0
for i in range(npos):
for j in range(3):
w[index] = work[i][j]
index += 1
s = np.zeros((6), 'f') # initialize the s matrix
for i in range(6):
for j in range(len(w)):
s[i] += B[i][j] * w[j]
trace = s[0] + s[1] + s[2] # normalize by the trace
for i in range(6):
s[i] = s[i] / trace
a = pmag.s2a(s)
# ------------------------------------------------------------
# Calculating dels is different than in the Kappabridge
# routine. Use trace normalized tensor (a) and the applied
# unit field directions (tmpH) to generate model X,Y,Z
# components. Then compare these with the measured values.
# ------------------------------------------------------------
S = 0.
comp = np.zeros((npos * 3), 'f')
for i in range(npos):
for j in range(3):
index = i * 3 + j
compare = a[j][0] * tmpH[i][0] + a[j][1] * \
tmpH[i][1] + a[j][2] * tmpH[i][2]
comp[index] = compare
for i in range(npos * 3):
d = (w[i] / trace) - comp[i] # del values
S += d * d
nf = float(npos * 3. - 6.) # number of degrees of freedom
if S > 0:
sigma = np.sqrt(S / nf)
else:
sigma = 0
hpars = pmag.dohext(nf, sigma, s)
#
# prepare for output
#
RmagSpecRec["anisotropy_s1"] = '%8.6f' % (s[0])
RmagSpecRec["anisotropy_s2"] = '%8.6f' % (s[1])
RmagSpecRec["anisotropy_s3"] = '%8.6f' % (s[2])
RmagSpecRec["anisotropy_s4"] = '%8.6f' % (s[3])
RmagSpecRec["anisotropy_s5"] = '%8.6f' % (s[4])
RmagSpecRec["anisotropy_s6"] = '%8.6f' % (s[5])
RmagSpecRec["anisotropy_mean"] = '%8.3e' % (trace / 3)
RmagSpecRec["anisotropy_sigma"] = '%8.6f' % (sigma)
RmagSpecRec["anisotropy_unit"] = "Am^2"
RmagSpecRec["anisotropy_n"] = '%i' % (npos)
RmagSpecRec["anisotropy_tilt_correction"] = '-1'
# used by thellier_gui - must be taken out for uploading
RmagSpecRec["anisotropy_F"] = '%7.1f ' % (hpars["F"])
# used by thellier_gui - must be taken out for uploading
RmagSpecRec["anisotropy_F_crit"] = hpars["F_crit"]
RmagResRec["anisotropy_t1"] = '%8.6f ' % (hpars["t1"])
RmagResRec["anisotropy_t2"] = '%8.6f ' % (hpars["t2"])
RmagResRec["anisotropy_t3"] = '%8.6f ' % (hpars["t3"])
RmagResRec["anisotropy_v1_dec"] = '%7.1f ' % (hpars["v1_dec"])
RmagResRec["anisotropy_v2_dec"] = '%7.1f ' % (hpars["v2_dec"])
RmagResRec["anisotropy_v3_dec"] = '%7.1f ' % (hpars["v3_dec"])
RmagResRec["anisotropy_v1_inc"] = '%7.1f ' % (hpars["v1_inc"])
RmagResRec["anisotropy_v2_inc"] = '%7.1f ' % (hpars["v2_inc"])
RmagResRec["anisotropy_v3_inc"] = '%7.1f ' % (hpars["v3_inc"])
RmagResRec["anisotropy_ftest"] = '%7.1f ' % (hpars["F"])
RmagResRec["anisotropy_ftest12"] = '%7.1f ' % (hpars["F12"])
RmagResRec["anisotropy_ftest23"] = '%7.1f ' % (hpars["F23"])
RmagResRec["result_description"] = 'Critical F: ' + \
hpars["F_crit"] + ';Critical F12/F13: ' + hpars["F12_crit"]
if hpars["e12"] > hpars["e13"]:
RmagResRec["anisotropy_v1_zeta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v1_zeta_dec"] = '%7.1f ' % (
hpars['v2_dec'])
RmagResRec["anisotropy_v1_zeta_inc"] = '%7.1f ' % (
hpars['v2_inc'])
RmagResRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v2_zeta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v2_zeta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
RmagResRec["anisotropy_v1_eta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v1_eta_dec"] = '%7.1f ' % (
hpars['v3_dec'])
RmagResRec["anisotropy_v1_eta_inc"] = '%7.1f ' % (
hpars['v3_inc'])
RmagResRec["anisotropy_v3_eta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v3_eta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v3_eta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
else:
RmagResRec["anisotropy_v1_zeta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v1_zeta_dec"] = '%7.1f ' % (
hpars['v3_dec'])
RmagResRec["anisotropy_v1_zeta_inc"] = '%7.1f ' % (
hpars['v3_inc'])
RmagResRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v3_zeta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v3_zeta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
RmagResRec["anisotropy_v1_eta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v1_eta_dec"] = '%7.1f ' % (
hpars['v2_dec'])
RmagResRec["anisotropy_v1_eta_inc"] = '%7.1f ' % (
hpars['v2_inc'])
RmagResRec["anisotropy_v2_eta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v2_eta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v2_eta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
if hpars["e23"] > hpars['e12']:
RmagResRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f ' % (
hpars['e23'])
RmagResRec["anisotropy_v2_zeta_dec"] = '%7.1f ' % (
hpars['v3_dec'])
RmagResRec["anisotropy_v2_zeta_inc"] = '%7.1f ' % (
hpars['v3_inc'])
RmagResRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f ' % (
hpars['e23'])
RmagResRec["anisotropy_v3_zeta_dec"] = '%7.1f ' % (
hpars['v2_dec'])
RmagResRec["anisotropy_v3_zeta_inc"] = '%7.1f ' % (
hpars['v2_inc'])
RmagResRec["anisotropy_v3_eta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v3_eta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v3_eta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
RmagResRec["anisotropy_v2_eta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v2_eta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v2_eta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
else:
RmagResRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f ' % (
hpars['e12'])
RmagResRec["anisotropy_v2_zeta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v2_zeta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
RmagResRec["anisotropy_v3_eta_semi_angle"] = '%7.1f ' % (
hpars['e23'])
RmagResRec["anisotropy_v3_eta_dec"] = '%7.1f ' % (
hpars['v2_dec'])
RmagResRec["anisotropy_v3_eta_inc"] = '%7.1f ' % (
hpars['v2_inc'])
RmagResRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f ' % (
hpars['e13'])
RmagResRec["anisotropy_v3_zeta_dec"] = '%7.1f ' % (
hpars['v1_dec'])
RmagResRec["anisotropy_v3_zeta_inc"] = '%7.1f ' % (
hpars['v1_inc'])
RmagResRec["anisotropy_v2_eta_semi_angle"] = '%7.1f ' % (
hpars['e23'])
RmagResRec["anisotropy_v2_eta_dec"] = '%7.1f ' % (
hpars['v3_dec'])
RmagResRec["anisotropy_v2_eta_inc"] = '%7.1f ' % (
hpars['v3_inc'])
RmagResRec["tilt_correction"] = '-1'
RmagResRec["anisotropy_type"] = 'ATRM'
RmagResRec["magic_method_codes"] = 'LP-AN-TRM:AE-H'
RmagSpecRec["magic_method_codes"] = 'LP-AN-TRM:AE-H'
RmagResRec["magic_software_packages"] = pmag.get_version()
RmagSpecRec["magic_software_packages"] = pmag.get_version()
RmagSpecRecs.append(RmagSpecRec)
RmagResRecs.append(RmagResRec)
specimen += 1
if data_model_num == 3:
SpecRec = RmagResRec.copy()
SpecRec.update(RmagSpecRec)
SpecRecs.append(SpecRec)
# finished iterating through specimens,
# now we need to write out the data to files
if data_model_num == 3:
# translate records
for rec in SpecRecs:
rec3 = map_magic.convert_aniso('magic3', rec)
SpecRecs3.append(rec3)
# write output to 3.0 specimens file
pmag.magic_write(output_spec_file, SpecRecs3, 'specimens')
print("specimen data stored in {}".format(output_spec_file))
return True, output_spec_file
else:
# write output to 2.5 rmag_ files
pmag.magic_write(rmag_anis, RmagSpecRecs, 'rmag_anisotropy')
print("specimen tensor elements stored in ", rmag_anis)
pmag.magic_write(rmag_res, RmagResRecs, 'rmag_results')
print("specimen statistics and eigenparameters stored in ", rmag_res)
return True, rmag_anis | Converts ATRM data to best-fit tensor (6 elements plus sigma)
Parameters
----------
meas_file : str
input measurement file
dir_path : str
output directory, default "."
input_dir_path : str
input file directory IF different from dir_path, default ""
input_spec_file : str
input specimen file name, default "specimens.txt"
output_spec_file : str
output specimen file name, default "specimens.txt"
data_model_num : number
MagIC data model [2, 3], default 3
Returns
---------
Tuple : (True or False indicating if conversion was successful, output file name written) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L9836-L10213
PmagPy/PmagPy | pmagpy/ipmag.py | zeq_magic | def zeq_magic(meas_file='measurements.txt', spec_file='',crd='s',input_dir_path='.', angle=0,
              n_plots=5, save_plots=True, fmt="svg", interactive=False, specimen="",
              samp_file='samples.txt', contribution=None,fignum=1):
    """
    zeq_magic makes zijderveld and equal area plots for magic formatted measurements files.

    Parameters
    ----------
    meas_file : str
        input measurement file
    spec_file : str
        input specimen interpretation file
    samp_file : str, default 'samples.txt'
        input sample orientations file
    crd : str
        coordinate system [s,g,t] for specimen, geographic, tilt corrected
        g,t options require a sample file with specimen and bedding orientation
    input_dir_path : str
        input directory of meas_file, default "."
    angle : float
        angle of X direction with respect to specimen X
    n_plots : int, default 5
        maximum number of plots to make
        if you want to make all possible plots, specify "all"
    save_plots : bool, default True
        if True, create and save all requested plots
    fmt : str, default "svg"
        format for figures, [svg, jpg, pdf, png]
    interactive : bool, default False
        interactively plot and display for each specimen
        (this is best used on the command line only)
    specimen : str, default ""
        specimen name to plot
    contribution : cb.Contribution, default None
        if provided, use Contribution object instead of reading in
        data from files
    fignum : matplotlib figure number

    Returns
    -------
    (status, saved) : (bool, list)
        status is False (with an empty list) when the measurement file
        cannot be read, there are no data, or required columns are
        missing; otherwise True together with the list of plot file
        names written by pmagplotlib.save_plots (may be empty).
    """
    def plot_interpretations(ZED, spec_container, this_specimen, this_specimen_measurements, datablock):
        """
        Overlay previously saved directional interpretations (LP-DIR
        records in the specimens table) for this_specimen onto the
        figures in ZED, recomputing each fit with pmag.domean.
        Returns ZED (possibly unchanged).
        """
        # nothing to overlay if any of the inputs is missing/empty
        if cb.is_null(spec_container) or cb.is_null(this_specimen_measurements) or cb.is_null(datablock):
            return ZED
        if 'method_codes' not in spec_container.df.columns:
            return ZED
        prior_spec_data = spec_container.get_records_for_code(
            'LP-DIR', strict_match=False) # look up all prior directional interpretations
        prior_specimen_interpretations=[]
        if not len(prior_spec_data):
            return ZED
        mpars = {"specimen_direction_type": "Error"}
        if len(prior_spec_data):
            # restrict the prior interpretations to this specimen only
            prior_specimen_interpretations = prior_spec_data[prior_spec_data['specimen'].astype(str) == this_specimen] #.str.match(this_specimen) == True]
        if len(prior_specimen_interpretations):
            if len(prior_specimen_interpretations)>0:
                beg_pcas = pd.to_numeric(
                    prior_specimen_interpretations.meas_step_min.values).tolist()
                end_pcas = pd.to_numeric(
                    prior_specimen_interpretations.meas_step_max.values).tolist()
                spec_methods = prior_specimen_interpretations.method_codes.tolist()
            # step through all prior interpretations and plot them
            for ind in range(len(beg_pcas)):
                spec_meths = spec_methods[ind].split(':')
                # the last matching DE- code in the list wins
                for m in spec_meths:
                    if 'DE-BFL' in m:
                        calculation_type = 'DE-BFL' # best fit line
                    if 'DE-BFP' in m:
                        calculation_type = 'DE-BFP' # best fit plane
                    if 'DE-FM' in m:
                        calculation_type = 'DE-FM' # fisher mean
                    if 'DE-BFL-A' in m:
                        calculation_type = 'DE-BFL-A' # anchored best fit line
                treatments = pd.to_numeric(this_specimen_measurements.treatment).tolist()
                if len(beg_pcas)!=0:
                    try:
                        # getting the starting and ending points
                        start, end = treatments.index(beg_pcas[ind]), treatments.index(end_pcas[ind])
                        mpars = pmag.domean(
                            datablock, start, end, calculation_type)
                    except ValueError as ex:
                        mpars['specimen_direction_type'] = "Error"
                        # retry, treating a bound of 0 as "first step"
                        try:
                            if beg_pcas[ind] == 0:
                                start = 0
                            else:
                                start = treatments.index(beg_pcas[ind])
                            if end_pcas[ind] == 0:
                                end = 0
                            else:
                                end = treatments.index(end_pcas[ind])
                            mpars = pmag.domean(
                                datablock, start, end, calculation_type)
                        except ValueError:
                            mpars['specimen_direction_type'] = "Error"
                # calculate direction/plane
                if mpars["specimen_direction_type"] != "Error":
                    # put it on the plot
                    pmagplotlib.plot_dir(ZED, mpars, datablock, angle)
                    #if interactive:
                    #    pmagplotlib.draw_figs(ZED)
                else:
                    print('\n-W- Specimen {} record contains invalid start/stop bounds:'.format(this_specimen))
                    print(prior_spec_data.loc[this_specimen][['meas_step_min', 'meas_step_max']])
                    print('\n Measurement records:')
                    cols = list(set(['treat_ac_field', 'treat_temp']).intersection(this_specimen_measurements.columns))
                    print(this_specimen_measurements[cols])
                    print('\n Data will be plotted without interpretations\n')
        return ZED
    def make_plots(spec, cnt, meas_df, spec_container, samp_container=None):
        """
        Build the equal-area / Zijderveld / demag figures (figure numbers
        cnt, cnt+1, cnt+2) for one specimen, then overlay any prior
        interpretations.  Returns the ZED figure dictionary, or None if
        there are too few demag steps or the data are malformed.

        NOTE(review): the body filters on the closure variable ``s`` (the
        loop variable of the enclosing function) rather than the ``spec``
        parameter; the only call site passes ``s`` so they coincide here --
        confirm before calling this helper with anything else.
        """
        # get sample data for orientation
        if spec_container:
            try:
                samps = spec_container.df.loc[spec, 'sample']
            except KeyError:
                samps = ""
            samp_df = []
            # 'sample' may come back as a numeric scalar (possibly NaN),
            # None, a string, or a Series -- normalize to samp / samp_df
            if isinstance(samps, int) or isinstance(samps, float) or isinstance(samps, np.int64):
                if np.isnan(samps):
                    samp = ""
                    samp_df = []
                else:
                    samp = str(samps)
                    samp_container.df.index = samp_container.df.index.astype(str)
                    samp_df = samp_container.df[samp_container.df.index == samp]
            elif isinstance(samps, type(None)):
                samp = ""
                samp_df = []
            elif len(samps):
                if isinstance(samps, str):
                    samp = samps
                else:
                    samp = samps.iloc[0]
                samp_df = samp_container.df[samp_container.df.index == samp]
        else:
            samp_df = []
        # we can make the figure dictionary that pmagplotlib likes:
        ZED = {'eqarea': cnt, 'zijd': cnt+1, 'demag': cnt+2} # make datablock
        # get the relevant data
        spec_df = meas_df[meas_df.specimen == s]
        # remove ARM data
        spec_df = spec_df[- spec_df.method_codes.str.contains(
            'LP-*[\w]*-ARM')]
        # split data into NRM, thermal, and af dataframes
        spec_df_nrm = spec_df[spec_df.method_codes.str.contains(
            'LT-NO')] # get the NRM data
        spec_df_th = spec_df[spec_df.method_codes.str.contains(
            'LT-T-Z')] # zero field thermal demag steps
        try:
            cond = spec_df.method_codes.str.contains('(^|[\s\:])LT-PTRM')
            spec_df_th = spec_df_th[-cond] # get rid of some pTRM steps
        except ValueError:
            # fall back to a row-by-row scan when the boolean mask
            # cannot be applied directly
            keep_inds = []
            n = 0
            for ind, row in spec_df_th.copy().iterrows():
                if 'LT-PTRM' in row['method_codes'] and 'ALT-PTRM' not in row['method_codes']:
                    keep_inds.append(n)
                else:
                    pass
                n += 1
            if len(keep_inds) < n:
                spec_df_th = spec_df_th.iloc[keep_inds]
        spec_df_af = spec_df[spec_df.method_codes.str.contains('LT-AF-Z')]
        this_spec_meas_df = None
        datablock = None
        # need more than one demag step (thermal or AF) to plot anything
        if (not len(spec_df_th.index) > 1) and (not len(spec_df_af.index) > 1):
            return
        if len(spec_df_th.index) > 1: # this is a thermal run
            this_spec_meas_df = pd.concat([spec_df_nrm, spec_df_th])
            # make sure all decs/incs are filled in
            n_rows = len(this_spec_meas_df)
            this_spec_meas_df = this_spec_meas_df.dropna(how='any', subset=['dir_dec', 'dir_inc', 'magn_moment'])
            if n_rows > len(this_spec_meas_df):
                print('-W- Some dec/inc/moment data were missing for specimen {}, so {} measurement row(s) were excluded'.format(s, n_rows - len(this_spec_meas_df)))
            # geographic transformation
            if coord != "-1" and len(samp_df):
                this_spec_meas_df = transform_to_geographic(this_spec_meas_df, samp_df, samp, coord)
            units = 'K' # units are kelvin
            try:
                this_spec_meas_df['magn_moment'] = this_spec_meas_df['magn_moment'].astype(float)
                this_spec_meas_df['treat_temp'] = this_spec_meas_df['treat_temp'].astype(float)
            except:
                print('-W- There are malformed or missing data for specimen {}, skipping'.format(spec))
                return
            datablock = this_spec_meas_df[['treat_temp', 'dir_dec', 'dir_inc',
                                           'magn_moment', 'blank', 'quality']].values.tolist()
            ZED = pmagplotlib.plot_zed(ZED, datablock, angle, s, units)
        if len(spec_df_af.index) > 1: # this is an af run
            this_spec_meas_df = pd.concat([spec_df_nrm, spec_df_af])
            # make sure all decs/incs are filled in
            n_rows = len(this_spec_meas_df)
            this_spec_meas_df = this_spec_meas_df.dropna(how='any', subset=['dir_dec', 'dir_inc', 'magn_moment'])
            if n_rows > len(this_spec_meas_df):
                print('-W- Some dec/inc/moment data were missing for specimen {}, so {} measurement row(s) were excluded'.format(s, n_rows - len(this_spec_meas_df)))
            # geographic transformation
            if coord != "-1" and len(samp_df):
                this_spec_meas_df = transform_to_geographic(this_spec_meas_df, samp_df, samp, coord)
            units = 'T' # these are AF data
            try:
                this_spec_meas_df['magn_moment'] = this_spec_meas_df['magn_moment'].astype(float)
                this_spec_meas_df['treat_ac_field'] = this_spec_meas_df['treat_ac_field'].astype(float)
            except:
                print('-W- There are malformed or missing data for specimen {}, skipping'.format(spec))
                return
            datablock = this_spec_meas_df[['treat_ac_field', 'dir_dec', 'dir_inc',
                                           'magn_moment', 'blank', 'quality']].values.tolist()
            ZED = pmagplotlib.plot_zed(ZED, datablock, angle, s, units)
        return plot_interpretations(ZED, spec_container, s, this_spec_meas_df, datablock)
    if interactive:
        save_plots = False
    # read in MagIC formatted data if contribution object not provided
    if not isinstance(contribution, cb.Contribution):
        input_dir_path = os.path.realpath(input_dir_path)
        file_path = pmag.resolve_file_name(meas_file, input_dir_path)
        # read in magic formatted data
        if not os.path.exists(file_path):
            print('No such file:', file_path)
            return False, []
        custom_filenames = {'measurements': file_path, 'specimens': spec_file, 'samples': samp_file}
        contribution = cb.Contribution(input_dir_path, custom_filenames=custom_filenames,
                                       read_tables=['measurements', 'specimens',
                                                    'contribution', 'samples'])
        if pmagplotlib.isServer:
            # server plot names use the location/site/sample hierarchy,
            # so propagate location names down through the tables
            try:
                contribution.propagate_location_to_samples()
                contribution.propagate_location_to_specimens()
                contribution.propagate_location_to_measurements()
            except KeyError as ex:
                pass
    meas_container = contribution.tables['measurements']
    meas_df = contribution.tables['measurements'].df #
    #meas_df=pd.read_csv(file_path, sep='\t', header=1)
    spec_container = contribution.tables.get('specimens', None)
    samp_container = contribution.tables.get('samples', None)
    #if not spec_file:
    #    spec_file = os.path.join(os.path.split(file_path)[0], "specimens.txt")
    #if os.path.exists(spec_file):
    #    spec_container = cb.MagicDataFrame(spec_file, dtype="specimens")
    #else:
    #    spec_container = None
    meas_df['blank'] = "" # this is a dummy variable expected by plotZED
    if 'treat_ac_field' in meas_df.columns:
        # create 'treatment' column.
        # uses treat_temp if treat_ac_field is missing OR zero.
        # (have to take this into account for plotting later)
        if 'treat_temp' in meas_df.columns:
            meas_df['treatment'] = meas_df['treat_ac_field'].where(
                cond=meas_df['treat_ac_field'].astype(bool), other=meas_df['treat_temp'])
        else:
            meas_df['treatment'] = meas_df['treat_ac_field']
    else:
        meas_df['treatment'] = meas_df['treat_temp']
    # translate the crd flag into the tilt-correction convention used
    # below: -1 = specimen, 0 = geographic, 100 = tilt corrected
    if crd == "s":
        coord = "-1"
    elif crd == "t":
        coord = "100"
    else:
        coord = "0"
    specimens = meas_df.specimen.unique() # list of specimen names
    if len(specimens) == 0:
        print('there are no data for plotting')
        return False, []
    # check measurement table for req'd fields
    missing = []
    reqd_cols_present = meas_df.columns.intersection(['dir_dec', 'dir_inc', 'magn_moment'])
    for col in ['dir_dec', 'dir_inc', 'magn_moment']:
        if col not in reqd_cols_present:
            missing.append(col)
    if missing:
        print('-W- Missing required column(s) {}, cannot run zeq_magic'.format(', '.join(missing)))
        return False, []
    cnt = fignum
    if n_plots != "all":
        if len(specimens) > n_plots:
            specimens = specimens[:n_plots]
    saved = []
    if specimen:
        # a single requested specimen overrides the n_plots selection
        specimens = [specimen]
    for s in specimens:
        ZED = make_plots(s, cnt, meas_df, spec_container, samp_container)
        if not ZED:
            if pmagplotlib.verbose:
                print('No plots could be created for specimen:', s)
            continue
        titles = {key: s + "_" + key + "." + fmt for key in ZED}
        if pmagplotlib.isServer:
            titles = {}
            titles['eqarea'] = 'Equal Area Plot'
            titles['zijd'] = 'Zijderveld Plot'
            titles['demag'] = 'Demagnetization Plot'
            con_id = ""
            if 'contribution' in contribution.tables:
                if 'id' in contribution.tables['contribution'].df.columns:
                    con_id = contribution.tables['contribution'].df['id'].values[0]
            pmagplotlib.add_borders(ZED, titles, con_id=con_id)
            for title in titles:
                # try to get the full hierarchy for plot names
                df_slice = meas_container.df[meas_container.df['specimen'] == s]
                location = str(meas_container.get_name('location', df_slice))
                site = str(meas_container.get_name('site', df_slice))
                sample = str(meas_container.get_name('sample', df_slice))
                # add coord here!
                filename = 'LO:_'+location+'_SI:_'+site+'_SA:_'+sample + \
                    '_SP:_'+str(s)+'_CO:_' + '_TY:_'+title+'_.png'
                titles[title] = filename
        if save_plots:
            saved.extend(pmagplotlib.save_plots(ZED, titles))
        elif interactive:
            pmagplotlib.draw_figs(ZED)
            ans = pmagplotlib.save_or_quit()
            if ans == 'a':
                saved.extend(pmagplotlib.save_plots(ZED, titles))
            else:
                continue
        else:
            # not saving: advance figure numbers so plots are not overdrawn
            cnt += 3
    return True, saved | python | def zeq_magic(meas_file='measurements.txt', spec_file='',crd='s',input_dir_path='.', angle=0,
n_plots=5, save_plots=True, fmt="svg", interactive=False, specimen="",
samp_file='samples.txt', contribution=None,fignum=1):
"""
zeq_magic makes zijderveld and equal area plots for magic formatted measurements files.
Parameters
----------
meas_file : str
input measurement file
spec_file : str
input specimen interpretation file
samp_file : str
input sample orientations file
crd : str
coordinate system [s,g,t] for specimen, geographic, tilt corrected
g,t options require a sample file with specimen and bedding orientation
input_dir_path : str
input directory of meas_file, default "."
angle : float
angle of X direction with respect to specimen X
n_plots : int, default 5
maximum number of plots to make
if you want to make all possible plots, specify "all"
save_plots : bool, default True
if True, create and save all requested plots
fmt : str, default "svg"
format for figures, [svg, jpg, pdf, png]
interactive : bool, default False
interactively plot and display for each specimen
(this is best used on the command line only)
specimen : str, default ""
specimen name to plot
samp_file : str, default 'samples.txt'
name of samples file
contribution : cb.Contribution, default None
if provided, use Contribution object instead of reading in
data from files
fignum : matplotlib figure number
"""
def plot_interpretations(ZED, spec_container, this_specimen, this_specimen_measurements, datablock):
if cb.is_null(spec_container) or cb.is_null(this_specimen_measurements) or cb.is_null(datablock):
return ZED
if 'method_codes' not in spec_container.df.columns:
return ZED
prior_spec_data = spec_container.get_records_for_code(
'LP-DIR', strict_match=False) # look up all prior directional interpretations
prior_specimen_interpretations=[]
if not len(prior_spec_data):
return ZED
mpars = {"specimen_direction_type": "Error"}
if len(prior_spec_data):
prior_specimen_interpretations = prior_spec_data[prior_spec_data['specimen'].astype(str) == this_specimen] #.str.match(this_specimen) == True]
if len(prior_specimen_interpretations):
if len(prior_specimen_interpretations)>0:
beg_pcas = pd.to_numeric(
prior_specimen_interpretations.meas_step_min.values).tolist()
end_pcas = pd.to_numeric(
prior_specimen_interpretations.meas_step_max.values).tolist()
spec_methods = prior_specimen_interpretations.method_codes.tolist()
# step through all prior interpretations and plot them
for ind in range(len(beg_pcas)):
spec_meths = spec_methods[ind].split(':')
for m in spec_meths:
if 'DE-BFL' in m:
calculation_type = 'DE-BFL' # best fit line
if 'DE-BFP' in m:
calculation_type = 'DE-BFP' # best fit plane
if 'DE-FM' in m:
calculation_type = 'DE-FM' # fisher mean
if 'DE-BFL-A' in m:
calculation_type = 'DE-BFL-A' # anchored best fit line
treatments = pd.to_numeric(this_specimen_measurements.treatment).tolist()
if len(beg_pcas)!=0:
try:
# getting the starting and ending points
start, end = treatments.index(beg_pcas[ind]), treatments.index(end_pcas[ind])
mpars = pmag.domean(
datablock, start, end, calculation_type)
except ValueError as ex:
mpars['specimen_direction_type'] = "Error"
try:
if beg_pcas[ind] == 0:
start = 0
else:
start = treatments.index(beg_pcas[ind])
if end_pcas[ind] == 0:
end = 0
else:
end = treatments.index(end_pcas[ind])
mpars = pmag.domean(
datablock, start, end, calculation_type)
except ValueError:
mpars['specimen_direction_type'] = "Error"
# calculate direction/plane
if mpars["specimen_direction_type"] != "Error":
# put it on the plot
pmagplotlib.plot_dir(ZED, mpars, datablock, angle)
#if interactive:
# pmagplotlib.draw_figs(ZED)
else:
print('\n-W- Specimen {} record contains invalid start/stop bounds:'.format(this_specimen))
print(prior_spec_data.loc[this_specimen][['meas_step_min', 'meas_step_max']])
print('\n Measurement records:')
cols = list(set(['treat_ac_field', 'treat_temp']).intersection(this_specimen_measurements.columns))
print(this_specimen_measurements[cols])
print('\n Data will be plotted without interpretations\n')
return ZED
def make_plots(spec, cnt, meas_df, spec_container, samp_container=None):
# get sample data for orientation
if spec_container:
try:
samps = spec_container.df.loc[spec, 'sample']
except KeyError:
samps = ""
samp_df = []
if isinstance(samps, int) or isinstance(samps, float) or isinstance(samps, np.int64):
if np.isnan(samps):
samp = ""
samp_df = []
else:
samp = str(samps)
samp_container.df.index = samp_container.df.index.astype(str)
samp_df = samp_container.df[samp_container.df.index == samp]
elif isinstance(samps, type(None)):
samp = ""
samp_df = []
elif len(samps):
if isinstance(samps, str):
samp = samps
else:
samp = samps.iloc[0]
samp_df = samp_container.df[samp_container.df.index == samp]
else:
samp_df = []
# we can make the figure dictionary that pmagplotlib likes:
ZED = {'eqarea': cnt, 'zijd': cnt+1, 'demag': cnt+2} # make datablock
# get the relevant data
spec_df = meas_df[meas_df.specimen == s]
# remove ARM data
spec_df = spec_df[- spec_df.method_codes.str.contains(
'LP-*[\w]*-ARM')]
# split data into NRM, thermal, and af dataframes
spec_df_nrm = spec_df[spec_df.method_codes.str.contains(
'LT-NO')] # get the NRM data
spec_df_th = spec_df[spec_df.method_codes.str.contains(
'LT-T-Z')] # zero field thermal demag steps
try:
cond = spec_df.method_codes.str.contains('(^|[\s\:])LT-PTRM')
spec_df_th = spec_df_th[-cond] # get rid of some pTRM steps
except ValueError:
keep_inds = []
n = 0
for ind, row in spec_df_th.copy().iterrows():
if 'LT-PTRM' in row['method_codes'] and 'ALT-PTRM' not in row['method_codes']:
keep_inds.append(n)
else:
pass
n += 1
if len(keep_inds) < n:
spec_df_th = spec_df_th.iloc[keep_inds]
spec_df_af = spec_df[spec_df.method_codes.str.contains('LT-AF-Z')]
this_spec_meas_df = None
datablock = None
if (not len(spec_df_th.index) > 1) and (not len(spec_df_af.index) > 1):
return
if len(spec_df_th.index) > 1: # this is a thermal run
this_spec_meas_df = pd.concat([spec_df_nrm, spec_df_th])
# make sure all decs/incs are filled in
n_rows = len(this_spec_meas_df)
this_spec_meas_df = this_spec_meas_df.dropna(how='any', subset=['dir_dec', 'dir_inc', 'magn_moment'])
if n_rows > len(this_spec_meas_df):
print('-W- Some dec/inc/moment data were missing for specimen {}, so {} measurement row(s) were excluded'.format(s, n_rows - len(this_spec_meas_df)))
# geographic transformation
if coord != "-1" and len(samp_df):
this_spec_meas_df = transform_to_geographic(this_spec_meas_df, samp_df, samp, coord)
units = 'K' # units are kelvin
try:
this_spec_meas_df['magn_moment'] = this_spec_meas_df['magn_moment'].astype(float)
this_spec_meas_df['treat_temp'] = this_spec_meas_df['treat_temp'].astype(float)
except:
print('-W- There are malformed or missing data for specimen {}, skipping'.format(spec))
return
datablock = this_spec_meas_df[['treat_temp', 'dir_dec', 'dir_inc',
'magn_moment', 'blank', 'quality']].values.tolist()
ZED = pmagplotlib.plot_zed(ZED, datablock, angle, s, units)
if len(spec_df_af.index) > 1: # this is an af run
this_spec_meas_df = pd.concat([spec_df_nrm, spec_df_af])
# make sure all decs/incs are filled in
n_rows = len(this_spec_meas_df)
this_spec_meas_df = this_spec_meas_df.dropna(how='any', subset=['dir_dec', 'dir_inc', 'magn_moment'])
if n_rows > len(this_spec_meas_df):
print('-W- Some dec/inc/moment data were missing for specimen {}, so {} measurement row(s) were excluded'.format(s, n_rows - len(this_spec_meas_df)))
# geographic transformation
if coord != "-1" and len(samp_df):
this_spec_meas_df = transform_to_geographic(this_spec_meas_df, samp_df, samp, coord)
units = 'T' # these are AF data
try:
this_spec_meas_df['magn_moment'] = this_spec_meas_df['magn_moment'].astype(float)
this_spec_meas_df['treat_ac_field'] = this_spec_meas_df['treat_ac_field'].astype(float)
except:
print('-W- There are malformed or missing data for specimen {}, skipping'.format(spec))
return
datablock = this_spec_meas_df[['treat_ac_field', 'dir_dec', 'dir_inc',
'magn_moment', 'blank', 'quality']].values.tolist()
ZED = pmagplotlib.plot_zed(ZED, datablock, angle, s, units)
return plot_interpretations(ZED, spec_container, s, this_spec_meas_df, datablock)
if interactive:
save_plots = False
# read in MagIC formatted data if contribution object not provided
if not isinstance(contribution, cb.Contribution):
input_dir_path = os.path.realpath(input_dir_path)
file_path = pmag.resolve_file_name(meas_file, input_dir_path)
# read in magic formatted data
if not os.path.exists(file_path):
print('No such file:', file_path)
return False, []
custom_filenames = {'measurements': file_path, 'specimens': spec_file, 'samples': samp_file}
contribution = cb.Contribution(input_dir_path, custom_filenames=custom_filenames,
read_tables=['measurements', 'specimens',
'contribution', 'samples'])
if pmagplotlib.isServer:
try:
contribution.propagate_location_to_samples()
contribution.propagate_location_to_specimens()
contribution.propagate_location_to_measurements()
except KeyError as ex:
pass
meas_container = contribution.tables['measurements']
meas_df = contribution.tables['measurements'].df #
#meas_df=pd.read_csv(file_path, sep='\t', header=1)
spec_container = contribution.tables.get('specimens', None)
samp_container = contribution.tables.get('samples', None)
#if not spec_file:
# spec_file = os.path.join(os.path.split(file_path)[0], "specimens.txt")
#if os.path.exists(spec_file):
# spec_container = cb.MagicDataFrame(spec_file, dtype="specimens")
#else:
# spec_container = None
meas_df['blank'] = "" # this is a dummy variable expected by plotZED
if 'treat_ac_field' in meas_df.columns:
# create 'treatment' column.
# uses treat_temp if treat_ac_field is missing OR zero.
# (have to take this into account for plotting later)
if 'treat_temp' in meas_df.columns:
meas_df['treatment'] = meas_df['treat_ac_field'].where(
cond=meas_df['treat_ac_field'].astype(bool), other=meas_df['treat_temp'])
else:
meas_df['treatment'] = meas_df['treat_ac_field']
else:
meas_df['treatment'] = meas_df['treat_temp']
if crd == "s":
coord = "-1"
elif crd == "t":
coord = "100"
else:
coord = "0"
specimens = meas_df.specimen.unique() # list of specimen names
if len(specimens) == 0:
print('there are no data for plotting')
return False, []
# check measurement table for req'd fields
missing = []
reqd_cols_present = meas_df.columns.intersection(['dir_dec', 'dir_inc', 'magn_moment'])
for col in ['dir_dec', 'dir_inc', 'magn_moment']:
if col not in reqd_cols_present:
missing.append(col)
if missing:
print('-W- Missing required column(s) {}, cannot run zeq_magic'.format(', '.join(missing)))
return False, []
cnt = fignum
if n_plots != "all":
if len(specimens) > n_plots:
specimens = specimens[:n_plots]
saved = []
if specimen:
specimens = [specimen]
for s in specimens:
ZED = make_plots(s, cnt, meas_df, spec_container, samp_container)
if not ZED:
if pmagplotlib.verbose:
print('No plots could be created for specimen:', s)
continue
titles = {key: s + "_" + key + "." + fmt for key in ZED}
if pmagplotlib.isServer:
titles = {}
titles['eqarea'] = 'Equal Area Plot'
titles['zijd'] = 'Zijderveld Plot'
titles['demag'] = 'Demagnetization Plot'
con_id = ""
if 'contribution' in contribution.tables:
if 'id' in contribution.tables['contribution'].df.columns:
con_id = contribution.tables['contribution'].df['id'].values[0]
pmagplotlib.add_borders(ZED, titles, con_id=con_id)
for title in titles:
# try to get the full hierarchy for plot names
df_slice = meas_container.df[meas_container.df['specimen'] == s]
location = str(meas_container.get_name('location', df_slice))
site = str(meas_container.get_name('site', df_slice))
sample = str(meas_container.get_name('sample', df_slice))
# add coord here!
filename = 'LO:_'+location+'_SI:_'+site+'_SA:_'+sample + \
'_SP:_'+str(s)+'_CO:_' + '_TY:_'+title+'_.png'
titles[title] = filename
if save_plots:
saved.extend(pmagplotlib.save_plots(ZED, titles))
elif interactive:
pmagplotlib.draw_figs(ZED)
ans = pmagplotlib.save_or_quit()
if ans == 'a':
saved.extend(pmagplotlib.save_plots(ZED, titles))
else:
continue
else:
cnt += 3
return True, saved | zeq_magic makes zijderveld and equal area plots for magic formatted measurements files.
Parameters
----------
meas_file : str
input measurement file
spec_file : str
input specimen interpretation file
samp_file : str
input sample orientations file
crd : str
coordinate system [s,g,t] for specimen, geographic, tilt corrected
g,t options require a sample file with specimen and bedding orientation
input_dir_path : str
input directory of meas_file, default "."
angle : float
angle of X direction with respect to specimen X
n_plots : int, default 5
maximum number of plots to make
if you want to make all possible plots, specify "all"
save_plots : bool, default True
if True, create and save all requested plots
fmt : str, default "svg"
format for figures, [svg, jpg, pdf, png]
interactive : bool, default False
interactively plot and display for each specimen
(this is best used on the command line only)
specimen : str, default ""
specimen name to plot
samp_file : str, default 'samples.txt'
name of samples file
contribution : cb.Contribution, default None
if provided, use Contribution object instead of reading in
data from files
fignum : matplotlib figure number | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L10216-L10540 |
PmagPy/PmagPy | pmagpy/ipmag.py | transform_to_geographic | def transform_to_geographic(this_spec_meas_df, samp_df, samp, coord="0"):
    """
    Transform decs/incs to geographic coordinates.
    Calls pmag.dogeo_V for the heavy lifting
    Parameters
    ----------
    this_spec_meas_df : pandas dataframe of measurements for a single specimen
    samp_df : pandas dataframe of samples
    samp : sample name
    coord : str, default "0"
        "0" applies the geographic correction only; "100" additionally
        applies the tilt correction when bedding information is available
    Returns
    ---------
    this_spec_meas_df : measurements dataframe with transformed coordinates
    """
    # we could return the type of coordinates ACTUALLY used
    # transform geographic
    decs = this_spec_meas_df['dir_dec'].values.tolist()
    incs = this_spec_meas_df['dir_inc'].values.tolist()
    # NOTE(review): az_type (the orientation method) is fetched but never used here
    or_info, az_type = pmag.get_orient(samp_df,samp,data_model=3)
    if 'azimuth' in or_info.keys() and cb.not_null(or_info['azimuth'], False):
        # replicate the single sample azimuth/dip once per measurement row
        azimuths=len(decs)*[or_info['azimuth']]
        dips=len(decs)*[or_info['dip']]
    # if azimuth/dip is missing, or orientation is bad,
    # stick with specimen coordinates
    else:
        return this_spec_meas_df
    dirs = [decs, incs, azimuths, dips]
    # transpose so each row is one (dec, inc, azimuth, dip) quadruple
    dirs_geo = np.array(list(map(list, list(zip(*dirs)))))
    decs, incs = pmag.dogeo_V(dirs_geo)
    if coord == '100' and 'bed_dip_direction' in or_info.keys() and or_info['bed_dip_direction']!="": # need to do tilt correction too
        bed_dip_dirs = len(decs)*[or_info['bed_dip_direction']]
        bed_dips = len(decs) * [or_info['bed_dip']]
        dirs = [decs, incs, bed_dip_dirs, bed_dips]
        ## this transposes the columns and rows of the list of lists
        dirs_tilt = np.array(list(map(list, list(zip(*dirs)))))
        decs, incs = pmag.dotilt_V(dirs_tilt)
    # overwrite the directions in place with the transformed values
    this_spec_meas_df['dir_dec'] = decs
    this_spec_meas_df['dir_inc'] = incs
    return this_spec_meas_df | python | def transform_to_geographic(this_spec_meas_df, samp_df, samp, coord="0"):
"""
Transform decs/incs to geographic coordinates.
Calls pmag.dogeo_V for the heavy lifting
Parameters
----------
this_spec_meas_df : pandas dataframe of measurements for a single specimen
samp_df : pandas dataframe of samples
samp : samp name
Returns
---------
this_spec_meas_df : measurements dataframe with transformed coordinates
"""
# we could return the type of coordinates ACTUALLY used
# transform geographic
decs = this_spec_meas_df['dir_dec'].values.tolist()
incs = this_spec_meas_df['dir_inc'].values.tolist()
or_info, az_type = pmag.get_orient(samp_df,samp,data_model=3)
if 'azimuth' in or_info.keys() and cb.not_null(or_info['azimuth'], False):
azimuths=len(decs)*[or_info['azimuth']]
dips=len(decs)*[or_info['dip']]
# if azimuth/dip is missing, or orientation is bad,
# stick with specimen coordinates
else:
return this_spec_meas_df
dirs = [decs, incs, azimuths, dips]
dirs_geo = np.array(list(map(list, list(zip(*dirs)))))
decs, incs = pmag.dogeo_V(dirs_geo)
if coord == '100' and 'bed_dip_direction' in or_info.keys() and or_info['bed_dip_direction']!="": # need to do tilt correction too
bed_dip_dirs = len(decs)*[or_info['bed_dip_direction']]
bed_dips = len(decs) * [or_info['bed_dip']]
dirs = [decs, incs, bed_dip_dirs, bed_dips]
## this transposes the columns and rows of the list of lists
dirs_tilt = np.array(list(map(list, list(zip(*dirs)))))
decs, incs = pmag.dotilt_V(dirs_tilt)
this_spec_meas_df['dir_dec'] = decs
this_spec_meas_df['dir_inc'] = incs
return this_spec_meas_df | Transform decs/incs to geographic coordinates.
Calls pmag.dogeo_V for the heavy lifting
Parameters
----------
this_spec_meas_df : pandas dataframe of measurements for a single specimen
samp_df : pandas dataframe of samples
    samp : sample name
Returns
---------
this_spec_meas_df : measurements dataframe with transformed coordinates | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L10542-L10581 |
PmagPy/PmagPy | pmagpy/ipmag.py | thellier_magic | def thellier_magic(meas_file="measurements.txt", dir_path=".", input_dir_path="",
                   spec="", n_specs=5, save_plots=True, fmt="svg", interactive=False,
                   contribution=None):
    """
    thellier_magic plots arai and other useful plots for Thellier-type experimental data
    Parameters
    ----------
    meas_file : str
        input measurement file, default "measurements.txt"
    dir_path : str
        output directory, default "."
        Note: if using Windows, all figures will be saved to the working directory
        *not* dir_path
    input_dir_path : str
        input file directory IF different from dir_path, default ""
    spec : str
        default "", specimen to plot
    n_specs : int
        number of specimens to plot, default 5
        if you want to make all possible plots, specify "all"
    save_plots : bool, default True
        if True, create and save all requested plots
    fmt : str
        format of saved figures (default is 'svg')
    interactive : bool, default False
        interactively plot and display for each specimen
        (this is best used on the command line only)
    contribution : cb.Contribution, default None
        if provided, use Contribution object instead of reading in
        data from files
    Returns
    ---------
    status : True or False
    saved : list of figures saved
    """
    def make_plots(this_specimen, thel_data, cnt=1):
        """
        Take specimen name and measurement data
        and produce plots.
        Return a dictionary of plots created, or False if
        no plots could be created.
        """
        zed = False
        if pmagplotlib.verbose:
            print(this_specimen)
        # make the figure dictionary that pmagplotlib likes:
        #AZD = {'arai': 1, 'zijd': 2, 'eqarea': 3, 'deremag': 4} # make datablock
        #if save_plots:
        #    AZD = {'arai': 1, 'zijd': 2, 'eqarea': 3, 'deremag': 4} # make datablock
        #else:
        # figure numbers are offset by the running counter so figures don't collide
        AZD = {'arai': cnt, 'zijd': cnt+1, 'eqarea': cnt +
               2, 'deremag': cnt+3} # make datablock
        #cnt += 4 # increment the figure counter
        spec_df = thel_data[thel_data.specimen ==
                            this_specimen] # get data for this specimen
        # get the data block for Arai plot
        # need at least 3 measurement rows for a meaningful Arai plot
        if len(spec_df) >= 3:
            # just skip specimen if arai data is malformed
            try:
                araiblock, field = pmag.sortarai(spec_df, this_specimen, 0, version=3)
            except Exception as ex:
                print('-W-', ex)
                return zed
            if not save_plots:
                for key, val in AZD.items():
                    pmagplotlib.plot_init(val, 5, 5)
            # get the datablock for Zijderveld plot
            zijdblock, units = pmag.find_dmag_rec(
                this_specimen, spec_df, version=3)
            if not len(units):
                unit_string = ""
            else:
                unit_string = units[-1]
            zed = pmagplotlib.plot_arai_zij(
                AZD, araiblock, zijdblock, this_specimen, unit_string) # make the plots
        return zed
    # format some things
    if interactive:
        save_plots = False
    if not isinstance(contribution, cb.Contribution):
        # get proper paths
        input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
        file_path = pmag.resolve_file_name(meas_file, input_dir_path)
        input_dir_path = os.path.split(file_path)[0]
        # read in magic formatted data
        contribution = cb.Contribution(input_dir_path)
    if not contribution.tables.get('measurements'):
        print('-W- No measurements table found')
        return False, []
    # location propagation is best-effort; missing keys are tolerated
    try:
        contribution.propagate_location_to_samples()
        contribution.propagate_location_to_specimens()
        contribution.propagate_location_to_measurements()
    except KeyError as ex:
        pass
    meas_df = contribution.tables['measurements'].df
    # try to get contribution id for server plotting
    # (con_id is referenced only in the isServer branch further below)
    if pmagplotlib.isServer:
        con_id = contribution.get_con_id()
    # get key for intensity records
    int_key = cb.get_intensity_col(meas_df)
    # list for saved figs
    saved = []
    # get all the records with measurement data
    meas_data = meas_df[meas_df[int_key].notnull()]
    # get all the Thellier data
    thel_data = meas_data.dropna(subset=['method_codes'])
    thel_data = thel_data[thel_data['method_codes'].str.contains('LP-PI-TRM')]
    specimens = meas_data.specimen.unique() # list of specimen names
    if len(specimens) == 0:
        print('there are no data for plotting')
        return False, []
    if spec:
        if spec not in specimens:
            print('could not find specimen {}'.format(spec))
            return False, []
        specimens = [spec]
    elif n_specs != "all":
        try:
            specimens = specimens[:n_specs]
        except Exception as ex:
            pass
    cnt = 1 # set the figure counter to 1
    for this_specimen in specimens: # step through the specimens list
        zed = make_plots(this_specimen, thel_data, cnt)
        # if plots were produced
        if zed:
            if interactive:
                # draw and save interactively
                pmagplotlib.draw_figs(zed)
                ans = input(
                    "S[a]ve plots, [q]uit, <return> to continue\n ")
                if ans == 'q':
                    return True, []
                if ans == 'a':
                    files = {key : this_specimen + "_" + key + "." + fmt for (key, value) in zed.items()}
                    if not set_env.IS_WIN:
                        files = {key: os.path.join(dir_path, value) for (key, value) in files.items()}
                        incl_directory = True
                    # NOTE(review): on Windows (set_env.IS_WIN) incl_directory is never
                    # assigned in this branch before use -- looks like a latent NameError;
                    # confirm intended Windows behavior
                    saved.extend = None if False else None  # noqa placeholder removed
                    saved.append(pmagplotlib.save_plots(zed, files, incl_directory=incl_directory))
            elif save_plots:
                # don't draw, just save figures
                files = {key : this_specimen + "_" + key + "." + fmt for (key, value) in zed.items()}
                incl_directory = False
                if not pmagplotlib.isServer:
                    # not server
                    if not set_env.IS_WIN:
                        files = {key: os.path.join(dir_path, value) for (key, value) in files.items()}
                        incl_directory = True
                else:
                    # isServer, fix plot titles, formatting, and file names for server
                    for key, value in files.copy().items():
                        files[key] = "SP:_{}_TY:_{}_.{}".format(this_specimen, key, fmt)
                    titles = {}
                    titles['deremag'] = 'DeReMag Plot'
                    titles['zijd'] = 'Zijderveld Plot'
                    titles['arai'] = 'Arai Plot'
                    titles['TRM'] = 'TRM Acquisition data'
                    titles['eqarea'] = 'Equal Area Plot'
                    zed = pmagplotlib.add_borders(
                        zed, titles, con_id=con_id)
                saved.append(pmagplotlib.save_plots(zed, files, incl_directory=incl_directory))
            # just let the plots appear (notebook)
            else:
                cnt += len(zed)
                # don't even need to draw 'em! They just appear.
                #pmagplotlib.draw_figs(zed)
        # no plots were produced
        else:
            print ('no data for ',this_specimen)
            print ('skipping')
    return True, saved | python | def thellier_magic(meas_file="measurements.txt", dir_path=".", input_dir_path="",
spec="", n_specs=5, save_plots=True, fmt="svg", interactive=False,
contribution=None):
"""
thellier_magic plots arai and other useful plots for Thellier-type experimental data
Parameters
----------
meas_file : str
input measurement file, default "measurements.txt"
dir_path : str
output directory, default "."
Note: if using Windows, all figures will be saved to working directly
*not* dir_path
input_dir_path : str
input file directory IF different from dir_path, default ""
spec : str
default "", specimen to plot
n_specs : int
number of specimens to plot, default 5
if you want to make all possible plots, specify "all"
save_plots : bool, default True
if True, create and save all requested plots
fmt : str
format of saved figures (default is 'svg')
interactive : bool, default False
interactively plot and display for each specimen
(this is best used on the command line only)
contribution : cb.Contribution, default None
if provided, use Contribution object instead of reading in
data from files
Returns
---------
status : True or False
saved : list of figures saved
"""
def make_plots(this_specimen, thel_data, cnt=1):
"""
Take specimen name and measurement data
and produce plots.
Return a dictionary of plots created, or False if
no plots could be created.
"""
zed = False
if pmagplotlib.verbose:
print(this_specimen)
# make the figure dictionary that pmagplotlib likes:
#AZD = {'arai': 1, 'zijd': 2, 'eqarea': 3, 'deremag': 4} # make datablock
#if save_plots:
# AZD = {'arai': 1, 'zijd': 2, 'eqarea': 3, 'deremag': 4} # make datablock
#else:
AZD = {'arai': cnt, 'zijd': cnt+1, 'eqarea': cnt +
2, 'deremag': cnt+3} # make datablock
#cnt += 4 # increment the figure counter
spec_df = thel_data[thel_data.specimen ==
this_specimen] # get data for this specimen
# get the data block for Arai plot
if len(spec_df) >= 3:
# just skip specimen if arai data is malformed
try:
araiblock, field = pmag.sortarai(spec_df, this_specimen, 0, version=3)
except Exception as ex:
print('-W-', ex)
return zed
if not save_plots:
for key, val in AZD.items():
pmagplotlib.plot_init(val, 5, 5)
# get the datablock for Zijderveld plot
zijdblock, units = pmag.find_dmag_rec(
this_specimen, spec_df, version=3)
if not len(units):
unit_string = ""
else:
unit_string = units[-1]
zed = pmagplotlib.plot_arai_zij(
AZD, araiblock, zijdblock, this_specimen, unit_string) # make the plots
return zed
# format some things
if interactive:
save_plots = False
if not isinstance(contribution, cb.Contribution):
# get proper paths
input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
file_path = pmag.resolve_file_name(meas_file, input_dir_path)
input_dir_path = os.path.split(file_path)[0]
# read in magic formatted data
contribution = cb.Contribution(input_dir_path)
if not contribution.tables.get('measurements'):
print('-W- No measurements table found')
return False, []
try:
contribution.propagate_location_to_samples()
contribution.propagate_location_to_specimens()
contribution.propagate_location_to_measurements()
except KeyError as ex:
pass
meas_df = contribution.tables['measurements'].df
# try to get contribution id for server plotting
if pmagplotlib.isServer:
con_id = contribution.get_con_id()
# get key for intensity records
int_key = cb.get_intensity_col(meas_df)
# list for saved figs
saved = []
# get all the records with measurement data
meas_data = meas_df[meas_df[int_key].notnull()]
# get all the Thellier data
thel_data = meas_data.dropna(subset=['method_codes'])
thel_data = thel_data[thel_data['method_codes'].str.contains('LP-PI-TRM')]
specimens = meas_data.specimen.unique() # list of specimen names
if len(specimens) == 0:
print('there are no data for plotting')
return False, []
if spec:
if spec not in specimens:
print('could not find specimen {}'.format(spec))
return False, []
specimens = [spec]
elif n_specs != "all":
try:
specimens = specimens[:n_specs]
except Exception as ex:
pass
cnt = 1 # set the figure counter to 1
for this_specimen in specimens: # step through the specimens list
zed = make_plots(this_specimen, thel_data, cnt)
# if plots were produced
if zed:
if interactive:
# draw and save interactively
pmagplotlib.draw_figs(zed)
ans = input(
"S[a]ve plots, [q]uit, <return> to continue\n ")
if ans == 'q':
return True, []
if ans == 'a':
files = {key : this_specimen + "_" + key + "." + fmt for (key, value) in zed.items()}
if not set_env.IS_WIN:
files = {key: os.path.join(dir_path, value) for (key, value) in files.items()}
incl_directory = True
saved.append(pmagplotlib.save_plots(zed, files, incl_directory=incl_directory))
elif save_plots:
# don't draw, just save figures
files = {key : this_specimen + "_" + key + "." + fmt for (key, value) in zed.items()}
incl_directory = False
if not pmagplotlib.isServer:
# not server
if not set_env.IS_WIN:
files = {key: os.path.join(dir_path, value) for (key, value) in files.items()}
incl_directory = True
else:
# isServer, fix plot titles, formatting, and file names for server
for key, value in files.copy().items():
files[key] = "SP:_{}_TY:_{}_.{}".format(this_specimen, key, fmt)
titles = {}
titles['deremag'] = 'DeReMag Plot'
titles['zijd'] = 'Zijderveld Plot'
titles['arai'] = 'Arai Plot'
titles['TRM'] = 'TRM Acquisition data'
titles['eqarea'] = 'Equal Area Plot'
zed = pmagplotlib.add_borders(
zed, titles, con_id=con_id)
saved.append(pmagplotlib.save_plots(zed, files, incl_directory=incl_directory))
# just let the plots appear (notebook)
else:
cnt += len(zed)
# don't even need to draw 'em! They just appear.
#pmagplotlib.draw_figs(zed)
# no plots were produced
else:
print ('no data for ',this_specimen)
print ('skipping')
return True, saved | thellier_magic plots arai and other useful plots for Thellier-type experimental data
Parameters
----------
meas_file : str
input measurement file, default "measurements.txt"
dir_path : str
output directory, default "."
        Note: if using Windows, all figures will be saved to the working directory
*not* dir_path
input_dir_path : str
input file directory IF different from dir_path, default ""
spec : str
default "", specimen to plot
n_specs : int
number of specimens to plot, default 5
if you want to make all possible plots, specify "all"
save_plots : bool, default True
if True, create and save all requested plots
fmt : str
format of saved figures (default is 'svg')
interactive : bool, default False
interactively plot and display for each specimen
(this is best used on the command line only)
contribution : cb.Contribution, default None
if provided, use Contribution object instead of reading in
data from files
Returns
---------
status : True or False
saved : list of figures saved | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L10585-L10770 |
PmagPy/PmagPy | pmagpy/ipmag.py | hysteresis_magic | def hysteresis_magic(output_dir_path=".", input_dir_path="", spec_file="specimens.txt",
meas_file="measurements.txt", fmt="svg",
save_plots=True, make_plots=True, pltspec="", n_specs=5, interactive=False):
"""
Calculate hysteresis parameters and plot hysteresis data.
Plotting may be called interactively with save_plots==False,
or be suppressed entirely with make_plots==False.
Parameters
----------
output_dir_path : str, default "."
Note: if using Windows, all figures will be saved to working directly
*not* dir_path
input_dir_path : str
path for intput file if different from output_dir_path (default is same)
spec_file : str, default "specimens.txt"
output file to save hysteresis data
meas_file : str, default "measurements.txt"
input measurement file
fmt : str, default "svg"
format for figures, [svg, jpg, pdf, png]
save_plots : bool, default True
if True, generate and save all requested plots
make_plots : bool, default True
if False, skip making plots and just save hysteresis data
(if False, save_plots will be set to False also)
pltspec : str, default ""
specimen name to plot, otherwise will plot all specimens
n_specs : int
number of specimens to plot, default 5
if you want to make all possible plots, specify "all"
interactive : bool, default False
interactively plot and display for each specimen
(this is best used on the command line or in the Python interpreter)
Returns
---------
Tuple : (True or False indicating if conversion was sucessful, output file names written)
"""
# put plots in output_dir_path, unless isServer
incl_directory = True
if pmagplotlib.isServer or set_env.IS_WIN:
incl_directory = False
# figure out directory/file paths
input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, output_dir_path)
spec_file = pmag.resolve_file_name(spec_file, input_dir_path)
meas_file = pmag.resolve_file_name(meas_file, input_dir_path)
# format and initialize variables
verbose = pmagplotlib.verbose
version_num = pmag.get_version()
if not make_plots:
irm_init, imag_init = -1, -1
save_plots = False
if save_plots:
verbose = False
if pltspec:
pass
if interactive:
save_plots = False
SpecRecs = []
#
#
meas_data, file_type = pmag.magic_read(meas_file)
if file_type != 'measurements':
print('bad file', meas_file)
return False, []
#
# initialize some variables
# define figure numbers for hyst,deltaM,DdeltaM curves
HystRecs, RemRecs = [], []
HDD = {}
if verbose and make_plots:
print("Plots may be on top of each other - use mouse to place ")
if make_plots:
HDD['hyst'], HDD['deltaM'], HDD['DdeltaM'] = 1, 2, 3
if make_plots and (not save_plots):
pmagplotlib.plot_init(HDD['DdeltaM'], 5, 5)
pmagplotlib.plot_init(HDD['deltaM'], 5, 5)
pmagplotlib.plot_init(HDD['hyst'], 5, 5)
imag_init = 0
irm_init = 0
else:
HDD['hyst'], HDD['deltaM'], HDD['DdeltaM'], HDD['irm'], HDD['imag'] = 0, 0, 0, 0, 0
#
if spec_file:
prior_data, file_type = pmag.magic_read(spec_file)
#
# get list of unique experiment names and specimen names
#
experiment_names, sids = [], []
hys_data = pmag.get_dictitem(meas_data, 'method_codes', 'LP-HYS', 'has')
dcd_data = pmag.get_dictitem(
meas_data, 'method_codes', 'LP-IRM-DCD', 'has')
imag_data = pmag.get_dictitem(meas_data, 'method_codes', 'LP-IMAG', 'has')
for rec in hys_data:
if rec['experiment'] not in experiment_names:
experiment_names.append(rec['experiment'])
if rec['specimen'] not in sids:
sids.append(rec['specimen'])
#
k = 0
# if plotting only one specimen, find it
if pltspec:
k = sids.index(pltspec)
# if plotting only n specimens, remove others from the list
elif n_specs != "all":
try:
sids = sids[:n_specs]
except:
pass
cnt = 0
while k < len(sids):
specimen = sids[k]
if pltspec:
if specimen != pltspec:
k += 1
continue
else:
for key, value in HDD.items():
cnt += 1
HDD[key] = cnt
#HDD = {key: value + len(HDD) + k for (key, value) in HDD.items()}
# initialize a new specimen hysteresis record
HystRec = {'specimen': specimen, 'experiment': ""}
if verbose and make_plots:
print(specimen, k+1, 'out of ', len(sids))
#
#
# B,M for hysteresis, Bdcd,Mdcd for irm-dcd data
B, M, Bdcd, Mdcd = [], [], [], []
Bimag, Mimag = [], [] # Bimag,Mimag for initial magnetization curves
# fish out all the LP-HYS data for this specimen
spec_data = pmag.get_dictitem(hys_data, 'specimen', specimen, 'T')
if len(spec_data) > 0:
meths = spec_data[0]['method_codes'].split(':')
e = spec_data[0]['experiment']
HystRec['experiment'] = spec_data[0]['experiment']
for rec in spec_data:
B.append(float(rec['meas_field_dc']))
M.append(float(rec['magn_moment']))
# fish out all the data for this specimen
spec_data = pmag.get_dictitem(dcd_data, 'specimen', specimen, 'T')
if len(spec_data) > 0:
HystRec['experiment'] = HystRec['experiment'] + \
':'+spec_data[0]['experiment']
irm_exp = spec_data[0]['experiment']
for rec in spec_data:
Bdcd.append(float(rec['treat_dc_field']))
Mdcd.append(float(rec['magn_moment']))
# fish out all the data for this specimen
spec_data = pmag.get_dictitem(imag_data, 'specimen', specimen, 'T')
if len(spec_data) > 0:
imag_exp = spec_data[0]['experiment']
for rec in spec_data:
Bimag.append(float(rec['meas_field_dc']))
Mimag.append(float(rec['magn_moment']))
#
# now plot the hysteresis curve
#
if len(B) > 0:
hmeths = []
for meth in meths:
hmeths.append(meth)
hpars = pmagplotlib.plot_hdd(HDD, B, M, e)
if interactive:
if not set_env.IS_WIN:
pmagplotlib.draw_figs(HDD)
#
if make_plots:
pmagplotlib.plot_hpars(HDD, hpars, 'bs')
HystRec['hyst_mr_moment'] = hpars['hysteresis_mr_moment']
HystRec['hyst_ms_moment'] = hpars['hysteresis_ms_moment']
HystRec['hyst_bc'] = hpars['hysteresis_bc']
HystRec['hyst_bcr'] = hpars['hysteresis_bcr']
HystRec['hyst_xhf'] = hpars['hysteresis_xhf']
HystRec['experiments'] = e
HystRec['software_packages'] = version_num
if hpars["magic_method_codes"] not in hmeths:
hmeths.append(hpars["magic_method_codes"])
methods = ""
for meth in hmeths:
methods = methods+meth.strip()+":"
HystRec["method_codes"] = methods[:-1]
HystRec["citations"] = "This study"
#
if len(Bdcd) > 0:
rmeths = []
for meth in meths:
rmeths.append(meth)
if verbose and make_plots:
print('plotting IRM')
if irm_init == 0:
cnt += 1
HDD['irm'] = cnt #5 if 'imag' in HDD else 4
if make_plots and (not save_plots):
pmagplotlib.plot_init(HDD['irm'], 5, 5)
irm_init = 1
rpars = pmagplotlib.plot_irm(HDD['irm'], Bdcd, Mdcd, irm_exp)
HystRec['rem_mr_moment'] = rpars['remanence_mr_moment']
HystRec['rem_bcr'] = rpars['remanence_bcr']
HystRec['experiments'] = specimen+':'+irm_exp
if rpars["magic_method_codes"] not in meths:
meths.append(rpars["magic_method_codes"])
methods = ""
for meth in rmeths:
methods = methods+meth.strip()+":"
HystRec["method_codes"] = HystRec['method_codes']+':'+methods[:-1]
HystRec["citations"] = "This study"
else:
if irm_init:
pmagplotlib.clearFIG(HDD['irm'])
if len(Bimag) > 0:
if verbose and make_plots:
print('plotting initial magnetization curve')
# first normalize by Ms
Mnorm = []
for m in Mimag:
Mnorm.append(m / float(hpars['hysteresis_ms_moment']))
if imag_init == 0:
HDD['imag'] = 4
if make_plots and (not save_plots):
pmagplotlib.plot_init(HDD['imag'], 5, 5)
imag_init = 1
pmagplotlib.plot_imag(HDD['imag'], Bimag, Mnorm, imag_exp)
else:
if imag_init:
pmagplotlib.clearFIG(HDD['imag'])
if len(list(HystRec.keys())) > 0:
HystRecs.append(HystRec)
#
files = {}
if save_plots and make_plots:
if pltspec:
s = pltspec
else:
s = specimen
files = {}
for key in list(HDD.keys()):
if incl_directory:
files[key] = os.path.join(output_dir_path, s+'_'+key+'.'+fmt)
else:
files[key] = s+'_'+key+'.'+fmt
if make_plots and save_plots:
pmagplotlib.save_plots(HDD, files, incl_directory=incl_directory)
#if pltspec:
# return True, []
if interactive:
pmagplotlib.draw_figs(HDD)
ans = input(
"S[a]ve plots, [s]pecimen name, [q]uit, <return> to continue\n ")
if ans == "a":
files = {}
for key in list(HDD.keys()):
if incl_directory:
files[key] = os.path.join(output_dir_path, specimen+'_'+key+'.'+fmt)
else:
files[key] = specimen+'_'+key+'.'+fmt
pmagplotlib.save_plots(HDD, files, incl_directory=incl_directory)
if ans == '':
k += 1
if ans == "p":
del HystRecs[-1]
k -= 1
if ans == 'q':
print("Good bye")
return True, []
if ans == 's':
keepon = 1
specimen = input(
'Enter desired specimen name (or first part there of): ')
while keepon == 1:
try:
k = sids.index(specimen)
keepon = 0
except:
tmplist = []
for qq in range(len(sids)):
if specimen in sids[qq]:
tmplist.append(sids[qq])
print(specimen, " not found, but this was: ")
print(tmplist)
specimen = input('Select one or try again\n ')
k = sids.index(specimen)
else:
k += 1
if len(B) == 0 and len(Bdcd) == 0:
if verbose:
print('skipping this one - no hysteresis data')
k += 1
if k < len(sids):
# must re-init figs for Windows to keep size
if make_plots and set_env.IS_WIN:
if not save_plots:
pmagplotlib.plot_init(HDD['DdeltaM'], 5, 5)
pmagplotlib.plot_init(HDD['deltaM'], 5, 5)
pmagplotlib.plot_init(HDD['hyst'], 5, 5)
if len(Bimag) > 0:
HDD['imag'] = 4
if not save_plots:
pmagplotlib.plot_init(HDD['imag'], 5, 5)
if len(Bdcd) > 0:
HDD['irm'] = 5 if 'imag' in HDD else 4
if not save_plots:
pmagplotlib.plot_init(HDD['irm'], 5, 5)
elif not make_plots and set_env.IS_WIN:
HDD['hyst'], HDD['deltaM'], HDD['DdeltaM'], HDD['irm'], HDD['imag'] = 0, 0, 0, 0, 0
if len(HystRecs) > 0:
# go through prior_data, clean out prior results and save combined file as spec_file
SpecRecs, keys = [], list(HystRecs[0].keys())
if len(prior_data) > 0:
prior_keys = list(prior_data[0].keys())
else:
prior_keys = []
for rec in prior_data:
for key in keys:
if key not in list(rec.keys()):
rec[key] = ""
if 'LP-HYS' not in rec['method_codes']:
SpecRecs.append(rec)
for rec in HystRecs:
for key in prior_keys:
if key not in list(rec.keys()):
rec[key] = ""
prior = pmag.get_dictitem(
prior_data, 'specimen', rec['specimen'], 'T')
if len(prior) > 0 and 'sample' in list(prior[0].keys()):
# pull sample name from prior specimens table
rec['sample'] = prior[0]['sample']
SpecRecs.append(rec)
# drop unnecessary/duplicate rows
#dir_path = os.path.split(spec_file)[0]
con = cb.Contribution(input_dir_path, read_tables=[])
con.add_magic_table_from_data('specimens', SpecRecs)
con.tables['specimens'].drop_duplicate_rows(
ignore_cols=['specimen', 'sample', 'citations', 'software_packages'])
con.tables['specimens'].df = con.tables['specimens'].df.drop_duplicates()
spec_file = os.path.join(output_dir_path, os.path.split(spec_file)[1])
con.write_table_to_file('specimens', custom_name=spec_file)
if verbose:
print("hysteresis parameters saved in ", spec_file)
def hysteresis_magic(output_dir_path=".", input_dir_path="", spec_file="specimens.txt",
                     meas_file="measurements.txt", fmt="svg",
                     save_plots=True, make_plots=True, pltspec="", n_specs=5, interactive=False):
    """
    Calculate hysteresis parameters and plot hysteresis data.
    Plotting may be called interactively with save_plots==False,
    or be suppressed entirely with make_plots==False.

    Parameters
    ----------
    output_dir_path : str, default "."
        Note: if using Windows, all figures will be saved to working directory
        *not* dir_path
    input_dir_path : str
        path for input file if different from output_dir_path (default is same)
    spec_file : str, default "specimens.txt"
        output file to save hysteresis data
    meas_file : str, default "measurements.txt"
        input measurement file
    fmt : str, default "svg"
        format for figures, [svg, jpg, pdf, png]
    save_plots : bool, default True
        if True, generate and save all requested plots
    make_plots : bool, default True
        if False, skip making plots and just save hysteresis data
        (if False, save_plots will be set to False also)
    pltspec : str, default ""
        specimen name to plot, otherwise will plot all specimens
    n_specs : int
        number of specimens to plot, default 5
        if you want to make all possible plots, specify "all"
    interactive : bool, default False
        interactively plot and display for each specimen
        (this is best used on the command line or in the Python interpreter)

    Returns
    ---------
    Tuple : (True or False indicating if conversion was successful, output file names written)
    """
    # put plots in output_dir_path, unless isServer
    incl_directory = True
    if pmagplotlib.isServer or set_env.IS_WIN:
        incl_directory = False
    # figure out directory/file paths
    input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, output_dir_path)
    spec_file = pmag.resolve_file_name(spec_file, input_dir_path)
    meas_file = pmag.resolve_file_name(meas_file, input_dir_path)
    # format and initialize variables
    verbose = pmagplotlib.verbose
    version_num = pmag.get_version()
    if not make_plots:
        # -1 is the sentinel for "figure slot never initialized"
        irm_init, imag_init = -1, -1
        save_plots = False
    if save_plots:
        verbose = False
    if interactive:
        save_plots = False
    SpecRecs = []
    meas_data, file_type = pmag.magic_read(meas_file)
    if file_type != 'measurements':
        print('bad file', meas_file)
        return False, []
    # define figure numbers for hyst, deltaM, DdeltaM curves
    HystRecs, RemRecs = [], []
    HDD = {}
    if verbose and make_plots:
        print("Plots may be on top of each other - use mouse to place ")
    if make_plots:
        HDD['hyst'], HDD['deltaM'], HDD['DdeltaM'] = 1, 2, 3
        if not save_plots:
            pmagplotlib.plot_init(HDD['DdeltaM'], 5, 5)
            pmagplotlib.plot_init(HDD['deltaM'], 5, 5)
            pmagplotlib.plot_init(HDD['hyst'], 5, 5)
        imag_init = 0
        irm_init = 0
    else:
        HDD['hyst'], HDD['deltaM'], HDD['DdeltaM'], HDD['irm'], HDD['imag'] = 0, 0, 0, 0, 0
    # read in prior specimen data (spec_file always truthy: it has a default)
    if spec_file:
        prior_data, file_type = pmag.magic_read(spec_file)
    # get list of unique experiment names and specimen names
    experiment_names, sids = [], []
    hys_data = pmag.get_dictitem(meas_data, 'method_codes', 'LP-HYS', 'has')
    dcd_data = pmag.get_dictitem(
        meas_data, 'method_codes', 'LP-IRM-DCD', 'has')
    imag_data = pmag.get_dictitem(meas_data, 'method_codes', 'LP-IMAG', 'has')
    for rec in hys_data:
        if rec['experiment'] not in experiment_names:
            experiment_names.append(rec['experiment'])
        if rec['specimen'] not in sids:
            sids.append(rec['specimen'])
    k = 0
    # if plotting only one specimen, find it
    if pltspec:
        k = sids.index(pltspec)
    # if plotting only n specimens, remove others from the list
    elif n_specs != "all":
        try:
            sids = sids[:n_specs]
        except TypeError:  # n_specs not usable as a slice bound; keep all
            pass
    cnt = 0
    while k < len(sids):
        specimen = sids[k]
        if pltspec:
            if specimen != pltspec:
                k += 1
                continue
        else:
            # assign fresh figure numbers for this specimen
            for key in HDD:
                cnt += 1
                HDD[key] = cnt
        # initialize a new specimen hysteresis record
        HystRec = {'specimen': specimen, 'experiment': ""}
        if verbose and make_plots:
            print(specimen, k+1, 'out of ', len(sids))
        # B,M for hysteresis, Bdcd,Mdcd for irm-dcd data
        B, M, Bdcd, Mdcd = [], [], [], []
        Bimag, Mimag = [], []  # Bimag,Mimag for initial magnetization curves
        # fish out all the LP-HYS data for this specimen
        spec_data = pmag.get_dictitem(hys_data, 'specimen', specimen, 'T')
        if len(spec_data) > 0:
            meths = spec_data[0]['method_codes'].split(':')
            e = spec_data[0]['experiment']
            HystRec['experiment'] = spec_data[0]['experiment']
            for rec in spec_data:
                B.append(float(rec['meas_field_dc']))
                M.append(float(rec['magn_moment']))
        # fish out all the backfield (LP-IRM-DCD) data for this specimen
        spec_data = pmag.get_dictitem(dcd_data, 'specimen', specimen, 'T')
        if len(spec_data) > 0:
            HystRec['experiment'] = HystRec['experiment'] + \
                ':'+spec_data[0]['experiment']
            irm_exp = spec_data[0]['experiment']
            for rec in spec_data:
                Bdcd.append(float(rec['treat_dc_field']))
                Mdcd.append(float(rec['magn_moment']))
        # fish out all the initial magnetization (LP-IMAG) data for this specimen
        spec_data = pmag.get_dictitem(imag_data, 'specimen', specimen, 'T')
        if len(spec_data) > 0:
            imag_exp = spec_data[0]['experiment']
            for rec in spec_data:
                Bimag.append(float(rec['meas_field_dc']))
                Mimag.append(float(rec['magn_moment']))
        #
        # now plot the hysteresis curve
        #
        if len(B) > 0:
            hmeths = []
            for meth in meths:
                hmeths.append(meth)
            hpars = pmagplotlib.plot_hdd(HDD, B, M, e)
            if interactive:
                if not set_env.IS_WIN:
                    pmagplotlib.draw_figs(HDD)
            if make_plots:
                pmagplotlib.plot_hpars(HDD, hpars, 'bs')
            HystRec['hyst_mr_moment'] = hpars['hysteresis_mr_moment']
            HystRec['hyst_ms_moment'] = hpars['hysteresis_ms_moment']
            HystRec['hyst_bc'] = hpars['hysteresis_bc']
            HystRec['hyst_bcr'] = hpars['hysteresis_bcr']
            HystRec['hyst_xhf'] = hpars['hysteresis_xhf']
            HystRec['experiments'] = e
            HystRec['software_packages'] = version_num
            if hpars["magic_method_codes"] not in hmeths:
                hmeths.append(hpars["magic_method_codes"])
            methods = ""
            for meth in hmeths:
                methods = methods+meth.strip()+":"
            HystRec["method_codes"] = methods[:-1]
            HystRec["citations"] = "This study"
        if len(Bdcd) > 0:
            rmeths = []
            for meth in meths:
                rmeths.append(meth)
            if verbose and make_plots:
                print('plotting IRM')
            if irm_init == 0:
                cnt += 1
                HDD['irm'] = cnt
                if make_plots and (not save_plots):
                    pmagplotlib.plot_init(HDD['irm'], 5, 5)
                irm_init = 1
            rpars = pmagplotlib.plot_irm(HDD['irm'], Bdcd, Mdcd, irm_exp)
            HystRec['rem_mr_moment'] = rpars['remanence_mr_moment']
            HystRec['rem_bcr'] = rpars['remanence_bcr']
            HystRec['experiments'] = specimen+':'+irm_exp
            if rpars["magic_method_codes"] not in meths:
                meths.append(rpars["magic_method_codes"])
            methods = ""
            for meth in rmeths:
                methods = methods+meth.strip()+":"
            # NOTE(review): assumes LP-HYS data exists alongside DCD data
            # (method_codes set above) -- true for specimens drawn from sids
            HystRec["method_codes"] = HystRec['method_codes']+':'+methods[:-1]
            HystRec["citations"] = "This study"
        else:
            if irm_init:
                pmagplotlib.clearFIG(HDD['irm'])
        if len(Bimag) > 0:
            if verbose and make_plots:
                print('plotting initial magnetization curve')
            # first normalize by Ms (assumes hpars was set by the LP-HYS branch)
            Mnorm = []
            for m in Mimag:
                Mnorm.append(m / float(hpars['hysteresis_ms_moment']))
            if imag_init == 0:
                HDD['imag'] = 4
                if make_plots and (not save_plots):
                    pmagplotlib.plot_init(HDD['imag'], 5, 5)
                imag_init = 1
            pmagplotlib.plot_imag(HDD['imag'], Bimag, Mnorm, imag_exp)
        else:
            if imag_init:
                pmagplotlib.clearFIG(HDD['imag'])
        if len(list(HystRec.keys())) > 0:
            HystRecs.append(HystRec)
        files = {}
        if save_plots and make_plots:
            if pltspec:
                s = pltspec
            else:
                s = specimen
            for key in list(HDD.keys()):
                if incl_directory:
                    files[key] = os.path.join(output_dir_path, s+'_'+key+'.'+fmt)
                else:
                    files[key] = s+'_'+key+'.'+fmt
            pmagplotlib.save_plots(HDD, files, incl_directory=incl_directory)
        if interactive:
            pmagplotlib.draw_figs(HDD)
            ans = input(
                "S[a]ve plots, [s]pecimen name, [q]uit, <return> to continue\n ")
            if ans == "a":
                files = {}
                for key in list(HDD.keys()):
                    if incl_directory:
                        files[key] = os.path.join(output_dir_path, specimen+'_'+key+'.'+fmt)
                    else:
                        files[key] = specimen+'_'+key+'.'+fmt
                pmagplotlib.save_plots(HDD, files, incl_directory=incl_directory)
            if ans == '':
                k += 1
            if ans == "p":
                # back up one specimen, discarding its record
                del HystRecs[-1]
                k -= 1
            if ans == 'q':
                print("Good bye")
                return True, []
            if ans == 's':
                keepon = 1
                specimen = input(
                    'Enter desired specimen name (or first part there of): ')
                while keepon == 1:
                    try:
                        k = sids.index(specimen)
                        keepon = 0
                    except ValueError:
                        # no exact match: offer partial matches and re-prompt
                        # (loop retries instead of crashing on a second miss)
                        tmplist = []
                        for qq in range(len(sids)):
                            if specimen in sids[qq]:
                                tmplist.append(sids[qq])
                        print(specimen, " not found, but this was: ")
                        print(tmplist)
                        specimen = input('Select one or try again\n ')
        else:
            k += 1
        if len(B) == 0 and len(Bdcd) == 0:
            if verbose:
                print('skipping this one - no hysteresis data')
            k += 1
        if k < len(sids):
            # must re-init figs for Windows to keep size
            if make_plots and set_env.IS_WIN:
                if not save_plots:
                    pmagplotlib.plot_init(HDD['DdeltaM'], 5, 5)
                    pmagplotlib.plot_init(HDD['deltaM'], 5, 5)
                    pmagplotlib.plot_init(HDD['hyst'], 5, 5)
                if len(Bimag) > 0:
                    HDD['imag'] = 4
                    if not save_plots:
                        pmagplotlib.plot_init(HDD['imag'], 5, 5)
                if len(Bdcd) > 0:
                    HDD['irm'] = 5 if 'imag' in HDD else 4
                    if not save_plots:
                        pmagplotlib.plot_init(HDD['irm'], 5, 5)
            elif not make_plots and set_env.IS_WIN:
                HDD['hyst'], HDD['deltaM'], HDD['DdeltaM'], HDD['irm'], HDD['imag'] = 0, 0, 0, 0, 0
    if len(HystRecs) > 0:
        # go through prior_data, clean out prior results and save combined file as spec_file
        SpecRecs, keys = [], list(HystRecs[0].keys())
        if len(prior_data) > 0:
            prior_keys = list(prior_data[0].keys())
        else:
            prior_keys = []
        for rec in prior_data:
            for key in keys:
                if key not in list(rec.keys()):
                    rec[key] = ""
            if 'LP-HYS' not in rec['method_codes']:
                SpecRecs.append(rec)
        for rec in HystRecs:
            for key in prior_keys:
                if key not in list(rec.keys()):
                    rec[key] = ""
            prior = pmag.get_dictitem(
                prior_data, 'specimen', rec['specimen'], 'T')
            if len(prior) > 0 and 'sample' in list(prior[0].keys()):
                # pull sample name from prior specimens table
                rec['sample'] = prior[0]['sample']
            SpecRecs.append(rec)
        # drop unnecessary/duplicate rows
        con = cb.Contribution(input_dir_path, read_tables=[])
        con.add_magic_table_from_data('specimens', SpecRecs)
        con.tables['specimens'].drop_duplicate_rows(
            ignore_cols=['specimen', 'sample', 'citations', 'software_packages'])
        con.tables['specimens'].df = con.tables['specimens'].df.drop_duplicates()
        spec_file = os.path.join(output_dir_path, os.path.split(spec_file)[1])
        con.write_table_to_file('specimens', custom_name=spec_file)
    if verbose:
        print("hysteresis parameters saved in ", spec_file)
    return True, [spec_file]
Plotting may be called interactively with save_plots==False,
or be suppressed entirely with make_plots==False.
Parameters
----------
output_dir_path : str, default "."
        Note: if using Windows, all figures will be saved to working directory
*not* dir_path
input_dir_path : str
        path for input file if different from output_dir_path (default is same)
spec_file : str, default "specimens.txt"
output file to save hysteresis data
meas_file : str, default "measurements.txt"
input measurement file
fmt : str, default "svg"
format for figures, [svg, jpg, pdf, png]
save_plots : bool, default True
if True, generate and save all requested plots
make_plots : bool, default True
if False, skip making plots and just save hysteresis data
(if False, save_plots will be set to False also)
pltspec : str, default ""
specimen name to plot, otherwise will plot all specimens
n_specs : int
number of specimens to plot, default 5
if you want to make all possible plots, specify "all"
interactive : bool, default False
interactively plot and display for each specimen
(this is best used on the command line or in the Python interpreter)
Returns
---------
Tuple : (True or False indicating if conversion was sucessful, output file names written) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L10773-L11118 |
def sites_extract(site_file='sites.txt', directions_file='directions.xls',
                  intensity_file='intensity.xls', info_file='site_info.xls',
                  output_dir_path='.', input_dir_path='', latex=False):
    """
    Extracts directional and/or intensity data from a MagIC 3.0 format sites.txt file.
    Default output format is an Excel file.
    Optional latex format longtable file which can be uploaded to Overleaf or
    typeset with latex on your own computer.

    Parameters
    ___________
    site_file : str
        input file name
    directions_file : str
        output file name for directional data
    intensity_file : str
        output file name for intensity data
    info_file : str
        output file name for site information (lat, lon, location, age....)
    output_dir_path : str
        path for output files
    input_dir_path : str
        path for input file if different from output_dir_path (default is same)
    latex : boolean
        if True, output file should be latex formatted table with a .tex ending

    Return :
        [True,False], error type : True if successful
    Effects :
        writes Excel or LaTeX formatted tables for use in publications
    """
    def _write_latex(df, path):
        # Render df as a stand-alone LaTeX document containing one longtable.
        # Backslashes are escaped: the original single-backslash forms
        # ('\documentclass', '\end') only worked because '\d'/'\e' are not
        # recognized escapes (a DeprecationWarning in Python 3).
        with open(path, 'w+', errors="backslashreplace") as out:
            out.write('\\documentclass{article}\n')
            out.write('\\usepackage{booktabs}\n')
            out.write('\\usepackage{longtable}\n')
            out.write('\\begin{document}')
            out.write(df.to_latex(index=False, longtable=True, multicolumn=False))
            out.write('\\end{document}\n')

    # initialize outfile names
    input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, output_dir_path)
    try:
        fname = pmag.resolve_file_name(site_file, input_dir_path)
    except IOError:
        print("bad site file name")
        return False, "bad site file name"
    sites_df = pd.read_csv(fname, sep='\t', header=1)
    # directional data
    dir_df = map_magic.convert_site_dm3_table_directions(sites_df)
    dir_file = pmag.resolve_file_name(directions_file, output_dir_path)
    if len(dir_df):
        if latex:
            if dir_file.endswith('.xls'):
                dir_file = dir_file[:-4] + ".tex"
            _write_latex(dir_df, dir_file)
        else:
            dir_df.to_excel(dir_file, index=False)
    else:
        print("No directional data for output.")
        dir_file = None
    # intensity data
    intensity_file = pmag.resolve_file_name(intensity_file, output_dir_path)
    int_df = map_magic.convert_site_dm3_table_intensity(sites_df)
    if len(int_df):
        if latex:
            if intensity_file.endswith('.xls'):
                intensity_file = intensity_file[:-4] + ".tex"
            _write_latex(int_df, intensity_file)
        else:
            int_df.to_excel(intensity_file, index=False)
    else:
        print("No intensity data for output.")
        intensity_file = None
    # site info
    nfo_df = sites_df.dropna(subset=['lat', 'lon'])  # delete blank locations
    if len(nfo_df) > 0:
        SiteCols = ["Site", "Location", "Lat. (N)", "Long. (E)"]
        info_file = pmag.resolve_file_name(info_file, output_dir_path)
        age_cols = ['age', 'age_sigma', 'age_unit']
        for col in age_cols:
            if col not in nfo_df:
                nfo_df[col] = None
        # only include age columns if at least one site has a complete age
        test_age = nfo_df.dropna(subset=['age', 'age_sigma', 'age_unit'])
        if len(test_age) > 0:
            SiteCols.append("Age ")
            SiteCols.append("Age sigma")
            SiteCols.append("Units")
            nfo_df = nfo_df[['site', 'location', 'lat',
                             'lon', 'age', 'age_sigma', 'age_unit']]
        else:
            nfo_df = nfo_df[['site', 'location', 'lat', 'lon']]
        nfo_df.drop_duplicates(inplace=True)
        nfo_df.columns = SiteCols
        nfo_df.fillna(value='', inplace=True)
        if latex:
            if info_file.endswith('.xls'):
                info_file = info_file[:-4] + ".tex"
            _write_latex(nfo_df, info_file)
        else:
            nfo_df.to_excel(info_file, index=False)
    else:
        print("No location information for output.")
        info_file = None
    return True, [fname for fname in [info_file, intensity_file, dir_file] if fname]
Default output format is an Excel file.
Optional latex format longtable file which can be uploaded to Overleaf or
typeset with latex on your own computer.
Parameters
___________
site_file : str
input file name
directions_file : str
output file name for directional data
intensity_file : str
output file name for intensity data
site_info : str
output file name for site information (lat, lon, location, age....)
output_dir_path : str
path for output files
input_dir_path : str
        path for input file if different from output_dir_path (default is same)
latex : boolean
if True, output file should be latex formatted table with a .tex ending
Return :
[True,False], error type : True if successful
Effects :
writes Excel or LaTeX formatted tables for use in publications | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L11121-L11240 |
def specimens_extract(spec_file='specimens.txt', output_file='specimens.xls', landscape=False,
                      longtable=False, output_dir_path='.', input_dir_path='', latex=False):
    """
    Extracts specimen results from a MagIC 3.0 format specimens.txt file.
    Default output format is an Excel file; optionally a LaTeX table that can
    be uploaded to Overleaf or typeset with latex on your own computer.

    Parameters
    ___________
    spec_file : str, default "specimens.txt"
        input file name
    output_file : str, default "specimens.xls"
        output file name
    landscape : boolean, default False
        if True output latex landscape table
    longtable : boolean
        if True output latex longtable
    output_dir_path : str, default "."
        output file directory
    input_dir_path : str, default ""
        path for input file if different from output_dir_path (default is same)
    latex : boolean, default False
        if True, output file should be latex formatted table with a .tex ending

    Return :
        [True,False], data table error type : True if successful
    Effects :
        writes xls or latex formatted tables for use in publications
    """
    input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, output_dir_path)
    try:
        fname = pmag.resolve_file_name(spec_file, input_dir_path)
    except IOError:
        print("bad specimen file name")
        return False, "bad specimen file name"
    spec_df = pd.read_csv(fname, sep='\t', header=1)
    # drop all-empty columns, then rows without intensity results (if present)
    spec_df.dropna(axis='columns', how='all', inplace=True)
    if 'int_abs' in spec_df.columns:
        spec_df.dropna(subset=['int_abs'], inplace=True)
    if not len(spec_df):
        # original code fell through to `return True, [out_file]` with
        # out_file unbound (NameError); report and return an empty list
        print("No specimen data for output.")
        return True, []
    table_df = map_magic.convert_specimen_dm3_table(spec_df)
    out_file = pmag.resolve_file_name(output_file, output_dir_path)
    if latex:
        if out_file.endswith('.xls'):
            # rsplit('.', 1) strips only the extension; plain rsplit('.')[0]
            # truncated the path at its FIRST dot
            out_file = out_file.rsplit('.', 1)[0] + ".tex"
        with open(out_file, 'w+', errors="backslashreplace") as info_out:
            info_out.write('\\documentclass{article}\n')
            info_out.write('\\usepackage{booktabs}\n')
            if landscape:
                info_out.write('\\usepackage{lscape}')  # no trailing newline, as in original output
            if longtable:
                info_out.write('\\usepackage{longtable}\n')
            info_out.write('\\begin{document}\n')
            if landscape:
                info_out.write('\\begin{landscape}\n')
            info_out.write(table_df.to_latex(index=False, longtable=longtable,
                                             escape=True, multicolumn=False))
            if landscape:
                info_out.write('\\end{landscape}\n')
            info_out.write('\\end{document}\n')
    else:
        table_df.to_excel(out_file, index=False)
    return True, [out_file]
Default output format is an Excel file.
typeset with latex on your own computer.
Parameters
___________
spec_file : str, default "specimens.txt"
input file name
output_file : str, default "specimens.xls"
output file name
landscape : boolean, default False
if True output latex landscape table
longtable : boolean
if True output latex longtable
output_dir_path : str, default "."
output file directory
input_dir_path : str, default ""
path for intput file if different from output_dir_path (default is same)
latex : boolean, default False
if True, output file should be latex formatted table with a .tex ending
Return :
[True,False], data table error type : True if successful
Effects :
writes xls or latex formatted tables for use in publications | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L11243-L11310 |
def criteria_extract(crit_file='criteria.txt', output_file='criteria.xls',
                     output_dir_path='.', input_dir_path='', latex=False):
    """
    Extracts criteria from a MagIC 3.0 format criteria.txt file.
    Default output format is an Excel file; optionally a LaTeX table that can
    be typeset with latex on your own computer.

    Parameters
    ___________
    crit_file : str, default "criteria.txt"
        input file name
    output_file : str, default "criteria.xls"
        output file name
    output_dir_path : str, default "."
        output file directory
    input_dir_path : str, default ""
        path for input file if different from output_dir_path (default is same)
    latex : boolean, default False
        if True, output file should be latex formatted table with a .tex ending

    Return :
        [True,False], data table error type : True if successful
    Effects :
        writes xls or latex formatted tables for use in publications
    """
    input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, output_dir_path)
    try:
        fname = pmag.resolve_file_name(crit_file, input_dir_path)
    except IOError:
        print("bad criteria file name")
        return False, "bad criteria file name"
    crit_df = pd.read_csv(fname, sep='\t', header=1)
    if not len(crit_df):
        # original code fell through to `return True, [out_file]` with
        # out_file unbound (NameError); report and return an empty list
        print("No criteria for output.")
        return True, []
    out_file = pmag.resolve_file_name(output_file, output_dir_path)
    # split "table.column" into separate Table / Statistic columns
    s = crit_df['table_column'].str.split(pat='.', expand=True)
    crit_df['table'] = s[0]
    crit_df['column'] = s[1]
    crit_df = crit_df[['table', 'column',
                       'criterion_value', 'criterion_operation']]
    crit_df.columns = ['Table', 'Statistic', 'Threshold', 'Operation']
    if latex:
        if out_file.endswith('.xls'):
            # rsplit('.', 1) strips only the extension; plain rsplit('.')[0]
            # truncated the path at its FIRST dot
            out_file = out_file.rsplit('.', 1)[0] + ".tex"
        # NOTE(review): these assignments create a NEW lowercase 'operation'
        # column (shown as a fifth column in the LaTeX table) rather than
        # replacing the symbols in 'Operation'; looks like the intent was the
        # latter -- preserved as-is pending confirmation
        crit_df.loc[crit_df['Operation'].str.contains(
            '<'), 'operation'] = 'maximum'
        crit_df.loc[crit_df['Operation'].str.contains(
            '>'), 'operation'] = 'minimum'
        crit_df.loc[crit_df['Operation'] == '=', 'operation'] = 'equal to'
        with open(out_file, 'w+', errors="backslashreplace") as info_out:
            info_out.write('\\documentclass{article}\n')
            info_out.write('\\usepackage{booktabs}\n')
            # T1 will ensure that symbols like '<' are formatted correctly
            info_out.write("\\usepackage[T1]{fontenc}\n")
            info_out.write('\\begin{document}')
            info_out.write(crit_df.to_latex(index=False, longtable=False,
                                            escape=True, multicolumn=False))
            info_out.write('\\end{document}\n')
    else:
        crit_df.to_excel(out_file, index=False)
    return True, [out_file]
Default output format is an Excel file.
typeset with latex on your own computer.
Parameters
___________
crit_file : str, default "criteria.txt"
input file name
output_file : str, default "criteria.xls"
output file name
output_dir_path : str, default "."
output file directory
input_dir_path : str, default ""
        path for input file if different from output_dir_path (default is same)
latex : boolean, default False
if True, output file should be latex formatted table with a .tex ending
Return :
[True,False], data table error type : True if successful
Effects :
writes xls or latex formatted tables for use in publications | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L11313-L11381 |
PmagPy/PmagPy | pmagpy/ipmag.py | eqarea_magic | def eqarea_magic(in_file='sites.txt', dir_path=".", input_dir_path="",
spec_file="specimens.txt", samp_file="samples.txt",
site_file="sites.txt", loc_file="locations.txt",
plot_by="all", crd="g", ignore_tilt=False,
save_plots=True, fmt="svg", contour=False, color_map="coolwarm",
plot_ell="", n_plots=5, interactive=False):
"""
makes equal area projections from declination/inclination data
Parameters
----------
in_file : str, default "sites.txt"
dir_path : str
output directory, default "."
input_dir_path : str
input file directory (if different from dir_path), default ""
spec_file : str
input specimen file name, default "specimens.txt"
samp_file: str
input sample file name, default "samples.txt"
site_file : str
input site file name, default "sites.txt"
loc_file : str
input location file name, default "locations.txt"
plot_by : str
[spc, sam, sit, loc, all] (specimen, sample, site, location, all), default "all"
crd : ['s','g','t'], coordinate system for plotting whereby:
s : specimen coordinates, aniso_tile_correction = -1
g : geographic coordinates, aniso_tile_correction = 0 (default)
t : tilt corrected coordinates, aniso_tile_correction = 100
ignore_tilt : bool
default False. If True, data are unoriented (allows plotting of measurement dec/inc)
save_plots : bool
plot and save non-interactively, default True
fmt : str
["png", "svg", "pdf", "jpg"], default "svg"
contour : bool
plot as color contour
colormap : str
color map for contour plotting, default "coolwarm"
see cartopy documentation for more options
plot_ell : str
[F,K,B,Be,Bv] plot Fisher, Kent, Bingham, Bootstrap ellipses or Boostrap eigenvectors
default "" plots none
n_plots : int
maximum number of plots to make, default 5
if you want to make all possible plots, specify "all"
interactive : bool, default False
interactively plot and display for each specimen
(this is best used on the command line or in the Python interpreter)
Returns
---------
type - Tuple : (True or False indicating if conversion was sucessful, file name(s) written)
"""
saved = []
# parse out input/out directories
input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
# initialize some variables
verbose = pmagplotlib.verbose
FIG = {} # plot dictionary
FIG['eqarea'] = 1 # eqarea is figure 1
pmagplotlib.plot_init(FIG['eqarea'], 5, 5)
# get coordinate system
if crd == "s":
coord = "-1"
elif crd == "t":
coord = "100"
else:
coord = "0"
# get item to plot by
if plot_by == 'all':
plot_key = 'all'
elif plot_by == 'sit':
plot_key = 'site'
elif plot_by == 'sam':
plot_key = 'sample'
elif plot_by == 'spc':
plot_key = 'specimen'
else:
plot_by = 'all'
plot_key = 'all'
# get distribution to plot ellipses/eigenvectors if desired
if save_plots:
verbose = False
# set keys
dec_key = 'dir_dec'
inc_key = 'dir_inc'
tilt_key = 'dir_tilt_correction'
# create contribution
fnames = {"specimens": spec_file, "samples": samp_file,
'sites': site_file, 'locations': loc_file}
if not os.path.exists(pmag.resolve_file_name(in_file, input_dir_path)):
print('-E- Could not find {}'.format(in_file))
return False, []
contribution = cb.Contribution(input_dir_path, custom_filenames=fnames,
single_file=in_file)
table_name = list(contribution.tables.keys())[0]
contribution.add_magic_table("contribution")
# get contribution id if available for server plots
if pmagplotlib.isServer:
con_id = contribution.get_con_id()
# try to propagate all names to measurement level
try:
contribution.propagate_location_to_samples()
contribution.propagate_location_to_specimens()
contribution.propagate_location_to_measurements()
except KeyError as ex:
pass
# the object that contains the DataFrame + useful helper methods:
data_container = contribution.tables[table_name]
# the actual DataFrame:
data = data_container.df
plot_type = data_container.dtype
if plot_key != "all" and plot_key not in data.columns:
print("-E- You can't plot by {} with the data provided".format(plot_key))
return False, []
# add tilt key into DataFrame columns if it isn't there already
if tilt_key not in data.columns:
data.loc[:, tilt_key] = None
if verbose:
print(len(data), ' records read from ', in_file)
# find desired dec,inc data:
dir_type_key = ''
#
# get plotlist if not plotting all records
#
plotlist = []
if plot_key != "all":
# return all where plot_key is not blank
if plot_key not in data.columns:
print('-E- Can\'t plot by "{}". That header is not in infile: {}'.format(
plot_key, in_file))
return False, []
plots = data[data[plot_key].notnull()]
plotlist = plots[plot_key].unique() # grab unique values
else:
plotlist.append('All')
if n_plots != "all":
if len(plotlist) > n_plots:
plotlist = plotlist[:n_plots]
fignum = 0
for plot in plotlist:
fignum += 1
FIG['eqarea'] = fignum
pmagplotlib.plot_init(FIG['eqarea'], 5, 5)
if plot_ell:
dist = plot_ell.upper()
# if dist type is unrecognized, use Fisher
if dist not in ['F', 'K', 'B', 'BE', 'BV']:
dist = 'F'
if dist == "BV":
fignum += 1
FIG['bdirs'] = fignum
pmagplotlib.plot_init(FIG['bdirs'], 5, 5)
if verbose:
print(plot)
if plot == 'All':
# plot everything at once
plot_data = data
else:
# pull out only partial data
plot_data = data[data[plot_key] == plot]
# get location names for the data
locs = []
if 'location' in plot_data.columns:
locs = plot_data['location'].dropna().unique()
DIblock = []
GCblock = []
# SLblock, SPblock = [], []
title = plot
mode = 1
if dec_key not in plot_data.columns:
print("-W- No dec/inc data")
continue
# get all records where dec & inc values exist
plot_data = plot_data[plot_data[dec_key].notnull()
& plot_data[inc_key].notnull()]
if plot_data.empty:
print("-W- No dec/inc data")
continue
# get metadata for naming the plot file
locations = str(data_container.get_name('location', df_slice=plot_data))
site = str(data_container.get_name('site', df_slice=plot_data))
sample = str(data_container.get_name('sample', df_slice=plot_data))
specimen = str(data_container.get_name('specimen', df_slice=plot_data))
# make sure method_codes is in plot_data
if 'method_codes' not in plot_data.columns:
plot_data['method_codes'] = ''
# get data blocks
# would have to ignore tilt to use measurement level data
DIblock = data_container.get_di_block(df_slice=plot_data,
tilt_corr=coord, excl=['DE-BFP'], ignore_tilt=ignore_tilt)
if title == 'All':
if len(locs):
title = " ,".join(locs) + " - {} {} plotted".format(str(len(DIblock)), plot_type)
else:
title = "{} {} plotted".format(str(len(DIblock)), plot_type)
#SLblock = [[ind, row['method_codes']] for ind, row in plot_data.iterrows()]
# get great circles
great_circle_data = data_container.get_records_for_code('DE-BFP', incl=True,
use_slice=True, sli=plot_data)
if len(great_circle_data) > 0:
gc_cond = great_circle_data[tilt_key] == coord
GCblock = [[float(row[dec_key]), float(row[inc_key])]
for ind, row in great_circle_data[gc_cond].iterrows()]
#SPblock = [[ind, row['method_codes']] for ind, row in great_circle_data[gc_cond].iterrows()]
if len(DIblock) > 0:
if not contour:
pmagplotlib.plot_eq(FIG['eqarea'], DIblock, title)
else:
pmagplotlib.plot_eq_cont(
FIG['eqarea'], DIblock, color_map=color_map)
else:
pmagplotlib.plot_net(FIG['eqarea'])
if len(GCblock) > 0:
for rec in GCblock:
pmagplotlib.plot_circ(FIG['eqarea'], rec, 90., 'g')
if len(DIblock) == 0 and len(GCblock) == 0:
if verbose:
print("no records for plotting")
fignum -= 1
if 'bdirs' in FIG:
fignum -= 1
continue
# sys.exit()
if plot_ell:
ppars = pmag.doprinc(DIblock) # get principal directions
nDIs, rDIs, npars, rpars = [], [], [], []
for rec in DIblock:
angle = pmag.angle([rec[0], rec[1]], [
ppars['dec'], ppars['inc']])
if angle > 90.:
rDIs.append(rec)
else:
nDIs.append(rec)
if dist == 'B': # do on whole dataset
etitle = "Bingham confidence ellipse"
bpars = pmag.dobingham(DIblock)
for key in list(bpars.keys()):
if key != 'n' and verbose:
print(" ", key, '%7.1f' % (bpars[key]))
if key == 'n' and verbose:
print(" ", key, ' %i' % (bpars[key]))
npars.append(bpars['dec'])
npars.append(bpars['inc'])
npars.append(bpars['Zeta'])
npars.append(bpars['Zdec'])
npars.append(bpars['Zinc'])
npars.append(bpars['Eta'])
npars.append(bpars['Edec'])
npars.append(bpars['Einc'])
if dist == 'F':
etitle = "Fisher confidence cone"
if len(nDIs) > 2:
fpars = pmag.fisher_mean(nDIs)
for key in list(fpars.keys()):
if key != 'n' and verbose:
print(" ", key, '%7.1f' % (fpars[key]))
if key == 'n' and verbose:
print(" ", key, ' %i' % (fpars[key]))
mode += 1
npars.append(fpars['dec'])
npars.append(fpars['inc'])
npars.append(fpars['alpha95']) # Beta
npars.append(fpars['dec'])
isign = abs(fpars['inc']) / fpars['inc']
npars.append(fpars['inc']-isign*90.) # Beta inc
npars.append(fpars['alpha95']) # gamma
npars.append(fpars['dec']+90.) # Beta dec
npars.append(0.) # Beta inc
if len(rDIs) > 2:
fpars = pmag.fisher_mean(rDIs)
if verbose:
print("mode ", mode)
for key in list(fpars.keys()):
if key != 'n' and verbose:
print(" ", key, '%7.1f' % (fpars[key]))
if key == 'n' and verbose:
print(" ", key, ' %i' % (fpars[key]))
mode += 1
rpars.append(fpars['dec'])
rpars.append(fpars['inc'])
rpars.append(fpars['alpha95']) # Beta
rpars.append(fpars['dec'])
isign = abs(fpars['inc']) / fpars['inc']
rpars.append(fpars['inc']-isign*90.) # Beta inc
rpars.append(fpars['alpha95']) # gamma
rpars.append(fpars['dec']+90.) # Beta dec
rpars.append(0.) # Beta inc
if dist == 'K':
etitle = "Kent confidence ellipse"
if len(nDIs) > 3:
kpars = pmag.dokent(nDIs, len(nDIs))
if verbose:
print("mode ", mode)
for key in list(kpars.keys()):
if key != 'n' and verbose:
print(" ", key, '%7.1f' % (kpars[key]))
if key == 'n' and verbose:
print(" ", key, ' %i' % (kpars[key]))
mode += 1
npars.append(kpars['dec'])
npars.append(kpars['inc'])
npars.append(kpars['Zeta'])
npars.append(kpars['Zdec'])
npars.append(kpars['Zinc'])
npars.append(kpars['Eta'])
npars.append(kpars['Edec'])
npars.append(kpars['Einc'])
if len(rDIs) > 3:
kpars = pmag.dokent(rDIs, len(rDIs))
if verbose:
print("mode ", mode)
for key in list(kpars.keys()):
if key != 'n' and verbose:
print(" ", key, '%7.1f' % (kpars[key]))
if key == 'n' and verbose:
print(" ", key, ' %i' % (kpars[key]))
mode += 1
rpars.append(kpars['dec'])
rpars.append(kpars['inc'])
rpars.append(kpars['Zeta'])
rpars.append(kpars['Zdec'])
rpars.append(kpars['Zinc'])
rpars.append(kpars['Eta'])
rpars.append(kpars['Edec'])
rpars.append(kpars['Einc'])
else: # assume bootstrap
if dist == 'BE':
if len(nDIs) > 5:
BnDIs = pmag.di_boot(nDIs)
Bkpars = pmag.dokent(BnDIs, 1.)
if verbose:
print("mode ", mode)
for key in list(Bkpars.keys()):
if key != 'n' and verbose:
print(" ", key, '%7.1f' % (Bkpars[key]))
if key == 'n' and verbose:
print(" ", key, ' %i' % (Bkpars[key]))
mode += 1
npars.append(Bkpars['dec'])
npars.append(Bkpars['inc'])
npars.append(Bkpars['Zeta'])
npars.append(Bkpars['Zdec'])
npars.append(Bkpars['Zinc'])
npars.append(Bkpars['Eta'])
npars.append(Bkpars['Edec'])
npars.append(Bkpars['Einc'])
if len(rDIs) > 5:
BrDIs = pmag.di_boot(rDIs)
Bkpars = pmag.dokent(BrDIs, 1.)
if verbose:
print("mode ", mode)
for key in list(Bkpars.keys()):
if key != 'n' and verbose:
print(" ", key, '%7.1f' % (Bkpars[key]))
if key == 'n' and verbose:
print(" ", key, ' %i' % (Bkpars[key]))
mode += 1
rpars.append(Bkpars['dec'])
rpars.append(Bkpars['inc'])
rpars.append(Bkpars['Zeta'])
rpars.append(Bkpars['Zdec'])
rpars.append(Bkpars['Zinc'])
rpars.append(Bkpars['Eta'])
rpars.append(Bkpars['Edec'])
rpars.append(Bkpars['Einc'])
etitle = "Bootstrapped confidence ellipse"
elif dist == 'BV':
sym = {'lower': ['o', 'c'], 'upper': [
'o', 'g'], 'size': 3, 'edgecolor': 'face'}
if len(nDIs) > 5:
BnDIs = pmag.di_boot(nDIs)
pmagplotlib.plot_eq_sym(
FIG['bdirs'], BnDIs, 'Bootstrapped Eigenvectors', sym)
if len(rDIs) > 5:
BrDIs = pmag.di_boot(rDIs)
if len(nDIs) > 5: # plot on existing plots
pmagplotlib.plot_di_sym(FIG['bdirs'], BrDIs, sym)
else:
pmagplotlib.plot_eq(
FIG['bdirs'], BrDIs, 'Bootstrapped Eigenvectors')
if dist == 'B':
if len(nDIs) > 3 or len(rDIs) > 3:
pmagplotlib.plot_conf(FIG['eqarea'], etitle, [], npars, 0)
elif len(nDIs) > 3 and dist != 'BV':
pmagplotlib.plot_conf(FIG['eqarea'], etitle, [], npars, 0)
if len(rDIs) > 3:
pmagplotlib.plot_conf(FIG['eqarea'], etitle, [], rpars, 0)
elif len(rDIs) > 3 and dist != 'BV':
pmagplotlib.plot_conf(FIG['eqarea'], etitle, [], rpars, 0)
for key in list(FIG.keys()):
files = {}
#if filename: # use provided filename
# filename += '.' + fmt
if pmagplotlib.isServer: # use server plot naming convention
if plot_key == 'all':
filename = 'LO:_'+locations+'_SI:__SA:__SP:__CO:_'+crd+'_TY:_'+key+'_.'+fmt
else:
filename = 'LO:_'+locations+'_SI:_'+site+'_SA:_'+sample + \
'_SP:_'+str(specimen)+'_CO:_'+crd+'_TY:_'+key+'_.'+fmt
elif plot_key == 'all':
filename = 'all'
if locs:
loc_string = "_".join(
[str(loc).replace(' ', '_') for loc in locs])
filename += "_" + loc_string
filename += "_" + crd + "_" + key
filename += ".{}".format(fmt)
else: # use more readable naming convention
filename = ''
# fix this if plot_by is location , for example
use_names = {'location': [locations], 'site': [locations, site],
'sample': [locations, site, sample],
'specimen': [locations, site, sample, specimen]}
use = use_names[plot_key]
use.extend([crd, key])
# [locations, site, sample, specimen, crd, key]:
for item in use:
if item:
item = item.replace(' ', '_')
filename += item + '_'
if filename.endswith('_'):
filename = filename[:-1]
filename += ".{}".format(fmt)
if not pmagplotlib.isServer:
filename = os.path.join(dir_path, filename)
files[key] = filename
if pmagplotlib.isServer:
titles = {'eqarea': 'Equal Area Plot'}
FIG = pmagplotlib.add_borders(FIG, titles, con_id=con_id)
saved_figs = pmagplotlib.save_plots(FIG, files)
saved.extend(saved_figs)
elif save_plots:
saved_figs = pmagplotlib.save_plots(FIG, files, incl_directory=True)
saved.extend(saved_figs)
continue
elif interactive:
pmagplotlib.draw_figs(FIG)
ans = input(" S[a]ve to save plot, [q]uit, Return to continue: ")
if ans == "q":
return True, []
if ans == "a":
saved_figs = pmagplotlib.save_plots(FIG, files, incl_directory=True)
saved.extend(saved)
continue
return True, saved | python | def eqarea_magic(in_file='sites.txt', dir_path=".", input_dir_path="",
spec_file="specimens.txt", samp_file="samples.txt",
site_file="sites.txt", loc_file="locations.txt",
plot_by="all", crd="g", ignore_tilt=False,
save_plots=True, fmt="svg", contour=False, color_map="coolwarm",
plot_ell="", n_plots=5, interactive=False):
"""
makes equal area projections from declination/inclination data
Parameters
----------
in_file : str, default "sites.txt"
dir_path : str
output directory, default "."
input_dir_path : str
input file directory (if different from dir_path), default ""
spec_file : str
input specimen file name, default "specimens.txt"
samp_file: str
input sample file name, default "samples.txt"
site_file : str
input site file name, default "sites.txt"
loc_file : str
input location file name, default "locations.txt"
plot_by : str
[spc, sam, sit, loc, all] (specimen, sample, site, location, all), default "all"
crd : ['s','g','t'], coordinate system for plotting whereby:
s : specimen coordinates, aniso_tile_correction = -1
g : geographic coordinates, aniso_tile_correction = 0 (default)
t : tilt corrected coordinates, aniso_tile_correction = 100
ignore_tilt : bool
default False. If True, data are unoriented (allows plotting of measurement dec/inc)
save_plots : bool
plot and save non-interactively, default True
fmt : str
["png", "svg", "pdf", "jpg"], default "svg"
contour : bool
plot as color contour
colormap : str
color map for contour plotting, default "coolwarm"
see cartopy documentation for more options
plot_ell : str
[F,K,B,Be,Bv] plot Fisher, Kent, Bingham, Bootstrap ellipses or Boostrap eigenvectors
default "" plots none
n_plots : int
maximum number of plots to make, default 5
if you want to make all possible plots, specify "all"
interactive : bool, default False
interactively plot and display for each specimen
(this is best used on the command line or in the Python interpreter)
Returns
---------
type - Tuple : (True or False indicating if conversion was sucessful, file name(s) written)
"""
saved = []
# parse out input/out directories
input_dir_path, dir_path = pmag.fix_directories(input_dir_path, dir_path)
# initialize some variables
verbose = pmagplotlib.verbose
FIG = {} # plot dictionary
FIG['eqarea'] = 1 # eqarea is figure 1
pmagplotlib.plot_init(FIG['eqarea'], 5, 5)
# get coordinate system
if crd == "s":
coord = "-1"
elif crd == "t":
coord = "100"
else:
coord = "0"
# get item to plot by
if plot_by == 'all':
plot_key = 'all'
elif plot_by == 'sit':
plot_key = 'site'
elif plot_by == 'sam':
plot_key = 'sample'
elif plot_by == 'spc':
plot_key = 'specimen'
else:
plot_by = 'all'
plot_key = 'all'
# get distribution to plot ellipses/eigenvectors if desired
if save_plots:
verbose = False
# set keys
dec_key = 'dir_dec'
inc_key = 'dir_inc'
tilt_key = 'dir_tilt_correction'
# create contribution
fnames = {"specimens": spec_file, "samples": samp_file,
'sites': site_file, 'locations': loc_file}
if not os.path.exists(pmag.resolve_file_name(in_file, input_dir_path)):
print('-E- Could not find {}'.format(in_file))
return False, []
contribution = cb.Contribution(input_dir_path, custom_filenames=fnames,
single_file=in_file)
table_name = list(contribution.tables.keys())[0]
contribution.add_magic_table("contribution")
# get contribution id if available for server plots
if pmagplotlib.isServer:
con_id = contribution.get_con_id()
# try to propagate all names to measurement level
try:
contribution.propagate_location_to_samples()
contribution.propagate_location_to_specimens()
contribution.propagate_location_to_measurements()
except KeyError as ex:
pass
# the object that contains the DataFrame + useful helper methods:
data_container = contribution.tables[table_name]
# the actual DataFrame:
data = data_container.df
plot_type = data_container.dtype
if plot_key != "all" and plot_key not in data.columns:
print("-E- You can't plot by {} with the data provided".format(plot_key))
return False, []
# add tilt key into DataFrame columns if it isn't there already
if tilt_key not in data.columns:
data.loc[:, tilt_key] = None
if verbose:
print(len(data), ' records read from ', in_file)
# find desired dec,inc data:
dir_type_key = ''
#
# get plotlist if not plotting all records
#
plotlist = []
if plot_key != "all":
# return all where plot_key is not blank
if plot_key not in data.columns:
print('-E- Can\'t plot by "{}". That header is not in infile: {}'.format(
plot_key, in_file))
return False, []
plots = data[data[plot_key].notnull()]
plotlist = plots[plot_key].unique() # grab unique values
else:
plotlist.append('All')
if n_plots != "all":
if len(plotlist) > n_plots:
plotlist = plotlist[:n_plots]
fignum = 0
for plot in plotlist:
fignum += 1
FIG['eqarea'] = fignum
pmagplotlib.plot_init(FIG['eqarea'], 5, 5)
if plot_ell:
dist = plot_ell.upper()
# if dist type is unrecognized, use Fisher
if dist not in ['F', 'K', 'B', 'BE', 'BV']:
dist = 'F'
if dist == "BV":
fignum += 1
FIG['bdirs'] = fignum
pmagplotlib.plot_init(FIG['bdirs'], 5, 5)
if verbose:
print(plot)
if plot == 'All':
# plot everything at once
plot_data = data
else:
# pull out only partial data
plot_data = data[data[plot_key] == plot]
# get location names for the data
locs = []
if 'location' in plot_data.columns:
locs = plot_data['location'].dropna().unique()
DIblock = []
GCblock = []
# SLblock, SPblock = [], []
title = plot
mode = 1
if dec_key not in plot_data.columns:
print("-W- No dec/inc data")
continue
# get all records where dec & inc values exist
plot_data = plot_data[plot_data[dec_key].notnull()
& plot_data[inc_key].notnull()]
if plot_data.empty:
print("-W- No dec/inc data")
continue
# get metadata for naming the plot file
locations = str(data_container.get_name('location', df_slice=plot_data))
site = str(data_container.get_name('site', df_slice=plot_data))
sample = str(data_container.get_name('sample', df_slice=plot_data))
specimen = str(data_container.get_name('specimen', df_slice=plot_data))
# make sure method_codes is in plot_data
if 'method_codes' not in plot_data.columns:
plot_data['method_codes'] = ''
# get data blocks
# would have to ignore tilt to use measurement level data
DIblock = data_container.get_di_block(df_slice=plot_data,
tilt_corr=coord, excl=['DE-BFP'], ignore_tilt=ignore_tilt)
if title == 'All':
if len(locs):
title = " ,".join(locs) + " - {} {} plotted".format(str(len(DIblock)), plot_type)
else:
title = "{} {} plotted".format(str(len(DIblock)), plot_type)
#SLblock = [[ind, row['method_codes']] for ind, row in plot_data.iterrows()]
# get great circles
great_circle_data = data_container.get_records_for_code('DE-BFP', incl=True,
use_slice=True, sli=plot_data)
if len(great_circle_data) > 0:
gc_cond = great_circle_data[tilt_key] == coord
GCblock = [[float(row[dec_key]), float(row[inc_key])]
for ind, row in great_circle_data[gc_cond].iterrows()]
#SPblock = [[ind, row['method_codes']] for ind, row in great_circle_data[gc_cond].iterrows()]
if len(DIblock) > 0:
if not contour:
pmagplotlib.plot_eq(FIG['eqarea'], DIblock, title)
else:
pmagplotlib.plot_eq_cont(
FIG['eqarea'], DIblock, color_map=color_map)
else:
pmagplotlib.plot_net(FIG['eqarea'])
if len(GCblock) > 0:
for rec in GCblock:
pmagplotlib.plot_circ(FIG['eqarea'], rec, 90., 'g')
if len(DIblock) == 0 and len(GCblock) == 0:
if verbose:
print("no records for plotting")
fignum -= 1
if 'bdirs' in FIG:
fignum -= 1
continue
# sys.exit()
if plot_ell:
ppars = pmag.doprinc(DIblock) # get principal directions
nDIs, rDIs, npars, rpars = [], [], [], []
for rec in DIblock:
angle = pmag.angle([rec[0], rec[1]], [
ppars['dec'], ppars['inc']])
if angle > 90.:
rDIs.append(rec)
else:
nDIs.append(rec)
if dist == 'B': # do on whole dataset
etitle = "Bingham confidence ellipse"
bpars = pmag.dobingham(DIblock)
for key in list(bpars.keys()):
if key != 'n' and verbose:
print(" ", key, '%7.1f' % (bpars[key]))
if key == 'n' and verbose:
print(" ", key, ' %i' % (bpars[key]))
npars.append(bpars['dec'])
npars.append(bpars['inc'])
npars.append(bpars['Zeta'])
npars.append(bpars['Zdec'])
npars.append(bpars['Zinc'])
npars.append(bpars['Eta'])
npars.append(bpars['Edec'])
npars.append(bpars['Einc'])
if dist == 'F':
etitle = "Fisher confidence cone"
if len(nDIs) > 2:
fpars = pmag.fisher_mean(nDIs)
for key in list(fpars.keys()):
if key != 'n' and verbose:
print(" ", key, '%7.1f' % (fpars[key]))
if key == 'n' and verbose:
print(" ", key, ' %i' % (fpars[key]))
mode += 1
npars.append(fpars['dec'])
npars.append(fpars['inc'])
npars.append(fpars['alpha95']) # Beta
npars.append(fpars['dec'])
isign = abs(fpars['inc']) / fpars['inc']
npars.append(fpars['inc']-isign*90.) # Beta inc
npars.append(fpars['alpha95']) # gamma
npars.append(fpars['dec']+90.) # Beta dec
npars.append(0.) # Beta inc
if len(rDIs) > 2:
fpars = pmag.fisher_mean(rDIs)
if verbose:
print("mode ", mode)
for key in list(fpars.keys()):
if key != 'n' and verbose:
print(" ", key, '%7.1f' % (fpars[key]))
if key == 'n' and verbose:
print(" ", key, ' %i' % (fpars[key]))
mode += 1
rpars.append(fpars['dec'])
rpars.append(fpars['inc'])
rpars.append(fpars['alpha95']) # Beta
rpars.append(fpars['dec'])
isign = abs(fpars['inc']) / fpars['inc']
rpars.append(fpars['inc']-isign*90.) # Beta inc
rpars.append(fpars['alpha95']) # gamma
rpars.append(fpars['dec']+90.) # Beta dec
rpars.append(0.) # Beta inc
if dist == 'K':
etitle = "Kent confidence ellipse"
if len(nDIs) > 3:
kpars = pmag.dokent(nDIs, len(nDIs))
if verbose:
print("mode ", mode)
for key in list(kpars.keys()):
if key != 'n' and verbose:
print(" ", key, '%7.1f' % (kpars[key]))
if key == 'n' and verbose:
print(" ", key, ' %i' % (kpars[key]))
mode += 1
npars.append(kpars['dec'])
npars.append(kpars['inc'])
npars.append(kpars['Zeta'])
npars.append(kpars['Zdec'])
npars.append(kpars['Zinc'])
npars.append(kpars['Eta'])
npars.append(kpars['Edec'])
npars.append(kpars['Einc'])
if len(rDIs) > 3:
kpars = pmag.dokent(rDIs, len(rDIs))
if verbose:
print("mode ", mode)
for key in list(kpars.keys()):
if key != 'n' and verbose:
print(" ", key, '%7.1f' % (kpars[key]))
if key == 'n' and verbose:
print(" ", key, ' %i' % (kpars[key]))
mode += 1
rpars.append(kpars['dec'])
rpars.append(kpars['inc'])
rpars.append(kpars['Zeta'])
rpars.append(kpars['Zdec'])
rpars.append(kpars['Zinc'])
rpars.append(kpars['Eta'])
rpars.append(kpars['Edec'])
rpars.append(kpars['Einc'])
else: # assume bootstrap
if dist == 'BE':
if len(nDIs) > 5:
BnDIs = pmag.di_boot(nDIs)
Bkpars = pmag.dokent(BnDIs, 1.)
if verbose:
print("mode ", mode)
for key in list(Bkpars.keys()):
if key != 'n' and verbose:
print(" ", key, '%7.1f' % (Bkpars[key]))
if key == 'n' and verbose:
print(" ", key, ' %i' % (Bkpars[key]))
mode += 1
npars.append(Bkpars['dec'])
npars.append(Bkpars['inc'])
npars.append(Bkpars['Zeta'])
npars.append(Bkpars['Zdec'])
npars.append(Bkpars['Zinc'])
npars.append(Bkpars['Eta'])
npars.append(Bkpars['Edec'])
npars.append(Bkpars['Einc'])
if len(rDIs) > 5:
BrDIs = pmag.di_boot(rDIs)
Bkpars = pmag.dokent(BrDIs, 1.)
if verbose:
print("mode ", mode)
for key in list(Bkpars.keys()):
if key != 'n' and verbose:
print(" ", key, '%7.1f' % (Bkpars[key]))
if key == 'n' and verbose:
print(" ", key, ' %i' % (Bkpars[key]))
mode += 1
rpars.append(Bkpars['dec'])
rpars.append(Bkpars['inc'])
rpars.append(Bkpars['Zeta'])
rpars.append(Bkpars['Zdec'])
rpars.append(Bkpars['Zinc'])
rpars.append(Bkpars['Eta'])
rpars.append(Bkpars['Edec'])
rpars.append(Bkpars['Einc'])
etitle = "Bootstrapped confidence ellipse"
elif dist == 'BV':
sym = {'lower': ['o', 'c'], 'upper': [
'o', 'g'], 'size': 3, 'edgecolor': 'face'}
if len(nDIs) > 5:
BnDIs = pmag.di_boot(nDIs)
pmagplotlib.plot_eq_sym(
FIG['bdirs'], BnDIs, 'Bootstrapped Eigenvectors', sym)
if len(rDIs) > 5:
BrDIs = pmag.di_boot(rDIs)
if len(nDIs) > 5: # plot on existing plots
pmagplotlib.plot_di_sym(FIG['bdirs'], BrDIs, sym)
else:
pmagplotlib.plot_eq(
FIG['bdirs'], BrDIs, 'Bootstrapped Eigenvectors')
if dist == 'B':
if len(nDIs) > 3 or len(rDIs) > 3:
pmagplotlib.plot_conf(FIG['eqarea'], etitle, [], npars, 0)
elif len(nDIs) > 3 and dist != 'BV':
pmagplotlib.plot_conf(FIG['eqarea'], etitle, [], npars, 0)
if len(rDIs) > 3:
pmagplotlib.plot_conf(FIG['eqarea'], etitle, [], rpars, 0)
elif len(rDIs) > 3 and dist != 'BV':
pmagplotlib.plot_conf(FIG['eqarea'], etitle, [], rpars, 0)
for key in list(FIG.keys()):
files = {}
#if filename: # use provided filename
# filename += '.' + fmt
if pmagplotlib.isServer: # use server plot naming convention
if plot_key == 'all':
filename = 'LO:_'+locations+'_SI:__SA:__SP:__CO:_'+crd+'_TY:_'+key+'_.'+fmt
else:
filename = 'LO:_'+locations+'_SI:_'+site+'_SA:_'+sample + \
'_SP:_'+str(specimen)+'_CO:_'+crd+'_TY:_'+key+'_.'+fmt
elif plot_key == 'all':
filename = 'all'
if locs:
loc_string = "_".join(
[str(loc).replace(' ', '_') for loc in locs])
filename += "_" + loc_string
filename += "_" + crd + "_" + key
filename += ".{}".format(fmt)
else: # use more readable naming convention
filename = ''
# fix this if plot_by is location , for example
use_names = {'location': [locations], 'site': [locations, site],
'sample': [locations, site, sample],
'specimen': [locations, site, sample, specimen]}
use = use_names[plot_key]
use.extend([crd, key])
# [locations, site, sample, specimen, crd, key]:
for item in use:
if item:
item = item.replace(' ', '_')
filename += item + '_'
if filename.endswith('_'):
filename = filename[:-1]
filename += ".{}".format(fmt)
if not pmagplotlib.isServer:
filename = os.path.join(dir_path, filename)
files[key] = filename
if pmagplotlib.isServer:
titles = {'eqarea': 'Equal Area Plot'}
FIG = pmagplotlib.add_borders(FIG, titles, con_id=con_id)
saved_figs = pmagplotlib.save_plots(FIG, files)
saved.extend(saved_figs)
elif save_plots:
saved_figs = pmagplotlib.save_plots(FIG, files, incl_directory=True)
saved.extend(saved_figs)
continue
elif interactive:
pmagplotlib.draw_figs(FIG)
ans = input(" S[a]ve to save plot, [q]uit, Return to continue: ")
if ans == "q":
return True, []
if ans == "a":
saved_figs = pmagplotlib.save_plots(FIG, files, incl_directory=True)
saved.extend(saved)
continue
return True, saved | makes equal area projections from declination/inclination data
Parameters
----------
in_file : str, default "sites.txt"
dir_path : str
output directory, default "."
input_dir_path : str
input file directory (if different from dir_path), default ""
spec_file : str
input specimen file name, default "specimens.txt"
samp_file: str
input sample file name, default "samples.txt"
site_file : str
input site file name, default "sites.txt"
loc_file : str
input location file name, default "locations.txt"
plot_by : str
[spc, sam, sit, loc, all] (specimen, sample, site, location, all), default "all"
crd : ['s','g','t'], coordinate system for plotting whereby:
s : specimen coordinates, aniso_tile_correction = -1
g : geographic coordinates, aniso_tile_correction = 0 (default)
t : tilt corrected coordinates, aniso_tile_correction = 100
ignore_tilt : bool
default False. If True, data are unoriented (allows plotting of measurement dec/inc)
save_plots : bool
plot and save non-interactively, default True
fmt : str
["png", "svg", "pdf", "jpg"], default "svg"
contour : bool
plot as color contour
colormap : str
color map for contour plotting, default "coolwarm"
see cartopy documentation for more options
plot_ell : str
[F,K,B,Be,Bv] plot Fisher, Kent, Bingham, Bootstrap ellipses or Boostrap eigenvectors
default "" plots none
n_plots : int
maximum number of plots to make, default 5
if you want to make all possible plots, specify "all"
interactive : bool, default False
interactively plot and display for each specimen
(this is best used on the command line or in the Python interpreter)
Returns
---------
type - Tuple : (True or False indicating if conversion was sucessful, file name(s) written) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L11384-L11861 |
PmagPy/PmagPy | pmagpy/ipmag.py | polemap_magic | def polemap_magic(loc_file="locations.txt", dir_path=".", interactive=False, crd="",
sym='ro', symsize=40, rsym='g^', rsymsize=40,
fmt="pdf", res="c", proj="ortho",
flip=False, anti=False, fancy=False,
ell=False, ages=False, lat_0=90., lon_0=0., save_plots=True):
"""
Use a MagIC format locations table to plot poles.
Parameters
----------
loc_file : str, default "locations.txt"
dir_path : str, default "."
directory name to find loc_file in (if not included in loc_file)
interactive : bool, default False
if True, interactively plot and display
(this is best used on the command line only)
crd : str, default ""
coordinate system [g, t] (geographic, tilt_corrected)
sym : str, default "ro"
symbol color and shape, default red circles
(see matplotlib documentation for more options)
symsize : int, default 40
symbol size
rsym : str, default "g^"
symbol for plotting reverse poles
rsymsize : int, default 40
symbol size for reverse poles
fmt : str, default "pdf"
format for figures, ["svg", "jpg", "pdf", "png"]
res : str, default "c"
resolution [c, l, i, h] (crude, low, intermediate, high)
proj : str, default "ortho"
ortho = orthographic
lcc = lambert conformal
moll = molweide
merc = mercator
flip : bool, default False
if True, flip reverse poles to normal antipode
anti : bool, default False
if True, plot antipodes for each pole
fancy : bool, default False
if True, plot topography (not yet implementedj)
ell : bool, default False
if True, plot ellipses
ages : bool, default False
if True, plot ages
lat_0 : float, default 90.
eyeball latitude
lon_0 : float, default 0.
eyeball longitude
save_plots : bool, default True
if True, create and save all requested plots
"""
# initialize and format variables
saved = []
lats, lons = [], []
Pars = []
dates, rlats, rlons = [], [], []
polarities = []
if interactive:
save_plots = False
full_path = pmag.resolve_file_name(loc_file, dir_path)
dir_path, loc_file = os.path.split(full_path)
# create MagIC contribution
con = cb.Contribution(dir_path, single_file=loc_file)
if not list(con.tables.keys()):
print("-W - Couldn't read in data")
return False, "Couldn't read in data"
FIG = {'map': 1}
pmagplotlib.plot_init(FIG['map'], 6, 6)
pole_container = con.tables['locations']
pole_df = pole_container.df
if 'pole_lat' not in pole_df.columns or 'pole_lon' not in pole_df.columns:
print("-W- pole_lat and pole_lon are required columns to run polemap_magic.py")
return False, "pole_lat and pole_lon are required columns to run polemap_magic.py"
# use records with pole_lat and pole_lon
cond1, cond2 = pole_df['pole_lat'].notnull(), pole_df['pole_lon'].notnull()
Results = pole_df[cond1 & cond2]
# don't plot identical poles twice
Results.drop_duplicates(subset=['pole_lat', 'pole_lon', 'location'], inplace=True)
# use tilt correction if available
# prioritize tilt-corrected poles
if 'dir_tilt_correction' in Results.columns:
if not crd:
coords = Results['dir_tilt_correction'].unique()
if 100. in coords:
crd = 't'
elif 0. in coords:
crd = 'g'
else:
crd = ''
coord_dict = {'g': 0, 't': 100}
coord = coord_dict[crd] if crd else ""
# filter results by dir_tilt_correction if available
if (coord or coord == 0) and 'dir_tilt_correction' in Results.columns:
Results = Results[Results['dir_tilt_correction'] == coord]
# get location name and average ages
loc_list = Results['location'].values
locations = ":".join(Results['location'].unique())
if 'age' not in Results.columns and 'age_low' in Results.columns and 'age_high' in Results.columns:
Results['age'] = Results['age_low']+0.5 * \
(Results['age_high']-Results['age_low'])
if 'age' in Results.columns and ages:
dates = Results['age'].unique()
if not any(Results.index):
print("-W- No poles could be plotted")
return False, "No poles could be plotted"
# go through rows and extract data
for ind, row in Results.iterrows():
lat, lon = float(row['pole_lat']), float(row['pole_lon'])
if 'dir_polarity' in row:
polarities.append(row['dir_polarity'])
if anti:
lats.append(-lat)
lon = lon + 180.
if lon > 360:
lon = lon - 360.
lons.append(lon)
elif not flip:
lats.append(lat)
lons.append(lon)
elif flip:
if lat < 0:
rlats.append(-lat)
lon = lon + 180.
if lon > 360:
lon = lon - 360
rlons.append(lon)
else:
lats.append(lat)
lons.append(lon)
ppars = []
ppars.append(lon)
ppars.append(lat)
ell1, ell2 = "", ""
if 'pole_dm' in list(row.keys()) and row['pole_dm']:
ell1 = float(row['pole_dm'])
if 'pole_dp' in list(row.keys()) and row['pole_dp']:
ell2 = float(row['pole_dp'])
if 'pole_alpha95' in list(row.keys()) and row['pole_alpha95']:
ell1, ell2 = float(row['pole_alpha95']), float(row['pole_alpha95'])
if ell1 and ell2 and lons:
ppars = []
ppars.append(lons[-1])
ppars.append(lats[-1])
ppars.append(ell1)
ppars.append(lons[-1])
try:
isign = abs(lats[-1]) / lats[-1]
except ZeroDivisionError:
isign = 1
ppars.append(lats[-1] - isign * 90.)
ppars.append(ell2)
ppars.append(lons[-1] + 90.)
ppars.append(0.)
Pars.append(ppars)
locations = locations.strip(':')
Opts = {'latmin': -90, 'latmax': 90, 'lonmin': 0., 'lonmax': 360.,
'lat_0': lat_0, 'lon_0': lon_0, 'proj': proj, 'sym': 'b+',
'symsize': 40, 'pltgrid': 0, 'res': res, 'boundinglat': 0.,
'edgecolor': 'face'}
Opts['details'] = {'coasts': 1, 'rivers': 0, 'states': 0,
'countries': 0, 'ocean': 1, 'fancy': fancy}
base_Opts = Opts.copy()
# make the base map with a blue triangle at the pole
pmagplotlib.plot_map(FIG['map'], [90.], [0.], Opts)
#Opts['pltgrid'] = -1
if proj=='merc':Opts['pltgrid']=1
Opts['sym'] = sym
Opts['symsize'] = symsize
if len(dates) > 0:
Opts['names'] = dates
if len(lats) > 0:
pole_lats = []
pole_lons = []
for num, lat in enumerate(lats):
lon = lons[num]
if lat > 0:
pole_lats.append(lat)
pole_lons.append(lon)
# plot the lats and lons of the poles
pmagplotlib.plot_map(FIG['map'], pole_lats, pole_lons, Opts)
# do reverse poles
if len(rlats) > 0:
reverse_Opts = Opts.copy()
reverse_Opts['sym'] = rsym
reverse_Opts['symsize'] = rsymsize
reverse_Opts['edgecolor'] = 'black'
# plot the lats and lons of the reverse poles
pmagplotlib.plot_map(FIG['map'], rlats, rlons, reverse_Opts)
Opts['names'] = []
titles = {}
files = {}
if pmagplotlib.isServer:
# plot each indvidual pole for the server
for ind in range(len(lats)):
lat = lats[ind]
lon = lons[ind]
polarity = ""
if 'polarites' in locals():
polarity = polarities[ind]
polarity = "_" + polarity if polarity else ""
location = loc_list[ind]
FIG["map_{}".format(ind)] = ind+2
pmagplotlib.plot_init(FIG['map_{}'.format(ind)], 6, 6)
pmagplotlib.plot_map(FIG['map_{}'.format(ind)], [90.], [0.], base_Opts)
pmagplotlib.plot_map(ind+2, [lat], [lon], Opts)
titles["map_{}".format(ind)] = location
if crd:
fname = "LO:_{}{}_TY:_POLE_map_{}.{}".format(location, polarity, crd, fmt)
fname_short = "LO:_{}{}_TY:_POLE_map_{}".format(location, polarity, crd)
else:
fname = "LO:_{}{}_TY:_POLE_map.{}".format(location, polarity, fmt)
fname_short = "LO:_{}{}_TY:_POLE_map".format(location, polarity)
# don't allow identically named files
if files:
file_values = files.values()
file_values_short = [fname.rsplit('.')[0] for fname in file_values]
if fname_short in file_values_short:
for val in [str(n) for n in range(1, 10)]:
fname = fname_short + "_{}.".format(val) + fmt
if fname not in file_values:
break
files["map_{}".format(ind)] = fname
# truncate location names so that ultra long filenames are not created
if len(locations) > 50:
locations = locations[:50]
if pmagplotlib.isServer:
# use server plot naming convention
con_id = ''
if 'contribution' in con.tables:
# try to get contribution id
if 'id' in con.tables['contribution'].df.columns:
con_id = con.tables['contribution'].df.iloc[0]['id']
files['map'] = 'MC:_{}_TY:_POLE_map_{}.{}'.format(con_id, crd, fmt)
else:
# no contribution id available
files['map'] = 'LO:_' + locations + '_TY:_POLE_map_{}.{}'.format(crd, fmt)
else:
# use readable naming convention for non-database use
files['map'] = '{}_POLE_map_{}.{}'.format(locations, crd, fmt)
#
if interactive and (not set_env.IS_WIN):
pmagplotlib.draw_figs(FIG)
if ell: # add ellipses if desired.
Opts['details'] = {'coasts': 0, 'rivers': 0, 'states': 0,
'countries': 0, 'ocean': 0, 'fancy': fancy}
Opts['pltgrid'] = -1 # turn off meridian replotting
Opts['symsize'] = 2
Opts['sym'] = 'g-'
for ppars in Pars:
if ppars[2] != 0:
PTS = pmagplotlib.plot_ell(FIG['map'], ppars, 'g.', 0, 0)
elats, elons = [], []
for pt in PTS:
elons.append(pt[0])
elats.append(pt[1])
# make the base map with a blue triangle at the pole
pmagplotlib.plot_map(FIG['map'], elats, elons, Opts)
if interactive and (not set_env.IS_WIN):
pmagplotlib.draw_figs(FIG)
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
titles['map'] = 'LO:_' + locations + '_POLE_map'
con_id = ''
if 'contribution' in con.tables:
if 'id' in con.tables['contribution'].df.columns:
con_id = con.tables['contribution'].df.iloc[0]['id']
loc_string = ""
if 'locations' in con.tables:
num_locs = len(con.tables['locations'].df.index.unique())
loc_string = "{} location{}".format(num_locs, 's' if num_locs > 1 else '')
num_lats = len([lat for lat in lats if lat > 0])
num_rlats = len(rlats)
npole_string = ""
rpole_string = ""
if num_lats:
npole_string = "{} normal ".format(num_lats) #, 's' if num_lats > 1 else '')
if num_rlats:
rpole_string = "{} reverse".format(num_rlats)
if num_lats + num_rlats > 1:
pole_string = "poles"
elif num_lats + num_rlats == 0:
pole_string = ""
else:
pole_string = "pole"
title = "MagIC contribution {}\n {} {}{} {}".format(con_id, loc_string, npole_string, rpole_string, pole_string)
titles['map'] = title.replace(' ', ' ')
FIG = pmagplotlib.add_borders(FIG, titles, black, purple, con_id)
saved = pmagplotlib.save_plots(FIG, files)
elif interactive:
pmagplotlib.draw_figs(FIG)
ans = input(" S[a]ve to save plot, Return to quit: ")
if ans == "a":
saved = pmagplotlib.save_plots(FIG, files)
else:
print("Good bye")
elif save_plots:
saved = pmagplotlib.save_plots(FIG, files)
return True, saved | python | def polemap_magic(loc_file="locations.txt", dir_path=".", interactive=False, crd="",
sym='ro', symsize=40, rsym='g^', rsymsize=40,
fmt="pdf", res="c", proj="ortho",
flip=False, anti=False, fancy=False,
ell=False, ages=False, lat_0=90., lon_0=0., save_plots=True):
"""
Use a MagIC format locations table to plot poles.
Parameters
----------
loc_file : str, default "locations.txt"
dir_path : str, default "."
directory name to find loc_file in (if not included in loc_file)
interactive : bool, default False
if True, interactively plot and display
(this is best used on the command line only)
crd : str, default ""
coordinate system [g, t] (geographic, tilt_corrected)
sym : str, default "ro"
symbol color and shape, default red circles
(see matplotlib documentation for more options)
symsize : int, default 40
symbol size
rsym : str, default "g^"
symbol for plotting reverse poles
rsymsize : int, default 40
symbol size for reverse poles
fmt : str, default "pdf"
format for figures, ["svg", "jpg", "pdf", "png"]
res : str, default "c"
resolution [c, l, i, h] (crude, low, intermediate, high)
proj : str, default "ortho"
ortho = orthographic
lcc = lambert conformal
moll = molweide
merc = mercator
flip : bool, default False
if True, flip reverse poles to normal antipode
anti : bool, default False
if True, plot antipodes for each pole
fancy : bool, default False
if True, plot topography (not yet implementedj)
ell : bool, default False
if True, plot ellipses
ages : bool, default False
if True, plot ages
lat_0 : float, default 90.
eyeball latitude
lon_0 : float, default 0.
eyeball longitude
save_plots : bool, default True
if True, create and save all requested plots
"""
# initialize and format variables
saved = []
lats, lons = [], []
Pars = []
dates, rlats, rlons = [], [], []
polarities = []
if interactive:
save_plots = False
full_path = pmag.resolve_file_name(loc_file, dir_path)
dir_path, loc_file = os.path.split(full_path)
# create MagIC contribution
con = cb.Contribution(dir_path, single_file=loc_file)
if not list(con.tables.keys()):
print("-W - Couldn't read in data")
return False, "Couldn't read in data"
FIG = {'map': 1}
pmagplotlib.plot_init(FIG['map'], 6, 6)
pole_container = con.tables['locations']
pole_df = pole_container.df
if 'pole_lat' not in pole_df.columns or 'pole_lon' not in pole_df.columns:
print("-W- pole_lat and pole_lon are required columns to run polemap_magic.py")
return False, "pole_lat and pole_lon are required columns to run polemap_magic.py"
# use records with pole_lat and pole_lon
cond1, cond2 = pole_df['pole_lat'].notnull(), pole_df['pole_lon'].notnull()
Results = pole_df[cond1 & cond2]
# don't plot identical poles twice
Results.drop_duplicates(subset=['pole_lat', 'pole_lon', 'location'], inplace=True)
# use tilt correction if available
# prioritize tilt-corrected poles
if 'dir_tilt_correction' in Results.columns:
if not crd:
coords = Results['dir_tilt_correction'].unique()
if 100. in coords:
crd = 't'
elif 0. in coords:
crd = 'g'
else:
crd = ''
coord_dict = {'g': 0, 't': 100}
coord = coord_dict[crd] if crd else ""
# filter results by dir_tilt_correction if available
if (coord or coord == 0) and 'dir_tilt_correction' in Results.columns:
Results = Results[Results['dir_tilt_correction'] == coord]
# get location name and average ages
loc_list = Results['location'].values
locations = ":".join(Results['location'].unique())
if 'age' not in Results.columns and 'age_low' in Results.columns and 'age_high' in Results.columns:
Results['age'] = Results['age_low']+0.5 * \
(Results['age_high']-Results['age_low'])
if 'age' in Results.columns and ages:
dates = Results['age'].unique()
if not any(Results.index):
print("-W- No poles could be plotted")
return False, "No poles could be plotted"
# go through rows and extract data
for ind, row in Results.iterrows():
lat, lon = float(row['pole_lat']), float(row['pole_lon'])
if 'dir_polarity' in row:
polarities.append(row['dir_polarity'])
if anti:
lats.append(-lat)
lon = lon + 180.
if lon > 360:
lon = lon - 360.
lons.append(lon)
elif not flip:
lats.append(lat)
lons.append(lon)
elif flip:
if lat < 0:
rlats.append(-lat)
lon = lon + 180.
if lon > 360:
lon = lon - 360
rlons.append(lon)
else:
lats.append(lat)
lons.append(lon)
ppars = []
ppars.append(lon)
ppars.append(lat)
ell1, ell2 = "", ""
if 'pole_dm' in list(row.keys()) and row['pole_dm']:
ell1 = float(row['pole_dm'])
if 'pole_dp' in list(row.keys()) and row['pole_dp']:
ell2 = float(row['pole_dp'])
if 'pole_alpha95' in list(row.keys()) and row['pole_alpha95']:
ell1, ell2 = float(row['pole_alpha95']), float(row['pole_alpha95'])
if ell1 and ell2 and lons:
ppars = []
ppars.append(lons[-1])
ppars.append(lats[-1])
ppars.append(ell1)
ppars.append(lons[-1])
try:
isign = abs(lats[-1]) / lats[-1]
except ZeroDivisionError:
isign = 1
ppars.append(lats[-1] - isign * 90.)
ppars.append(ell2)
ppars.append(lons[-1] + 90.)
ppars.append(0.)
Pars.append(ppars)
locations = locations.strip(':')
Opts = {'latmin': -90, 'latmax': 90, 'lonmin': 0., 'lonmax': 360.,
'lat_0': lat_0, 'lon_0': lon_0, 'proj': proj, 'sym': 'b+',
'symsize': 40, 'pltgrid': 0, 'res': res, 'boundinglat': 0.,
'edgecolor': 'face'}
Opts['details'] = {'coasts': 1, 'rivers': 0, 'states': 0,
'countries': 0, 'ocean': 1, 'fancy': fancy}
base_Opts = Opts.copy()
# make the base map with a blue triangle at the pole
pmagplotlib.plot_map(FIG['map'], [90.], [0.], Opts)
#Opts['pltgrid'] = -1
if proj=='merc':Opts['pltgrid']=1
Opts['sym'] = sym
Opts['symsize'] = symsize
if len(dates) > 0:
Opts['names'] = dates
if len(lats) > 0:
pole_lats = []
pole_lons = []
for num, lat in enumerate(lats):
lon = lons[num]
if lat > 0:
pole_lats.append(lat)
pole_lons.append(lon)
# plot the lats and lons of the poles
pmagplotlib.plot_map(FIG['map'], pole_lats, pole_lons, Opts)
# do reverse poles
if len(rlats) > 0:
reverse_Opts = Opts.copy()
reverse_Opts['sym'] = rsym
reverse_Opts['symsize'] = rsymsize
reverse_Opts['edgecolor'] = 'black'
# plot the lats and lons of the reverse poles
pmagplotlib.plot_map(FIG['map'], rlats, rlons, reverse_Opts)
Opts['names'] = []
titles = {}
files = {}
if pmagplotlib.isServer:
# plot each indvidual pole for the server
for ind in range(len(lats)):
lat = lats[ind]
lon = lons[ind]
polarity = ""
if 'polarites' in locals():
polarity = polarities[ind]
polarity = "_" + polarity if polarity else ""
location = loc_list[ind]
FIG["map_{}".format(ind)] = ind+2
pmagplotlib.plot_init(FIG['map_{}'.format(ind)], 6, 6)
pmagplotlib.plot_map(FIG['map_{}'.format(ind)], [90.], [0.], base_Opts)
pmagplotlib.plot_map(ind+2, [lat], [lon], Opts)
titles["map_{}".format(ind)] = location
if crd:
fname = "LO:_{}{}_TY:_POLE_map_{}.{}".format(location, polarity, crd, fmt)
fname_short = "LO:_{}{}_TY:_POLE_map_{}".format(location, polarity, crd)
else:
fname = "LO:_{}{}_TY:_POLE_map.{}".format(location, polarity, fmt)
fname_short = "LO:_{}{}_TY:_POLE_map".format(location, polarity)
# don't allow identically named files
if files:
file_values = files.values()
file_values_short = [fname.rsplit('.')[0] for fname in file_values]
if fname_short in file_values_short:
for val in [str(n) for n in range(1, 10)]:
fname = fname_short + "_{}.".format(val) + fmt
if fname not in file_values:
break
files["map_{}".format(ind)] = fname
# truncate location names so that ultra long filenames are not created
if len(locations) > 50:
locations = locations[:50]
if pmagplotlib.isServer:
# use server plot naming convention
con_id = ''
if 'contribution' in con.tables:
# try to get contribution id
if 'id' in con.tables['contribution'].df.columns:
con_id = con.tables['contribution'].df.iloc[0]['id']
files['map'] = 'MC:_{}_TY:_POLE_map_{}.{}'.format(con_id, crd, fmt)
else:
# no contribution id available
files['map'] = 'LO:_' + locations + '_TY:_POLE_map_{}.{}'.format(crd, fmt)
else:
# use readable naming convention for non-database use
files['map'] = '{}_POLE_map_{}.{}'.format(locations, crd, fmt)
#
if interactive and (not set_env.IS_WIN):
pmagplotlib.draw_figs(FIG)
if ell: # add ellipses if desired.
Opts['details'] = {'coasts': 0, 'rivers': 0, 'states': 0,
'countries': 0, 'ocean': 0, 'fancy': fancy}
Opts['pltgrid'] = -1 # turn off meridian replotting
Opts['symsize'] = 2
Opts['sym'] = 'g-'
for ppars in Pars:
if ppars[2] != 0:
PTS = pmagplotlib.plot_ell(FIG['map'], ppars, 'g.', 0, 0)
elats, elons = [], []
for pt in PTS:
elons.append(pt[0])
elats.append(pt[1])
# make the base map with a blue triangle at the pole
pmagplotlib.plot_map(FIG['map'], elats, elons, Opts)
if interactive and (not set_env.IS_WIN):
pmagplotlib.draw_figs(FIG)
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
titles['map'] = 'LO:_' + locations + '_POLE_map'
con_id = ''
if 'contribution' in con.tables:
if 'id' in con.tables['contribution'].df.columns:
con_id = con.tables['contribution'].df.iloc[0]['id']
loc_string = ""
if 'locations' in con.tables:
num_locs = len(con.tables['locations'].df.index.unique())
loc_string = "{} location{}".format(num_locs, 's' if num_locs > 1 else '')
num_lats = len([lat for lat in lats if lat > 0])
num_rlats = len(rlats)
npole_string = ""
rpole_string = ""
if num_lats:
npole_string = "{} normal ".format(num_lats) #, 's' if num_lats > 1 else '')
if num_rlats:
rpole_string = "{} reverse".format(num_rlats)
if num_lats + num_rlats > 1:
pole_string = "poles"
elif num_lats + num_rlats == 0:
pole_string = ""
else:
pole_string = "pole"
title = "MagIC contribution {}\n {} {}{} {}".format(con_id, loc_string, npole_string, rpole_string, pole_string)
titles['map'] = title.replace(' ', ' ')
FIG = pmagplotlib.add_borders(FIG, titles, black, purple, con_id)
saved = pmagplotlib.save_plots(FIG, files)
elif interactive:
pmagplotlib.draw_figs(FIG)
ans = input(" S[a]ve to save plot, Return to quit: ")
if ans == "a":
saved = pmagplotlib.save_plots(FIG, files)
else:
print("Good bye")
elif save_plots:
saved = pmagplotlib.save_plots(FIG, files)
return True, saved | Use a MagIC format locations table to plot poles.
Parameters
----------
loc_file : str, default "locations.txt"
dir_path : str, default "."
directory name to find loc_file in (if not included in loc_file)
interactive : bool, default False
if True, interactively plot and display
(this is best used on the command line only)
crd : str, default ""
coordinate system [g, t] (geographic, tilt_corrected)
sym : str, default "ro"
symbol color and shape, default red circles
(see matplotlib documentation for more options)
symsize : int, default 40
symbol size
rsym : str, default "g^"
symbol for plotting reverse poles
rsymsize : int, default 40
symbol size for reverse poles
fmt : str, default "pdf"
format for figures, ["svg", "jpg", "pdf", "png"]
res : str, default "c"
resolution [c, l, i, h] (crude, low, intermediate, high)
proj : str, default "ortho"
ortho = orthographic
lcc = lambert conformal
moll = molweide
merc = mercator
flip : bool, default False
if True, flip reverse poles to normal antipode
anti : bool, default False
if True, plot antipodes for each pole
fancy : bool, default False
if True, plot topography (not yet implementedj)
ell : bool, default False
if True, plot ellipses
ages : bool, default False
if True, plot ages
lat_0 : float, default 90.
eyeball latitude
lon_0 : float, default 0.
eyeball longitude
save_plots : bool, default True
if True, create and save all requested plots | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L11864-L12180 |
PmagPy/PmagPy | pmagpy/ipmag.py | chi_magic | def chi_magic(infile="measurements.txt", dir_path=".", experiments="",
fmt="svg", save_plots=True, interactive=False, contribution=None):
"""
Parameters
----------
infile : str, default "measurements.txt"
measurement infile
dir_path : str, default "."
input directory
experiments : str, default ""
experiment name to plot
fmt : str, default "svg"
format for figures, ["svg", "jpg", "pdf", "png"]
save_plots : bool, default True
save figures
interactive : bool, default False
if True, interactively plot and display
(this is best used on the command line only)
contribution : cb.Contribution, default None
if provided, use Contribution object instead of reading in
data from files
Returns
---------
(status, output_files) - Tuple : (True or False indicating if conversion was sucessful, file name(s) written)
"""
saved = []
if contribution:
chi_data_all = contribution.tables['measurements'].df
else:
infile = pmag.resolve_file_name(infile, dir_path)
chi_data_all = pd.read_csv(infile, sep='\t', header=1)
if not experiments:
try:
experiments = chi_data_all.experiment.unique()
except Exception as ex:
print(ex)
experiments = ["all"]
else:
experiments = [experiments]
plotnum = 0
figs = {}
fnames = {}
for exp in experiments:
if exp == "all":
chi_data = chi_data_all
chi_data = chi_data_all[chi_data_all.experiment == exp]
if len(chi_data) <= 1:
print('Not enough data to plot {}'.format(exp))
continue
plotnum += 1
if not save_plots:
pmagplotlib.plot_init(plotnum, 5, 5) # set up plot
else:
plt.figure(plotnum)
figs[str(plotnum)] = plotnum
fnames[str(plotnum)] = exp + '_temperature.{}'.format(fmt)
# get arrays of available temps, frequencies and fields
Ts = np.sort(chi_data.meas_temp.unique())
Fs = np.sort(chi_data.meas_freq.unique())
Bs = np.sort(chi_data.meas_field_ac.unique())
# plot chi versus temperature at constant field
b = Bs.max()
for num, f in enumerate(Fs):
this_f = chi_data[chi_data.meas_freq == f]
this_f = this_f[this_f.meas_field_ac == b]
plt.plot(this_f.meas_temp, 1e6*this_f.susc_chi_volume,
label='%i' % (f)+' Hz')
plt.legend()
plt.xlabel('Temperature (K)')
plt.ylabel('$\chi$ ($\mu$SI)')
plt.title('B = '+'%7.2e' % (b) + ' T')
plotnum += 1
figs[str(plotnum)] = plotnum
fnames[str(plotnum)] = exp + '_frequency.{}'.format(fmt)
if not save_plots:
pmagplotlib.plot_init(plotnum, 5, 5) # set up plot
else:
plt.figure(plotnum)
## plot chi versus frequency at constant B
b = Bs.max()
t = Ts.min()
this_t = chi_data[chi_data.meas_temp == t]
this_t = this_t[this_t.meas_field_ac == b]
plt.semilogx(this_t.meas_freq, 1e6 *
this_t.susc_chi_volume, label='%i' % (t)+' K')
plt.legend()
plt.xlabel('Frequency (Hz)')
plt.ylabel('$\chi$ ($\mu$SI)')
plt.title('B = '+'%7.2e' % (b) + ' T')
if interactive:
pmagplotlib.draw_figs(figs)
ans = input(
"enter s[a]ve to save files, [return] to quit ")
if ans == 'a':
saved.extend(pmagplotlib.save_plots(figs, fnames))
else:
return True, []
elif save_plots:
saved.extend(pmagplotlib.save_plots(figs, fnames))
return True, saved | python | def chi_magic(infile="measurements.txt", dir_path=".", experiments="",
fmt="svg", save_plots=True, interactive=False, contribution=None):
"""
Parameters
----------
infile : str, default "measurements.txt"
measurement infile
dir_path : str, default "."
input directory
experiments : str, default ""
experiment name to plot
fmt : str, default "svg"
format for figures, ["svg", "jpg", "pdf", "png"]
save_plots : bool, default True
save figures
interactive : bool, default False
if True, interactively plot and display
(this is best used on the command line only)
contribution : cb.Contribution, default None
if provided, use Contribution object instead of reading in
data from files
Returns
---------
(status, output_files) - Tuple : (True or False indicating if conversion was sucessful, file name(s) written)
"""
saved = []
if contribution:
chi_data_all = contribution.tables['measurements'].df
else:
infile = pmag.resolve_file_name(infile, dir_path)
chi_data_all = pd.read_csv(infile, sep='\t', header=1)
if not experiments:
try:
experiments = chi_data_all.experiment.unique()
except Exception as ex:
print(ex)
experiments = ["all"]
else:
experiments = [experiments]
plotnum = 0
figs = {}
fnames = {}
for exp in experiments:
if exp == "all":
chi_data = chi_data_all
chi_data = chi_data_all[chi_data_all.experiment == exp]
if len(chi_data) <= 1:
print('Not enough data to plot {}'.format(exp))
continue
plotnum += 1
if not save_plots:
pmagplotlib.plot_init(plotnum, 5, 5) # set up plot
else:
plt.figure(plotnum)
figs[str(plotnum)] = plotnum
fnames[str(plotnum)] = exp + '_temperature.{}'.format(fmt)
# get arrays of available temps, frequencies and fields
Ts = np.sort(chi_data.meas_temp.unique())
Fs = np.sort(chi_data.meas_freq.unique())
Bs = np.sort(chi_data.meas_field_ac.unique())
# plot chi versus temperature at constant field
b = Bs.max()
for num, f in enumerate(Fs):
this_f = chi_data[chi_data.meas_freq == f]
this_f = this_f[this_f.meas_field_ac == b]
plt.plot(this_f.meas_temp, 1e6*this_f.susc_chi_volume,
label='%i' % (f)+' Hz')
plt.legend()
plt.xlabel('Temperature (K)')
plt.ylabel('$\chi$ ($\mu$SI)')
plt.title('B = '+'%7.2e' % (b) + ' T')
plotnum += 1
figs[str(plotnum)] = plotnum
fnames[str(plotnum)] = exp + '_frequency.{}'.format(fmt)
if not save_plots:
pmagplotlib.plot_init(plotnum, 5, 5) # set up plot
else:
plt.figure(plotnum)
## plot chi versus frequency at constant B
b = Bs.max()
t = Ts.min()
this_t = chi_data[chi_data.meas_temp == t]
this_t = this_t[this_t.meas_field_ac == b]
plt.semilogx(this_t.meas_freq, 1e6 *
this_t.susc_chi_volume, label='%i' % (t)+' K')
plt.legend()
plt.xlabel('Frequency (Hz)')
plt.ylabel('$\chi$ ($\mu$SI)')
plt.title('B = '+'%7.2e' % (b) + ' T')
if interactive:
pmagplotlib.draw_figs(figs)
ans = input(
"enter s[a]ve to save files, [return] to quit ")
if ans == 'a':
saved.extend(pmagplotlib.save_plots(figs, fnames))
else:
return True, []
elif save_plots:
saved.extend(pmagplotlib.save_plots(figs, fnames))
return True, saved | Parameters
----------
infile : str, default "measurements.txt"
measurement infile
dir_path : str, default "."
input directory
experiments : str, default ""
experiment name to plot
fmt : str, default "svg"
format for figures, ["svg", "jpg", "pdf", "png"]
save_plots : bool, default True
save figures
interactive : bool, default False
if True, interactively plot and display
(this is best used on the command line only)
contribution : cb.Contribution, default None
if provided, use Contribution object instead of reading in
data from files
Returns
---------
(status, output_files) - Tuple : (True or False indicating if conversion was sucessful, file name(s) written) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L12183-L12291 |
PmagPy/PmagPy | pmagpy/ipmag.py | quick_hyst | def quick_hyst(dir_path=".", meas_file="measurements.txt", save_plots=True,
interactive=False, fmt="png", specimen="", verbose=True, n_plots=10,
contribution=None):
"""
makes specimen plots of hysteresis data
Parameters
----------
dir_path : str, default "."
input directory
meas_file : str, default "measurements.txt"
name of MagIC measurement file
save_plots : bool, default True
save figures
interactive : bool, default False
if True, interactively plot and display
(this is best used on the command line only)
fmt : str, default "svg"
format for figures, ["svg", "jpg", "pdf", "png"]
specimen : str, default ""
specific specimen to plot
verbose : bool, default True
if True, print more verbose output
Returns
---------
Tuple : (True or False indicating if conversion was sucessful, output file name(s) written)
"""
if contribution is None:
con = cb.Contribution(dir_path, read_tables=['measurements'],
custom_filenames={'measurements': meas_file})
else:
con = contribution
# get as much name data as possible (used for naming plots)
if 'measurements' not in con.tables:
print("-W- No measurement file found")
return False, []
con.propagate_location_to_measurements()
if 'measurements' not in con.tables:
print(main.__doc__)
print('bad file')
return False, []
meas_container = con.tables['measurements']
#meas_df = meas_container.df
#
# initialize some variables
# define figure numbers for hyst,deltaM,DdeltaM curves
saved = []
HystRecs = []
HDD = {}
HDD['hyst'] = 1
pmagplotlib.plot_init(HDD['hyst'], 5, 5)
#
# get list of unique experiment names and specimen names
#
sids = []
hyst_data = meas_container.get_records_for_code('LP-HYS')
#experiment_names = hyst_data['experiment_name'].unique()
if not len(hyst_data):
print("-W- No hysteresis data found")
return False, []
if 'specimen' not in hyst_data.columns:
print('-W- No specimen names in measurements data, cannot complete quick_hyst.py')
return False, []
sids = hyst_data['specimen'].unique()
# if 'treat_temp' is provided, use that value, otherwise assume 300
hyst_data['treat_temp'].where(
hyst_data['treat_temp'].notnull(), '300', inplace=True)
# start at first specimen, or at provided specimen ('-spc')
k = 0
if specimen:
try:
print(sids)
k = list(sids).index(specimen)
except ValueError:
print('-W- No specimen named: {}.'.format(specimen))
print('-W- Please provide a valid specimen name')
return False, []
intlist = ['magn_moment', 'magn_volume', 'magn_mass']
if len(sids) > n_plots:
sids = sids[:n_plots]
while k < len(sids):
locname, site, sample, synth = '', '', '', ''
s = sids[k]
if verbose:
print(s, k + 1, 'out of ', len(sids))
# B, M for hysteresis, Bdcd,Mdcd for irm-dcd data
B, M = [], []
# get all measurements for this specimen
spec = hyst_data[hyst_data['specimen'] == s]
# get names
if 'location' in spec:
locname = spec['location'].iloc[0]
if 'site' in spec:
site = spec['sample'].iloc[0]
if 'sample' in spec:
sample = spec['sample'].iloc[0]
# get all records with non-blank values in any intlist column
# find intensity data
for int_column in intlist:
if int_column in spec.columns:
int_col = int_column
break
meas_data = spec[spec[int_column].notnull()]
if len(meas_data) == 0:
break
#
c = ['k-', 'b-', 'c-', 'g-', 'm-', 'r-', 'y-']
cnum = 0
Temps = []
xlab, ylab, title = '', '', ''
Temps = meas_data['treat_temp'].unique()
for t in Temps:
print('working on t: ', t)
t_data = meas_data[meas_data['treat_temp'] == t]
m = int_col
B = t_data['meas_field_dc'].astype(float).values
M = t_data[m].astype(float).values
# now plot the hysteresis curve(s)
#
if len(B) > 0:
B = np.array(B)
M = np.array(M)
if t == Temps[-1]:
xlab = 'Field (T)'
ylab = m
title = 'Hysteresis: ' + s
if t == Temps[0]:
pmagplotlib.clearFIG(HDD['hyst'])
pmagplotlib.plot_xy(
HDD['hyst'], B, M, sym=c[cnum], xlab=xlab, ylab=ylab, title=title)
pmagplotlib.plot_xy(HDD['hyst'], [
1.1*B.min(), 1.1*B.max()], [0, 0], sym='k-', xlab=xlab, ylab=ylab, title=title)
pmagplotlib.plot_xy(HDD['hyst'], [0, 0], [
1.1*M.min(), 1.1*M.max()], sym='k-', xlab=xlab, ylab=ylab, title=title)
if not save_plots and not set_env.IS_WIN:
pmagplotlib.draw_figs(HDD)
cnum += 1
if cnum == len(c):
cnum = 0
#
files = {}
if save_plots:
if specimen != "":
s = specimen
for key in list(HDD.keys()):
if pmagplotlib.isServer:
if synth == '':
files[key] = "LO:_"+locname+'_SI:_'+site + \
'_SA:_'+sample+'_SP:_'+s+'_TY:_'+key+'_.'+fmt
else:
files[key] = 'SY:_'+synth+'_TY:_'+key+'_.'+fmt
else:
if synth == '':
filename = ''
for item in [locname, site, sample, s, key]:
if item:
item = item.replace(' ', '_')
filename += item + '_'
if filename.endswith('_'):
filename = filename[:-1]
filename += ".{}".format(fmt)
files[key] = filename
else:
files[key] = "{}_{}.{}".format(synth, key, fmt)
pmagplotlib.save_plots(HDD, files)
saved.extend([value for value in files.values()])
if specimen:
return True, saved
if interactive:
pmagplotlib.draw_figs(HDD)
ans = input(
"S[a]ve plots, [s]pecimen name, [q]uit, <return> to continue\n ")
if ans == "a":
files = {}
for key in list(HDD.keys()):
if pmagplotlib.isServer: # use server plot naming convention
locname = locname if locname else ""
site = site if site else ""
sample = sample if sample else ""
files[key] = "LO:_"+locname+'_SI:_'+site + \
'_SA:_'+sample+'_SP:_'+s+'_TY:_'+key+'_.'+fmt
else: # use more readable plot naming convention
filename = ''
for item in [locname, site, sample, s, key]:
if item:
item = item.replace(' ', '_')
filename += item + '_'
if filename.endswith('_'):
filename = filename[:-1]
filename += ".{}".format(fmt)
files[key] = filename
pmagplotlib.save_plots(HDD, files)
saved.extend([value for value in files.values()])
if ans == '':
k += 1
if ans == "p":
del HystRecs[-1]
k -= 1
if ans == 'q':
print("Good bye")
return True, []
if ans == 's':
keepon = 1
specimen = input(
'Enter desired specimen name (or first part there of): ')
while keepon == 1:
try:
k = list(sids).index(specimen)
keepon = 0
except ValueError:
tmplist = []
for qq in range(len(sids)):
if specimen in sids[qq]:
tmplist.append(sids[qq])
print(specimen, " not found, but this was: ")
print(tmplist)
specimen = input('Select one or try again\n ')
k = list(sids).index(specimen)
else:
k += 1
if not len(B):
if verbose:
print('skipping this one - no hysteresis data')
k += 1
return True, saved | python | def quick_hyst(dir_path=".", meas_file="measurements.txt", save_plots=True,
interactive=False, fmt="png", specimen="", verbose=True, n_plots=10,
contribution=None):
"""
makes specimen plots of hysteresis data
Parameters
----------
dir_path : str, default "."
input directory
meas_file : str, default "measurements.txt"
name of MagIC measurement file
save_plots : bool, default True
save figures
interactive : bool, default False
if True, interactively plot and display
(this is best used on the command line only)
fmt : str, default "svg"
format for figures, ["svg", "jpg", "pdf", "png"]
specimen : str, default ""
specific specimen to plot
verbose : bool, default True
if True, print more verbose output
Returns
---------
Tuple : (True or False indicating if conversion was sucessful, output file name(s) written)
"""
if contribution is None:
con = cb.Contribution(dir_path, read_tables=['measurements'],
custom_filenames={'measurements': meas_file})
else:
con = contribution
# get as much name data as possible (used for naming plots)
if 'measurements' not in con.tables:
print("-W- No measurement file found")
return False, []
con.propagate_location_to_measurements()
if 'measurements' not in con.tables:
print(main.__doc__)
print('bad file')
return False, []
meas_container = con.tables['measurements']
#meas_df = meas_container.df
#
# initialize some variables
# define figure numbers for hyst,deltaM,DdeltaM curves
saved = []
HystRecs = []
HDD = {}
HDD['hyst'] = 1
pmagplotlib.plot_init(HDD['hyst'], 5, 5)
#
# get list of unique experiment names and specimen names
#
sids = []
hyst_data = meas_container.get_records_for_code('LP-HYS')
#experiment_names = hyst_data['experiment_name'].unique()
if not len(hyst_data):
print("-W- No hysteresis data found")
return False, []
if 'specimen' not in hyst_data.columns:
print('-W- No specimen names in measurements data, cannot complete quick_hyst.py')
return False, []
sids = hyst_data['specimen'].unique()
# if 'treat_temp' is provided, use that value, otherwise assume 300
hyst_data['treat_temp'].where(
hyst_data['treat_temp'].notnull(), '300', inplace=True)
# start at first specimen, or at provided specimen ('-spc')
k = 0
if specimen:
try:
print(sids)
k = list(sids).index(specimen)
except ValueError:
print('-W- No specimen named: {}.'.format(specimen))
print('-W- Please provide a valid specimen name')
return False, []
intlist = ['magn_moment', 'magn_volume', 'magn_mass']
if len(sids) > n_plots:
sids = sids[:n_plots]
while k < len(sids):
locname, site, sample, synth = '', '', '', ''
s = sids[k]
if verbose:
print(s, k + 1, 'out of ', len(sids))
# B, M for hysteresis, Bdcd,Mdcd for irm-dcd data
B, M = [], []
# get all measurements for this specimen
spec = hyst_data[hyst_data['specimen'] == s]
# get names
if 'location' in spec:
locname = spec['location'].iloc[0]
if 'site' in spec:
site = spec['sample'].iloc[0]
if 'sample' in spec:
sample = spec['sample'].iloc[0]
# get all records with non-blank values in any intlist column
# find intensity data
for int_column in intlist:
if int_column in spec.columns:
int_col = int_column
break
meas_data = spec[spec[int_column].notnull()]
if len(meas_data) == 0:
break
#
c = ['k-', 'b-', 'c-', 'g-', 'm-', 'r-', 'y-']
cnum = 0
Temps = []
xlab, ylab, title = '', '', ''
Temps = meas_data['treat_temp'].unique()
for t in Temps:
print('working on t: ', t)
t_data = meas_data[meas_data['treat_temp'] == t]
m = int_col
B = t_data['meas_field_dc'].astype(float).values
M = t_data[m].astype(float).values
# now plot the hysteresis curve(s)
#
if len(B) > 0:
B = np.array(B)
M = np.array(M)
if t == Temps[-1]:
xlab = 'Field (T)'
ylab = m
title = 'Hysteresis: ' + s
if t == Temps[0]:
pmagplotlib.clearFIG(HDD['hyst'])
pmagplotlib.plot_xy(
HDD['hyst'], B, M, sym=c[cnum], xlab=xlab, ylab=ylab, title=title)
pmagplotlib.plot_xy(HDD['hyst'], [
1.1*B.min(), 1.1*B.max()], [0, 0], sym='k-', xlab=xlab, ylab=ylab, title=title)
pmagplotlib.plot_xy(HDD['hyst'], [0, 0], [
1.1*M.min(), 1.1*M.max()], sym='k-', xlab=xlab, ylab=ylab, title=title)
if not save_plots and not set_env.IS_WIN:
pmagplotlib.draw_figs(HDD)
cnum += 1
if cnum == len(c):
cnum = 0
#
files = {}
if save_plots:
if specimen != "":
s = specimen
for key in list(HDD.keys()):
if pmagplotlib.isServer:
if synth == '':
files[key] = "LO:_"+locname+'_SI:_'+site + \
'_SA:_'+sample+'_SP:_'+s+'_TY:_'+key+'_.'+fmt
else:
files[key] = 'SY:_'+synth+'_TY:_'+key+'_.'+fmt
else:
if synth == '':
filename = ''
for item in [locname, site, sample, s, key]:
if item:
item = item.replace(' ', '_')
filename += item + '_'
if filename.endswith('_'):
filename = filename[:-1]
filename += ".{}".format(fmt)
files[key] = filename
else:
files[key] = "{}_{}.{}".format(synth, key, fmt)
pmagplotlib.save_plots(HDD, files)
saved.extend([value for value in files.values()])
if specimen:
return True, saved
if interactive:
pmagplotlib.draw_figs(HDD)
ans = input(
"S[a]ve plots, [s]pecimen name, [q]uit, <return> to continue\n ")
if ans == "a":
files = {}
for key in list(HDD.keys()):
if pmagplotlib.isServer: # use server plot naming convention
locname = locname if locname else ""
site = site if site else ""
sample = sample if sample else ""
files[key] = "LO:_"+locname+'_SI:_'+site + \
'_SA:_'+sample+'_SP:_'+s+'_TY:_'+key+'_.'+fmt
else: # use more readable plot naming convention
filename = ''
for item in [locname, site, sample, s, key]:
if item:
item = item.replace(' ', '_')
filename += item + '_'
if filename.endswith('_'):
filename = filename[:-1]
filename += ".{}".format(fmt)
files[key] = filename
pmagplotlib.save_plots(HDD, files)
saved.extend([value for value in files.values()])
if ans == '':
k += 1
if ans == "p":
del HystRecs[-1]
k -= 1
if ans == 'q':
print("Good bye")
return True, []
if ans == 's':
keepon = 1
specimen = input(
'Enter desired specimen name (or first part there of): ')
while keepon == 1:
try:
k = list(sids).index(specimen)
keepon = 0
except ValueError:
tmplist = []
for qq in range(len(sids)):
if specimen in sids[qq]:
tmplist.append(sids[qq])
print(specimen, " not found, but this was: ")
print(tmplist)
specimen = input('Select one or try again\n ')
k = list(sids).index(specimen)
else:
k += 1
if not len(B):
if verbose:
print('skipping this one - no hysteresis data')
k += 1
return True, saved | makes specimen plots of hysteresis data
Parameters
----------
dir_path : str, default "."
input directory
meas_file : str, default "measurements.txt"
name of MagIC measurement file
save_plots : bool, default True
save figures
interactive : bool, default False
if True, interactively plot and display
(this is best used on the command line only)
fmt : str, default "svg"
format for figures, ["svg", "jpg", "pdf", "png"]
specimen : str, default ""
specific specimen to plot
verbose : bool, default True
if True, print more verbose output
Returns
---------
Tuple : (True or False indicating if conversion was sucessful, output file name(s) written) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L12294-L12527 |
PmagPy/PmagPy | pmagpy/ipmag.py | vgpmap_magic | def vgpmap_magic(dir_path=".", results_file="sites.txt", crd="",
sym='ro', size=8, rsym="g^", rsize=8,
fmt="pdf", res="c", proj="ortho",
flip=False, anti=False, fancy=False,
ell=False, ages=False, lat_0=0, lon_0=0,
save_plots=True, interactive=False, contribution=None):
"""
makes a map of vgps and a95/dp,dm for site means in a sites table
Parameters
----------
dir_path : str, default "."
input directory path
results_file : str, default "sites.txt"
name of MagIC format sites file
crd : str, default ""
coordinate system [g, t] (geographic, tilt_corrected)
sym : str, default "ro"
symbol color and shape, default red circles
(see matplotlib documentation for more color/shape options)
size : int, default 8
symbol size
rsym : str, default "g^"
symbol for plotting reverse poles
(see matplotlib documentation for more color/shape options)
rsize : int, default 8
symbol size for reverse poles
fmt : str, default "pdf"
format for figures, ["svg", "jpg", "pdf", "png"]
res : str, default "c"
resolution [c, l, i, h] (crude, low, intermediate, high)
proj : str, default "ortho"
ortho = orthographic
lcc = lambert conformal
moll = molweide
merc = mercator
flip : bool, default False
if True, flip reverse poles to normal antipode
anti : bool, default False
if True, plot antipodes for each pole
fancy : bool, default False
if True, plot topography (not yet implemented)
ell : bool, default False
if True, plot ellipses
ages : bool, default False
if True, plot ages
lat_0 : float, default 0.
eyeball latitude
lon_0 : float, default 0.
eyeball longitude
save_plots : bool, default True
if True, create and save all requested plots
interactive : bool, default False
if True, interactively plot and display
(this is best used on the command line only)
Returns
---------
(status, output_files) - Tuple : (True or False indicating if conversion was sucessful, file name(s) written)
"""
coord_dict = {'g': 0, 't': 100}
coord = coord_dict[crd] if crd else ""
if contribution is None:
con = cb.Contribution(dir_path, single_file=results_file)
else:
con = contribution
if not list(con.tables.keys()):
print("-W - Couldn't read in data")
return False, []
if 'sites' not in con.tables:
print("-W - No sites data")
return False, []
FIG = {'map': 1}
pmagplotlib.plot_init(FIG['map'], 6, 6)
# read in sites file
lats, lons = [], []
Pars = []
dates, rlats, rlons = [], [], []
site_container = con.tables['sites']
site_container.front_and_backfill(['location'])
site_df = site_container.df
# use records with vgp_lat and vgp_lon
if 'vgp_lat' in site_df.columns and 'vgp_lon' in site_df.columns:
cond1, cond2 = site_df['vgp_lat'].notnull(), site_df['vgp_lon'].notnull()
else:
print ('nothing to plot')
sys.exit()
Results = site_df[cond1 & cond2]
# use tilt correction
if coord and 'dir_tilt_correction' in Results.columns:
Results = Results[Results['dir_tilt_correction'] == coord]
# get location name and average ages
locs = Results['location'].dropna().unique()
if len(locs):
location = ":".join(Results['location'].unique())
else:
location = ""
if 'age' in Results.columns and ages == 1:
dates = Results['age'].unique()
# go through rows and extract data
for ind, row in Results.iterrows():
try:
lat, lon = float(row['vgp_lat']), float(row['vgp_lon'])
except ValueError:
lat = float(str(row['vgp_lat']).replace(' ', '').translate({0x2c: '.', 0xa0: None, 0x2212: '-'}))
lon = float(str(row['vgp_lon']).replace(' ', '').translate({0x2c: '.', 0xa0: None, 0x2212: '-'}))
if anti == 1:
lats.append(-lat)
lon = lon + 180.
if lon > 360:
lon = lon - 360.
lons.append(lon)
elif flip == 0:
lats.append(lat)
lons.append(lon)
elif flip == 1:
if lat < 0:
rlats.append(-lat)
lon = lon + 180.
if lon > 360:
lon = lon - 360
rlons.append(lon)
else:
lats.append(lat)
lons.append(lon)
ppars = []
ppars.append(lon)
ppars.append(lat)
ell1, ell2 = "", ""
if 'vgp_dm' in list(row.keys()) and row['vgp_dm']:
ell1 = float(row['vgp_dm'])
if 'vgp_dp' in list(row.keys()) and row['vgp_dp']:
ell2 = float(row['vgp_dp'])
if 'vgp_alpha95' in list(row.keys()) and (row['vgp_alpha95'] or row['vgp_alpha95'] == 0):
ell1, ell2 = float(row['vgp_alpha95']), float(row['vgp_alpha95'])
if ell1 and ell2:
ppars = []
ppars.append(lons[-1])
ppars.append(lats[-1])
ppars.append(ell1)
ppars.append(lons[-1])
try:
isign = abs(lats[-1]) / lats[-1]
except ZeroDivisionError:
isign = 1
ppars.append(lats[-1] - isign * 90.)
ppars.append(ell2)
ppars.append(lons[-1] + 90.)
ppars.append(0.)
Pars.append(ppars)
location = location.strip(':')
Opts = {'latmin': -90, 'latmax': 90, 'lonmin': 0., 'lonmax': 360.,
'lat_0': lat_0, 'lon_0': lon_0, 'proj': proj, 'sym': 'bs',
'symsize': 3, 'pltgrid': 0, 'res': res, 'boundinglat': 0.}
Opts['details'] = {'coasts': 1, 'rivers': 0, 'states': 0,
'countries': 0, 'ocean': 1, 'fancy': fancy}
# make the base map with a blue triangle at the pole
pmagplotlib.plot_map(FIG['map'], [90.], [0.], Opts)
Opts['pltgrid'] = -1
Opts['sym'] = sym
Opts['symsize'] = size
if len(dates) > 0:
Opts['names'] = dates
if len(lats) > 0:
# add the lats and lons of the poles
pmagplotlib.plot_map(FIG['map'], lats, lons, Opts)
Opts['names'] = []
if len(rlats) > 0:
Opts['sym'] = rsym
Opts['symsize'] = rsize
# add the lats and lons of the poles
pmagplotlib.plot_map(FIG['map'], rlats, rlons, Opts)
if not save_plots and not set_env.IS_WIN:
pmagplotlib.draw_figs(FIG)
if ell == 1: # add ellipses if desired.
Opts['details'] = {'coasts': 0, 'rivers': 0, 'states': 0,
'countries': 0, 'ocean': 0, 'fancy': fancy}
Opts['pltgrid'] = -1 # turn off meridian replotting
Opts['symsize'] = 2
Opts['sym'] = 'g-'
for ppars in Pars:
if ppars[2] != 0:
PTS = pmagplotlib.plot_ell(FIG['map'], ppars, 'g.', 0, 0)
elats, elons = [], []
for pt in PTS:
elons.append(pt[0])
elats.append(pt[1])
# make the base map with a blue triangle at the pole
pmagplotlib.plot_map(FIG['map'], elats, elons, Opts)
if not save_plots and not set_env.IS_WIN:
pmagplotlib.draw_figs(FIG)
files = {}
for key in list(FIG.keys()):
if pmagplotlib.isServer: # use server plot naming convention
files[key] = 'LO:_' + location + '_TY:_VGP_map.' + fmt
con.add_magic_table('contribution')
con_id = con.get_con_id()
if con_id:
files[key] = 'MC:_' + str(con_id) + '_' + files[key]
else: # use more readable naming convention
files[key] = '{}_VGP_map.{}'.format(location, fmt)
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
titles = {}
titles['map'] = location + ' VGP map'
FIG = pmagplotlib.add_borders(FIG, titles, black, purple)
pmagplotlib.save_plots(FIG, files)
elif interactive:
pmagplotlib.draw_figs(FIG)
ans = input(" S[a]ve to save plot, Return to quit: ")
if ans == "a":
pmagplotlib.save_plots(FIG, files)
return True, files.values()
else:
print("Good bye")
return True, []
elif save_plots:
pmagplotlib.save_plots(FIG, files)
return True, files.values() | python | def vgpmap_magic(dir_path=".", results_file="sites.txt", crd="",
sym='ro', size=8, rsym="g^", rsize=8,
fmt="pdf", res="c", proj="ortho",
flip=False, anti=False, fancy=False,
ell=False, ages=False, lat_0=0, lon_0=0,
save_plots=True, interactive=False, contribution=None):
"""
makes a map of vgps and a95/dp,dm for site means in a sites table
Parameters
----------
dir_path : str, default "."
input directory path
results_file : str, default "sites.txt"
name of MagIC format sites file
crd : str, default ""
coordinate system [g, t] (geographic, tilt_corrected)
sym : str, default "ro"
symbol color and shape, default red circles
(see matplotlib documentation for more color/shape options)
size : int, default 8
symbol size
rsym : str, default "g^"
symbol for plotting reverse poles
(see matplotlib documentation for more color/shape options)
rsize : int, default 8
symbol size for reverse poles
fmt : str, default "pdf"
format for figures, ["svg", "jpg", "pdf", "png"]
res : str, default "c"
resolution [c, l, i, h] (crude, low, intermediate, high)
proj : str, default "ortho"
ortho = orthographic
lcc = lambert conformal
moll = molweide
merc = mercator
flip : bool, default False
if True, flip reverse poles to normal antipode
anti : bool, default False
if True, plot antipodes for each pole
fancy : bool, default False
if True, plot topography (not yet implemented)
ell : bool, default False
if True, plot ellipses
ages : bool, default False
if True, plot ages
lat_0 : float, default 0.
eyeball latitude
lon_0 : float, default 0.
eyeball longitude
save_plots : bool, default True
if True, create and save all requested plots
interactive : bool, default False
if True, interactively plot and display
(this is best used on the command line only)
Returns
---------
(status, output_files) - Tuple : (True or False indicating if conversion was sucessful, file name(s) written)
"""
coord_dict = {'g': 0, 't': 100}
coord = coord_dict[crd] if crd else ""
if contribution is None:
con = cb.Contribution(dir_path, single_file=results_file)
else:
con = contribution
if not list(con.tables.keys()):
print("-W - Couldn't read in data")
return False, []
if 'sites' not in con.tables:
print("-W - No sites data")
return False, []
FIG = {'map': 1}
pmagplotlib.plot_init(FIG['map'], 6, 6)
# read in sites file
lats, lons = [], []
Pars = []
dates, rlats, rlons = [], [], []
site_container = con.tables['sites']
site_container.front_and_backfill(['location'])
site_df = site_container.df
# use records with vgp_lat and vgp_lon
if 'vgp_lat' in site_df.columns and 'vgp_lon' in site_df.columns:
cond1, cond2 = site_df['vgp_lat'].notnull(), site_df['vgp_lon'].notnull()
else:
print ('nothing to plot')
sys.exit()
Results = site_df[cond1 & cond2]
# use tilt correction
if coord and 'dir_tilt_correction' in Results.columns:
Results = Results[Results['dir_tilt_correction'] == coord]
# get location name and average ages
locs = Results['location'].dropna().unique()
if len(locs):
location = ":".join(Results['location'].unique())
else:
location = ""
if 'age' in Results.columns and ages == 1:
dates = Results['age'].unique()
# go through rows and extract data
for ind, row in Results.iterrows():
try:
lat, lon = float(row['vgp_lat']), float(row['vgp_lon'])
except ValueError:
lat = float(str(row['vgp_lat']).replace(' ', '').translate({0x2c: '.', 0xa0: None, 0x2212: '-'}))
lon = float(str(row['vgp_lon']).replace(' ', '').translate({0x2c: '.', 0xa0: None, 0x2212: '-'}))
if anti == 1:
lats.append(-lat)
lon = lon + 180.
if lon > 360:
lon = lon - 360.
lons.append(lon)
elif flip == 0:
lats.append(lat)
lons.append(lon)
elif flip == 1:
if lat < 0:
rlats.append(-lat)
lon = lon + 180.
if lon > 360:
lon = lon - 360
rlons.append(lon)
else:
lats.append(lat)
lons.append(lon)
ppars = []
ppars.append(lon)
ppars.append(lat)
ell1, ell2 = "", ""
if 'vgp_dm' in list(row.keys()) and row['vgp_dm']:
ell1 = float(row['vgp_dm'])
if 'vgp_dp' in list(row.keys()) and row['vgp_dp']:
ell2 = float(row['vgp_dp'])
if 'vgp_alpha95' in list(row.keys()) and (row['vgp_alpha95'] or row['vgp_alpha95'] == 0):
ell1, ell2 = float(row['vgp_alpha95']), float(row['vgp_alpha95'])
if ell1 and ell2:
ppars = []
ppars.append(lons[-1])
ppars.append(lats[-1])
ppars.append(ell1)
ppars.append(lons[-1])
try:
isign = abs(lats[-1]) / lats[-1]
except ZeroDivisionError:
isign = 1
ppars.append(lats[-1] - isign * 90.)
ppars.append(ell2)
ppars.append(lons[-1] + 90.)
ppars.append(0.)
Pars.append(ppars)
location = location.strip(':')
Opts = {'latmin': -90, 'latmax': 90, 'lonmin': 0., 'lonmax': 360.,
'lat_0': lat_0, 'lon_0': lon_0, 'proj': proj, 'sym': 'bs',
'symsize': 3, 'pltgrid': 0, 'res': res, 'boundinglat': 0.}
Opts['details'] = {'coasts': 1, 'rivers': 0, 'states': 0,
'countries': 0, 'ocean': 1, 'fancy': fancy}
# make the base map with a blue triangle at the pole
pmagplotlib.plot_map(FIG['map'], [90.], [0.], Opts)
Opts['pltgrid'] = -1
Opts['sym'] = sym
Opts['symsize'] = size
if len(dates) > 0:
Opts['names'] = dates
if len(lats) > 0:
# add the lats and lons of the poles
pmagplotlib.plot_map(FIG['map'], lats, lons, Opts)
Opts['names'] = []
if len(rlats) > 0:
Opts['sym'] = rsym
Opts['symsize'] = rsize
# add the lats and lons of the poles
pmagplotlib.plot_map(FIG['map'], rlats, rlons, Opts)
if not save_plots and not set_env.IS_WIN:
pmagplotlib.draw_figs(FIG)
if ell == 1: # add ellipses if desired.
Opts['details'] = {'coasts': 0, 'rivers': 0, 'states': 0,
'countries': 0, 'ocean': 0, 'fancy': fancy}
Opts['pltgrid'] = -1 # turn off meridian replotting
Opts['symsize'] = 2
Opts['sym'] = 'g-'
for ppars in Pars:
if ppars[2] != 0:
PTS = pmagplotlib.plot_ell(FIG['map'], ppars, 'g.', 0, 0)
elats, elons = [], []
for pt in PTS:
elons.append(pt[0])
elats.append(pt[1])
# make the base map with a blue triangle at the pole
pmagplotlib.plot_map(FIG['map'], elats, elons, Opts)
if not save_plots and not set_env.IS_WIN:
pmagplotlib.draw_figs(FIG)
files = {}
for key in list(FIG.keys()):
if pmagplotlib.isServer: # use server plot naming convention
files[key] = 'LO:_' + location + '_TY:_VGP_map.' + fmt
con.add_magic_table('contribution')
con_id = con.get_con_id()
if con_id:
files[key] = 'MC:_' + str(con_id) + '_' + files[key]
else: # use more readable naming convention
files[key] = '{}_VGP_map.{}'.format(location, fmt)
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
titles = {}
titles['map'] = location + ' VGP map'
FIG = pmagplotlib.add_borders(FIG, titles, black, purple)
pmagplotlib.save_plots(FIG, files)
elif interactive:
pmagplotlib.draw_figs(FIG)
ans = input(" S[a]ve to save plot, Return to quit: ")
if ans == "a":
pmagplotlib.save_plots(FIG, files)
return True, files.values()
else:
print("Good bye")
return True, []
elif save_plots:
pmagplotlib.save_plots(FIG, files)
return True, files.values() | makes a map of vgps and a95/dp,dm for site means in a sites table
Parameters
----------
dir_path : str, default "."
input directory path
results_file : str, default "sites.txt"
name of MagIC format sites file
crd : str, default ""
coordinate system [g, t] (geographic, tilt_corrected)
sym : str, default "ro"
symbol color and shape, default red circles
(see matplotlib documentation for more color/shape options)
size : int, default 8
symbol size
rsym : str, default "g^"
symbol for plotting reverse poles
(see matplotlib documentation for more color/shape options)
rsize : int, default 8
symbol size for reverse poles
fmt : str, default "pdf"
format for figures, ["svg", "jpg", "pdf", "png"]
res : str, default "c"
resolution [c, l, i, h] (crude, low, intermediate, high)
proj : str, default "ortho"
ortho = orthographic
lcc = lambert conformal
moll = molweide
merc = mercator
flip : bool, default False
if True, flip reverse poles to normal antipode
anti : bool, default False
if True, plot antipodes for each pole
fancy : bool, default False
if True, plot topography (not yet implemented)
ell : bool, default False
if True, plot ellipses
ages : bool, default False
if True, plot ages
lat_0 : float, default 0.
eyeball latitude
lon_0 : float, default 0.
eyeball longitude
save_plots : bool, default True
if True, create and save all requested plots
interactive : bool, default False
if True, interactively plot and display
(this is best used on the command line only)
Returns
---------
(status, output_files) - Tuple : (True or False indicating if conversion was sucessful, file name(s) written) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L12530-L12756 |
PmagPy/PmagPy | pmagpy/ipmag.py | histplot | def histplot(infile="", data=(), outfile="",
xlab='x', binsize=False, norm=1,
fmt='svg', save_plots=True, interactive=False):
"""
makes histograms for data
Parameters
----------
infile : str, default ""
input file name
format: single variable
data : list-like, default ()
list/array of values to plot if infile is not provided
outfile : str, default ""
name for plot, if not provided defaults to hist.FMT
xlab : str, default 'x'
label for x axis
binsize : int, default False
desired binsize. if not specified, an appropriate binsize will be calculated.
norm : int, default 1
1: norm, 0: don't norm, -1: show normed and non-normed axes
fmt : str, default "svg"
format for figures, ["svg", "jpg", "pdf", "png"]
save_plots : bool, default True
if True, create and save all requested plots
interactive : bool, default False
interactively plot and display
(this is best used on the command line only)
"""
# set outfile name
if outfile:
fmt = ""
else:
outfile = 'hist.'+fmt
# read in data from infile or use data argument
if os.path.exists(infile):
D = np.loadtxt(infile)
else:
D = np.array(data)
try:
if not len(D):
print('-W- No data found')
return False, []
except ValueError:
pass
fig = pmagplotlib.plot_init(1, 8, 7)
try:
len(D)
except TypeError:
D = np.array([D])
if len(D) < 5:
print("-W- Not enough points to plot histogram ({} point(s) provided, 5 required)".format(len(D)))
return False, []
# if binsize not provided, calculate reasonable binsize
if not binsize:
binsize = int(np.around(1 + 3.22 * np.log(len(D))))
binsize = int(binsize)
Nbins = int(len(D) / binsize)
ax = fig.add_subplot(111)
if norm == 1:
print('normalizing')
n, bins, patches = ax.hist(
D, bins=Nbins, facecolor='#D3D3D3', histtype='stepfilled', color='black', density=True)
ax.set_ylabel('Frequency')
elif norm == 0:
print('not normalizing')
n, bins, patches = ax.hist(
D, bins=Nbins, facecolor='#D3D3D3', histtype='stepfilled', color='black', density=False)
ax.set_ylabel('Number')
elif norm == -1:
#print('trying twin')
n, bins, patches = ax.hist(
D, bins=Nbins, facecolor='#D3D3D3', histtype='stepfilled', color='black', density=True)
ax.set_ylabel('Frequency')
ax2 = ax.twinx()
n, bins, patches = ax2.hist(
D, bins=Nbins, facecolor='#D3D3D3', histtype='stepfilled', color='black', density=False)
ax2.set_ylabel('Number', rotation=-90)
plt.axis([D.min(), D.max(), 0, n.max()+.1*n.max()])
ax.set_xlabel(xlab)
name = 'N = ' + str(len(D))
plt.title(name)
if interactive:
pmagplotlib.draw_figs({1: 'hist'})
p = input('s[a]ve to save plot, [q]uit to exit without saving ')
if p != 'a':
return True, []
plt.savefig(outfile)
print('plot saved in ', outfile)
return True, [outfile]
if pmagplotlib.isServer:
pmagplotlib.add_borders({'hist': 1}, {'hist': 'Intensity Histogram'})
if save_plots:
plt.savefig(outfile)
print('plot saved in ', outfile)
return True, [outfile] | python | def histplot(infile="", data=(), outfile="",
xlab='x', binsize=False, norm=1,
fmt='svg', save_plots=True, interactive=False):
"""
makes histograms for data
Parameters
----------
infile : str, default ""
input file name
format: single variable
data : list-like, default ()
list/array of values to plot if infile is not provided
outfile : str, default ""
name for plot, if not provided defaults to hist.FMT
xlab : str, default 'x'
label for x axis
binsize : int, default False
desired binsize. if not specified, an appropriate binsize will be calculated.
norm : int, default 1
1: norm, 0: don't norm, -1: show normed and non-normed axes
fmt : str, default "svg"
format for figures, ["svg", "jpg", "pdf", "png"]
save_plots : bool, default True
if True, create and save all requested plots
interactive : bool, default False
interactively plot and display
(this is best used on the command line only)
"""
# set outfile name
if outfile:
fmt = ""
else:
outfile = 'hist.'+fmt
# read in data from infile or use data argument
if os.path.exists(infile):
D = np.loadtxt(infile)
else:
D = np.array(data)
try:
if not len(D):
print('-W- No data found')
return False, []
except ValueError:
pass
fig = pmagplotlib.plot_init(1, 8, 7)
try:
len(D)
except TypeError:
D = np.array([D])
if len(D) < 5:
print("-W- Not enough points to plot histogram ({} point(s) provided, 5 required)".format(len(D)))
return False, []
# if binsize not provided, calculate reasonable binsize
if not binsize:
binsize = int(np.around(1 + 3.22 * np.log(len(D))))
binsize = int(binsize)
Nbins = int(len(D) / binsize)
ax = fig.add_subplot(111)
if norm == 1:
print('normalizing')
n, bins, patches = ax.hist(
D, bins=Nbins, facecolor='#D3D3D3', histtype='stepfilled', color='black', density=True)
ax.set_ylabel('Frequency')
elif norm == 0:
print('not normalizing')
n, bins, patches = ax.hist(
D, bins=Nbins, facecolor='#D3D3D3', histtype='stepfilled', color='black', density=False)
ax.set_ylabel('Number')
elif norm == -1:
#print('trying twin')
n, bins, patches = ax.hist(
D, bins=Nbins, facecolor='#D3D3D3', histtype='stepfilled', color='black', density=True)
ax.set_ylabel('Frequency')
ax2 = ax.twinx()
n, bins, patches = ax2.hist(
D, bins=Nbins, facecolor='#D3D3D3', histtype='stepfilled', color='black', density=False)
ax2.set_ylabel('Number', rotation=-90)
plt.axis([D.min(), D.max(), 0, n.max()+.1*n.max()])
ax.set_xlabel(xlab)
name = 'N = ' + str(len(D))
plt.title(name)
if interactive:
pmagplotlib.draw_figs({1: 'hist'})
p = input('s[a]ve to save plot, [q]uit to exit without saving ')
if p != 'a':
return True, []
plt.savefig(outfile)
print('plot saved in ', outfile)
return True, [outfile]
if pmagplotlib.isServer:
pmagplotlib.add_borders({'hist': 1}, {'hist': 'Intensity Histogram'})
if save_plots:
plt.savefig(outfile)
print('plot saved in ', outfile)
return True, [outfile] | makes histograms for data
Parameters
----------
infile : str, default ""
input file name
format: single variable
data : list-like, default ()
list/array of values to plot if infile is not provided
outfile : str, default ""
name for plot, if not provided defaults to hist.FMT
xlab : str, default 'x'
label for x axis
binsize : int, default False
desired binsize. if not specified, an appropriate binsize will be calculated.
norm : int, default 1
1: norm, 0: don't norm, -1: show normed and non-normed axes
fmt : str, default "svg"
format for figures, ["svg", "jpg", "pdf", "png"]
save_plots : bool, default True
if True, create and save all requested plots
interactive : bool, default False
interactively plot and display
(this is best used on the command line only) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L12759-L12855 |
PmagPy/PmagPy | pmagpy/ipmag.py | Site.parse_fits | def parse_fits(self, fit_name):
'''USE PARSE_ALL_FITS unless otherwise necessary
Isolate fits by the name of the fit; we also set 'specimen_tilt_correction' to zero in order
to only include data in geographic coordinates - THIS NEEDS TO BE GENERALIZED
'''
fits = self.fits.loc[self.fits.specimen_comp_name ==
fit_name].loc[self.fits.specimen_tilt_correction == 0]
fits.reset_index(inplace=True)
means = self.means.loc[self.means.site_comp_name ==
fit_name].loc[self.means.site_tilt_correction == 0]
means.reset_index(inplace=True)
mean_name = str(fit_name) + "_mean"
setattr(self, fit_name, fits)
setattr(self, mean_name, means) | python | def parse_fits(self, fit_name):
'''USE PARSE_ALL_FITS unless otherwise necessary
Isolate fits by the name of the fit; we also set 'specimen_tilt_correction' to zero in order
to only include data in geographic coordinates - THIS NEEDS TO BE GENERALIZED
'''
fits = self.fits.loc[self.fits.specimen_comp_name ==
fit_name].loc[self.fits.specimen_tilt_correction == 0]
fits.reset_index(inplace=True)
means = self.means.loc[self.means.site_comp_name ==
fit_name].loc[self.means.site_tilt_correction == 0]
means.reset_index(inplace=True)
mean_name = str(fit_name) + "_mean"
setattr(self, fit_name, fits)
setattr(self, mean_name, means) | USE PARSE_ALL_FITS unless otherwise necessary
Isolate fits by the name of the fit; we also set 'specimen_tilt_correction' to zero in order
to only include data in geographic coordinates - THIS NEEDS TO BE GENERALIZED | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L6411-L6424 |
PmagPy/PmagPy | programs/deprecated/measurements_normalize.py | main | def main():
"""
NAME
measurements_normalize.py
DESCRIPTION
takes magic_measurements file and normalized moment by sample_weight and sample_volume in the er_specimens table
SYNTAX
measurements_normalize.py [command line options]
OPTIONS
-f FILE: specify input file, default is: magic_measurements.txt
-fsp FILE: specify input specimen file, default is: er_specimens.txt
-F FILE: specify output measurements, default is to overwrite input file
"""
#
# initialize variables
#
#
#
dir_path='.'
if "-WD" in sys.argv:
ind=sys.argv.index("-WD")
dir_path=sys.argv[ind+1]
meas_file,spec_file= dir_path+"/magic_measurements.txt",dir_path+"/er_specimens.txt"
out_file=meas_file
MeasRecs,SpecRecs=[],[]
OutRecs=[]
if "-h" in sys.argv:
print(main.__doc__)
sys.exit()
if "-f" in sys.argv:
ind=sys.argv.index("-f")
meas_file=dir_path+'/'+sys.argv[ind+1]
if "-fsp" in sys.argv:
ind=sys.argv.index("-fsp")
spec_file=dir_path+'/'+sys.argv[ind+1]
if "-F" in sys.argv:
ind=sys.argv.index("-F")
out_file=dir_path+'/'+sys.argv[ind+1]
MeasRecs,file_type=pmag.magic_read(meas_file)
Specs,file_type=pmag.magic_read(spec_file)
for rec in MeasRecs:
if 'measurement_magn_moment' in list(rec.keys()) and rec['measurement_magn_moment'] != "":
for spec in Specs:
if spec['er_specimen_name']==rec['er_specimen_name']:
if 'specimen_weight' in list(spec.keys()) and spec['specimen_weight']!="":
rec['measurement_magn_mass']='%e'%(old_div(float(rec['measurement_magn_moment']),float(spec['specimen_weight'])))
if 'specimen_volume' in list(spec.keys()) and spec['specimen_volume']!="":
rec['measurement_magn_volume']='%e'%(old_div(float(rec['measurement_magn_moment']),float(spec['specimen_volume'])))
break
if 'measurement_magn_volume' not in list(rec.keys()): rec['measurement_magn_volume']=''
if 'measurement_magn_mass' not in list(rec.keys()): rec['measurement_magn_mass']=''
OutRecs.append(rec)
pmag.magic_write(out_file,OutRecs,"magic_measurements")
print("Data saved in ", out_file) | python | def main():
"""
NAME
measurements_normalize.py
DESCRIPTION
takes magic_measurements file and normalized moment by sample_weight and sample_volume in the er_specimens table
SYNTAX
measurements_normalize.py [command line options]
OPTIONS
-f FILE: specify input file, default is: magic_measurements.txt
-fsp FILE: specify input specimen file, default is: er_specimens.txt
-F FILE: specify output measurements, default is to overwrite input file
"""
#
# initialize variables
#
#
#
dir_path='.'
if "-WD" in sys.argv:
ind=sys.argv.index("-WD")
dir_path=sys.argv[ind+1]
meas_file,spec_file= dir_path+"/magic_measurements.txt",dir_path+"/er_specimens.txt"
out_file=meas_file
MeasRecs,SpecRecs=[],[]
OutRecs=[]
if "-h" in sys.argv:
print(main.__doc__)
sys.exit()
if "-f" in sys.argv:
ind=sys.argv.index("-f")
meas_file=dir_path+'/'+sys.argv[ind+1]
if "-fsp" in sys.argv:
ind=sys.argv.index("-fsp")
spec_file=dir_path+'/'+sys.argv[ind+1]
if "-F" in sys.argv:
ind=sys.argv.index("-F")
out_file=dir_path+'/'+sys.argv[ind+1]
MeasRecs,file_type=pmag.magic_read(meas_file)
Specs,file_type=pmag.magic_read(spec_file)
for rec in MeasRecs:
if 'measurement_magn_moment' in list(rec.keys()) and rec['measurement_magn_moment'] != "":
for spec in Specs:
if spec['er_specimen_name']==rec['er_specimen_name']:
if 'specimen_weight' in list(spec.keys()) and spec['specimen_weight']!="":
rec['measurement_magn_mass']='%e'%(old_div(float(rec['measurement_magn_moment']),float(spec['specimen_weight'])))
if 'specimen_volume' in list(spec.keys()) and spec['specimen_volume']!="":
rec['measurement_magn_volume']='%e'%(old_div(float(rec['measurement_magn_moment']),float(spec['specimen_volume'])))
break
if 'measurement_magn_volume' not in list(rec.keys()): rec['measurement_magn_volume']=''
if 'measurement_magn_mass' not in list(rec.keys()): rec['measurement_magn_mass']=''
OutRecs.append(rec)
pmag.magic_write(out_file,OutRecs,"magic_measurements")
print("Data saved in ", out_file) | NAME
measurements_normalize.py
DESCRIPTION
takes magic_measurements file and normalized moment by sample_weight and sample_volume in the er_specimens table
SYNTAX
measurements_normalize.py [command line options]
OPTIONS
-f FILE: specify input file, default is: magic_measurements.txt
-fsp FILE: specify input specimen file, default is: er_specimens.txt
-F FILE: specify output measurements, default is to overwrite input file | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/deprecated/measurements_normalize.py#L9-L66 |
PmagPy/PmagPy | dialogs/pmag_gui_menu3.py | MagICMenu.on_quit | def on_quit(self, event, wind=None):
"""
shut down application if in the main frame.
otherwise, destroy the top window (wind) and restore
the main frame.
"""
if wind:
wind.Destroy()
if not self.parent.IsShown():
self.on_show_mainframe(None)
# re-do the quit binding
self.parent.Bind(wx.EVT_MENU, self.on_quit, self.file_quit)
else:
self.parent.Close() | python | def on_quit(self, event, wind=None):
"""
shut down application if in the main frame.
otherwise, destroy the top window (wind) and restore
the main frame.
"""
if wind:
wind.Destroy()
if not self.parent.IsShown():
self.on_show_mainframe(None)
# re-do the quit binding
self.parent.Bind(wx.EVT_MENU, self.on_quit, self.file_quit)
else:
self.parent.Close() | shut down application if in the main frame.
otherwise, destroy the top window (wind) and restore
the main frame. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_gui_menu3.py#L146-L159 |
PmagPy/PmagPy | dialogs/pmag_gui_menu3.py | MagICMenu.on_show_mainframe | def on_show_mainframe(self, event):
"""
Show mainframe window
"""
self.parent.Enable()
self.parent.Show()
self.parent.Raise() | python | def on_show_mainframe(self, event):
"""
Show mainframe window
"""
self.parent.Enable()
self.parent.Show()
self.parent.Raise() | Show mainframe window | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_gui_menu3.py#L162-L168 |
PmagPy/PmagPy | dialogs/pmag_gui_menu3.py | MagICMenu.on_clear | def on_clear(self, event):
"""
initialize window to allow user to empty the working directory
"""
dia = pmag_menu_dialogs.ClearWD(self.parent, self.parent.WD)
clear = dia.do_clear()
if clear:
# clear directory, but use previously acquired data_model
if self.data_model_num == 2.5:
self.parent.er_magic = builder.ErMagicBuilder(self.parent.WD, self.parent.er_magic.data_model)
elif self.data_model_num == 3:
self.parent.contribution = cb.Contribution(self.parent.WD,
dmodel=self.parent.contribution.data_model) | python | def on_clear(self, event):
"""
initialize window to allow user to empty the working directory
"""
dia = pmag_menu_dialogs.ClearWD(self.parent, self.parent.WD)
clear = dia.do_clear()
if clear:
# clear directory, but use previously acquired data_model
if self.data_model_num == 2.5:
self.parent.er_magic = builder.ErMagicBuilder(self.parent.WD, self.parent.er_magic.data_model)
elif self.data_model_num == 3:
self.parent.contribution = cb.Contribution(self.parent.WD,
dmodel=self.parent.contribution.data_model) | initialize window to allow user to empty the working directory | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_gui_menu3.py#L171-L183 |
PmagPy/PmagPy | programs/deprecated/plotxy_magic.py | main | def main():
"""
NAME
plotxy_magic.py
DESCRIPTION
Makes simple X,Y plots
INPUT FORMAT
Any MagIC formatted file
SYNTAX
plotxy_magic.py [command line options]
OPTIONS
-h prints this help message
-f FILE to set file name on command rec
-c col1 col2 specify columns names to plot
-sym SYM SIZE specify symbol and size to plot: default is red dots
-S don't plot symbols
-xlab XLAB
-ylab YLAB
-l connect symbols with lines
-b xmin xmax ymin ymax, sets bounds
# -b [key:max:min,key:max:min,etc.] leave or min blank for no cutoff
"""
col1,col2=0,1
sym,size = 'ro',20
xlab,ylab='',''
lines=0
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
else:
'-f option is a required field'
print(main.__doc__)
sys.exit()
if '-c' in sys.argv:
ind=sys.argv.index('-c')
col1=sys.argv[ind+1]
col2=sys.argv[ind+2]
else:
'Column headers a required field'
print(main.__doc__)
sys.exit()
if '-xlab' in sys.argv:
ind=sys.argv.index('-xlab')
xlab=sys.argv[ind+1]
if '-ylab' in sys.argv:
ind=sys.argv.index('-ylab')
ylab=sys.argv[ind+1]
# if '-b' in sys.argv:
# ind=sys.argv.index('-b')
# bounds=sys.argv[ind+1].split(',')
if '-b' in sys.argv:
ind=sys.argv.index('-b')
xmin=float(sys.argv[ind+1])
xmax=float(sys.argv[ind+2])
ymin=float(sys.argv[ind+3])
ymax=float(sys.argv[ind+4])
if '-sym' in sys.argv:
ind=sys.argv.index('-sym')
sym=sys.argv[ind+1]
size=int(sys.argv[ind+2])
if '-l' in sys.argv: lines=1
if '-S' in sys.argv: sym=''
X,Y=[],[]
data,file_type=pmag.magic_read(file)
print(file_type)
for rec in data:
if col1 not in list(rec.keys()) or col2 not in list(rec.keys()):
print(col1,' and/or ',col2, ' not in file headers')
print('try again')
sys.exit()
if rec[col1]!='' and rec[col2]!='':
skip=0
if '-crit' in sys.argv:
for crit in bounds:
crits=crit.split(':')
crit_key=crits[0]
crit_min=crits[1]
crit_max=crits[2]
if rec[crit_key]=="":
skip=1
else:
if crit_min!="" and float(rec[crit_key])<float(crit_min):skip=1
if crit_max!="" and float(rec[crit_key])>float(crit_min):skip=1
if skip==0:
X.append(float(rec[col1]))
Y.append(float(rec[col2]))
if len(X)==0:
print(col1,' and/or ',col2, ' have no data ')
print('try again')
sys.exit()
else:
print(len(X),' data points')
if sym!='':pylab.scatter(X,Y,c=sym[0],marker=sym[1],s=size)
if xlab!='':pylab.xlabel(xlab)
if ylab!='':pylab.ylabel(ylab)
if lines==1:pylab.plot(X,Y,'k-')
if '-b' in sys.argv:pylab.axis([xmin,xmax,ymin,ymax])
pylab.draw()
ans=input("Press return to quit ")
sys.exit() | python | def main():
"""
NAME
plotxy_magic.py
DESCRIPTION
Makes simple X,Y plots
INPUT FORMAT
Any MagIC formatted file
SYNTAX
plotxy_magic.py [command line options]
OPTIONS
-h prints this help message
-f FILE to set file name on command rec
-c col1 col2 specify columns names to plot
-sym SYM SIZE specify symbol and size to plot: default is red dots
-S don't plot symbols
-xlab XLAB
-ylab YLAB
-l connect symbols with lines
-b xmin xmax ymin ymax, sets bounds
# -b [key:max:min,key:max:min,etc.] leave or min blank for no cutoff
"""
col1,col2=0,1
sym,size = 'ro',20
xlab,ylab='',''
lines=0
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
else:
'-f option is a required field'
print(main.__doc__)
sys.exit()
if '-c' in sys.argv:
ind=sys.argv.index('-c')
col1=sys.argv[ind+1]
col2=sys.argv[ind+2]
else:
'Column headers a required field'
print(main.__doc__)
sys.exit()
if '-xlab' in sys.argv:
ind=sys.argv.index('-xlab')
xlab=sys.argv[ind+1]
if '-ylab' in sys.argv:
ind=sys.argv.index('-ylab')
ylab=sys.argv[ind+1]
# if '-b' in sys.argv:
# ind=sys.argv.index('-b')
# bounds=sys.argv[ind+1].split(',')
if '-b' in sys.argv:
ind=sys.argv.index('-b')
xmin=float(sys.argv[ind+1])
xmax=float(sys.argv[ind+2])
ymin=float(sys.argv[ind+3])
ymax=float(sys.argv[ind+4])
if '-sym' in sys.argv:
ind=sys.argv.index('-sym')
sym=sys.argv[ind+1]
size=int(sys.argv[ind+2])
if '-l' in sys.argv: lines=1
if '-S' in sys.argv: sym=''
X,Y=[],[]
data,file_type=pmag.magic_read(file)
print(file_type)
for rec in data:
if col1 not in list(rec.keys()) or col2 not in list(rec.keys()):
print(col1,' and/or ',col2, ' not in file headers')
print('try again')
sys.exit()
if rec[col1]!='' and rec[col2]!='':
skip=0
if '-crit' in sys.argv:
for crit in bounds:
crits=crit.split(':')
crit_key=crits[0]
crit_min=crits[1]
crit_max=crits[2]
if rec[crit_key]=="":
skip=1
else:
if crit_min!="" and float(rec[crit_key])<float(crit_min):skip=1
if crit_max!="" and float(rec[crit_key])>float(crit_min):skip=1
if skip==0:
X.append(float(rec[col1]))
Y.append(float(rec[col2]))
if len(X)==0:
print(col1,' and/or ',col2, ' have no data ')
print('try again')
sys.exit()
else:
print(len(X),' data points')
if sym!='':pylab.scatter(X,Y,c=sym[0],marker=sym[1],s=size)
if xlab!='':pylab.xlabel(xlab)
if ylab!='':pylab.ylabel(ylab)
if lines==1:pylab.plot(X,Y,'k-')
if '-b' in sys.argv:pylab.axis([xmin,xmax,ymin,ymax])
pylab.draw()
ans=input("Press return to quit ")
sys.exit() | NAME
plotxy_magic.py
DESCRIPTION
Makes simple X,Y plots
INPUT FORMAT
Any MagIC formatted file
SYNTAX
plotxy_magic.py [command line options]
OPTIONS
-h prints this help message
-f FILE to set file name on command rec
-c col1 col2 specify columns names to plot
-sym SYM SIZE specify symbol and size to plot: default is red dots
-S don't plot symbols
-xlab XLAB
-ylab YLAB
-l connect symbols with lines
-b xmin xmax ymin ymax, sets bounds
# -b [key:max:min,key:max:min,etc.] leave or min blank for no cutoff | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/deprecated/plotxy_magic.py#L11-L117 |
PmagPy/PmagPy | programs/lowrie.py | main | def main():
"""
NAME
lowrie.py
DESCRIPTION
plots intensity decay curves for Lowrie experiments
SYNTAX
lowrie -h [command line options]
INPUT
takes SIO formatted input files
OPTIONS
-h prints help message and quits
-f FILE: specify input file
-N do not normalize by maximum magnetization
-fmt [svg, pdf, eps, png] specify fmt, default is svg
-sav save plots and quit
"""
fmt, plot = 'svg', 0
FIG = {} # plot dictionary
FIG['lowrie'] = 1 # demag is figure 1
pmagplotlib.plot_init(FIG['lowrie'], 6, 6)
norm = 1 # default is to normalize by maximum axis
if len(sys.argv) > 1:
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-N' in sys.argv:
norm = 0 # don't normalize
if '-sav' in sys.argv:
plot = 1 # don't normalize
if '-fmt' in sys.argv: # sets input filename
ind = sys.argv.index("-fmt")
fmt = sys.argv[ind + 1]
if '-f' in sys.argv: # sets input filename
ind = sys.argv.index("-f")
in_file = sys.argv[ind + 1]
else:
print(main.__doc__)
print('you must supply a file name')
sys.exit()
else:
print(main.__doc__)
print('you must supply a file name')
sys.exit()
data = pmag.open_file(in_file)
PmagRecs = [] # set up a list for the results
keys = ['specimen', 'treatment', 'csd', 'M', 'dec', 'inc']
for line in data:
PmagRec = {}
rec = line.replace('\n', '').split()
for k in range(len(keys)):
PmagRec[keys[k]] = rec[k]
PmagRecs.append(PmagRec)
specs = pmag.get_dictkey(PmagRecs, 'specimen', '')
sids = []
for spec in specs:
if spec not in sids:
sids.append(spec) # get list of unique specimen names
for spc in sids: # step through the specimen names
pmagplotlib.plot_init(FIG['lowrie'], 6, 6)
print(spc)
specdata = pmag.get_dictitem(
PmagRecs, 'specimen', spc, 'T') # get all this one's data
DIMs, Temps = [], []
for dat in specdata: # step through the data
DIMs.append([float(dat['dec']), float(
dat['inc']), float(dat['M']) * 1e-3])
Temps.append(float(dat['treatment']))
carts = pmag.dir2cart(DIMs).transpose()
# if norm==1: # want to normalize
# nrm=max(max(abs(carts[0])),max(abs(carts[1])),max(abs(carts[2]))) # by maximum of x,y,z values
# ylab="M/M_max"
if norm == 1: # want to normalize
nrm = (DIMs[0][2]) # normalize by NRM
ylab = "M/M_o"
else:
nrm = 1. # don't normalize
ylab = "Magnetic moment (Am^2)"
xlab = "Temperature (C)"
pmagplotlib.plot_xy(FIG['lowrie'], Temps, old_div(
abs(carts[0]), nrm), sym='r-')
pmagplotlib.plot_xy(FIG['lowrie'], Temps, old_div(
abs(carts[0]), nrm), sym='ro') # X direction
pmagplotlib.plot_xy(FIG['lowrie'], Temps, old_div(
abs(carts[1]), nrm), sym='c-')
pmagplotlib.plot_xy(FIG['lowrie'], Temps, old_div(
abs(carts[1]), nrm), sym='cs') # Y direction
pmagplotlib.plot_xy(FIG['lowrie'], Temps, old_div(
abs(carts[2]), nrm), sym='k-')
pmagplotlib.plot_xy(FIG['lowrie'], Temps, old_div(
abs(carts[2]), nrm), sym='k^', title=spc, xlab=xlab, ylab=ylab) # Z direction
files = {'lowrie': 'lowrie:_' + spc + '_.' + fmt}
if plot == 0:
pmagplotlib.draw_figs(FIG)
ans = input('S[a]ve figure? [q]uit, <return> to continue ')
if ans == 'a':
pmagplotlib.save_plots(FIG, files)
elif ans == 'q':
sys.exit()
else:
pmagplotlib.save_plots(FIG, files)
pmagplotlib.clearFIG(FIG['lowrie']) | python | def main():
"""
NAME
lowrie.py
DESCRIPTION
plots intensity decay curves for Lowrie experiments
SYNTAX
lowrie -h [command line options]
INPUT
takes SIO formatted input files
OPTIONS
-h prints help message and quits
-f FILE: specify input file
-N do not normalize by maximum magnetization
-fmt [svg, pdf, eps, png] specify fmt, default is svg
-sav save plots and quit
"""
fmt, plot = 'svg', 0
FIG = {} # plot dictionary
FIG['lowrie'] = 1 # demag is figure 1
pmagplotlib.plot_init(FIG['lowrie'], 6, 6)
norm = 1 # default is to normalize by maximum axis
if len(sys.argv) > 1:
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-N' in sys.argv:
norm = 0 # don't normalize
if '-sav' in sys.argv:
plot = 1 # don't normalize
if '-fmt' in sys.argv: # sets input filename
ind = sys.argv.index("-fmt")
fmt = sys.argv[ind + 1]
if '-f' in sys.argv: # sets input filename
ind = sys.argv.index("-f")
in_file = sys.argv[ind + 1]
else:
print(main.__doc__)
print('you must supply a file name')
sys.exit()
else:
print(main.__doc__)
print('you must supply a file name')
sys.exit()
data = pmag.open_file(in_file)
PmagRecs = [] # set up a list for the results
keys = ['specimen', 'treatment', 'csd', 'M', 'dec', 'inc']
for line in data:
PmagRec = {}
rec = line.replace('\n', '').split()
for k in range(len(keys)):
PmagRec[keys[k]] = rec[k]
PmagRecs.append(PmagRec)
specs = pmag.get_dictkey(PmagRecs, 'specimen', '')
sids = []
for spec in specs:
if spec not in sids:
sids.append(spec) # get list of unique specimen names
for spc in sids: # step through the specimen names
pmagplotlib.plot_init(FIG['lowrie'], 6, 6)
print(spc)
specdata = pmag.get_dictitem(
PmagRecs, 'specimen', spc, 'T') # get all this one's data
DIMs, Temps = [], []
for dat in specdata: # step through the data
DIMs.append([float(dat['dec']), float(
dat['inc']), float(dat['M']) * 1e-3])
Temps.append(float(dat['treatment']))
carts = pmag.dir2cart(DIMs).transpose()
# if norm==1: # want to normalize
# nrm=max(max(abs(carts[0])),max(abs(carts[1])),max(abs(carts[2]))) # by maximum of x,y,z values
# ylab="M/M_max"
if norm == 1: # want to normalize
nrm = (DIMs[0][2]) # normalize by NRM
ylab = "M/M_o"
else:
nrm = 1. # don't normalize
ylab = "Magnetic moment (Am^2)"
xlab = "Temperature (C)"
pmagplotlib.plot_xy(FIG['lowrie'], Temps, old_div(
abs(carts[0]), nrm), sym='r-')
pmagplotlib.plot_xy(FIG['lowrie'], Temps, old_div(
abs(carts[0]), nrm), sym='ro') # X direction
pmagplotlib.plot_xy(FIG['lowrie'], Temps, old_div(
abs(carts[1]), nrm), sym='c-')
pmagplotlib.plot_xy(FIG['lowrie'], Temps, old_div(
abs(carts[1]), nrm), sym='cs') # Y direction
pmagplotlib.plot_xy(FIG['lowrie'], Temps, old_div(
abs(carts[2]), nrm), sym='k-')
pmagplotlib.plot_xy(FIG['lowrie'], Temps, old_div(
abs(carts[2]), nrm), sym='k^', title=spc, xlab=xlab, ylab=ylab) # Z direction
files = {'lowrie': 'lowrie:_' + spc + '_.' + fmt}
if plot == 0:
pmagplotlib.draw_figs(FIG)
ans = input('S[a]ve figure? [q]uit, <return> to continue ')
if ans == 'a':
pmagplotlib.save_plots(FIG, files)
elif ans == 'q':
sys.exit()
else:
pmagplotlib.save_plots(FIG, files)
pmagplotlib.clearFIG(FIG['lowrie']) | NAME
lowrie.py
DESCRIPTION
plots intensity decay curves for Lowrie experiments
SYNTAX
lowrie -h [command line options]
INPUT
takes SIO formatted input files
OPTIONS
-h prints help message and quits
-f FILE: specify input file
-N do not normalize by maximum magnetization
-fmt [svg, pdf, eps, png] specify fmt, default is svg
-sav save plots and quit | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/lowrie.py#L17-L122 |
PmagPy/PmagPy | programs/apwp.py | main | def main():
"""
NAME
apwp.py
DESCRIPTION
returns predicted paleolatitudes, directions and pole latitude/longitude
from apparent polar wander paths of Besse and Courtillot (2002).
SYNTAX
apwp.py [command line options][< filename]
OPTIONS
-h prints help message and quits
-i allows interactive data entry
f file: read plate, lat, lon, age data from file
-F output_file: write output to output_file
-P [NA, SA, AF, IN, EU, AU, ANT, GL] plate
-lat LAT specify present latitude (positive = North; negative=South)
-lon LON specify present longitude (positive = East, negative=West)
-age AGE specify Age in Ma
Note: must have all -P, -lat, -lon, -age or none.
OUTPUT
Age Paleolat. Dec. Inc. Pole_lat. Pole_Long.
"""
infile,outfile,data,indata="","",[],[]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-F' in sys.argv:
ind=sys.argv.index('-F')
outfile=sys.argv[ind+1]
out=open(outfile,'w')
if '-i' in sys.argv:
print("Welcome to paleolatitude calculator\n")
while 1:
data=[]
print("pick a plate: NA, SA, AF, IN, EU, AU, ANT, GL \n cntl-D to quit")
try:
plate=input("Plate\n").upper()
except:
print("Goodbye \n")
sys.exit()
lat=float(input( "Site latitude\n"))
lon=float(input(" Site longitude\n"))
age=float(input(" Age\n"))
data=[plate,lat,lon,age]
print("Age Paleolat. Dec. Inc. Pole_lat. Pole_Long.")
print(spitout(data))
elif '-f' in sys.argv:
ind=sys.argv.index('-f')
infile=sys.argv[ind+1]
f=open(infile,'r')
inp=f.readlines()
elif '-P' in sys.argv:
ind=sys.argv.index('-P')
plate=sys.argv[ind+1].upper()
if '-lat' in sys.argv:
ind=sys.argv.index('-lat')
lat=float(sys.argv[ind+1])
else:
print(main.__doc__)
sys.exit()
if '-lon' in sys.argv:
ind=sys.argv.index('-lon')
lon=float(sys.argv[ind+1])
else:
print(main.__doc__)
sys.exit()
if '-age' in sys.argv:
ind=sys.argv.index('-age')
age=float(sys.argv[ind+1])
else:
print(main.__doc__)
sys.exit()
data=[plate,lat,lon,age]
outstring=spitout(data)
if outfile=="":
print("Age Paleolat. Dec. Inc. Pole_lat. Pole_Long.")
print(outstring)
else:
out.write(outstring)
sys.exit()
else:
inp=sys.stdin.readlines() # read from standard input
if len(inp)>0:
for line in inp:
data=[]
rec=line.split()
data.append(rec[0])
for k in range(1,4): data.append(float(rec[k]))
indata.append(data)
if len(indata)>0:
for line in indata:
outstring=spitout(line)
if outfile=="":
print(outstring)
else:
out.write(outstring)
else:
print('no input data')
sys.exit() | python | def main():
"""
NAME
apwp.py
DESCRIPTION
returns predicted paleolatitudes, directions and pole latitude/longitude
from apparent polar wander paths of Besse and Courtillot (2002).
SYNTAX
apwp.py [command line options][< filename]
OPTIONS
-h prints help message and quits
-i allows interactive data entry
f file: read plate, lat, lon, age data from file
-F output_file: write output to output_file
-P [NA, SA, AF, IN, EU, AU, ANT, GL] plate
-lat LAT specify present latitude (positive = North; negative=South)
-lon LON specify present longitude (positive = East, negative=West)
-age AGE specify Age in Ma
Note: must have all -P, -lat, -lon, -age or none.
OUTPUT
Age Paleolat. Dec. Inc. Pole_lat. Pole_Long.
"""
infile,outfile,data,indata="","",[],[]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-F' in sys.argv:
ind=sys.argv.index('-F')
outfile=sys.argv[ind+1]
out=open(outfile,'w')
if '-i' in sys.argv:
print("Welcome to paleolatitude calculator\n")
while 1:
data=[]
print("pick a plate: NA, SA, AF, IN, EU, AU, ANT, GL \n cntl-D to quit")
try:
plate=input("Plate\n").upper()
except:
print("Goodbye \n")
sys.exit()
lat=float(input( "Site latitude\n"))
lon=float(input(" Site longitude\n"))
age=float(input(" Age\n"))
data=[plate,lat,lon,age]
print("Age Paleolat. Dec. Inc. Pole_lat. Pole_Long.")
print(spitout(data))
elif '-f' in sys.argv:
ind=sys.argv.index('-f')
infile=sys.argv[ind+1]
f=open(infile,'r')
inp=f.readlines()
elif '-P' in sys.argv:
ind=sys.argv.index('-P')
plate=sys.argv[ind+1].upper()
if '-lat' in sys.argv:
ind=sys.argv.index('-lat')
lat=float(sys.argv[ind+1])
else:
print(main.__doc__)
sys.exit()
if '-lon' in sys.argv:
ind=sys.argv.index('-lon')
lon=float(sys.argv[ind+1])
else:
print(main.__doc__)
sys.exit()
if '-age' in sys.argv:
ind=sys.argv.index('-age')
age=float(sys.argv[ind+1])
else:
print(main.__doc__)
sys.exit()
data=[plate,lat,lon,age]
outstring=spitout(data)
if outfile=="":
print("Age Paleolat. Dec. Inc. Pole_lat. Pole_Long.")
print(outstring)
else:
out.write(outstring)
sys.exit()
else:
inp=sys.stdin.readlines() # read from standard input
if len(inp)>0:
for line in inp:
data=[]
rec=line.split()
data.append(rec[0])
for k in range(1,4): data.append(float(rec[k]))
indata.append(data)
if len(indata)>0:
for line in indata:
outstring=spitout(line)
if outfile=="":
print(outstring)
else:
out.write(outstring)
else:
print('no input data')
sys.exit() | NAME
apwp.py
DESCRIPTION
returns predicted paleolatitudes, directions and pole latitude/longitude
from apparent polar wander paths of Besse and Courtillot (2002).
SYNTAX
apwp.py [command line options][< filename]
OPTIONS
-h prints help message and quits
-i allows interactive data entry
f file: read plate, lat, lon, age data from file
-F output_file: write output to output_file
-P [NA, SA, AF, IN, EU, AU, ANT, GL] plate
-lat LAT specify present latitude (positive = North; negative=South)
-lon LON specify present longitude (positive = East, negative=West)
-age AGE specify Age in Ma
Note: must have all -P, -lat, -lon, -age or none.
OUTPUT
Age Paleolat. Dec. Inc. Pole_lat. Pole_Long. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/apwp.py#L13-L117 |
PmagPy/PmagPy | SPD/lib/lib_directional_statistics.py | tauV | def tauV(T):
"""
gets the eigenvalues (tau) and eigenvectors (V) from matrix T
"""
t,V,tr=[],[],0.
ind1,ind2,ind3=0,1,2
evalues,evectmps=numpy.linalg.eig(T)
evectors=numpy.transpose(evectmps) # to make compatible with Numeric convention
for tau in evalues:
tr += tau # tr totals tau values
if tr != 0:
for i in range(3):
evalues[i]=old_div(evalues[i], tr) # convention is norming eigenvalues so they sum to 1.
else:
return t,V # if eigenvalues add up to zero, no sorting is needed
# sort evalues,evectors
t1, t2, t3 = 0., 0., 1.
for k in range(3):
if evalues[k] > t1:
t1,ind1 = evalues[k],k
if evalues[k] < t3:
t3,ind3 = evalues[k],k
for k in range(3):
if evalues[k] != t1 and evalues[k] != t3:
t2,ind2=evalues[k],k
V.append(evectors[ind1])
V.append(evectors[ind2])
V.append(evectors[ind3])
t.append(t1)
t.append(t2)
t.append(t3)
return t,V | python | def tauV(T):
"""
gets the eigenvalues (tau) and eigenvectors (V) from matrix T
"""
t,V,tr=[],[],0.
ind1,ind2,ind3=0,1,2
evalues,evectmps=numpy.linalg.eig(T)
evectors=numpy.transpose(evectmps) # to make compatible with Numeric convention
for tau in evalues:
tr += tau # tr totals tau values
if tr != 0:
for i in range(3):
evalues[i]=old_div(evalues[i], tr) # convention is norming eigenvalues so they sum to 1.
else:
return t,V # if eigenvalues add up to zero, no sorting is needed
# sort evalues,evectors
t1, t2, t3 = 0., 0., 1.
for k in range(3):
if evalues[k] > t1:
t1,ind1 = evalues[k],k
if evalues[k] < t3:
t3,ind3 = evalues[k],k
for k in range(3):
if evalues[k] != t1 and evalues[k] != t3:
t2,ind2=evalues[k],k
V.append(evectors[ind1])
V.append(evectors[ind2])
V.append(evectors[ind3])
t.append(t1)
t.append(t2)
t.append(t3)
return t,V | gets the eigenvalues (tau) and eigenvectors (V) from matrix T | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/SPD/lib/lib_directional_statistics.py#L48-L79 |
PmagPy/PmagPy | SPD/lib/lib_directional_statistics.py | get_PD_direction | def get_PD_direction(X1_prime, X2_prime, X3_prime, PD):
"""takes arrays of X1_prime, X2_prime, X3_prime, and the PD.
checks that the PD vector direction is correct"""
n = len(X1_prime) - 1
X1 = X1_prime[0] - X1_prime[n]
X2 = X2_prime[0] - X2_prime[n]
X3 = X3_prime[0] - X3_prime[n]
R= numpy.array([X1, X2, X3])
#print 'R (reference vector for PD direction)', R
dot = numpy.dot(PD, R) # dot product of reference vector and the principal axis of the V matrix
#print 'dot (dot of PD and R)', dot
if dot < -1:
dot = -1
elif dot > 1:
dot = 1
if numpy.arccos(dot) > old_div(numpy.pi, 2.):
#print 'numpy.arccos(dot) {} > numpy.pi / 2. {}'.format(numpy.arccos(dot), numpy.pi / 2)
#print 'correcting PD direction'
PD = -1. * numpy.array(PD)
#print 'PD after get PD direction', PD
return PD | python | def get_PD_direction(X1_prime, X2_prime, X3_prime, PD):
"""takes arrays of X1_prime, X2_prime, X3_prime, and the PD.
checks that the PD vector direction is correct"""
n = len(X1_prime) - 1
X1 = X1_prime[0] - X1_prime[n]
X2 = X2_prime[0] - X2_prime[n]
X3 = X3_prime[0] - X3_prime[n]
R= numpy.array([X1, X2, X3])
#print 'R (reference vector for PD direction)', R
dot = numpy.dot(PD, R) # dot product of reference vector and the principal axis of the V matrix
#print 'dot (dot of PD and R)', dot
if dot < -1:
dot = -1
elif dot > 1:
dot = 1
if numpy.arccos(dot) > old_div(numpy.pi, 2.):
#print 'numpy.arccos(dot) {} > numpy.pi / 2. {}'.format(numpy.arccos(dot), numpy.pi / 2)
#print 'correcting PD direction'
PD = -1. * numpy.array(PD)
#print 'PD after get PD direction', PD
return PD | takes arrays of X1_prime, X2_prime, X3_prime, and the PD.
checks that the PD vector direction is correct | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/SPD/lib/lib_directional_statistics.py#L81-L101 |
PmagPy/PmagPy | SPD/lib/lib_directional_statistics.py | get_MAD | def get_MAD(tau):
"""
input: eigenvalues of PCA matrix
output: Maximum Angular Deviation
"""
# tau is ordered so that tau[0] > tau[1] > tau[2]
for t in tau:
if isinstance(t, complex):
return -999
MAD = math.degrees(numpy.arctan(numpy.sqrt(old_div((tau[1] + tau[2]), tau[0]))) )
return MAD | python | def get_MAD(tau):
"""
input: eigenvalues of PCA matrix
output: Maximum Angular Deviation
"""
# tau is ordered so that tau[0] > tau[1] > tau[2]
for t in tau:
if isinstance(t, complex):
return -999
MAD = math.degrees(numpy.arctan(numpy.sqrt(old_div((tau[1] + tau[2]), tau[0]))) )
return MAD | input: eigenvalues of PCA matrix
output: Maximum Angular Deviation | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/SPD/lib/lib_directional_statistics.py#L140-L150 |
PmagPy/PmagPy | SPD/lib/lib_directional_statistics.py | dir2cart | def dir2cart(d): # from pmag.py
"""converts list or array of vector directions, in degrees, to array of cartesian coordinates, in x,y,z form """
ints = numpy.ones(len(d)).transpose() # get an array of ones to plug into dec,inc pairs
d = numpy.array(d)
rad = old_div(numpy.pi, 180.)
if len(d.shape) > 1: # array of vectors
decs, incs = d[:,0] * rad, d[:,1] * rad
if d.shape[1] == 3: ints = d[:,2] # take the given lengths
else: # single vector
decs, incs = numpy.array(d[0]) * rad, numpy.array(d[1]) * rad
if len(d) == 3:
ints = numpy.array(d[2])
else:
ints = numpy.array([1.])
cart = numpy.array([ints * numpy.cos(decs) * numpy.cos(incs),
ints * numpy.sin(decs) * numpy.cos(incs),
ints * numpy.sin(incs)
]).transpose()
return cart | python | def dir2cart(d): # from pmag.py
"""converts list or array of vector directions, in degrees, to array of cartesian coordinates, in x,y,z form """
ints = numpy.ones(len(d)).transpose() # get an array of ones to plug into dec,inc pairs
d = numpy.array(d)
rad = old_div(numpy.pi, 180.)
if len(d.shape) > 1: # array of vectors
decs, incs = d[:,0] * rad, d[:,1] * rad
if d.shape[1] == 3: ints = d[:,2] # take the given lengths
else: # single vector
decs, incs = numpy.array(d[0]) * rad, numpy.array(d[1]) * rad
if len(d) == 3:
ints = numpy.array(d[2])
else:
ints = numpy.array([1.])
cart = numpy.array([ints * numpy.cos(decs) * numpy.cos(incs),
ints * numpy.sin(decs) * numpy.cos(incs),
ints * numpy.sin(incs)
]).transpose()
return cart | converts list or array of vector directions, in degrees, to array of cartesian coordinates, in x,y,z form | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/SPD/lib/lib_directional_statistics.py#L152-L170 |
PmagPy/PmagPy | SPD/lib/lib_directional_statistics.py | pmag_angle | def pmag_angle(D1,D2): # use this
"""
finds the angle between lists of two directions D1,D2
"""
D1 = numpy.array(D1)
if len(D1.shape) > 1:
D1 = D1[:,0:2] # strip off intensity
else: D1 = D1[:2]
D2 = numpy.array(D2)
if len(D2.shape) > 1:
D2 = D2[:,0:2] # strip off intensity
else: D2 = D2[:2]
X1 = dir2cart(D1) # convert to cartesian from polar
X2 = dir2cart(D2)
angles = [] # set up a list for angles
for k in range(X1.shape[0]): # single vector
angle = numpy.arccos(numpy.dot(X1[k],X2[k]))*180./numpy.pi # take the dot product
angle = angle%360.
angles.append(angle)
return numpy.array(angles) | python | def pmag_angle(D1,D2): # use this
"""
finds the angle between lists of two directions D1,D2
"""
D1 = numpy.array(D1)
if len(D1.shape) > 1:
D1 = D1[:,0:2] # strip off intensity
else: D1 = D1[:2]
D2 = numpy.array(D2)
if len(D2.shape) > 1:
D2 = D2[:,0:2] # strip off intensity
else: D2 = D2[:2]
X1 = dir2cart(D1) # convert to cartesian from polar
X2 = dir2cart(D2)
angles = [] # set up a list for angles
for k in range(X1.shape[0]): # single vector
angle = numpy.arccos(numpy.dot(X1[k],X2[k]))*180./numpy.pi # take the dot product
angle = angle%360.
angles.append(angle)
return numpy.array(angles) | finds the angle between lists of two directions D1,D2 | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/SPD/lib/lib_directional_statistics.py#L172-L191 |
PmagPy/PmagPy | SPD/lib/lib_directional_statistics.py | new_get_angle_diff | def new_get_angle_diff(v1,v2):
"""returns angular difference in degrees between two vectors. may be more precise in certain cases. see SPD"""
v1 = numpy.array(v1)
v2 = numpy.array(v2)
angle = numpy.arctan2(numpy.linalg.norm(numpy.cross(v1, v2)), numpy.dot(v1, v2))
return math.degrees(angle) | python | def new_get_angle_diff(v1,v2):
"""returns angular difference in degrees between two vectors. may be more precise in certain cases. see SPD"""
v1 = numpy.array(v1)
v2 = numpy.array(v2)
angle = numpy.arctan2(numpy.linalg.norm(numpy.cross(v1, v2)), numpy.dot(v1, v2))
return math.degrees(angle) | returns angular difference in degrees between two vectors. may be more precise in certain cases. see SPD | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/SPD/lib/lib_directional_statistics.py#L193-L198 |
PmagPy/PmagPy | SPD/lib/lib_directional_statistics.py | get_angle_difference | def get_angle_difference(v1, v2):
"""returns angular difference in degrees between two vectors. takes in cartesian coordinates."""
v1 = numpy.array(v1)
v2 = numpy.array(v2)
angle=numpy.arccos(old_div((numpy.dot(v1, v2) ), (numpy.sqrt(math.fsum(v1**2)) * numpy.sqrt(math.fsum(v2**2)))))
return math.degrees(angle) | python | def get_angle_difference(v1, v2):
"""returns angular difference in degrees between two vectors. takes in cartesian coordinates."""
v1 = numpy.array(v1)
v2 = numpy.array(v2)
angle=numpy.arccos(old_div((numpy.dot(v1, v2) ), (numpy.sqrt(math.fsum(v1**2)) * numpy.sqrt(math.fsum(v2**2)))))
return math.degrees(angle) | returns angular difference in degrees between two vectors. takes in cartesian coordinates. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/SPD/lib/lib_directional_statistics.py#L201-L206 |
PmagPy/PmagPy | SPD/lib/lib_directional_statistics.py | get_ptrms_angle | def get_ptrms_angle(ptrms_best_fit_vector, B_lab_vector):
"""
gives angle between principal direction of the ptrm data and the b_lab vector. this is NOT in SPD, but taken from Ron Shaar's old thellier_gui.py code. see PmagPy on github
"""
ptrms_angle = math.degrees(math.acos(old_div(numpy.dot(ptrms_best_fit_vector,B_lab_vector),(numpy.sqrt(sum(ptrms_best_fit_vector**2)) * numpy.sqrt(sum(B_lab_vector**2)))))) # from old thellier_gui.py code
return ptrms_angle | python | def get_ptrms_angle(ptrms_best_fit_vector, B_lab_vector):
"""
gives angle between principal direction of the ptrm data and the b_lab vector. this is NOT in SPD, but taken from Ron Shaar's old thellier_gui.py code. see PmagPy on github
"""
ptrms_angle = math.degrees(math.acos(old_div(numpy.dot(ptrms_best_fit_vector,B_lab_vector),(numpy.sqrt(sum(ptrms_best_fit_vector**2)) * numpy.sqrt(sum(B_lab_vector**2)))))) # from old thellier_gui.py code
return ptrms_angle | gives angle between principal direction of the ptrm data and the b_lab vector. this is NOT in SPD, but taken from Ron Shaar's old thellier_gui.py code. see PmagPy on github | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/SPD/lib/lib_directional_statistics.py#L236-L241 |
PmagPy/PmagPy | programs/remove_bad_chars.py | main | def main():
"""
Take out dos problem characters from any file
"""
filename = pmag.get_named_arg('-f')
if not filename:
return
with open(filename, 'rb+') as f:
content = f.read()
f.seek(0)
f.write(content.replace(b'\r', b''))
f.truncate() | python | def main():
"""
Take out dos problem characters from any file
"""
filename = pmag.get_named_arg('-f')
if not filename:
return
with open(filename, 'rb+') as f:
content = f.read()
f.seek(0)
f.write(content.replace(b'\r', b''))
f.truncate() | Take out dos problem characters from any file | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/remove_bad_chars.py#L5-L16 |
PmagPy/PmagPy | dialogs/pmag_menu_dialogs.py | add_thellier_gui_criteria | def add_thellier_gui_criteria(acceptance_criteria):
'''criteria used only in thellier gui
these criteria are not written to pmag_criteria.txt
'''
category="thellier_gui"
for crit in ['sample_int_n_outlier_check','site_int_n_outlier_check']:
acceptance_criteria[crit]={}
acceptance_criteria[crit]['category']=category
acceptance_criteria[crit]['criterion_name']=crit
acceptance_criteria[crit]['value']=-999
acceptance_criteria[crit]['threshold_type']="low"
acceptance_criteria[crit]['decimal_points']=0
for crit in ['sample_int_interval_uT','sample_int_interval_perc',\
'site_int_interval_uT','site_int_interval_perc',\
'sample_int_BS_68_uT','sample_int_BS_95_uT','sample_int_BS_68_perc','sample_int_BS_95_perc','specimen_int_max_slope_diff']:
acceptance_criteria[crit]={}
acceptance_criteria[crit]['category']=category
acceptance_criteria[crit]['criterion_name']=crit
acceptance_criteria[crit]['value']=-999
acceptance_criteria[crit]['threshold_type']="high"
if crit in ['specimen_int_max_slope_diff']:
acceptance_criteria[crit]['decimal_points']=-999
else:
acceptance_criteria[crit]['decimal_points']=1
acceptance_criteria[crit]['comments']="thellier_gui_only"
for crit in ['average_by_sample_or_site','interpreter_method']:
acceptance_criteria[crit]={}
acceptance_criteria[crit]['category']=category
acceptance_criteria[crit]['criterion_name']=crit
if crit in ['average_by_sample_or_site']:
acceptance_criteria[crit]['value']='sample'
if crit in ['interpreter_method']:
acceptance_criteria[crit]['value']='stdev_opt'
acceptance_criteria[crit]['threshold_type']="flag"
acceptance_criteria[crit]['decimal_points']=-999
for crit in ['include_nrm']:
acceptance_criteria[crit]={}
acceptance_criteria[crit]['category']=category
acceptance_criteria[crit]['criterion_name']=crit
acceptance_criteria[crit]['value']=True
acceptance_criteria[crit]['threshold_type']="bool"
acceptance_criteria[crit]['decimal_points']=-999 | python | def add_thellier_gui_criteria(acceptance_criteria):
'''criteria used only in thellier gui
these criteria are not written to pmag_criteria.txt
'''
category="thellier_gui"
for crit in ['sample_int_n_outlier_check','site_int_n_outlier_check']:
acceptance_criteria[crit]={}
acceptance_criteria[crit]['category']=category
acceptance_criteria[crit]['criterion_name']=crit
acceptance_criteria[crit]['value']=-999
acceptance_criteria[crit]['threshold_type']="low"
acceptance_criteria[crit]['decimal_points']=0
for crit in ['sample_int_interval_uT','sample_int_interval_perc',\
'site_int_interval_uT','site_int_interval_perc',\
'sample_int_BS_68_uT','sample_int_BS_95_uT','sample_int_BS_68_perc','sample_int_BS_95_perc','specimen_int_max_slope_diff']:
acceptance_criteria[crit]={}
acceptance_criteria[crit]['category']=category
acceptance_criteria[crit]['criterion_name']=crit
acceptance_criteria[crit]['value']=-999
acceptance_criteria[crit]['threshold_type']="high"
if crit in ['specimen_int_max_slope_diff']:
acceptance_criteria[crit]['decimal_points']=-999
else:
acceptance_criteria[crit]['decimal_points']=1
acceptance_criteria[crit]['comments']="thellier_gui_only"
for crit in ['average_by_sample_or_site','interpreter_method']:
acceptance_criteria[crit]={}
acceptance_criteria[crit]['category']=category
acceptance_criteria[crit]['criterion_name']=crit
if crit in ['average_by_sample_or_site']:
acceptance_criteria[crit]['value']='sample'
if crit in ['interpreter_method']:
acceptance_criteria[crit]['value']='stdev_opt'
acceptance_criteria[crit]['threshold_type']="flag"
acceptance_criteria[crit]['decimal_points']=-999
for crit in ['include_nrm']:
acceptance_criteria[crit]={}
acceptance_criteria[crit]['category']=category
acceptance_criteria[crit]['criterion_name']=crit
acceptance_criteria[crit]['value']=True
acceptance_criteria[crit]['threshold_type']="bool"
acceptance_criteria[crit]['decimal_points']=-999 | criteria used only in thellier gui
these criteria are not written to pmag_criteria.txt | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_menu_dialogs.py#L1435-L1479 |
PmagPy/PmagPy | dialogs/pmag_menu_dialogs.py | Core_depthplot.on_okButton | def on_okButton(self, event):
"""
meas_file # -f magic_measurements_file
samp_file #-fsa er_samples_file
age_file # -fa er_ages_file
depth_scale # -ds scale
dmin, dmax # -d 1 50 # depth to plot
timescale, amin, amax (also sets pTS, pcol, width) = # -ts scale min max
sym, size # -sym symbol size
method, step (also may set suc_key) # -LP protocol step
pltDec (also sets pcol, pel, width)# -D (don't plot dec)
pltInc (also sets pcol, pel, width)# -I (don't plot inc)
pltMag (also sets pcol, pel, width)# -M (don't plot intensity)
logit # -log ( plot log scale)
fmt # -fmt format
"""
def check_input_dir_path(input_dir_path, new_dir_path):
if input_dir_path and input_dir_path != new_dir_path:
pw.simple_warning("Please make sure that all input files come from the same directory")
return False
if not input_dir_path and new_dir_path:
return new_dir_path
elif input_dir_path == new_dir_path:
return input_dir_path
wait = wx.BusyInfo('Making plots, please wait...')
wx.SafeYield()
os.chdir(self.WD)
input_dir_path = None
meas_file = self.bSizer0.return_value()
if meas_file:
input_dir_path, meas_file = os.path.split(meas_file)
pmag_spec_file = self.bSizer0a.return_value()
if pmag_spec_file:
new_dir_path, pmag_spec_file = os.path.split(pmag_spec_file)
input_dir_path = check_input_dir_path(input_dir_path, new_dir_path)
if not input_dir_path:
del wait
return False
sum_file = self.bSizer2.return_value()
if sum_file:
new_dir_path, sum_file = os.path.split(sum_file)
input_dir_path = check_input_dir_path(input_dir_path, new_dir_path)
if not input_dir_path:
del wait
return False
spec_sym, spec_sym_shape, spec_sym_color, spec_sym_size = "", "", "", ""
if pmag_spec_file:
# get symbol/size for dots
spec_sym_shape = self.shape_choices_dict[self.bSizer0a2.return_value()]
spec_sym_color = self.bSizer0a1.return_value()[0]
spec_sym_size = self.bSizer0a3.return_value()
spec_sym = str(spec_sym_color) + str(spec_sym_shape)
use_sampfile = self.bSizer1a.return_value()
if use_sampfile:
new_dir_path, samp_file = os.path.split(str(self.bSizer1.return_value()))
age_file = ''
input_dir_path = check_input_dir_path(input_dir_path, new_dir_path)
if not input_dir_path:
del wait
return False
else:
samp_file = ''
new_dir_path, age_file = os.path.split(self.bSizer1.return_value())
input_dir_path = check_input_dir_path(input_dir_path, new_dir_path)
if not input_dir_path:
del wait
return False
depth_scale = self.bSizer8.return_value()
if age_file:
depth_scale='age'
elif depth_scale:
depth_scale = 'sample_core_depth' #'mbsf'
else:
depth_scale = 'sample_composite_depth' #'mcd'
dmin = self.bSizer6.return_value()
dmax = self.bSizer7.return_value()
if self.bSizer9.return_value(): # if plot GPTS is checked
pltTime = 1
timescale = self.bSizer10.return_value()
amin = self.bSizer11.return_value()
amax = self.bSizer12.return_value()
if not amin or not amax:
del wait
pw.simple_warning("If plotting timescale, you must provide both a lower and an upper bound.\nIf you don't want to plot timescale, uncheck the 'Plot GPTS' checkbox")
return False
else: # if plot GPTS is not checked
pltTime, timescale, amin, amax = 0, '', -1, -1
sym_shape = self.shape_choices_dict[self.bSizer5.return_value()]
sym_color = self.bSizer4.return_value()[0]
sym = sym_color + sym_shape
size = self.bSizer5a.return_value()
pltLine = self.bSizer5b.return_value()
if pltLine:
pltLine = 1
else:
pltLine = 0
method = str(self.bSizer13.return_value())
step = self.bSizer14.return_value()
if not step:
step = 0
method = 'LT-NO'
#if not step:
# #-LP [AF,T,ARM,IRM, X] step [in mT,C,mT,mT, mass/vol] to plot
# units_dict = {'AF': 'millitesla', 'T': 'degrees C', 'ARM': 'millitesla', 'IRM': 'millitesla', 'X': 'mass/vol'}
#unit = units_dict[method]
#pw.simple_warning("You must provide the experiment step in {}".format(unit))
#return False
pltDec, pltInc, pltMag, logit = 0, 0, 0, 0
for val in self.bSizer3.return_value():
if 'declination' in val:
pltDec = 1
if 'inclination' in val:
pltInc = 1
if 'magnetization' in val:
pltMag = 1
if 'log' in val:
logit = 1
#pltSus = self.bSizer15.return_value()
#if pltSus:
# pltSus = 0
#else:
# pltSus = 1
fmt = self.bSizer16.return_value()
#print "meas_file", meas_file, "pmag_spec_file", pmag_spec_file, "spec_sym_shape", spec_sym_shape, "spec_sym_color", spec_sym_color, "spec_sym_size", spec_sym_size, "samp_file", samp_file, "age_file", age_file, "depth_scale", depth_scale, "dmin", dmin, "dmax", dmax, "timescale", timescale, "amin", amin, "amax", amax, "sym", sym, "size", size, "method", method, "step", step, "pltDec", pltDec, "pltInc", pltInc, "pltMag", pltMag, "pltTime", pltTime, "logit", logit, "fmt", fmt
# for use as module:
#print "pltLine:", pltLine
#print "pltSus:", pltSus
fig, figname = ipmag.core_depthplot(input_dir_path or self.WD, meas_file, pmag_spec_file, samp_file, age_file, sum_file, '', depth_scale, dmin, dmax, sym, size, spec_sym, spec_sym_size, method, step, fmt, pltDec, pltInc, pltMag, pltLine, 1, logit, pltTime, timescale, amin, amax)
if fig:
self.Destroy()
dpi = fig.get_dpi()
pixel_width = dpi * fig.get_figwidth()
pixel_height = dpi * fig.get_figheight()
plot_frame = PlotFrame((pixel_width, pixel_height + 50), fig, figname)
del wait
return plot_frame
else:
del wait
pw.simple_warning("No data points met your criteria - try again\nError message: {}".format(figname))
return False
# for use as command_line:
if meas_file:
meas_file = os.path.split(meas_file)[1]
meas_file = pmag.add_flag(meas_file, '-f')
if pmag_spec_file:
pmag_spec_file = os.path.split(pmag_spec_file)[1]
pmag_spec_file = pmag.add_flag(pmag_spec_file, '-fsp')
pmag_spec_file = pmag_spec_file + ' ' + spec_sym_color + spec_sym_shape + ' ' + str(spec_sym_size)
sym = '-sym ' + sym + ' ' + str(size)
if samp_file:
samp_file = os.path.split(samp_file)[1]
samp_file = pmag.add_flag(samp_file, '-fsa')
if age_file:
age_file = os.path.split(age_file)[1]
age_file = pmag.add_flag(age_file, '-fa')
depth_scale = pmag.add_flag(depth_scale, '-ds')
depth_range = ''
if dmin and dmax:
depth_range = '-d ' + str(dmin) + ' ' + str(dmax)
if pltTime and amin and amax:
timescale = '-ts ' + timescale + ' ' + str(amin) + ' ' + str(amax)
else:
timescale = ''
method = pmag.add_flag(method, '-LP') + ' ' + str(step)
#if not pltSus:
# pltSus = "-L"
#else:
# pltSus = ''
if not pltDec:
pltDec = "-D"
else:
pltDec = ''
if not pltInc:
pltInc = "-I"
else:
pltInc = ''
if not pltMag:
pltMag = "-M"
else:
pltMag = ''
if pltLine:
pltLine = ""
else:
pltLine = '-L' # suppress line
if logit:
logit = "-log"
else:
logit = ''
fmt = pmag.add_flag(fmt, '-fmt')
COMMAND = "core_depthplot.py {meas_file} {pmag_spec_file} {sym} {samp_file} {age_file} {depth_scale} {depth_range} {timescale} {method} {pltDec} {pltInc} {pltMag} {logit} {fmt} {pltLine} -WD {WD}".format(meas_file=meas_file, pmag_spec_file=pmag_spec_file, sym=sym, samp_file=samp_file, age_file=age_file, depth_scale=depth_scale, depth_range=depth_range, timescale=timescale, method=method, pltDec=pltDec, pltInc=pltInc, pltMag=pltMag, logit=logit, fmt=fmt, pltLine=pltLine, WD=self.WD)
print(COMMAND)
#os.system(COMMAND)
"""
haven't done these options yet
wt_file (also sets norm)# -n specimen_filename
spc_file, spc_sym, spc_size # -fsp spec_file symbol_shape symbol_size
res_file, res_sym, res_size # -fres pmag_results_file symbol_shape symbol_size
wig_file (also sets pcol, width) # -fwig wiggle_file(???)
sum_file # -fsum IODP_core_summary_csv_file
(sets plots & verbose) # -sav
""" | python | def on_okButton(self, event):
"""
meas_file # -f magic_measurements_file
samp_file #-fsa er_samples_file
age_file # -fa er_ages_file
depth_scale # -ds scale
dmin, dmax # -d 1 50 # depth to plot
timescale, amin, amax (also sets pTS, pcol, width) = # -ts scale min max
sym, size # -sym symbol size
method, step (also may set suc_key) # -LP protocol step
pltDec (also sets pcol, pel, width)# -D (don't plot dec)
pltInc (also sets pcol, pel, width)# -I (don't plot inc)
pltMag (also sets pcol, pel, width)# -M (don't plot intensity)
logit # -log ( plot log scale)
fmt # -fmt format
"""
def check_input_dir_path(input_dir_path, new_dir_path):
if input_dir_path and input_dir_path != new_dir_path:
pw.simple_warning("Please make sure that all input files come from the same directory")
return False
if not input_dir_path and new_dir_path:
return new_dir_path
elif input_dir_path == new_dir_path:
return input_dir_path
wait = wx.BusyInfo('Making plots, please wait...')
wx.SafeYield()
os.chdir(self.WD)
input_dir_path = None
meas_file = self.bSizer0.return_value()
if meas_file:
input_dir_path, meas_file = os.path.split(meas_file)
pmag_spec_file = self.bSizer0a.return_value()
if pmag_spec_file:
new_dir_path, pmag_spec_file = os.path.split(pmag_spec_file)
input_dir_path = check_input_dir_path(input_dir_path, new_dir_path)
if not input_dir_path:
del wait
return False
sum_file = self.bSizer2.return_value()
if sum_file:
new_dir_path, sum_file = os.path.split(sum_file)
input_dir_path = check_input_dir_path(input_dir_path, new_dir_path)
if not input_dir_path:
del wait
return False
spec_sym, spec_sym_shape, spec_sym_color, spec_sym_size = "", "", "", ""
if pmag_spec_file:
# get symbol/size for dots
spec_sym_shape = self.shape_choices_dict[self.bSizer0a2.return_value()]
spec_sym_color = self.bSizer0a1.return_value()[0]
spec_sym_size = self.bSizer0a3.return_value()
spec_sym = str(spec_sym_color) + str(spec_sym_shape)
use_sampfile = self.bSizer1a.return_value()
if use_sampfile:
new_dir_path, samp_file = os.path.split(str(self.bSizer1.return_value()))
age_file = ''
input_dir_path = check_input_dir_path(input_dir_path, new_dir_path)
if not input_dir_path:
del wait
return False
else:
samp_file = ''
new_dir_path, age_file = os.path.split(self.bSizer1.return_value())
input_dir_path = check_input_dir_path(input_dir_path, new_dir_path)
if not input_dir_path:
del wait
return False
depth_scale = self.bSizer8.return_value()
if age_file:
depth_scale='age'
elif depth_scale:
depth_scale = 'sample_core_depth' #'mbsf'
else:
depth_scale = 'sample_composite_depth' #'mcd'
dmin = self.bSizer6.return_value()
dmax = self.bSizer7.return_value()
if self.bSizer9.return_value(): # if plot GPTS is checked
pltTime = 1
timescale = self.bSizer10.return_value()
amin = self.bSizer11.return_value()
amax = self.bSizer12.return_value()
if not amin or not amax:
del wait
pw.simple_warning("If plotting timescale, you must provide both a lower and an upper bound.\nIf you don't want to plot timescale, uncheck the 'Plot GPTS' checkbox")
return False
else: # if plot GPTS is not checked
pltTime, timescale, amin, amax = 0, '', -1, -1
sym_shape = self.shape_choices_dict[self.bSizer5.return_value()]
sym_color = self.bSizer4.return_value()[0]
sym = sym_color + sym_shape
size = self.bSizer5a.return_value()
pltLine = self.bSizer5b.return_value()
if pltLine:
pltLine = 1
else:
pltLine = 0
method = str(self.bSizer13.return_value())
step = self.bSizer14.return_value()
if not step:
step = 0
method = 'LT-NO'
#if not step:
# #-LP [AF,T,ARM,IRM, X] step [in mT,C,mT,mT, mass/vol] to plot
# units_dict = {'AF': 'millitesla', 'T': 'degrees C', 'ARM': 'millitesla', 'IRM': 'millitesla', 'X': 'mass/vol'}
#unit = units_dict[method]
#pw.simple_warning("You must provide the experiment step in {}".format(unit))
#return False
pltDec, pltInc, pltMag, logit = 0, 0, 0, 0
for val in self.bSizer3.return_value():
if 'declination' in val:
pltDec = 1
if 'inclination' in val:
pltInc = 1
if 'magnetization' in val:
pltMag = 1
if 'log' in val:
logit = 1
#pltSus = self.bSizer15.return_value()
#if pltSus:
# pltSus = 0
#else:
# pltSus = 1
fmt = self.bSizer16.return_value()
#print "meas_file", meas_file, "pmag_spec_file", pmag_spec_file, "spec_sym_shape", spec_sym_shape, "spec_sym_color", spec_sym_color, "spec_sym_size", spec_sym_size, "samp_file", samp_file, "age_file", age_file, "depth_scale", depth_scale, "dmin", dmin, "dmax", dmax, "timescale", timescale, "amin", amin, "amax", amax, "sym", sym, "size", size, "method", method, "step", step, "pltDec", pltDec, "pltInc", pltInc, "pltMag", pltMag, "pltTime", pltTime, "logit", logit, "fmt", fmt
# for use as module:
#print "pltLine:", pltLine
#print "pltSus:", pltSus
fig, figname = ipmag.core_depthplot(input_dir_path or self.WD, meas_file, pmag_spec_file, samp_file, age_file, sum_file, '', depth_scale, dmin, dmax, sym, size, spec_sym, spec_sym_size, method, step, fmt, pltDec, pltInc, pltMag, pltLine, 1, logit, pltTime, timescale, amin, amax)
if fig:
self.Destroy()
dpi = fig.get_dpi()
pixel_width = dpi * fig.get_figwidth()
pixel_height = dpi * fig.get_figheight()
plot_frame = PlotFrame((pixel_width, pixel_height + 50), fig, figname)
del wait
return plot_frame
else:
del wait
pw.simple_warning("No data points met your criteria - try again\nError message: {}".format(figname))
return False
# for use as command_line:
if meas_file:
meas_file = os.path.split(meas_file)[1]
meas_file = pmag.add_flag(meas_file, '-f')
if pmag_spec_file:
pmag_spec_file = os.path.split(pmag_spec_file)[1]
pmag_spec_file = pmag.add_flag(pmag_spec_file, '-fsp')
pmag_spec_file = pmag_spec_file + ' ' + spec_sym_color + spec_sym_shape + ' ' + str(spec_sym_size)
sym = '-sym ' + sym + ' ' + str(size)
if samp_file:
samp_file = os.path.split(samp_file)[1]
samp_file = pmag.add_flag(samp_file, '-fsa')
if age_file:
age_file = os.path.split(age_file)[1]
age_file = pmag.add_flag(age_file, '-fa')
depth_scale = pmag.add_flag(depth_scale, '-ds')
depth_range = ''
if dmin and dmax:
depth_range = '-d ' + str(dmin) + ' ' + str(dmax)
if pltTime and amin and amax:
timescale = '-ts ' + timescale + ' ' + str(amin) + ' ' + str(amax)
else:
timescale = ''
method = pmag.add_flag(method, '-LP') + ' ' + str(step)
#if not pltSus:
# pltSus = "-L"
#else:
# pltSus = ''
if not pltDec:
pltDec = "-D"
else:
pltDec = ''
if not pltInc:
pltInc = "-I"
else:
pltInc = ''
if not pltMag:
pltMag = "-M"
else:
pltMag = ''
if pltLine:
pltLine = ""
else:
pltLine = '-L' # suppress line
if logit:
logit = "-log"
else:
logit = ''
fmt = pmag.add_flag(fmt, '-fmt')
COMMAND = "core_depthplot.py {meas_file} {pmag_spec_file} {sym} {samp_file} {age_file} {depth_scale} {depth_range} {timescale} {method} {pltDec} {pltInc} {pltMag} {logit} {fmt} {pltLine} -WD {WD}".format(meas_file=meas_file, pmag_spec_file=pmag_spec_file, sym=sym, samp_file=samp_file, age_file=age_file, depth_scale=depth_scale, depth_range=depth_range, timescale=timescale, method=method, pltDec=pltDec, pltInc=pltInc, pltMag=pltMag, logit=logit, fmt=fmt, pltLine=pltLine, WD=self.WD)
print(COMMAND)
#os.system(COMMAND)
"""
haven't done these options yet
wt_file (also sets norm)# -n specimen_filename
spc_file, spc_sym, spc_size # -fsp spec_file symbol_shape symbol_size
res_file, res_sym, res_size # -fres pmag_results_file symbol_shape symbol_size
wig_file (also sets pcol, width) # -fwig wiggle_file(???)
sum_file # -fsum IODP_core_summary_csv_file
(sets plots & verbose) # -sav
""" | meas_file # -f magic_measurements_file
samp_file #-fsa er_samples_file
age_file # -fa er_ages_file
depth_scale # -ds scale
dmin, dmax # -d 1 50 # depth to plot
timescale, amin, amax (also sets pTS, pcol, width) = # -ts scale min max
sym, size # -sym symbol size
method, step (also may set suc_key) # -LP protocol step
pltDec (also sets pcol, pel, width)# -D (don't plot dec)
pltInc (also sets pcol, pel, width)# -I (don't plot inc)
pltMag (also sets pcol, pel, width)# -M (don't plot intensity)
logit # -log ( plot log scale)
fmt # -fmt format | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_menu_dialogs.py#L1877-L2093 |
PmagPy/PmagPy | programs/deprecated/odp_srm_magic.py | main | def main():
"""
NAME
odp_srm_magic.py
DESCRIPTION
converts ODP measurement format files to magic_measurements format files
SYNTAX
odp_srm_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-F FILE: specify output measurements file, default is magic_measurements.txt
-Fsa FILE: specify output er_sample.txt file, default is er_sample.txt
-A : don't average replicate measurements
INPUT
put data from a single core into a directory. depths will be below core top
"""
#
#
version_num=pmag.get_version()
meas_file='magic_measurements.txt'
samp_file='er_samples.txt'
ErSpecs,ErSamps,ErSites,ErLocs,ErCits=[],[],[],[],[]
MagRecs=[]
citation="This study"
dir_path,demag='.','NRM'
args=sys.argv
noave=0,
if '-WD' in args:
ind=args.index("-WD")
dir_path=args[ind+1]
if "-h" in args:
print(main.__doc__)
sys.exit()
if "-A" in args: noave=1
if '-F' in args:
ind=args.index("-F")
meas_file=args[ind+1]
if '-Fsa' in args:
ind=args.index("-Fsa")
samp_file=args[ind+1]
if '-LP' in args:
ind=args.index("-LP")
codelist=args[ind+1]
codes=codelist.split(':')
if "AF" in codes:
demag='AF'
if'-dc' not in args: methcode="LT-AF-Z"
if'-dc' in args: methcode="LT-AF-I"
if "T" in codes:
demag="T"
if '-dc' not in args: methcode="LT-T-Z"
if '-dc' in args: methcode="LT-T-I"
if "I" in codes:
methcode="LP-IRM"
if "S" in codes:
demag="S"
methcode="LP-PI-TRM:LP-PI-ALT-AFARM"
trm_labfield=labfield
ans=input("DC lab field for ARM step: [50uT] ")
if ans=="":
arm_labfield=50e-6
else:
arm_labfield=float(ans)*1e-6
ans=input("temperature for total trm step: [600 C] ")
if ans=="":
trm_peakT=600+273 # convert to kelvin
else:
trm_peakT=float(ans)+273 # convert to kelvin
if "G" in codes: methcode="LT-AF-G"
if "D" in codes: methcode="LT-AF-D"
if "TRM" in codes:
demag="T"
trm=1
if demag=="T" and "ANI" in codes:
methcode="LP-AN-TRM"
if demag=="AF" and "ANI" in codes:
methcode="LP-AN-ARM"
if labfield==0: labfield=50e-6
if peakfield==0: peakfield=.180
samp_file=dir_path+'/'+samp_file
meas_file=dir_path+'/'+meas_file
filelist=os.listdir(dir_path) # read in list of files to import
specimens,samples,sites=[],[],[]
MagRecs,SpecRecs,SampRecs=[],[],[]
for file in filelist: # parse each file
if file[-3:].lower()=='srm':
print('processing: ',file)
Nfo=file.split('_')[0].split('-')
try:
sect=int(Nfo[3][:-1])
except:
sect=1
input=open(file,'r').readlines()
MagRec,SpecRec,SampRec={},{},{}
alt_spec,treatment_type,treatment_value,user="","","",""
inst="ODP-SRM"
SampRec['sample_azimuth']='0'
SampRec['sample_dip']='0'
SampRec['magic_method_code']='FS-C-DRILL-IODP:SP-SS-C'
MagRec['er_analyst_mail_names']=user
MagRec['magic_method_codes']='LT-NO'
MagRec['magic_software_packages']=version_num
MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["treatment_ac_field"]='0'
MagRec["treatment_dc_field"]='0'
MagRec["treatment_dc_field_phi"]='0'
MagRec["treatment_dc_field_theta"]='0'
MagRec["measurement_flag"]='g' # assume all data are "good"
MagRec["measurement_standard"]='u' # assume all data are "good"
MagRec["measurement_csd"]='' # set csd to blank
SpecRec['er_specimen_alternatives']=alt_spec
vol=7e-6 # assume 7 cc samples
datestamp=input[1].split() # date time is second line of file
mmddyy=datestamp[0].split('/') # break into month day year
date=mmddyy[2]+':'+mmddyy[0]+":"+mmddyy[1] +':' +datestamp[1]
MagRec["measurement_date"]=date
treatment_value,inst="","ODP-SRM"
k=0
while 1:
fields= input[k].replace('\n','').split("=")
if 'treatment_type' in fields[0]:
if "Alternating Frequency Demagnetization" in fields[1]:
MagRec['magic_method_codes'] = 'LT-AF-Z'
inst=inst+':ODP-DTECH' # measured on shipboard AF DTECH D2000
if "treatment_value" in fields[0]:
value=fields[1]
if value!=" ":
treatment_value=float(value)*1e-3
MagRec["treatment_ac_field"]='%8.3e'%(treatment_value) # AF demag in treat mT => T
if 'user' in fields[0]:
user=fields[-1]
MagRec["er_analyst_mail_names"]=user
MagRec["measurement_standard"]='u' # assume all data are "good"
if 'sample_area' in fields[0]: vol=float(fields[1])*1e-6 # takes volume (cc) and converts to m^3
if 'run_number' in fields[0]:
MagRec['external_database_ids']=fields[1] # run number is the LIMS measurement number
MagRec['external_database_names']='LIMS'
k+=1
if input[k][0:7]=='<MULTI>':
break
while 1:
k+=1
line = input[k]
if line[0:5]=='<RAW>':
break
treatment_value=""
rec=line.replace('\n','').split(',') # list of data
if len(rec)>2:
MeasRec,SampRec={},{'core_depth':'0','er_sample_name':'0','er_site_name':'0','er_location_name':'location'}
for key in list(MagRec.keys()):MeasRec[key]=MagRec[key]
for item in rec:
items=item.split('=')
if 'demag_level' in items[0]:
treat= float(items[1])
if treat!=0:
MeasRec['magic_method_codes']='LT-AF-Z'
inst=inst+':ODP-SRM-AF'
MeasRec["treatment_ac_field"]='%8.3e'%(treat*1e-3) # AF demag in treat mT => T
if 'inclination_w_tray_w_bkgrd' in items[0]: MeasRec['measurement_inc']=items[1]
if 'declination_w_tray_w_bkgrd' in items[0]: MeasRec['measurement_dec']=items[1]
if 'intensity_w_tray_w_bkgrd' in items[0]: MeasRec['measurement_magn_moment']='%8.3e'%(float(items[1])*vol) # convert intensity from A/m to Am^2 using vol
MeasRec['magic_instrument_codes']=inst
if 'offset' in items[0]:
depth='%7.3f'%(float(sect-1)*1.5+float(items[1]))
SampRec['core_depth']=depth
MeasRec['er_specimen_name']=depth
MeasRec['er_sample_name']=depth
MeasRec['er_site_name']=depth
MeasRec['er_location_name']='location'
SampRec['er_sample_name']=depth
SampRec['er_site_name']=depth
SampRec['er_location_name']='location'
MeasRec['measurement_number']='1'
SampRecs.append(SampRec)
MagRecs.append(MeasRec)
pmag.magic_write(samp_file,SampRecs,'er_samples')
print('samples stored in ',samp_file)
Fixed=pmag.measurements_methods(MagRecs,noave)
pmag.magic_write(meas_file,Fixed,'magic_measurements')
print('data stored in ',meas_file) | python | def main():
"""
NAME
odp_srm_magic.py
DESCRIPTION
converts ODP measurement format files to magic_measurements format files
SYNTAX
odp_srm_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-F FILE: specify output measurements file, default is magic_measurements.txt
-Fsa FILE: specify output er_sample.txt file, default is er_sample.txt
-A : don't average replicate measurements
INPUT
put data from a single core into a directory. depths will be below core top
"""
#
#
version_num=pmag.get_version()
meas_file='magic_measurements.txt'
samp_file='er_samples.txt'
ErSpecs,ErSamps,ErSites,ErLocs,ErCits=[],[],[],[],[]
MagRecs=[]
citation="This study"
dir_path,demag='.','NRM'
args=sys.argv
noave=0,
if '-WD' in args:
ind=args.index("-WD")
dir_path=args[ind+1]
if "-h" in args:
print(main.__doc__)
sys.exit()
if "-A" in args: noave=1
if '-F' in args:
ind=args.index("-F")
meas_file=args[ind+1]
if '-Fsa' in args:
ind=args.index("-Fsa")
samp_file=args[ind+1]
if '-LP' in args:
ind=args.index("-LP")
codelist=args[ind+1]
codes=codelist.split(':')
if "AF" in codes:
demag='AF'
if'-dc' not in args: methcode="LT-AF-Z"
if'-dc' in args: methcode="LT-AF-I"
if "T" in codes:
demag="T"
if '-dc' not in args: methcode="LT-T-Z"
if '-dc' in args: methcode="LT-T-I"
if "I" in codes:
methcode="LP-IRM"
if "S" in codes:
demag="S"
methcode="LP-PI-TRM:LP-PI-ALT-AFARM"
trm_labfield=labfield
ans=input("DC lab field for ARM step: [50uT] ")
if ans=="":
arm_labfield=50e-6
else:
arm_labfield=float(ans)*1e-6
ans=input("temperature for total trm step: [600 C] ")
if ans=="":
trm_peakT=600+273 # convert to kelvin
else:
trm_peakT=float(ans)+273 # convert to kelvin
if "G" in codes: methcode="LT-AF-G"
if "D" in codes: methcode="LT-AF-D"
if "TRM" in codes:
demag="T"
trm=1
if demag=="T" and "ANI" in codes:
methcode="LP-AN-TRM"
if demag=="AF" and "ANI" in codes:
methcode="LP-AN-ARM"
if labfield==0: labfield=50e-6
if peakfield==0: peakfield=.180
samp_file=dir_path+'/'+samp_file
meas_file=dir_path+'/'+meas_file
filelist=os.listdir(dir_path) # read in list of files to import
specimens,samples,sites=[],[],[]
MagRecs,SpecRecs,SampRecs=[],[],[]
for file in filelist: # parse each file
if file[-3:].lower()=='srm':
print('processing: ',file)
Nfo=file.split('_')[0].split('-')
try:
sect=int(Nfo[3][:-1])
except:
sect=1
input=open(file,'r').readlines()
MagRec,SpecRec,SampRec={},{},{}
alt_spec,treatment_type,treatment_value,user="","","",""
inst="ODP-SRM"
SampRec['sample_azimuth']='0'
SampRec['sample_dip']='0'
SampRec['magic_method_code']='FS-C-DRILL-IODP:SP-SS-C'
MagRec['er_analyst_mail_names']=user
MagRec['magic_method_codes']='LT-NO'
MagRec['magic_software_packages']=version_num
MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["treatment_ac_field"]='0'
MagRec["treatment_dc_field"]='0'
MagRec["treatment_dc_field_phi"]='0'
MagRec["treatment_dc_field_theta"]='0'
MagRec["measurement_flag"]='g' # assume all data are "good"
MagRec["measurement_standard"]='u' # assume all data are "good"
MagRec["measurement_csd"]='' # set csd to blank
SpecRec['er_specimen_alternatives']=alt_spec
vol=7e-6 # assume 7 cc samples
datestamp=input[1].split() # date time is second line of file
mmddyy=datestamp[0].split('/') # break into month day year
date=mmddyy[2]+':'+mmddyy[0]+":"+mmddyy[1] +':' +datestamp[1]
MagRec["measurement_date"]=date
treatment_value,inst="","ODP-SRM"
k=0
while 1:
fields= input[k].replace('\n','').split("=")
if 'treatment_type' in fields[0]:
if "Alternating Frequency Demagnetization" in fields[1]:
MagRec['magic_method_codes'] = 'LT-AF-Z'
inst=inst+':ODP-DTECH' # measured on shipboard AF DTECH D2000
if "treatment_value" in fields[0]:
value=fields[1]
if value!=" ":
treatment_value=float(value)*1e-3
MagRec["treatment_ac_field"]='%8.3e'%(treatment_value) # AF demag in treat mT => T
if 'user' in fields[0]:
user=fields[-1]
MagRec["er_analyst_mail_names"]=user
MagRec["measurement_standard"]='u' # assume all data are "good"
if 'sample_area' in fields[0]: vol=float(fields[1])*1e-6 # takes volume (cc) and converts to m^3
if 'run_number' in fields[0]:
MagRec['external_database_ids']=fields[1] # run number is the LIMS measurement number
MagRec['external_database_names']='LIMS'
k+=1
if input[k][0:7]=='<MULTI>':
break
while 1:
k+=1
line = input[k]
if line[0:5]=='<RAW>':
break
treatment_value=""
rec=line.replace('\n','').split(',') # list of data
if len(rec)>2:
MeasRec,SampRec={},{'core_depth':'0','er_sample_name':'0','er_site_name':'0','er_location_name':'location'}
for key in list(MagRec.keys()):MeasRec[key]=MagRec[key]
for item in rec:
items=item.split('=')
if 'demag_level' in items[0]:
treat= float(items[1])
if treat!=0:
MeasRec['magic_method_codes']='LT-AF-Z'
inst=inst+':ODP-SRM-AF'
MeasRec["treatment_ac_field"]='%8.3e'%(treat*1e-3) # AF demag in treat mT => T
if 'inclination_w_tray_w_bkgrd' in items[0]: MeasRec['measurement_inc']=items[1]
if 'declination_w_tray_w_bkgrd' in items[0]: MeasRec['measurement_dec']=items[1]
if 'intensity_w_tray_w_bkgrd' in items[0]: MeasRec['measurement_magn_moment']='%8.3e'%(float(items[1])*vol) # convert intensity from A/m to Am^2 using vol
MeasRec['magic_instrument_codes']=inst
if 'offset' in items[0]:
depth='%7.3f'%(float(sect-1)*1.5+float(items[1]))
SampRec['core_depth']=depth
MeasRec['er_specimen_name']=depth
MeasRec['er_sample_name']=depth
MeasRec['er_site_name']=depth
MeasRec['er_location_name']='location'
SampRec['er_sample_name']=depth
SampRec['er_site_name']=depth
SampRec['er_location_name']='location'
MeasRec['measurement_number']='1'
SampRecs.append(SampRec)
MagRecs.append(MeasRec)
pmag.magic_write(samp_file,SampRecs,'er_samples')
print('samples stored in ',samp_file)
Fixed=pmag.measurements_methods(MagRecs,noave)
pmag.magic_write(meas_file,Fixed,'magic_measurements')
print('data stored in ',meas_file) | NAME
odp_srm_magic.py
DESCRIPTION
converts ODP measurement format files to magic_measurements format files
SYNTAX
odp_srm_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-F FILE: specify output measurements file, default is magic_measurements.txt
-Fsa FILE: specify output er_sample.txt file, default is er_sample.txt
-A : don't average replicate measurements
INPUT
put data from a single core into a directory. depths will be below core top | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/deprecated/odp_srm_magic.py#L8-L192 |
PmagPy/PmagPy | programs/conversion_scripts/kly4s_magic.py | main | def main():
"""
NAME
kly4s_magic.py
DESCRIPTION
converts files generated by SIO kly4S labview program to MagIC formated
files for use with PmagPy plotting software
SYNTAX
kly4s_magic.py -h [command line options]
OPTIONS
-h: prints the help message and quits
-f FILE: specify .ams input file name
-fad AZDIP: specify AZDIP file with orientations, will create er_samples.txt file
-fsa SFILE: specify existing er_samples.txt file with orientation information
-fsp SPFILE: specify existing er_specimens.txt file for appending
-F MFILE: specify magic_measurements output file
-Fa AFILE: specify rmag_anisotropy output file
-ocn ORCON: specify orientation convention: default is #3 below -only with AZDIP file
-usr USER: specify who made the measurements
-loc LOC: specify location name for study
-ins INST: specify instrument used
-spc SPEC: specify number of characters to specify specimen from sample
-ncn NCON: specify naming convention: default is #1 below
DEFAULTS
MFILE: magic_measurements.txt
AFILE: rmag_anisotropy.txt
SPFILE: create new er_specimens.txt file
USER: ""
LOC: "unknown"
INST: "SIO-KLY4S"
SPEC: 1 specimen name is same as sample (if SPEC is 1, sample is all but last character)
NOTES:
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXXYYY: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
NB: all others you will have to either customize your
self or e-mail [email protected] for help.
Orientation convention:
[1] Lab arrow azimuth= azimuth; Lab arrow dip=-dip
i.e., dip is degrees from vertical down - the hade [default]
[2] Lab arrow azimuth = azimuth-90; Lab arrow dip = -dip
i.e., azimuth is strike and dip is hade
[3] Lab arrow azimuth = azimuth; Lab arrow dip = dip-90
e.g. dip is degrees from horizontal of drill direction
[4] Lab arrow azimuth = azimuth; Lab arrow dip = dip
[5] Lab arrow azimuth = azimuth; Lab arrow dip = 90-dip
[6] all others you will have to either customize your
self or e-mail [email protected] for help.
"""
args = sys.argv
if '-h' in args:
print(main.__doc__)
sys.exit()
dataframe = extractor.command_line_dataframe([['f', True, ''], ['fad', False, ''],
['fsa', False, ''], ['fsp', False, ''],
['Fsp', False, 'specimens.txt'], ['F', False, 'measurements.txt'],
['Fa', False, 'rmag_anisotropy.txt'], ['ocn', False, '3'],
['usr', False, ''], ['loc', False, ''],
['ins', False, 'SIO-KLY4S'], ['spc', False, 0],
['ncn', False, '1'], ['WD', False, '.'],
['ID', False, '.'], ['DM', False, 3 ]])
checked_args = extractor.extract_and_check_args(args, dataframe)
infile, azdip_infile, samp_infile, spec_infile, spec_outfile, measfile, aniso_outfile, or_con, user, locname, inst, specnum, samp_con, output_dir_path, input_dir_path, data_model_num = extractor.get_vars(['f', 'fad', 'fsa', 'fsp', 'Fsp', 'F', 'Fa', 'ocn', 'usr', 'loc', 'ins', 'spc', 'ncn', 'WD', 'ID', 'DM'], checked_args)
convert.kly4s(infile, specnum=specnum, locname=locname, inst=inst,
user=user, measfile=measfile,or_con=or_con,
samp_con=samp_con, aniso_outfile=aniso_outfile,
samp_infile=samp_infile, spec_infile=spec_infile,
spec_outfile=spec_outfile, azdip_infile=azdip_infile,
dir_path=output_dir_path, input_dir_path=input_dir_path,
data_model_num=data_model_num) | python | def main():
"""
NAME
kly4s_magic.py
DESCRIPTION
converts files generated by SIO kly4S labview program to MagIC formated
files for use with PmagPy plotting software
SYNTAX
kly4s_magic.py -h [command line options]
OPTIONS
-h: prints the help message and quits
-f FILE: specify .ams input file name
-fad AZDIP: specify AZDIP file with orientations, will create er_samples.txt file
-fsa SFILE: specify existing er_samples.txt file with orientation information
-fsp SPFILE: specify existing er_specimens.txt file for appending
-F MFILE: specify magic_measurements output file
-Fa AFILE: specify rmag_anisotropy output file
-ocn ORCON: specify orientation convention: default is #3 below -only with AZDIP file
-usr USER: specify who made the measurements
-loc LOC: specify location name for study
-ins INST: specify instrument used
-spc SPEC: specify number of characters to specify specimen from sample
-ncn NCON: specify naming convention: default is #1 below
DEFAULTS
MFILE: magic_measurements.txt
AFILE: rmag_anisotropy.txt
SPFILE: create new er_specimens.txt file
USER: ""
LOC: "unknown"
INST: "SIO-KLY4S"
SPEC: 1 specimen name is same as sample (if SPEC is 1, sample is all but last character)
NOTES:
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXXYYY: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
NB: all others you will have to either customize your
self or e-mail [email protected] for help.
Orientation convention:
[1] Lab arrow azimuth= azimuth; Lab arrow dip=-dip
i.e., dip is degrees from vertical down - the hade [default]
[2] Lab arrow azimuth = azimuth-90; Lab arrow dip = -dip
i.e., azimuth is strike and dip is hade
[3] Lab arrow azimuth = azimuth; Lab arrow dip = dip-90
e.g. dip is degrees from horizontal of drill direction
[4] Lab arrow azimuth = azimuth; Lab arrow dip = dip
[5] Lab arrow azimuth = azimuth; Lab arrow dip = 90-dip
[6] all others you will have to either customize your
self or e-mail [email protected] for help.
"""
args = sys.argv
if '-h' in args:
print(main.__doc__)
sys.exit()
dataframe = extractor.command_line_dataframe([['f', True, ''], ['fad', False, ''],
['fsa', False, ''], ['fsp', False, ''],
['Fsp', False, 'specimens.txt'], ['F', False, 'measurements.txt'],
['Fa', False, 'rmag_anisotropy.txt'], ['ocn', False, '3'],
['usr', False, ''], ['loc', False, ''],
['ins', False, 'SIO-KLY4S'], ['spc', False, 0],
['ncn', False, '1'], ['WD', False, '.'],
['ID', False, '.'], ['DM', False, 3 ]])
checked_args = extractor.extract_and_check_args(args, dataframe)
infile, azdip_infile, samp_infile, spec_infile, spec_outfile, measfile, aniso_outfile, or_con, user, locname, inst, specnum, samp_con, output_dir_path, input_dir_path, data_model_num = extractor.get_vars(['f', 'fad', 'fsa', 'fsp', 'Fsp', 'F', 'Fa', 'ocn', 'usr', 'loc', 'ins', 'spc', 'ncn', 'WD', 'ID', 'DM'], checked_args)
convert.kly4s(infile, specnum=specnum, locname=locname, inst=inst,
user=user, measfile=measfile,or_con=or_con,
samp_con=samp_con, aniso_outfile=aniso_outfile,
samp_infile=samp_infile, spec_infile=spec_infile,
spec_outfile=spec_outfile, azdip_infile=azdip_infile,
dir_path=output_dir_path, input_dir_path=input_dir_path,
data_model_num=data_model_num) | NAME
kly4s_magic.py
DESCRIPTION
converts files generated by SIO kly4S labview program to MagIC formated
files for use with PmagPy plotting software
SYNTAX
kly4s_magic.py -h [command line options]
OPTIONS
-h: prints the help message and quits
-f FILE: specify .ams input file name
-fad AZDIP: specify AZDIP file with orientations, will create er_samples.txt file
-fsa SFILE: specify existing er_samples.txt file with orientation information
-fsp SPFILE: specify existing er_specimens.txt file for appending
-F MFILE: specify magic_measurements output file
-Fa AFILE: specify rmag_anisotropy output file
-ocn ORCON: specify orientation convention: default is #3 below -only with AZDIP file
-usr USER: specify who made the measurements
-loc LOC: specify location name for study
-ins INST: specify instrument used
-spc SPEC: specify number of characters to specify specimen from sample
-ncn NCON: specify naming convention: default is #1 below
DEFAULTS
MFILE: magic_measurements.txt
AFILE: rmag_anisotropy.txt
SPFILE: create new er_specimens.txt file
USER: ""
LOC: "unknown"
INST: "SIO-KLY4S"
SPEC: 1 specimen name is same as sample (if SPEC is 1, sample is all but last character)
NOTES:
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXXYYY: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
NB: all others you will have to either customize your
self or e-mail [email protected] for help.
Orientation convention:
[1] Lab arrow azimuth= azimuth; Lab arrow dip=-dip
i.e., dip is degrees from vertical down - the hade [default]
[2] Lab arrow azimuth = azimuth-90; Lab arrow dip = -dip
i.e., azimuth is strike and dip is hade
[3] Lab arrow azimuth = azimuth; Lab arrow dip = dip-90
e.g. dip is degrees from horizontal of drill direction
[4] Lab arrow azimuth = azimuth; Lab arrow dip = dip
[5] Lab arrow azimuth = azimuth; Lab arrow dip = 90-dip
[6] all others you will have to either customize your
self or e-mail [email protected] for help. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/conversion_scripts/kly4s_magic.py#L6-L89 |
PmagPy/PmagPy | programs/plotxy.py | main | def main():
"""
NAME
plotXY.py
DESCRIPTION
Makes simple X,Y plots
INPUT FORMAT
X,Y data in columns
SYNTAX
plotxy.py [command line options]
OPTIONS
-h prints this help message
-f FILE to set file name on command line
-c col1 col2 specify columns to plot
-xsig col3 specify xsigma if desired
-ysig col4 specify xsigma if desired
-b xmin xmax ymin ymax, sets bounds
-sym SYM SIZE specify symbol to plot: default is red dots, 10 pt
-S don't plot the symbols
-xlab XLAB
-ylab YLAB
-l connect symbols with lines
-fmt [svg,png,pdf,eps] specify output format, default is svg
-sav saves plot and quits
-poly X plot a degree X polynomial through the data
-skip n Number of lines to skip before reading in data
"""
fmt,plot='svg',0
col1,col2=0,1
sym,size = 'ro',50
xlab,ylab='',''
lines=0
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt=sys.argv[ind+1]
if '-sav' in sys.argv:plot=1
if '-c' in sys.argv:
ind=sys.argv.index('-c')
col1=int(sys.argv[ind+1])-1
col2=int(sys.argv[ind+2])-1
if '-xsig' in sys.argv:
ind=sys.argv.index('-xsig')
col3=int(sys.argv[ind+1])-1
if '-ysig' in sys.argv:
ind=sys.argv.index('-ysig')
col4=int(sys.argv[ind+1])-1
if '-xlab' in sys.argv:
ind=sys.argv.index('-xlab')
xlab=sys.argv[ind+1]
if '-ylab' in sys.argv:
ind=sys.argv.index('-ylab')
ylab=sys.argv[ind+1]
if '-b' in sys.argv:
ind=sys.argv.index('-b')
xmin=float(sys.argv[ind+1])
xmax=float(sys.argv[ind+2])
ymin=float(sys.argv[ind+3])
ymax=float(sys.argv[ind+4])
if '-poly' in sys.argv:
ind=sys.argv.index('-poly')
degr=sys.argv[ind+1]
if '-sym' in sys.argv:
ind=sys.argv.index('-sym')
sym=sys.argv[ind+1]
size=int(sys.argv[ind+2])
if '-l' in sys.argv: lines=1
if '-S' in sys.argv: sym=''
skip = int(pmag.get_named_arg('-skip', default_val=0))
X,Y=[],[]
Xerrs,Yerrs=[],[]
f=open(file,'r')
for num in range(skip):
f.readline()
data=f.readlines()
for line in data:
line.replace('\n','')
line.replace('\t',' ')
rec=line.split()
X.append(float(rec[col1]))
Y.append(float(rec[col2]))
if '-xsig' in sys.argv:Xerrs.append(float(rec[col3]))
if '-ysig' in sys.argv:Yerrs.append(float(rec[col4]))
if '-poly' in sys.argv:
pylab.plot(xs,ys)
coeffs=numpy.polyfit(X,Y,degr)
correl=numpy.corrcoef(X,Y)**2
polynomial=numpy.poly1d(coeffs)
xs=numpy.linspace(numpy.min(X),numpy.max(X),10)
ys=polynomial(xs)
pylab.plot(xs,ys)
print(polynomial)
if degr=='1': print('R-square value =', '%5.4f'%(correl[0,1]))
if sym!='':
pylab.scatter(X,Y,marker=sym[1],c=sym[0],s=size)
else:
pylab.plot(X,Y)
if '-xsig' in sys.argv and '-ysig' in sys.argv:
pylab.errorbar(X,Y,xerr=Xerrs,yerr=Yerrs,fmt=None)
if '-xsig' in sys.argv and '-ysig' not in sys.argv:
pylab.errorbar(X,Y,xerr=Xerrs,fmt=None)
if '-xsig' not in sys.argv and '-ysig' in sys.argv:
pylab.errorbar(X,Y,yerr=Yerrs,fmt=None)
if xlab!='':pylab.xlabel(xlab)
if ylab!='':pylab.ylabel(ylab)
if lines==1:pylab.plot(X,Y,'k-')
if '-b' in sys.argv:pylab.axis([xmin,xmax,ymin,ymax])
if plot==0:
pylab.show()
else:
pylab.savefig('plotXY.'+fmt)
print('Figure saved as ','plotXY.'+fmt)
sys.exit() | python | def main():
"""
NAME
plotXY.py
DESCRIPTION
Makes simple X,Y plots
INPUT FORMAT
X,Y data in columns
SYNTAX
plotxy.py [command line options]
OPTIONS
-h prints this help message
-f FILE to set file name on command line
-c col1 col2 specify columns to plot
-xsig col3 specify xsigma if desired
-ysig col4 specify xsigma if desired
-b xmin xmax ymin ymax, sets bounds
-sym SYM SIZE specify symbol to plot: default is red dots, 10 pt
-S don't plot the symbols
-xlab XLAB
-ylab YLAB
-l connect symbols with lines
-fmt [svg,png,pdf,eps] specify output format, default is svg
-sav saves plot and quits
-poly X plot a degree X polynomial through the data
-skip n Number of lines to skip before reading in data
"""
fmt,plot='svg',0
col1,col2=0,1
sym,size = 'ro',50
xlab,ylab='',''
lines=0
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt=sys.argv[ind+1]
if '-sav' in sys.argv:plot=1
if '-c' in sys.argv:
ind=sys.argv.index('-c')
col1=int(sys.argv[ind+1])-1
col2=int(sys.argv[ind+2])-1
if '-xsig' in sys.argv:
ind=sys.argv.index('-xsig')
col3=int(sys.argv[ind+1])-1
if '-ysig' in sys.argv:
ind=sys.argv.index('-ysig')
col4=int(sys.argv[ind+1])-1
if '-xlab' in sys.argv:
ind=sys.argv.index('-xlab')
xlab=sys.argv[ind+1]
if '-ylab' in sys.argv:
ind=sys.argv.index('-ylab')
ylab=sys.argv[ind+1]
if '-b' in sys.argv:
ind=sys.argv.index('-b')
xmin=float(sys.argv[ind+1])
xmax=float(sys.argv[ind+2])
ymin=float(sys.argv[ind+3])
ymax=float(sys.argv[ind+4])
if '-poly' in sys.argv:
ind=sys.argv.index('-poly')
degr=sys.argv[ind+1]
if '-sym' in sys.argv:
ind=sys.argv.index('-sym')
sym=sys.argv[ind+1]
size=int(sys.argv[ind+2])
if '-l' in sys.argv: lines=1
if '-S' in sys.argv: sym=''
skip = int(pmag.get_named_arg('-skip', default_val=0))
X,Y=[],[]
Xerrs,Yerrs=[],[]
f=open(file,'r')
for num in range(skip):
f.readline()
data=f.readlines()
for line in data:
line.replace('\n','')
line.replace('\t',' ')
rec=line.split()
X.append(float(rec[col1]))
Y.append(float(rec[col2]))
if '-xsig' in sys.argv:Xerrs.append(float(rec[col3]))
if '-ysig' in sys.argv:Yerrs.append(float(rec[col4]))
if '-poly' in sys.argv:
pylab.plot(xs,ys)
coeffs=numpy.polyfit(X,Y,degr)
correl=numpy.corrcoef(X,Y)**2
polynomial=numpy.poly1d(coeffs)
xs=numpy.linspace(numpy.min(X),numpy.max(X),10)
ys=polynomial(xs)
pylab.plot(xs,ys)
print(polynomial)
if degr=='1': print('R-square value =', '%5.4f'%(correl[0,1]))
if sym!='':
pylab.scatter(X,Y,marker=sym[1],c=sym[0],s=size)
else:
pylab.plot(X,Y)
if '-xsig' in sys.argv and '-ysig' in sys.argv:
pylab.errorbar(X,Y,xerr=Xerrs,yerr=Yerrs,fmt=None)
if '-xsig' in sys.argv and '-ysig' not in sys.argv:
pylab.errorbar(X,Y,xerr=Xerrs,fmt=None)
if '-xsig' not in sys.argv and '-ysig' in sys.argv:
pylab.errorbar(X,Y,yerr=Yerrs,fmt=None)
if xlab!='':pylab.xlabel(xlab)
if ylab!='':pylab.ylabel(ylab)
if lines==1:pylab.plot(X,Y,'k-')
if '-b' in sys.argv:pylab.axis([xmin,xmax,ymin,ymax])
if plot==0:
pylab.show()
else:
pylab.savefig('plotXY.'+fmt)
print('Figure saved as ','plotXY.'+fmt)
sys.exit() | NAME
plotXY.py
DESCRIPTION
Makes simple X,Y plots
INPUT FORMAT
X,Y data in columns
SYNTAX
plotxy.py [command line options]
OPTIONS
-h prints this help message
-f FILE to set file name on command line
-c col1 col2 specify columns to plot
-xsig col3 specify xsigma if desired
-ysig col4 specify xsigma if desired
-b xmin xmax ymin ymax, sets bounds
-sym SYM SIZE specify symbol to plot: default is red dots, 10 pt
-S don't plot the symbols
-xlab XLAB
-ylab YLAB
-l connect symbols with lines
-fmt [svg,png,pdf,eps] specify output format, default is svg
-sav saves plot and quits
-poly X plot a degree X polynomial through the data
-skip n Number of lines to skip before reading in data | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/plotxy.py#L14-L135 |
def main():
    """
    NAME
        sort_specimens.py
    DESCRIPTION
        Reads in a pmag_specimen formatted file and separates it into different components (A,B...etc.)
    SYNTAX
        sort_specimens.py [-h] [command line options]
    INPUT
        takes pmag_specimens.txt formatted input file
    OPTIONS
        -h: prints help message and quits
        -f FILE: specify input file, default is 'pmag_specimens.txt'
    OUTPUT
        makes pmag_specimen formatted files with input filename plus _X_Y
        where X is the component name and Y is s,g,t for coordinate system
    """
    dir_path = '.'
    inspec = "pmag_specimens.txt"
    if '-WD' in sys.argv:
        ind = sys.argv.index('-WD')
        dir_path = sys.argv[ind+1]
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-f' in sys.argv:
        ind = sys.argv.index('-f')
        inspec = sys.argv[ind+1]
    # strip the extension to build the output-file stem
    basename = inspec.split('.')[:-1]
    inspec = dir_path+"/"+inspec
    ofile_base = dir_path+"/"+basename[0]
    #
    # read in data
    #
    prior_spec_data, file_type = pmag.magic_read(inspec)
    if file_type != 'pmag_specimens':
        print(file_type, " this is not a valid pmag_specimens file")
        sys.exit()
    # get list of specimens in file, components, coordinate systems available
    specs, comps, coords = [], [], []
    for spec in prior_spec_data:
        if spec['er_specimen_name'] not in specs:
            specs.append(spec['er_specimen_name'])
        if 'specimen_comp_name' not in list(spec.keys()):
            spec['specimen_comp_name'] = 'A'
        # BUGFIX: the default was stored under the wrong key
        # ('tilt_correction'), which left 'specimen_tilt_correction'
        # missing and raised KeyError two lines below
        if 'specimen_tilt_correction' not in list(spec.keys()):
            spec['specimen_tilt_correction'] = '-1'  # assume specimen coordinates
        if spec['specimen_comp_name'] not in comps:
            comps.append(spec['specimen_comp_name'])
        if spec['specimen_tilt_correction'] not in coords:
            coords.append(spec['specimen_tilt_correction'])
    # work on separating out components, coordinate systems by specimen
    for coord in coords:
        print(coord)
        for comp in comps:
            print(comp)
            speclist = []
            for spec in prior_spec_data:
                if spec['specimen_tilt_correction'] == coord and spec['specimen_comp_name'] == comp:
                    speclist.append(spec)
            ofile = ofile_base+'_'+coord+'_'+comp+'.txt'
            pmag.magic_write(ofile, speclist, 'pmag_specimens')
            print('coordinate system: ', coord, ' component name: ', comp, ' saved in ', ofile)
"""
NAME
sort_specimens.py
DESCRIPTION
Reads in a pmag_specimen formatted file and separates it into different components (A,B...etc.)
SYNTAX
sort_specimens.py [-h] [command line options]
INPUT
takes pmag_specimens.txt formatted input file
OPTIONS
-h: prints help message and quits
-f FILE: specify input file, default is 'pmag_specimens.txt'
OUTPUT
makes pmag_specimen formatted files with input filename plus _X_Y
where X is the component name and Y is s,g,t for coordinate system
"""
dir_path='.'
inspec="pmag_specimens.txt"
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
inspec=sys.argv[ind+1]
basename=inspec.split('.')[:-1]
inspec=dir_path+"/"+inspec
ofile_base=dir_path+"/"+basename[0]
#
# read in data
#
prior_spec_data,file_type=pmag.magic_read(inspec)
if file_type != 'pmag_specimens':
print(file_type, " this is not a valid pmag_specimens file")
sys.exit()
# get list of specimens in file, components, coordinate systems available
specs,comps,coords=[],[],[]
for spec in prior_spec_data:
if spec['er_specimen_name'] not in specs:specs.append(spec['er_specimen_name'])
if 'specimen_comp_name' not in list(spec.keys()):spec['specimen_comp_name']='A'
if 'specimen_tilt_correction' not in list(spec.keys()):spec['tilt_correction']='-1' # assume specimen coordinates
if spec['specimen_comp_name'] not in comps:comps.append(spec['specimen_comp_name'])
if spec['specimen_tilt_correction'] not in coords:coords.append(spec['specimen_tilt_correction'])
# work on separating out components, coordinate systems by specimen
for coord in coords:
print(coord)
for comp in comps:
print(comp)
speclist=[]
for spec in prior_spec_data:
if spec['specimen_tilt_correction']==coord and spec['specimen_comp_name']==comp:speclist.append(spec)
ofile=ofile_base+'_'+coord+'_'+comp+'.txt'
pmag.magic_write(ofile,speclist,'pmag_specimens')
print('coordinate system: ',coord,' component name: ',comp,' saved in ',ofile) | NAME
sort_specimens.py
DESCRIPTION
Reads in a pmag_specimen formatted file and separates it into different components (A,B...etc.)
SYNTAX
sort_specimens.py [-h] [command line options]
INPUT
takes pmag_specimens.txt formatted input file
OPTIONS
-h: prints help message and quits
-f FILE: specify input file, default is 'pmag_specimens.txt'
OUTPUT
makes pmag_specimen formatted files with input filename plus _X_Y
where X is the component name and Y is s,g,t for coordinate system | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/sort_specimens.py#L6-L67 |
def create_menu(self):
    """Build the frame's menu bar with a single Instructions menu."""
    bar = wx.MenuBar()
    instructions_menu = wx.Menu()
    notes_item = instructions_menu.Append(-1, "&Some notes", "")
    # clicking the notes item opens the help text
    self.Bind(wx.EVT_MENU, self.on_menu_help, notes_item)
    bar.Append(instructions_menu, "& Instructions")
    self.menubar = bar
    self.SetMenuBar(self.menubar)
""" Create menu
"""
self.menubar = wx.MenuBar()
menu_about = wx.Menu()
menu_help = menu_about.Append(-1, "&Some notes", "")
self.Bind(wx.EVT_MENU, self.on_menu_help, menu_help)
self.menubar.Append(menu_about, "& Instructions")
self.SetMenuBar(self.menubar) | Create menu | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/conversion_scripts/livdb_magic.py#L48-L59 |
def InitSpecCheck(self):
    """Show an editable grid of specimens (names + parent sample)."""
    # pull lithology info down into the specimens table before display
    self.contribution.propagate_lithology_cols()
    spec_df = self.contribution.tables['specimens'].df
    self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
    self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
                                            'specimens', 'specimens', self.panel,
                                            main_frame=self.main_frame)
    # repurpose the grid's exit button to advance to the sample step
    self.grid_frame.exitButton.SetLabel('Save and continue')
    spec_grid = self.grid_frame.grid
    self.grid_frame.Bind(
        wx.EVT_BUTTON,
        lambda event: self.onContinue(event, spec_grid, self.InitSampCheck),
        self.grid_frame.exitButton)
    # back button exists for layout parity but stays disabled on step one
    self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
                                name='back_btn')
    self.backButton.Disable()
    self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
    # size and center the frame
    self.grid_frame.do_fit(None, self.min_size)
    self.grid_frame.Centre()
    return
"""
make an interactive grid in which users can edit specimen names
as well as which sample a specimen belongs to
"""
#wait = wx.BusyInfo("Please wait, working...")
#wx.SafeYield()
self.contribution.propagate_lithology_cols()
spec_df = self.contribution.tables['specimens'].df
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
'specimens', 'specimens', self.panel,
main_frame=self.main_frame)
# redefine default 'save & exit grid' button to go to next dialog instead
self.grid_frame.exitButton.SetLabel('Save and continue')
grid = self.grid_frame.grid
self.grid_frame.Bind(wx.EVT_BUTTON,
lambda event: self.onContinue(event, grid, self.InitSampCheck),
self.grid_frame.exitButton)
# add back button
self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
name='back_btn')
self.backButton.Disable()
self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
# re-do fit
self.grid_frame.do_fit(None, self.min_size)
# center
self.grid_frame.Centre()
return | make an interactive grid in which users can edit specimen names
as well as which sample a specimen belongs to | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_er_magic_dialogs.py#L36-L65 |
def InitSiteCheck(self):
    """Show an editable grid of sites (names + parent location)."""
    # sites missing lat/lon/height inherit averaged values from their samples
    self.contribution.propagate_average_up(cols=['lat', 'lon', 'height'],
                                           target_df_name='sites',
                                           source_df_name='samples')
    # pull lithology info into the sites table as well
    self.contribution.propagate_lithology_cols()
    site_df = self.contribution.tables['sites'].df
    self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
    self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
                                            'sites', 'sites', self.panel,
                                            main_frame=self.main_frame)
    # the grid's exit button advances to the location step instead
    self.grid_frame.exitButton.SetLabel('Save and continue')
    site_grid = self.grid_frame.grid
    self.grid_frame.Bind(
        wx.EVT_BUTTON,
        lambda event: self.onContinue(event, site_grid, self.InitLocCheck),
        self.grid_frame.exitButton)
    # back button returns to the sample step
    self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
                                name='back_btn')
    self.Bind(wx.EVT_BUTTON,
              lambda event: self.onbackButton(event, self.InitSampCheck),
              self.backButton)
    self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
    # size and center the frame
    self.grid_frame.do_fit(None, self.min_size)
    self.grid_frame.Centre()
    return
"""
make an interactive grid in which users can edit site names
as well as which location a site belongs to
"""
# propagate average lat/lon info from samples table if
# available in samples and missing in sites
self.contribution.propagate_average_up(cols=['lat', 'lon', 'height'],
target_df_name='sites',
source_df_name='samples')
# propagate lithology columns
self.contribution.propagate_lithology_cols()
site_df = self.contribution.tables['sites'].df
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
'sites', 'sites', self.panel,
main_frame=self.main_frame)
# redefine default 'save & exit grid' button to go to next dialog instead
self.grid_frame.exitButton.SetLabel('Save and continue')
grid = self.grid_frame.grid
self.grid_frame.Bind(wx.EVT_BUTTON,
lambda event: self.onContinue(event, grid, self.InitLocCheck),
self.grid_frame.exitButton)
# add back button
self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
name='back_btn')
self.Bind(wx.EVT_BUTTON,
lambda event: self.onbackButton(event, self.InitSampCheck),
self.backButton)
self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
# re-do fit
self.grid_frame.do_fit(None, self.min_size)
# center
self.grid_frame.Centre()
return | make an interactive grid in which users can edit site names
as well as which location a site belongs to | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_er_magic_dialogs.py#L103-L138 |
def InitLocCheck(self):
    """Show an editable grid of locations."""
    # a location whose name is missing gets called 'unknown'
    self.contribution.rename_item('locations', 'nan', 'unknown')
    # bounding lat/lon values are derived from the sites table
    self.contribution.get_min_max_lat_lon()
    # lithologies & geologic classes are inherited from sites
    self.contribution.propagate_cols_up(['lithologies',
                                         'geologic_classes'], 'locations', 'sites')
    res = self.contribution.propagate_min_max_up()
    if cb.not_null(res):
        self.contribution.propagate_cols_up(['age_unit'], 'locations', 'sites')
    # set up frame
    self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
    self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
                                            'locations', 'locations', self.panel,
                                            main_frame=self.main_frame)
    # the grid's exit button advances to the age step instead
    self.grid_frame.exitButton.SetLabel('Save and continue')
    loc_grid = self.grid_frame.grid
    self.grid_frame.Bind(
        wx.EVT_BUTTON,
        lambda event: self.onContinue(event, loc_grid, self.InitAgeCheck),
        self.grid_frame.exitButton)
    # back button returns to the site step
    self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
                                name='back_btn')
    self.Bind(wx.EVT_BUTTON,
              lambda event: self.onbackButton(event, self.InitSiteCheck),
              self.backButton)
    self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
    # size and center the frame
    self.grid_frame.do_fit(None, min_size=self.min_size)
    self.grid_frame.Centre()
    return
"""
make an interactive grid in which users can edit locations
"""
# if there is a location without a name, name it 'unknown'
self.contribution.rename_item('locations', 'nan', 'unknown')
# propagate lat/lon values from sites table
self.contribution.get_min_max_lat_lon()
# propagate lithologies & geologic classes from sites table
self.contribution.propagate_cols_up(['lithologies',
'geologic_classes'], 'locations', 'sites')
res = self.contribution.propagate_min_max_up()
if cb.not_null(res):
self.contribution.propagate_cols_up(['age_unit'], 'locations', 'sites')
# set up frame
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
'locations', 'locations', self.panel,
main_frame=self.main_frame)
# redefine default 'save & exit grid' button to go to next dialog instead
self.grid_frame.exitButton.SetLabel('Save and continue')
grid = self.grid_frame.grid
self.grid_frame.Bind(wx.EVT_BUTTON,
lambda event: self.onContinue(event, grid, self.InitAgeCheck),
self.grid_frame.exitButton)
# add back button
self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
name='back_btn')
self.Bind(wx.EVT_BUTTON,
lambda event: self.onbackButton(event, self.InitSiteCheck),
self.backButton)
self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
# re-do fit
self.grid_frame.do_fit(None, min_size=self.min_size)
# center
self.grid_frame.Centre()
return | make an interactive grid in which users can edit locations | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_er_magic_dialogs.py#L141-L178 |
def InitAgeCheck(self):
    """Show an editable grid of ages; this is the final step."""
    age_df = self.contribution.tables['ages'].df
    self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
    self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
                                            'ages', 'ages', self.panel,
                                            main_frame=self.main_frame)
    self.grid_frame.exitButton.SetLabel('Save and continue')
    age_grid = self.grid_frame.grid
    # no next dialog here: onContinue finishes up after this grid
    self.grid_frame.Bind(
        wx.EVT_BUTTON,
        lambda event: self.onContinue(event, age_grid, None),
        self.grid_frame.exitButton)
    # back button returns to the location step
    self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
                                name='back_btn')
    self.Bind(wx.EVT_BUTTON,
              lambda event: self.onbackButton(event, self.InitLocCheck),
              self.backButton)
    self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
    # size and center the frame
    self.grid_frame.do_fit(None, self.min_size)
    self.grid_frame.Centre()
    return
"""make an interactive grid in which users can edit ages"""
age_df = self.contribution.tables['ages'].df
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
'ages', 'ages', self.panel,
main_frame=self.main_frame)
self.grid_frame.exitButton.SetLabel('Save and continue')
grid = self.grid_frame.grid
self.grid_frame.Bind(wx.EVT_BUTTON, lambda event: self.onContinue(event, grid, None),
self.grid_frame.exitButton)
# add back button
self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
name='back_btn')
self.Bind(wx.EVT_BUTTON,
lambda event: self.onbackButton(event, self.InitLocCheck),
self.backButton)
self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
# re-do fit
self.grid_frame.do_fit(None, self.min_size)
# center
self.grid_frame.Centre()
return | make an interactive grid in which users can edit ages | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_er_magic_dialogs.py#L181-L203 |
def onContinue(self, event, grid, next_dia=None):
    """Persist grid edits, warn about validation problems, then open next_dia.

    Returns False when the user declines to continue past warnings.
    """
    # deselect any column, including removing the 'EDIT ALL' label
    if self.grid_frame.drop_down_menu:
        self.grid_frame.drop_down_menu.clean_up()
    grid.remove_starred_labels()
    grid.SaveEditControlValue()  # commit the cell currently being edited
    grid_name = str(grid.GetName())
    # push all changes into the data object and write to file
    self.grid_frame.grid_builder.save_grid_data()
    # check that all required data are present
    validation_errors = self.validate(grid)
    if validation_errors:
        messages = ["You have {}: {}.\n\n".format(problem, ", ".join(cols))
                    for problem, cols in list(validation_errors.items()) if cols]
        warn_string = "".join(messages) + "Are you sure you want to continue?"
        if pw.warning_with_override(warn_string) != wx.ID_YES:
            return False
    else:
        wx.MessageBox('Saved!', 'Info',
                      style=wx.OK | wx.ICON_INFORMATION)
    self.panel.Destroy()
    if next_dia:
        next_dia()
    else:
        # last step: push site type/lithology/class data down to samples,
        # overwriting only blank or "Not Specified" sample values
        self.contribution.propagate_lithology_cols()
        wx.MessageBox('Done!', 'Info',
                      style=wx.OK | wx.ICON_INFORMATION)
"""
Save grid data in the data object
"""
# deselect column, including remove 'EDIT ALL' label
if self.grid_frame.drop_down_menu:
self.grid_frame.drop_down_menu.clean_up()
# remove '**' and '^^' from col names
#self.remove_starred_labels(grid)
grid.remove_starred_labels()
grid.SaveEditControlValue() # locks in value in cell currently edited
grid_name = str(grid.GetName())
# save all changes to data object and write to file
self.grid_frame.grid_builder.save_grid_data()
# check that all required data are present
validation_errors = self.validate(grid)
if validation_errors:
warn_string = ""
for error_name, error_cols in list(validation_errors.items()):
if error_cols:
warn_string += "You have {}: {}.\n\n".format(error_name, ", ".join(error_cols))
warn_string += "Are you sure you want to continue?"
result = pw.warning_with_override(warn_string)
if result == wx.ID_YES:
pass
else:
return False
else:
wx.MessageBox('Saved!', 'Info',
style=wx.OK | wx.ICON_INFORMATION)
self.panel.Destroy()
if next_dia:
next_dia()
else:
# propagate any type/lithology/class data from sites to samples table
# will only overwrite if sample values are blank or "Not Specified"
self.contribution.propagate_lithology_cols()
wx.MessageBox('Done!', 'Info',
style=wx.OK | wx.ICON_INFORMATION) | Save grid data in the data object | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_er_magic_dialogs.py#L209-L252 |
def validate(self, grid):
    """
    Check one MagicGrid's backing table against the MagIC data model.

    Parameters
    ----------
    grid : dialogs.magic_grid3.MagicGrid
        The MagicGrid to be validated

    Returns
    -------
    dict
        Empty when the table is empty or complete; otherwise
        {name of problem: [sorted problem columns]}.
    """
    table_name = str(grid.GetName())
    dmodel = self.contribution.dmodel
    reqd_headers = dmodel.get_reqd_headers(table_name)
    df = self.contribution.tables[table_name].df
    # pandas does not treat empty strings as null, so convert them first
    df = df.replace('', np.nan)
    if df.empty:
        return {}
    missing_headers = set(reqd_headers) - set(df.columns)
    present_headers = set(reqd_headers) - missing_headers
    # required columns that exist but hold no data at all
    populated = set(df.dropna(how='all', axis='columns').columns)
    null_reqd_headers = present_headers - populated
    if any(missing_headers) or any(null_reqd_headers):
        return {'missing required column(s)': sorted(missing_headers),
                'no data in required column(s)': sorted(null_reqd_headers)}
    return {}
"""
Using the MagIC data model, generate validation errors on a MagicGrid.
Parameters
----------
grid : dialogs.magic_grid3.MagicGrid
The MagicGrid to be validated
Returns
---------
warnings: dict
Empty dict if no warnings, otherwise a dict with format {name of problem: [problem_columns]}
"""
grid_name = str(grid.GetName())
dmodel = self.contribution.dmodel
reqd_headers = dmodel.get_reqd_headers(grid_name)
df = self.contribution.tables[grid_name].df
df = df.replace('', np.nan) # python does not view empty strings as null
if df.empty:
return {}
col_names = set(df.columns)
missing_headers = set(reqd_headers) - col_names
present_headers = set(reqd_headers) - set(missing_headers)
non_null_headers = df.dropna(how='all', axis='columns').columns
null_reqd_headers = present_headers - set(non_null_headers)
if any(missing_headers) or any (null_reqd_headers):
warnings = {'missing required column(s)': sorted(missing_headers),
'no data in required column(s)': sorted(null_reqd_headers)}
else:
warnings = {}
return warnings | Using the MagIC data model, generate validation errors on a MagicGrid.
Parameters
----------
grid : dialogs.magic_grid3.MagicGrid
The MagicGrid to be validated
Returns
---------
warnings: dict
Empty dict if no warnings, otherwise a dict with format {name of problem: [problem_columns]} | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_er_magic_dialogs.py#L265-L296 |
def on_saveButton(self, event, grid):
    """Save current grid edits without advancing to the next window."""
    wait = wx.BusyInfo("Please wait, working...")
    wx.SafeYield()
    if self.grid_frame.drop_down_menu:
        # deselect columns / clear 'EDIT ALL' highlighting
        self.grid_frame.drop_down_menu.clean_up()
    # strip '**' and '^^' markers so clean column names are written out
    starred_cols, hatted_cols = grid.remove_starred_labels()
    grid.SaveEditControlValue()   # commit the cell currently being edited
    grid.HideCellEditControl()    # drop focus from that cell
    if grid.changes:
        self.onSave(grid)
    # restore the markers the user saw before saving
    for col in starred_cols:
        grid.SetColLabelValue(col, grid.GetColLabelValue(col) + '**')
    for col in hatted_cols:
        grid.SetColLabelValue(col, grid.GetColLabelValue(col) + '^^')
    del wait
"""saves any editing of the grid but does not continue to the next window"""
wait = wx.BusyInfo("Please wait, working...")
wx.SafeYield()
if self.grid_frame.drop_down_menu: # unhighlight selected columns, etc.
self.grid_frame.drop_down_menu.clean_up()
# remove '**' and '^^' from col labels
starred_cols, hatted_cols = grid.remove_starred_labels()
grid.SaveEditControlValue() # locks in value in cell currently edited
grid.HideCellEditControl() # removes focus from cell that was being edited
if grid.changes:
self.onSave(grid)
for col in starred_cols:
label = grid.GetColLabelValue(col)
grid.SetColLabelValue(col, label + '**')
for col in hatted_cols:
label = grid.GetColLabelValue(col)
grid.SetColLabelValue(col, label + '^^')
del wait | saves any editing of the grid but does not continue to the next window | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_er_magic_dialogs.py#L299-L322 |
PmagPy/PmagPy | dialogs/pmag_er_magic_dialogs.py | ErMagicCheckFrame.InitSpecCheck | def InitSpecCheck(self):
"""make an interactive grid in which users can edit specimen names
as well as which sample a specimen belongs to"""
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
#import wx.lib.scrolledpanel as libpanel # does not work well
#self.panel = libpanel.ScrolledPanel(self, style=wx.SIMPLE_BORDER)
text = """Step 1:
Check that all specimens belong to the correct sample
(if sample name is simply wrong, that will be fixed in step 2)"""
label = wx.StaticText(self.panel, label=text)
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'specimen',
self.er_magic_data.headers, self.panel,
'sample')
self.spec_grid = self.grid_builder.make_grid(incl_pmag=False)
self.grid = self.spec_grid
self.spec_grid.InitUI()
self.grid_builder.add_data_to_grid(self.spec_grid, 'specimen', incl_pmag=False)
samples = self.er_magic_data.make_name_list(self.er_magic_data.samples)
self.drop_down_menu = drop_down_menus.Menus("specimen", self, self.spec_grid, samples)
#### Create Buttons ####
hbox_one = wx.BoxSizer(wx.HORIZONTAL)
self.addSampleButton = wx.Button(self.panel, label="Add a new sample")
self.samples = [name for name in self.er_magic_data.samples]
self.Bind(wx.EVT_BUTTON, self.on_addSampleButton, self.addSampleButton)
self.helpButton = wx.Button(self.panel, label="Help")
self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, "ErMagicSpecimenHelp.html"), self.helpButton)
hbox_one.Add(self.addSampleButton, flag=wx.ALIGN_LEFT|wx.RIGHT, border=10)
hbox_one.Add(self.helpButton)
#
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.saveButton = wx.Button(self.panel, id=-1, label='Save')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.spec_grid), self.saveButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.continueButton = wx.Button(self.panel, id=-1, label='Save and continue')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.spec_grid, next_dia=self.InitSampCheck), self.continueButton)
hboxok.Add(self.saveButton, flag=wx.ALIGN_LEFT|wx.RIGHT, border=10)
hboxok.Add(self.cancelButton, flag=wx.ALIGN_LEFT|wx.RIGHT, border=10)
hboxok.Add(self.continueButton, flag=wx.ALIGN_LEFT)
#
hboxgrid = pw.hbox_grid(self.panel, self.onDeleteRow, 'specimen', self.grid)
self.deleteRowButton = hboxgrid.deleteRowButton
self.panel.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
### Create Containers ###
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.AddSpacer(10)
vbox.Add(label, flag=wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM, border=10)
vbox.Add(hbox_one, flag=wx.TOP|wx.LEFT|wx.BOTTOM, border=10)
vbox.Add(hboxok, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(hboxgrid, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(self.spec_grid, flag=wx.ALL, border=10)#|wx.EXPAND, border=30)
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
#self.panel.SetScrollbars(20, 20, 50, 50)
self.hbox_all.Fit(self)
self.Centre()
self.Show()
self.Hide()
self.Show() | python | def InitSpecCheck(self):
"""make an interactive grid in which users can edit specimen names
as well as which sample a specimen belongs to"""
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
#import wx.lib.scrolledpanel as libpanel # does not work well
#self.panel = libpanel.ScrolledPanel(self, style=wx.SIMPLE_BORDER)
text = """Step 1:
Check that all specimens belong to the correct sample
(if sample name is simply wrong, that will be fixed in step 2)"""
label = wx.StaticText(self.panel, label=text)
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'specimen',
self.er_magic_data.headers, self.panel,
'sample')
self.spec_grid = self.grid_builder.make_grid(incl_pmag=False)
self.grid = self.spec_grid
self.spec_grid.InitUI()
self.grid_builder.add_data_to_grid(self.spec_grid, 'specimen', incl_pmag=False)
samples = self.er_magic_data.make_name_list(self.er_magic_data.samples)
self.drop_down_menu = drop_down_menus.Menus("specimen", self, self.spec_grid, samples)
#### Create Buttons ####
hbox_one = wx.BoxSizer(wx.HORIZONTAL)
self.addSampleButton = wx.Button(self.panel, label="Add a new sample")
self.samples = [name for name in self.er_magic_data.samples]
self.Bind(wx.EVT_BUTTON, self.on_addSampleButton, self.addSampleButton)
self.helpButton = wx.Button(self.panel, label="Help")
self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, "ErMagicSpecimenHelp.html"), self.helpButton)
hbox_one.Add(self.addSampleButton, flag=wx.ALIGN_LEFT|wx.RIGHT, border=10)
hbox_one.Add(self.helpButton)
#
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.saveButton = wx.Button(self.panel, id=-1, label='Save')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.spec_grid), self.saveButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.continueButton = wx.Button(self.panel, id=-1, label='Save and continue')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.spec_grid, next_dia=self.InitSampCheck), self.continueButton)
hboxok.Add(self.saveButton, flag=wx.ALIGN_LEFT|wx.RIGHT, border=10)
hboxok.Add(self.cancelButton, flag=wx.ALIGN_LEFT|wx.RIGHT, border=10)
hboxok.Add(self.continueButton, flag=wx.ALIGN_LEFT)
#
hboxgrid = pw.hbox_grid(self.panel, self.onDeleteRow, 'specimen', self.grid)
self.deleteRowButton = hboxgrid.deleteRowButton
self.panel.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
### Create Containers ###
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.AddSpacer(10)
vbox.Add(label, flag=wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM, border=10)
vbox.Add(hbox_one, flag=wx.TOP|wx.LEFT|wx.BOTTOM, border=10)
vbox.Add(hboxok, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(hboxgrid, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(self.spec_grid, flag=wx.ALL, border=10)#|wx.EXPAND, border=30)
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
#self.panel.SetScrollbars(20, 20, 50, 50)
self.hbox_all.Fit(self)
self.Centre()
self.Show()
self.Hide()
self.Show() | make an interactive grid in which users can edit specimen names
as well as which sample a specimen belongs to | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_er_magic_dialogs.py#L380-L457 |
PmagPy/PmagPy | dialogs/pmag_er_magic_dialogs.py | ErMagicCheckFrame.InitSampCheck | def InitSampCheck(self):
"""make an interactive grid in which users can edit sample names
as well as which site a sample belongs to"""
self.sample_window += 1
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
if self.sample_window == 1:
text = """Step 2:
Check that all samples are correctly named,
and that they belong to the correct site
(if site name is simply wrong, that will be fixed in step 3)"""
step_label = wx.StaticText(self.panel, label=text)#, size=(900, 100))
else:
text = """Step 4:
Some of the data from the er_sites table has propogated into er_samples.
Check that these data are correct, and fill in missing cells using controlled vocabularies.
The columns for class, lithology, and type can take multiple values in the form of a colon-delimited list.
You may use the drop-down menus to add as many values as needed in these columns.
(see Help button for more details)\n\n** Denotes controlled vocabulary"""
step_label = wx.StaticText(self.panel, label=text)#, size=(900, 100))
if self.sample_window == 1:
# provide no extra headers
headers = {'sample': {'er': [[], [], []],
'pmag': [[], [], []]}}
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'sample',
headers, self.panel,
'site')
if self.sample_window > 1:
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'sample',
self.er_magic_data.headers, self.panel,
'site')
self.samp_grid = self.grid_builder.make_grid(incl_pmag=False)
self.samp_grid.InitUI()
self.grid_builder.add_data_to_grid(self.samp_grid, 'sample', incl_pmag=False)
self.grid = self.samp_grid
sites = sorted(self.er_magic_data.make_name_list(self.er_magic_data.sites))
self.drop_down_menu = drop_down_menus.Menus("sample", self, self.samp_grid, sites) # initialize all needed drop-down menus
### Create Buttons ###
hbox_one = wx.BoxSizer(wx.HORIZONTAL)
self.addSiteButton = wx.Button(self.panel, label="Add a new site")
self.Bind(wx.EVT_BUTTON, self.on_addSiteButton, self.addSiteButton)
hbox_one.Add(self.addSiteButton, flag=wx.RIGHT, border=10)
if self.sample_window == 1:
html_help = "ErMagicSampleHelp1.html"
if self.sample_window > 1:
html_help = "ErMagicSampleHelp.html"
self.helpButton = wx.Button(self.panel, label="Help")
self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, html_help), self.helpButton)
hbox_one.Add(self.helpButton)
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.saveButton = wx.Button(self.panel, id=-1, label='Save')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.samp_grid), self.saveButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.continueButton = wx.Button(self.panel, id=-1, label='Save and continue')
next_dia = self.InitSiteCheck if self.sample_window < 2 else self.InitLocCheck
self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.samp_grid, next_dia=next_dia), self.continueButton)
self.backButton = wx.Button(self.panel, wx.ID_ANY, "&Back")
previous_dia = self.InitSpecCheck if self.sample_window < 2 else self.InitSiteCheck
self.Bind(wx.EVT_BUTTON, lambda event: self.on_backButton(event, previous_dia=previous_dia), self.backButton)
hboxok.Add(self.saveButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.cancelButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.continueButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.backButton)
hboxgrid = pw.hbox_grid(self.panel, self.onDeleteRow, 'sample', self.grid)
self.deleteRowButton = hboxgrid.deleteRowButton
self.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
### Make Containers ###
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(step_label, flag=wx.ALIGN_LEFT|wx.TOP|wx.BOTTOM, border=20)
vbox.Add(hbox_one, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(hboxok, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(hboxgrid, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(self.samp_grid, flag=wx.ALL, border=10) # using wx.EXPAND or not does not affect re-size problem
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
#if sys.platform in ['win32', 'win64']:
# self.panel.SetScrollbars(20, 20, 50, 50)
self.hbox_all.Fit(self)
self.Centre()
self.Show()
## this combination may prevent a display error that (without the fix) only resolves on manually resizing the window
self.panel.Refresh()
self.samp_grid.ForceRefresh()
self.panel.Refresh()
self.Refresh()
# this prevents display errors
self.Hide()
self.Show() | python | def InitSampCheck(self):
"""make an interactive grid in which users can edit sample names
as well as which site a sample belongs to"""
self.sample_window += 1
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
if self.sample_window == 1:
text = """Step 2:
Check that all samples are correctly named,
and that they belong to the correct site
(if site name is simply wrong, that will be fixed in step 3)"""
step_label = wx.StaticText(self.panel, label=text)#, size=(900, 100))
else:
text = """Step 4:
Some of the data from the er_sites table has propogated into er_samples.
Check that these data are correct, and fill in missing cells using controlled vocabularies.
The columns for class, lithology, and type can take multiple values in the form of a colon-delimited list.
You may use the drop-down menus to add as many values as needed in these columns.
(see Help button for more details)\n\n** Denotes controlled vocabulary"""
step_label = wx.StaticText(self.panel, label=text)#, size=(900, 100))
if self.sample_window == 1:
# provide no extra headers
headers = {'sample': {'er': [[], [], []],
'pmag': [[], [], []]}}
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'sample',
headers, self.panel,
'site')
if self.sample_window > 1:
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'sample',
self.er_magic_data.headers, self.panel,
'site')
self.samp_grid = self.grid_builder.make_grid(incl_pmag=False)
self.samp_grid.InitUI()
self.grid_builder.add_data_to_grid(self.samp_grid, 'sample', incl_pmag=False)
self.grid = self.samp_grid
sites = sorted(self.er_magic_data.make_name_list(self.er_magic_data.sites))
self.drop_down_menu = drop_down_menus.Menus("sample", self, self.samp_grid, sites) # initialize all needed drop-down menus
### Create Buttons ###
hbox_one = wx.BoxSizer(wx.HORIZONTAL)
self.addSiteButton = wx.Button(self.panel, label="Add a new site")
self.Bind(wx.EVT_BUTTON, self.on_addSiteButton, self.addSiteButton)
hbox_one.Add(self.addSiteButton, flag=wx.RIGHT, border=10)
if self.sample_window == 1:
html_help = "ErMagicSampleHelp1.html"
if self.sample_window > 1:
html_help = "ErMagicSampleHelp.html"
self.helpButton = wx.Button(self.panel, label="Help")
self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, html_help), self.helpButton)
hbox_one.Add(self.helpButton)
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.saveButton = wx.Button(self.panel, id=-1, label='Save')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.samp_grid), self.saveButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.continueButton = wx.Button(self.panel, id=-1, label='Save and continue')
next_dia = self.InitSiteCheck if self.sample_window < 2 else self.InitLocCheck
self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.samp_grid, next_dia=next_dia), self.continueButton)
self.backButton = wx.Button(self.panel, wx.ID_ANY, "&Back")
previous_dia = self.InitSpecCheck if self.sample_window < 2 else self.InitSiteCheck
self.Bind(wx.EVT_BUTTON, lambda event: self.on_backButton(event, previous_dia=previous_dia), self.backButton)
hboxok.Add(self.saveButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.cancelButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.continueButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.backButton)
hboxgrid = pw.hbox_grid(self.panel, self.onDeleteRow, 'sample', self.grid)
self.deleteRowButton = hboxgrid.deleteRowButton
self.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
### Make Containers ###
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(step_label, flag=wx.ALIGN_LEFT|wx.TOP|wx.BOTTOM, border=20)
vbox.Add(hbox_one, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(hboxok, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(hboxgrid, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(self.samp_grid, flag=wx.ALL, border=10) # using wx.EXPAND or not does not affect re-size problem
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
#if sys.platform in ['win32', 'win64']:
# self.panel.SetScrollbars(20, 20, 50, 50)
self.hbox_all.Fit(self)
self.Centre()
self.Show()
## this combination may prevent a display error that (without the fix) only resolves on manually resizing the window
self.panel.Refresh()
self.samp_grid.ForceRefresh()
self.panel.Refresh()
self.Refresh()
# this prevents display errors
self.Hide()
self.Show() | make an interactive grid in which users can edit sample names
as well as which site a sample belongs to | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_er_magic_dialogs.py#L460-L568 |
PmagPy/PmagPy | dialogs/pmag_er_magic_dialogs.py | ErMagicCheckFrame.InitSiteCheck | def InitSiteCheck(self):
"""make an interactive grid in which users can edit site names
as well as which location a site belongs to"""
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
text = """Step 3:
Check that all sites are correctly named, and that they belong to the correct location.
Fill in the additional columns with controlled vocabularies.
The columns for class, lithology, and type can take multiple values in the form of a colon-delimited list.
You may use the drop-down menus to add as many values as needed in these columns.
(see the help button for more details)
note: Changes to site_class, site_lithology, or site_type will overwrite er_samples.txt
However, you will be able to edit sample_class, sample_lithology, and sample_type in step 4
**Denotes controlled vocabulary"""
label = wx.StaticText(self.panel, label=text)
#self.Data_hierarchy = self.ErMagic.Data_hierarchy
self.sites = sorted(self.er_magic_data.make_name_list(self.er_magic_data.sites))
#for val in ['er_citation_names', 'er_location_name', 'er_site_name', 'site_class', 'site_lithology', 'site_type', 'site_definition', 'site_lat', 'site_lon']: #
# try:
# self.er_magic_data.headers['site']['er'][0].remove(val)
# except ValueError:
# pass
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'site',
self.er_magic_data.headers, self.panel,
'location')
self.site_grid = self.grid_builder.make_grid(incl_pmag=False)
self.site_grid.InitUI()
self.grid_builder.add_data_to_grid(self.site_grid, 'site', incl_pmag=False)
self.grid = self.site_grid
# populate site_definition as 's' by default if no value is provided (indicates that site is single, not composite)
rows = self.site_grid.GetNumberRows()
col = 6
for row in range(rows):
cell = self.site_grid.GetCellValue(row, col)
if not cell:
self.site_grid.SetCellValue(row, col, 's')
# initialize all needed drop-down menus
locations = sorted(self.er_magic_data.make_name_list(self.er_magic_data.locations))
self.drop_down_menu = drop_down_menus.Menus("site", self, self.site_grid, locations)
### Create Buttons ###
hbox_one = wx.BoxSizer(wx.HORIZONTAL)
self.addLocButton = wx.Button(self.panel, label="Add a new location")
self.Bind(wx.EVT_BUTTON, self.on_addLocButton, self.addLocButton)
hbox_one.Add(self.addLocButton, flag=wx.RIGHT, border=10)
self.helpButton = wx.Button(self.panel, label="Help")
self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, "ErMagicSiteHelp.html"), self.helpButton)
hbox_one.Add(self.helpButton)
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.saveButton = wx.Button(self.panel, id=-1, label='Save')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.site_grid), self.saveButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.continueButton = wx.Button(self.panel, id=-1, label='Save and continue')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.site_grid, next_dia=self.InitSampCheck), self.continueButton)
self.backButton = wx.Button(self.panel, wx.ID_ANY, "&Back")
previous_dia = self.InitSampCheck
self.Bind(wx.EVT_BUTTON, lambda event: self.on_backButton(event, previous_dia=previous_dia), self.backButton)
hboxok.Add(self.saveButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.cancelButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.continueButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.backButton)
#
hboxgrid = pw.hbox_grid(self.panel, self.onDeleteRow, 'site', self.grid)
self.deleteRowButton = hboxgrid.deleteRowButton
self.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
### Make Containers ###
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(label, flag=wx.ALIGN_CENTER|wx.BOTTOM|wx.TOP, border=20)
vbox.Add(hbox_one, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(hboxok, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(hboxgrid, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(self.site_grid, flag=wx.ALL|wx.EXPAND, border=10) # EXPAND ??
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
#if sys.platform in ['win32', 'win64']:
# self.panel.SetScrollbars(20, 20, 50, 50)
self.hbox_all.Fit(self)
self.Centre()
self.Show()
# this combination prevents a display error that (without the fix) only resolves on manually resizing the window
self.site_grid.ForceRefresh()
self.panel.Refresh()
self.Hide()
self.Show() | python | def InitSiteCheck(self):
"""make an interactive grid in which users can edit site names
as well as which location a site belongs to"""
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
text = """Step 3:
Check that all sites are correctly named, and that they belong to the correct location.
Fill in the additional columns with controlled vocabularies.
The columns for class, lithology, and type can take multiple values in the form of a colon-delimited list.
You may use the drop-down menus to add as many values as needed in these columns.
(see the help button for more details)
note: Changes to site_class, site_lithology, or site_type will overwrite er_samples.txt
However, you will be able to edit sample_class, sample_lithology, and sample_type in step 4
**Denotes controlled vocabulary"""
label = wx.StaticText(self.panel, label=text)
#self.Data_hierarchy = self.ErMagic.Data_hierarchy
self.sites = sorted(self.er_magic_data.make_name_list(self.er_magic_data.sites))
#for val in ['er_citation_names', 'er_location_name', 'er_site_name', 'site_class', 'site_lithology', 'site_type', 'site_definition', 'site_lat', 'site_lon']: #
# try:
# self.er_magic_data.headers['site']['er'][0].remove(val)
# except ValueError:
# pass
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'site',
self.er_magic_data.headers, self.panel,
'location')
self.site_grid = self.grid_builder.make_grid(incl_pmag=False)
self.site_grid.InitUI()
self.grid_builder.add_data_to_grid(self.site_grid, 'site', incl_pmag=False)
self.grid = self.site_grid
# populate site_definition as 's' by default if no value is provided (indicates that site is single, not composite)
rows = self.site_grid.GetNumberRows()
col = 6
for row in range(rows):
cell = self.site_grid.GetCellValue(row, col)
if not cell:
self.site_grid.SetCellValue(row, col, 's')
# initialize all needed drop-down menus
locations = sorted(self.er_magic_data.make_name_list(self.er_magic_data.locations))
self.drop_down_menu = drop_down_menus.Menus("site", self, self.site_grid, locations)
### Create Buttons ###
hbox_one = wx.BoxSizer(wx.HORIZONTAL)
self.addLocButton = wx.Button(self.panel, label="Add a new location")
self.Bind(wx.EVT_BUTTON, self.on_addLocButton, self.addLocButton)
hbox_one.Add(self.addLocButton, flag=wx.RIGHT, border=10)
self.helpButton = wx.Button(self.panel, label="Help")
self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, "ErMagicSiteHelp.html"), self.helpButton)
hbox_one.Add(self.helpButton)
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.saveButton = wx.Button(self.panel, id=-1, label='Save')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.site_grid), self.saveButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.continueButton = wx.Button(self.panel, id=-1, label='Save and continue')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.site_grid, next_dia=self.InitSampCheck), self.continueButton)
self.backButton = wx.Button(self.panel, wx.ID_ANY, "&Back")
previous_dia = self.InitSampCheck
self.Bind(wx.EVT_BUTTON, lambda event: self.on_backButton(event, previous_dia=previous_dia), self.backButton)
hboxok.Add(self.saveButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.cancelButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.continueButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.backButton)
#
hboxgrid = pw.hbox_grid(self.panel, self.onDeleteRow, 'site', self.grid)
self.deleteRowButton = hboxgrid.deleteRowButton
self.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
### Make Containers ###
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(label, flag=wx.ALIGN_CENTER|wx.BOTTOM|wx.TOP, border=20)
vbox.Add(hbox_one, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(hboxok, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(hboxgrid, flag=wx.BOTTOM|wx.LEFT, border=10)
vbox.Add(self.site_grid, flag=wx.ALL|wx.EXPAND, border=10) # EXPAND ??
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
#if sys.platform in ['win32', 'win64']:
# self.panel.SetScrollbars(20, 20, 50, 50)
self.hbox_all.Fit(self)
self.Centre()
self.Show()
# this combination prevents a display error that (without the fix) only resolves on manually resizing the window
self.site_grid.ForceRefresh()
self.panel.Refresh()
self.Hide()
self.Show() | make an interactive grid in which users can edit site names
as well as which location a site belongs to | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_er_magic_dialogs.py#L577-L681 |
PmagPy/PmagPy | dialogs/pmag_er_magic_dialogs.py | ErMagicCheckFrame.InitLocCheck | def InitLocCheck(self):
"""make an interactive grid in which users can edit specimen names
as well as which sample a specimen belongs to"""
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
text = """Step 5:
Check that locations are correctly named.
Fill in any blank cells using controlled vocabularies.
(See Help button for details)
** Denotes controlled vocabulary"""
label = wx.StaticText(self.panel, label=text)
#self.Data_hierarchy = self.ErMagic.Data_hierarchy
self.locations = self.er_magic_data.locations
#
if not self.er_magic_data.locations:
msg = "You have no data in er_locations, so we are skipping step 5.\n Note that location names must be entered at the measurements level,so you may need to re-import your data, or you can add a location in step 3"
dlg = wx.MessageDialog(None, caption="Message:", message=msg, style=wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
self.panel.Destroy()
self.InitAgeCheck()
return
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'location',
self.er_magic_data.headers, self.panel)
self.loc_grid = self.grid_builder.make_grid(incl_pmag=False)
self.loc_grid.InitUI()
self.grid_builder.add_data_to_grid(self.loc_grid, 'location', incl_pmag=False)
self.grid = self.loc_grid
# initialize all needed drop-down menus
self.drop_down_menu = drop_down_menus.Menus("location", self,
self.loc_grid, None)
# need to find max/min lat/lon here IF they were added in the previous grid
sites = self.er_magic_data.sites
location_lat_lon = self.er_magic_data.get_min_max_lat_lon(self.er_magic_data.locations)
col_names = ('location_begin_lat', 'location_end_lat', 'location_begin_lon', 'location_end_lon')
col_inds = [self.grid.col_labels.index(name) for name in col_names]
col_info = list(zip(col_names, col_inds))
for loc in self.er_magic_data.locations:
row_ind = self.grid.row_labels.index(loc.name)
for col_name, col_ind in col_info:
info = location_lat_lon[loc.name][col_name]
self.grid.SetCellValue(row_ind, col_ind, str(info))
### Create Buttons ###
hbox_one = wx.BoxSizer(wx.HORIZONTAL)
self.helpButton = wx.Button(self.panel, label="Help")
self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, "ErMagicLocationHelp.html"), self.helpButton)
hbox_one.Add(self.helpButton)
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.saveButton = wx.Button(self.panel, id=-1, label='Save')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.loc_grid), self.saveButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.continueButton = wx.Button(self.panel, id=-1, label='Save and continue')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.loc_grid, next_dia=self.InitAgeCheck), self.continueButton)
self.backButton = wx.Button(self.panel, wx.ID_ANY, "&Back")
previous_dia = self.InitSampCheck
self.Bind(wx.EVT_BUTTON, lambda event: self.on_backButton(event, previous_dia, current_dia=self.InitLocCheck), self.backButton)
hboxok.Add(self.saveButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.cancelButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.continueButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.backButton)
#
hboxgrid = pw.hbox_grid(self.panel, self.onDeleteRow, 'location', self.grid)
self.deleteRowButton = hboxgrid.deleteRowButton
self.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
### Make Containers ###
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(label, flag=wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM, border=20)
vbox.Add(hbox_one, flag=wx.BOTTOM|wx.ALIGN_LEFT, border=10)
vbox.Add(hboxok, flag=wx.BOTTOM|wx.ALIGN_LEFT, border=10)
vbox.Add(hboxgrid, flag=wx.BOTTOM|wx.ALIGN_LEFT, border=10)
vbox.Add(self.loc_grid, flag=wx.TOP|wx.BOTTOM, border=10)
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
#if sys.platform in ['win32', 'win64']:
# self.panel.SetScrollbars(20, 20, 50, 50)
self.hbox_all.Fit(self)
self.Centre()
self.Show()
self.Hide()
self.Show() | python | def InitLocCheck(self):
"""make an interactive grid in which users can edit specimen names
as well as which sample a specimen belongs to"""
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
text = """Step 5:
Check that locations are correctly named.
Fill in any blank cells using controlled vocabularies.
(See Help button for details)
** Denotes controlled vocabulary"""
label = wx.StaticText(self.panel, label=text)
#self.Data_hierarchy = self.ErMagic.Data_hierarchy
self.locations = self.er_magic_data.locations
#
if not self.er_magic_data.locations:
msg = "You have no data in er_locations, so we are skipping step 5.\n Note that location names must be entered at the measurements level,so you may need to re-import your data, or you can add a location in step 3"
dlg = wx.MessageDialog(None, caption="Message:", message=msg, style=wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
self.panel.Destroy()
self.InitAgeCheck()
return
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'location',
self.er_magic_data.headers, self.panel)
self.loc_grid = self.grid_builder.make_grid(incl_pmag=False)
self.loc_grid.InitUI()
self.grid_builder.add_data_to_grid(self.loc_grid, 'location', incl_pmag=False)
self.grid = self.loc_grid
# initialize all needed drop-down menus
self.drop_down_menu = drop_down_menus.Menus("location", self,
self.loc_grid, None)
# need to find max/min lat/lon here IF they were added in the previous grid
sites = self.er_magic_data.sites
location_lat_lon = self.er_magic_data.get_min_max_lat_lon(self.er_magic_data.locations)
col_names = ('location_begin_lat', 'location_end_lat', 'location_begin_lon', 'location_end_lon')
col_inds = [self.grid.col_labels.index(name) for name in col_names]
col_info = list(zip(col_names, col_inds))
for loc in self.er_magic_data.locations:
row_ind = self.grid.row_labels.index(loc.name)
for col_name, col_ind in col_info:
info = location_lat_lon[loc.name][col_name]
self.grid.SetCellValue(row_ind, col_ind, str(info))
### Create Buttons ###
hbox_one = wx.BoxSizer(wx.HORIZONTAL)
self.helpButton = wx.Button(self.panel, label="Help")
self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, "ErMagicLocationHelp.html"), self.helpButton)
hbox_one.Add(self.helpButton)
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.saveButton = wx.Button(self.panel, id=-1, label='Save')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.loc_grid), self.saveButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.continueButton = wx.Button(self.panel, id=-1, label='Save and continue')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.loc_grid, next_dia=self.InitAgeCheck), self.continueButton)
self.backButton = wx.Button(self.panel, wx.ID_ANY, "&Back")
previous_dia = self.InitSampCheck
self.Bind(wx.EVT_BUTTON, lambda event: self.on_backButton(event, previous_dia, current_dia=self.InitLocCheck), self.backButton)
hboxok.Add(self.saveButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.cancelButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.continueButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.backButton)
#
hboxgrid = pw.hbox_grid(self.panel, self.onDeleteRow, 'location', self.grid)
self.deleteRowButton = hboxgrid.deleteRowButton
self.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
### Make Containers ###
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(label, flag=wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM, border=20)
vbox.Add(hbox_one, flag=wx.BOTTOM|wx.ALIGN_LEFT, border=10)
vbox.Add(hboxok, flag=wx.BOTTOM|wx.ALIGN_LEFT, border=10)
vbox.Add(hboxgrid, flag=wx.BOTTOM|wx.ALIGN_LEFT, border=10)
vbox.Add(self.loc_grid, flag=wx.TOP|wx.BOTTOM, border=10)
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
#if sys.platform in ['win32', 'win64']:
# self.panel.SetScrollbars(20, 20, 50, 50)
self.hbox_all.Fit(self)
self.Centre()
self.Show()
self.Hide()
self.Show() | make an interactive grid in which users can edit specimen names
as well as which sample a specimen belongs to | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_er_magic_dialogs.py#L684-L781 |
PmagPy/PmagPy | dialogs/pmag_er_magic_dialogs.py | ErMagicCheckFrame.InitAgeCheck | def InitAgeCheck(self):
"""make an interactive grid in which users can edit ages"""
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
text = """Step 6:
Fill in or correct any cells with information about ages.
The column for magic_method_codes can take multiple values in the form of a colon-delimited list.
You may use the drop-down menus to add as many values as needed in these columns.
(See Help button for details)
**Denotes controlled vocabulary """
label = wx.StaticText(self.panel, label=text)
self.items = self.er_magic_data.data_lists[self.er_magic_data.age_type][0]
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'age',
self.er_magic_data.headers, self.panel, 'location')
self.age_grid = self.grid_builder.make_grid(incl_pmag=False)
self.age_grid.InitUI()
self.grid_builder.add_data_to_grid(self.age_grid, 'age', incl_pmag=False)
self.grid_builder.add_age_data_to_grid()
self.grid = self.age_grid
#
# make it impossible to edit the 1st and 3rd columns
for row in range(self.age_grid.GetNumberRows()):
for col in (0, 2):
self.age_grid.SetReadOnly(row, col, True)
# initialize all needed drop-down menus
self.drop_down_menu = drop_down_menus.Menus("age", self, self.age_grid, None)
# re-set first column name
self.age_grid.SetColLabelValue(0, 'er_site_name')
### Create Buttons ###
hbox_one = wx.BoxSizer(wx.HORIZONTAL)
self.helpButton = wx.Button(self.panel, label="Help")
self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, "ErMagicAgeHelp.html"), self.helpButton)
hbox_one.Add(self.helpButton)
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.saveButton = wx.Button(self.panel, id=-1, label='Save')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.age_grid), self.saveButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.continueButton = wx.Button(self.panel, id=-1, label='Save and continue')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.age_grid, next_dia=None), self.continueButton)
self.backButton = wx.Button(self.panel, wx.ID_ANY, "&Back")
previous_dia = self.InitLocCheck
self.Bind(wx.EVT_BUTTON, lambda event: self.on_backButton(event, previous_dia), self.backButton)
self.panel.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
hboxok.Add(self.saveButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.cancelButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.continueButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.backButton)
### Make Containers ###
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(label, flag=wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM, border=20)#, flag=wx.ALIGN_LEFT|wx.BOTTOM, border=20)
vbox.Add(hbox_one, flag=wx.BOTTOM, border=10)
vbox.Add(hboxok, flag=wx.BOTTOM, border=10)
vbox.Add(self.age_grid, flag=wx.TOP|wx.BOTTOM, border=10) # EXPAND ??
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
#if sys.platform in ['win32', 'win64']:
# self.panel.SetScrollbars(20, 20, 50, 50)
self.hbox_all.Fit(self)
self.Centre()
self.Show()
self.Hide()
self.Show() | python | def InitAgeCheck(self):
"""make an interactive grid in which users can edit ages"""
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
text = """Step 6:
Fill in or correct any cells with information about ages.
The column for magic_method_codes can take multiple values in the form of a colon-delimited list.
You may use the drop-down menus to add as many values as needed in these columns.
(See Help button for details)
**Denotes controlled vocabulary """
label = wx.StaticText(self.panel, label=text)
self.items = self.er_magic_data.data_lists[self.er_magic_data.age_type][0]
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'age',
self.er_magic_data.headers, self.panel, 'location')
self.age_grid = self.grid_builder.make_grid(incl_pmag=False)
self.age_grid.InitUI()
self.grid_builder.add_data_to_grid(self.age_grid, 'age', incl_pmag=False)
self.grid_builder.add_age_data_to_grid()
self.grid = self.age_grid
#
# make it impossible to edit the 1st and 3rd columns
for row in range(self.age_grid.GetNumberRows()):
for col in (0, 2):
self.age_grid.SetReadOnly(row, col, True)
# initialize all needed drop-down menus
self.drop_down_menu = drop_down_menus.Menus("age", self, self.age_grid, None)
# re-set first column name
self.age_grid.SetColLabelValue(0, 'er_site_name')
### Create Buttons ###
hbox_one = wx.BoxSizer(wx.HORIZONTAL)
self.helpButton = wx.Button(self.panel, label="Help")
self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, "ErMagicAgeHelp.html"), self.helpButton)
hbox_one.Add(self.helpButton)
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.saveButton = wx.Button(self.panel, id=-1, label='Save')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.age_grid), self.saveButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.continueButton = wx.Button(self.panel, id=-1, label='Save and continue')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.age_grid, next_dia=None), self.continueButton)
self.backButton = wx.Button(self.panel, wx.ID_ANY, "&Back")
previous_dia = self.InitLocCheck
self.Bind(wx.EVT_BUTTON, lambda event: self.on_backButton(event, previous_dia), self.backButton)
self.panel.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
hboxok.Add(self.saveButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.cancelButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.continueButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.backButton)
### Make Containers ###
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(label, flag=wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM, border=20)#, flag=wx.ALIGN_LEFT|wx.BOTTOM, border=20)
vbox.Add(hbox_one, flag=wx.BOTTOM, border=10)
vbox.Add(hboxok, flag=wx.BOTTOM, border=10)
vbox.Add(self.age_grid, flag=wx.TOP|wx.BOTTOM, border=10) # EXPAND ??
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
#if sys.platform in ['win32', 'win64']:
# self.panel.SetScrollbars(20, 20, 50, 50)
self.hbox_all.Fit(self)
self.Centre()
self.Show()
self.Hide()
self.Show() | make an interactive grid in which users can edit ages | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_er_magic_dialogs.py#L784-L863 |
PmagPy/PmagPy | dialogs/pmag_er_magic_dialogs.py | ErMagicCheckFrame.onMouseOver | def onMouseOver(self, event, grid):
"""
Displays a tooltip over any cell in a certain column
"""
x, y = grid.CalcUnscrolledPosition(event.GetX(), event.GetY())
coords = grid.XYToCell(x, y)
col = coords[1]
row = coords[0]
# creates tooltip message for cells with long values
# note: this works with EPD for windows, and modern wxPython, but not with Canopy Python
msg = grid.GetCellValue(row, col)
if len(msg) > 15:
event.GetEventObject().SetToolTipString(msg)
else:
event.GetEventObject().SetToolTipString('') | python | def onMouseOver(self, event, grid):
"""
Displays a tooltip over any cell in a certain column
"""
x, y = grid.CalcUnscrolledPosition(event.GetX(), event.GetY())
coords = grid.XYToCell(x, y)
col = coords[1]
row = coords[0]
# creates tooltip message for cells with long values
# note: this works with EPD for windows, and modern wxPython, but not with Canopy Python
msg = grid.GetCellValue(row, col)
if len(msg) > 15:
event.GetEventObject().SetToolTipString(msg)
else:
event.GetEventObject().SetToolTipString('') | Displays a tooltip over any cell in a certain column | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_er_magic_dialogs.py#L891-L906 |
PmagPy/PmagPy | dialogs/pmag_er_magic_dialogs.py | ErMagicCheckFrame.on_helpButton | def on_helpButton(self, event, page=None):
"""shows html help page"""
# for use on the command line:
path = find_pmag_dir.get_pmag_dir()
# for use with pyinstaller
#path = self.main_frame.resource_dir
help_page = os.path.join(path, 'dialogs', 'help_files', page)
# if using with py2app, the directory structure is flat,
# so check to see where the resource actually is
if not os.path.exists(help_page):
help_page = os.path.join(path, 'help_files', page)
html_frame = pw.HtmlFrame(self, page=help_page)
html_frame.Show() | python | def on_helpButton(self, event, page=None):
"""shows html help page"""
# for use on the command line:
path = find_pmag_dir.get_pmag_dir()
# for use with pyinstaller
#path = self.main_frame.resource_dir
help_page = os.path.join(path, 'dialogs', 'help_files', page)
# if using with py2app, the directory structure is flat,
# so check to see where the resource actually is
if not os.path.exists(help_page):
help_page = os.path.join(path, 'help_files', page)
html_frame = pw.HtmlFrame(self, page=help_page)
html_frame.Show() | shows html help page | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_er_magic_dialogs.py#L981-L993 |
PmagPy/PmagPy | dialogs/pmag_er_magic_dialogs.py | ErMagicCheckFrame.on_continueButton | def on_continueButton(self, event, grid, next_dia=None):
"""
pulls up next dialog, if there is one.
gets any updated information from the current grid and runs ErMagicBuilder
"""
#wait = wx.BusyInfo("Please wait, working...")
# unhighlight selected columns, etc.
if self.drop_down_menu:
self.drop_down_menu.clean_up()
# remove '**' from col names
#self.remove_starred_labels(grid)
grid.remove_starred_labels()
grid.SaveEditControlValue() # locks in value in cell currently edited
grid_name = str(grid.GetName())
# check that all required data are present
validation_errors = self.validate(grid)
if validation_errors:
result = pw.warning_with_override("You are missing required data in these columns: {}\nAre you sure you want to continue without these data?".format(', '.join(validation_errors)))
if result == wx.ID_YES:
pass
else:
return False
if grid.changes:
self.onSave(grid)
self.deleteRowButton = None
#self.panel.Destroy() # calling Destroy here breaks with Anaconda Python (segfault)
# make sure that specimens get propagated with
# any default sample info
if next_dia == self.InitLocCheck:
if self.er_magic_data.specimens:
for spec in self.er_magic_data.specimens:
spec.propagate_data()
if next_dia:
wait = wx.BusyInfo("Please wait, working...")
wx.SafeYield()
wx.CallAfter(self.panel.Destroy) # no segfault here!
next_dia()
# need to wait to process the resize:
event = wx.PyCommandEvent(wx.EVT_SIZE.typeId, self.GetId())
wx.CallAfter(self.GetEventHandler().ProcessEvent, event)
del wait
else:
wait = wx.BusyInfo("Please wait, writing data to files...")
wx.SafeYield()
# actually write data:
self.er_magic_data.write_files()
self.Destroy()
del wait | python | def on_continueButton(self, event, grid, next_dia=None):
"""
pulls up next dialog, if there is one.
gets any updated information from the current grid and runs ErMagicBuilder
"""
#wait = wx.BusyInfo("Please wait, working...")
# unhighlight selected columns, etc.
if self.drop_down_menu:
self.drop_down_menu.clean_up()
# remove '**' from col names
#self.remove_starred_labels(grid)
grid.remove_starred_labels()
grid.SaveEditControlValue() # locks in value in cell currently edited
grid_name = str(grid.GetName())
# check that all required data are present
validation_errors = self.validate(grid)
if validation_errors:
result = pw.warning_with_override("You are missing required data in these columns: {}\nAre you sure you want to continue without these data?".format(', '.join(validation_errors)))
if result == wx.ID_YES:
pass
else:
return False
if grid.changes:
self.onSave(grid)
self.deleteRowButton = None
#self.panel.Destroy() # calling Destroy here breaks with Anaconda Python (segfault)
# make sure that specimens get propagated with
# any default sample info
if next_dia == self.InitLocCheck:
if self.er_magic_data.specimens:
for spec in self.er_magic_data.specimens:
spec.propagate_data()
if next_dia:
wait = wx.BusyInfo("Please wait, working...")
wx.SafeYield()
wx.CallAfter(self.panel.Destroy) # no segfault here!
next_dia()
# need to wait to process the resize:
event = wx.PyCommandEvent(wx.EVT_SIZE.typeId, self.GetId())
wx.CallAfter(self.GetEventHandler().ProcessEvent, event)
del wait
else:
wait = wx.BusyInfo("Please wait, writing data to files...")
wx.SafeYield()
# actually write data:
self.er_magic_data.write_files()
self.Destroy()
del wait | pulls up next dialog, if there is one.
gets any updated information from the current grid and runs ErMagicBuilder | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_er_magic_dialogs.py#L995-L1049 |
PmagPy/PmagPy | dialogs/pmag_er_magic_dialogs.py | ErMagicCheckFrame.onDeleteRow | def onDeleteRow(self, event, data_type):
"""
On button click, remove relevant object from both the data model and the grid.
"""
ancestry = self.er_magic_data.ancestry
child_type = ancestry[ancestry.index(data_type) - 1]
names = [self.grid.GetCellValue(row, 0) for row in self.selected_rows]
if data_type == 'site':
how_to_fix = 'Make sure to select a new site for each orphaned sample in the next step'
else:
how_to_fix = 'Go back a step and select a new {} for each orphaned {}'.format(data_type, child_type)
orphans = []
for name in names:
row = self.grid.row_labels.index(name)
orphan = self.er_magic_data.delete_methods[data_type](name)
if orphan:
orphans.extend(orphan)
self.grid.remove_row(row)
if orphans:
orphan_names = self.er_magic_data.make_name_list(orphans)
pw.simple_warning('You have deleted:\n\n {}\n\nthe parent(s) of {}(s):\n\n {}\n\n{}'.format(', '.join(names), child_type, ', '.join(orphan_names), how_to_fix))
self.selected_rows = set()
# update grid and data model
self.update_grid(self.grid)#, grids[grid_name])
self.grid.Refresh() | python | def onDeleteRow(self, event, data_type):
"""
On button click, remove relevant object from both the data model and the grid.
"""
ancestry = self.er_magic_data.ancestry
child_type = ancestry[ancestry.index(data_type) - 1]
names = [self.grid.GetCellValue(row, 0) for row in self.selected_rows]
if data_type == 'site':
how_to_fix = 'Make sure to select a new site for each orphaned sample in the next step'
else:
how_to_fix = 'Go back a step and select a new {} for each orphaned {}'.format(data_type, child_type)
orphans = []
for name in names:
row = self.grid.row_labels.index(name)
orphan = self.er_magic_data.delete_methods[data_type](name)
if orphan:
orphans.extend(orphan)
self.grid.remove_row(row)
if orphans:
orphan_names = self.er_magic_data.make_name_list(orphans)
pw.simple_warning('You have deleted:\n\n {}\n\nthe parent(s) of {}(s):\n\n {}\n\n{}'.format(', '.join(names), child_type, ', '.join(orphan_names), how_to_fix))
self.selected_rows = set()
# update grid and data model
self.update_grid(self.grid)#, grids[grid_name])
self.grid.Refresh() | On button click, remove relevant object from both the data model and the grid. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_er_magic_dialogs.py#L1100-L1128 |
PmagPy/PmagPy | dialogs/pmag_er_magic_dialogs.py | ErMagicCheckFrame.onLeftClickLabel | def onLeftClickLabel(self, event):
"""
When user clicks on a grid label, determine if it is a row label or a col label.
Pass along the event to the appropriate function.
(It will either highlight a column for editing all values, or highlight a row for deletion).
"""
if event.Col == -1 and event.Row == -1:
pass
elif event.Col < 0:
self.onSelectRow(event)
elif event.Row < 0:
self.drop_down_menu.on_label_click(event) | python | def onLeftClickLabel(self, event):
"""
When user clicks on a grid label, determine if it is a row label or a col label.
Pass along the event to the appropriate function.
(It will either highlight a column for editing all values, or highlight a row for deletion).
"""
if event.Col == -1 and event.Row == -1:
pass
elif event.Col < 0:
self.onSelectRow(event)
elif event.Row < 0:
self.drop_down_menu.on_label_click(event) | When user clicks on a grid label, determine if it is a row label or a col label.
Pass along the event to the appropriate function.
(It will either highlight a column for editing all values, or highlight a row for deletion). | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_er_magic_dialogs.py#L1131-L1142 |
PmagPy/PmagPy | dialogs/pmag_er_magic_dialogs.py | ErMagicCheckFrame.onSelectRow | def onSelectRow(self, event):
"""
Highlight or unhighlight a row for possible deletion.
"""
grid = self.grid
row = event.Row
default = (255, 255, 255, 255)
highlight = (191, 216, 216, 255)
cell_color = grid.GetCellBackgroundColour(row, 0)
attr = wx.grid.GridCellAttr()
if cell_color == default:
attr.SetBackgroundColour(highlight)
self.selected_rows.add(row)
else:
attr.SetBackgroundColour(default)
try:
self.selected_rows.remove(row)
except KeyError:
pass
if self.selected_rows and self.deleteRowButton:
self.deleteRowButton.Enable()
else:
self.deleteRowButton.Disable()
grid.SetRowAttr(row, attr)
grid.Refresh() | python | def onSelectRow(self, event):
"""
Highlight or unhighlight a row for possible deletion.
"""
grid = self.grid
row = event.Row
default = (255, 255, 255, 255)
highlight = (191, 216, 216, 255)
cell_color = grid.GetCellBackgroundColour(row, 0)
attr = wx.grid.GridCellAttr()
if cell_color == default:
attr.SetBackgroundColour(highlight)
self.selected_rows.add(row)
else:
attr.SetBackgroundColour(default)
try:
self.selected_rows.remove(row)
except KeyError:
pass
if self.selected_rows and self.deleteRowButton:
self.deleteRowButton.Enable()
else:
self.deleteRowButton.Disable()
grid.SetRowAttr(row, attr)
grid.Refresh() | Highlight or unhighlight a row for possible deletion. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_er_magic_dialogs.py#L1145-L1169 |
PmagPy/PmagPy | dialogs/pmag_er_magic_dialogs.py | ErMagicCheckFrame.update_grid | def update_grid(self, grid):
"""
takes in wxPython grid and ErMagic data object to be updated
"""
data_methods = {'specimen': self.er_magic_data.change_specimen,
'sample': self.er_magic_data.change_sample,
'site': self.er_magic_data.change_site,
'location': self.er_magic_data.change_location,
'age': self.er_magic_data.change_age}
grid_name = str(grid.GetName())
cols = list(range(grid.GetNumberCols()))
col_labels = []
for col in cols:
col_labels.append(grid.GetColLabelValue(col))
for row in grid.changes: # go through changes and update data structures
if row == -1:
continue
else:
data_dict = {}
for num, label in enumerate(col_labels):
if label:
data_dict[str(label)] = str(grid.GetCellValue(row, num))
new_name = str(grid.GetCellValue(row, 0))
old_name = self.temp_data[grid_name][row]
data_methods[grid_name](new_name, old_name, data_dict)
grid.changes = False | python | def update_grid(self, grid):
"""
takes in wxPython grid and ErMagic data object to be updated
"""
data_methods = {'specimen': self.er_magic_data.change_specimen,
'sample': self.er_magic_data.change_sample,
'site': self.er_magic_data.change_site,
'location': self.er_magic_data.change_location,
'age': self.er_magic_data.change_age}
grid_name = str(grid.GetName())
cols = list(range(grid.GetNumberCols()))
col_labels = []
for col in cols:
col_labels.append(grid.GetColLabelValue(col))
for row in grid.changes: # go through changes and update data structures
if row == -1:
continue
else:
data_dict = {}
for num, label in enumerate(col_labels):
if label:
data_dict[str(label)] = str(grid.GetCellValue(row, num))
new_name = str(grid.GetCellValue(row, 0))
old_name = self.temp_data[grid_name][row]
data_methods[grid_name](new_name, old_name, data_dict)
grid.changes = False | takes in wxPython grid and ErMagic data object to be updated | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_er_magic_dialogs.py#L1172-L1202 |
PmagPy/PmagPy | dialogs/pmag_er_magic_dialogs.py | ErMagicCheckFrame.onSave | def onSave(self, grid):#, age_data_type='site'):
"""
Save grid data in the data object
"""
# deselect column, including remove 'EDIT ALL' label
if self.drop_down_menu:
self.drop_down_menu.clean_up()
# save all changes to er_magic data object
self.grid_builder.save_grid_data()
# don't actually write data in this step (time-consuming)
# instead, write to files when user is done editing
#self.er_magic_data.write_files()
wx.MessageBox('Saved!', 'Info',
style=wx.OK | wx.ICON_INFORMATION) | python | def onSave(self, grid):#, age_data_type='site'):
"""
Save grid data in the data object
"""
# deselect column, including remove 'EDIT ALL' label
if self.drop_down_menu:
self.drop_down_menu.clean_up()
# save all changes to er_magic data object
self.grid_builder.save_grid_data()
# don't actually write data in this step (time-consuming)
# instead, write to files when user is done editing
#self.er_magic_data.write_files()
wx.MessageBox('Saved!', 'Info',
style=wx.OK | wx.ICON_INFORMATION) | Save grid data in the data object | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/pmag_er_magic_dialogs.py#L1205-L1221 |
PmagPy/PmagPy | programs/deprecated/update_measurements.py | main | def main():
"""
NAME
update_measurements.py
DESCRIPTION
update the magic_measurements table with new orientation info
SYNTAX
update_measurements.py [command line options]
OPTIONS
-h prints help message and quits
-f MFILE, specify magic_measurements file; default is magic_measurements.txt
-fsa SFILE, specify er_samples table; default is er_samples.txt
-F OFILE, specify output file, default is same as MFILE
"""
dir_path='.'
meas_file='magic_measurements.txt'
samp_file="er_samples.txt"
out_file='magic_measurements.txt'
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
if '-f' in sys.argv:
ind = sys.argv.index('-f')
meas_file=sys.argv[ind+1]
if '-fsa' in sys.argv:
ind = sys.argv.index('-fsa')
samp_file=sys.argv[ind+1]
if '-F' in sys.argv:
ind = sys.argv.index('-F')
out_file=sys.argv[ind+1]
# read in measurements file
meas_file=dir_path+'/'+meas_file
out_file=dir_path+'/'+out_file
samp_file=dir_path+'/'+samp_file
data,file_type=pmag.magic_read(meas_file)
samps,file_type=pmag.magic_read(samp_file)
MeasRecs=[]
sampnames,sflag=[],0
for rec in data:
for samp in samps:
if samp['er_sample_name'].lower()==rec['er_sample_name'].lower():
if samp['er_sample_name'] not in sampnames:sampnames.append(samp['er_sample_name'].lower())
rec['er_site_name']=samp['er_site_name']
rec['er_location_name']=samp['er_location_name']
MeasRecs.append(rec)
break
if rec['er_sample_name'].lower() not in sampnames:
sampnames.append(rec['er_sample_name'].lower())
sflag=1
SampRec={}
for key in list(samps[0].keys()):SampRec[key]=""
SampRec['er_sample_name']=rec['er_sample_name']
SampRec['er_citation_names']="This study"
SampRec['er_site_name']='MISSING'
SampRec['er_location_name']='MISSING'
SampRec['sample_desription']='recorded added by update_measurements - edit as needed'
samps.append(SampRec)
print(rec['er_sample_name'],' missing from er_samples.txt file - edit orient.txt file and re-import')
rec['er_site_name']='MISSING'
rec['er_location_name']='MISSING'
MeasRecs.append(rec)
pmag.magic_write(out_file,MeasRecs,'magic_measurements')
print("updated measurements file stored in ", out_file)
if sflag==1:
pmag.magic_write(samp_file,samps,'er_samples')
print("updated sample file stored in ", samp_file) | python | def main():
"""
NAME
update_measurements.py
DESCRIPTION
update the magic_measurements table with new orientation info
SYNTAX
update_measurements.py [command line options]
OPTIONS
-h prints help message and quits
-f MFILE, specify magic_measurements file; default is magic_measurements.txt
-fsa SFILE, specify er_samples table; default is er_samples.txt
-F OFILE, specify output file, default is same as MFILE
"""
dir_path='.'
meas_file='magic_measurements.txt'
samp_file="er_samples.txt"
out_file='magic_measurements.txt'
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
if '-f' in sys.argv:
ind = sys.argv.index('-f')
meas_file=sys.argv[ind+1]
if '-fsa' in sys.argv:
ind = sys.argv.index('-fsa')
samp_file=sys.argv[ind+1]
if '-F' in sys.argv:
ind = sys.argv.index('-F')
out_file=sys.argv[ind+1]
# read in measurements file
meas_file=dir_path+'/'+meas_file
out_file=dir_path+'/'+out_file
samp_file=dir_path+'/'+samp_file
data,file_type=pmag.magic_read(meas_file)
samps,file_type=pmag.magic_read(samp_file)
MeasRecs=[]
sampnames,sflag=[],0
for rec in data:
for samp in samps:
if samp['er_sample_name'].lower()==rec['er_sample_name'].lower():
if samp['er_sample_name'] not in sampnames:sampnames.append(samp['er_sample_name'].lower())
rec['er_site_name']=samp['er_site_name']
rec['er_location_name']=samp['er_location_name']
MeasRecs.append(rec)
break
if rec['er_sample_name'].lower() not in sampnames:
sampnames.append(rec['er_sample_name'].lower())
sflag=1
SampRec={}
for key in list(samps[0].keys()):SampRec[key]=""
SampRec['er_sample_name']=rec['er_sample_name']
SampRec['er_citation_names']="This study"
SampRec['er_site_name']='MISSING'
SampRec['er_location_name']='MISSING'
SampRec['sample_desription']='recorded added by update_measurements - edit as needed'
samps.append(SampRec)
print(rec['er_sample_name'],' missing from er_samples.txt file - edit orient.txt file and re-import')
rec['er_site_name']='MISSING'
rec['er_location_name']='MISSING'
MeasRecs.append(rec)
pmag.magic_write(out_file,MeasRecs,'magic_measurements')
print("updated measurements file stored in ", out_file)
if sflag==1:
pmag.magic_write(samp_file,samps,'er_samples')
print("updated sample file stored in ", samp_file) | NAME
update_measurements.py
DESCRIPTION
update the magic_measurements table with new orientation info
SYNTAX
update_measurements.py [command line options]
OPTIONS
-h prints help message and quits
-f MFILE, specify magic_measurements file; default is magic_measurements.txt
-fsa SFILE, specify er_samples table; default is er_samples.txt
-F OFILE, specify output file, default is same as MFILE | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/deprecated/update_measurements.py#L7-L78 |
PmagPy/PmagPy | programs/conversion_scripts2/ldeo_magic2.py | main | def main(command_line=True, **kwargs):
"""
NAME
ldeo_magic.py
DESCRIPTION
converts LDEO format files to magic_measurements format files
SYNTAX
ldeo_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-usr USER: identify user, default is ""
-f FILE: specify .ldeo format input file, required
-F FILE: specify output file, default is magic_measurements.txt
-Fsy: specify er_synthetics file, default is er_sythetics.txt
-LP [colon delimited list of protocols, include all that apply]
AF: af demag
T: thermal including thellier but not trm acquisition
S: Shaw method
I: IRM (acquisition)
N: NRM only
TRM: trm acquisition
ANI: anisotropy experiment
D: double AF demag
G: triple AF demag (GRM protocol)
-V [1,2,3] units of IRM field in volts using ASC coil #1,2 or 3
-spc NUM : specify number of characters to designate a specimen, default = 0
-loc LOCNAME : specify location/study name, must have either LOCNAME or SAMPFILE or be a synthetic
-syn INST TYPE: sets these specimens as synthetics created at institution INST and of type TYPE
-ins INST : specify which demag instrument was used (e.g, SIO-Suzy or SIO-Odette),default is ""
-dc B PHI THETA: dc lab field (in micro tesla) and phi,theta, default is none
NB: use PHI, THETA = -1 -1 to signal that it changes, i.e. in anisotropy experiment
-ac B : peak AF field (in mT) for ARM acquisition, default is none
-ARM_dc # default value is 50e-6
-ARM_temp # default is 600c
-ncn NCON: specify naming convention: default is #1 below
-A: don't average replicate measurements
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail [email protected] for help.
[8] synthetic - has no site name
INPUT
Best to put separate experiments (all AF, thermal, thellier, trm aquisition, Shaw, etc.) in
seperate .mag files (eg. af.mag, thermal.mag, etc.)
Format of LDEO files:
isaf2.fix
LAT: .00 LON: .00
ID TREAT I CD J CDECL CINCL GDECL GINCL BDECL BINCL SUSC M/V
________________________________________________________________________________
is031c2 .0 SD 0 461.600 163.9 17.5 337.1 74.5 319.1 74.4 .0 .0
ID: specimen name
TREAT: treatment step
I: Instrument
CD: Circular standard devation
J: intensity. assumed to be total moment in 10^-4 (emu)
CDECL: Declination in specimen coordinate system
CINCL: Declination in specimen coordinate system
GDECL: Declination in geographic coordinate system
GINCL: Declination in geographic coordinate system
BDECL: Declination in bedding adjusted coordinate system
BINCL: Declination in bedding adjusted coordinate system
SUSC: magnetic susceptibility (in micro SI)a
M/V: mass or volume for nomalizing (0 won't normalize)
"""
# initialize some stuff
noave=0
codelist = ''
methcode,inst="LP-NO",""
phi,theta,peakfield,labfield=0,0,0,0
pTRM,MD,samp_con,Z=0,0,'1',1
dec=[315,225,180,135,45,90,270,270,270,90,180,180,0,0,0]
inc=[0,0,0,0,0,-45,-45,0,45,45,45,-45,-90,-45,45]
tdec=[0,90,0,180,270,0,0,90,0]
tinc=[0,0,90,0,0,-90,0,0,90]
missing=1
demag="N"
er_location_name=""
citation='This study'
args=sys.argv
fmt='old'
syn=0
synfile='er_synthetics.txt'
magfile = ''
trm=0
irm=0
specnum=0
coil=""
arm_labfield = 50e-6
trm_peakT = 600+273
#
# get command line arguments
#
meas_file="magic_measurements.txt"
user=""
if command_line:
if "-h" in args:
print(main.__doc__)
return False
if "-usr" in args:
ind=args.index("-usr")
user=args[ind+1]
if '-F' in args:
ind=args.index("-F")
meas_file=args[ind+1]
if '-Fsy' in args:
ind=args.index("-Fsy")
synfile=args[ind+1]
if '-f' in args:
ind=args.index("-f")
magfile=args[ind+1]
if "-dc" in args:
ind=args.index("-dc")
labfield=float(args[ind+1])*1e-6
phi=float(args[ind+2])
theta=float(args[ind+3])
if "-ac" in args:
ind=args.index("-ac")
peakfield=float(args[ind+1])*1e-3
if "-spc" in args:
ind=args.index("-spc")
specnum=int(args[ind+1])
if "-loc" in args:
ind=args.index("-loc")
er_location_name=args[ind+1]
if '-syn' in args:
syn=1
ind=args.index("-syn")
institution=args[ind+1]
syntype=args[ind+2]
if '-fsy' in args:
ind=args.index("-fsy")
synfile=args[ind+1]
if "-ins" in args:
ind=args.index("-ins")
inst=args[ind+1]
if "-A" in args: noave=1
if "-ncn" in args:
ind=args.index("-ncn")
samp_con=sys.argv[ind+1]
if '-LP' in args:
ind=args.index("-LP")
codelist=args[ind+1]
if "-V" in args:
ind=args.index("-V")
coil=args[ind+1]
if '-ARM_dc' in args:
ind = args.index("-ARM_dc")
arm_labfield = args[ind+1]
if '-ARM_temp' in args:
ind = args.index('-ARM_temp')
trm_peakT = args[ind+1]
if not command_line:
user = kwargs.get('user', '')
meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
synfile = kwargs.get('synfile', 'er_synthetics.txt')
# rm samp_file = kwargs.get('samp_file', '')
magfile = kwargs.get('magfile', '')
labfield = int(kwargs.get('labfield', 0)) *1e-6
phi = int(kwargs.get('phi', 0))
theta = int(kwargs.get('theta', 0))
peakfield = int(kwargs.get('peakfield', 0))*1e-3
specnum = int(kwargs.get('specnum', 0))
er_location_name = kwargs.get('er_location_name', '')
# rm samp_infile = kwargs.get('samp_infile', '')
syn = kwargs.get('syn', 0)
institution = kwargs.get('institution', '')
syntype = kwargs.get('syntype', '')
inst = kwargs.get('inst', '')
noave = kwargs.get('noave', 0) # 0 means "do average", is default
samp_con = kwargs.get('samp_con', '1')
codelist = kwargs.get('codelist', '')
coil = kwargs.get('coil', '')
arm_labfield = kwargs.get('arm_labfield', 50e-6)
trm_peakT = kwargs.get('trm_peakT', 600+273)
# format/organize variables
if magfile:
try:
input=open(magfile,'r')
except:
print("bad mag file name")
return False, "bad mag file name"
else:
print("mag_file field is required option")
print(main.__doc__)
return False, "mag_file field is required option"
if specnum!=0:specnum=-specnum
if "4" in samp_con:
if "-" not in samp_con:
print("naming convention option [4] must be in form 4-Z where Z is an integer")
return False, "naming convention option [4] must be in form 4-Z where Z is an integer"
else:
Z=samp_con.split("-")[1]
samp_con="4"
if "7" in samp_con:
if "-" not in samp_con:
print("naming convention option [7] must be in form 7-Z where Z is an integer")
return False, "naming convention option [7] must be in form 7-Z where Z is an integer"
else:
Z=samp_con.split("-")[1]
samp_con="4"
codes=codelist.split(':')
if "AF" in codes:
demag='AF'
if not labfield: methcode="LT-AF-Z"
if labfield: methcode="LT-AF-I"
if "T" in codes:
demag="T"
if not labfield: methcode="LT-T-Z"
if labfield: methcode="LT-T-I"
if "I" in codes:
methcode="LP-IRM"
irmunits="mT"
if "S" in codes:
demag="S"
methcode="LP-PI-TRM:LP-PI-ALT-AFARM"
trm_labfield=labfield
# should use arm_labfield and trm_peakT as well, but these values are currently never asked for
if "G" in codes: methcode="LT-AF-G"
if "D" in codes: methcode="LT-AF-D"
if "TRM" in codes:
demag="T"
trm=1
if coil:
methcode="LP-IRM"
irmunits="V"
if coil not in ["1","2","3"]:
print(main.__doc__)
print('not a valid coil specification')
return False, 'not a valid coil specification'
if demag=="T" and "ANI" in codes:
methcode="LP-AN-TRM"
if demag=="AF" and "ANI" in codes:
methcode="LP-AN-ARM"
if labfield==0: labfield=50e-6
if peakfield==0: peakfield=.180
SynRecs,MagRecs=[],[]
version_num=pmag.get_version()
if 1: # ldeo file format
#
# find start of data:
#
DIspec=[]
Data,k=input.readlines(),0
for k in range(len(Data)):
rec=Data[k].split()
if rec[0][0]=="_" or rec[0][0:2]=="!_":
break
start=k+1
for k in range(start,len(Data)):
rec=Data[k].split()
if len(rec)>0:
MagRec={}
MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["treatment_ac_field"]='0'
MagRec["treatment_dc_field"]='0'
MagRec["treatment_dc_field_phi"]='0'
MagRec["treatment_dc_field_theta"]='0'
meas_type="LT-NO"
MagRec["measurement_flag"]='g'
MagRec["measurement_standard"]='u'
MagRec["measurement_number"]='1'
MagRec["er_specimen_name"]=rec[0]
if specnum!=0:
MagRec["er_sample_name"]=rec[0][:specnum]
else:
MagRec["er_sample_name"]=rec[0]
site=pmag.parse_site(MagRec['er_sample_name'],samp_con,Z)
MagRec["er_site_name"]=site
MagRec["er_location_name"]=er_location_name
MagRec["measurement_csd"]=rec[3]
MagRec["measurement_magn_moment"]='%10.3e'% (float(rec[4])*1e-7) # moment in Am^2 (from 10^-4 emu)
#
#if samp_file!="" and MagRec["er_sample_name"] not in Samps: # create er_samples.txt file with these data
# cdec,cinc=float(rec[5]),float(rec[6])
# gdec,ginc=float(rec[7]),float(rec[8])
# az,pl=pmag.get_azpl(cdec,cinc,gdec,ginc)
# bdec,binc=float(rec[9]),float(rec[10])
# if rec[7]!=rec[9] and rec[6]!=rec[8]:
# dipdir,dip=pmag.get_tilt(gdec,ginc,bdec,binc)
# else:
# dipdir,dip=0,0
# ErSampRec={}
# ErSampRec['er_location_name']=MagRec['er_location_name']
# ErSampRec['er_sample_name']=MagRec['er_sample_name']
# ErSampRec['er_site_name']=MagRec['er_site_name']
# ErSampRec['sample_azimuth']='%7.1f'%(az)
# ErSampRec['sample_dip']='%7.1f'%(pl)
# ErSampRec['sample_bed_dip_direction']='%7.1f'%(dipdir)
# ErSampRec['sample_bed_dip']='%7.1f'%(dip)
# ErSampRec['sample_description']='az,pl,dip_dir and dip recalculated from [c,g,b][dec,inc] in ldeo file'
# ErSampRec['magic_method_codes']='SO-REC'
# ErSamps.append(ErSampRec)
# Samps.append(ErSampRec['er_sample_name'])
MagRec["measurement_dec"]=rec[5]
MagRec["measurement_inc"]=rec[6]
MagRec["measurement_chi"]='%10.3e'%(float(rec[11])*1e-5)#convert to SI (assume Bartington, 10-5 SI)
#MagRec["magic_instrument_codes"]=rec[2]
#MagRec["er_analyst_mail_names"]=""
MagRec["er_citation_names"]="This study"
MagRec["magic_method_codes"]=meas_type
if demag=="AF":
if methcode != "LP-AN-ARM":
MagRec["treatment_ac_field"]='%8.3e' %(float(rec[1])*1e-3) # peak field in tesla
meas_type="LT-AF-Z"
MagRec["treatment_dc_field"]='0'
else: # AARM experiment
if treat[1][0]=='0':
meas_type="LT-AF-Z"
MagRec["treatment_ac_field"]='%8.3e' %(peakfield) # peak field in tesla
else:
meas_type="LT-AF-I"
ipos=int(treat[0])-1
MagRec["treatment_dc_field_phi"]='%7.1f' %(dec[ipos])
MagRec["treatment_dc_field_theta"]='%7.1f'% (inc[ipos])
MagRec["treatment_dc_field"]='%8.3e'%(labfield)
MagRec["treatment_ac_field"]='%8.3e' %(peakfield) # peak field in tesla
elif demag=="T":
if rec[1][0]==".":rec[1]="0"+rec[1]
treat=rec[1].split('.')
if len(treat)==1:treat.append('0')
MagRec["treatment_temp"]='%8.3e' % (float(rec[1])+273.) # temp in kelvin
meas_type="LT-T-Z"
MagRec["treatment_temp"]='%8.3e' % (float(treat[0])+273.) # temp in kelvin
if trm==0: # demag=T and not trmaq
if treat[1][0]=='0':
meas_type="LT-T-Z"
else:
MagRec["treatment_dc_field"]='%8.3e' % (labfield) # labfield in tesla (convert from microT)
MagRec["treatment_dc_field_phi"]='%7.1f' % (phi) # labfield phi
MagRec["treatment_dc_field_theta"]='%7.1f' % (theta) # labfield theta
if treat[1][0]=='1':meas_type="LT-T-I" # in-field thermal step
if treat[1][0]=='2':
meas_type="LT-PTRM-I" # pTRM check
pTRM=1
if treat[1][0]=='3':
MagRec["treatment_dc_field"]='0' # this is a zero field step
meas_type="LT-PTRM-MD" # pTRM tail check
else:
meas_type="LT-T-I" # trm acquisition experiment
MagRec['magic_method_codes']=meas_type
MagRecs.append(MagRec)
MagOuts=pmag.measurements_methods(MagRecs,noave)
pmag.magic_write(meas_file,MagOuts,'magic_measurements')
print("results put in ",meas_file)
if len(SynRecs)>0:
pmag.magic_write(synfile,SynRecs,'er_synthetics')
print("synthetics put in ",synfile)
return True, meas_file | python | def main(command_line=True, **kwargs):
"""
NAME
ldeo_magic.py
DESCRIPTION
converts LDEO format files to magic_measurements format files
SYNTAX
ldeo_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-usr USER: identify user, default is ""
-f FILE: specify .ldeo format input file, required
-F FILE: specify output file, default is magic_measurements.txt
-Fsy: specify er_synthetics file, default is er_sythetics.txt
-LP [colon delimited list of protocols, include all that apply]
AF: af demag
T: thermal including thellier but not trm acquisition
S: Shaw method
I: IRM (acquisition)
N: NRM only
TRM: trm acquisition
ANI: anisotropy experiment
D: double AF demag
G: triple AF demag (GRM protocol)
-V [1,2,3] units of IRM field in volts using ASC coil #1,2 or 3
-spc NUM : specify number of characters to designate a specimen, default = 0
-loc LOCNAME : specify location/study name, must have either LOCNAME or SAMPFILE or be a synthetic
-syn INST TYPE: sets these specimens as synthetics created at institution INST and of type TYPE
-ins INST : specify which demag instrument was used (e.g, SIO-Suzy or SIO-Odette),default is ""
-dc B PHI THETA: dc lab field (in micro tesla) and phi,theta, default is none
NB: use PHI, THETA = -1 -1 to signal that it changes, i.e. in anisotropy experiment
-ac B : peak AF field (in mT) for ARM acquisition, default is none
-ARM_dc # default value is 50e-6
-ARM_temp # default is 600c
-ncn NCON: specify naming convention: default is #1 below
-A: don't average replicate measurements
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail [email protected] for help.
[8] synthetic - has no site name
INPUT
Best to put separate experiments (all AF, thermal, thellier, trm aquisition, Shaw, etc.) in
seperate .mag files (eg. af.mag, thermal.mag, etc.)
Format of LDEO files:
isaf2.fix
LAT: .00 LON: .00
ID TREAT I CD J CDECL CINCL GDECL GINCL BDECL BINCL SUSC M/V
________________________________________________________________________________
is031c2 .0 SD 0 461.600 163.9 17.5 337.1 74.5 319.1 74.4 .0 .0
ID: specimen name
TREAT: treatment step
I: Instrument
CD: Circular standard devation
J: intensity. assumed to be total moment in 10^-4 (emu)
CDECL: Declination in specimen coordinate system
CINCL: Declination in specimen coordinate system
GDECL: Declination in geographic coordinate system
GINCL: Declination in geographic coordinate system
BDECL: Declination in bedding adjusted coordinate system
BINCL: Declination in bedding adjusted coordinate system
SUSC: magnetic susceptibility (in micro SI)a
M/V: mass or volume for nomalizing (0 won't normalize)
"""
# initialize some stuff
noave=0
codelist = ''
methcode,inst="LP-NO",""
phi,theta,peakfield,labfield=0,0,0,0
pTRM,MD,samp_con,Z=0,0,'1',1
dec=[315,225,180,135,45,90,270,270,270,90,180,180,0,0,0]
inc=[0,0,0,0,0,-45,-45,0,45,45,45,-45,-90,-45,45]
tdec=[0,90,0,180,270,0,0,90,0]
tinc=[0,0,90,0,0,-90,0,0,90]
missing=1
demag="N"
er_location_name=""
citation='This study'
args=sys.argv
fmt='old'
syn=0
synfile='er_synthetics.txt'
magfile = ''
trm=0
irm=0
specnum=0
coil=""
arm_labfield = 50e-6
trm_peakT = 600+273
#
# get command line arguments
#
meas_file="magic_measurements.txt"
user=""
if command_line:
if "-h" in args:
print(main.__doc__)
return False
if "-usr" in args:
ind=args.index("-usr")
user=args[ind+1]
if '-F' in args:
ind=args.index("-F")
meas_file=args[ind+1]
if '-Fsy' in args:
ind=args.index("-Fsy")
synfile=args[ind+1]
if '-f' in args:
ind=args.index("-f")
magfile=args[ind+1]
if "-dc" in args:
ind=args.index("-dc")
labfield=float(args[ind+1])*1e-6
phi=float(args[ind+2])
theta=float(args[ind+3])
if "-ac" in args:
ind=args.index("-ac")
peakfield=float(args[ind+1])*1e-3
if "-spc" in args:
ind=args.index("-spc")
specnum=int(args[ind+1])
if "-loc" in args:
ind=args.index("-loc")
er_location_name=args[ind+1]
if '-syn' in args:
syn=1
ind=args.index("-syn")
institution=args[ind+1]
syntype=args[ind+2]
if '-fsy' in args:
ind=args.index("-fsy")
synfile=args[ind+1]
if "-ins" in args:
ind=args.index("-ins")
inst=args[ind+1]
if "-A" in args: noave=1
if "-ncn" in args:
ind=args.index("-ncn")
samp_con=sys.argv[ind+1]
if '-LP' in args:
ind=args.index("-LP")
codelist=args[ind+1]
if "-V" in args:
ind=args.index("-V")
coil=args[ind+1]
if '-ARM_dc' in args:
ind = args.index("-ARM_dc")
arm_labfield = args[ind+1]
if '-ARM_temp' in args:
ind = args.index('-ARM_temp')
trm_peakT = args[ind+1]
if not command_line:
user = kwargs.get('user', '')
meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
synfile = kwargs.get('synfile', 'er_synthetics.txt')
# rm samp_file = kwargs.get('samp_file', '')
magfile = kwargs.get('magfile', '')
labfield = int(kwargs.get('labfield', 0)) *1e-6
phi = int(kwargs.get('phi', 0))
theta = int(kwargs.get('theta', 0))
peakfield = int(kwargs.get('peakfield', 0))*1e-3
specnum = int(kwargs.get('specnum', 0))
er_location_name = kwargs.get('er_location_name', '')
# rm samp_infile = kwargs.get('samp_infile', '')
syn = kwargs.get('syn', 0)
institution = kwargs.get('institution', '')
syntype = kwargs.get('syntype', '')
inst = kwargs.get('inst', '')
noave = kwargs.get('noave', 0) # 0 means "do average", is default
samp_con = kwargs.get('samp_con', '1')
codelist = kwargs.get('codelist', '')
coil = kwargs.get('coil', '')
arm_labfield = kwargs.get('arm_labfield', 50e-6)
trm_peakT = kwargs.get('trm_peakT', 600+273)
# format/organize variables
if magfile:
try:
input=open(magfile,'r')
except:
print("bad mag file name")
return False, "bad mag file name"
else:
print("mag_file field is required option")
print(main.__doc__)
return False, "mag_file field is required option"
if specnum!=0:specnum=-specnum
if "4" in samp_con:
if "-" not in samp_con:
print("naming convention option [4] must be in form 4-Z where Z is an integer")
return False, "naming convention option [4] must be in form 4-Z where Z is an integer"
else:
Z=samp_con.split("-")[1]
samp_con="4"
if "7" in samp_con:
if "-" not in samp_con:
print("naming convention option [7] must be in form 7-Z where Z is an integer")
return False, "naming convention option [7] must be in form 7-Z where Z is an integer"
else:
Z=samp_con.split("-")[1]
samp_con="4"
codes=codelist.split(':')
if "AF" in codes:
demag='AF'
if not labfield: methcode="LT-AF-Z"
if labfield: methcode="LT-AF-I"
if "T" in codes:
demag="T"
if not labfield: methcode="LT-T-Z"
if labfield: methcode="LT-T-I"
if "I" in codes:
methcode="LP-IRM"
irmunits="mT"
if "S" in codes:
demag="S"
methcode="LP-PI-TRM:LP-PI-ALT-AFARM"
trm_labfield=labfield
# should use arm_labfield and trm_peakT as well, but these values are currently never asked for
if "G" in codes: methcode="LT-AF-G"
if "D" in codes: methcode="LT-AF-D"
if "TRM" in codes:
demag="T"
trm=1
if coil:
methcode="LP-IRM"
irmunits="V"
if coil not in ["1","2","3"]:
print(main.__doc__)
print('not a valid coil specification')
return False, 'not a valid coil specification'
if demag=="T" and "ANI" in codes:
methcode="LP-AN-TRM"
if demag=="AF" and "ANI" in codes:
methcode="LP-AN-ARM"
if labfield==0: labfield=50e-6
if peakfield==0: peakfield=.180
SynRecs,MagRecs=[],[]
version_num=pmag.get_version()
if 1: # ldeo file format
#
# find start of data:
#
DIspec=[]
Data,k=input.readlines(),0
for k in range(len(Data)):
rec=Data[k].split()
if rec[0][0]=="_" or rec[0][0:2]=="!_":
break
start=k+1
for k in range(start,len(Data)):
rec=Data[k].split()
if len(rec)>0:
MagRec={}
MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["treatment_ac_field"]='0'
MagRec["treatment_dc_field"]='0'
MagRec["treatment_dc_field_phi"]='0'
MagRec["treatment_dc_field_theta"]='0'
meas_type="LT-NO"
MagRec["measurement_flag"]='g'
MagRec["measurement_standard"]='u'
MagRec["measurement_number"]='1'
MagRec["er_specimen_name"]=rec[0]
if specnum!=0:
MagRec["er_sample_name"]=rec[0][:specnum]
else:
MagRec["er_sample_name"]=rec[0]
site=pmag.parse_site(MagRec['er_sample_name'],samp_con,Z)
MagRec["er_site_name"]=site
MagRec["er_location_name"]=er_location_name
MagRec["measurement_csd"]=rec[3]
MagRec["measurement_magn_moment"]='%10.3e'% (float(rec[4])*1e-7) # moment in Am^2 (from 10^-4 emu)
#
#if samp_file!="" and MagRec["er_sample_name"] not in Samps: # create er_samples.txt file with these data
# cdec,cinc=float(rec[5]),float(rec[6])
# gdec,ginc=float(rec[7]),float(rec[8])
# az,pl=pmag.get_azpl(cdec,cinc,gdec,ginc)
# bdec,binc=float(rec[9]),float(rec[10])
# if rec[7]!=rec[9] and rec[6]!=rec[8]:
# dipdir,dip=pmag.get_tilt(gdec,ginc,bdec,binc)
# else:
# dipdir,dip=0,0
# ErSampRec={}
# ErSampRec['er_location_name']=MagRec['er_location_name']
# ErSampRec['er_sample_name']=MagRec['er_sample_name']
# ErSampRec['er_site_name']=MagRec['er_site_name']
# ErSampRec['sample_azimuth']='%7.1f'%(az)
# ErSampRec['sample_dip']='%7.1f'%(pl)
# ErSampRec['sample_bed_dip_direction']='%7.1f'%(dipdir)
# ErSampRec['sample_bed_dip']='%7.1f'%(dip)
# ErSampRec['sample_description']='az,pl,dip_dir and dip recalculated from [c,g,b][dec,inc] in ldeo file'
# ErSampRec['magic_method_codes']='SO-REC'
# ErSamps.append(ErSampRec)
# Samps.append(ErSampRec['er_sample_name'])
MagRec["measurement_dec"]=rec[5]
MagRec["measurement_inc"]=rec[6]
MagRec["measurement_chi"]='%10.3e'%(float(rec[11])*1e-5)#convert to SI (assume Bartington, 10-5 SI)
#MagRec["magic_instrument_codes"]=rec[2]
#MagRec["er_analyst_mail_names"]=""
MagRec["er_citation_names"]="This study"
MagRec["magic_method_codes"]=meas_type
if demag=="AF":
if methcode != "LP-AN-ARM":
MagRec["treatment_ac_field"]='%8.3e' %(float(rec[1])*1e-3) # peak field in tesla
meas_type="LT-AF-Z"
MagRec["treatment_dc_field"]='0'
else: # AARM experiment
if treat[1][0]=='0':
meas_type="LT-AF-Z"
MagRec["treatment_ac_field"]='%8.3e' %(peakfield) # peak field in tesla
else:
meas_type="LT-AF-I"
ipos=int(treat[0])-1
MagRec["treatment_dc_field_phi"]='%7.1f' %(dec[ipos])
MagRec["treatment_dc_field_theta"]='%7.1f'% (inc[ipos])
MagRec["treatment_dc_field"]='%8.3e'%(labfield)
MagRec["treatment_ac_field"]='%8.3e' %(peakfield) # peak field in tesla
elif demag=="T":
if rec[1][0]==".":rec[1]="0"+rec[1]
treat=rec[1].split('.')
if len(treat)==1:treat.append('0')
MagRec["treatment_temp"]='%8.3e' % (float(rec[1])+273.) # temp in kelvin
meas_type="LT-T-Z"
MagRec["treatment_temp"]='%8.3e' % (float(treat[0])+273.) # temp in kelvin
if trm==0: # demag=T and not trmaq
if treat[1][0]=='0':
meas_type="LT-T-Z"
else:
MagRec["treatment_dc_field"]='%8.3e' % (labfield) # labfield in tesla (convert from microT)
MagRec["treatment_dc_field_phi"]='%7.1f' % (phi) # labfield phi
MagRec["treatment_dc_field_theta"]='%7.1f' % (theta) # labfield theta
if treat[1][0]=='1':meas_type="LT-T-I" # in-field thermal step
if treat[1][0]=='2':
meas_type="LT-PTRM-I" # pTRM check
pTRM=1
if treat[1][0]=='3':
MagRec["treatment_dc_field"]='0' # this is a zero field step
meas_type="LT-PTRM-MD" # pTRM tail check
else:
meas_type="LT-T-I" # trm acquisition experiment
MagRec['magic_method_codes']=meas_type
MagRecs.append(MagRec)
MagOuts=pmag.measurements_methods(MagRecs,noave)
pmag.magic_write(meas_file,MagOuts,'magic_measurements')
print("results put in ",meas_file)
if len(SynRecs)>0:
pmag.magic_write(synfile,SynRecs,'er_synthetics')
print("synthetics put in ",synfile)
return True, meas_file | NAME
ldeo_magic.py
DESCRIPTION
converts LDEO format files to magic_measurements format files
SYNTAX
ldeo_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-usr USER: identify user, default is ""
-f FILE: specify .ldeo format input file, required
-F FILE: specify output file, default is magic_measurements.txt
-Fsy: specify er_synthetics file, default is er_sythetics.txt
-LP [colon delimited list of protocols, include all that apply]
AF: af demag
T: thermal including thellier but not trm acquisition
S: Shaw method
I: IRM (acquisition)
N: NRM only
TRM: trm acquisition
ANI: anisotropy experiment
D: double AF demag
G: triple AF demag (GRM protocol)
-V [1,2,3] units of IRM field in volts using ASC coil #1,2 or 3
-spc NUM : specify number of characters to designate a specimen, default = 0
-loc LOCNAME : specify location/study name, must have either LOCNAME or SAMPFILE or be a synthetic
-syn INST TYPE: sets these specimens as synthetics created at institution INST and of type TYPE
-ins INST : specify which demag instrument was used (e.g, SIO-Suzy or SIO-Odette),default is ""
-dc B PHI THETA: dc lab field (in micro tesla) and phi,theta, default is none
NB: use PHI, THETA = -1 -1 to signal that it changes, i.e. in anisotropy experiment
-ac B : peak AF field (in mT) for ARM acquisition, default is none
-ARM_dc # default value is 50e-6
-ARM_temp # default is 600c
-ncn NCON: specify naming convention: default is #1 below
-A: don't average replicate measurements
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail [email protected] for help.
[8] synthetic - has no site name
INPUT
Best to put separate experiments (all AF, thermal, thellier, trm aquisition, Shaw, etc.) in
seperate .mag files (eg. af.mag, thermal.mag, etc.)
Format of LDEO files:
isaf2.fix
LAT: .00 LON: .00
ID TREAT I CD J CDECL CINCL GDECL GINCL BDECL BINCL SUSC M/V
________________________________________________________________________________
is031c2 .0 SD 0 461.600 163.9 17.5 337.1 74.5 319.1 74.4 .0 .0
ID: specimen name
TREAT: treatment step
I: Instrument
CD: Circular standard devation
J: intensity. assumed to be total moment in 10^-4 (emu)
CDECL: Declination in specimen coordinate system
CINCL: Declination in specimen coordinate system
GDECL: Declination in geographic coordinate system
GINCL: Declination in geographic coordinate system
BDECL: Declination in bedding adjusted coordinate system
BINCL: Declination in bedding adjusted coordinate system
SUSC: magnetic susceptibility (in micro SI)a
M/V: mass or volume for nomalizing (0 won't normalize) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/conversion_scripts2/ldeo_magic2.py#L7-L382 |
PmagPy/PmagPy | programs/deprecated/odp_dsc_magic.py | main | def main():
"""
NAME
odp_dcs_magic.py
DESCRIPTION
converts ODP discrete sample format files to magic_measurements format files
SYNTAX
odp_dsc_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-F FILE: specify output measurements file, default is magic_measurements.txt
-Fsp FILE: specify output er_specimens.txt file, default is er_specimens.txt
-Fsa FILE: specify output er_samples.txt file for appending, default is er_samples.txt
-Fsi FILE: specify output er_sites.txt file, default is er_sites.txt
-dc B PHI THETA: dc lab field (in micro tesla) and phi,theta, default is none
NB: use PHI, THETA = -1 -1 to signal that it changes, i.e. in anisotropy experiment
-ac B : peak AF field (in mT) for ARM acquisition, default is none
-A : don't average replicate measurements
INPUT
Put data from separate experiments (all AF, thermal, thellier, trm aquisition, Shaw, etc.) in separate directory
"""
#
#
version_num=pmag.get_version()
meas_file='magic_measurements.txt'
spec_file='er_specimens.txt'
samp_file='er_samples.txt'
site_file='er_sites.txt'
ErSpecs,ErSamps,ErSites,ErLocs,ErCits=[],[],[],[],[]
MagRecs=[]
citation="This study"
dir_path,demag='.','NRM'
args=sys.argv
noave=0
if '-WD' in args:
ind=args.index("-WD")
dir_path=args[ind+1]
if "-h" in args:
print(main.__doc__)
sys.exit()
if "-A" in args: noave=1
if '-F' in args:
ind=args.index("-F")
meas_file=args[ind+1]
if '-Fsp' in args:
ind=args.index("-Fsp")
spec_file=args[ind+1]
if '-Fsa' in args:
ind=args.index("-Fsa")
samp_file=dir_path+'/'+args[ind+1]
ErSamps,file_type=pmag.magic_read(samp_file)
else:
samp_file=dir_path+'/'+samp_file
if '-LP' in args:
ind=args.index("-LP")
codelist=args[ind+1]
codes=codelist.split(':')
if "AF" in codes:
demag='AF'
if'-dc' not in args: methcode="LT-AF-Z"
if'-dc' in args: methcode="LT-AF-I"
if "T" in codes:
demag="T"
if '-dc' not in args: methcode="LT-T-Z"
if '-dc' in args: methcode="LT-T-I"
if "I" in codes:
methcode="LP-IRM"
if "S" in codes:
demag="S"
methcode="LP-PI-TRM:LP-PI-ALT-AFARM"
trm_labfield=labfield
ans=input("DC lab field for ARM step: [50uT] ")
if ans=="":
arm_labfield=50e-6
else:
arm_labfield=float(ans)*1e-6
ans=input("temperature for total trm step: [600 C] ")
if ans=="":
trm_peakT=600+273 # convert to kelvin
else:
trm_peakT=float(ans)+273 # convert to kelvin
if "G" in codes: methcode="LT-AF-G"
if "D" in codes: methcode="LT-AF-D"
if "TRM" in codes:
demag="T"
trm=1
if demag=="T" and "ANI" in codes:
methcode="LP-AN-TRM"
if demag=="AF" and "ANI" in codes:
methcode="LP-AN-ARM"
if labfield==0: labfield=50e-6
if peakfield==0: peakfield=.180
spec_file=dir_path+'/'+spec_file
site_file=dir_path+'/'+site_file
meas_file=dir_path+'/'+meas_file
filelist=os.listdir(dir_path) # read in list of files to import
specimens,samples,sites=[],[],[]
MagRecs,SpecRecs,SampRecs=[],[],[]
for samp in ErSamps:
if samp['er_sample_name'] not in samples:
samples.append(samp['er_sample_name'])
SampRecs.append(samp)
for file in filelist: # parse each file
if file[-3:].lower()=='dsc':
print('processing: ',file)
MagRec,SpecRec,SampRec={},{},{}
treatment_type,treatment_value,user="","",""
inst="ODP-SRM"
input=open(dir_path+'/'+file,'r').readlines()
IDs=file.split('_') # splits on underscores
pieces=IDs[0].split('-')
expedition=pieces[0]
location=pieces[1]
if file[0]!='_':
while len(pieces[2])<4:pieces[2]='0'+pieces[2] # pad core to be 3 characters
specimen=""
else:
specimen="test"
for piece in pieces:
specimen=specimen+piece+'-'
specimen=specimen[:-1]
alt_spec=IDs[1] # alternate specimen is second field in field name
# set up specimen record for Er_specimens table
SpecRec['er_expedition_name']=expedition
SpecRec['er_location_name']=location
SpecRec['er_site_name']=specimen
SpecRec['er_sample_name']=specimen
SpecRec['er_citation_names']=citation
for key in list(SpecRec.keys()):SampRec[key]=SpecRec[key]
SampRec['sample_azimuth']='0'
SampRec['sample_dip']='0'
SampRec['magic_method_codes']='FS-C-DRILL-IODP:SP-SS-C:SO-V'
SpecRec['er_specimen_name']=specimen
SampRec['er_specimen_names']=specimen
for key in list(SpecRec.keys()):MagRec[key]=SpecRec[key]
# set up measurement record - default is NRM
MagRec['er_analyst_mail_names']=user
MagRec['magic_method_codes']='LT-NO'
MagRec['magic_software_packages']=version_num
MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["treatment_ac_field"]=0.
MagRec["treatment_dc_field"]='0'
MagRec["treatment_dc_field_phi"]='0'
MagRec["treatment_dc_field_theta"]='0'
MagRec["measurement_flag"]='g' # assume all data are "good"
MagRec["measurement_standard"]='u' # assume all data are "good"
MagRec["measurement_csd"]='' # set csd to blank
SpecRec['er_specimen_alternatives']=alt_spec
vol=7e-6 # assume 7 cc samples
datestamp=input[1].split() # date time is second line of file
mmddyy=datestamp[0].split('/') # break into month day year
date=mmddyy[2]+':'+mmddyy[0]+":"+mmddyy[1] +':' +datestamp[1]
MagRec["measurement_date"]=date
for k in range(len(input)):
fields= input[k].split("=")
if 'treatment_type' in fields[0]:
if "Alternating Frequency Demagnetization" in fields[1]:
MagRec['magic_method_codes'] = 'LT-AF-Z'
inst=inst+':ODP-DTECH' # measured on shipboard AF DTECH D2000
treatment_type="AF"
if "Anhysteretic Remanent Magnetization" in fields[1]:
MagRec['magic_method_codes'] = 'LT-AF-I'
inst=inst+':ODP-DTECH' # measured on shipboard AF DTECH D2000
treatment_type="ARM"
if "Isothermal Remanent Magnetization" in fields[1]:
MagRec['magic_method_codes'] = 'LT-IRM'
inst=inst+':ODP-IMP' # measured on shipboard ASC IMPULSE magnetizer
treatment_type="IRM"
if "treatment_value" in fields[0]:
values=fields[1].split(',')
value=values[0]
if value!=" \n":
if treatment_type=="AF":
treatment_value=float(value)*1e-3
MagRec["treatment_ac_field"]=treatment_value # AF demag in treat mT => T
elif treatment_type=="IRM":
treatment_value=float(value)*1e-3
MagRec["treatment_dc_field"]='%8.3e'%(treatment_value) # IRM treat mT => T
if treatment_type=="ARM":
treatment_value=float(value)*1e-3
dc_value=float(values[1])*1e-3
MagRec["treatment_ac_field"]=treatment_value # AF demag in treat mT => T
MagRec["treatment_dc_field"]='%8.3e'%(dc_value) # DC mT => T
if 'user' in fields[0]:
user=fields[-1]
MagRec["er_analyst_mail_names"]=user
if 'sample_orientation' in fields[0]:
MagRec["measurement_description"]=fields[-1]
MagRec["measurement_standard"]='u' # assume all data are "good"
if 'sample_area' in fields[0]: vol=float(fields[1])*1e-6 # takes volume (cc) and converts to m^3
if 'run_number' in fields[0]:
MagRec['external_database_ids']=fields[1] # run number is the LIMS measurement number
MagRec['external_database_names']='LIMS'
if input[k][0:7]=='<MULTI>':
rec=input[k+1].split(',') # list of data
for item in rec:
items=item.split('=')
if items[0].strip()=='demag_level' and treatment_value=="" :
treat= float(items[1])
if treat!=0:
MagRec['magic_method_codes']='LT-AF-Z'
inst=inst+':ODP-SRM-AF'
MagRec["treatment_ac_field"]=treat*1e-3 # AF demag in treat mT => T
if items[0].strip()=='inclination_w_tray_w_bkgrd': MagRec['measurement_inc']=items[1]
if items[0].strip()=='declination_w_tray_w_bkgrd': MagRec['measurement_dec']=items[1]
if items[0].strip()=='intensity_w_tray_w_bkgrd': MagRec['measurement_magn_moment']='%8.3e'%(float(items[1])*vol) # convert intensity from A/m to Am^2 using vol
if items[0].strip()=='x_stdev':MagRec['measurement_x_sd']=items[1]
if items[0].strip()=='y_stdev':MagRec['measurement_y_sd']=items[1]
if items[0].strip()=='z_stdev':MagRec['measurement_sd_z']=items[1]
MagRec['magic_instrument_codes']=inst
MagRec['measurement_number']='1'
MagRec['measurement_positions']=''
MagRecs.append(MagRec)
if specimen not in specimens:
specimens.append(specimen)
SpecRecs.append(SpecRec)
if MagRec['er_sample_name'] not in samples:
samples.append(MagRec['er_sample_name'])
SampRecs.append(SampRec)
MagOuts=pmag.sort_diclist(MagRecs,'treatment_ac_field')
for MagRec in MagOuts:
MagRec["treatment_ac_field"]='%8.3e'%(MagRec["treatment_ac_field"]) # convert to string
pmag.magic_write(spec_file,SpecRecs,'er_specimens')
if len(SampRecs)>0:
SampOut,keys=pmag.fillkeys(SampRecs)
pmag.magic_write(samp_file,SampOut,'er_samples')
print('samples stored in ',samp_file)
pmag.magic_write(samp_file,SampRecs,'er_samples')
print('specimens stored in ',spec_file)
Fixed=pmag.measurements_methods(MagOuts,noave)
pmag.magic_write(meas_file,Fixed,'magic_measurements')
print('data stored in ',meas_file) | python | def main():
"""
NAME
odp_dcs_magic.py
DESCRIPTION
converts ODP discrete sample format files to magic_measurements format files
SYNTAX
odp_dsc_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-F FILE: specify output measurements file, default is magic_measurements.txt
-Fsp FILE: specify output er_specimens.txt file, default is er_specimens.txt
-Fsa FILE: specify output er_samples.txt file for appending, default is er_samples.txt
-Fsi FILE: specify output er_sites.txt file, default is er_sites.txt
-dc B PHI THETA: dc lab field (in micro tesla) and phi,theta, default is none
NB: use PHI, THETA = -1 -1 to signal that it changes, i.e. in anisotropy experiment
-ac B : peak AF field (in mT) for ARM acquisition, default is none
-A : don't average replicate measurements
INPUT
Put data from separate experiments (all AF, thermal, thellier, trm aquisition, Shaw, etc.) in separate directory
"""
#
#
version_num=pmag.get_version()
meas_file='magic_measurements.txt'
spec_file='er_specimens.txt'
samp_file='er_samples.txt'
site_file='er_sites.txt'
ErSpecs,ErSamps,ErSites,ErLocs,ErCits=[],[],[],[],[]
MagRecs=[]
citation="This study"
dir_path,demag='.','NRM'
args=sys.argv
noave=0
if '-WD' in args:
ind=args.index("-WD")
dir_path=args[ind+1]
if "-h" in args:
print(main.__doc__)
sys.exit()
if "-A" in args: noave=1
if '-F' in args:
ind=args.index("-F")
meas_file=args[ind+1]
if '-Fsp' in args:
ind=args.index("-Fsp")
spec_file=args[ind+1]
if '-Fsa' in args:
ind=args.index("-Fsa")
samp_file=dir_path+'/'+args[ind+1]
ErSamps,file_type=pmag.magic_read(samp_file)
else:
samp_file=dir_path+'/'+samp_file
if '-LP' in args:
ind=args.index("-LP")
codelist=args[ind+1]
codes=codelist.split(':')
if "AF" in codes:
demag='AF'
if'-dc' not in args: methcode="LT-AF-Z"
if'-dc' in args: methcode="LT-AF-I"
if "T" in codes:
demag="T"
if '-dc' not in args: methcode="LT-T-Z"
if '-dc' in args: methcode="LT-T-I"
if "I" in codes:
methcode="LP-IRM"
if "S" in codes:
demag="S"
methcode="LP-PI-TRM:LP-PI-ALT-AFARM"
trm_labfield=labfield
ans=input("DC lab field for ARM step: [50uT] ")
if ans=="":
arm_labfield=50e-6
else:
arm_labfield=float(ans)*1e-6
ans=input("temperature for total trm step: [600 C] ")
if ans=="":
trm_peakT=600+273 # convert to kelvin
else:
trm_peakT=float(ans)+273 # convert to kelvin
if "G" in codes: methcode="LT-AF-G"
if "D" in codes: methcode="LT-AF-D"
if "TRM" in codes:
demag="T"
trm=1
if demag=="T" and "ANI" in codes:
methcode="LP-AN-TRM"
if demag=="AF" and "ANI" in codes:
methcode="LP-AN-ARM"
if labfield==0: labfield=50e-6
if peakfield==0: peakfield=.180
spec_file=dir_path+'/'+spec_file
site_file=dir_path+'/'+site_file
meas_file=dir_path+'/'+meas_file
filelist=os.listdir(dir_path) # read in list of files to import
specimens,samples,sites=[],[],[]
MagRecs,SpecRecs,SampRecs=[],[],[]
for samp in ErSamps:
if samp['er_sample_name'] not in samples:
samples.append(samp['er_sample_name'])
SampRecs.append(samp)
for file in filelist: # parse each file
if file[-3:].lower()=='dsc':
print('processing: ',file)
MagRec,SpecRec,SampRec={},{},{}
treatment_type,treatment_value,user="","",""
inst="ODP-SRM"
input=open(dir_path+'/'+file,'r').readlines()
IDs=file.split('_') # splits on underscores
pieces=IDs[0].split('-')
expedition=pieces[0]
location=pieces[1]
if file[0]!='_':
while len(pieces[2])<4:pieces[2]='0'+pieces[2] # pad core to be 3 characters
specimen=""
else:
specimen="test"
for piece in pieces:
specimen=specimen+piece+'-'
specimen=specimen[:-1]
alt_spec=IDs[1] # alternate specimen is second field in field name
# set up specimen record for Er_specimens table
SpecRec['er_expedition_name']=expedition
SpecRec['er_location_name']=location
SpecRec['er_site_name']=specimen
SpecRec['er_sample_name']=specimen
SpecRec['er_citation_names']=citation
for key in list(SpecRec.keys()):SampRec[key]=SpecRec[key]
SampRec['sample_azimuth']='0'
SampRec['sample_dip']='0'
SampRec['magic_method_codes']='FS-C-DRILL-IODP:SP-SS-C:SO-V'
SpecRec['er_specimen_name']=specimen
SampRec['er_specimen_names']=specimen
for key in list(SpecRec.keys()):MagRec[key]=SpecRec[key]
# set up measurement record - default is NRM
MagRec['er_analyst_mail_names']=user
MagRec['magic_method_codes']='LT-NO'
MagRec['magic_software_packages']=version_num
MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["treatment_ac_field"]=0.
MagRec["treatment_dc_field"]='0'
MagRec["treatment_dc_field_phi"]='0'
MagRec["treatment_dc_field_theta"]='0'
MagRec["measurement_flag"]='g' # assume all data are "good"
MagRec["measurement_standard"]='u' # assume all data are "good"
MagRec["measurement_csd"]='' # set csd to blank
SpecRec['er_specimen_alternatives']=alt_spec
vol=7e-6 # assume 7 cc samples
datestamp=input[1].split() # date time is second line of file
mmddyy=datestamp[0].split('/') # break into month day year
date=mmddyy[2]+':'+mmddyy[0]+":"+mmddyy[1] +':' +datestamp[1]
MagRec["measurement_date"]=date
for k in range(len(input)):
fields= input[k].split("=")
if 'treatment_type' in fields[0]:
if "Alternating Frequency Demagnetization" in fields[1]:
MagRec['magic_method_codes'] = 'LT-AF-Z'
inst=inst+':ODP-DTECH' # measured on shipboard AF DTECH D2000
treatment_type="AF"
if "Anhysteretic Remanent Magnetization" in fields[1]:
MagRec['magic_method_codes'] = 'LT-AF-I'
inst=inst+':ODP-DTECH' # measured on shipboard AF DTECH D2000
treatment_type="ARM"
if "Isothermal Remanent Magnetization" in fields[1]:
MagRec['magic_method_codes'] = 'LT-IRM'
inst=inst+':ODP-IMP' # measured on shipboard ASC IMPULSE magnetizer
treatment_type="IRM"
if "treatment_value" in fields[0]:
values=fields[1].split(',')
value=values[0]
if value!=" \n":
if treatment_type=="AF":
treatment_value=float(value)*1e-3
MagRec["treatment_ac_field"]=treatment_value # AF demag in treat mT => T
elif treatment_type=="IRM":
treatment_value=float(value)*1e-3
MagRec["treatment_dc_field"]='%8.3e'%(treatment_value) # IRM treat mT => T
if treatment_type=="ARM":
treatment_value=float(value)*1e-3
dc_value=float(values[1])*1e-3
MagRec["treatment_ac_field"]=treatment_value # AF demag in treat mT => T
MagRec["treatment_dc_field"]='%8.3e'%(dc_value) # DC mT => T
if 'user' in fields[0]:
user=fields[-1]
MagRec["er_analyst_mail_names"]=user
if 'sample_orientation' in fields[0]:
MagRec["measurement_description"]=fields[-1]
MagRec["measurement_standard"]='u' # assume all data are "good"
if 'sample_area' in fields[0]: vol=float(fields[1])*1e-6 # takes volume (cc) and converts to m^3
if 'run_number' in fields[0]:
MagRec['external_database_ids']=fields[1] # run number is the LIMS measurement number
MagRec['external_database_names']='LIMS'
if input[k][0:7]=='<MULTI>':
rec=input[k+1].split(',') # list of data
for item in rec:
items=item.split('=')
if items[0].strip()=='demag_level' and treatment_value=="" :
treat= float(items[1])
if treat!=0:
MagRec['magic_method_codes']='LT-AF-Z'
inst=inst+':ODP-SRM-AF'
MagRec["treatment_ac_field"]=treat*1e-3 # AF demag in treat mT => T
if items[0].strip()=='inclination_w_tray_w_bkgrd': MagRec['measurement_inc']=items[1]
if items[0].strip()=='declination_w_tray_w_bkgrd': MagRec['measurement_dec']=items[1]
if items[0].strip()=='intensity_w_tray_w_bkgrd': MagRec['measurement_magn_moment']='%8.3e'%(float(items[1])*vol) # convert intensity from A/m to Am^2 using vol
if items[0].strip()=='x_stdev':MagRec['measurement_x_sd']=items[1]
if items[0].strip()=='y_stdev':MagRec['measurement_y_sd']=items[1]
if items[0].strip()=='z_stdev':MagRec['measurement_sd_z']=items[1]
MagRec['magic_instrument_codes']=inst
MagRec['measurement_number']='1'
MagRec['measurement_positions']=''
MagRecs.append(MagRec)
if specimen not in specimens:
specimens.append(specimen)
SpecRecs.append(SpecRec)
if MagRec['er_sample_name'] not in samples:
samples.append(MagRec['er_sample_name'])
SampRecs.append(SampRec)
MagOuts=pmag.sort_diclist(MagRecs,'treatment_ac_field')
for MagRec in MagOuts:
MagRec["treatment_ac_field"]='%8.3e'%(MagRec["treatment_ac_field"]) # convert to string
pmag.magic_write(spec_file,SpecRecs,'er_specimens')
if len(SampRecs)>0:
SampOut,keys=pmag.fillkeys(SampRecs)
pmag.magic_write(samp_file,SampOut,'er_samples')
print('samples stored in ',samp_file)
pmag.magic_write(samp_file,SampRecs,'er_samples')
print('specimens stored in ',spec_file)
Fixed=pmag.measurements_methods(MagOuts,noave)
pmag.magic_write(meas_file,Fixed,'magic_measurements')
print('data stored in ',meas_file) | NAME
odp_dcs_magic.py
DESCRIPTION
converts ODP discrete sample format files to magic_measurements format files
SYNTAX
odp_dsc_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-F FILE: specify output measurements file, default is magic_measurements.txt
-Fsp FILE: specify output er_specimens.txt file, default is er_specimens.txt
-Fsa FILE: specify output er_samples.txt file for appending, default is er_samples.txt
-Fsi FILE: specify output er_sites.txt file, default is er_sites.txt
-dc B PHI THETA: dc lab field (in micro tesla) and phi,theta, default is none
NB: use PHI, THETA = -1 -1 to signal that it changes, i.e. in anisotropy experiment
-ac B : peak AF field (in mT) for ARM acquisition, default is none
-A : don't average replicate measurements
INPUT
Put data from separate experiments (all AF, thermal, thellier, trm aquisition, Shaw, etc.) in separate directory | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/deprecated/odp_dsc_magic.py#L10-L247 |
def main():
    """
    NAME
        angle.py

    DESCRIPTION
        calculates angle between two input directions D1,D2

    INPUT (COMMAND LINE ENTRY)
        D1_dec D1_inc D2_dec D2_inc

    OUTPUT
        angle

    SYNTAX
        angle.py [-h][-i] [command line options] [< filename]

    OPTIONS
        -h prints help and quits
        -i for interactive data entry
        -f FILE input filename
        -F FILE output filename (angles are also echoed to stdout)
        Standard I/O
    """
    out = None  # output file handle; stays None unless -F is given
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-F' in sys.argv:
        ind = sys.argv.index('-F')
        out = open(sys.argv[ind + 1], 'w')
    if '-i' in sys.argv:
        # interactive mode: loop until the user hits ctrl-D (EOFError) or
        # types something that is not a number (ValueError)
        while True:
            dir1, dir2 = [], []
            try:
                dir1.append(float(input('Declination 1: [ctrl-D to quit] ')))
                dir1.append(float(input('Inclination 1: ')))
                dir2.append(float(input('Declination 2: ')))
                dir2.append(float(input('Inclination 2: ')))
            except (EOFError, ValueError):
                print("\nGood bye\n")
                sys.exit()
            # send dirs to angle and spit out result
            ang = pmag.angle(dir1, dir2)
            print('%7.1f ' % (ang))
    elif '-f' in sys.argv:
        ind = sys.argv.index('-f')
        file_input = numpy.loadtxt(sys.argv[ind + 1])
    else:
        # read from standard input; numpy.float was removed from NumPy,
        # the builtin float is the equivalent dtype
        file_input = numpy.loadtxt(sys.stdin.readlines(), dtype=float)
    if len(file_input.shape) > 1:  # list of direction pairs
        dir1, dir2 = file_input[:, 0:2], file_input[:, 2:]
    else:  # a single direction pair
        dir1, dir2 = file_input[0:2], file_input[2:]
    angs = pmag.angle(dir1, dir2)
    for ang in angs:
        print('%7.1f' % (ang))
        if out:
            out.write('%7.1f \n' % (ang))
    if out:
        out.close()
"""
NAME
angle.py
DESCRIPTION
calculates angle between two input directions D1,D2
INPUT (COMMAND LINE ENTRY)
D1_dec D1_inc D1_dec D2_inc
OUTPUT
angle
SYNTAX
angle.py [-h][-i] [command line options] [< filename]
OPTIONS
-h prints help and quits
-i for interactive data entry
-f FILE input filename
-F FILE output filename (required if -F set)
Standard I/O
"""
out = ""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-F' in sys.argv:
ind = sys.argv.index('-F')
o = sys.argv[ind + 1]
out = open(o, 'w')
if '-i' in sys.argv:
cont = 1
while cont == 1:
dir1, dir2 = [], []
try:
ans = input('Declination 1: [ctrl-D to quit] ')
dir1.append(float(ans))
ans = input('Inclination 1: ')
dir1.append(float(ans))
ans = input('Declination 2: ')
dir2.append(float(ans))
ans = input('Inclination 2: ')
dir2.append(float(ans))
except:
print("\nGood bye\n")
sys.exit()
# send dirs to angle and spit out result
ang = pmag.angle(dir1, dir2)
print('%7.1f ' % (ang))
elif '-f' in sys.argv:
ind = sys.argv.index('-f')
file = sys.argv[ind + 1]
file_input = numpy.loadtxt(file)
else:
# read from standard input
file_input = numpy.loadtxt(sys.stdin.readlines(), dtype=numpy.float)
if len(file_input.shape) > 1: # list of directions
dir1, dir2 = file_input[:, 0:2], file_input[:, 2:]
else:
dir1, dir2 = file_input[0:2], file_input[2:]
angs = pmag.angle(dir1, dir2)
for ang in angs: # read in the data (as string variable), line by line
print('%7.1f' % (ang))
if out != "":
out.write('%7.1f \n' % (ang))
if out:
out.close() | NAME
angle.py
DESCRIPTION
calculates angle between two input directions D1,D2
INPUT (COMMAND LINE ENTRY)
D1_dec D1_inc D1_dec D2_inc
OUTPUT
angle
SYNTAX
angle.py [-h][-i] [command line options] [< filename]
OPTIONS
-h prints help and quits
-i for interactive data entry
-f FILE input filename
-F FILE output filename (required if -F set)
Standard I/O | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/angle.py#L11-L79 |
def main():
    """
    NAME
        fishrot.py

    DESCRIPTION
        generates set of Fisher distributed data from specified distribution

    SYNTAX
        fishrot.py [-h][-i][command line options]

    OPTIONS
        -h prints help message and quits
        -i for interactive entry
        -k kappa specify kappa, default is 20
        -n N specify N, default is 100
        -D D specify mean Dec, default is 0
        -I I specify mean Inc, default is 90
        where:
            kappa: fisher distribution concentration parameter
            N: number of directions desired

    OUTPUT
        dec, inc
    """
    N, kappa, D, I = 100, 20., 0., 90.  # defaults
    # sys.argv always contains at least the script name, so the original
    # `len(sys.argv)!=0 and` guard was redundant
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    elif '-i' in sys.argv:
        kappa = float(input(' Kappa: '))
        N = int(input(' N: '))
        D = float(input(' Mean Dec: '))
        I = float(input(' Mean Inc: '))
    else:
        if '-k' in sys.argv:
            kappa = float(sys.argv[sys.argv.index('-k') + 1])
        if '-n' in sys.argv:
            N = int(sys.argv[sys.argv.index('-n') + 1])
        if '-D' in sys.argv:
            D = float(sys.argv[sys.argv.index('-D') + 1])
        if '-I' in sys.argv:
            I = float(sys.argv[sys.argv.index('-I') + 1])
    for _ in range(N):
        dec, inc = pmag.fshdev(kappa)  # draw a direction about the pole
        # rotate the drawn direction to the requested mean direction
        drot, irot = pmag.dodirot(dec, inc, D, I)
        print('%7.1f %7.1f ' % (drot, irot))
"""
NAME
fishrot.py
DESCRIPTION
generates set of Fisher distributed data from specified distribution
SYNTAX
fishrot.py [-h][-i][command line options]
OPTIONS
-h prints help message and quits
-i for interactive entry
-k kappa specify kappa, default is 20
-n N specify N, default is 100
-D D specify mean Dec, default is 0
-I I specify mean Inc, default is 90
where:
kappa: fisher distribution concentration parameter
N: number of directions desired
OUTPUT
dec, inc
"""
N,kappa,D,I=100,20.,0.,90.
if len(sys.argv)!=0 and '-h' in sys.argv:
print(main.__doc__)
sys.exit()
elif '-i' in sys.argv:
ans=input(' Kappa: ')
kappa=float(ans)
ans=input(' N: ')
N=int(ans)
ans=input(' Mean Dec: ')
D=float(ans)
ans=input(' Mean Inc: ')
I=float(ans)
else:
if '-k' in sys.argv:
ind=sys.argv.index('-k')
kappa=float(sys.argv[ind+1])
if '-n' in sys.argv:
ind=sys.argv.index('-n')
N=int(sys.argv[ind+1])
if '-D' in sys.argv:
ind=sys.argv.index('-D')
D=float(sys.argv[ind+1])
if '-I' in sys.argv:
ind=sys.argv.index('-I')
I=float(sys.argv[ind+1])
for k in range(N):
dec,inc= pmag.fshdev(kappa) # send kappa to fshdev
drot,irot=pmag.dodirot(dec,inc,D,I)
print('%7.1f %7.1f ' % (drot,irot)) | NAME
fishrot.py
DESCRIPTION
generates set of Fisher distributed data from specified distribution
SYNTAX
fishrot.py [-h][-i][command line options]
OPTIONS
-h prints help message and quits
-i for interactive entry
-k kappa specify kappa, default is 20
-n N specify N, default is 100
-D D specify mean Dec, default is 0
-I I specify mean Inc, default is 90
where:
kappa: fisher distribution concentration parameter
N: number of directions desired
OUTPUT
dec, inc | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/fishrot.py#L8-L63 |
def main():
    """
    NAME
        squish.py

    DESCRIPTION
        takes dec/inc data and "squishes" with specified flattening factor, flt
        using formula tan(Io)=flt*tan(If)

    INPUT
        declination inclination

    OUTPUT
        "squished" declination inclination

    SYNTAX
        squish.py [command line options] [< filename]

    OPTIONS
        -h print help and quit
        -f FILE, input file
        -F FILE, output file
        -flt FLT, flattening factor [required]
    """
    out = None  # output file handle; stays None unless -F is given
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-F' in sys.argv:
        ind = sys.argv.index('-F')
        out = open(sys.argv[ind + 1], 'w')
    if '-flt' in sys.argv:
        ind = sys.argv.index('-flt')
        flt = float(sys.argv[ind + 1])
    else:
        print(main.__doc__)
        sys.exit()
    if '-f' in sys.argv:
        ind = sys.argv.index('-f')
        data = np.loadtxt(sys.argv[ind + 1])
    else:
        # np.float was removed from NumPy; the builtin float is equivalent
        data = np.loadtxt(sys.stdin, dtype=float)
    # atleast_2d makes a one-line input file (1-D loadtxt result) behave the
    # same as a multi-line one; the original crashed on single dec/inc pairs
    data = np.atleast_2d(data)
    decs, incs = data[:, 0], data[:, 1]
    incnew = pmag.squish(incs, flt)
    for k in range(data.shape[0]):
        if out is None:
            print('%7.1f %7.1f' % (decs[k], incnew[k]))
        else:
            out.write('%7.1f %7.1f' % (decs[k], incnew[k]) + '\n')
    if out:
        out.close()
"""
NAME
squish.py
DESCRIPTION
takes dec/inc data and "squishes" with specified flattening factor, flt
using formula tan(Io)=flt*tan(If)
INPUT
declination inclination
OUTPUT
"squished" declincation inclination
SYNTAX
squish.py [command line options] [< filename]
OPTIONS
-h print help and quit
-f FILE, input file
-F FILE, output file
-flt FLT, flattening factor [required]
"""
ofile=""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-F' in sys.argv:
ind=sys.argv.index('-F')
ofile=sys.argv[ind+1]
out=open(ofile,'w')
if '-flt' in sys.argv:
ind=sys.argv.index('-flt')
flt=float(sys.argv[ind+1])
else:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
input=np.loadtxt(file)
else:
input=np.loadtxt(sys.stdin,dtype=np.float)
# read in inclination data
di=input.transpose()
decs,incs=di[0],di[1]
incnew=pmag.squish(incs,flt)
for k in range(input.shape[0]):
if ofile=="":
print('%7.1f %7.1f'% (decs[k],incnew[k]))
else:
out.write('%7.1f %7.1f'% (decs[k],incnew[k])+'\n') | NAME
squish.py
DESCRIPTION
takes dec/inc data and "squishes" with specified flattening factor, flt
using formula tan(Io)=flt*tan(If)
INPUT
declination inclination
OUTPUT
"squished" declincation inclination
SYNTAX
squish.py [command line options] [< filename]
OPTIONS
-h print help and quit
-f FILE, input file
-F FILE, output file
-flt FLT, flattening factor [required] | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/squish.py#L7-L59 |
def main():
    """
    iodp_samples_magic.py
    OPTIONS:
        -f FILE, input csv file
        -Fsa FILE, output samples file for updating, default is to overwrite existing samples file
    """
    argv = sys.argv
    if "-h" in argv:
        print(main.__doc__)
        sys.exit()
    # declare the recognized flags: [name, required?, default]
    flag_frame = extractor.command_line_dataframe(
        [['WD', False, '.'],
         ['ID', False, '.'],
         ['f', True, ''],
         ['Fsa', False, 'samples.txt'],
         ['DM', False, 3]])
    parsed = extractor.extract_and_check_args(argv, flag_frame)
    (in_file, out_samp, out_dir,
     in_dir, dm) = extractor.get_vars(['f', 'Fsa', 'WD', 'ID', 'DM'], parsed)
    dm = int(float(dm))
    # data model 2 uses the legacy er_samples.txt name unless -Fsa overrides it
    if dm == 2 and '-Fsa' not in argv:
        out_samp = "er_samples.txt"
    ok, msg = convert.iodp_samples(in_file, out_samp, out_dir, in_dir,
                                   data_model_num=dm)
    if not ok:
        print("-W- " + msg)
"""
iodp_samples_magic.py
OPTIONS:
-f FILE, input csv file
-Fsa FILE, output samples file for updating, default is to overwrite existing samples file
"""
if "-h" in sys.argv:
print(main.__doc__)
sys.exit()
dataframe = extractor.command_line_dataframe([['WD', False, '.'], ['ID', False, '.'], ['f', True, ''], ['Fsa', False, 'samples.txt'], ['DM', False, 3]])
args = sys.argv
checked_args = extractor.extract_and_check_args(args, dataframe)
samp_file, output_samp_file, output_dir_path, input_dir_path, data_model_num = extractor.get_vars(['f', 'Fsa', 'WD', 'ID', 'DM'], checked_args)
data_model_num = int(float(data_model_num))
if '-Fsa' not in args and data_model_num == 2:
output_samp_file = "er_samples.txt"
ran, error = convert.iodp_samples(samp_file, output_samp_file, output_dir_path,
input_dir_path, data_model_num=data_model_num)
if not ran:
print("-W- " + error) | iodp_samples_magic.py
OPTIONS:
-f FILE, input csv file
-Fsa FILE, output samples file for updating, default is to overwrite existing samples file | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/conversion_scripts/iodp_samples_magic.py#L7-L28 |
def cart2dir(self, cart):
    """
    Convert cartesian coordinates to a direction.

    NOTE(review): the original docstring said "converts a direction to
    cartesian coordinates", which is backwards -- this routine is the
    inverse of dir2cart.

    Parameters
    ----------
    cart : array-like
        A single [x, y, z] vector or an (n, 3) array of vectors.

    Returns
    -------
    numpy.ndarray
        [declination, inclination, R] in (degrees, degrees, vector length),
        one row per input vector; numpy.zeros(3) if the conversion fails.
    """
    cart = numpy.array(cart)
    if len(cart.shape) > 1:
        Xs, Ys, Zs = cart[:, 0], cart[:, 1], cart[:, 2]
    else:  # single vector
        Xs, Ys, Zs = cart[0], cart[1], cart[2]
    Rs = numpy.sqrt(Xs ** 2 + Ys ** 2 + Zs ** 2)  # resultant vector length
    # arctan2 picks the correct quadrant; modulo 360 normalizes declination
    Decs = numpy.degrees(numpy.arctan2(Ys, Xs)) % 360.
    try:
        Incs = numpy.degrees(numpy.arcsin(Zs / Rs))
    except Exception:
        # most likely division by zero somewhere (zero-length vector)
        print('trouble in cart2dir')
        return numpy.zeros(3)
    return numpy.array([Decs, Incs, Rs]).transpose()
"""
converts a direction to cartesian coordinates
"""
# print "calling cart2dir(), not in anything"
cart=numpy.array(cart)
rad=old_div(numpy.pi,180.) # constant to convert degrees to radians
if len(cart.shape)>1:
Xs,Ys,Zs=cart[:,0],cart[:,1],cart[:,2]
else: #single vector
Xs,Ys,Zs=cart[0],cart[1],cart[2]
Rs=numpy.sqrt(Xs**2+Ys**2+Zs**2) # calculate resultant vector length
Decs=(old_div(numpy.arctan2(Ys,Xs),rad))%360. # calculate declination taking care of correct quadrants (arctan2) and making modulo 360.
try:
Incs=old_div(numpy.arcsin(old_div(Zs,Rs)),rad) # calculate inclination (converting to degrees) #
except:
print('trouble in cart2dir') # most likely division by zero somewhere
return numpy.zeros(3)
return numpy.array([Decs,Incs,Rs]).transpose() | converts a direction to cartesian coordinates | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/SPD/new_lj_thellier_gui_spd.py#L1060-L1079 |
def magic_read(self, infile):
    """
    Read a MagIC template file and return its data rows as dictionaries.

    The first line of the file names the delimiter ("space"/"tab") and the
    file type (e.g. "tab<TAB>magic_measurements"); the second line holds
    the column names; all remaining lines are data rows.

    Parameters
    ----------
    infile : str
        Path to the MagIC-format file.

    Returns
    -------
    (list of dict, str)
        One dict per data row (keyed by column name) and the file type
        string; ([], 'bad_file') if the file cannot be opened.
    """
    hold, magic_data, magic_keys = [], [], []
    try:
        f = open(infile, "r")
    except OSError:
        return [], 'bad_file'
    with f:  # ensure the handle is closed (the original leaked it)
        d = f.readline()[:-1].strip('\n')
        if d[0] == "s" or d[1] == "s":
            delim = 'space'
        elif d[0] == "t" or d[1] == "t":
            delim = 'tab'
        else:
            print('error reading ', infile)
            sys.exit()
        # file type is the second token of the header line; a "delimited"
        # header carries the real type in the third token instead
        if delim == 'space':
            file_type = d.split()[1]
        if delim == 'tab':
            file_type = d.split('\t')[1]
        if file_type == 'delimited':
            if delim == 'space':
                file_type = d.split()[2]
            if delim == 'tab':
                file_type = d.split('\t')[2]
        # second line holds the column names
        if delim == 'space':
            line = f.readline()[:-1].split()
        if delim == 'tab':
            line = f.readline()[:-1].split('\t')
        for key in line:
            magic_keys.append(key)
        lines = f.readlines()
    for line in lines:
        # strip the trailing newline (if any) before splitting; the original
        # special-cased the last line and chopped its final character when
        # the file was space-delimited, silently losing data.  Iterating all
        # lines uniformly also avoids an IndexError on a file with no rows.
        line = line.rstrip('\n')
        if delim == 'space':
            rec = line.split()
        else:
            rec = line.split('\t')
        hold.append(rec)
    for rec in hold:
        magic_record = {}
        if len(magic_keys) != len(rec):
            print("Warning: Uneven record lengths detected: ")
        for k in range(len(rec)):
            magic_record[magic_keys[k]] = rec[k].strip('\n')
        magic_data.append(magic_record)
    magictype = file_type.lower().split("_")
    Types = ['er', 'magic', 'pmag', 'rmag']
    # NOTE(review): `magictype` is a list, so this membership test can never
    # be True; the intent was probably `magictype[0] in Types`.  Left as-is
    # because lower-casing file_type would change downstream matching --
    # confirm before fixing.
    if magictype in Types:
        file_type = file_type.lower()
    return magic_data, file_type
"""
reads a Magic template file, puts data in a list of dictionaries
"""
# print "calling magic_read(self, infile)", infile
hold,magic_data,magic_record,magic_keys=[],[],{},[]
try:
f=open(infile,"r")
except:
return [],'bad_file'
d = f.readline()[:-1].strip('\n')
if d[0]=="s" or d[1]=="s":
delim='space'
elif d[0]=="t" or d[1]=="t":
delim='tab'
else:
print('error reading ', infile)
sys.exit()
if delim=='space':file_type=d.split()[1]
if delim=='tab':file_type=d.split('\t')[1]
if file_type=='delimited':
if delim=='space':file_type=d.split()[2]
if delim=='tab':file_type=d.split('\t')[2]
if delim=='space':line =f.readline()[:-1].split()
if delim=='tab':line =f.readline()[:-1].split('\t')
for key in line:
magic_keys.append(key)
lines=f.readlines()
for line in lines[:-1]:
line.replace('\n','')
if delim=='space':rec=line[:-1].split()
if delim=='tab':rec=line[:-1].split('\t')
hold.append(rec)
line = lines[-1].replace('\n','')
if delim=='space':rec=line[:-1].split()
if delim=='tab':rec=line.split('\t')
hold.append(rec)
for rec in hold:
magic_record={}
if len(magic_keys) != len(rec):
print("Warning: Uneven record lengths detected: ")
#print magic_keys
#print rec
for k in range(len(rec)):
magic_record[magic_keys[k]]=rec[k].strip('\n')
magic_data.append(magic_record)
magictype=file_type.lower().split("_")
Types=['er','magic','pmag','rmag']
if magictype in Types:file_type=file_type.lower()
# print "magic data from magic_read:"
# print str(magic_data)[:500] + "..."
# print "file_type", file_type
return magic_data,file_type | reads a Magic template file, puts data in a list of dictionaries | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/SPD/new_lj_thellier_gui_spd.py#L1102-L1155 |
def get_specs(self, data):
    """
    Return a sorted list of the unique specimen names in a MagIC datablock.

    Parameters
    ----------
    data : list of dict
        MagIC-style records, each with an 'er_specimen_name' key.

    Returns
    -------
    list of str
        Unique specimen names in ascending order.
    """
    # A set gives O(1) uniqueness checks instead of the original
    # O(n) list-membership scan per record; the sorted result is identical.
    return sorted({rec["er_specimen_name"] for rec in data})
"""
takes a magic format file and returns a list of unique specimen names
"""
# sort the specimen names
#
# print "calling get_specs()"
speclist=[]
for rec in data:
spec=rec["er_specimen_name"]
if spec not in speclist:speclist.append(spec)
speclist.sort()
#print speclist
return speclist | takes a magic format file and returns a list of unique specimen names | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/SPD/new_lj_thellier_gui_spd.py#L1158-L1171 |
def sortarai(self,datablock,s,Zdiff):
    """
    Sort the measurement records for specimen *s* into the components of a
    Thellier-type paleointensity experiment.

    Parameters
    ----------
    datablock : list of dict
        MagIC measurement records for one specimen, in experiment order.
    s : str
        Specimen name (used only in warning messages).
    Zdiff : int
        If non-zero, only the z-component of the in-field difference
        vectors is kept for the pTRM checks.

    Returns
    -------
    (araiblock, field)
        araiblock = (first_Z, first_I, ptrm_check, ptrm_tail, zptrm_check,
        GammaChecks, additivity_check); field is the lab DC field read from
        the first in-field step ("" if none found).  Each list entry is a
        row of [treatment, dec, inc, moment, ...].
    """
    first_Z,first_I,zptrm_check,ptrm_check,ptrm_tail=[],[],[],[],[]
    field,phi,theta="","",""
    starthere=0
    Treat_I,Treat_Z,Treat_PZ,Treat_PI,Treat_M,Treat_AC=[],[],[],[],[],[]
    ISteps,ZSteps,PISteps,PZSteps,MSteps,ACSteps=[],[],[],[],[],[]
    GammaChecks=[] # comparison of pTRM direction acquired and lab field
    Mkeys=['measurement_magn_moment','measurement_magn_volume','measurement_magn_mass','measurement_magnitude']
    rec=datablock[0] # finds which type of magnetic measurement is present in magic_measurements.txt, then assigns momkey to that value
    for key in Mkeys:
        if key in list(rec.keys()) and rec[key]!="":
            momkey=key
            break
    # first find all the steps
    for k in range(len(datablock)): # iterates through records.
        rec=datablock[k]
        # treatment "temperature" doubles as microwave power for MW experiments
        if "treatment_temp" in list(rec.keys()):
            temp=float(rec["treatment_temp"])
        elif "treatment_mw_power" in list(rec.keys()):
            temp=float(rec["treatment_mw_power"])
        methcodes=[]
        tmp=rec["magic_method_codes"].split(":")
        for meth in tmp:
            methcodes.append(meth.strip())
        # methcodes contains all codes for a particular record
        # for thellier-thellier
        if 'LT-T-I' in methcodes and 'LP-PI-TRM' in methcodes and 'LP-TRM' not in methcodes :
            # IF specimen cooling AND using a laboratory trm AND NOT trm acquisition
            Treat_I.append(temp)
            ISteps.append(k)
            if field=="":field=float(rec["treatment_dc_field"])
            if phi=="":
                phi=float(rec['treatment_dc_field_phi'])
                theta=float(rec['treatment_dc_field_theta'])
        # for Microwave
        if 'LT-M-I' in methcodes and 'LP-PI-M' in methcodes :
            # if using microwave radiation in lab field AND using microwave demagnetisation
            Treat_I.append(temp)
            ISteps.append(k)
            if field=="":field=float(rec["treatment_dc_field"])
            if phi=="":
                phi=float(rec['treatment_dc_field_phi'])
                theta=float(rec['treatment_dc_field_theta'])
        # stick first zero field stuff into first_Z
        if 'LT-NO' in methcodes:
            # if no treatments applied before measurements
            Treat_Z.append(temp)
            ZSteps.append(k)
        if 'LT-T-Z' in methcodes or 'LT-M-Z' in methcodes:
            # if specimen cooling in zero field OR using microwave radiation: In zero field
            Treat_Z.append(temp)
            ZSteps.append(k)
        # NOTE(review): this literal is always truthy, so EVERY record lands
        # in the PZ lists; the original author's comment below suggests it
        # was meant to be `'LT-PTRM-Z' in methcodes` -- confirm before fixing.
        if 'LT-PTRM-Z' : # maybe this should be in methcodes ?? note I no longer understand
            # if pTRM tail check
            Treat_PZ.append(temp)
            PZSteps.append(k)
        if 'LT-PTRM-I' in methcodes or 'LT-PMRM-I' in methcodes:
            # if pTRM check
            Treat_PI.append(temp)
            PISteps.append(k)
        if 'LT-PTRM-MD' in methcodes:
            # if pTRM tail check
            Treat_M.append(temp)
            MSteps.append(k)
        if 'LT-PTRM-AC' in methcodes or 'LT-PMRM-AC' in methcodes:
            # additivity check step
            Treat_AC.append(temp)
            ACSteps.append(k)
        if 'LT-NO' in methcodes:
            # if no treatments applied before measurement: record the NRM
            dec=float(rec["measurement_dec"])
            inc=float(rec["measurement_inc"])
            str=float(rec[momkey])
            if 'LP-PI-M' not in methcodes:
                # if not using microwave demagnetisation
                first_I.append([273,0.,0.,0.,1])
                first_Z.append([273,dec,inc,str,1]) # NRM step
            else:
                first_I.append([0,0.,0.,0.,1])
                first_Z.append([0,dec,inc,str,1]) # NRM step
    # the block above sorts each record into Treat_Z (zero field), Treat_I
    # (in field), pTRM checks, or pTRM tail checks by treatment step.
    #---------------------
    # find IZ and ZI
    #---------------------
    for temp in Treat_I: # look through infield steps and find matching Z step
        if temp in Treat_Z: # found a match
            istep=ISteps[Treat_I.index(temp)]
            irec=datablock[istep]
            methcodes=[]
            tmp=irec["magic_method_codes"].split(":")
            for meth in tmp: methcodes.append(meth.strip())
            brec=datablock[istep-1] # take last record as baseline to subtract
            zstep=ZSteps[Treat_Z.index(temp)]
            zrec=datablock[zstep]
            # sort out first_Z records
            if "LP-PI-TRM-IZ" in methcodes or "LP-PI-M-IZ" in methcodes:
                ZI=0
            else:
                ZI=1
            dec=float(zrec["measurement_dec"])
            inc=float(zrec["measurement_inc"])
            str=float(zrec[momkey])
            first_Z.append([temp,dec,inc,str,ZI])
            # sort out first_I records: subtract the zero-field vector from
            # the in-field vector (in cartesian space) to isolate the pTRM
            idec=float(irec["measurement_dec"])
            iinc=float(irec["measurement_inc"])
            istr=float(irec[momkey])
            X=self.dir2cart([idec,iinc,istr])
            BL=self.dir2cart([dec,inc,str])
            I=[]
            for c in range(3):
                I.append((X[c]-BL[c]))
            iDir=self.cart2dir(I)
            first_I.append([temp,iDir[0],iDir[1],iDir[2],ZI])
            # dead Python-2 code kept (as a string) by the original author
            now_ignore = """
            #if I[2]!=0:  # lj PUT THIS BACK
            if True:
                iDir=self.cart2dir(I)
                if Zdiff==0:
                    print "Zdiff == 0, appending to first_I" #lj
                    print [temp,iDir[0],iDir[1],iDir[2],ZI] #lj
                    first_I.append([temp,iDir[0],iDir[1],iDir[2],ZI])
                else:
                    print "Zdiff != 0, appending to first_I" #lj
                    print [temp,0.,0.,I[2],ZI] #lj
                    first_I.append([temp,0.,0.,I[2],ZI])
##                    gamma=angle([iDir[0],iDir[1]],[phi,theta])
            else:
                print "0,0,0 appending to first_I"
                print [temp,0.,0.,0.,ZI]
                first_I.append([temp,0.,0.,0.,ZI])
##                gamma=0.0
##            # put in Gamma check (infield trm versus lab field)
##            if 180.-gamma<gamma:
##                gamma=180.-gamma
##            GammaChecks.append([temp-273.,gamma])
            """
    #---------------------
    # find Thellier Thellier protocol
    #---------------------
    # NOTE(review): `methcodes` here is the leftover value from the last
    # iteration of the loop above -- presumably intentional (codes are
    # assumed uniform per experiment), but confirm.
    if 'LP-PI-II'in methcodes or 'LP-PI-T-II' in methcodes or 'LP-PI-M-II' in methcodes:
        for i in range(1,len(Treat_I)): # look through infield steps and find matching Z step
            if Treat_I[i] == Treat_I[i-1]:
                # ignore, if there are more than
                temp= Treat_I[i]
                irec1=datablock[ISteps[i-1]]
                dec1=float(irec1["measurement_dec"])
                inc1=float(irec1["measurement_inc"])
                moment1=float(irec1["measurement_magn_moment"])
                if len(first_I)<2:
                    dec_initial=dec1;inc_initial=inc1
                cart1=numpy.array(self.dir2cart([dec1,inc1,moment1]))
                irec2=datablock[ISteps[i]]
                dec2=float(irec2["measurement_dec"])
                inc2=float(irec2["measurement_inc"])
                moment2=float(irec2["measurement_magn_moment"])
                cart2=numpy.array(self.dir2cart([dec2,inc2,moment2]))
                # check if its in the same treatment
                if Treat_I[i] == Treat_I[i-2] and dec2!=dec_initial and inc2!=inc_initial:
                    continue
                if dec1!=dec2 and inc1!=inc2:
                    # antiparallel field pair: mean isolates the zero-field
                    # part, half-difference isolates the in-field part
                    zerofield=old_div((cart2+cart1),2)
                    infield=old_div((cart2-cart1),2)
                    DIR_zerofield=self.cart2dir(zerofield)
                    DIR_infield=self.cart2dir(infield)
                    first_Z.append([temp,DIR_zerofield[0],DIR_zerofield[1],DIR_zerofield[2],0])
                    print("appending to first_I") # LJ remove this
                    print([temp,DIR_infield[0],DIR_infield[1],DIR_infield[2],0]) # LJ remove this
                    first_I.append([temp,DIR_infield[0],DIR_infield[1],DIR_infield[2],0])
    #---------------------
    # find pTRM checks
    #---------------------
    for temp in Treat_PI: # look through infield steps and find matching Z step
        # NOTE(review): `methcodes` is again the leaked loop value, see above
        if 'LP-PI-II' not in methcodes:
            step=PISteps[Treat_PI.index(temp)]
            rec=datablock[step]
            dec=float(rec["measurement_dec"])
            inc=float(rec["measurement_inc"])
            str=float(rec[momkey])
            brec=datablock[step-1] # take last record as baseline to subtract
            pdec=float(brec["measurement_dec"])
            pinc=float(brec["measurement_inc"])
            pint=float(brec[momkey])
            X=self.dir2cart([dec,inc,str])
            prevX=self.dir2cart([pdec,pinc,pint])
            I=[]
            for c in range(3): I.append(X[c]-prevX[c])
            dir1=self.cart2dir(I)
            if Zdiff==0:
                ptrm_check.append([temp,dir1[0],dir1[1],dir1[2]])
            else:
                ptrm_check.append([temp,0.,0.,I[2]])
        else:
            # Thellier-Thellier style pTRM check: difference against the
            # matching zero-field estimate computed earlier
            step=PISteps[Treat_PI.index(temp)]
            rec=datablock[step]
            dec=float(rec["measurement_dec"])
            inc=float(rec["measurement_inc"])
            moment=float(rec["measurement_magn_moment"])
            for zerofield in first_Z:
                if zerofield[0]==temp:
                    M1=numpy.array(self.dir2cart([dec,inc,moment]))
                    M2=numpy.array(self.dir2cart([zerofield[1],zerofield[2],zerofield[3]]))
                    diff=M1-M2
                    diff_cart=self.cart2dir(diff)
                    ptrm_check.append([temp,diff_cart[0],diff_cart[1],diff_cart[2]])
    # in case there are zero-field pTRM checks (not the SIO way)
    for temp in Treat_PZ:
        step=PZSteps[Treat_PZ.index(temp)]
        rec=datablock[step]
        dec=float(rec["measurement_dec"])
        inc=float(rec["measurement_inc"])
        str=float(rec[momkey])
        brec=datablock[step-1]
        pdec=float(brec["measurement_dec"])
        pinc=float(brec["measurement_inc"])
        pint=float(brec[momkey])
        X=self.dir2cart([dec,inc,str])
        prevX=self.dir2cart([pdec,pinc,pint])
        I=[]
        for c in range(3): I.append(X[c]-prevX[c])
        dir2=self.cart2dir(I)
        zptrm_check.append([temp,dir2[0],dir2[1],dir2[2]])
    ## get pTRM tail checks together -
    for temp in Treat_M:
        step=MSteps[Treat_M.index(temp)] # tail check step - just do a difference in magnitude!
        rec=datablock[step]
        str=float(rec[momkey])
        if temp in Treat_Z:
            step=ZSteps[Treat_Z.index(temp)]
            brec=datablock[step]
            pint=float(brec[momkey])
            ptrm_tail.append([temp,0,0,str-pint]) # difference - if negative, negative tail!
        else:
            print(s, ' has a tail check with no first zero field step - check input file! for step',temp-273.)
    #
    # final check: first_Z and first_I must pair up one-to-one
    #
    if len(first_Z)!=len(first_I):
        print(len(first_Z),len(first_I))
        print(" Something wrong with this specimen! Better fix it or delete it ")
        input(" press return to acknowledge message")
    #---------------------
    # find Additivity (patch by rshaar)
    #---------------------
    additivity_check=[]
    for i in range(len(Treat_AC)):
        step_0=ACSteps[i]
        temp=Treat_AC[i]
        dec0=float(datablock[step_0]["measurement_dec"])
        inc0=float(datablock[step_0]["measurement_inc"])
        moment0=float(datablock[step_0]['measurement_magn_moment'])
        V0=self.dir2cart([dec0,inc0,moment0])
        # find the infield step that comes before the additivity check
        foundit=False
        for j in range(step_0,1,-1):
            if "LT-T-I" in datablock[j]['magic_method_codes']:
                foundit=True ; break
        if foundit:
            dec1=float(datablock[j]["measurement_dec"])
            inc1=float(datablock[j]["measurement_inc"])
            moment1=float(datablock[j]['measurement_magn_moment'])
            #lj
            start_temp=float(datablock[j]['treatment_temp']);
            #lj
            V1=self.dir2cart([dec1,inc1,moment1])
            I=[]
            for c in range(3): I.append(V1[c]-V0[c])
            dir1=self.cart2dir(I)
            additivity_check.append([temp,dir1[0],dir1[1],dir1[2]])
    # araiblock rows: first_Z entries look like [273, 277.5, 79.6, 1.66e-09, 1]
    araiblock=(first_Z,first_I,ptrm_check,ptrm_tail,zptrm_check,GammaChecks,additivity_check)
    return araiblock,field
"""
sorts data block in to first_Z, first_I, etc.
"""
# print "calling sortarai()"
first_Z,first_I,zptrm_check,ptrm_check,ptrm_tail=[],[],[],[],[]
field,phi,theta="","",""
starthere=0
Treat_I,Treat_Z,Treat_PZ,Treat_PI,Treat_M,Treat_AC=[],[],[],[],[],[]
ISteps,ZSteps,PISteps,PZSteps,MSteps,ACSteps=[],[],[],[],[],[]
GammaChecks=[] # comparison of pTRM direction acquired and lab field
Mkeys=['measurement_magn_moment','measurement_magn_volume','measurement_magn_mass','measurement_magnitude']
rec=datablock[0] # finds which type of magnetic measurement is present in magic_measurements.txt, then assigns momkey to that value
for key in Mkeys:
if key in list(rec.keys()) and rec[key]!="":
momkey=key
break
# first find all the steps
for k in range(len(datablock)): # iterates through records.
rec=datablock[k]
if "treatment_temp" in list(rec.keys()):
temp=float(rec["treatment_temp"])
elif "treatment_mw_power" in list(rec.keys()):
temp=float(rec["treatment_mw_power"])
methcodes=[]
tmp=rec["magic_method_codes"].split(":")
for meth in tmp:
methcodes.append(meth.strip())
# methchodes contains all codes for a particular record
# for thellier-thellier
if 'LT-T-I' in methcodes and 'LP-PI-TRM' in methcodes and 'LP-TRM' not in methcodes :
# IF specimen cooling AND using a laboratory trm AND NOT trm acquisition
Treat_I.append(temp)
ISteps.append(k)
if field=="":field=float(rec["treatment_dc_field"])
if phi=="":
phi=float(rec['treatment_dc_field_phi'])
theta=float(rec['treatment_dc_field_theta'])
# for Microwave
if 'LT-M-I' in methcodes and 'LP-PI-M' in methcodes :
# if using microwave radiation in lab field AND using microwave demagnetisation
Treat_I.append(temp)
ISteps.append(k)
if field=="":field=float(rec["treatment_dc_field"])
if phi=="":
phi=float(rec['treatment_dc_field_phi'])
theta=float(rec['treatment_dc_field_theta'])
# stick first zero field stuff into first_Z
if 'LT-NO' in methcodes:
# if no treatments applied before measurements
Treat_Z.append(temp)
ZSteps.append(k)
if 'LT-T-Z' in methcodes or 'LT-M-Z' in methcodes:
# if specimen cooling in zero field OR using microwave radiation: In zero field
Treat_Z.append(temp)
ZSteps.append(k)
if 'LT-PTRM-Z' : # maybe this should be in methcodes ?? note I no longer understand
# if pTRM tail check
Treat_PZ.append(temp)
PZSteps.append(k)
if 'LT-PTRM-I' in methcodes or 'LT-PMRM-I' in methcodes:
# if pTRM check
Treat_PI.append(temp)
PISteps.append(k)
if 'LT-PTRM-MD' in methcodes:
# if pTRM tail check
Treat_M.append(temp)
MSteps.append(k)
if 'LT-PTRM-AC' in methcodes or 'LT-PMRM-AC' in methcodes:
Treat_AC.append(temp)
ACSteps.append(k)
if 'LT-NO' in methcodes:
# if no treatments applied before measurement
dec=float(rec["measurement_dec"])
inc=float(rec["measurement_inc"])
str=float(rec[momkey])
if 'LP-PI-M' not in methcodes:
# if not using microwave demagnetisation
first_I.append([273,0.,0.,0.,1])
first_Z.append([273,dec,inc,str,1]) # NRM step
else:
first_I.append([0,0.,0.,0.,1])
first_Z.append([0,dec,inc,str,1]) # NRM step
# the block above seems to be sorting out into wheter it is Treat_Z (zero field), Treat_I (infield), a ptrm check, or a ptrm tail check. so, each record has been appended to whichever of those it belongs in.
#---------------------
# find IZ and ZI
#---------------------
for temp in Treat_I: # look through infield steps and find matching Z step
if temp in Treat_Z: # found a match
istep=ISteps[Treat_I.index(temp)]
irec=datablock[istep]
methcodes=[]
tmp=irec["magic_method_codes"].split(":")
for meth in tmp: methcodes.append(meth.strip())
brec=datablock[istep-1] # take last record as baseline to subtract
zstep=ZSteps[Treat_Z.index(temp)]
zrec=datablock[zstep]
# sort out first_Z records
if "LP-PI-TRM-IZ" in methcodes or "LP-PI-M-IZ" in methcodes:
ZI=0
else:
ZI=1
dec=float(zrec["measurement_dec"])
inc=float(zrec["measurement_inc"])
str=float(zrec[momkey])
first_Z.append([temp,dec,inc,str,ZI])
# sort out first_I records
#print 'irec', irec # full data set for infield measurement
#print 'zrec', zrec # coresponding zerofield measurement
idec=float(irec["measurement_dec"])
iinc=float(irec["measurement_inc"])
istr=float(irec[momkey])
X=self.dir2cart([idec,iinc,istr])
BL=self.dir2cart([dec,inc,str])
I=[]
for c in range(3):
I.append((X[c]-BL[c]))
iDir=self.cart2dir(I)
first_I.append([temp,iDir[0],iDir[1],iDir[2],ZI])
now_ignore = """
#if I[2]!=0: # lj PUT THIS BACK
if True:
iDir=self.cart2dir(I)
if Zdiff==0:
print "Zdiff == 0, appending to first_I" #lj
print [temp,iDir[0],iDir[1],iDir[2],ZI] #lj
first_I.append([temp,iDir[0],iDir[1],iDir[2],ZI])
else:
print "Zdiff != 0, appending to first_I" #lj
print [temp,0.,0.,I[2],ZI] #lj
first_I.append([temp,0.,0.,I[2],ZI])
## gamma=angle([iDir[0],iDir[1]],[phi,theta])
else:
print "0,0,0 appending to first_I"
print [temp,0.,0.,0.,ZI]
first_I.append([temp,0.,0.,0.,ZI])
## gamma=0.0
## # put in Gamma check (infield trm versus lab field)
## if 180.-gamma<gamma:
## gamma=180.-gamma
## GammaChecks.append([temp-273.,gamma])
"""
#---------------------
# find Thellier Thellier protocol
#---------------------
if 'LP-PI-II'in methcodes or 'LP-PI-T-II' in methcodes or 'LP-PI-M-II' in methcodes:
for i in range(1,len(Treat_I)): # look through infield steps and find matching Z step
if Treat_I[i] == Treat_I[i-1]:
# ignore, if there are more than
temp= Treat_I[i]
irec1=datablock[ISteps[i-1]]
dec1=float(irec1["measurement_dec"])
inc1=float(irec1["measurement_inc"])
moment1=float(irec1["measurement_magn_moment"])
if len(first_I)<2:
dec_initial=dec1;inc_initial=inc1
cart1=numpy.array(self.dir2cart([dec1,inc1,moment1]))
irec2=datablock[ISteps[i]]
dec2=float(irec2["measurement_dec"])
inc2=float(irec2["measurement_inc"])
moment2=float(irec2["measurement_magn_moment"])
cart2=numpy.array(self.dir2cart([dec2,inc2,moment2]))
# check if its in the same treatment
if Treat_I[i] == Treat_I[i-2] and dec2!=dec_initial and inc2!=inc_initial:
continue
if dec1!=dec2 and inc1!=inc2:
zerofield=old_div((cart2+cart1),2)
infield=old_div((cart2-cart1),2)
DIR_zerofield=self.cart2dir(zerofield)
DIR_infield=self.cart2dir(infield)
first_Z.append([temp,DIR_zerofield[0],DIR_zerofield[1],DIR_zerofield[2],0])
print("appending to first_I") # LJ remove this
print([temp,DIR_infield[0],DIR_infield[1],DIR_infield[2],0]) # LJ remove this
first_I.append([temp,DIR_infield[0],DIR_infield[1],DIR_infield[2],0])
#---------------------
# find pTRM checks
#---------------------
for temp in Treat_PI: # look through infield steps and find matching Z step
if 'LP-PI-II' not in methcodes:
step=PISteps[Treat_PI.index(temp)]
rec=datablock[step]
dec=float(rec["measurement_dec"])
inc=float(rec["measurement_inc"])
str=float(rec[momkey])
brec=datablock[step-1] # take last record as baseline to subtract
pdec=float(brec["measurement_dec"])
pinc=float(brec["measurement_inc"])
pint=float(brec[momkey])
X=self.dir2cart([dec,inc,str])
prevX=self.dir2cart([pdec,pinc,pint])
I=[]
for c in range(3): I.append(X[c]-prevX[c])
dir1=self.cart2dir(I)
if Zdiff==0:
ptrm_check.append([temp,dir1[0],dir1[1],dir1[2]])
else:
ptrm_check.append([temp,0.,0.,I[2]])
else:
step=PISteps[Treat_PI.index(temp)]
rec=datablock[step]
dec=float(rec["measurement_dec"])
inc=float(rec["measurement_inc"])
moment=float(rec["measurement_magn_moment"])
for zerofield in first_Z:
if zerofield[0]==temp:
M1=numpy.array(self.dir2cart([dec,inc,moment]))
M2=numpy.array(self.dir2cart([zerofield[1],zerofield[2],zerofield[3]]))
diff=M1-M2
diff_cart=self.cart2dir(diff)
ptrm_check.append([temp,diff_cart[0],diff_cart[1],diff_cart[2]])
# in case there are zero-field pTRM checks (not the SIO way)
for temp in Treat_PZ:
step=PZSteps[Treat_PZ.index(temp)]
rec=datablock[step]
dec=float(rec["measurement_dec"])
inc=float(rec["measurement_inc"])
str=float(rec[momkey])
brec=datablock[step-1]
pdec=float(brec["measurement_dec"])
pinc=float(brec["measurement_inc"])
pint=float(brec[momkey])
X=self.dir2cart([dec,inc,str])
prevX=self.dir2cart([pdec,pinc,pint])
I=[]
for c in range(3): I.append(X[c]-prevX[c])
dir2=self.cart2dir(I)
zptrm_check.append([temp,dir2[0],dir2[1],dir2[2]])
## get pTRM tail checks together -
for temp in Treat_M:
step=MSteps[Treat_M.index(temp)] # tail check step - just do a difference in magnitude!
rec=datablock[step]
str=float(rec[momkey])
if temp in Treat_Z:
step=ZSteps[Treat_Z.index(temp)]
brec=datablock[step]
pint=float(brec[momkey])
ptrm_tail.append([temp,0,0,str-pint]) # difference - if negative, negative tail!
else:
print(s, ' has a tail check with no first zero field step - check input file! for step',temp-273.)
#
# final check
#
if len(first_Z)!=len(first_I):
print(len(first_Z),len(first_I))
print(" Something wrong with this specimen! Better fix it or delete it ")
input(" press return to acknowledge message")
#---------------------
# find Additivity (patch by rshaar)
#---------------------
additivity_check=[]
for i in range(len(Treat_AC)):
step_0=ACSteps[i]
temp=Treat_AC[i]
dec0=float(datablock[step_0]["measurement_dec"])
inc0=float(datablock[step_0]["measurement_inc"])
moment0=float(datablock[step_0]['measurement_magn_moment'])
V0=self.dir2cart([dec0,inc0,moment0])
# find the infield step that comes before the additivity check
foundit=False
for j in range(step_0,1,-1):
if "LT-T-I" in datablock[j]['magic_method_codes']:
foundit=True ; break
if foundit:
dec1=float(datablock[j]["measurement_dec"])
inc1=float(datablock[j]["measurement_inc"])
moment1=float(datablock[j]['measurement_magn_moment'])
#lj
start_temp=float(datablock[j]['treatment_temp']);
#lj
V1=self.dir2cart([dec1,inc1,moment1])
I=[]
#print "temp (K)", temp - 273
#print "start_temp (K)", start_temp - 273
#print "dec0: {}, inc0: {}, moment0: {}".format(dec0, inc0, moment0)
#print "V0: ", V0
#print "dec1: {}, inc1: {}, moment1: {}".format(dec1, inc1,moment1)
#print "V1: ", V1
#print "---"
for c in range(3): I.append(V1[c]-V0[c])
dir1=self.cart2dir(I)
additivity_check.append([temp,dir1[0],dir1[1],dir1[2]])
araiblock=(first_Z,first_I,ptrm_check,ptrm_tail,zptrm_check,GammaChecks,additivity_check)
# print "done with sortarai()"
# print "araiblock[0] (first_Z) "
# [[273, 277.5, 79.6, 1.66e-09, 1], .....]
# print araiblock[0]
# print "araiblock[0][0]:"
# print araiblock[0][0]
# print "araiblock[1] (first_I)"
# print araiblock[1]
# print "araiblock[2] (ptrm_check)"
# print araiblock[2]
# print "araiblock[3] (ptrm_tail)"
# print araiblock[3]
# print "araiblock[4] (zptrm_check)"
# print araiblock[4]
# print "araiblock[5] (GammaChecks) "
# print araiblock[5]
# print "field ", field
return araiblock,field | sorts data block in to first_Z, first_I, etc. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/SPD/new_lj_thellier_gui_spd.py#L1175-L1496 |
PmagPy/PmagPy | programs/vgpmap_magic.py | main | def main():
"""
NAME
vgpmap_magic.py
DESCRIPTION
makes a map of vgps and a95/dp,dm for site means in a sites table
SYNTAX
vgpmap_magic.py [command line options]
OPTIONS
-h prints help and quits
-eye ELAT ELON [specify eyeball location], default is 90., 0.
-f FILE sites format file, [default is sites.txt]
-res [c,l,i,h] specify resolution (crude, low, intermediate, high]
-etp plot the etopo20 topographpy data (requires high resolution data set)
-prj PROJ, specify one of the following:
ortho = orthographic
lcc = lambert conformal
moll = molweide
merc = mercator
-sym SYM SIZE: choose a symbol and size, examples:
ro 5 : small red circles
bs 10 : intermediate blue squares
g^ 20 : large green triangles
-ell plot dp/dm or a95 ellipses
-rev RSYM RSIZE : flip reverse poles to normal antipode
-S: plot antipodes of all poles
-age : plot the ages next to the poles
-crd [g,t] : choose coordinate system, default is to plot all site VGPs
-fmt [pdf, png, eps...] specify output format, default is pdf
-sav save and quit
DEFAULTS
FILE: sites.txt
res: c
prj: ortho
ELAT,ELON = 0,0
SYM SIZE: ro 8
RSYM RSIZE: g^ 8
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
dir_path = pmag.get_named_arg("-WD", ".")
# plot: default is 0, if -sav in sys.argv should be 1
interactive = True
save_plots = pmag.get_flag_arg_from_sys("-sav", true=1, false=0)
if save_plots:
interactive = False
fmt = pmag.get_named_arg("-fmt", "pdf")
res = pmag.get_named_arg("-res", "c")
proj = pmag.get_named_arg("-prj", "ortho")
anti = pmag.get_flag_arg_from_sys("-S", true=1, false=0)
fancy = pmag.get_flag_arg_from_sys("-etp", true=1, false=0)
ell = pmag.get_flag_arg_from_sys("-ell", true=1, false=0)
ages = pmag.get_flag_arg_from_sys("-age", true=1, false=0)
if '-rev' in sys.argv:
flip = 1
ind = sys.argv.index('-rev')
rsym = (sys.argv[ind + 1])
rsize = int(sys.argv[ind + 2])
else:
flip, rsym, rsize = 0, "g^", 8
if '-sym' in sys.argv:
ind = sys.argv.index('-sym')
sym = (sys.argv[ind + 1])
size = int(sys.argv[ind + 2])
else:
sym, size = 'ro', 8
if '-eye' in sys.argv:
ind = sys.argv.index('-eye')
lat_0 = float(sys.argv[ind + 1])
lon_0 = float(sys.argv[ind + 2])
else:
lat_0, lon_0 = 90., 0.
crd = pmag.get_named_arg("-crd", "")
results_file = pmag.get_named_arg("-f", "sites.txt")
ipmag.vgpmap_magic(dir_path, results_file, crd, sym, size, rsym, rsize,
fmt, res, proj, flip, anti, fancy, ell, ages, lat_0, lon_0,
save_plots, interactive) | python | def main():
"""
NAME
vgpmap_magic.py
DESCRIPTION
makes a map of vgps and a95/dp,dm for site means in a sites table
SYNTAX
vgpmap_magic.py [command line options]
OPTIONS
-h prints help and quits
-eye ELAT ELON [specify eyeball location], default is 90., 0.
-f FILE sites format file, [default is sites.txt]
-res [c,l,i,h] specify resolution (crude, low, intermediate, high]
-etp plot the etopo20 topographpy data (requires high resolution data set)
-prj PROJ, specify one of the following:
ortho = orthographic
lcc = lambert conformal
moll = molweide
merc = mercator
-sym SYM SIZE: choose a symbol and size, examples:
ro 5 : small red circles
bs 10 : intermediate blue squares
g^ 20 : large green triangles
-ell plot dp/dm or a95 ellipses
-rev RSYM RSIZE : flip reverse poles to normal antipode
-S: plot antipodes of all poles
-age : plot the ages next to the poles
-crd [g,t] : choose coordinate system, default is to plot all site VGPs
-fmt [pdf, png, eps...] specify output format, default is pdf
-sav save and quit
DEFAULTS
FILE: sites.txt
res: c
prj: ortho
ELAT,ELON = 0,0
SYM SIZE: ro 8
RSYM RSIZE: g^ 8
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
dir_path = pmag.get_named_arg("-WD", ".")
# plot: default is 0, if -sav in sys.argv should be 1
interactive = True
save_plots = pmag.get_flag_arg_from_sys("-sav", true=1, false=0)
if save_plots:
interactive = False
fmt = pmag.get_named_arg("-fmt", "pdf")
res = pmag.get_named_arg("-res", "c")
proj = pmag.get_named_arg("-prj", "ortho")
anti = pmag.get_flag_arg_from_sys("-S", true=1, false=0)
fancy = pmag.get_flag_arg_from_sys("-etp", true=1, false=0)
ell = pmag.get_flag_arg_from_sys("-ell", true=1, false=0)
ages = pmag.get_flag_arg_from_sys("-age", true=1, false=0)
if '-rev' in sys.argv:
flip = 1
ind = sys.argv.index('-rev')
rsym = (sys.argv[ind + 1])
rsize = int(sys.argv[ind + 2])
else:
flip, rsym, rsize = 0, "g^", 8
if '-sym' in sys.argv:
ind = sys.argv.index('-sym')
sym = (sys.argv[ind + 1])
size = int(sys.argv[ind + 2])
else:
sym, size = 'ro', 8
if '-eye' in sys.argv:
ind = sys.argv.index('-eye')
lat_0 = float(sys.argv[ind + 1])
lon_0 = float(sys.argv[ind + 2])
else:
lat_0, lon_0 = 90., 0.
crd = pmag.get_named_arg("-crd", "")
results_file = pmag.get_named_arg("-f", "sites.txt")
ipmag.vgpmap_magic(dir_path, results_file, crd, sym, size, rsym, rsize,
fmt, res, proj, flip, anti, fancy, ell, ages, lat_0, lon_0,
save_plots, interactive) | NAME
vgpmap_magic.py
DESCRIPTION
makes a map of vgps and a95/dp,dm for site means in a sites table
SYNTAX
vgpmap_magic.py [command line options]
OPTIONS
-h prints help and quits
-eye ELAT ELON [specify eyeball location], default is 90., 0.
-f FILE sites format file, [default is sites.txt]
-res [c,l,i,h] specify resolution (crude, low, intermediate, high]
-etp plot the etopo20 topographpy data (requires high resolution data set)
-prj PROJ, specify one of the following:
ortho = orthographic
lcc = lambert conformal
moll = molweide
merc = mercator
-sym SYM SIZE: choose a symbol and size, examples:
ro 5 : small red circles
bs 10 : intermediate blue squares
g^ 20 : large green triangles
-ell plot dp/dm or a95 ellipses
-rev RSYM RSIZE : flip reverse poles to normal antipode
-S: plot antipodes of all poles
-age : plot the ages next to the poles
-crd [g,t] : choose coordinate system, default is to plot all site VGPs
-fmt [pdf, png, eps...] specify output format, default is pdf
-sav save and quit
DEFAULTS
FILE: sites.txt
res: c
prj: ortho
ELAT,ELON = 0,0
SYM SIZE: ro 8
RSYM RSIZE: g^ 8 | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/vgpmap_magic.py#L14-L95 |
PmagPy/PmagPy | programs/dayplot_magic2.py | main | def main():
"""
NAME
dayplot_magic.py
DESCRIPTION
makes 'day plots' (Day et al. 1977) and squareness/coercivity,
plots 'linear mixing' curve from Dunlop and Carter-Stiglitz (2006).
squareness coercivity of remanence (Neel, 1955) plots after
Tauxe et al. (2002)
SYNTAX
dayplot_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f: specify input hysteresis file, default is rmag_hysteresis.txt
-fr: specify input remanence file, default is rmag_remanence.txt
-fmt [svg,png,jpg] format for output plots
-sav saves plots and quits quietly
-n label specimen names
"""
args = sys.argv
hyst_file, rem_file = "rmag_hysteresis.txt", "rmag_remanence.txt"
dir_path = '.'
verbose = pmagplotlib.verbose
fmt = 'svg' # default file format
if '-WD' in args:
ind = args.index('-WD')
dir_path = args[ind+1]
if "-h" in args:
print(main.__doc__)
sys.exit()
if '-f' in args:
ind = args.index("-f")
hyst_file = args[ind+1]
if '-fr' in args:
ind = args.index("-fr")
rem_file = args[ind+1]
if '-fmt' in sys.argv:
ind = sys.argv.index("-fmt")
fmt = sys.argv[ind+1]
if '-sav' in sys.argv:
plots = 1
verbose = 0
else:
plots = 0
if '-n' in sys.argv:
label = 1
else:
label = 0
hyst_file = os.path.realpath(os.path.join(dir_path, hyst_file))
rem_file = os.path.realpath(os.path.join(dir_path, rem_file))
#
# initialize some variables
# define figure numbers for Day,S-Bc,S-Bcr
DSC = {}
DSC['day'], DSC['S-Bc'], DSC['S-Bcr'], DSC['bcr1-bcr2'] = 1, 2, 3, 4
pmagplotlib.plot_init(DSC['day'], 5, 5)
pmagplotlib.plot_init(DSC['S-Bc'], 5, 5)
pmagplotlib.plot_init(DSC['S-Bcr'], 5, 5)
pmagplotlib.plot_init(DSC['bcr1-bcr2'], 5, 5)
#
#
hyst_data, file_type = pmag.magic_read(hyst_file)
rem_data, file_type = pmag.magic_read(rem_file)
#
S, BcrBc, Bcr2, Bc, hsids, Bcr = [], [], [], [], [], []
Ms, Bcr1, Bcr1Bc, S1 = [], [], [], []
names = []
locations = ''
for rec in hyst_data:
if 'er_location_name' in rec.keys() and rec['er_location_name'] not in locations:
locations = locations+rec['er_location_name']+'_'
if rec['hysteresis_bcr'] != "" and rec['hysteresis_mr_moment'] != "":
S.append(float(rec['hysteresis_mr_moment']) /
float(rec['hysteresis_ms_moment']))
Bcr.append(float(rec['hysteresis_bcr']))
Bc.append(float(rec['hysteresis_bc']))
BcrBc.append(Bcr[-1]/Bc[-1])
if 'er_synthetic_name' in rec.keys() and rec['er_synthetic_name'] != "":
rec['er_specimen_name'] = rec['er_synthetic_name']
hsids.append(rec['er_specimen_name'])
names.append(rec['er_specimen_name'])
if len(rem_data) > 0:
for rec in rem_data:
if rec['remanence_bcr'] != "" and float(rec['remanence_bcr']) > 0:
try:
ind = hsids.index(rec['er_specimen_name'])
Bcr1.append(float(rec['remanence_bcr']))
Bcr1Bc.append(Bcr1[-1]/Bc[ind])
S1.append(S[ind])
Bcr2.append(Bcr[ind])
except ValueError:
if verbose:
print('hysteresis data for ',
rec['er_specimen_name'], ' not found')
#
# now plot the day and S-Bc, S-Bcr plots
#
leglist = []
if label == 0:
names = []
if len(Bcr1) > 0:
pmagplotlib.plot_day(DSC['day'], Bcr1Bc, S1, 'ro', names=names)
pmagplotlib.plot_s_bcr(DSC['S-Bcr'], Bcr1, S1, 'ro')
pmagplotlib.plot_init(DSC['bcr1-bcr2'], 5, 5)
pmagplotlib.plot_bcr(DSC['bcr1-bcr2'], Bcr1, Bcr2)
else:
del DSC['bcr1-bcr2']
pmagplotlib.plot_day(DSC['day'], BcrBc, S, 'bs', names=names)
pmagplotlib.plot_s_bcr(DSC['S-Bcr'], Bcr, S, 'bs')
pmagplotlib.plot_s_bc(DSC['S-Bc'], Bc, S, 'bs')
files = {}
if len(locations) > 0:
locations = locations[:-1]
for key in DSC.keys():
if pmagplotlib.isServer: # use server plot naming convention
files[key] = 'LO:_'+locations+'_' + \
'SI:__SA:__SP:__TY:_'+key+'_.'+fmt
else: # use more readable plot naming convention
files[key] = '{}_{}.{}'.format(locations, key, fmt)
if verbose:
pmagplotlib.draw_figs(DSC)
ans = raw_input(" S[a]ve to save plots, return to quit: ")
if ans == "a":
pmagplotlib.save_plots(DSC, files)
else:
sys.exit()
if plots:
pmagplotlib.save_plots(DSC, files) | python | def main():
"""
NAME
dayplot_magic.py
DESCRIPTION
makes 'day plots' (Day et al. 1977) and squareness/coercivity,
plots 'linear mixing' curve from Dunlop and Carter-Stiglitz (2006).
squareness coercivity of remanence (Neel, 1955) plots after
Tauxe et al. (2002)
SYNTAX
dayplot_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f: specify input hysteresis file, default is rmag_hysteresis.txt
-fr: specify input remanence file, default is rmag_remanence.txt
-fmt [svg,png,jpg] format for output plots
-sav saves plots and quits quietly
-n label specimen names
"""
args = sys.argv
hyst_file, rem_file = "rmag_hysteresis.txt", "rmag_remanence.txt"
dir_path = '.'
verbose = pmagplotlib.verbose
fmt = 'svg' # default file format
if '-WD' in args:
ind = args.index('-WD')
dir_path = args[ind+1]
if "-h" in args:
print(main.__doc__)
sys.exit()
if '-f' in args:
ind = args.index("-f")
hyst_file = args[ind+1]
if '-fr' in args:
ind = args.index("-fr")
rem_file = args[ind+1]
if '-fmt' in sys.argv:
ind = sys.argv.index("-fmt")
fmt = sys.argv[ind+1]
if '-sav' in sys.argv:
plots = 1
verbose = 0
else:
plots = 0
if '-n' in sys.argv:
label = 1
else:
label = 0
hyst_file = os.path.realpath(os.path.join(dir_path, hyst_file))
rem_file = os.path.realpath(os.path.join(dir_path, rem_file))
#
# initialize some variables
# define figure numbers for Day,S-Bc,S-Bcr
DSC = {}
DSC['day'], DSC['S-Bc'], DSC['S-Bcr'], DSC['bcr1-bcr2'] = 1, 2, 3, 4
pmagplotlib.plot_init(DSC['day'], 5, 5)
pmagplotlib.plot_init(DSC['S-Bc'], 5, 5)
pmagplotlib.plot_init(DSC['S-Bcr'], 5, 5)
pmagplotlib.plot_init(DSC['bcr1-bcr2'], 5, 5)
#
#
hyst_data, file_type = pmag.magic_read(hyst_file)
rem_data, file_type = pmag.magic_read(rem_file)
#
S, BcrBc, Bcr2, Bc, hsids, Bcr = [], [], [], [], [], []
Ms, Bcr1, Bcr1Bc, S1 = [], [], [], []
names = []
locations = ''
for rec in hyst_data:
if 'er_location_name' in rec.keys() and rec['er_location_name'] not in locations:
locations = locations+rec['er_location_name']+'_'
if rec['hysteresis_bcr'] != "" and rec['hysteresis_mr_moment'] != "":
S.append(float(rec['hysteresis_mr_moment']) /
float(rec['hysteresis_ms_moment']))
Bcr.append(float(rec['hysteresis_bcr']))
Bc.append(float(rec['hysteresis_bc']))
BcrBc.append(Bcr[-1]/Bc[-1])
if 'er_synthetic_name' in rec.keys() and rec['er_synthetic_name'] != "":
rec['er_specimen_name'] = rec['er_synthetic_name']
hsids.append(rec['er_specimen_name'])
names.append(rec['er_specimen_name'])
if len(rem_data) > 0:
for rec in rem_data:
if rec['remanence_bcr'] != "" and float(rec['remanence_bcr']) > 0:
try:
ind = hsids.index(rec['er_specimen_name'])
Bcr1.append(float(rec['remanence_bcr']))
Bcr1Bc.append(Bcr1[-1]/Bc[ind])
S1.append(S[ind])
Bcr2.append(Bcr[ind])
except ValueError:
if verbose:
print('hysteresis data for ',
rec['er_specimen_name'], ' not found')
#
# now plot the day and S-Bc, S-Bcr plots
#
leglist = []
if label == 0:
names = []
if len(Bcr1) > 0:
pmagplotlib.plot_day(DSC['day'], Bcr1Bc, S1, 'ro', names=names)
pmagplotlib.plot_s_bcr(DSC['S-Bcr'], Bcr1, S1, 'ro')
pmagplotlib.plot_init(DSC['bcr1-bcr2'], 5, 5)
pmagplotlib.plot_bcr(DSC['bcr1-bcr2'], Bcr1, Bcr2)
else:
del DSC['bcr1-bcr2']
pmagplotlib.plot_day(DSC['day'], BcrBc, S, 'bs', names=names)
pmagplotlib.plot_s_bcr(DSC['S-Bcr'], Bcr, S, 'bs')
pmagplotlib.plot_s_bc(DSC['S-Bc'], Bc, S, 'bs')
files = {}
if len(locations) > 0:
locations = locations[:-1]
for key in DSC.keys():
if pmagplotlib.isServer: # use server plot naming convention
files[key] = 'LO:_'+locations+'_' + \
'SI:__SA:__SP:__TY:_'+key+'_.'+fmt
else: # use more readable plot naming convention
files[key] = '{}_{}.{}'.format(locations, key, fmt)
if verbose:
pmagplotlib.draw_figs(DSC)
ans = raw_input(" S[a]ve to save plots, return to quit: ")
if ans == "a":
pmagplotlib.save_plots(DSC, files)
else:
sys.exit()
if plots:
pmagplotlib.save_plots(DSC, files) | NAME
dayplot_magic.py
DESCRIPTION
makes 'day plots' (Day et al. 1977) and squareness/coercivity,
plots 'linear mixing' curve from Dunlop and Carter-Stiglitz (2006).
squareness coercivity of remanence (Neel, 1955) plots after
Tauxe et al. (2002)
SYNTAX
dayplot_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f: specify input hysteresis file, default is rmag_hysteresis.txt
-fr: specify input remanence file, default is rmag_remanence.txt
-fmt [svg,png,jpg] format for output plots
-sav saves plots and quits quietly
-n label specimen names | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/dayplot_magic2.py#L11-L141 |
PmagPy/PmagPy | programs/orientation_magic.py | main | def main():
"""
NAME
orientation_magic.py
DESCRIPTION
takes tab delimited field notebook information and converts to MagIC formatted tables
SYNTAX
orientation_magic.py [command line options]
OPTIONS
-f FILE: specify input file, default is: orient.txt
-Fsa FILE: specify output file, default is: er_samples.txt
-Fsi FILE: specify output site location file, default is: er_sites.txt
-app append/update these data in existing er_samples.txt, er_sites.txt files
-ocn OCON: specify orientation convention, default is #1 below
-dcn DCON [DEC]: specify declination convention, default is #1 below
if DCON = 2, you must supply the declination correction
-BCN don't correct bedding_dip_dir for magnetic declination -already corrected
-ncn NCON: specify naming convention: default is #1 below
-a: averages all bedding poles and uses average for all samples: default is NO
-gmt HRS: specify hours to subtract from local time to get GMT: default is 0
-mcd: specify sampling method codes as a colon delimited string: [default is: FS-FD:SO-POM]
FS-FD field sampling done with a drill
FS-H field sampling done with hand samples
FS-LOC-GPS field location done with GPS
FS-LOC-MAP field location done with map
SO-POM a Pomeroy orientation device was used
SO-ASC an ASC orientation device was used
-DM: specify data model (2 or 3). Default: 3. Will output to the appropriate format.
Orientation convention:
Samples are oriented in the field with a "field arrow" and measured in the laboratory with a "lab arrow". The lab arrow is the positive X direction of the right handed coordinate system of the specimen measurements. The lab and field arrows may not be the same. In the MagIC database, we require the orientation (azimuth and plunge) of the X direction of the measurements (lab arrow). Here are some popular conventions that convert the field arrow azimuth (mag_azimuth in the orient.txt file) and dip (field_dip in orient.txt) to the azimuth and plunge of the laboratory arrow (sample_azimuth and sample_dip in er_samples.txt). The two angles, mag_azimuth and field_dip are explained below.
[1] Standard Pomeroy convention of azimuth and hade (degrees from vertical down)
of the drill direction (field arrow). lab arrow azimuth= sample_azimuth = mag_azimuth;
lab arrow dip = sample_dip =-field_dip. i.e. the lab arrow dip is minus the hade.
[2] Field arrow is the strike of the plane orthogonal to the drill direction,
Field dip is the hade of the drill direction. Lab arrow azimuth = mag_azimuth-90
Lab arrow dip = -field_dip
[3] Lab arrow is the same as the drill direction;
hade was measured in the field.
Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
[4] lab azimuth and dip are same as mag_azimuth, field_dip : use this for unoriented samples too
[5] Same as AZDIP convention explained below -
azimuth and inclination of the drill direction are mag_azimuth and field_dip;
lab arrow is as in [1] above.
lab azimuth is same as mag_azimuth,lab arrow dip=field_dip-90
[6] Lab arrow azimuth = mag_azimuth-90; Lab arrow dip = 90-field_dip
[7] all others you will have to either customize your
self or e-mail [email protected] for help.
Magnetic declination convention:
[1] Use the IGRF value at the lat/long and date supplied [default]
[2] Will supply declination correction
[3] mag_az is already corrected in file
[4] Correct mag_az but not bedding_dip_dir
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
NB: all others you will have to either customize your
self or e-mail [email protected] for help.
OUTPUT
output saved in er_samples.txt and er_sites.txt (or samples.txt and sites.txt if using data model 3.0)
- this will overwrite any existing files
"""
args = sys.argv
if "-h" in args:
print(main.__doc__)
sys.exit()
else:
info = [['WD', False, '.'], ['ID', False, ''], ['f', False, 'orient.txt'],
['app', False, False], ['ocn', False, 1], ['dcn', False, 1],
['BCN', False, True], ['ncn', False, '1'], ['gmt', False, 0],
['mcd', False, ''], ['a', False, False], ['DM', False, 3]]
#output_dir_path, input_dir_path, orient_file, append, or_con, dec_correction_con, samp_con, hours_from_gmt, method_codes, average_bedding
# leave off -Fsa, -Fsi b/c defaults in command_line_extractor
dataframe = extractor.command_line_dataframe(info)
checked_args = extractor.extract_and_check_args(args, dataframe)
output_dir_path, input_dir_path, orient_file, append, or_con, dec_correction_con, bed_correction, samp_con, hours_from_gmt, method_codes, average_bedding, samp_file, site_file, data_model = extractor.get_vars(['WD', 'ID', 'f', 'app', 'ocn', 'dcn', 'BCN', 'ncn', 'gmt', 'mcd', 'a', 'Fsa', 'Fsi', 'DM'], checked_args)
if input_dir_path == '.':
input_dir_path = output_dir_path
if not isinstance(dec_correction_con, int):
if len(dec_correction_con) > 1:
dec_correction = int(dec_correction_con.split()[1])
dec_correction_con = int(dec_correction_con.split()[0])
else:
dec_correction = 0
else:
dec_correction = 0
def main():
    """
    NAME
        orientation_magic.py
    DESCRIPTION
        takes tab delimited field notebook information and converts to MagIC formatted tables
    SYNTAX
        orientation_magic.py [command line options]
    OPTIONS
        -f FILE: specify input file, default is: orient.txt
        -Fsa FILE: specify output file, default is: er_samples.txt
        -Fsi FILE: specify output site location file, default is: er_sites.txt
        -app append/update these data in existing er_samples.txt, er_sites.txt files
        -ocn OCON: specify orientation convention, default is #1 below
        -dcn DCON [DEC]: specify declination convention, default is #1 below
           if DCON = 2, you must supply the declination correction
        -BCN don't correct bedding_dip_dir for magnetic declination -already corrected
        -ncn NCON: specify naming convention: default is #1 below
        -a: averages all bedding poles and uses average for all samples: default is NO
        -gmt HRS: specify hours to subtract from local time to get GMT: default is 0
        -mcd: specify sampling method codes as a colon delimited string: [default is: FS-FD:SO-POM]
             FS-FD field sampling done with a drill
             FS-H field sampling done with hand samples
             FS-LOC-GPS field location done with GPS
             FS-LOC-MAP field location done with map
             SO-POM a Pomeroy orientation device was used
             SO-ASC an ASC orientation device was used
        -DM: specify data model (2 or 3). Default: 3. Will output to the appropriate format.
    Orientation convention:
        Samples are oriented in the field with a "field arrow" and measured in the
        laboratory with a "lab arrow". The lab arrow is the positive X direction of the
        right handed coordinate system of the specimen measurements. The lab and field
        arrows may not be the same. In the MagIC database, we require the orientation
        (azimuth and plunge) of the X direction of the measurements (lab arrow). Here are
        some popular conventions that convert the field arrow azimuth (mag_azimuth in the
        orient.txt file) and dip (field_dip in orient.txt) to the azimuth and plunge of
        the laboratory arrow (sample_azimuth and sample_dip in er_samples.txt). The two
        angles, mag_azimuth and field_dip are explained below.
        [1] Standard Pomeroy convention of azimuth and hade (degrees from vertical down)
            of the drill direction (field arrow). lab arrow azimuth= sample_azimuth = mag_azimuth;
            lab arrow dip = sample_dip = -field_dip. i.e. the lab arrow dip is minus the hade.
        [2] Field arrow is the strike of the plane orthogonal to the drill direction,
            Field dip is the hade of the drill direction. Lab arrow azimuth = mag_azimuth-90
            Lab arrow dip = -field_dip
        [3] Lab arrow is the same as the drill direction;
            hade was measured in the field.
            Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
        [4] lab azimuth and dip are same as mag_azimuth, field_dip : use this for unoriented samples too
        [5] Same as AZDIP convention explained below -
            azimuth and inclination of the drill direction are mag_azimuth and field_dip;
            lab arrow is as in [1] above.
            lab azimuth is same as mag_azimuth, lab arrow dip = field_dip-90
        [6] Lab arrow azimuth = mag_azimuth-90; Lab arrow dip = 90-field_dip
        [7] all others you will have to either customize your
            self or e-mail [email protected] for help.
    Magnetic declination convention:
        [1] Use the IGRF value at the lat/long and date supplied [default]
        [2] Will supply declination correction
        [3] mag_az is already corrected in file
        [4] Correct mag_az but not bedding_dip_dir
    Sample naming convention:
        [1] XXXXY: where XXXX is an arbitrary length site designation and Y
            is the single character sample designation. e.g., TG001a is the
            first sample from site TG001. [default]
        [2] XXXX-YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
        [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXXX
        [5] site name = sample name
        [6] site name entered in site_name column in the orient.txt format input file
        [7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
        NB: all others you will have to either customize your
            self or e-mail [email protected] for help.
    OUTPUT
        output saved in er_samples.txt and er_sites.txt (or samples.txt and sites.txt if using data model 3.0)
        - this will overwrite any existing files
    """
    args = sys.argv
    if "-h" in args:
        print(main.__doc__)
        sys.exit()
    # [flag name, mandatory?, default value]
    info = [['WD', False, '.'], ['ID', False, ''], ['f', False, 'orient.txt'],
            ['app', False, False], ['ocn', False, 1], ['dcn', False, 1],
            ['BCN', False, True], ['ncn', False, '1'], ['gmt', False, 0],
            ['mcd', False, ''], ['a', False, False], ['DM', False, 3]]
    # leave off -Fsa, -Fsi b/c defaults are supplied by command_line_extractor
    dataframe = extractor.command_line_dataframe(info)
    checked_args = extractor.extract_and_check_args(args, dataframe)
    (output_dir_path, input_dir_path, orient_file, append, or_con,
     dec_correction_con, bed_correction, samp_con, hours_from_gmt,
     method_codes, average_bedding, samp_file, site_file,
     data_model) = extractor.get_vars(
         ['WD', 'ID', 'f', 'app', 'ocn', 'dcn', 'BCN', 'ncn', 'gmt',
          'mcd', 'a', 'Fsa', 'Fsi', 'DM'], checked_args)
    if input_dir_path == '.':
        input_dir_path = output_dir_path
    # -dcn arrives either as an int (the default) or as a string, optionally
    # carrying a second token with the declination correction (e.g. "2 14.5").
    dec_correction = 0
    if not isinstance(dec_correction_con, int):
        tokens = dec_correction_con.split()
        # BUGFIX: the original tested len(dec_correction_con) > 1 (the STRING
        # length), so a one-token multi-character value like "12" crashed with
        # IndexError on split()[1], and the else branch passed the convention
        # downstream as an uncast string. Test the token count instead, and
        # always cast the convention to int. int(float(...)) also accepts
        # fractional corrections such as "14.5" instead of raising ValueError.
        if len(tokens) > 1:
            dec_correction = int(float(tokens[1]))
        dec_correction_con = int(tokens[0])
    ipmag.orientation_magic(or_con, dec_correction_con, dec_correction, bed_correction,
                            samp_con, hours_from_gmt, method_codes, average_bedding,
                            orient_file, samp_file, site_file, output_dir_path,
                            input_dir_path, append, data_model)
orientation_magic.py
DESCRIPTION
takes tab delimited field notebook information and converts to MagIC formatted tables
SYNTAX
orientation_magic.py [command line options]
OPTIONS
-f FILE: specify input file, default is: orient.txt
-Fsa FILE: specify output file, default is: er_samples.txt
-Fsi FILE: specify output site location file, default is: er_sites.txt
-app append/update these data in existing er_samples.txt, er_sites.txt files
-ocn OCON: specify orientation convention, default is #1 below
-dcn DCON [DEC]: specify declination convention, default is #1 below
if DCON = 2, you must supply the declination correction
-BCN don't correct bedding_dip_dir for magnetic declination -already corrected
-ncn NCON: specify naming convention: default is #1 below
-a: averages all bedding poles and uses average for all samples: default is NO
-gmt HRS: specify hours to subtract from local time to get GMT: default is 0
-mcd: specify sampling method codes as a colon delimited string: [default is: FS-FD:SO-POM]
FS-FD field sampling done with a drill
FS-H field sampling done with hand samples
FS-LOC-GPS field location done with GPS
FS-LOC-MAP field location done with map
SO-POM a Pomeroy orientation device was used
SO-ASC an ASC orientation device was used
-DM: specify data model (2 or 3). Default: 3. Will output to the appropriate format.
Orientation convention:
Samples are oriented in the field with a "field arrow" and measured in the laboratory with a "lab arrow". The lab arrow is the positive X direction of the right handed coordinate system of the specimen measurements. The lab and field arrows may not be the same. In the MagIC database, we require the orientation (azimuth and plunge) of the X direction of the measurements (lab arrow). Here are some popular conventions that convert the field arrow azimuth (mag_azimuth in the orient.txt file) and dip (field_dip in orient.txt) to the azimuth and plunge of the laboratory arrow (sample_azimuth and sample_dip in er_samples.txt). The two angles, mag_azimuth and field_dip are explained below.
[1] Standard Pomeroy convention of azimuth and hade (degrees from vertical down)
of the drill direction (field arrow). lab arrow azimuth= sample_azimuth = mag_azimuth;
lab arrow dip = sample_dip =-field_dip. i.e. the lab arrow dip is minus the hade.
[2] Field arrow is the strike of the plane orthogonal to the drill direction,
Field dip is the hade of the drill direction. Lab arrow azimuth = mag_azimuth-90
Lab arrow dip = -field_dip
[3] Lab arrow is the same as the drill direction;
hade was measured in the field.
Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
[4] lab azimuth and dip are same as mag_azimuth, field_dip : use this for unoriented samples too
[5] Same as AZDIP convention explained below -
azimuth and inclination of the drill direction are mag_azimuth and field_dip;
lab arrow is as in [1] above.
lab azimuth is same as mag_azimuth,lab arrow dip=field_dip-90
[6] Lab arrow azimuth = mag_azimuth-90; Lab arrow dip = 90-field_dip
[7] all others you will have to either customize your
self or e-mail [email protected] for help.
Magnetic declination convention:
[1] Use the IGRF value at the lat/long and date supplied [default]
[2] Will supply declination correction
[3] mag_az is already corrected in file
[4] Correct mag_az but not bedding_dip_dir
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
         [2] XXXX-YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
         [3] XXXX.YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
NB: all others you will have to either customize your
self or e-mail [email protected] for help.
OUTPUT
output saved in er_samples.txt and er_sites.txt (or samples.txt and sites.txt if using data model 3.0)
- this will overwrite any existing files | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/orientation_magic.py#L8-L111 |
def add_items(self, items_list, incl_pmag=True, incl_parents=True):
    """
    Add items and/or update existing items in grid
    """
    # labels already present in column 0 of the grid
    existing = {self.GetCellValue(row, 0) for row in range(self.GetNumberRows())}
    er_data = {item.name: item.er_data for item in items_list}
    pmag_data = {item.name: item.pmag_data for item in items_list}
    # append rows (in name order) only for items the grid doesn't have yet
    for item in sorted(items_list, key=lambda it: it.name):
        if item.name not in existing:
            self.add_row(item.name, item)
    self.add_data(er_data)
    if incl_pmag:
        self.add_data(pmag_data, pmag=True)
    if incl_parents:
        self.add_parents()
def add_row(self, label='', item=''):
    """
    Add a row to the grid
    """
    self.AppendRows(1)
    # the freshly appended row is always the bottom one
    bottom = self.GetNumberRows() - 1
    self.SetCellValue(bottom, 0, str(label))
    # keep the bookkeeping lists in step with the grid contents
    self.row_labels.append(label)
    self.row_items.append(item)
def remove_row(self, row_num=None):
    """
    Remove a row from the grid.

    Parameters
    ----------
    row_num : int or None
        index of the row to delete; if None, the last row is removed

    Records the deletion in self.changes (the -1 sentinel flags "a row was
    deleted") and renumbers any pending changes below the deleted row.
    """
    # BUGFIX/idiom: the original guard `if not row_num and row_num != 0:`
    # was a convoluted way of letting row 0 through, and any falsy non-None
    # value (e.g. False) silently deleted the LAST row. Test the declared
    # default explicitly instead.
    if row_num is None:
        row_num = self.GetNumberRows() - 1
    label = self.GetCellValue(row_num, 0)
    self.DeleteRows(pos=row_num, numRows=1, updateLabels=True)
    # drop the label from the bookkeeping list
    try:
        self.row_labels.remove(label)
    except ValueError:
        # if label name hasn't been saved yet, simply truncate row_labels
        self.row_labels = self.row_labels[:-1]
    self.row_items.pop(row_num)
    if not self.changes:
        self.changes = set()
    # -1 is the sentinel meaning "a row was deleted"
    self.changes.add(-1)
    # fix row numbers for any changes recorded below the deleted row
    self.update_changes_after_row_delete(row_num)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.