repository_name
stringlengths 5
67
| func_path_in_repository
stringlengths 4
234
| func_name
stringlengths 0
314
| whole_func_string
stringlengths 52
3.87M
| language
stringclasses 6
values | func_code_string
stringlengths 52
3.87M
| func_documentation_string
stringlengths 1
47.2k
| func_code_url
stringlengths 85
339
|
---|---|---|---|---|---|---|---|
def initialize_acceptance_criteria(**kwargs):
    '''
    Initialize acceptance criteria with NULL values for thellier_gui and demag_gui.

    Returns a dictionary of dictionaries, one entry per criterion:
        acceptance_criteria[crit]['category']       : 'DE-SPEC', 'DE-SAMP', ... etc.
        acceptance_criteria[crit]['criterion_name'] : MagIC name (same as crit)
        acceptance_criteria[crit]['value']          :
            a number (for 'regular' criteria)
            a string (for 'flag')
            1 for True (if the criterion is boolean)
            0 for False (if the criterion is boolean)
            -999 means N/A
        acceptance_criteria[crit]['threshold_type'] :
            'low'  for a low threshold value
            'high' for a high threshold value
            [flag1, flag2, ...] for flag criteria
            'bool' for boolean flags (can be 'g'/'b', True/False or 1/0)
        acceptance_criteria[crit]['decimal_points'] :
            number of decimal points used when displaying the criterion
            in the dialog box; -999 means exponent with 3 decimal points
            for floats, plain string for strings

    kwargs are accepted for backward compatibility but ignored.
    '''
    acceptance_criteria = {}

    def _add(category, crit, threshold_type, decimal_points):
        # Register one criterion initialized to the NULL value (-999).
        # Re-adding an existing key overwrites it in place (dict keeps the
        # original insertion position), matching the historical behavior
        # for keys that appear in more than one category (e.g. site_polarity).
        acceptance_criteria[crit] = {
            'category': category,
            'criterion_name': crit,
            'value': -999,
            'threshold_type': threshold_type,
            'decimal_points': decimal_points,
        }

    # --------------------------------
    # 'DE-SPEC'
    # --------------------------------
    _add('DE-SPEC', 'specimen_n', 'low', 0)
    for crit in ['specimen_mad', 'specimen_dang', 'specimen_alpha95']:
        _add('DE-SPEC', crit, 'high', 1)
    _add('DE-SPEC', 'specimen_direction_type', ['l', 'p'], -999)

    # --------------------------------
    # 'DE-SAMP'
    # --------------------------------
    for crit in ['sample_n', 'sample_n_lines', 'sample_n_planes']:
        _add('DE-SAMP', crit, 'low', 0)
    for crit in ['sample_r', 'sample_alpha95', 'sample_sigma', 'sample_k', 'sample_tilt_correction']:
        if crit == 'sample_tilt_correction':
            decimals = 0
        elif crit == 'sample_alpha95':
            decimals = 1
        else:
            decimals = -999
        _add('DE-SAMP', crit, 'high', decimals)
    _add('DE-SAMP', 'sample_direction_type', ['l', 'p'], -999)
    _add('DE-SAMP', 'sample_polarity', ['n', 'r', 't', 'e', 'i'], -999)

    # --------------------------------
    # 'DE-SITE'
    # --------------------------------
    for crit in ['site_n', 'site_n_lines', 'site_n_planes']:
        _add('DE-SITE', crit, 'low', 0)
    for crit in ['site_k', 'site_r', 'site_alpha95', 'site_sigma', 'site_tilt_correction']:
        _add('DE-SITE', crit, 'high', 0 if crit == 'site_tilt_correction' else 1)
    _add('DE-SITE', 'site_direction_type', ['l', 'p'], -999)
    _add('DE-SITE', 'site_polarity', ['n', 'r', 't', 'e', 'i'], -999)

    # --------------------------------
    # 'DE-STUDY'
    # --------------------------------
    for crit in ['average_k', 'average_n', 'average_nn', 'average_nnn', 'average_r']:
        if crit in ['average_n', 'average_nn', 'average_nnn']:
            decimals = 0
        else:
            decimals = -999
        _add('DE-STUDY', crit, 'low', decimals)
    _add('DE-STUDY', 'average_alpha95', 'high', 1)
    _add('DE-STUDY', 'average_sigma', 'high', -999)

    # --------------------------------
    # 'IE-SPEC' (a long list from SPD.v.1.0)
    # --------------------------------
    # low cutoff value
    for crit in ['specimen_int_n', 'specimen_f', 'specimen_fvds', 'specimen_frac',
                 'specimen_q', 'specimen_w', 'specimen_r_sq', 'specimen_int_ptrm_n',
                 'specimen_int_ptrm_tail_n', 'specimen_ac_n']:
        if crit in ['specimen_int_n', 'specimen_int_ptrm_n', 'specimen_int_ptrm_tail_n', 'specimen_ac_n']:
            decimals = 0
        elif crit in ['specimen_f', 'specimen_fvds', 'specimen_frac', 'specimen_q']:
            decimals = 2
        else:
            decimals = -999
        _add('IE-SPEC', crit, 'low', decimals)
    # high cutoff value; display precision varies by statistic
    one_decimal = ['specimen_int_mad', 'specimen_int_mad_anc', 'specimen_int_dang',
                   'specimen_drat', 'specimen_cdrat', 'specimen_drats', 'specimen_tail_drat',
                   'specimen_dtr', 'specimen_md', 'specimen_dac', 'specimen_gamma']
    three_decimals = ['specimen_b_sigma', 'specimen_b_beta', 'specimen_g',
                      'specimen_k', 'specimen_k_prime']
    for crit in ['specimen_b_sigma', 'specimen_b_beta', 'specimen_g', 'specimen_gmax',
                 'specimen_k', 'specimen_k_sse', 'specimen_k_prime', 'specimen_k_prime_sse',
                 'specimen_coeff_det_sq', 'specimen_z', 'specimen_z_md', 'specimen_int_mad',
                 'specimen_int_mad_anc', 'specimen_int_alpha', 'specimen_alpha',
                 'specimen_alpha_prime', 'specimen_theta', 'specimen_int_dang',
                 'specimen_int_crm', 'specimen_ptrm', 'specimen_dck', 'specimen_drat',
                 'specimen_maxdev', 'specimen_cdrat', 'specimen_drats', 'specimen_mdrat',
                 'specimen_mdev', 'specimen_dpal', 'specimen_tail_drat', 'specimen_dtr',
                 'specimen_md', 'specimen_dt', 'specimen_dac', 'specimen_gamma']:
        if crit in one_decimal:
            decimals = 1
        elif crit == 'specimen_gmax':
            decimals = 2
        elif crit in three_decimals:
            decimals = 3
        else:
            decimals = -999
        _add('IE-SPEC', crit, 'high', decimals)
    # flags
    _add('IE-SPEC', 'specimen_scat', 'bool', -999)

    # --------------------------------
    # 'IE-SAMP'
    # --------------------------------
    _add('IE-SAMP', 'sample_int_n', 'low', 0)
    for crit in ['sample_int_rel_sigma', 'sample_int_rel_sigma_perc',
                 'sample_int_sigma', 'sample_int_sigma_perc']:
        _add('IE-SAMP', crit, 'high', 1 if crit.endswith('_perc') else -999)

    # --------------------------------
    # 'IE-SITE'
    # --------------------------------
    _add('IE-SITE', 'site_int_n', 'low', 0)
    for crit in ['site_int_rel_sigma', 'site_int_rel_sigma_perc',
                 'site_int_sigma', 'site_int_sigma_perc']:
        _add('IE-SITE', crit, 'high', 1 if crit.endswith('_perc') else -999)

    # --------------------------------
    # 'IE-STUDY'
    # --------------------------------
    for crit in ['average_int_n', 'average_int_nn', 'average_int_nnn']:
        _add('IE-STUDY', crit, 'low', 0)
    for crit in ['average_int_rel_sigma', 'average_int_rel_sigma_perc', 'average_int_sigma']:
        _add('IE-STUDY', crit, 'high', 1 if crit == 'average_int_rel_sigma_perc' else -999)

    # --------------------------------
    # 'NPOLE' / 'RPOLE'
    # --------------------------------
    # NOTE: both sections key on 'site_polarity', so the RPOLE entry
    # overwrites the NPOLE one; kept for fidelity with historical behavior.
    _add('NPOLE', 'site_polarity', ['n', 'r'], -999)
    _add('RPOLE', 'site_polarity', ['n', 'r'], -999)

    # --------------------------------
    # 'VADM'
    # --------------------------------
    _add('VADM', 'vadm_n', 'low', 0)
    # BUGFIX: was threshold_type 'low' despite being a high cutoff (sigma is
    # an upper bound); corrected to 'high'.
    _add('VADM', 'vadm_sigma', 'high', -999)

    # --------------------------------
    # 'VDM'
    # --------------------------------
    _add('VDM', 'vdm_n', 'low', 0)
    # BUGFIX: same 'low' -> 'high' correction as vadm_sigma.
    _add('VDM', 'vdm_sigma', 'high', -999)

    # --------------------------------
    # 'VGP'
    # --------------------------------
    # BUGFIX: category was mistakenly left as 'VDM' for this section.
    _add('VGP', 'vgp_n', 'low', 0)
    # BUGFIX: the original assigned the tuple key
    # ['decimal_points', 'vgp_dm', 'vgp_dp'] so vgp_alpha95 never received a
    # 'decimal_points' entry; the leaked names show vgp_dm/vgp_dp were meant
    # to share the 1-decimal display. These are high cutoffs, not low.
    for crit in ['vgp_alpha95', 'vgp_dm', 'vgp_dp', 'vgp_sigma']:
        _add('VGP', crit, 'high', -999 if crit == 'vgp_sigma' else 1)

    # --------------------------------
    # 'AGE'
    # --------------------------------
    _add('AGE', 'average_age_min', 'low', -999)
    for crit in ['average_age_max', 'average_age_sigma']:
        _add('AGE', crit, 'high', -999)
    _add('AGE', 'average_age_unit',
         ['Ga', 'Ka', 'Ma', 'Years AD (+/-)', 'Years BP',
          'Years Cal AD (+/-)', 'Years Cal BP'], -999)

    # --------------------------------
    # 'ANI'
    # --------------------------------
    for crit in ['anisotropy_alt', 'sample_aniso_mean', 'site_aniso_mean']:  # value is in percent
        _add('ANI', crit, 'high', 3)
    _add('ANI', 'specimen_aniso_ftest_flag', 'bool', -999)

    return acceptance_criteria
'''
initialize acceptance criteria with NULL values for thellier_gui and demag_gui
acceptance criteria format is doctionaries:
acceptance_criteria={}
acceptance_criteria[crit]={}
acceptance_criteria[crit]['category']=
acceptance_criteria[crit]['criterion_name']=
acceptance_criteria[crit]['value']=
acceptance_criteria[crit]['threshold_type']
acceptance_criteria[crit]['decimal_points']
'category':
'DE-SPEC','DE-SAMP'..etc
'criterion_name':
MagIC name
'value':
a number (for 'regular criteria')
a string (for 'flag')
1 for True (if criteria is bullean)
0 for False (if criteria is bullean)
-999 means N/A
'threshold_type':
'low'for low threshold value
'high'for high threshold value
[flag1.flag2]: for flags
'bool' for boolean flags (can be 'g','b' or True/Flase or 1/0)
'decimal_points':
number of decimal points in rounding
(this is used in displaying criteria in the dialog box)
-999 means Exponent with 3 descimal points for floats and string for string
'''
acceptance_criteria = {}
# --------------------------------
# 'DE-SPEC'
# --------------------------------
# low cutoff value
category = 'DE-SPEC'
for crit in ['specimen_n']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
acceptance_criteria[crit]['decimal_points'] = 0
# high cutoff value
category = 'DE-SPEC'
for crit in ['specimen_mad', 'specimen_dang', 'specimen_alpha95']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "high"
acceptance_criteria[crit]['decimal_points'] = 1
# flag
for crit in ['specimen_direction_type']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
if crit == 'specimen_direction_type':
acceptance_criteria[crit]['threshold_type'] = ['l', 'p']
if crit == 'specimen_polarity':
acceptance_criteria[crit]['threshold_type'] = [
'n', 'r', 't', 'e', 'i']
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'DE-SAMP'
# --------------------------------
# low cutoff value
category = 'DE-SAMP'
for crit in ['sample_n', 'sample_n_lines', 'sample_n_planes']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
acceptance_criteria[crit]['decimal_points'] = 0
# high cutoff value
category = 'DE-SAMP'
for crit in ['sample_r', 'sample_alpha95', 'sample_sigma', 'sample_k', 'sample_tilt_correction']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "high"
if crit in ['sample_tilt_correction']:
acceptance_criteria[crit]['decimal_points'] = 0
elif crit in ['sample_alpha95']:
acceptance_criteria[crit]['decimal_points'] = 1
else:
acceptance_criteria[crit]['decimal_points'] = -999
# flag
for crit in ['sample_direction_type', 'sample_polarity']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
if crit == 'sample_direction_type':
acceptance_criteria[crit]['threshold_type'] = ['l', 'p']
if crit == 'sample_polarity':
acceptance_criteria[crit]['threshold_type'] = [
'n', 'r', 't', 'e', 'i']
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'DE-SITE'
# --------------------------------
# low cutoff value
category = 'DE-SITE'
for crit in ['site_n', 'site_n_lines', 'site_n_planes']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
acceptance_criteria[crit]['decimal_points'] = 0
# high cutoff value
for crit in ['site_k', 'site_r', 'site_alpha95', 'site_sigma', 'site_tilt_correction']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "high"
if crit in ['site_tilt_correction']:
acceptance_criteria[crit]['decimal_points'] = 0
else:
acceptance_criteria[crit]['decimal_points'] = 1
# flag
for crit in ['site_direction_type', 'site_polarity']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
if crit == 'site_direction_type':
acceptance_criteria[crit]['threshold_type'] = ['l', 'p']
if crit == 'site_polarity':
acceptance_criteria[crit]['threshold_type'] = [
'n', 'r', 't', 'e', 'i']
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'DE-STUDY'
# --------------------------------
category = 'DE-STUDY'
# low cutoff value
for crit in ['average_k', 'average_n', 'average_nn', 'average_nnn', 'average_r']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
if crit in ['average_n', 'average_nn', 'average_nnn']:
acceptance_criteria[crit]['decimal_points'] = 0
elif crit in ['average_alpha95']:
acceptance_criteria[crit]['decimal_points'] = 1
else:
acceptance_criteria[crit]['decimal_points'] = -999
# high cutoff value
for crit in ['average_alpha95', 'average_sigma']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "high"
if crit in ['average_alpha95']:
acceptance_criteria[crit]['decimal_points'] = 1
else:
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'IE-SPEC' (a long list from SPD.v.1.0)
# --------------------------------
category = 'IE-SPEC'
# low cutoff value
for crit in ['specimen_int_n', 'specimen_f', 'specimen_fvds', 'specimen_frac', 'specimen_q', 'specimen_w', 'specimen_r_sq', 'specimen_int_ptrm_n',
'specimen_int_ptrm_tail_n', 'specimen_ac_n']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
acceptance_criteria[crit]['decimal_points'] = 0
if crit in ['specimen_int_n', 'specimen_int_ptrm_n', 'specimen_int_ptrm_tail_n', 'specimen_ac_n']:
acceptance_criteria[crit]['decimal_points'] = 0
elif crit in ['specimen_f', 'specimen_fvds', 'specimen_frac', 'specimen_q']:
acceptance_criteria[crit]['decimal_points'] = 2
else:
acceptance_criteria[crit]['decimal_points'] = -999
# high cutoff value
for crit in ['specimen_b_sigma', 'specimen_b_beta', 'specimen_g', 'specimen_gmax', 'specimen_k', 'specimen_k_sse', 'specimen_k_prime', 'specimen_k_prime_sse',
'specimen_coeff_det_sq', 'specimen_z', 'specimen_z_md', 'specimen_int_mad', 'specimen_int_mad_anc', 'specimen_int_alpha', 'specimen_alpha', 'specimen_alpha_prime',
'specimen_theta', 'specimen_int_dang', 'specimen_int_crm', 'specimen_ptrm', 'specimen_dck', 'specimen_drat', 'specimen_maxdev', 'specimen_cdrat',
'specimen_drats', 'specimen_mdrat', 'specimen_mdev', 'specimen_dpal', 'specimen_tail_drat', 'specimen_dtr', 'specimen_md', 'specimen_dt', 'specimen_dac', 'specimen_gamma']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "high"
if crit in ['specimen_int_mad', 'specimen_int_mad_anc', 'specimen_int_dang', 'specimen_drat', 'specimen_cdrat', 'specimen_drats', 'specimen_tail_drat', 'specimen_dtr', 'specimen_md', 'specimen_dac', 'specimen_gamma']:
acceptance_criteria[crit]['decimal_points'] = 1
elif crit in ['specimen_gmax']:
acceptance_criteria[crit]['decimal_points'] = 2
elif crit in ['specimen_b_sigma', 'specimen_b_beta', 'specimen_g', 'specimen_k', 'specimen_k_prime']:
acceptance_criteria[crit]['decimal_points'] = 3
else:
acceptance_criteria[crit]['decimal_points'] = -999
# flags
for crit in ['specimen_scat']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = 'bool'
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'IE-SAMP'
# --------------------------------
category = 'IE-SAMP'
# low cutoff value
for crit in ['sample_int_n']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
acceptance_criteria[crit]['decimal_points'] = 0
# high cutoff value
for crit in ['sample_int_rel_sigma', 'sample_int_rel_sigma_perc', 'sample_int_sigma', 'sample_int_sigma_perc']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "high"
if crit in ['sample_int_rel_sigma_perc', 'sample_int_sigma_perc']:
acceptance_criteria[crit]['decimal_points'] = 1
else:
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'IE-SITE'
# --------------------------------
category = 'IE-SITE'
# low cutoff value
for crit in ['site_int_n']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
acceptance_criteria[crit]['decimal_points'] = 0
# high cutoff value
for crit in ['site_int_rel_sigma', 'site_int_rel_sigma_perc', 'site_int_sigma', 'site_int_sigma_perc']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "high"
if crit in ['site_int_rel_sigma_perc', 'site_int_sigma_perc']:
acceptance_criteria[crit]['decimal_points'] = 1
else:
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'IE-STUDY'
# --------------------------------
category = 'IE-STUDY'
# low cutoff value
for crit in ['average_int_n', 'average_int_n', 'average_int_nn', 'average_int_nnn', ]:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
acceptance_criteria[crit]['decimal_points'] = 0
# high cutoff value
for crit in ['average_int_rel_sigma', 'average_int_rel_sigma_perc', 'average_int_sigma']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "high"
if crit in ['average_int_rel_sigma_perc']:
acceptance_criteria[crit]['decimal_points'] = 1
else:
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'NPOLE'
# --------------------------------
category = 'NPOLE'
# flags
for crit in ['site_polarity']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = ['n', 'r']
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'NPOLE'
# --------------------------------
category = 'RPOLE'
# flags
for crit in ['site_polarity']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = ['n', 'r']
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'VADM'
# --------------------------------
category = 'VADM'
# low cutoff value
for crit in ['vadm_n']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
if crit in ['vadm_n']:
acceptance_criteria[crit]['decimal_points'] = 0
else:
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'VADM'
# --------------------------------
category = 'VADM'
# low cutoff value
for crit in ['vadm_n']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
acceptance_criteria[crit]['decimal_points'] = 0
# high cutoff value
for crit in ['vadm_sigma']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'VADM'
# --------------------------------
category = 'VDM'
# low cutoff value
for crit in ['vdm_n']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
acceptance_criteria[crit]['decimal_points'] = 0
# high cutoff value
for crit in ['vdm_sigma']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'VGP'
# --------------------------------
category = 'VDM'
# low cutoff value
for crit in ['vgp_n']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
acceptance_criteria[crit]['decimal_points'] = 0
# high cutoff value
for crit in ['vgp_alpha95', 'vgp_dm', 'vgp_dp', 'vgp_sigma']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
if crit in ['vgp_alpha95']:
acceptance_criteria[crit]['decimal_points', 'vgp_dm', 'vgp_dp'] = 1
else:
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'AGE'
# --------------------------------
category = 'AGE'
# low cutoff value
for crit in ['average_age_min']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "low"
acceptance_criteria[crit]['decimal_points'] = -999
# high cutoff value
for crit in ['average_age_max', 'average_age_sigma']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "high"
acceptance_criteria[crit]['decimal_points'] = -999
# flags
for crit in ['average_age_unit']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = [
'Ga', 'Ka', 'Ma', 'Years AD (+/-)', 'Years BP', 'Years Cal AD (+/-)', 'Years Cal BP']
acceptance_criteria[crit]['decimal_points'] = -999
# --------------------------------
# 'ANI'
# --------------------------------
category = 'ANI'
# high cutoff value
for crit in ['anisotropy_alt', 'sample_aniso_mean', 'site_aniso_mean']: # value is in precent
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = "high"
acceptance_criteria[crit]['decimal_points'] = 3
# flags
for crit in ['specimen_aniso_ftest_flag']:
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['category'] = category
acceptance_criteria[crit]['criterion_name'] = crit
acceptance_criteria[crit]['value'] = -999
acceptance_criteria[crit]['threshold_type'] = 'bool'
acceptance_criteria[crit]['decimal_points'] = -999
return(acceptance_criteria) | initialize acceptance criteria with NULL values for thellier_gui and demag_gui
acceptance criteria format is doctionaries:
acceptance_criteria={}
acceptance_criteria[crit]={}
acceptance_criteria[crit]['category']=
acceptance_criteria[crit]['criterion_name']=
acceptance_criteria[crit]['value']=
acceptance_criteria[crit]['threshold_type']
acceptance_criteria[crit]['decimal_points']
'category':
'DE-SPEC','DE-SAMP'..etc
'criterion_name':
MagIC name
'value':
a number (for 'regular criteria')
a string (for 'flag')
1 for True (if criteria is bullean)
0 for False (if criteria is bullean)
-999 means N/A
'threshold_type':
'low'for low threshold value
'high'for high threshold value
[flag1.flag2]: for flags
'bool' for boolean flags (can be 'g','b' or True/Flase or 1/0)
'decimal_points':
number of decimal points in rounding
(this is used in displaying criteria in the dialog box)
-999 means Exponent with 3 descimal points for floats and string for string | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L9708-L10183 |
PmagPy/PmagPy | pmagpy/pmag.py | read_criteria_from_file | def read_criteria_from_file(path, acceptance_criteria, **kwargs):
'''
Read accceptance criteria from magic criteria file
# old format:
multiple lines. pmag_criteria_code defines the type of criteria
to deal with old format this function reads all the lines and ignore empty cells.
i.e., the program assumes that in each column there is only one value (in one of the lines)
special case in the old format:
specimen_dang has a value and pmag_criteria_code is IE-specimen.
The program assumes that the user means specimen_int_dang
# New format for thellier_gui and demag_gui:
one long line. pmag_criteria_code=ACCEPT
path is the full path to the criteria file
the function takes exiting acceptance_criteria
and updtate it with criteria from file
output:
acceptance_criteria={}
acceptance_criteria[MagIC Variable Names]={}
acceptance_criteria[MagIC Variable Names]['value']:
a number for acceptance criteria value
-999 for N/A
1/0 for True/False or Good/Bad
acceptance_criteria[MagIC Variable Names]['threshold_type']:
"low": lower cutoff value i.e. crit>=value pass criteria
"high": high cutoff value i.e. crit<=value pass criteria
[string1,string2,....]: for flags
acceptance_criteria[MagIC Variable Names]['decimal_points']:number of decimal points in rounding
(this is used in displaying criteria in the dialog box)
'''
warnings = []
acceptance_criteria_list = list(acceptance_criteria.keys())
if 'data_model' in list(kwargs.keys()) and kwargs['data_model'] == 3:
crit_data = acceptance_criteria # data already read in
else:
crit_data, file_type = magic_read(path)
if 'criteria' not in file_type:
if 'empty' in file_type:
print('-W- No criteria found: {} '.format(path))
else:
print(
'-W- {} could not be read and may be improperly formatted...'.format(path))
for rec in crit_data:
# gather metadata
metadata_dict = {'pmag_criteria_code': '',
'criteria_definition': '', 'er_citation_names': ''}
for metadata in metadata_dict:
if metadata in rec:
metadata_dict[metadata] = rec[metadata]
# check each record for correct name and compatibility
for crit in list(rec.keys()):
if crit == 'anisotropy_ftest_flag' and crit not in list(rec.keys()):
crit = 'specimen_aniso_ftest_flag' # convert legacy criterion to 2.5
rec[crit] = rec[crit].strip('\n')
if crit in ['pmag_criteria_code', 'criteria_definition', 'magic_experiment_names', 'er_citation_names']:
continue
elif rec[crit] == "":
continue
# this catches all the ones that are being overwritten
if crit in acceptance_criteria:
if acceptance_criteria[crit]['value'] not in [-999, '-999', -999]:
print(
"-W- You have multiple different criteria that both use column: {}.\nThe last will be used:\n{}.".format(crit, rec))
warn_string = 'multiple criteria for column: {} (only last will be used)'.format(
crit)
if warn_string not in warnings:
warnings.append(warn_string)
if crit == "specimen_dang" and "pmag_criteria_code" in list(rec.keys()) and "IE-SPEC" in rec["pmag_criteria_code"]:
crit = "specimen_int_dang"
print("-W- Found backward compatibility problem with selection criteria specimen_dang. Cannot be associated with IE-SPEC. Program assumes that the statistic is specimen_int_dang")
if 'specimen_int_dang' not in acceptance_criteria:
acceptance_criteria["specimen_int_dang"] = {}
acceptance_criteria["specimen_int_dang"]['value'] = float(
rec["specimen_dang"])
elif crit not in acceptance_criteria_list:
print(
"-W- WARNING: criteria code %s is not supported by PmagPy GUI. please check" % crit)
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['value'] = rec[crit]
acceptance_criteria[crit]['threshold_type'] = "inherited"
acceptance_criteria[crit]['decimal_points'] = -999
acceptance_criteria[crit]['category'] = None
# boolean flag
elif acceptance_criteria[crit]['threshold_type'] == 'bool':
if str(rec[crit]) in ['1', 'g', 'True', 'TRUE']:
acceptance_criteria[crit]['value'] = True
else:
acceptance_criteria[crit]['value'] = False
# criteria as flags
elif type(acceptance_criteria[crit]['threshold_type']) == list:
if str(rec[crit]) in acceptance_criteria[crit]['threshold_type']:
acceptance_criteria[crit]['value'] = str(rec[crit])
else:
print(
"-W- WARNING: data %s from criteria code %s and is not supported by PmagPy GUI. please check" % (crit, rec[crit]))
elif float(rec[crit]) == -999:
pass
else:
acceptance_criteria[crit]['value'] = float(rec[crit])
# add in metadata to each record
acceptance_criteria[crit].update(metadata_dict)
if "return_warnings" in kwargs:
return (acceptance_criteria, warnings)
else:
return(acceptance_criteria) | python | def read_criteria_from_file(path, acceptance_criteria, **kwargs):
'''
Read accceptance criteria from magic criteria file
# old format:
multiple lines. pmag_criteria_code defines the type of criteria
to deal with old format this function reads all the lines and ignore empty cells.
i.e., the program assumes that in each column there is only one value (in one of the lines)
special case in the old format:
specimen_dang has a value and pmag_criteria_code is IE-specimen.
The program assumes that the user means specimen_int_dang
# New format for thellier_gui and demag_gui:
one long line. pmag_criteria_code=ACCEPT
path is the full path to the criteria file
the function takes exiting acceptance_criteria
and updtate it with criteria from file
output:
acceptance_criteria={}
acceptance_criteria[MagIC Variable Names]={}
acceptance_criteria[MagIC Variable Names]['value']:
a number for acceptance criteria value
-999 for N/A
1/0 for True/False or Good/Bad
acceptance_criteria[MagIC Variable Names]['threshold_type']:
"low": lower cutoff value i.e. crit>=value pass criteria
"high": high cutoff value i.e. crit<=value pass criteria
[string1,string2,....]: for flags
acceptance_criteria[MagIC Variable Names]['decimal_points']:number of decimal points in rounding
(this is used in displaying criteria in the dialog box)
'''
warnings = []
acceptance_criteria_list = list(acceptance_criteria.keys())
if 'data_model' in list(kwargs.keys()) and kwargs['data_model'] == 3:
crit_data = acceptance_criteria # data already read in
else:
crit_data, file_type = magic_read(path)
if 'criteria' not in file_type:
if 'empty' in file_type:
print('-W- No criteria found: {} '.format(path))
else:
print(
'-W- {} could not be read and may be improperly formatted...'.format(path))
for rec in crit_data:
# gather metadata
metadata_dict = {'pmag_criteria_code': '',
'criteria_definition': '', 'er_citation_names': ''}
for metadata in metadata_dict:
if metadata in rec:
metadata_dict[metadata] = rec[metadata]
# check each record for correct name and compatibility
for crit in list(rec.keys()):
if crit == 'anisotropy_ftest_flag' and crit not in list(rec.keys()):
crit = 'specimen_aniso_ftest_flag' # convert legacy criterion to 2.5
rec[crit] = rec[crit].strip('\n')
if crit in ['pmag_criteria_code', 'criteria_definition', 'magic_experiment_names', 'er_citation_names']:
continue
elif rec[crit] == "":
continue
# this catches all the ones that are being overwritten
if crit in acceptance_criteria:
if acceptance_criteria[crit]['value'] not in [-999, '-999', -999]:
print(
"-W- You have multiple different criteria that both use column: {}.\nThe last will be used:\n{}.".format(crit, rec))
warn_string = 'multiple criteria for column: {} (only last will be used)'.format(
crit)
if warn_string not in warnings:
warnings.append(warn_string)
if crit == "specimen_dang" and "pmag_criteria_code" in list(rec.keys()) and "IE-SPEC" in rec["pmag_criteria_code"]:
crit = "specimen_int_dang"
print("-W- Found backward compatibility problem with selection criteria specimen_dang. Cannot be associated with IE-SPEC. Program assumes that the statistic is specimen_int_dang")
if 'specimen_int_dang' not in acceptance_criteria:
acceptance_criteria["specimen_int_dang"] = {}
acceptance_criteria["specimen_int_dang"]['value'] = float(
rec["specimen_dang"])
elif crit not in acceptance_criteria_list:
print(
"-W- WARNING: criteria code %s is not supported by PmagPy GUI. please check" % crit)
acceptance_criteria[crit] = {}
acceptance_criteria[crit]['value'] = rec[crit]
acceptance_criteria[crit]['threshold_type'] = "inherited"
acceptance_criteria[crit]['decimal_points'] = -999
acceptance_criteria[crit]['category'] = None
# boolean flag
elif acceptance_criteria[crit]['threshold_type'] == 'bool':
if str(rec[crit]) in ['1', 'g', 'True', 'TRUE']:
acceptance_criteria[crit]['value'] = True
else:
acceptance_criteria[crit]['value'] = False
# criteria as flags
elif type(acceptance_criteria[crit]['threshold_type']) == list:
if str(rec[crit]) in acceptance_criteria[crit]['threshold_type']:
acceptance_criteria[crit]['value'] = str(rec[crit])
else:
print(
"-W- WARNING: data %s from criteria code %s and is not supported by PmagPy GUI. please check" % (crit, rec[crit]))
elif float(rec[crit]) == -999:
pass
else:
acceptance_criteria[crit]['value'] = float(rec[crit])
# add in metadata to each record
acceptance_criteria[crit].update(metadata_dict)
if "return_warnings" in kwargs:
return (acceptance_criteria, warnings)
else:
return(acceptance_criteria) | Read accceptance criteria from magic criteria file
# old format:
multiple lines. pmag_criteria_code defines the type of criteria
to deal with old format this function reads all the lines and ignore empty cells.
i.e., the program assumes that in each column there is only one value (in one of the lines)
special case in the old format:
specimen_dang has a value and pmag_criteria_code is IE-specimen.
The program assumes that the user means specimen_int_dang
# New format for thellier_gui and demag_gui:
one long line. pmag_criteria_code=ACCEPT
path is the full path to the criteria file
the function takes exiting acceptance_criteria
and updtate it with criteria from file
output:
acceptance_criteria={}
acceptance_criteria[MagIC Variable Names]={}
acceptance_criteria[MagIC Variable Names]['value']:
a number for acceptance criteria value
-999 for N/A
1/0 for True/False or Good/Bad
acceptance_criteria[MagIC Variable Names]['threshold_type']:
"low": lower cutoff value i.e. crit>=value pass criteria
"high": high cutoff value i.e. crit<=value pass criteria
[string1,string2,....]: for flags
acceptance_criteria[MagIC Variable Names]['decimal_points']:number of decimal points in rounding
(this is used in displaying criteria in the dialog box) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L10186-L10299 |
PmagPy/PmagPy | pmagpy/pmag.py | add_flag | def add_flag(var, flag):
"""
for use when calling command-line scripts from withing a program.
if a variable is present, add its proper command_line flag.
return a string.
"""
if var:
var = flag + " " + str(var)
else:
var = ""
return var | python | def add_flag(var, flag):
"""
for use when calling command-line scripts from withing a program.
if a variable is present, add its proper command_line flag.
return a string.
"""
if var:
var = flag + " " + str(var)
else:
var = ""
return var | for use when calling command-line scripts from withing a program.
if a variable is present, add its proper command_line flag.
return a string. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L10407-L10417 |
PmagPy/PmagPy | pmagpy/pmag.py | get_named_arg | def get_named_arg(name, default_val=None, reqd=False):
"""
Extract the value after a command-line flag such as '-f' and return it.
If the command-line flag is missing, return default_val.
If reqd == True and the command-line flag is missing, throw an error.
Parameters
----------
name : str
command line flag, e.g. "-f"
default_val
value to use if command line flag is missing, e.g. "measurements.txt"
default is None
reqd : bool
throw error if reqd==True and command line flag is missing.
if reqd == True, default_val will be ignored.
default is False.
Returns
---------
Desired value from sys.argv if available, otherwise default_val.
"""
if name in sys.argv: # if the command line flag is found in sys.argv
ind = sys.argv.index(name)
return sys.argv[ind + 1]
if reqd: # if arg is required but not present
raise MissingCommandLineArgException(name)
return default_val | python | def get_named_arg(name, default_val=None, reqd=False):
"""
Extract the value after a command-line flag such as '-f' and return it.
If the command-line flag is missing, return default_val.
If reqd == True and the command-line flag is missing, throw an error.
Parameters
----------
name : str
command line flag, e.g. "-f"
default_val
value to use if command line flag is missing, e.g. "measurements.txt"
default is None
reqd : bool
throw error if reqd==True and command line flag is missing.
if reqd == True, default_val will be ignored.
default is False.
Returns
---------
Desired value from sys.argv if available, otherwise default_val.
"""
if name in sys.argv: # if the command line flag is found in sys.argv
ind = sys.argv.index(name)
return sys.argv[ind + 1]
if reqd: # if arg is required but not present
raise MissingCommandLineArgException(name)
return default_val | Extract the value after a command-line flag such as '-f' and return it.
If the command-line flag is missing, return default_val.
If reqd == True and the command-line flag is missing, throw an error.
Parameters
----------
name : str
command line flag, e.g. "-f"
default_val
value to use if command line flag is missing, e.g. "measurements.txt"
default is None
reqd : bool
throw error if reqd==True and command line flag is missing.
if reqd == True, default_val will be ignored.
default is False.
Returns
---------
Desired value from sys.argv if available, otherwise default_val. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L10420-L10447 |
PmagPy/PmagPy | pmagpy/pmag.py | merge_recs_headers | def merge_recs_headers(recs):
'''
take a list of recs [rec1,rec2,rec3....], each rec is a dictionary.
make sure that all recs have the same headers.
'''
headers = []
for rec in recs:
keys = list(rec.keys())
for key in keys:
if key not in headers:
headers.append(key)
for rec in recs:
for header in headers:
if header not in list(rec.keys()):
rec[header] = ""
return recs | python | def merge_recs_headers(recs):
'''
take a list of recs [rec1,rec2,rec3....], each rec is a dictionary.
make sure that all recs have the same headers.
'''
headers = []
for rec in recs:
keys = list(rec.keys())
for key in keys:
if key not in headers:
headers.append(key)
for rec in recs:
for header in headers:
if header not in list(rec.keys()):
rec[header] = ""
return recs | take a list of recs [rec1,rec2,rec3....], each rec is a dictionary.
make sure that all recs have the same headers. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L10459-L10474 |
PmagPy/PmagPy | pmagpy/pmag.py | resolve_file_name | def resolve_file_name(fname, dir_path='.'):
"""
Parse file name information and output full path.
Allows input as:
fname == /path/to/file.txt
or
fname == file.txt, dir_path == /path/to
Either way, returns /path/to/file.txt.
Used in conversion scripts.
Parameters
----------
fname : str
short filename or full path to file
dir_path : str
directory, optional
Returns
----------
full_file : str
full path/to/file.txt
"""
if not fname:
return ''
file_dir_path, file_name = os.path.split(fname)
if (not file_dir_path) or (file_dir_path == '.'):
full_file = os.path.join(dir_path, fname)
else:
full_file = fname
return os.path.realpath(full_file) | python | def resolve_file_name(fname, dir_path='.'):
"""
Parse file name information and output full path.
Allows input as:
fname == /path/to/file.txt
or
fname == file.txt, dir_path == /path/to
Either way, returns /path/to/file.txt.
Used in conversion scripts.
Parameters
----------
fname : str
short filename or full path to file
dir_path : str
directory, optional
Returns
----------
full_file : str
full path/to/file.txt
"""
if not fname:
return ''
file_dir_path, file_name = os.path.split(fname)
if (not file_dir_path) or (file_dir_path == '.'):
full_file = os.path.join(dir_path, fname)
else:
full_file = fname
return os.path.realpath(full_file) | Parse file name information and output full path.
Allows input as:
fname == /path/to/file.txt
or
fname == file.txt, dir_path == /path/to
Either way, returns /path/to/file.txt.
Used in conversion scripts.
Parameters
----------
fname : str
short filename or full path to file
dir_path : str
directory, optional
Returns
----------
full_file : str
full path/to/file.txt | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L10477-L10506 |
PmagPy/PmagPy | pmagpy/pmag.py | adjust_to_360 | def adjust_to_360(val, key):
"""
Take in a value and a key. If the key is of the type:
declination/longitude/azimuth/direction, adjust it to be within
the range 0-360 as required by the MagIC data model
"""
CheckDec = ['_dec', '_lon', '_azimuth', 'dip_direction']
adjust = False
for dec_key in CheckDec:
if dec_key in key:
if key.endswith(dec_key) or key.endswith('_'):
adjust = True
if not val:
return ''
elif not adjust:
return val
elif adjust:
new_val = float(val) % 360
if new_val != float(val):
print('-I- adjusted {} {} to 0=>360.: {}'.format(key, val, new_val))
return new_val | python | def adjust_to_360(val, key):
"""
Take in a value and a key. If the key is of the type:
declination/longitude/azimuth/direction, adjust it to be within
the range 0-360 as required by the MagIC data model
"""
CheckDec = ['_dec', '_lon', '_azimuth', 'dip_direction']
adjust = False
for dec_key in CheckDec:
if dec_key in key:
if key.endswith(dec_key) or key.endswith('_'):
adjust = True
if not val:
return ''
elif not adjust:
return val
elif adjust:
new_val = float(val) % 360
if new_val != float(val):
print('-I- adjusted {} {} to 0=>360.: {}'.format(key, val, new_val))
return new_val | Take in a value and a key. If the key is of the type:
declination/longitude/azimuth/direction, adjust it to be within
the range 0-360 as required by the MagIC data model | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L10539-L10559 |
PmagPy/PmagPy | pmagpy/pmag.py | adjust_all_to_360 | def adjust_all_to_360(dictionary):
"""
Take a dictionary and check each key/value pair.
If this key is of type: declination/longitude/azimuth/direction,
adjust it to be within 0-360 as required by the MagIC data model
"""
for key in dictionary:
dictionary[key] = adjust_to_360(dictionary[key], key)
return dictionary | python | def adjust_all_to_360(dictionary):
"""
Take a dictionary and check each key/value pair.
If this key is of type: declination/longitude/azimuth/direction,
adjust it to be within 0-360 as required by the MagIC data model
"""
for key in dictionary:
dictionary[key] = adjust_to_360(dictionary[key], key)
return dictionary | Take a dictionary and check each key/value pair.
If this key is of type: declination/longitude/azimuth/direction,
adjust it to be within 0-360 as required by the MagIC data model | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L10562-L10570 |
PmagPy/PmagPy | pmagpy/pmag.py | do_mag_map | def do_mag_map(date, lon_0=0, alt=0, file="", mod="cals10k",resolution='low'):
"""
returns lists of declination, inclination and intensities for lat/lon grid for
desired model and date.
Parameters:
_________________
date = Required date in decimal years (Common Era, negative for Before Common Era)
Optional Parameters:
______________
mod = model to use ('arch3k','cals3k','pfm9k','hfm10k','cals10k.2','shadif14k','cals10k.1b','custom')
file = l m g h formatted filefor custom model
lon_0 : central longitude for Hammer projection
alt = altitude
resolution = ['low','high'] default is low
Returns:
______________
Bdec=list of declinations
Binc=list of inclinations
B = list of total field intensities in nT
Br = list of radial field intensities
lons = list of longitudes evaluated
lats = list of latitudes evaluated
"""
if resolution=='low':
incr = 10 # we can vary to the resolution of the model
elif resolution=='high':
incr = 2 # we can vary to the resolution of the model
if lon_0 == 180:
lon_0 = 179.99
if lon_0 > 180:
lon_0 = lon_0-360.
# get some parameters for our arrays of lat/lon
lonmax = (lon_0 + 180.) % 360 + incr
lonmin = (lon_0 - 180.)
latmax = 90 + incr
# make a 1D array of longitudes (like elons)
lons = np.arange(lonmin, lonmax, incr)
# make a 1D array of longitudes (like elats)
lats = np.arange(-90, latmax, incr)
# set up some containers for the field elements
B = np.zeros((len(lats), len(lons)))
Binc = np.zeros((len(lats), len(lons)))
Bdec = np.zeros((len(lats), len(lons)))
Brad = np.zeros((len(lats), len(lons)))
if mod == 'custom' and file != '':
gh = []
lmgh = np.loadtxt(file).transpose()
gh.append(lmgh[2][0])
for i in range(1, lmgh.shape[1]):
gh.append(lmgh[2][i])
if lmgh[1][i] != 0:
gh.append(lmgh[3][i])
for j in range(len(lats)): # step through the latitudes
for i in range(len(lons)): # and the longitudes
# get the field elements
if mod == 'custom':
x, y, z, f = docustom(lons[i], lats[j], alt, gh)
else:
x, y, z, f = doigrf(
lons[i], lats[j], alt, date, mod=mod, file=file)
# turn them into polar coordinates
Dec, Inc, Int = cart2dir([x, y, z])
if mod != 'custom':
# convert the string to microtesla (from nT)
B[j][i] = Int * 1e-3
else:
B[j][i] = Int # convert the string to microtesla (from nT)
Binc[j][i] = Inc # store the inclination value
if Dec > 180:
Dec = Dec-360.
Bdec[j][i] = Dec # store the declination value
if mod != 'custom':
Brad[j][i] = z*1e-3
else:
Brad[j][i] = z
return Bdec, Binc, B, Brad, lons, lats | python | def do_mag_map(date, lon_0=0, alt=0, file="", mod="cals10k",resolution='low'):
"""
returns lists of declination, inclination and intensities for lat/lon grid for
desired model and date.
Parameters:
_________________
date = Required date in decimal years (Common Era, negative for Before Common Era)
Optional Parameters:
______________
mod = model to use ('arch3k','cals3k','pfm9k','hfm10k','cals10k.2','shadif14k','cals10k.1b','custom')
file = l m g h formatted filefor custom model
lon_0 : central longitude for Hammer projection
alt = altitude
resolution = ['low','high'] default is low
Returns:
______________
Bdec=list of declinations
Binc=list of inclinations
B = list of total field intensities in nT
Br = list of radial field intensities
lons = list of longitudes evaluated
lats = list of latitudes evaluated
"""
if resolution=='low':
incr = 10 # we can vary to the resolution of the model
elif resolution=='high':
incr = 2 # we can vary to the resolution of the model
if lon_0 == 180:
lon_0 = 179.99
if lon_0 > 180:
lon_0 = lon_0-360.
# get some parameters for our arrays of lat/lon
lonmax = (lon_0 + 180.) % 360 + incr
lonmin = (lon_0 - 180.)
latmax = 90 + incr
# make a 1D array of longitudes (like elons)
lons = np.arange(lonmin, lonmax, incr)
# make a 1D array of longitudes (like elats)
lats = np.arange(-90, latmax, incr)
# set up some containers for the field elements
B = np.zeros((len(lats), len(lons)))
Binc = np.zeros((len(lats), len(lons)))
Bdec = np.zeros((len(lats), len(lons)))
Brad = np.zeros((len(lats), len(lons)))
if mod == 'custom' and file != '':
gh = []
lmgh = np.loadtxt(file).transpose()
gh.append(lmgh[2][0])
for i in range(1, lmgh.shape[1]):
gh.append(lmgh[2][i])
if lmgh[1][i] != 0:
gh.append(lmgh[3][i])
for j in range(len(lats)): # step through the latitudes
for i in range(len(lons)): # and the longitudes
# get the field elements
if mod == 'custom':
x, y, z, f = docustom(lons[i], lats[j], alt, gh)
else:
x, y, z, f = doigrf(
lons[i], lats[j], alt, date, mod=mod, file=file)
# turn them into polar coordinates
Dec, Inc, Int = cart2dir([x, y, z])
if mod != 'custom':
# convert the string to microtesla (from nT)
B[j][i] = Int * 1e-3
else:
B[j][i] = Int # convert the string to microtesla (from nT)
Binc[j][i] = Inc # store the inclination value
if Dec > 180:
Dec = Dec-360.
Bdec[j][i] = Dec # store the declination value
if mod != 'custom':
Brad[j][i] = z*1e-3
else:
Brad[j][i] = z
return Bdec, Binc, B, Brad, lons, lats | returns lists of declination, inclination and intensities for lat/lon grid for
desired model and date.
Parameters:
_________________
date = Required date in decimal years (Common Era, negative for Before Common Era)
Optional Parameters:
______________
mod = model to use ('arch3k','cals3k','pfm9k','hfm10k','cals10k.2','shadif14k','cals10k.1b','custom')
file = l m g h formatted filefor custom model
lon_0 : central longitude for Hammer projection
alt = altitude
resolution = ['low','high'] default is low
Returns:
______________
Bdec=list of declinations
Binc=list of inclinations
B = list of total field intensities in nT
Br = list of radial field intensities
lons = list of longitudes evaluated
lats = list of latitudes evaluated | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L10596-L10675 |
PmagPy/PmagPy | pmagpy/pmag.py | doeqdi | def doeqdi(x, y, UP=False):
"""
Takes digitized x,y, data and returns the dec,inc, assuming an
equal area projection
Parameters
__________________
x : array of digitized x from point on equal area projection
y : array of igitized y from point on equal area projection
UP : if True, is an upper hemisphere projection
Output :
dec : declination
inc : inclination
"""
xp, yp = y, x # need to switch into geographic convention
r = np.sqrt(xp**2+yp**2)
z = 1.-r**2
t = np.arcsin(z)
if UP == 1:
t = -t
p = np.arctan2(yp, xp)
dec, inc = np.degrees(p) % 360, np.degrees(t)
return dec, inc | python | def doeqdi(x, y, UP=False):
"""
Takes digitized x,y, data and returns the dec,inc, assuming an
equal area projection
Parameters
__________________
x : array of digitized x from point on equal area projection
y : array of igitized y from point on equal area projection
UP : if True, is an upper hemisphere projection
Output :
dec : declination
inc : inclination
"""
xp, yp = y, x # need to switch into geographic convention
r = np.sqrt(xp**2+yp**2)
z = 1.-r**2
t = np.arcsin(z)
if UP == 1:
t = -t
p = np.arctan2(yp, xp)
dec, inc = np.degrees(p) % 360, np.degrees(t)
return dec, inc | Takes digitized x,y, data and returns the dec,inc, assuming an
equal area projection
Parameters
__________________
x : array of digitized x from point on equal area projection
y : array of igitized y from point on equal area projection
UP : if True, is an upper hemisphere projection
Output :
dec : declination
inc : inclination | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L10678-L10699 |
PmagPy/PmagPy | pmagpy/pmag.py | separate_directions | def separate_directions(di_block):
"""
Separates set of directions into two modes based on principal direction
Parameters
_______________
di_block : block of nested dec,inc pairs
Return
mode_1_block,mode_2_block : two lists of nested dec,inc pairs
"""
ppars = doprinc(di_block)
di_df = pd.DataFrame(di_block) # turn into a data frame for easy filtering
di_df.columns = ['dec', 'inc']
di_df['pdec'] = ppars['dec']
di_df['pinc'] = ppars['inc']
di_df['angle'] = angle(di_df[['dec', 'inc']].values,
di_df[['pdec', 'pinc']].values)
mode1_df = di_df[di_df['angle'] <= 90]
mode2_df = di_df[di_df['angle'] > 90]
mode1 = mode1_df[['dec', 'inc']].values.tolist()
mode2 = mode2_df[['dec', 'inc']].values.tolist()
def separate_directions(di_block):
    """
    Separates a set of directions into two modes based on the principal
    direction of the whole data set.

    Parameters
    ----------
    di_block : block of nested dec,inc pairs

    Returns
    -------
    mode_1_block, mode_2_block : two lists of nested dec,inc pairs
    """
    principal = doprinc(di_block)
    directions = pd.DataFrame(di_block, columns=['dec', 'inc'])
    # attach the principal direction to every row so angle() can run vectorized
    directions['pdec'] = principal['dec']
    directions['pinc'] = principal['inc']
    directions['angle'] = angle(directions[['dec', 'inc']].values,
                                directions[['pdec', 'pinc']].values)
    # directions within 90 degrees of the principal axis form mode 1
    near = directions[directions['angle'] <= 90]
    far = directions[directions['angle'] > 90]
    return (near[['dec', 'inc']].values.tolist(),
            far[['dec', 'inc']].values.tolist())
Parameters
_______________
di_block : block of nested dec,inc pairs
Return
mode_1_block,mode_2_block : two lists of nested dec,inc pairs | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L10702-L10724 |
PmagPy/PmagPy | pmagpy/pmag.py | dovandamme | def dovandamme(vgp_df):
"""
determine the S_b value for VGPs using the Vandamme (1994) method
for determining cutoff value for "outliers".
Parameters
___________
vgp_df : pandas DataFrame with required column "vgp_lat"
This should be in the desired coordinate system and assumes one polarity
Returns
_________
vgp_df : after applying cutoff
cutoff : colatitude cutoff
S_b : S_b of vgp_df after applying cutoff
"""
vgp_df['delta'] = 90.-vgp_df['vgp_lat'].values
ASD = np.sqrt(np.sum(vgp_df.delta**2)/(vgp_df.shape[0]-1))
A = 1.8 * ASD + 5.
delta_max = vgp_df.delta.max()
while delta_max > A:
delta_max = vgp_df.delta.max()
if delta_max < A:
return vgp_df, A, ASD
vgp_df = vgp_df[vgp_df.delta < delta_max]
ASD = np.sqrt(np.sum(vgp_df.delta**2)/(vgp_df.shape[0]-1))
def dovandamme(vgp_df):
    """
    Determine the S_b value for VGPs using the Vandamme (1994) iterative
    method for fixing the colatitude cutoff for "outliers".

    Parameters
    ----------
    vgp_df : pandas DataFrame with required column "vgp_lat"
        This should be in the desired coordinate system and assumes one polarity

    Returns
    -------
    vgp_df : the input DataFrame after applying the cutoff
    cutoff : colatitude cutoff (A = 1.8 * S_b + 5)
    S_b : S_b of vgp_df after applying the cutoff
    """
    vgp_df['delta'] = 90. - vgp_df['vgp_lat'].values  # VGP colatitudes
    ASD = np.sqrt(np.sum(vgp_df.delta ** 2) / (vgp_df.shape[0] - 1))
    A = 1.8 * ASD + 5.  # Vandamme (1994) optimal cutoff
    # Iteratively trim the largest colatitude until every point lies inside A.
    # BUG FIX: the original looped `while delta_max > A` with the only return
    # inside the loop, so a data set already inside the cutoff fell off the
    # end of the function and returned None (crashing callers that unpack it).
    while True:
        delta_max = vgp_df.delta.max()
        if delta_max < A:
            return vgp_df, A, ASD
        vgp_df = vgp_df[vgp_df.delta < delta_max]
        ASD = np.sqrt(np.sum(vgp_df.delta ** 2) / (vgp_df.shape[0] - 1))
        A = 1.8 * ASD + 5.
for determining cutoff value for "outliers".
Parameters
___________
vgp_df : pandas DataFrame with required column "vgp_lat"
This should be in the desired coordinate system and assumes one polarity
Returns
_________
vgp_df : after applying cutoff
cutoff : colatitude cutoff
S_b : S_b of vgp_df after applying cutoff | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L10727-L10752 |
PmagPy/PmagPy | pmagpy/pmag.py | scalc_vgp_df | def scalc_vgp_df(vgp_df, anti=0, rev=0, cutoff=180., kappa=0, n=0, spin=0, v=0, boot=0, mm97=0, nb=1000):
"""
Calculates Sf for a dataframe with VGP Lat., and optional Fisher's k, site latitude and N information can be used to correct for within site scatter (McElhinny & McFadden, 1997)
Parameters
_________
df : Pandas Dataframe with columns
REQUIRED:
vgp_lat : VGP latitude
ONLY REQUIRED for MM97 correction:
dir_k : Fisher kappa estimate
dir_n_samples : number of samples per site
lat : latitude of the site
mm97 : if True, will do the correction for within site scatter
OPTIONAL:
boot : if True. do bootstrap
nb : number of bootstraps, default is 1000
Returns
_____________
N : number of VGPs used in calculation
S : S
low : 95% confidence lower bound [0 if boot=0]
high 95% confidence upper bound [0 if boot=0]
cutoff : cutoff used in calculation of S
"""
vgp_df['delta'] = 90.-vgp_df.vgp_lat.values
# filter by cutoff, kappa, and n
vgp_df = vgp_df[vgp_df.delta <= cutoff]
vgp_df = vgp_df[vgp_df.dir_k >= kappa]
vgp_df = vgp_df[vgp_df.dir_n_samples >= n]
if spin: # do transformation to pole
Pvgps = vgp_df[['vgp_lon', 'vgp_lat']].values
ppars = doprinc(Pvgps)
Bdirs = np.full((Pvgps.shape[0]), ppars['dec']-180.)
Bdips = np.full((Pvgps.shape[0]), 90.-ppars['inc'])
Pvgps = np.column_stack((Pvgps, Bdirs, Bdips))
lons, lats = dotilt_V(Pvgps)
vgp_df['vgp_lon'] = lons
vgp_df['vgp_lat'] = lats
vgp_df['delta'] = 90.-vgp_df.vgp_lat
if anti:
print('flipping reverse')
vgp_rev = vgp_df[vgp_df.vgp_lat < 0]
vgp_norm = vgp_df[vgp_df.vgp_lat >= 0]
vgp_anti = vgp_rev
vgp_anti['vgp_lat'] = -vgp_anti['vgp_lat']
vgp_anti['vgp_lon'] = (vgp_anti['vgp_lon']-180) % 360
vgp_df = pd.concat([vgp_norm, vgp_anti], sort=True)
if rev:
vgp_df = vgp_df[vgp_df.vgp_lat < 0] # use only reverse data
if v:
vgp_df, cutoff, S_v = dovandamme(vgp_df) # do vandamme cutoff
S_B = get_sb_df(vgp_df, mm97=mm97) # get
N = vgp_df.shape[0]
SBs, low, high = [], 0, 0
if boot:
for i in range(nb): # now do bootstrap
bs_df = vgp_df.sample(n=N, replace=True)
Sb_bs = get_sb_df(bs_df)
SBs.append(Sb_bs)
SBs.sort()
low = SBs[int(.025 * nb)]
high = SBs[int(.975 * nb)]
def scalc_vgp_df(vgp_df, anti=0, rev=0, cutoff=180., kappa=0, n=0, spin=0, v=0, boot=0, mm97=0, nb=1000):
    """
    Calculates Sf for a dataframe with VGP Lat., and optional Fisher's k,
    site latitude and N information can be used to correct for within site
    scatter (McElhinny & McFadden, 1997)

    Parameters
    ----------
    vgp_df : Pandas Dataframe with columns
        REQUIRED:
        vgp_lat :  VGP latitude
        ONLY REQUIRED for MM97 correction:
        dir_k : Fisher kappa estimate
        dir_n_samples : number of samples per site
        lat : latitude of the site
        mm97 : if True, will do the correction for within site scatter
        OPTIONAL:
        boot : if True. do bootstrap
        nb : number of bootstraps, default is 1000

    Returns
    -------
    N : number of VGPs used in calculation
    S : S
    low : 95% confidence lower bound [0 if boot=0]
    high : 95% confidence upper bound [0 if boot=0]
    cutoff : cutoff used in calculation of S
    """
    vgp_df = vgp_df.copy()  # work on a copy so the caller's frame is not mutated
    vgp_df['delta'] = 90. - vgp_df.vgp_lat.values
    # filter by cutoff, kappa, and n
    vgp_df = vgp_df[vgp_df.delta <= cutoff]
    vgp_df = vgp_df[vgp_df.dir_k >= kappa]
    vgp_df = vgp_df[vgp_df.dir_n_samples >= n]
    if spin:  # do transformation to pole
        Pvgps = vgp_df[['vgp_lon', 'vgp_lat']].values
        ppars = doprinc(Pvgps)
        Bdirs = np.full((Pvgps.shape[0]), ppars['dec'] - 180.)
        Bdips = np.full((Pvgps.shape[0]), 90. - ppars['inc'])
        Pvgps = np.column_stack((Pvgps, Bdirs, Bdips))
        lons, lats = dotilt_V(Pvgps)
        vgp_df['vgp_lon'] = lons
        vgp_df['vgp_lat'] = lats
        vgp_df['delta'] = 90. - vgp_df.vgp_lat
    if anti:
        print('flipping reverse')
        vgp_rev = vgp_df[vgp_df.vgp_lat < 0]
        vgp_norm = vgp_df[vgp_df.vgp_lat >= 0]
        # BUG FIX: the original aliased the filtered slice (vgp_anti = vgp_rev)
        # and then assigned into it, triggering pandas SettingWithCopy behavior
        # and potentially writing through to the source frame; take a real copy.
        vgp_anti = vgp_rev.copy()
        vgp_anti['vgp_lat'] = -vgp_anti['vgp_lat']
        vgp_anti['vgp_lon'] = (vgp_anti['vgp_lon'] - 180) % 360
        vgp_df = pd.concat([vgp_norm, vgp_anti], sort=True)
    if rev:
        vgp_df = vgp_df[vgp_df.vgp_lat < 0]  # use only reverse data
    if v:
        vgp_df, cutoff, S_v = dovandamme(vgp_df)  # do vandamme cutoff
    S_B = get_sb_df(vgp_df, mm97=mm97)  # get S_B of the (possibly trimmed) set
    N = vgp_df.shape[0]
    SBs, low, high = [], 0, 0
    if boot:
        for i in range(nb):  # now do bootstrap
            bs_df = vgp_df.sample(n=N, replace=True)
            # NOTE(review): bootstrap replicates are computed WITHOUT the mm97
            # correction even when mm97 is requested -- confirm this is intended
            Sb_bs = get_sb_df(bs_df)
            SBs.append(Sb_bs)
        SBs.sort()
        low = SBs[int(.025 * nb)]   # 2.5th percentile
        high = SBs[int(.975 * nb)]  # 97.5th percentile
    return N, S_B, low, high, cutoff
Parameters
_________
df : Pandas Dataframe with columns
REQUIRED:
vgp_lat : VGP latitude
ONLY REQUIRED for MM97 correction:
dir_k : Fisher kappa estimate
dir_n_samples : number of samples per site
lat : latitude of the site
mm97 : if True, will do the correction for within site scatter
OPTIONAL:
boot : if True. do bootstrap
nb : number of bootstraps, default is 1000
Returns
_____________
N : number of VGPs used in calculation
S : S
low : 95% confidence lower bound [0 if boot=0]
high 95% confidence upper bound [0 if boot=0]
cutoff : cutoff used in calculation of S | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L10755-L10819 |
PmagPy/PmagPy | pmagpy/pmag.py | watsons_f | def watsons_f(DI1, DI2):
"""
calculates Watson's F statistic (equation 11.16 in Essentials text book).
Parameters
_________
DI1 : nested array of [Dec,Inc] pairs
DI2 : nested array of [Dec,Inc] pairs
Returns
_______
F : Watson's F
Fcrit : critical value from F table
"""
# first calculate R for the combined data set, then R1 and R2 for each individually.
# create a new array from two smaller ones
DI = np.concatenate((DI1, DI2), axis=0)
fpars = fisher_mean(DI) # re-use our functionfrom problem 1b
fpars1 = fisher_mean(DI1)
fpars2 = fisher_mean(DI2)
N = fpars['n']
R = fpars['r']
R1 = fpars1['r']
R2 = fpars2['r']
F = (N-2.)*((R1+R2-R)/(N-R1-R2))
Fcrit = fcalc(2, 2*(N-2))
def watsons_f(DI1, DI2):
    """
    Calculates Watson's F statistic (equation 11.16 in Essentials text book).

    Parameters
    ----------
    DI1 : nested array of [Dec,Inc] pairs
    DI2 : nested array of [Dec,Inc] pairs

    Returns
    -------
    F : Watson's F
    Fcrit : critical value from F table
    """
    # resultant vector length R for the combined set, then R1/R2 for each set
    both = np.concatenate((DI1, DI2), axis=0)
    combined_stats = fisher_mean(both)
    N = combined_stats['n']
    R = combined_stats['r']
    R1 = fisher_mean(DI1)['r']
    R2 = fisher_mean(DI2)['r']
    # equation 11.16: F = (N-2) * (R1 + R2 - R) / (N - R1 - R2)
    F = (N - 2.) * ((R1 + R2 - R) / (N - R1 - R2))
    return F, fcalc(2, 2 * (N - 2))
Parameters
_________
DI1 : nested array of [Dec,Inc] pairs
DI2 : nested array of [Dec,Inc] pairs
Returns
_______
F : Watson's F
Fcrit : critical value from F table | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L10822-L10848 |
PmagPy/PmagPy | pmagpy/pmag.py | apwp | def apwp(data, print_results=False):
"""
calculates expected pole positions and directions for given plate, location and age
Parameters
_________
data : [plate,lat,lon,age]
plate : [NA, SA, AF, IN, EU, AU, ANT, GL]
NA : North America
SA : South America
AF : Africa
IN : India
EU : Eurasia
AU : Australia
ANT: Antarctica
GL : Greenland
lat/lon : latitude/longitude in degrees N/E
age : age in millions of years
print_results : if True will print out nicely formatted results
Returns
_________
if print_results is False, [Age,Paleolat, Dec, Inc, Pole_lat, Pole_lon]
"""
pole_lat, pole_lon = bc02(data) # get the pole for these parameters
# get the declination and inclination for that pole
ExpDec, ExpInc = vgp_di(pole_lat, pole_lon, data[1], data[2])
# convert the inclination to paleo latitude
paleo_lat = magnetic_lat(ExpInc)
if print_results:
# print everything out
print(' Age Paleolat. Dec. Inc. Pole_lat. Pole_Long.')
print('%7.1f %7.1f %7.1f %7.1f %7.1f %7.1f\n'
% (data[3], paleo_lat, ExpDec, ExpInc, pole_lat, pole_lon))
else:
def apwp(data, print_results=False):
    """
    calculates expected pole positions and directions for given plate, location and age

    Parameters
    ----------
    data : [plate,lat,lon,age]
        plate : [NA, SA, AF, IN, EU, AU, ANT, GL]
            NA : North America
            SA : South America
            AF : Africa
            IN : India
            EU : Eurasia
            AU : Australia
            ANT: Antarctica
            GL : Greenland
        lat/lon : latitude/longitude in degrees N/E
        age : age in millions of years
    print_results : if True will print out nicely formatted results

    Returns
    -------
    if print_results is False, [Age,Paleolat, Dec, Inc, Pole_lat, Pole_lon]
    """
    site_lat, site_lon, age = data[1], data[2], data[3]
    # expected paleopole for this plate and age (via bc02 -- presumably the
    # Besse & Courtillot 2002 synthetic APWP; confirm against bc02 itself)
    pole_lat, pole_lon = bc02(data)
    # expected direction at the site from that pole
    ExpDec, ExpInc = vgp_di(pole_lat, pole_lon, site_lat, site_lon)
    # dipole formula converts the expected inclination into a paleolatitude
    paleo_lat = magnetic_lat(ExpInc)
    if not print_results:
        return [age, paleo_lat, ExpDec, ExpInc, pole_lat, pole_lon]
    # print everything out
    print(' Age Paleolat. Dec. Inc. Pole_lat. Pole_Long.')
    print('%7.1f %7.1f %7.1f %7.1f %7.1f %7.1f\n'
          % (age, paleo_lat, ExpDec, ExpInc, pole_lat, pole_lon))
Parameters
_________
data : [plate,lat,lon,age]
plate : [NA, SA, AF, IN, EU, AU, ANT, GL]
NA : North America
SA : South America
AF : Africa
IN : India
EU : Eurasia
AU : Australia
ANT: Antarctica
GL : Greenland
lat/lon : latitude/longitude in degrees N/E
age : age in millions of years
print_results : if True will print out nicely formatted results
Returns
_________
if print_results is False, [Age,Paleolat, Dec, Inc, Pole_lat, Pole_lon] | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L10851-L10885 |
PmagPy/PmagPy | pmagpy/pmag.py | chart_maker | def chart_maker(Int, Top, start=100, outfile='chart.txt'):
"""
Makes a chart for performing IZZI experiments. Print out the file and
tape it to the oven. This chart will help keep track of the different
steps.
Z : performed in zero field - enter the temperature XXX.0 in the sio
formatted measurement file created by the LabView program
I : performed in the lab field written at the top of the form
P : a pTRM step - performed at the temperature and in the lab field.
Parameters
__________
Int : list of intervals [e.g., 50,10,5]
Top : list of upper bounds for each interval [e.g., 500, 550, 600]
start : first temperature step, default is 100
outfile : name of output file, default is 'chart.txt'
Output
_________
creates a file with:
file: write down the name of the measurement file
field: write down the lab field for the infield steps (in uT)
the type of step (Z: zerofield, I: infield, P: pTRM step
temperature of the step and code for SIO-like treatment steps
XXX.0 [zero field]
XXX.1 [in field]
XXX.2 [pTRM check] - done in a lab field
date : date the step was performed
run # : an optional run number
zones I-III : field in the zones in the oven
start : time the run was started
sp : time the setpoint was reached
cool : time cooling started
"""
low, k, iz = start, 0, 0
Tzero = []
f = open('chart.txt', 'w')
vline = '\t%s\n' % (
' | | | | | | | |')
hline = '______________________________________________________________________________\n'
f.write('file:_________________ field:___________uT\n\n\n')
f.write('%s\n' % (
' date | run# | zone I | zone II | zone III | start | sp | cool|'))
f.write(hline)
f.write('\t%s' % (' 0.0'))
f.write(vline)
f.write(hline)
for k in range(len(Top)):
for t in range(low, Top[k]+Int[k], Int[k]):
if iz == 0:
Tzero.append(t) # zero field first step
f.write('%s \t %s' % ('Z', str(t)+'.'+str(iz)))
f.write(vline)
f.write(hline)
if len(Tzero) > 1:
f.write('%s \t %s' % ('P', str(Tzero[-2])+'.'+str(2)))
f.write(vline)
f.write(hline)
iz = 1
# infield after zero field first
f.write('%s \t %s' % ('I', str(t)+'.'+str(iz)))
f.write(vline)
f.write(hline)
# f.write('%s \t %s'%('T',str(t)+'.'+str(3))) # print second zero field (tail check)
# f.write(vline)
# f.write(hline)
elif iz == 1:
# infield first step
f.write('%s \t %s' % ('I', str(t)+'.'+str(iz)))
f.write(vline)
f.write(hline)
iz = 0
# zero field step (after infield)
f.write('%s \t %s' % ('Z', str(t)+'.'+str(iz)))
f.write(vline)
f.write(hline)
try:
low = Top[k]+Int[k+1] # increment to next temp step
except:
f.close()
def chart_maker(Int, Top, start=100, outfile='chart.txt'):
    """
    Makes a chart for performing IZZI experiments. Print out the file and
    tape it to the oven. This chart will help keep track of the different
    steps.
    Z : performed in zero field - enter the temperature XXX.0 in the sio
    formatted measurement file created by the LabView program
    I : performed in the lab field written at the top of the form
    P : a pTRM step - performed at the temperature and in the lab field.

    Parameters
    ----------
    Int : list of intervals [e.g., 50,10,5]
    Top : list of upper bounds for each interval [e.g., 500, 550, 600]
    start : first temperature step, default is 100
    outfile : name of output file, default is 'chart.txt'

    Output
    ------
    creates a file with:
        file: write down the name of the measurement file
        field: write down the lab field for the infield steps (in uT)
        the type of step (Z: zerofield, I: infield, P: pTRM step)
        temperature of the step and code for SIO-like treatment steps
            XXX.0 [zero field]
            XXX.1 [in field]
            XXX.2 [pTRM check] - done in a lab field
        date / run# / zone I-III / start / sp / cool columns to fill in by hand
    """
    # iz toggles the ordering: 0 -> zero-field first (ZI), 1 -> infield first (IZ)
    low, iz = start, 0
    Tzero = []  # temperatures of zero-field-first steps (targets for pTRM checks)
    vline = '\t%s\n' % (' | | | | | | | |')
    hline = '______________________________________________________________________________\n'
    # BUG FIX: the original ignored the 'outfile' parameter (always wrote
    # ./chart.txt), leaked the file handle when len(Int) > len(Top), and used
    # a bare except on IndexError as its loop terminator.
    with open(outfile, 'w') as f:
        f.write('file:_________________ field:___________uT\n\n\n')
        f.write('%s\n' % (
            ' date | run# | zone I | zone II | zone III | start | sp | cool|'))
        f.write(hline)
        f.write('\t%s' % (' 0.0'))
        f.write(vline)
        f.write(hline)
        for k in range(len(Top)):
            for t in range(low, Top[k]+Int[k], Int[k]):
                if iz == 0:
                    Tzero.append(t)  # zero field first step
                    f.write('%s \t %s' % ('Z', str(t)+'.'+str(iz)))
                    f.write(vline)
                    f.write(hline)
                    if len(Tzero) > 1:
                        # pTRM check back at the previous zero-field temperature
                        f.write('%s \t %s' % ('P', str(Tzero[-2])+'.'+str(2)))
                        f.write(vline)
                        f.write(hline)
                    iz = 1
                    # infield after zero field first
                    f.write('%s \t %s' % ('I', str(t)+'.'+str(iz)))
                    f.write(vline)
                    f.write(hline)
                elif iz == 1:
                    # infield first step
                    f.write('%s \t %s' % ('I', str(t)+'.'+str(iz)))
                    f.write(vline)
                    f.write(hline)
                    iz = 0
                    # zero field step (after infield)
                    f.write('%s \t %s' % ('Z', str(t)+'.'+str(iz)))
                    f.write(vline)
                    f.write(hline)
            if k+1 < len(Int):
                low = Top[k]+Int[k+1]  # next interval starts one step above this bound
            else:
                break  # no finer interval left (original relied on IndexError here)
    print("output stored in: %s" % outfile)
tape it to the oven. This chart will help keep track of the different
steps.
Z : performed in zero field - enter the temperature XXX.0 in the sio
formatted measurement file created by the LabView program
I : performed in the lab field written at the top of the form
P : a pTRM step - performed at the temperature and in the lab field.
Parameters
__________
Int : list of intervals [e.g., 50,10,5]
Top : list of upper bounds for each interval [e.g., 500, 550, 600]
start : first temperature step, default is 100
outfile : name of output file, default is 'chart.txt'
Output
_________
creates a file with:
file: write down the name of the measurement file
field: write down the lab field for the infield steps (in uT)
the type of step (Z: zerofield, I: infield, P: pTRM step
temperature of the step and code for SIO-like treatment steps
XXX.0 [zero field]
XXX.1 [in field]
XXX.2 [pTRM check] - done in a lab field
date : date the step was performed
run # : an optional run number
zones I-III : field in the zones in the oven
start : time the run was started
sp : time the setpoint was reached
cool : time cooling started | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L10888-L10971 |
PmagPy/PmagPy | pmagpy/pmag.py | import_basemap | def import_basemap():
"""
Try to import Basemap and print out a useful help message
if Basemap is either not installed or is missing required
environment variables.
Returns
---------
has_basemap : bool
Basemap : Basemap package if possible else None
"""
Basemap = None
has_basemap = True
has_cartopy = import_cartopy()[0]
try:
from mpl_toolkits.basemap import Basemap
WARNINGS['has_basemap'] = True
except ImportError:
has_basemap = False
# if they have installed cartopy, no warning is needed
if has_cartopy:
return has_basemap, False
# if they haven't installed Basemap or cartopy, they need to be warned
if not WARNINGS['basemap']:
print(
"-W- You haven't installed a module for plotting maps (cartopy or Basemap)")
print(" Recommended: install cartopy. With conda:")
print(" conda install cartopy")
print(
" For more information, see http://earthref.org/PmagPy/Cookbook#getting_python")
except (KeyError, FileNotFoundError):
has_basemap = False
# if cartopy is installed, no warning is needed
if has_cartopy:
return has_basemap, False
if not WARNINGS['basemap']:
print('-W- Basemap is installed but could not be imported.')
print(' You are probably missing a required environment variable')
print(
' If you need to use Basemap, you will need to run this program or notebook in a conda env.')
print(' For more on how to create a conda env, see: https://conda.io/docs/user-guide/tasks/manage-environments.html')
print(
' Recommended alternative: install cartopy for plotting maps. With conda:')
print(' conda install cartopy')
if has_basemap and not has_cartopy:
print("-W- You have installed Basemap but not cartopy.")
print(" In the future, Basemap will no longer be supported.")
print(" To continue to make maps, install using conda:")
print(' conda install cartopy')
WARNINGS['basemap'] = True
def import_basemap():
    """
    Try to import Basemap and print out a useful help message
    if Basemap is either not installed or is missing required
    environment variables.

    Returns
    ---------
    has_basemap : bool
    Basemap : Basemap package if possible else None
    """
    Basemap = None
    has_basemap = True
    # import_cartopy() returns (has_cartopy, cartopy); only the flag is needed
    has_cartopy = import_cartopy()[0]
    try:
        from mpl_toolkits.basemap import Basemap
        WARNINGS['has_basemap'] = True
    except ImportError:
        # Basemap is simply not installed
        has_basemap = False
        # if they have installed cartopy, no warning is needed
        if has_cartopy:
            return has_basemap, False
        # if they haven't installed Basemap or cartopy, they need to be warned
        # (only once per session -- WARNINGS is a module-level flag dict)
        if not WARNINGS['basemap']:
            print(
                "-W- You haven't installed a module for plotting maps (cartopy or Basemap)")
            print(" Recommended: install cartopy. With conda:")
            print(" conda install cartopy")
            print(
                " For more information, see http://earthref.org/PmagPy/Cookbook#getting_python")
    except (KeyError, FileNotFoundError):
        # Basemap imported but blew up -- per the printed hint this is
        # presumably a missing environment variable (TODO confirm which)
        has_basemap = False
        # if cartopy is installed, no warning is needed
        if has_cartopy:
            return has_basemap, False
        if not WARNINGS['basemap']:
            print('-W- Basemap is installed but could not be imported.')
            print(' You are probably missing a required environment variable')
            print(
                ' If you need to use Basemap, you will need to run this program or notebook in a conda env.')
            print(' For more on how to create a conda env, see: https://conda.io/docs/user-guide/tasks/manage-environments.html')
            print(
                ' Recommended alternative: install cartopy for plotting maps. With conda:')
            print(' conda install cartopy')
    # deprecation notice for users who have Basemap but not cartopy
    if has_basemap and not has_cartopy:
        print("-W- You have installed Basemap but not cartopy.")
        print(" In the future, Basemap will no longer be supported.")
        print(" To continue to make maps, install using conda:")
        print(' conda install cartopy')
    # mark the one-time Basemap warning as handled for this session
    WARNINGS['basemap'] = True
    return has_basemap, Basemap
if Basemap is either not installed or is missing required
environment variables.
Returns
---------
has_basemap : bool
Basemap : Basemap package if possible else None | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L10974-L11026 |
PmagPy/PmagPy | pmagpy/pmag.py | import_cartopy | def import_cartopy():
"""
Try to import cartopy and print out a help message
if it is not installed
Returns
---------
has_cartopy : bool
cartopy : cartopy package if available else None
"""
cartopy = None
has_cartopy = True
try:
import cartopy
WARNINGS['has_cartopy'] = True
except ImportError:
has_cartopy = False
if not WARNINGS['cartopy']:
print('-W- cartopy is not installed')
print(' If you want to make maps, install using conda:')
print(' conda install cartopy')
WARNINGS['cartopy'] = True
def import_cartopy():
    """
    Attempt to import the cartopy package, printing a help message
    (once per session) if it is not installed.

    Returns
    -------
    has_cartopy : bool
    cartopy : cartopy package if available else None
    """
    cartopy = None
    has_cartopy = True
    try:
        import cartopy
    except ImportError:
        has_cartopy = False
        # warn only once per session; WARNINGS is a module-level flag dict
        if not WARNINGS['cartopy']:
            print('-W- cartopy is not installed')
            print(' If you want to make maps, install using conda:')
            print(' conda install cartopy')
            WARNINGS['cartopy'] = True
    else:
        WARNINGS['has_cartopy'] = True
    return has_cartopy, cartopy
if it is not installed
Returns
---------
has_cartopy : bool
cartopy : cartopy package if available else None | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L11029-L11051 |
PmagPy/PmagPy | pmagpy/pmag.py | age_to_BP | def age_to_BP(age, age_unit):
"""
Convert an age value into the equivalent in time Before Present(BP) where Present is 1950
Returns
---------
ageBP : number
"""
ageBP = -1e9
if age_unit == "Years AD (+/-)" or age_unit == "Years Cal AD (+/-)":
if age < 0:
age = age+1 # to correct for there being no 0 AD
ageBP = 1950-age
elif age_unit == "Years BP" or age_unit == "Years Cal BP":
ageBP = age
elif age_unit == "ka":
ageBP = age*1000
elif age_unit == "Ma":
ageBP = age*1e6
elif age_unit == "Ga":
ageBP = age*1e9
else:
print("Age unit invalid. Age set to -1.0e9")
def age_to_BP(age, age_unit):
    """
    Convert an age value into the equivalent in time Before Present (BP),
    where Present is 1950.

    Returns
    -------
    ageBP : number
    """
    if age_unit in ("Years AD (+/-)", "Years Cal AD (+/-)"):
        # there is no year 0 AD, so bump BC (negative) years up by one
        return 1950 - (age + 1 if age < 0 else age)
    if age_unit in ("Years BP", "Years Cal BP"):
        return age
    scale = {"ka": 1000, "Ma": 1e6, "Ga": 1e9}
    if age_unit in scale:
        return age * scale[age_unit]
    print("Age unit invalid. Age set to -1.0e9")
    return -1e9
Returns
---------
ageBP : number | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L11054-L11078 |
def vocab_convert(vocab, standard, key=''):
    """
    Converts MagIC database terms (method codes, geologic_types, etc) to
    other standards. May not be comprehensive for each standard; terms are
    added as people need them and may not be up-to-date.

    Parameters
    ----------
    vocab : str
        MagIC controlled-vocabulary term to convert
    standard : str
        name of the target standard (case-insensitive); currently only
        'geomagia' is supported
    key : str, optional
        can be used to distinguish vocab terms that exist in two different
        lists (currently unused)

    Returns
    -------
    str : value of the MagIC vocab in the standard requested; a
        "Fail:..." marker for null/whitespace input; the input vocab
        itself (with a printed warning) when no mapping exists

    Example
    -------
    vocab_convert('Egypt','GEOMAGIA') will return '1'
    """
    places_to_geomagia = {
        'Egypt': "1",
        'Japan': "2",
        'France': "3",
        'Ukraine': "5",
        'India': "6",
        'China': "7",
        'Finland': "8",
        'Greece': "9",
        'Italy': "11",
        'Switzerland': "12",
        'Bulgaria': "13",
        'Syria': "14",
        'Hungary': "15",
        'East Pacific Ridge': "17",
        'Hawaii': "18",
        'Morocco': "19",
        'Australia': "20",
        'Georgia': "21",
        'Azerbaijan': "22",
        'Spain': "24",
        'England': "25",
        'Czech Republic': "26",
        'Mexico': "27",
        'Iraq': "28",
        'Israel': "29",
        'Iran': "30",
        'Uzbekistan': "31",
        'Turkmenistan': "32",
        'Mongolia': "33",
        'Iceland': "34",
        'New Zealand': "35",
        'Amsterdam Island': "36",
        'Guadeloupe': "37",
        'Mid Atlantic Ridge': "38",
        'Austria': "39",
        'Belgium': "40",
        'Romania': "41",
        'Guatemala': "42",
        'El Salvador': "43",
        'Canary Islands': "45",
        'Moldova': "46",
        'Latvia': "47",
        'Lithuania': "48",
        'Russia': "49",
        'Germany': "51",
        'Martinique': "52",
        'Netherlands': "53",
        'Turkey': "54",
        'Denmark': "55",
        'Cameroon': "56",
        'Honduras': "57",
        'Jordan': "58",
        'Brazil': "59",
        'Estonia': "61",
        'Sweden': "62",
        'Peru': "63",
        'Bolivia': "64",
        'Ecuador': "65",
        'Ontario': "66",
        'New Mexico': "67",
        'Arizona': "68",
        'California': "69",
        'Colorado': "70",
        'Utah': "71",
        'Washington': "72",
        'Oregon': "73",
        'British Columbia': "74",
        'Idaho': "75",
        'Arkansas': "76",
        'Tennessee': "78",
        'Serbia': "79",
        'Kosovo': "80",
        'Portugal': "81",
        'Thailand': "82",
        'South Korea': "83",
        'Kazakhstan': "84",
        'Nebraska': "85",
        'La Reunion': "86",
        'Cyprus': "87",
        'Papua New Guinea': "88",
        'Vanuatu': "89",
        'Fiji': "90",
        'Argentina': "91",
        'Tunisia': "92",
        'Mali': "93",
        'Senegal': "95",
        'Alaska': "96",
        'North Atlantic': "97",
        'South Atlantic': "98",
        'Beaufort Sea': "99",
        'Chukchi Sea': "100",
        'Kyrgyzstan': "101",
        'Indonesia': "102",
        'Azores': "103",
        'Quebec': "104",
        'Norway': "105",
        'Northern Ireland': "106",
        'Wales': "107",
        'Scotland': "108",
        'Virginia': "109",
        'North West Pacific': "110",
        'Mediterranean': "111",
        'Slovakia': "121",
        'Poland': "124"
    }
    geologic_types_to_geomagia = {
        "Baked Clay": "2",
        "Tile": "3",
        "Lava": "4",
        "Pottery": "5",
        "Sun Dried Object": "6",
        "Porcelain": "7",
        "Ceramic": "8",
        "Kiln": "9",
        "Oven or Hearth (GEOMAGIA Only)": "10",
        "Mixed Archeological Objects": "11",
        "Slag": "12",
        "Baked Rock": "13",
        "Fresco": "14",
        "Mosaic": "15",
        "Wall": "16",
        "Bath": "17",
        "Burnt Floor": "18",
        "Funeral Pyre": "19",
        "Hypocaust": "20",
        "Burnt Pit": "21",
        "Bell Mould": "22",
        "Smoking Chamber": "23",
        "Baked Mud": "24",
        "Volcanic Ash": "25",
        "Burnt Structure": "26",
        "Burnt Castle Wall": "27",
        "Charcoal Pile": "28",
        "Burnt Earth": "29",
        "Vitrified Object": "30",
        "Unbaked Sediment": "31",
        "Tuyere": "32",
        "Sauna": "33",
        "Pit Structure": "35",
        "Room": "36",
        "Pit House": "37",
        "Salt Kiln": "38",
        "Burnt Sediment": "39",
        "Archeological Ashes": "40",
        "Volcanic Other or Undefined (GEOMAGIA Only)": "41",
        "Mural": "42",
        "Vitrified Stone": "43",
        "Soil": "44",
        "Kamadogu": "45",
        "Foundry": "46",
        "Obsidian": "47",
        "Chert": "48",
        "Burnt daub": "49",
        "Amphora": "50",
        "Granite": "51",
        "Volcanic Glass": "52",
        "Furnace": "53",
        "Roasting Pit": "54"
    }
    # Some of the simple method code mappings are done here
    method_codes_to_geomagia = {
        "GM-NO": "0",
        "GM-CC-ARCH": "101",
        "GM-C14-CAL": "102",
        "GM-C14-UNCAL": "103",
        "GM-LUM-TH": "104",
        "GM-HIST": "105",
        "GM-PMAG-ARCH": "106",
        "GM-ARAR": "107",
        "GM-CC-TEPH": "108",
        "GM-CC-STRAT": "109",
        "GM-CC-REL": "110",
        "GM-DENDRO": "111",
        "GM-RATH": "112",
        "GM-KAR": "113",
        "GM-UTH": "114",
        "GM-FT": "115",
        "GM-C14-AMS": "116",
        "GM-LUM-OS": "117",
        "GM-HE3": "118",
        "GM-VARVE": "119",
        "GM-CS137": "120",
        "GM-USD-PB210": "121",
        "GM-C14-BETA": "122",
        "GM-O18": "123",
        "GM-PA": "124"
    }
    standard = standard.lower()
    standard_value = ""
    if standard == "geomagia":
        # later tables intentionally override earlier ones on a duplicate key
        if vocab in places_to_geomagia:
            standard_value = places_to_geomagia[vocab]
        if vocab in geologic_types_to_geomagia:
            standard_value = geologic_types_to_geomagia[vocab]
        if vocab in method_codes_to_geomagia:
            standard_value = method_codes_to_geomagia[vocab]
    if standard_value == "":
        if vocab == '':
            standard_value = "Fail:vocab_to_convert_is_null"
        elif vocab.isspace():
            # BUG FIX: the original tested `vocab.isspace() or vocab != ''`,
            # which is true for EVERY non-empty unmatched vocab, so all misses
            # were mislabeled as whitespace and the warn-and-return-input
            # branch below was unreachable.
            standard_value = "Fail:vocab_to_convert_is_all_whitespace"
        else:
            print("pmag.vocab_convert:Magic vocab '", vocab, "' not found for standard ", standard, sep='')
            return vocab
    return standard_value
"""
Converts MagIC database terms (method codes, geologic_types, etc) to other standards.
May not be comprehensive for each standard. Terms added to standards as people need them
and may not be up-to-date.
'key' can be used to distinguish vocab terms that exist in two different lists.
Returns:
value of the MagIC vocab in the standard requested
Example:
vocab_convert('Egypt','GEOMAGIA') will return '1'
"""
places_to_geomagia = {
'Egypt': "1",
'Japan': "2",
'France': "3",
'Ukraine': "5",
'India': "6",
'China': "7",
'Finland': "8",
'Greece': "9",
'Italy': "11",
'Switzerland': "12",
'Bulgaria': "13",
'Syria': "14",
'Hungary': "15",
'East Pacific Ridge': "17",
'Hawaii': "18",
'Morocco': "19",
'Australia': "20",
'Georgia': "21",
'Azerbaijan': "22",
'Spain': "24",
'England': "25",
'Czech Republic': "26",
'Mexico': "27",
'Iraq': "28",
'Israel': "29",
'Iran': "30",
'Uzbekistan': "31",
'Turkmenistan': "32",
'Mongolia': "33",
'Iceland': "34",
'New Zealand': "35",
'Amsterdam Island': "36",
'Guadeloupe': "37",
'Mid Atlantic Ridge': "38",
'Austria': "39",
'Belgium': "40",
'Romania': "41",
'Guatemala': "42",
'El Salvador': "43",
'Canary Islands': "45",
'Moldova': "46",
'Latvia': "47",
'Lithuania': "48",
'Russia': "49",
'Germany': "51",
'Martinique': "52",
'Netherlands': "53",
'Turkey': "54",
'Denmark': "55",
'Cameroon': "56",
'Honduras': "57",
'Jordan': "58",
'Brazil': "59",
'Estonia': "61",
'Sweden': "62",
'Peru': "63",
'Bolivia': "64",
'Ecuador': "65",
'Ontario': "66",
'New Mexico': "67",
'Arizona': "68",
'California': "69",
'Colorado': "70",
'Utah': "71",
'Washington': "72",
'Oregon': "73",
'British Columbia': "74",
'Idaho': "75",
'Arkansas': "76",
'Tennessee': "78",
'Serbia': "79",
'Kosovo': "80",
'Portugal': "81",
'Thailand': "82",
'South Korea': "83",
'Kazakhstan': "84",
'Nebraska': "85",
'La Reunion': "86",
'Cyprus': "87",
'Papua New Guinea': "88",
'Vanuatu': "89",
'Fiji': "90",
'Argentina': "91",
'Tunisia': "92",
'Mali': "93",
'Senegal': "95",
'Alaska': "96",
'North Atlantic': "97",
'South Atlantic': "98",
'Beaufort Sea': "99",
'Chukchi Sea': "100",
'Kyrgyzstan': "101",
'Indonesia': "102",
'Azores': "103",
'Quebec': "104",
'Norway': "105",
'Northern Ireland': "106",
'Wales': "107",
'Scotland': "108",
'Virginia': "109",
'North West Pacific': "110",
'Mediterranean': "111",
'Slovakia': "121",
'Poland': "124"
}
geologic_types_to_geomagia = {
"Baked Clay": "2",
"Tile": "3",
"Lava": "4",
"Pottery": "5",
"Sun Dried Object": "6",
"Porcelain": "7",
"Ceramic": "8",
"Kiln": "9",
"Oven or Hearth (GEOMAGIA Only)": "10",
"Mixed Archeological Objects": "11",
"Slag": "12",
"Baked Rock": "13",
"Fresco": "14",
"Mosaic": "15",
"Wall": "16",
"Bath": "17",
"Burnt Floor": "18",
"Funeral Pyre": "19",
"Hypocaust": "20",
"Burnt Pit": "21",
"Bell Mould": "22",
"Smoking Chamber": "23",
"Baked Mud": "24",
"Volcanic Ash": "25",
"Burnt Structure": "26",
"Burnt Castle Wall": "27",
"Charcoal Pile": "28",
"Burnt Earth": "29",
"Vitrified Object": "30",
"Unbaked Sediment": "31",
"Tuyere": "32",
"Sauna": "33",
"Pit Structure": "35",
"Room": "36",
"Pit House": "37",
"Salt Kiln": "38",
"Burnt Sediment": "39",
"Archeological Ashes": "40",
"Volcanic Other or Undefined (GEOMAGIA Only)": "41",
"Mural": "42",
"Vitrified Stone": "43",
"Soil": "44",
"Kamadogu": "45",
"Foundry": "46",
"Obsidian": "47",
"Chert": "48",
"Burnt daub": "49",
"Amphora": "50",
"Granite": "51",
"Volcanic Glass": "52",
"Furnace": "53",
"Roasting Pit": "54"
}
# Some of the simple method code mappings are done here
method_codes_to_geomagia = {
"GM-NO": "0",
"GM-CC-ARCH": "101",
"GM-C14-CAL": "102",
"GM-C14-UNCAL": "103",
"GM-LUM-TH": "104",
"GM-HIST": "105",
"GM-PMAG-ARCH": "106",
"GM-ARAR": "107",
"GM-CC-TEPH": "108",
"GM-CC-STRAT": "109",
"GM-CC-REL": "110",
"GM-DENDRO": "111",
"GM-RATH": "112",
"GM-KAR": "113",
"GM-UTH": "114",
"GM-FT": "115",
"GM-C14-AMS": "116",
"GM-LUM-OS": "117",
"GM-HE3": "118",
"GM-VARVE": "119",
"GM-CS137": "120",
"GM-USD-PB210": "121",
"GM-C14-BETA": "122",
"GM-O18": "123",
"GM-PA": "124"
}
standard = standard.lower()
standard_value = ""
if standard == "geomagia":
if vocab in places_to_geomagia.keys():
standard_value = places_to_geomagia[vocab]
if vocab in geologic_types_to_geomagia.keys():
standard_value = geologic_types_to_geomagia[vocab]
if vocab in method_codes_to_geomagia.keys():
standard_value = method_codes_to_geomagia[vocab]
if standard_value == "":
if vocab=='':
standard_value="Fail:vocab_to_convert_is_null"
elif vocab.isspace() or vocab!='':
standard_value="Fail:vocab_to_convert_is_all_whitespace"
else:
print("pmag.vocab_convert:Magic vocab '", vocab, "' not found for standard ", standard, sep='')
return(vocab)
return standard_value | Converts MagIC database terms (method codes, geologic_types, etc) to other standards.
May not be comprehensive for each standard. Terms added to standards as people need them
and may not be up-to-date.
'key' can be used to distinguish vocab terms that exist in two different lists.
Returns:
value of the MagIC vocab in the standard requested
Example:
vocab_convert('Egypt','GEOMAGIA') will return '1' | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L11081-L11306 |
PmagPy/PmagPy | pmagpy/pmag.py | fix_directories | def fix_directories(input_dir_path, output_dir_path):
"""
Take arguments input/output directories and fixes them.
If no input_directory, default to output_dir_path for both.
Then return realpath for both values.
Parameters
----------
input_dir_path : str
output_dir_path : str
Returns
---------
input_dir_path, output_dir_path
"""
if not input_dir_path:
input_dir_path = output_dir_path
input_dir_path = os.path.realpath(input_dir_path)
output_dir_path = os.path.realpath(output_dir_path)
return input_dir_path, output_dir_path | python | def fix_directories(input_dir_path, output_dir_path):
"""
Take arguments input/output directories and fixes them.
If no input_directory, default to output_dir_path for both.
Then return realpath for both values.
Parameters
----------
input_dir_path : str
output_dir_path : str
Returns
---------
input_dir_path, output_dir_path
"""
if not input_dir_path:
input_dir_path = output_dir_path
input_dir_path = os.path.realpath(input_dir_path)
output_dir_path = os.path.realpath(output_dir_path)
return input_dir_path, output_dir_path | Take arguments input/output directories and fixes them.
If no input_directory, default to output_dir_path for both.
Then return realpath for both values.
Parameters
----------
input_dir_path : str
output_dir_path : str
Returns
---------
input_dir_path, output_dir_path | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L11309-L11328 |
PmagPy/PmagPy | programs/conversion_scripts/magic_geomagia.py | main | def main():
"""
NAME
magic_geomagia.py
DESCRIPTION
Takes a MagIC file and outputs data for easier input into Max Brown's GEOMAGIA database
SYNTAX
magic_geomagia.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: the MagIC data file name that will be converted to GEOMAGIA files
OUTPUT:
print to stdout the GEOMAGIA insert command for the reference and all of the site level data
EXAMPLE:
magic_geomagia.py -f magic_contribution_16578.txt
Nick Jarboe
"""
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file_name=sys.argv[ind+1]
else:
print("MagIC file name needed. Please add the file name after the -f option.")
# Create all the table files from the magic.txt file so they can be imported by the cb
command = "download_magic.py -f " + file_name
os.system(command)
md = cb.Contribution() #md stands for magic file data
md.propagate_location_to_measurements()
md.propagate_location_to_specimens()
md.propagate_location_to_samples()
if not md.tables:
print('-E- No MagIC tables could be found in this directory')
error_log("No MagIC tables found")
return
doi=md.tables['contribution'].df.iloc[0]['reference']
id=md.tables['contribution'].df.iloc[0]['id']
timestamp=md.tables['contribution'].df.iloc[0]['timestamp']
contributor=md.tables['contribution'].df.iloc[0]['contributor']
print("c=",contributor)
contributor=contributor.replace('@','')
print("c=",contributor)
cr = Crossref()
ref=cr.works(doi)
# authors = "Doe J.X., Alexander,T.G."
status= ref["status"]
message= ref["message"]
# print("message=",message)
authors= message["author"]
# print("authors=",authors)
authorList=""
for author in authors:
# print ("Name:",author['given'], author['family'])
author_given=""
names=author['given'].split(' ')
for name in names:
author_given +=name[0]+"."
authorList += author['family'] + " " + author_given + ", "
# print(authorList)
authorList=authorList[:-2]
# print(authorList)
title = message['title'][0]
year = message['created']['date-parts'][0][0]
# print(year)
journal = message['short-container-title'][0]
volume = message['volume']
# print(volume)
pages='0'
if "page" in message.keys():
pages = message['page']
# print(pages)
url = "https://earthref.org/MagIC/doi/" + doi
print("REFS")
print("Insert into REFS values(NULL,'", authorList, "','", title, "', ", year, ", '", journal, "', ", volume, ", '", pages, "', '", doi, "', '", url, "');", sep='')
print()
print("ARCHEODIJ")
sites=md.tables['sites'].df
locations=md.tables['locations'].df
print("UID,NUM_SAMPLES,NUM_ACC_SPEC,NUM_MEAS_SPEC,BA,SIGMA_BA,AGE, AGE_MIN,AGE_MAX,NUM_SIGMAS,AGE_ERROR_TYPE_ID,SITE_LAT, SITE_LON,VADM,SIGMA_VADM,SITE_ID,PI_METHODS_ID,AC_ID,MD_CK_ ID,AN_CORR_ID,CR_CORR_ID,DM_METHOD_ID,AF_STEP,T_STEP,DM_ ANALYSIS_ID,SPECIMEN_TYPE_ID,MATERIAL_ID,REFERENCE_ID,NUM_ C14_SAMPLES,C14_ID,CALIB_C14_AGE,CALIB_C14_AGE_SIGMA_MIN, CALIB_C14_AGE_SIGMA_MAX,NUM_C14_SIGMAS,CALC_CALIB_C14_AGE, CALC_CALIB_C14_AGE_SIGMA_MIN,CALC_CALIB_C14_AGE_SIGMA_MAX, C14_CALIB_SOFTWARE_ID,CALC_C14_CALIB_SOFTWARE_ID,C14_CALIB_DATASET_ID,CALC_C14_ CALIB_DATASET_ID,DENDRO_ID,TOT_NUM_DENDRO,NUM_DENDRO_ USED,DATING_METHOD_ID,NUM_DIR_SAMPLES,NUM_DIR_SPECIMENS,NUM_ DIR_SPEC_COLLECTED,DECL,INCL,ALPHA_95,K,VDM,SIGMA_VDM,SAMPLE_ID,c_csv,SITE_NAME, SITE_HORIZON,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014, SUPERSEEDED,UPLOAD_YEAR,UPLOAD_MONTH,UPLOADER,EDITOR,EDIT_DATE,NOTES")
for index, row in sites.iterrows():
int_n_samples,int_n_specimens,int_n_total_specimens,int_abs,int_abs_sigma=-1,-1,-1,-1,-1
if 'int_n_samples' in sites.columns.values:
int_n_samples=row['int_n_samples']
if 'int_n_specimens' in sites.columns.values:
int_n_specimens=row['int_n_specimens']
if 'int_n_total_specimens' in sites.columns.values:
int_n_total_specimens=row['int_n_total_specimens']
if int_n_specimens == -1 and int_n_samples >0:
int_n_spcimens = int_n_samples
if 'int_abs' in sites.columns.values:
int_abs=row['int_abs']
if int_abs is not None:
int_abs=round(int_abs*1e6,1)
if 'int_abs_sigma' in sites.columns.values:
int_abs_sigma=row['int_abs_sigma']
if int_abs_sigma is not None:
int_abs_sigma=round(row['int_abs_sigma']*1e6,1)
age,age_high,age_low=-1e9,-1e9,-1e9
age_error_type='0' #
if 'age_unit' not in sites.columns.values:
print("Malformed Magic sites data table. Required column row 'age_unit' is missing")
sys.exit()
age_unit=row['age_unit']
if 'age' in sites.columns.values:
age=row['age']
age=pmag.age_to_BP(age,age_unit)
if 'age_high' in sites.columns.values:
age_high=row['age_high']
age_high=pmag.age_to_BP(age_high,age_unit)
if 'age_low' in sites.columns.values:
age_low=row['age_low']
age_low=pmag.age_to_BP(age_low,age_unit)
if 'age_sigma' in sites.columns.values:
age_sigma=row['age_sigma']
age_sigma=pmag.age_to_BP(age_sigma,age_unit)
age_high=age+age_sigma
age_low=age-age_sigma
age_error_type='5' #Magic is one sigma for all sigma state/province column to data modelages
if age_low > age_high: # MagIC lets age_high and age_low be in any order. Fix that for GEOMAGIA
temp=age_high
age_high=age_low
age_low=temp
if age == -1e9: # If only age_low and age_high are in the MagIC file then calculate the age.
age=(age_high+age_low)/2
age_error_type='8' #If MagIC age only high and low then error type is "range"
age_min=age-age_low # GEOMAGIA has the max and min as differences from the age, not absolute.
age_max=age_high-age
age_BP=age
age=1950-age #GEOMAGIA want +-AD/BC so convert BP to AD/-BC
lat=row['lat']
lon=row['lon']
vadm,vadm_sigma=-1,-1
if 'vadm' in sites.columns.values:
vadm=row['vadm']
vadm=vadm/1e22
if 'vadm_sigma' in sites.columns.values:
vadm=row['vadm']
vadm=vadm/1e22
site_name=row['site']
# For paleointensity codes just give the method code list and Max will decide on the right
# GEOMAGIA code.
method_codes="No MagIC method codes available"
if 'method_codes' in sites.columns.values:
method_codes=row['method_codes']
# Just give Max all the method codes for him to decide for now
paleointensity_procedure=method_codes
alteration_monitor="0"
alteration_monitor=method_codes_to_geomagia(method_codes,'ALTERATION_MONIT_CORR')
multidomain_check="0"
multidomain_check=method_codes_to_geomagia(method_codes,'MD_CHECKS')
anisotropy_correction="0"
anisotropy_correction=method_codes_to_geomagia(method_codes,'ANISOTROPY_CORRECTION')
cooling_rate="0"
cooling_rate=method_codes_to_geomagia(method_codes,'COOLING_RATE')
demag_method="0"
demag_method=method_codes_to_geomagia(method_codes,'DM_METHODS')
demag_analysis="0"
demag_analysis=method_codes_to_geomagia(method_codes,'DM_ANALYSIS')
specimen_shape="0"
specimen_shape=method_codes_to_geomagia(method_codes,'SPECIMEN_TYPE_ID')
materials=""
geologic_types=""
if 'geologic_types' in sites.columns.values:
geologic_types=row['geologic_types']
if ":" in geologic_types:
gtypes=geologic_types.split(":")
for gtype in gtypes:
materials=materials+pmag.vocab_convert(gtype,"geomagia")+":"
materials=materials[:-1]
else:
materials=pmag.vocab_convert(geologic_types,"geomagia")
geochron_codes=""
if ":" in method_codes:
gcodes=method_codes.split(":")
for gcode in gcodes:
if "GM-" == gcode[:3]:
geochron_codes=geochron_codes+pmag.vocab_convert(gcode,"geomagia")+":"
geochron_codes=geochron_codes[:-1]
else:
geochron_codes=pmag.vocab_convert(geochron_codes,"geomagia")
if geochron_codes == "":
geochron_codes="0"
dir_n_samples="-1"
if 'dir_n_samples' in sites.columns.values:
dir_n_samples=row['dir_n_samples']
dir_n_samples="-1"
if 'dir_n_samples' in sites.columns.values:
dir_n_samples=row['dir_n_samples']
# Not in MagIC
dir_n_specimens="-1"
# using total number of samples for total specimen number
dir_n_total_samples="-1"
if 'dir_n_total_samples' in sites.columns.values:
dir_n_total_samples=row['dir_n_total_samples']
dir_dec="999"
if 'dir_dec' in sites.columns.values:
dir_dec=row['dir_dec']
dir_inc="999"
if 'dir_inc' in sites.columns.values:
dir_inc=row['dir_inc']
dir_alpha95="-1"
if 'dir_alpha95' in sites.columns.values:
dir_alpha95=row['dir_alpha95']
dir_k="-1"
if 'dir_k' in sites.columns.values:
dir_k=row['dir_k']
vdm=-1
if 'vdm' in sites.columns.values:
vdm=float(row['vdm'])
vdm=vdm/1e22
vdm_sigma=-1
if 'vdm_sigma' in sites.columns.values:
vdm_sigma=float(row['vdm_sigma'])
vdm_sigma=vdm_sigma/1e22
# Could try and get sample names from samples table (using Contribution object) but just taking the list
# if it exists for now.
sample_list="-1"
if 'samples' in sites.columns.values:
sample_list=row['samples']
# c_csv is in GEOMAGIA insert. What it is I don't know. Max said set to 0
c_csv='0'
# This place_id is SITE_ID in GEOMAGIA
place_id="0"
location=row['location']
if 'state_province' in locations.columns.values:
place=locations.loc[location,'state_province']
if place != "":
place_id=pmag.vocab_convert(place,'GEOMAGIA')
if place_id == "0":
if 'country' in locations.columns.values:
place=locations.loc[location,'country']
if place != "":
place_id=pmag.vocab_convert(place,'GEOMAGIA')
if place_id == "0":
if 'continent_ocean' in locations.columns.values:
place_id=locations.loc[location,'continent_ocean']
if place != "":
place_id=pmag.vocab_convert(place,'GEOMAGIA')
site=row['site']
dt=dateutil.parser.parse(timestamp)
description="-1"
if 'description' in sites.columns.values:
description=row['description']
if age_BP <= 50000:
print("0",int_n_samples,int_n_specimens,int_n_total_specimens,int_abs,int_abs_sigma,age,age_min,age_max,"1",age_error_type,lat,lon,vadm,vadm_sigma,place_id,paleointensity_procedure,alteration_monitor,multidomain_check,anisotropy_correction,cooling_rate,demag_method,"0","0",demag_analysis,specimen_shape,materials,doi,"-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1",geochron_codes,dir_n_samples,dir_n_samples,dir_n_total_samples,dir_dec,dir_inc,dir_alpha95,dir_k,vdm,vdm_sigma,sample_list,c_csv,location,site,"-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1","-1",dt.year,dt.month,contributor,"-1,-1",description,sep=',') | python | def main():
"""
NAME
magic_geomagia.py
DESCRIPTION
Takes a MagIC file and outputs data for easier input into Max Brown's GEOMAGIA database
SYNTAX
magic_geomagia.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: the MagIC data file name that will be converted to GEOMAGIA files
OUTPUT:
print to stdout the GEOMAGIA insert command for the reference and all of the site level data
EXAMPLE:
magic_geomagia.py -f magic_contribution_16578.txt
Nick Jarboe
"""
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file_name=sys.argv[ind+1]
else:
print("MagIC file name needed. Please add the file name after the -f option.")
# Create all the table files from the magic.txt file so they can be imported by the cb
command = "download_magic.py -f " + file_name
os.system(command)
md = cb.Contribution() #md stands for magic file data
md.propagate_location_to_measurements()
md.propagate_location_to_specimens()
md.propagate_location_to_samples()
if not md.tables:
print('-E- No MagIC tables could be found in this directory')
error_log("No MagIC tables found")
return
doi=md.tables['contribution'].df.iloc[0]['reference']
id=md.tables['contribution'].df.iloc[0]['id']
timestamp=md.tables['contribution'].df.iloc[0]['timestamp']
contributor=md.tables['contribution'].df.iloc[0]['contributor']
print("c=",contributor)
contributor=contributor.replace('@','')
print("c=",contributor)
cr = Crossref()
ref=cr.works(doi)
# authors = "Doe J.X., Alexander,T.G."
status= ref["status"]
message= ref["message"]
# print("message=",message)
authors= message["author"]
# print("authors=",authors)
authorList=""
for author in authors:
# print ("Name:",author['given'], author['family'])
author_given=""
names=author['given'].split(' ')
for name in names:
author_given +=name[0]+"."
authorList += author['family'] + " " + author_given + ", "
# print(authorList)
authorList=authorList[:-2]
# print(authorList)
title = message['title'][0]
year = message['created']['date-parts'][0][0]
# print(year)
journal = message['short-container-title'][0]
volume = message['volume']
# print(volume)
pages='0'
if "page" in message.keys():
pages = message['page']
# print(pages)
url = "https://earthref.org/MagIC/doi/" + doi
print("REFS")
print("Insert into REFS values(NULL,'", authorList, "','", title, "', ", year, ", '", journal, "', ", volume, ", '", pages, "', '", doi, "', '", url, "');", sep='')
print()
print("ARCHEODIJ")
sites=md.tables['sites'].df
locations=md.tables['locations'].df
print("UID,NUM_SAMPLES,NUM_ACC_SPEC,NUM_MEAS_SPEC,BA,SIGMA_BA,AGE, AGE_MIN,AGE_MAX,NUM_SIGMAS,AGE_ERROR_TYPE_ID,SITE_LAT, SITE_LON,VADM,SIGMA_VADM,SITE_ID,PI_METHODS_ID,AC_ID,MD_CK_ ID,AN_CORR_ID,CR_CORR_ID,DM_METHOD_ID,AF_STEP,T_STEP,DM_ ANALYSIS_ID,SPECIMEN_TYPE_ID,MATERIAL_ID,REFERENCE_ID,NUM_ C14_SAMPLES,C14_ID,CALIB_C14_AGE,CALIB_C14_AGE_SIGMA_MIN, CALIB_C14_AGE_SIGMA_MAX,NUM_C14_SIGMAS,CALC_CALIB_C14_AGE, CALC_CALIB_C14_AGE_SIGMA_MIN,CALC_CALIB_C14_AGE_SIGMA_MAX, C14_CALIB_SOFTWARE_ID,CALC_C14_CALIB_SOFTWARE_ID,C14_CALIB_DATASET_ID,CALC_C14_ CALIB_DATASET_ID,DENDRO_ID,TOT_NUM_DENDRO,NUM_DENDRO_ USED,DATING_METHOD_ID,NUM_DIR_SAMPLES,NUM_DIR_SPECIMENS,NUM_ DIR_SPEC_COLLECTED,DECL,INCL,ALPHA_95,K,VDM,SIGMA_VDM,SAMPLE_ID,c_csv,SITE_NAME, SITE_HORIZON,1000,1001,1002,1003,1004,1005,1006,1007,1008,1009,1010,1011,1012,1013,1014, SUPERSEEDED,UPLOAD_YEAR,UPLOAD_MONTH,UPLOADER,EDITOR,EDIT_DATE,NOTES")
for index, row in sites.iterrows():
int_n_samples,int_n_specimens,int_n_total_specimens,int_abs,int_abs_sigma=-1,-1,-1,-1,-1
if 'int_n_samples' in sites.columns.values:
int_n_samples=row['int_n_samples']
if 'int_n_specimens' in sites.columns.values:
int_n_specimens=row['int_n_specimens']
if 'int_n_total_specimens' in sites.columns.values:
int_n_total_specimens=row['int_n_total_specimens']
if int_n_specimens == -1 and int_n_samples >0:
int_n_spcimens = int_n_samples
if 'int_abs' in sites.columns.values:
int_abs=row['int_abs']
if int_abs is not None:
int_abs=round(int_abs*1e6,1)
if 'int_abs_sigma' in sites.columns.values:
int_abs_sigma=row['int_abs_sigma']
if int_abs_sigma is not None:
int_abs_sigma=round(row['int_abs_sigma']*1e6,1)
age,age_high,age_low=-1e9,-1e9,-1e9
age_error_type='0' #
if 'age_unit' not in sites.columns.values:
print("Malformed Magic sites data table. Required column row 'age_unit' is missing")
sys.exit()
age_unit=row['age_unit']
if 'age' in sites.columns.values:
age=row['age']
age=pmag.age_to_BP(age,age_unit)
if 'age_high' in sites.columns.values:
age_high=row['age_high']
age_high=pmag.age_to_BP(age_high,age_unit)
if 'age_low' in sites.columns.values:
age_low=row['age_low']
age_low=pmag.age_to_BP(age_low,age_unit)
if 'age_sigma' in sites.columns.values:
age_sigma=row['age_sigma']
age_sigma=pmag.age_to_BP(age_sigma,age_unit)
age_high=age+age_sigma
age_low=age-age_sigma
age_error_type='5' #Magic is one sigma for all sigma state/province column to data modelages
if age_low > age_high: # MagIC lets age_high and age_low be in any order. Fix that for GEOMAGIA
temp=age_high
age_high=age_low
age_low=temp
if age == -1e9: # If only age_low and age_high are in the MagIC file then calculate the age.
age=(age_high+age_low)/2
age_error_type='8' #If MagIC age only high and low then error type is "range"
age_min=age-age_low # GEOMAGIA has the max and min as differences from the age, not absolute.
age_max=age_high-age
age_BP=age
age=1950-age #GEOMAGIA want +-AD/BC so convert BP to AD/-BC
lat=row['lat']
lon=row['lon']
vadm,vadm_sigma=-1,-1
if 'vadm' in sites.columns.values:
vadm=row['vadm']
vadm=vadm/1e22
if 'vadm_sigma' in sites.columns.values:
vadm=row['vadm']
vadm=vadm/1e22
site_name=row['site']
# For paleointensity codes just give the method code list and Max will decide on the right
# GEOMAGIA code.
method_codes="No MagIC method codes available"
if 'method_codes' in sites.columns.values:
method_codes=row['method_codes']
# Just give Max all the method codes for him to decide for now
paleointensity_procedure=method_codes
alteration_monitor="0"
alteration_monitor=method_codes_to_geomagia(method_codes,'ALTERATION_MONIT_CORR')
multidomain_check="0"
multidomain_check=method_codes_to_geomagia(method_codes,'MD_CHECKS')
anisotropy_correction="0"
anisotropy_correction=method_codes_to_geomagia(method_codes,'ANISOTROPY_CORRECTION')
cooling_rate="0"
cooling_rate=method_codes_to_geomagia(method_codes,'COOLING_RATE')
demag_method="0"
demag_method=method_codes_to_geomagia(method_codes,'DM_METHODS')
demag_analysis="0"
demag_analysis=method_codes_to_geomagia(method_codes,'DM_ANALYSIS')
specimen_shape="0"
specimen_shape=method_codes_to_geomagia(method_codes,'SPECIMEN_TYPE_ID')
materials=""
geologic_types=""
if 'geologic_types' in sites.columns.values:
geologic_types=row['geologic_types']
if ":" in geologic_types:
gtypes=geologic_types.split(":")
for gtype in gtypes:
materials=materials+pmag.vocab_convert(gtype,"geomagia")+":"
materials=materials[:-1]
else:
materials=pmag.vocab_convert(geologic_types,"geomagia")
geochron_codes=""
if ":" in method_codes:
gcodes=method_codes.split(":")
for gcode in gcodes:
if "GM-" == gcode[:3]:
geochron_codes=geochron_codes+pmag.vocab_convert(gcode,"geomagia")+":"
geochron_codes=geochron_codes[:-1]
else:
geochron_codes=pmag.vocab_convert(geochron_codes,"geomagia")
if geochron_codes == "":
geochron_codes="0"
dir_n_samples="-1"
if 'dir_n_samples' in sites.columns.values:
dir_n_samples=row['dir_n_samples']
dir_n_samples="-1"
if 'dir_n_samples' in sites.columns.values:
dir_n_samples=row['dir_n_samples']
# Not in MagIC
dir_n_specimens="-1"
# using total number of samples for total specimen number
dir_n_total_samples="-1"
if 'dir_n_total_samples' in sites.columns.values:
dir_n_total_samples=row['dir_n_total_samples']
dir_dec="999"
if 'dir_dec' in sites.columns.values:
dir_dec=row['dir_dec']
dir_inc="999"
if 'dir_inc' in sites.columns.values:
dir_inc=row['dir_inc']
dir_alpha95="-1"
if 'dir_alpha95' in sites.columns.values:
dir_alpha95=row['dir_alpha95']
dir_k="-1"
if 'dir_k' in sites.columns.values:
dir_k=row['dir_k']
vdm=-1
if 'vdm' in sites.columns.values:
vdm=float(row['vdm'])
vdm=vdm/1e22
vdm_sigma=-1
if 'vdm_sigma' in sites.columns.values:
vdm_sigma=float(row['vdm_sigma'])
vdm_sigma=vdm_sigma/1e22
# Could try and get sample names from samples table (using Contribution object) but just taking the list
# if it exists for now.
sample_list="-1"
if 'samples' in sites.columns.values:
sample_list=row['samples']
# c_csv is in GEOMAGIA insert. What it is I don't know. Max said set to 0
c_csv='0'
# This place_id is SITE_ID in GEOMAGIA
place_id="0"
location=row['location']
if 'state_province' in locations.columns.values:
place=locations.loc[location,'state_province']
if place != "":
place_id=pmag.vocab_convert(place,'GEOMAGIA')
if place_id == "0":
if 'country' in locations.columns.values:
place=locations.loc[location,'country']
if place != "":
place_id=pmag.vocab_convert(place,'GEOMAGIA')
if place_id == "0":
if 'continent_ocean' in locations.columns.values:
place_id=locations.loc[location,'continent_ocean']
if place != "":
place_id=pmag.vocab_convert(place,'GEOMAGIA')
site=row['site']
dt=dateutil.parser.parse(timestamp)
description="-1"
if 'description' in sites.columns.values:
description=row['description']
if age_BP <= 50000:
print("0",int_n_samples,int_n_specimens,int_n_total_specimens,int_abs,int_abs_sigma,age,age_min,age_max,"1",age_error_type,lat,lon,vadm,vadm_sigma,place_id,paleointensity_procedure,alteration_monitor,multidomain_check,anisotropy_correction,cooling_rate,demag_method,"0","0",demag_analysis,specimen_shape,materials,doi,"-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1",geochron_codes,dir_n_samples,dir_n_samples,dir_n_total_samples,dir_dec,dir_inc,dir_alpha95,dir_k,vdm,vdm_sigma,sample_list,c_csv,location,site,"-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1","-1",dt.year,dt.month,contributor,"-1,-1",description,sep=',') | NAME
magic_geomagia.py
DESCRIPTION
Takes a MagIC file and outputs data for easier input into Max Brown's GEOMAGIA database
SYNTAX
magic_geomagia.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: the MagIC data file name that will be converted to GEOMAGIA files
OUTPUT:
print to stdout the GEOMAGIA insert command for the reference and all of the site level data
EXAMPLE:
magic_geomagia.py -f magic_contribution_16578.txt
Nick Jarboe | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/conversion_scripts/magic_geomagia.py#L12-L307 |
PmagPy/PmagPy | programs/conversion_scripts/magic_geomagia.py | method_codes_to_geomagia | def method_codes_to_geomagia(magic_method_codes,geomagia_table):
"""
Looks at the MagIC method code list and returns the correct GEOMAGIA code number depending
on the method code list and the GEOMAGIA table specified. Returns O, GEOMAGIA's "Not specified" value, if no match.
When mutiple codes are matched they are separated with -
"""
codes=magic_method_codes
geomagia=geomagia_table.lower()
geomagia_code='0'
if geomagia=='alteration_monit_corr':
if "DA-ALT-V" or "LP-PI-ALT-PTRM" or "LP-PI-ALT-PMRM" in codes:
geomagia_code='1'
elif "LP-PI-ALT-SUSC" in codes:
geomagia_code='2'
elif "DA-ALT-RS" or "LP-PI-ALT-AFARM" in codes:
geomagia_code='3'
elif "LP-PI-ALT-WALTON" in codes:
geomagia_code='4'
elif "LP-PI-ALT-TANGUY" in codes:
geomagia_code='5'
elif "DA-ALT" in codes:
geomagia_code='6' #at end to fill generic if others don't exist
elif "LP-PI-ALT-FABIAN" in codes:
geomagia_code='7'
if geomagia=='md_checks':
if ("LT-PTRM-MD" in codes) or ("LT-PMRM-MD" in codes):
geomagia_code='1:'
if ("LP-PI-BT-LT" in codes) or ("LT-LT-Z" in codes):
if "0" in geomagia_code:
geomagia_code="23:"
else:
geomagia_code+='2:'
geomagia_code=geomagia_code[:-1]
if geomagia=='anisotropy_correction':
if "DA-AC-AMS" in codes:
geomagia_code='1'
elif "DA-AC-AARM" in codes:
geomagia_code='2'
elif "DA-AC-ATRM" in codes:
geomagia_code='3'
elif "LT-NRM-PAR" in codes:
geomagia_code='4'
elif "DA-AC-AIRM" in codes:
geomagia_code='6'
elif "DA-AC" in codes: #at end to fill generic if others don't exist
geomagia_code='5'
if geomagia=='cooling_rate':
if "DA-CR" in codes: #all current CR codes but CR-EG are a 1 but may change in the future
geomagia_code='1'
if "DA-CR-EG" in codes:
geomagia_code='2'
if geomagia=='dm_methods':
if "LP-DIR-AF" in codes:
geomagia_code='1'
elif "LT-AF-D" in codes:
geomagia_code='1'
elif "LT-AF-G" in codes:
geomagia_code='1'
elif "LT-AF-Z" in codes:
geomagia_code='1'
elif "LP-DIR-T" in codes:
geomagia_code='2'
elif "LT-AF-Z" in codes:
geomagia_code='2'
elif "LP-DIR-M" in codes:
geomagia_code='5'
elif "LT-M-Z" in codes:
geomagia_code='5'
if geomagia=='dm_analysis':
if "DE-BFL" in codes:
geomagia_code='1'
elif "DE-BLANKET" in codes:
geomagia_code='2'
elif "DE-FM" in codes:
geomagia_code='3'
elif "DE-NRM" in codes:
geomagia_code='6'
if geomagia=='specimen_type_id':
if "SC-TYPE-CYC" in codes:
geomagia_code='1'
elif "SC-TYPE-CUBE" in codes:
geomagia_code='2'
elif "SC-TYPE-MINI" in codes:
geomagia_code='3'
elif "SC-TYPE-SC" in codes:
geomagia_code='4'
elif "SC-TYPE-UC" in codes:
geomagia_code='5'
elif "SC-TYPE-LARGE" in codes:
geomagia_code='6'
return geomagia_code | python | def method_codes_to_geomagia(magic_method_codes,geomagia_table):
"""
Looks at the MagIC method code list and returns the correct GEOMAGIA code number depending
on the method code list and the GEOMAGIA table specified. Returns O, GEOMAGIA's "Not specified" value, if no match.
When mutiple codes are matched they are separated with -
"""
codes=magic_method_codes
geomagia=geomagia_table.lower()
geomagia_code='0'
if geomagia=='alteration_monit_corr':
if "DA-ALT-V" or "LP-PI-ALT-PTRM" or "LP-PI-ALT-PMRM" in codes:
geomagia_code='1'
elif "LP-PI-ALT-SUSC" in codes:
geomagia_code='2'
elif "DA-ALT-RS" or "LP-PI-ALT-AFARM" in codes:
geomagia_code='3'
elif "LP-PI-ALT-WALTON" in codes:
geomagia_code='4'
elif "LP-PI-ALT-TANGUY" in codes:
geomagia_code='5'
elif "DA-ALT" in codes:
geomagia_code='6' #at end to fill generic if others don't exist
elif "LP-PI-ALT-FABIAN" in codes:
geomagia_code='7'
if geomagia=='md_checks':
if ("LT-PTRM-MD" in codes) or ("LT-PMRM-MD" in codes):
geomagia_code='1:'
if ("LP-PI-BT-LT" in codes) or ("LT-LT-Z" in codes):
if "0" in geomagia_code:
geomagia_code="23:"
else:
geomagia_code+='2:'
geomagia_code=geomagia_code[:-1]
if geomagia=='anisotropy_correction':
if "DA-AC-AMS" in codes:
geomagia_code='1'
elif "DA-AC-AARM" in codes:
geomagia_code='2'
elif "DA-AC-ATRM" in codes:
geomagia_code='3'
elif "LT-NRM-PAR" in codes:
geomagia_code='4'
elif "DA-AC-AIRM" in codes:
geomagia_code='6'
elif "DA-AC" in codes: #at end to fill generic if others don't exist
geomagia_code='5'
if geomagia=='cooling_rate':
if "DA-CR" in codes: #all current CR codes but CR-EG are a 1 but may change in the future
geomagia_code='1'
if "DA-CR-EG" in codes:
geomagia_code='2'
if geomagia=='dm_methods':
if "LP-DIR-AF" in codes:
geomagia_code='1'
elif "LT-AF-D" in codes:
geomagia_code='1'
elif "LT-AF-G" in codes:
geomagia_code='1'
elif "LT-AF-Z" in codes:
geomagia_code='1'
elif "LP-DIR-T" in codes:
geomagia_code='2'
elif "LT-AF-Z" in codes:
geomagia_code='2'
elif "LP-DIR-M" in codes:
geomagia_code='5'
elif "LT-M-Z" in codes:
geomagia_code='5'
if geomagia=='dm_analysis':
if "DE-BFL" in codes:
geomagia_code='1'
elif "DE-BLANKET" in codes:
geomagia_code='2'
elif "DE-FM" in codes:
geomagia_code='3'
elif "DE-NRM" in codes:
geomagia_code='6'
if geomagia=='specimen_type_id':
if "SC-TYPE-CYC" in codes:
geomagia_code='1'
elif "SC-TYPE-CUBE" in codes:
geomagia_code='2'
elif "SC-TYPE-MINI" in codes:
geomagia_code='3'
elif "SC-TYPE-SC" in codes:
geomagia_code='4'
elif "SC-TYPE-UC" in codes:
geomagia_code='5'
elif "SC-TYPE-LARGE" in codes:
geomagia_code='6'
return geomagia_code | Looks at the MagIC method code list and returns the correct GEOMAGIA code number depending
on the method code list and the GEOMAGIA table specified. Returns O, GEOMAGIA's "Not specified" value, if no match.
When mutiple codes are matched they are separated with - | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/conversion_scripts/magic_geomagia.py#L310-L410 |
PmagPy/PmagPy | programs/microwave_magic.py | main | def main():
"""
NAME
microwave_magic.py
DESCRIPTION
plots microwave paleointensity data, allowing interactive setting of bounds.
Saves and reads interpretations
from a pmag_specimen formatted table, default: microwave_specimens.txt
SYNTAX
microwave_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f MEAS, set magic_measurements input file
-fsp PRIOR, set pmag_specimen prior interpretations file
-fcr CRIT, set criteria file for grading.
-fmt [svg,png,jpg], format for images - default is svg
-sav, saves plots with out review (default format)
-spc SPEC, plots single specimen SPEC, saves plot with specified format
with optional -b bounds adn quits
-b BEG END: sets bounds for calculation
BEG: starting step for slope calculation
END: ending step for slope calculation
DEFAULTS
MEAS: magic_measurements.txt
CRIT: NONE
PRIOR: microwave_specimens.txt
OUTPUT
figures:
ALL: numbers refer to temperature steps in command line window
1) Arai plot: closed circles are zero-field first/infield
open circles are infield first/zero-field
triangles are pTRM checks
squares are pTRM tail checks
VDS is vector difference sum
diamonds are bounds for interpretation
2) Zijderveld plot: closed (open) symbols are X-Y (X-Z) planes
X rotated to NRM direction
3) (De/Re)Magnetization diagram:
circles are NRM remaining
squares are pTRM gained
command line window:
list is: temperature step numbers, power (J), Dec, Inc, Int (units of magic_measuements)
list of possible commands: type letter followed by return to select option
saving of plots creates .svg format files with specimen_name, plot type as name
"""
#
# initializations
#
meas_file,critout,inspec="magic_measurements.txt","","microwave_specimens.txt"
inlt=0
version_num=pmag.get_version()
Tinit,DCZ,field,first_save=0,0,-1,1
user,comment="",''
ans,specimen,recnum,start,end=0,0,0,0,0
plots,pmag_out,samp_file,style=0,"","","svg"
fmt='.'+style
#
# default acceptance criteria
#
accept_keys=['specimen_int_ptrm_n','specimen_md','specimen_fvds','specimen_b_beta','specimen_dang','specimen_drats','specimen_Z']
accept={}
accept['specimen_int_ptrm_n']=2
accept['specimen_md']=10
accept['specimen_fvds']=0.35
accept['specimen_b_beta']=.1
accept['specimen_int_mad']=7
accept['specimen_dang']=10
accept['specimen_drats']=10
accept['specimen_Z']=10
#
# parse command line options
#
spc,BEG,END="","",""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
meas_file=sys.argv[ind+1]
if '-fsp' in sys.argv:
ind=sys.argv.index('-fsp')
inspec=sys.argv[ind+1]
if '-fcr' in sys.argv:
ind=sys.argv.index('-fcr')
critout=sys.argv[ind+1]
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt='.'+sys.argv[ind+1]
if '-spc' in sys.argv:
ind=sys.argv.index('-spc')
spc=sys.argv[ind+1]
if '-b' in sys.argv:
ind=sys.argv.index('-b')
BEG=int(sys.argv[ind+1])
END=int(sys.argv[ind+2])
if critout!="":
crit_data,file_type=pmag.magic_read(critout)
if pmagplotlib.verbose:
print("Acceptance criteria read in from ", critout)
accept={}
accept['specimen_int_ptrm_n']=2.0
for critrec in crit_data:
if critrec["pmag_criteria_code"]=="IE-SPEC":
for key in accept_keys:
if key not in list(critrec.keys()):
accept[key]=-1
else:
accept[key]=float(critrec[key])
try:
open(inspec,'r')
PriorRecs,file_type=pmag.magic_read(inspec)
if file_type != 'pmag_specimens':
print(file_type)
print(file_type,inspec," is not a valid pmag_specimens file ")
sys.exit()
for rec in PriorRecs:
if 'magic_software_packages' not in list(rec.keys()):rec['magic_software_packages']=""
except IOError:
PriorRecs=[]
if pmagplotlib.verbose:print("starting new specimen interpretation file: ",inspec)
meas_data,file_type=pmag.magic_read(meas_file)
if file_type != 'magic_measurements':
print(file_type)
print(file_type,"This is not a valid magic_measurements file ")
sys.exit()
backup=0
# define figure numbers for arai, zijderveld and
# de-,re-magization diagrams
AZD={}
AZD['deremag'], AZD['zijd'],AZD['arai'],AZD['eqarea']=1,2,3,4
pmagplotlib.plot_init(AZD['arai'],4,4)
pmagplotlib.plot_init(AZD['zijd'],4,4)
pmagplotlib.plot_init(AZD['deremag'],4,4)
pmagplotlib.plot_init(AZD['eqarea'],4,4)
#
#
#
# get list of unique specimen names
#
CurrRec=[]
sids=pmag.get_specs(meas_data)
# get plots for specimen s - default is just to step through arai diagrams
#
if spc!="": specimen =sids.index(spc)
while specimen < len(sids):
methcodes=[]
if pmagplotlib.verbose and spc!="":
print(sids[specimen],specimen+1, 'of ', len(sids))
MeasRecs=[]
s=sids[specimen]
datablock,trmblock=[],[]
PmagSpecRec={}
PmagSpecRec["er_analyst_mail_names"]=user
PmagSpecRec["specimen_correction"]='u'
#
# find the data from the meas_data file for this specimen
#
for rec in meas_data:
if rec["er_specimen_name"]==s:
MeasRecs.append(rec)
methods=rec["magic_method_codes"].split(":")
meths=[]
for meth in methods:
meths.append(meth.strip()) # take off annoying spaces
methods=""
for meth in meths:
if meth.strip() not in methcodes and "LP-" in meth:methcodes.append(meth.strip())
methods=methods+meth+":"
methods=methods[:-1]
rec["magic_method_codes"]=methods
if "LP-PI-M" in meths: datablock.append(rec)
if "LP-MRM" in meths: trmblock.append(rec)
if len(trmblock)>2 and inspec!="":
if Tinit==0:
Tinit=1
AZD['MRM']=4
pmagplotlib.plot_init(AZD['MRM'],4,4)
elif Tinit==1:
pmagplotlib.clearFIG(AZD['MRM'])
if len(datablock) <4:
if backup==0:
specimen+=1
if pmagplotlib.verbose:
print('skipping specimen - moving forward ', s)
else:
specimen-=1
if pmagplotlib.verbose:
print('skipping specimen - moving backward ', s)
#
# collect info for the PmagSpecRec dictionary
#
else:
rec=datablock[0]
PmagSpecRec["er_citation_names"]="This study"
PmagSpecRec["er_specimen_name"]=s
PmagSpecRec["er_sample_name"]=rec["er_sample_name"]
PmagSpecRec["er_site_name"]=rec["er_site_name"]
PmagSpecRec["er_location_name"]=rec["er_location_name"]
if "magic_instrument_codes" not in list(rec.keys()):rec["magic_instrument_codes"]=""
PmagSpecRec["magic_instrument_codes"]=rec["magic_instrument_codes"]
PmagSpecRec["measurement_step_unit"]="J"
if "magic_experiment_name" not in list(rec.keys()):
rec["magic_experiment_name"]=""
else:
PmagSpecRec["magic_experiment_names"]=rec["magic_experiment_name"]
meths=rec["magic_method_codes"].split(':')
# sort data into types
if "LP-PI-M-D" in meths: # this is a double heating experiment
exp_type="LP-PI-M-D"
elif "LP-PI-M-S" in meths:
exp_type="LP-PI-M-S"
else:
print("experiment type not supported yet ")
break
araiblock,field=pmag.sortmwarai(datablock,exp_type)
first_Z=araiblock[0]
first_I=araiblock[1]
GammaChecks=araiblock[-3]
ThetaChecks=araiblock[-2]
DeltaChecks=araiblock[-1]
if len(first_Z)<3:
if backup==0:
specimen+=1
if pmagplotlib.verbose:
print('skipping specimen - moving forward ', s)
else:
specimen-=1
if pmagplotlib.verbose:
print('skipping specimen - moving backward ', s)
else:
backup=0
zijdblock,units=pmag.find_dmag_rec(s,meas_data)
if exp_type=="LP-PI-M-D":
recnum=0
print("ZStep Watts Dec Inc Int")
for plotrec in zijdblock:
if pmagplotlib.verbose:
print('%i %i %7.1f %7.1f %8.3e ' % (recnum,plotrec[0],plotrec[1],plotrec[2],plotrec[3]))
recnum += 1
recnum = 1
if GammaChecks!="":
print("IStep Watts Gamma")
for gamma in GammaChecks:
if pmagplotlib.verbose: print('%i %i %7.1f ' % (recnum, gamma[0],gamma[1]))
recnum += 1
if exp_type=="LP-PI-M-S":
if pmagplotlib.verbose:
print("IStep Watts Theta")
kk=0
for theta in ThetaChecks:
kk+=1
print('%i %i %7.1f ' % (kk,theta[0],theta[1]))
if pmagplotlib.verbose:
print("Watts Delta")
for delta in DeltaChecks:
print('%i %7.1f ' % (delta[0],delta[1]))
pmagplotlib.plot_arai_zij(AZD,araiblock,zijdblock,s,units[0])
if inspec !="":
if pmagplotlib.verbose: print('Looking up saved interpretation....')
found = 0
for k in range(len(PriorRecs)):
try:
if PriorRecs[k]["er_specimen_name"]==s:
found =1
CurrRec.append(PriorRecs[k])
for j in range(len(araiblock[0])):
if float(araiblock[0][j][0])==float(PriorRecs[k]["measurement_step_min"]):start=j
if float(araiblock[0][j][0])==float(PriorRecs[k]["measurement_step_max"]):end=j
pars,errcode=pmag.PintPars(araiblock,zijdblock,start,end)
pars['measurement_step_unit']="J"
del PriorRecs[k] # put in CurrRec, take out of PriorRecs
if errcode!=1:
pars["specimen_lab_field_dc"]=field
pars["specimen_int"]=-1*field*pars["specimen_b"]
pars["er_specimen_name"]=s
if pmagplotlib.verbose:
print('Saved interpretation: ')
pars=pmag.scoreit(pars,PmagSpecRec,accept,'',0)
pmagplotlib.plot_b(AZD,araiblock,zijdblock,pars)
if len(trmblock)>2:
blab=field
best=pars["specimen_int"]
Bs,TRMs=[],[]
for trec in trmblock:
Bs.append(float(trec['treatment_dc_field']))
TRMs.append(float(trec['measurement_magn_moment']))
NLpars=nlt.NLtrm(Bs,TRMs,best,blab,0) # calculate best fit parameters through TRM acquisition data, and get new banc
Mp,Bp=[],[]
for k in range(int(max(Bs)*1e6)):
Bp.append(float(k)*1e-6)
npred=nlt.TRM(Bp[-1],NLpars['xopt'][0],NLpars['xopt'][1]) # predicted NRM for this field
Mp.append(npred)
pmagplotlib.plot_trm(AZD['MRM'],Bs,TRMs,Bp,Mp,NLpars,trec['magic_experiment_name'])
print(npred)
print('Banc= ',float(NLpars['banc'])*1e6)
if pmagplotlib.verbose:
print('Banc= ',float(NLpars['banc'])*1e6)
pmagplotlib.draw_figs(AZD)
else:
print('error on specimen ',s)
except:
pass
if pmagplotlib.verbose and found==0: print(' None found :( ')
if spc!="":
if BEG!="":
pars,errcode=pmag.PintPars(araiblock,zijdblock,BEG,END)
pars['measurement_step_unit']="J"
pars["specimen_lab_field_dc"]=field
pars["specimen_int"]=-1*field*pars["specimen_b"]
pars["er_specimen_name"]=s
pars['specimen_grade']='' # ungraded
pmagplotlib.plot_b(AZD,araiblock,zijdblock,pars)
if len(trmblock)>2:
if inlt==0:
donlt()
inlt=1
blab=field
best=pars["specimen_int"]
Bs,TRMs=[],[]
for trec in trmblock:
Bs.append(float(trec['treatment_dc_field']))
TRMs.append(float(trec['measurement_magn_moment']))
NLpars=nlt.NLtrm(Bs,TRMs,best,blab,0) # calculate best fit parameters through TRM acquisition data, and get new banc
#
Mp,Bp=[],[]
for k in range(int(max(Bs)*1e6)):
Bp.append(float(k)*1e-6)
npred=nlt.TRM(Bp[-1],NLpars['xopt'][0],NLpars['xopt'][1]) # predicted NRM for this field
files={}
for key in list(AZD.keys()):
files[key]=s+'_'+key+fmt
pmagplotlib.save_plots(AZD,files)
sys.exit()
if plots==0:
ans='b'
while ans != "":
print("""
s[a]ve plot, set [b]ounds for calculation, [d]elete current interpretation, [p]revious, [s]ample, [q]uit:
""")
ans=input('Return for next specimen \n')
if ans=="":
specimen +=1
if ans=="d":
save_redo(PriorRecs,inspec)
CurrRec=[]
pmagplotlib.plot_arai_zij(AZD,araiblock,zijdblock,s,units[0])
pmagplotlib.draw_figs(AZD)
if ans=='a':
files={}
for key in list(AZD.keys()):
files[key]=s+'_'+key+fmt
pmagplotlib.save_plots(AZD,files)
ans=""
if ans=='q':
print("Good bye")
sys.exit()
if ans=='p':
specimen =specimen -1
backup = 1
ans=""
if ans=='s':
keepon=1
spec=input('Enter desired specimen name (or first part there of): ')
while keepon==1:
try:
specimen =sids.index(spec)
keepon=0
except:
tmplist=[]
for qq in range(len(sids)):
if spec in sids[qq]:tmplist.append(sids[qq])
print(specimen," not found, but this was: ")
print(tmplist)
spec=input('Select one or try again\n ')
ans=""
if ans=='b':
if end==0 or end >=len(araiblock[0]):end=len(araiblock[0])-1
GoOn=0
while GoOn==0:
print('Enter index of first point for calculation: ','[',start,']')
answer=input('return to keep default ')
if answer != "":start=int(answer)
print('Enter index of last point for calculation: ','[',end,']')
answer=input('return to keep default ')
if answer != "":
end=int(answer)
if start >=0 and start <len(araiblock[0])-2 and end >0 and end <len(araiblock[0]) and start<end:
GoOn=1
else:
print("Bad endpoints - try again! ")
start,end=0,len(araiblock)
s=sids[specimen]
pars,errcode=pmag.PintPars(araiblock,zijdblock,start,end)
pars['measurement_step_unit']="J"
pars["specimen_lab_field_dc"]=field
pars["specimen_int"]=-1*field*pars["specimen_b"]
pars["er_specimen_name"]=s
pars=pmag.scoreit(pars,PmagSpecRec,accept,'',0)
PmagSpecRec["measurement_step_min"]='%8.3e' % (pars["measurement_step_min"])
PmagSpecRec["measurement_step_max"]='%8.3e' % (pars["measurement_step_max"])
PmagSpecRec["measurement_step_unit"]="J"
PmagSpecRec["specimen_int_n"]='%i'%(pars["specimen_int_n"])
PmagSpecRec["specimen_lab_field_dc"]='%8.3e'%(pars["specimen_lab_field_dc"])
PmagSpecRec["specimen_int"]='%8.3e '%(pars["specimen_int"])
PmagSpecRec["specimen_b"]='%5.3f '%(pars["specimen_b"])
PmagSpecRec["specimen_q"]='%5.1f '%(pars["specimen_q"])
PmagSpecRec["specimen_f"]='%5.3f '%(pars["specimen_f"])
PmagSpecRec["specimen_fvds"]='%5.3f'%(pars["specimen_fvds"])
PmagSpecRec["specimen_b_beta"]='%5.3f'%(pars["specimen_b_beta"])
PmagSpecRec["specimen_int_mad"]='%7.1f'%(pars["specimen_int_mad"])
PmagSpecRec["specimen_Z"]='%7.1f'%(pars["specimen_Z"])
if pars["method_codes"]!="":
tmpcodes=pars["method_codes"].split(":")
for t in tmpcodes:
if t.strip() not in methcodes:methcodes.append(t.strip())
PmagSpecRec["specimen_dec"]='%7.1f'%(pars["specimen_dec"])
PmagSpecRec["specimen_inc"]='%7.1f'%(pars["specimen_inc"])
PmagSpecRec["specimen_tilt_correction"]='-1'
PmagSpecRec["specimen_direction_type"]='l'
PmagSpecRec["direction_type"]='l' # this is redudant, but helpful - won't be imported
PmagSpecRec["specimen_dang"]='%7.1f '%(pars["specimen_dang"])
PmagSpecRec["specimen_drats"]='%7.1f '%(pars["specimen_drats"])
PmagSpecRec["specimen_int_ptrm_n"]='%i '%(pars["specimen_int_ptrm_n"])
PmagSpecRec["specimen_rsc"]='%6.4f '%(pars["specimen_rsc"])
PmagSpecRec["specimen_md"]='%i '%(int(pars["specimen_md"]))
if PmagSpecRec["specimen_md"]=='-1':PmagSpecRec["specimen_md"]=""
PmagSpecRec["specimen_b_sigma"]='%5.3f '%(pars["specimen_b_sigma"])
if "IE-TT" not in methcodes:methcodes.append("IE-TT")
methods=""
for meth in methcodes:
methods=methods+meth+":"
PmagSpecRec["magic_method_codes"]=methods[:-1]
PmagSpecRec["specimen_description"]=comment
PmagSpecRec["magic_software_packages"]=version_num
pmagplotlib.plot_arai_zij(AZD,araiblock,zijdblock,s,units[0])
pmagplotlib.plot_b(AZD,araiblock,zijdblock,pars)
if len(trmblock)>2:
blab=field
best=pars["specimen_int"]
Bs,TRMs=[],[]
for trec in trmblock:
Bs.append(float(trec['treatment_dc_field']))
TRMs.append(float(trec['measurement_magn_moment']))
NLpars=nlt.NLtrm(Bs,TRMs,best,blab,0) # calculate best fit parameters through TRM acquisition data, and get new banc
Mp,Bp=[],[]
for k in range(int(max(Bs)*1e6)):
Bp.append(float(k)*1e-6)
npred=nlt.TRM(Bp[-1],NLpars['xopt'][0],NLpars['xopt'][1]) # predicted NRM for this field
Mp.append(npred)
pmagplotlib.plot_trm(AZD['MRM'],Bs,TRMs,Bp,Mp,NLpars,trec['magic_experiment_name'])
print('Banc= ',float(NLpars['banc'])*1e6)
pmagplotlib.draw_figs(AZD)
pars["specimen_lab_field_dc"]=field
pars["specimen_int"]=-1*field*pars["specimen_b"]
saveit=input("Save this interpretation? [y]/n \n")
if saveit!='n':
specimen+=1
PriorRecs.append(PmagSpecRec) # put back an interpretation
save_redo(PriorRecs,inspec)
ans=""
else:
specimen+=1
if fmt != ".pmag":
basename=s+'_microwave'+fmt
files={}
for key in list(AZD.keys()):
files[key]=s+'_'+key+fmt
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
titles={}
titles['deremag']='DeReMag Plot'
titles['zijd']='Zijderveld Plot'
titles['arai']='Arai Plot'
AZD = pmagplotlib.add_borders(AZD,titles,black,purple)
pmagplotlib.save_plots(AZD,files)
# pmagplotlib.combineFigs(s,files,3)
if len(CurrRec)>0:
for rec in CurrRec:
PriorRecs.append(rec)
CurrRec=[]
if plots!=1:
ans=input(" Save last plot? 1/[0] ")
if ans=="1":
if fmt != ".pmag":
files={}
for key in list(AZD.keys()):
files[key]=s+'_'+key+fmt
pmagplotlib.save_plots(AZD,files)
if len(CurrRec)>0:PriorRecs.append(CurrRec) # put back an interpretation
if len(PriorRecs)>0:
save_redo(PriorRecs,inspec)
print('Updated interpretations saved in ',inspec)
if pmagplotlib.verbose:
print("Good bye") | python | def main():
"""
NAME
microwave_magic.py
DESCRIPTION
plots microwave paleointensity data, allowing interactive setting of bounds.
Saves and reads interpretations
from a pmag_specimen formatted table, default: microwave_specimens.txt
SYNTAX
microwave_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f MEAS, set magic_measurements input file
-fsp PRIOR, set pmag_specimen prior interpretations file
-fcr CRIT, set criteria file for grading.
-fmt [svg,png,jpg], format for images - default is svg
-sav, saves plots with out review (default format)
-spc SPEC, plots single specimen SPEC, saves plot with specified format
with optional -b bounds adn quits
-b BEG END: sets bounds for calculation
BEG: starting step for slope calculation
END: ending step for slope calculation
DEFAULTS
MEAS: magic_measurements.txt
CRIT: NONE
PRIOR: microwave_specimens.txt
OUTPUT
figures:
ALL: numbers refer to temperature steps in command line window
1) Arai plot: closed circles are zero-field first/infield
open circles are infield first/zero-field
triangles are pTRM checks
squares are pTRM tail checks
VDS is vector difference sum
diamonds are bounds for interpretation
2) Zijderveld plot: closed (open) symbols are X-Y (X-Z) planes
X rotated to NRM direction
3) (De/Re)Magnetization diagram:
circles are NRM remaining
squares are pTRM gained
command line window:
list is: temperature step numbers, power (J), Dec, Inc, Int (units of magic_measuements)
list of possible commands: type letter followed by return to select option
saving of plots creates .svg format files with specimen_name, plot type as name
"""
#
# initializations
#
meas_file,critout,inspec="magic_measurements.txt","","microwave_specimens.txt"
inlt=0
version_num=pmag.get_version()
Tinit,DCZ,field,first_save=0,0,-1,1
user,comment="",''
ans,specimen,recnum,start,end=0,0,0,0,0
plots,pmag_out,samp_file,style=0,"","","svg"
fmt='.'+style
#
# default acceptance criteria
#
accept_keys=['specimen_int_ptrm_n','specimen_md','specimen_fvds','specimen_b_beta','specimen_dang','specimen_drats','specimen_Z']
accept={}
accept['specimen_int_ptrm_n']=2
accept['specimen_md']=10
accept['specimen_fvds']=0.35
accept['specimen_b_beta']=.1
accept['specimen_int_mad']=7
accept['specimen_dang']=10
accept['specimen_drats']=10
accept['specimen_Z']=10
#
# parse command line options
#
spc,BEG,END="","",""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
meas_file=sys.argv[ind+1]
if '-fsp' in sys.argv:
ind=sys.argv.index('-fsp')
inspec=sys.argv[ind+1]
if '-fcr' in sys.argv:
ind=sys.argv.index('-fcr')
critout=sys.argv[ind+1]
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt='.'+sys.argv[ind+1]
if '-spc' in sys.argv:
ind=sys.argv.index('-spc')
spc=sys.argv[ind+1]
if '-b' in sys.argv:
ind=sys.argv.index('-b')
BEG=int(sys.argv[ind+1])
END=int(sys.argv[ind+2])
if critout!="":
crit_data,file_type=pmag.magic_read(critout)
if pmagplotlib.verbose:
print("Acceptance criteria read in from ", critout)
accept={}
accept['specimen_int_ptrm_n']=2.0
for critrec in crit_data:
if critrec["pmag_criteria_code"]=="IE-SPEC":
for key in accept_keys:
if key not in list(critrec.keys()):
accept[key]=-1
else:
accept[key]=float(critrec[key])
try:
open(inspec,'r')
PriorRecs,file_type=pmag.magic_read(inspec)
if file_type != 'pmag_specimens':
print(file_type)
print(file_type,inspec," is not a valid pmag_specimens file ")
sys.exit()
for rec in PriorRecs:
if 'magic_software_packages' not in list(rec.keys()):rec['magic_software_packages']=""
except IOError:
PriorRecs=[]
if pmagplotlib.verbose:print("starting new specimen interpretation file: ",inspec)
meas_data,file_type=pmag.magic_read(meas_file)
if file_type != 'magic_measurements':
print(file_type)
print(file_type,"This is not a valid magic_measurements file ")
sys.exit()
backup=0
# define figure numbers for arai, zijderveld and
# de-,re-magization diagrams
AZD={}
AZD['deremag'], AZD['zijd'],AZD['arai'],AZD['eqarea']=1,2,3,4
pmagplotlib.plot_init(AZD['arai'],4,4)
pmagplotlib.plot_init(AZD['zijd'],4,4)
pmagplotlib.plot_init(AZD['deremag'],4,4)
pmagplotlib.plot_init(AZD['eqarea'],4,4)
#
#
#
# get list of unique specimen names
#
CurrRec=[]
sids=pmag.get_specs(meas_data)
# get plots for specimen s - default is just to step through arai diagrams
#
if spc!="": specimen =sids.index(spc)
while specimen < len(sids):
methcodes=[]
if pmagplotlib.verbose and spc!="":
print(sids[specimen],specimen+1, 'of ', len(sids))
MeasRecs=[]
s=sids[specimen]
datablock,trmblock=[],[]
PmagSpecRec={}
PmagSpecRec["er_analyst_mail_names"]=user
PmagSpecRec["specimen_correction"]='u'
#
# find the data from the meas_data file for this specimen
#
for rec in meas_data:
if rec["er_specimen_name"]==s:
MeasRecs.append(rec)
methods=rec["magic_method_codes"].split(":")
meths=[]
for meth in methods:
meths.append(meth.strip()) # take off annoying spaces
methods=""
for meth in meths:
if meth.strip() not in methcodes and "LP-" in meth:methcodes.append(meth.strip())
methods=methods+meth+":"
methods=methods[:-1]
rec["magic_method_codes"]=methods
if "LP-PI-M" in meths: datablock.append(rec)
if "LP-MRM" in meths: trmblock.append(rec)
if len(trmblock)>2 and inspec!="":
if Tinit==0:
Tinit=1
AZD['MRM']=4
pmagplotlib.plot_init(AZD['MRM'],4,4)
elif Tinit==1:
pmagplotlib.clearFIG(AZD['MRM'])
if len(datablock) <4:
if backup==0:
specimen+=1
if pmagplotlib.verbose:
print('skipping specimen - moving forward ', s)
else:
specimen-=1
if pmagplotlib.verbose:
print('skipping specimen - moving backward ', s)
#
# collect info for the PmagSpecRec dictionary
#
else:
rec=datablock[0]
PmagSpecRec["er_citation_names"]="This study"
PmagSpecRec["er_specimen_name"]=s
PmagSpecRec["er_sample_name"]=rec["er_sample_name"]
PmagSpecRec["er_site_name"]=rec["er_site_name"]
PmagSpecRec["er_location_name"]=rec["er_location_name"]
if "magic_instrument_codes" not in list(rec.keys()):rec["magic_instrument_codes"]=""
PmagSpecRec["magic_instrument_codes"]=rec["magic_instrument_codes"]
PmagSpecRec["measurement_step_unit"]="J"
if "magic_experiment_name" not in list(rec.keys()):
rec["magic_experiment_name"]=""
else:
PmagSpecRec["magic_experiment_names"]=rec["magic_experiment_name"]
meths=rec["magic_method_codes"].split(':')
# sort data into types
if "LP-PI-M-D" in meths: # this is a double heating experiment
exp_type="LP-PI-M-D"
elif "LP-PI-M-S" in meths:
exp_type="LP-PI-M-S"
else:
print("experiment type not supported yet ")
break
araiblock,field=pmag.sortmwarai(datablock,exp_type)
first_Z=araiblock[0]
first_I=araiblock[1]
GammaChecks=araiblock[-3]
ThetaChecks=araiblock[-2]
DeltaChecks=araiblock[-1]
if len(first_Z)<3:
if backup==0:
specimen+=1
if pmagplotlib.verbose:
print('skipping specimen - moving forward ', s)
else:
specimen-=1
if pmagplotlib.verbose:
print('skipping specimen - moving backward ', s)
else:
backup=0
zijdblock,units=pmag.find_dmag_rec(s,meas_data)
if exp_type=="LP-PI-M-D":
recnum=0
print("ZStep Watts Dec Inc Int")
for plotrec in zijdblock:
if pmagplotlib.verbose:
print('%i %i %7.1f %7.1f %8.3e ' % (recnum,plotrec[0],plotrec[1],plotrec[2],plotrec[3]))
recnum += 1
recnum = 1
if GammaChecks!="":
print("IStep Watts Gamma")
for gamma in GammaChecks:
if pmagplotlib.verbose: print('%i %i %7.1f ' % (recnum, gamma[0],gamma[1]))
recnum += 1
if exp_type=="LP-PI-M-S":
if pmagplotlib.verbose:
print("IStep Watts Theta")
kk=0
for theta in ThetaChecks:
kk+=1
print('%i %i %7.1f ' % (kk,theta[0],theta[1]))
if pmagplotlib.verbose:
print("Watts Delta")
for delta in DeltaChecks:
print('%i %7.1f ' % (delta[0],delta[1]))
pmagplotlib.plot_arai_zij(AZD,araiblock,zijdblock,s,units[0])
if inspec !="":
if pmagplotlib.verbose: print('Looking up saved interpretation....')
found = 0
for k in range(len(PriorRecs)):
try:
if PriorRecs[k]["er_specimen_name"]==s:
found =1
CurrRec.append(PriorRecs[k])
for j in range(len(araiblock[0])):
if float(araiblock[0][j][0])==float(PriorRecs[k]["measurement_step_min"]):start=j
if float(araiblock[0][j][0])==float(PriorRecs[k]["measurement_step_max"]):end=j
pars,errcode=pmag.PintPars(araiblock,zijdblock,start,end)
pars['measurement_step_unit']="J"
del PriorRecs[k] # put in CurrRec, take out of PriorRecs
if errcode!=1:
pars["specimen_lab_field_dc"]=field
pars["specimen_int"]=-1*field*pars["specimen_b"]
pars["er_specimen_name"]=s
if pmagplotlib.verbose:
print('Saved interpretation: ')
pars=pmag.scoreit(pars,PmagSpecRec,accept,'',0)
pmagplotlib.plot_b(AZD,araiblock,zijdblock,pars)
if len(trmblock)>2:
blab=field
best=pars["specimen_int"]
Bs,TRMs=[],[]
for trec in trmblock:
Bs.append(float(trec['treatment_dc_field']))
TRMs.append(float(trec['measurement_magn_moment']))
NLpars=nlt.NLtrm(Bs,TRMs,best,blab,0) # calculate best fit parameters through TRM acquisition data, and get new banc
Mp,Bp=[],[]
for k in range(int(max(Bs)*1e6)):
Bp.append(float(k)*1e-6)
npred=nlt.TRM(Bp[-1],NLpars['xopt'][0],NLpars['xopt'][1]) # predicted NRM for this field
Mp.append(npred)
pmagplotlib.plot_trm(AZD['MRM'],Bs,TRMs,Bp,Mp,NLpars,trec['magic_experiment_name'])
print(npred)
print('Banc= ',float(NLpars['banc'])*1e6)
if pmagplotlib.verbose:
print('Banc= ',float(NLpars['banc'])*1e6)
pmagplotlib.draw_figs(AZD)
else:
print('error on specimen ',s)
except:
pass
if pmagplotlib.verbose and found==0: print(' None found :( ')
if spc!="":
if BEG!="":
pars,errcode=pmag.PintPars(araiblock,zijdblock,BEG,END)
pars['measurement_step_unit']="J"
pars["specimen_lab_field_dc"]=field
pars["specimen_int"]=-1*field*pars["specimen_b"]
pars["er_specimen_name"]=s
pars['specimen_grade']='' # ungraded
pmagplotlib.plot_b(AZD,araiblock,zijdblock,pars)
if len(trmblock)>2:
if inlt==0:
donlt()
inlt=1
blab=field
best=pars["specimen_int"]
Bs,TRMs=[],[]
for trec in trmblock:
Bs.append(float(trec['treatment_dc_field']))
TRMs.append(float(trec['measurement_magn_moment']))
NLpars=nlt.NLtrm(Bs,TRMs,best,blab,0) # calculate best fit parameters through TRM acquisition data, and get new banc
#
Mp,Bp=[],[]
for k in range(int(max(Bs)*1e6)):
Bp.append(float(k)*1e-6)
npred=nlt.TRM(Bp[-1],NLpars['xopt'][0],NLpars['xopt'][1]) # predicted NRM for this field
files={}
for key in list(AZD.keys()):
files[key]=s+'_'+key+fmt
pmagplotlib.save_plots(AZD,files)
sys.exit()
if plots==0:
ans='b'
while ans != "":
print("""
s[a]ve plot, set [b]ounds for calculation, [d]elete current interpretation, [p]revious, [s]ample, [q]uit:
""")
ans=input('Return for next specimen \n')
if ans=="":
specimen +=1
if ans=="d":
save_redo(PriorRecs,inspec)
CurrRec=[]
pmagplotlib.plot_arai_zij(AZD,araiblock,zijdblock,s,units[0])
pmagplotlib.draw_figs(AZD)
if ans=='a':
files={}
for key in list(AZD.keys()):
files[key]=s+'_'+key+fmt
pmagplotlib.save_plots(AZD,files)
ans=""
if ans=='q':
print("Good bye")
sys.exit()
if ans=='p':
specimen =specimen -1
backup = 1
ans=""
if ans=='s':
keepon=1
spec=input('Enter desired specimen name (or first part there of): ')
while keepon==1:
try:
specimen =sids.index(spec)
keepon=0
except:
tmplist=[]
for qq in range(len(sids)):
if spec in sids[qq]:tmplist.append(sids[qq])
print(specimen," not found, but this was: ")
print(tmplist)
spec=input('Select one or try again\n ')
ans=""
if ans=='b':
if end==0 or end >=len(araiblock[0]):end=len(araiblock[0])-1
GoOn=0
while GoOn==0:
print('Enter index of first point for calculation: ','[',start,']')
answer=input('return to keep default ')
if answer != "":start=int(answer)
print('Enter index of last point for calculation: ','[',end,']')
answer=input('return to keep default ')
if answer != "":
end=int(answer)
if start >=0 and start <len(araiblock[0])-2 and end >0 and end <len(araiblock[0]) and start<end:
GoOn=1
else:
print("Bad endpoints - try again! ")
start,end=0,len(araiblock)
s=sids[specimen]
pars,errcode=pmag.PintPars(araiblock,zijdblock,start,end)
pars['measurement_step_unit']="J"
pars["specimen_lab_field_dc"]=field
pars["specimen_int"]=-1*field*pars["specimen_b"]
pars["er_specimen_name"]=s
pars=pmag.scoreit(pars,PmagSpecRec,accept,'',0)
PmagSpecRec["measurement_step_min"]='%8.3e' % (pars["measurement_step_min"])
PmagSpecRec["measurement_step_max"]='%8.3e' % (pars["measurement_step_max"])
PmagSpecRec["measurement_step_unit"]="J"
PmagSpecRec["specimen_int_n"]='%i'%(pars["specimen_int_n"])
PmagSpecRec["specimen_lab_field_dc"]='%8.3e'%(pars["specimen_lab_field_dc"])
PmagSpecRec["specimen_int"]='%8.3e '%(pars["specimen_int"])
PmagSpecRec["specimen_b"]='%5.3f '%(pars["specimen_b"])
PmagSpecRec["specimen_q"]='%5.1f '%(pars["specimen_q"])
PmagSpecRec["specimen_f"]='%5.3f '%(pars["specimen_f"])
PmagSpecRec["specimen_fvds"]='%5.3f'%(pars["specimen_fvds"])
PmagSpecRec["specimen_b_beta"]='%5.3f'%(pars["specimen_b_beta"])
PmagSpecRec["specimen_int_mad"]='%7.1f'%(pars["specimen_int_mad"])
PmagSpecRec["specimen_Z"]='%7.1f'%(pars["specimen_Z"])
if pars["method_codes"]!="":
tmpcodes=pars["method_codes"].split(":")
for t in tmpcodes:
if t.strip() not in methcodes:methcodes.append(t.strip())
PmagSpecRec["specimen_dec"]='%7.1f'%(pars["specimen_dec"])
PmagSpecRec["specimen_inc"]='%7.1f'%(pars["specimen_inc"])
PmagSpecRec["specimen_tilt_correction"]='-1'
PmagSpecRec["specimen_direction_type"]='l'
PmagSpecRec["direction_type"]='l' # this is redudant, but helpful - won't be imported
PmagSpecRec["specimen_dang"]='%7.1f '%(pars["specimen_dang"])
PmagSpecRec["specimen_drats"]='%7.1f '%(pars["specimen_drats"])
PmagSpecRec["specimen_int_ptrm_n"]='%i '%(pars["specimen_int_ptrm_n"])
PmagSpecRec["specimen_rsc"]='%6.4f '%(pars["specimen_rsc"])
PmagSpecRec["specimen_md"]='%i '%(int(pars["specimen_md"]))
if PmagSpecRec["specimen_md"]=='-1':PmagSpecRec["specimen_md"]=""
PmagSpecRec["specimen_b_sigma"]='%5.3f '%(pars["specimen_b_sigma"])
if "IE-TT" not in methcodes:methcodes.append("IE-TT")
methods=""
for meth in methcodes:
methods=methods+meth+":"
PmagSpecRec["magic_method_codes"]=methods[:-1]
PmagSpecRec["specimen_description"]=comment
PmagSpecRec["magic_software_packages"]=version_num
pmagplotlib.plot_arai_zij(AZD,araiblock,zijdblock,s,units[0])
pmagplotlib.plot_b(AZD,araiblock,zijdblock,pars)
if len(trmblock)>2:
blab=field
best=pars["specimen_int"]
Bs,TRMs=[],[]
for trec in trmblock:
Bs.append(float(trec['treatment_dc_field']))
TRMs.append(float(trec['measurement_magn_moment']))
NLpars=nlt.NLtrm(Bs,TRMs,best,blab,0) # calculate best fit parameters through TRM acquisition data, and get new banc
Mp,Bp=[],[]
for k in range(int(max(Bs)*1e6)):
Bp.append(float(k)*1e-6)
npred=nlt.TRM(Bp[-1],NLpars['xopt'][0],NLpars['xopt'][1]) # predicted NRM for this field
Mp.append(npred)
pmagplotlib.plot_trm(AZD['MRM'],Bs,TRMs,Bp,Mp,NLpars,trec['magic_experiment_name'])
print('Banc= ',float(NLpars['banc'])*1e6)
pmagplotlib.draw_figs(AZD)
pars["specimen_lab_field_dc"]=field
pars["specimen_int"]=-1*field*pars["specimen_b"]
saveit=input("Save this interpretation? [y]/n \n")
if saveit!='n':
specimen+=1
PriorRecs.append(PmagSpecRec) # put back an interpretation
save_redo(PriorRecs,inspec)
ans=""
else:
specimen+=1
if fmt != ".pmag":
basename=s+'_microwave'+fmt
files={}
for key in list(AZD.keys()):
files[key]=s+'_'+key+fmt
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
titles={}
titles['deremag']='DeReMag Plot'
titles['zijd']='Zijderveld Plot'
titles['arai']='Arai Plot'
AZD = pmagplotlib.add_borders(AZD,titles,black,purple)
pmagplotlib.save_plots(AZD,files)
# pmagplotlib.combineFigs(s,files,3)
if len(CurrRec)>0:
for rec in CurrRec:
PriorRecs.append(rec)
CurrRec=[]
if plots!=1:
ans=input(" Save last plot? 1/[0] ")
if ans=="1":
if fmt != ".pmag":
files={}
for key in list(AZD.keys()):
files[key]=s+'_'+key+fmt
pmagplotlib.save_plots(AZD,files)
if len(CurrRec)>0:PriorRecs.append(CurrRec) # put back an interpretation
if len(PriorRecs)>0:
save_redo(PriorRecs,inspec)
print('Updated interpretations saved in ',inspec)
if pmagplotlib.verbose:
print("Good bye") | NAME
microwave_magic.py
DESCRIPTION
plots microwave paleointensity data, allowing interactive setting of bounds.
Saves and reads interpretations
from a pmag_specimen formatted table, default: microwave_specimens.txt
SYNTAX
microwave_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f MEAS, set magic_measurements input file
-fsp PRIOR, set pmag_specimen prior interpretations file
-fcr CRIT, set criteria file for grading.
-fmt [svg,png,jpg], format for images - default is svg
-sav, saves plots with out review (default format)
-spc SPEC, plots single specimen SPEC, saves plot with specified format
with optional -b bounds adn quits
-b BEG END: sets bounds for calculation
BEG: starting step for slope calculation
END: ending step for slope calculation
DEFAULTS
MEAS: magic_measurements.txt
CRIT: NONE
PRIOR: microwave_specimens.txt
OUTPUT
figures:
ALL: numbers refer to temperature steps in command line window
1) Arai plot: closed circles are zero-field first/infield
open circles are infield first/zero-field
triangles are pTRM checks
squares are pTRM tail checks
VDS is vector difference sum
diamonds are bounds for interpretation
2) Zijderveld plot: closed (open) symbols are X-Y (X-Z) planes
X rotated to NRM direction
3) (De/Re)Magnetization diagram:
circles are NRM remaining
squares are pTRM gained
command line window:
list is: temperature step numbers, power (J), Dec, Inc, Int (units of magic_measuements)
list of possible commands: type letter followed by return to select option
saving of plots creates .svg format files with specimen_name, plot type as name | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/microwave_magic.py#L13-L513 |
PmagPy/PmagPy | programs/deprecated/odp_spn_magic.py | main | def main():
"""
NAME
odp_spn_magic.py
DESCRIPTION
converts ODP's Molspin's .spn format files to magic_measurements format files
SYNTAX
odp_spn_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify .spn format input file, required
-F FILE: specify output file, default is magic_measurements.txt
-LP [AF, T, A FIELD, I N] specify one (FIELD is DC field in uT)
AF: af demag
T: thermal
A: anhysteretic remanence
I: isothermal remanence
N: NRM only
-v vol , specify volume used in MolSpin program in cm^3
-A: don't average replicate measurements
INPUT
Best to put separate experiments (all AF, thermal, ARM, etc. files in
seperate .spn files
Format of .spn files:
header with:
Leg Sit H Cor T Sec Top Bot Dec Inc Intens Demag. Stage
followed by data
Leg: Expedition number
Sit: is ODP Site
H: Hole letter
Cor: Core number
T: Core type (R,H,X,etc.)
Sec: section number
top: top of sample interval
bot: bottom of sample interval
Intens in mA/m
Demag Stage:
XXX T in Centigrade
XXX AF in mT
"""
# initialize some stuff
noave=0
methcode,inst="",""
phi,theta,peakfield,labfield=0,0,0,0
dec=[315,225,180,135,45,90,270,270,270,90,180,180,0,0,0]
inc=[0,0,0,0,0,-45,-45,0,45,45,45,-45,-90,-45,45]
missing=1
demag="N"
er_location_name=""
citation='This study'
args=sys.argv
methcode="LP-NO"
trm=0
irm=0
dc="0"
dir_path='.'
#
# get command line arguments
#
meas_file="magic_measurements.txt"
user=""
if "-WD" in args:
ind=args.index("-WD")
dir_path=args[ind+1]
samp_file=dir_path+'/'+'er_samples.txt'
if "-h" in args:
print(main.__doc__)
sys.exit()
if '-F' in args:
ind=args.index("-F")
meas_file=args[ind+1]
if '-f' in args:
ind=args.index("-f")
mag_file=dir_path+'/'+args[ind+1]
try:
input=open(mag_file,'r')
except:
print("bad mag file name")
sys.exit()
else:
print("spn_file field is required option")
print(main.__doc__)
sys.exit()
vol=10.5e-6 # default for spinner program
if "-V" in args:
ind=args.index("-V")
vol=float(args[ind+1])*1e-6 # convert volume to m^3
if "-A" in args: noave=1
if '-LP' in args:
ind=args.index("-LP")
codelist=args[ind+1]
codes=codelist.split(':')
if "AF" in codes:
demag='AF'
methcode="LT-AF-Z"
if "T" in codes:
demag="T"
methcode="LT-T-Z"
if "I" in codes:
methcode="LP-IRM"
if "A" in codes:
methcode="LT-AF-I"
dc='%10.3e'%(1e-3*float(args[ind+1]))
MagRecs=[]
version_num=pmag.get_version()
meas_file=dir_path+'/'+meas_file
for line in input.readlines():
instcode="ODP-MSPN"
rec=line.split()
if len(rec)>2 and "Leg" not in line:
MagRec={}
MagRec['er_expedition_name']=rec[0]
MagRec['er_location_name']=rec[1]+rec[2]
MagRec["er_specimen_name"]=rec[0]+'-'+'U'+rec[1]+rec[2].upper()+"-"+rec[3]+rec[4].upper()+'-'+rec[5]+'-'+'W'+'-'+rec[6]
MagRec["er_site_name"]=MagRec['er_specimen_name']
MagRec["er_sample_name"]=MagRec['er_specimen_name']
MagRec['magic_software_packages']=version_num
MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["treatment_ac_field"]='0'
MagRec["treatment_dc_field"]=dc
MagRec["treatment_dc_field_phi"]='0'
MagRec["treatment_dc_field_theta"]='0'
meas_type="LT-NO"
if float(rec[11])==0:
pass
elif demag=="AF":
MagRec["treatment_ac_field"]='%8.3e' %(float(rec[11])*1e-3) # peak field in tesla
meas_type="LT-AF-Z"
MagRec["treatment_dc_field"]='0'
else:
MagRec["treatment_temp"]='%8.3e' % (float(rec[11])+273.) # temp in kelvin
meas_type="LT-T-Z"
intens=1e-3*float(rec[10])*vol # convert mA/m to Am^2
MagRec["measurement_magn_moment"]='%10.3e'% (intens)
MagRec["measurement_dec"]=rec[8]
MagRec["measurement_inc"]=rec[9]
MagRec["magic_instrument_codes"]="ODP-MSPN"
MagRec["er_analyst_mail_names"]=user
MagRec["er_citation_names"]=citation
MagRec["magic_method_codes"]=meas_type
MagRec["measurement_flag"]='g'
MagRec["measurement_csd"]=''
MagRec["measurement_number"]='1'
MagRecs.append(MagRec)
MagOuts=pmag.measurements_methods(MagRecs,noave)
pmag.magic_write(meas_file,MagOuts,'magic_measurements')
print("results put in ",meas_file) | python | def main():
"""
NAME
odp_spn_magic.py
DESCRIPTION
converts ODP's Molspin's .spn format files to magic_measurements format files
SYNTAX
odp_spn_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify .spn format input file, required
-F FILE: specify output file, default is magic_measurements.txt
-LP [AF, T, A FIELD, I N] specify one (FIELD is DC field in uT)
AF: af demag
T: thermal
A: anhysteretic remanence
I: isothermal remanence
N: NRM only
-v vol , specify volume used in MolSpin program in cm^3
-A: don't average replicate measurements
INPUT
Best to put separate experiments (all AF, thermal, ARM, etc. files in
seperate .spn files
Format of .spn files:
header with:
Leg Sit H Cor T Sec Top Bot Dec Inc Intens Demag. Stage
followed by data
Leg: Expedition number
Sit: is ODP Site
H: Hole letter
Cor: Core number
T: Core type (R,H,X,etc.)
Sec: section number
top: top of sample interval
bot: bottom of sample interval
Intens in mA/m
Demag Stage:
XXX T in Centigrade
XXX AF in mT
"""
# initialize some stuff
noave=0
methcode,inst="",""
phi,theta,peakfield,labfield=0,0,0,0
dec=[315,225,180,135,45,90,270,270,270,90,180,180,0,0,0]
inc=[0,0,0,0,0,-45,-45,0,45,45,45,-45,-90,-45,45]
missing=1
demag="N"
er_location_name=""
citation='This study'
args=sys.argv
methcode="LP-NO"
trm=0
irm=0
dc="0"
dir_path='.'
#
# get command line arguments
#
meas_file="magic_measurements.txt"
user=""
if "-WD" in args:
ind=args.index("-WD")
dir_path=args[ind+1]
samp_file=dir_path+'/'+'er_samples.txt'
if "-h" in args:
print(main.__doc__)
sys.exit()
if '-F' in args:
ind=args.index("-F")
meas_file=args[ind+1]
if '-f' in args:
ind=args.index("-f")
mag_file=dir_path+'/'+args[ind+1]
try:
input=open(mag_file,'r')
except:
print("bad mag file name")
sys.exit()
else:
print("spn_file field is required option")
print(main.__doc__)
sys.exit()
vol=10.5e-6 # default for spinner program
if "-V" in args:
ind=args.index("-V")
vol=float(args[ind+1])*1e-6 # convert volume to m^3
if "-A" in args: noave=1
if '-LP' in args:
ind=args.index("-LP")
codelist=args[ind+1]
codes=codelist.split(':')
if "AF" in codes:
demag='AF'
methcode="LT-AF-Z"
if "T" in codes:
demag="T"
methcode="LT-T-Z"
if "I" in codes:
methcode="LP-IRM"
if "A" in codes:
methcode="LT-AF-I"
dc='%10.3e'%(1e-3*float(args[ind+1]))
MagRecs=[]
version_num=pmag.get_version()
meas_file=dir_path+'/'+meas_file
for line in input.readlines():
instcode="ODP-MSPN"
rec=line.split()
if len(rec)>2 and "Leg" not in line:
MagRec={}
MagRec['er_expedition_name']=rec[0]
MagRec['er_location_name']=rec[1]+rec[2]
MagRec["er_specimen_name"]=rec[0]+'-'+'U'+rec[1]+rec[2].upper()+"-"+rec[3]+rec[4].upper()+'-'+rec[5]+'-'+'W'+'-'+rec[6]
MagRec["er_site_name"]=MagRec['er_specimen_name']
MagRec["er_sample_name"]=MagRec['er_specimen_name']
MagRec['magic_software_packages']=version_num
MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["treatment_ac_field"]='0'
MagRec["treatment_dc_field"]=dc
MagRec["treatment_dc_field_phi"]='0'
MagRec["treatment_dc_field_theta"]='0'
meas_type="LT-NO"
if float(rec[11])==0:
pass
elif demag=="AF":
MagRec["treatment_ac_field"]='%8.3e' %(float(rec[11])*1e-3) # peak field in tesla
meas_type="LT-AF-Z"
MagRec["treatment_dc_field"]='0'
else:
MagRec["treatment_temp"]='%8.3e' % (float(rec[11])+273.) # temp in kelvin
meas_type="LT-T-Z"
intens=1e-3*float(rec[10])*vol # convert mA/m to Am^2
MagRec["measurement_magn_moment"]='%10.3e'% (intens)
MagRec["measurement_dec"]=rec[8]
MagRec["measurement_inc"]=rec[9]
MagRec["magic_instrument_codes"]="ODP-MSPN"
MagRec["er_analyst_mail_names"]=user
MagRec["er_citation_names"]=citation
MagRec["magic_method_codes"]=meas_type
MagRec["measurement_flag"]='g'
MagRec["measurement_csd"]=''
MagRec["measurement_number"]='1'
MagRecs.append(MagRec)
MagOuts=pmag.measurements_methods(MagRecs,noave)
pmag.magic_write(meas_file,MagOuts,'magic_measurements')
print("results put in ",meas_file) | NAME
odp_spn_magic.py
DESCRIPTION
converts ODP's Molspin's .spn format files to magic_measurements format files
SYNTAX
odp_spn_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify .spn format input file, required
-F FILE: specify output file, default is magic_measurements.txt
-LP [AF, T, A FIELD, I N] specify one (FIELD is DC field in uT)
AF: af demag
T: thermal
A: anhysteretic remanence
I: isothermal remanence
N: NRM only
-v vol , specify volume used in MolSpin program in cm^3
-A: don't average replicate measurements
INPUT
Best to put separate experiments (all AF, thermal, ARM, etc. files in
seperate .spn files
Format of .spn files:
header with:
Leg Sit H Cor T Sec Top Bot Dec Inc Intens Demag. Stage
followed by data
Leg: Expedition number
Sit: is ODP Site
H: Hole letter
Cor: Core number
T: Core type (R,H,X,etc.)
Sec: section number
top: top of sample interval
bot: bottom of sample interval
Intens in mA/m
Demag Stage:
XXX T in Centigrade
XXX AF in mT | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/deprecated/odp_spn_magic.py#L6-L158 |
PmagPy/PmagPy | programs/conversion_scripts/template_magic.py | convert | def convert(**kwargs):
"""
EXAMPLE DOCSTRING for function (you would usually put the discription here)
Parameters
-----------
user : colon delimited list of analysts (default : "")
magfile : input magnetometer file (required)
Returns
-----------
type - Tuple : (True or False indicating if conversion was sucessful, meas_file name written)
"""
#get parameters from kwargs.get(parameter_name, default_value)
user = kwargs.get('user', '')
magfile = kwargs.get('magfile')
#do any extra formating you need to variables here
#open magfile to start reading data
try:
infile=open(magfile,'r')
except Exception as ex:
print(("bad file path: ", magfile))
return False, "bad file path"
#Depending on the dataset you may need to read in all data here put it in a list of dictionaries or something here. If you do just replace the "for line in infile.readlines():" bellow with "for d in data:" where data is the structure you put your data into
#define the lists that hold each line of data for their respective tables
SpecRecs,SampRecs,SiteRecs,LocRecs,MeasRecs=[],[],[],[],[]
#itterate over the contence of the file
for line in infile.readlines():
MeasRec,SpecRec,SampRec,SiteRec,LocRec={},{},{},{},{}
#extract data from line and put it in variables
#fill this line of the Specimen table using above variables
if specimen!="" and specimen not in [x['specimen'] if 'specimen' in list(x.keys()) else "" for x in SpecRecs]:
SpecRec['analysts']=user
SpecRecs.append(SpecRec)
#fill this line of the Sample table using above variables
if sample!="" and sample not in [x['sample'] if 'sample' in list(x.keys()) else "" for x in SampRecs]:
SampRec['analysts']=user
SampRecs.append(SampRec)
#fill this line of the Site table using above variables
if site!="" and site not in [x['site'] if 'site' in list(x.keys()) else "" for x in SiteRecs]:
SiteRec['analysts']=user
SiteRecs.append(SiteRec)
#fill this line of the Location table using above variables
if location!="" and location not in [x['location'] if 'location' in list(x.keys()) else "" for x in LocRecs]:
LocRec['analysts']=user
LocRecs.append(LocRec)
#Fill this line of Meas Table using data in line
MeasRec['analysts']=user
MeasRecs.append(MeasRec)
#close your file object so Python3 doesn't throw an annoying warning
infile.close()
#open a Contribution object
con = cb.Contribution(output_dir_path,read_tables=[])
#Create Magic Tables and add to a contribution
con.add_magic_table_from_data(dtype='specimens', data=SpecRecs)
con.add_magic_table_from_data(dtype='samples', data=SampRecs)
con.add_magic_table_from_data(dtype='sites', data=SiteRecs)
con.add_magic_table_from_data(dtype='locations', data=LocRecs)
MeasOuts=pmag.measurements_methods3(MeasRecs,noave) #figures out method codes for measuremet data
con.add_magic_table_from_data(dtype='measurements', data=MeasOuts)
#write to file
con.write_table_to_file('specimens', custom_name=spec_file)
con.write_table_to_file('samples', custom_name=samp_file)
con.write_table_to_file('sites', custom_name=site_file)
con.write_table_to_file('locations', custom_name=loc_file)
meas_file = con.write_table_to_file('measurements', custom_name=meas_file)
return True, meas_file | python | def convert(**kwargs):
"""
EXAMPLE DOCSTRING for function (you would usually put the discription here)
Parameters
-----------
user : colon delimited list of analysts (default : "")
magfile : input magnetometer file (required)
Returns
-----------
type - Tuple : (True or False indicating if conversion was sucessful, meas_file name written)
"""
#get parameters from kwargs.get(parameter_name, default_value)
user = kwargs.get('user', '')
magfile = kwargs.get('magfile')
#do any extra formating you need to variables here
#open magfile to start reading data
try:
infile=open(magfile,'r')
except Exception as ex:
print(("bad file path: ", magfile))
return False, "bad file path"
#Depending on the dataset you may need to read in all data here put it in a list of dictionaries or something here. If you do just replace the "for line in infile.readlines():" bellow with "for d in data:" where data is the structure you put your data into
#define the lists that hold each line of data for their respective tables
SpecRecs,SampRecs,SiteRecs,LocRecs,MeasRecs=[],[],[],[],[]
#itterate over the contence of the file
for line in infile.readlines():
MeasRec,SpecRec,SampRec,SiteRec,LocRec={},{},{},{},{}
#extract data from line and put it in variables
#fill this line of the Specimen table using above variables
if specimen!="" and specimen not in [x['specimen'] if 'specimen' in list(x.keys()) else "" for x in SpecRecs]:
SpecRec['analysts']=user
SpecRecs.append(SpecRec)
#fill this line of the Sample table using above variables
if sample!="" and sample not in [x['sample'] if 'sample' in list(x.keys()) else "" for x in SampRecs]:
SampRec['analysts']=user
SampRecs.append(SampRec)
#fill this line of the Site table using above variables
if site!="" and site not in [x['site'] if 'site' in list(x.keys()) else "" for x in SiteRecs]:
SiteRec['analysts']=user
SiteRecs.append(SiteRec)
#fill this line of the Location table using above variables
if location!="" and location not in [x['location'] if 'location' in list(x.keys()) else "" for x in LocRecs]:
LocRec['analysts']=user
LocRecs.append(LocRec)
#Fill this line of Meas Table using data in line
MeasRec['analysts']=user
MeasRecs.append(MeasRec)
#close your file object so Python3 doesn't throw an annoying warning
infile.close()
#open a Contribution object
con = cb.Contribution(output_dir_path,read_tables=[])
#Create Magic Tables and add to a contribution
con.add_magic_table_from_data(dtype='specimens', data=SpecRecs)
con.add_magic_table_from_data(dtype='samples', data=SampRecs)
con.add_magic_table_from_data(dtype='sites', data=SiteRecs)
con.add_magic_table_from_data(dtype='locations', data=LocRecs)
MeasOuts=pmag.measurements_methods3(MeasRecs,noave) #figures out method codes for measuremet data
con.add_magic_table_from_data(dtype='measurements', data=MeasOuts)
#write to file
con.write_table_to_file('specimens', custom_name=spec_file)
con.write_table_to_file('samples', custom_name=samp_file)
con.write_table_to_file('sites', custom_name=site_file)
con.write_table_to_file('locations', custom_name=loc_file)
meas_file = con.write_table_to_file('measurements', custom_name=meas_file)
return True, meas_file | EXAMPLE DOCSTRING for function (you would usually put the discription here)
Parameters
-----------
user : colon delimited list of analysts (default : "")
magfile : input magnetometer file (required)
Returns
-----------
type - Tuple : (True or False indicating if conversion was sucessful, meas_file name written) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/conversion_scripts/template_magic.py#L25-L105 |
PmagPy/PmagPy | programs/b_vdm.py | main | def main():
"""
NAME
b_vdm.py
DESCRIPTION
converts B (in microT) and (magnetic) latitude to V(A)DM
INPUT (COMMAND LINE ENTRY)
B (microtesla), latitude (positive north)
OUTPUT
V[A]DM
SYNTAX
b_vdm.py [command line options] [< filename]
OPTIONS
-h prints help and quits
-i for interactive data entry
-f FILE input file
-F FILE output
"""
inp,out="",""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
inp=f.readlines()
if '-F' in sys.argv:
ind=sys.argv.index('-F')
o=sys.argv[ind+1]
out=open(o,'w')
if '-i' in sys.argv:
cont=1
while cont==1:
try:
b=1e-6*float(input('B (in microtesla): <cntl-D to quit '))
lat=float(input('Latitude: '))
except:
print("\nGood bye\n")
sys.exit()
vdm= pmag.b_vdm(b,lat)
print('%10.3e '%(vdm))
if inp=="":
inp = sys.stdin.readlines() # read from standard input
for line in inp:
vdm=spitout(line)
if out=="":
print('%10.3e'%(vdm))
else:
out.write('%10.3e \n'%(vdm)) | python | def main():
"""
NAME
b_vdm.py
DESCRIPTION
converts B (in microT) and (magnetic) latitude to V(A)DM
INPUT (COMMAND LINE ENTRY)
B (microtesla), latitude (positive north)
OUTPUT
V[A]DM
SYNTAX
b_vdm.py [command line options] [< filename]
OPTIONS
-h prints help and quits
-i for interactive data entry
-f FILE input file
-F FILE output
"""
inp,out="",""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
inp=f.readlines()
if '-F' in sys.argv:
ind=sys.argv.index('-F')
o=sys.argv[ind+1]
out=open(o,'w')
if '-i' in sys.argv:
cont=1
while cont==1:
try:
b=1e-6*float(input('B (in microtesla): <cntl-D to quit '))
lat=float(input('Latitude: '))
except:
print("\nGood bye\n")
sys.exit()
vdm= pmag.b_vdm(b,lat)
print('%10.3e '%(vdm))
if inp=="":
inp = sys.stdin.readlines() # read from standard input
for line in inp:
vdm=spitout(line)
if out=="":
print('%10.3e'%(vdm))
else:
out.write('%10.3e \n'%(vdm)) | NAME
b_vdm.py
DESCRIPTION
converts B (in microT) and (magnetic) latitude to V(A)DM
INPUT (COMMAND LINE ENTRY)
B (microtesla), latitude (positive north)
OUTPUT
V[A]DM
SYNTAX
b_vdm.py [command line options] [< filename]
OPTIONS
-h prints help and quits
-i for interactive data entry
-f FILE input file
-F FILE output | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/b_vdm.py#L16-L72 |
PmagPy/PmagPy | setup.py | do_walk | def do_walk(data_path):
"""
Walk through data_files and list all in dict format
"""
data_files = {}
def cond(File, prefix):
"""
Return True for useful files
Return False for non-useful files
"""
file_path = path.join(prefix, 'data_files', File)
return (not File.startswith('!') and
not File.endswith('~') and
not File.endswith('#') and
not File.endswith('.pyc') and
not File.startswith('.') and
path.exists(path.join(prefix, File)))
for (dir_path, dirs, files) in os.walk(data_path):
data_files[dir_path] = [f for f in files if cond(f, dir_path)]
if not dirs:
continue
else:
for Dir in dirs:
do_walk(os.path.join(dir_path, Dir))
return data_files | python | def do_walk(data_path):
"""
Walk through data_files and list all in dict format
"""
data_files = {}
def cond(File, prefix):
"""
Return True for useful files
Return False for non-useful files
"""
file_path = path.join(prefix, 'data_files', File)
return (not File.startswith('!') and
not File.endswith('~') and
not File.endswith('#') and
not File.endswith('.pyc') and
not File.startswith('.') and
path.exists(path.join(prefix, File)))
for (dir_path, dirs, files) in os.walk(data_path):
data_files[dir_path] = [f for f in files if cond(f, dir_path)]
if not dirs:
continue
else:
for Dir in dirs:
do_walk(os.path.join(dir_path, Dir))
return data_files | Walk through data_files and list all in dict format | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/setup.py#L46-L71 |
PmagPy/PmagPy | programs/conversion_scripts2/bgc_magic2.py | main | def main(command_line=True, **kwargs):
"""
NAME
bgc_magic.py
DESCRIPTION
converts Berkeley Geochronology Center (BGC) format files to magic_measurements format files
SYNTAX
bgc_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input file, or
-F FILE: specify output file, default is magic_measurements.txt
-Fsa: specify er_samples format file for appending, default is new er_samples.txt (Not working yet)
-loc LOCNAME : specify location/study name
-site SITENAME : specify site name
-A: don't average replicate measurements
-mcd [SO-MAG,SO-SUN,SO-SIGHT...] supply how these samples were oriented
-v NUM : specify the volume in cc of the sample, default 2.5^3cc. Will use vol in data file if volume!=0 in file.
INPUT
BGC paleomag format file
"""
# initialize some stuff
noave = 0
volume = 0.025**3 #default volume is a 2.5cm cube
#inst=""
#samp_con,Z='1',""
#missing=1
#demag="N"
er_location_name = "unknown"
er_site_name = "unknown"
#citation='This study'
args = sys.argv
meth_code = "LP-NO"
#specnum=1
version_num = pmag.get_version()
mag_file = ""
dir_path = '.'
MagRecs = []
SampOuts = []
samp_file = 'er_samples.txt'
meas_file = 'magic_measurements.txt'
meth_code = ""
#
# get command line arguments
#
if command_line:
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path = sys.argv[ind+1]
if '-ID' in sys.argv:
ind = sys.argv.index('-ID')
input_dir_path = sys.argv[ind+1]
else:
input_dir_path = dir_path
output_dir_path = dir_path
if "-h" in args:
print(main.__doc__)
return False
if '-F' in args:
ind = args.index("-F")
meas_file = args[ind+1]
if '-Fsa' in args:
ind = args.index("-Fsa")
samp_file = args[ind+1]
#try:
# open(samp_file,'r')
# ErSamps,file_type=pmag.magic_read(samp_file)
# print 'sample information will be appended to ', samp_file
#except:
# print samp_file,' not found: sample information will be stored in new er_samples.txt file'
# samp_file = output_dir_path+'/er_samples.txt'
if '-f' in args:
ind = args.index("-f")
mag_file = args[ind+1]
if "-loc" in args:
ind = args.index("-loc")
er_location_name = args[ind+1]
if "-site" in args:
ind = args.index("-site")
er_site_name = args[ind+1]
if "-A" in args:
noave = 1
if "-mcd" in args:
ind = args.index("-mcd")
meth_code = args[ind+1]
#samp_con='5'
if "-v" in args:
ind = args.index("-v")
volume = float(args[ind+1]) * 1e-6 # enter volume in cc, convert to m^3
if not command_line:
dir_path = kwargs.get('dir_path', '.')
input_dir_path = kwargs.get('input_dir_path', dir_path)
output_dir_path = dir_path
meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
mag_file = kwargs.get('mag_file')
samp_file = kwargs.get('samp_file', 'er_samples.txt')
er_location_name = kwargs.get('er_location_name', '')
er_site_name = kwargs.get('er_site_name', '')
noave = kwargs.get('noave', 0) # default (0) means DO average
meth_code = kwargs.get('meth_code', "LP-NO")
volume = float(kwargs.get('volume', 0))
if not volume:
volume = 0.025**3 #default volume is a 2.5 cm cube, translated to meters cubed
else:
#convert cm^3 to m^3
volume *= 1e-6
# format variables
if not mag_file:
return False, 'You must provide a BCG format file'
mag_file = os.path.join(input_dir_path, mag_file)
meas_file = os.path.join(output_dir_path, meas_file)
samp_file = os.path.join(output_dir_path, samp_file)
ErSampRec = {}
# parse data
# Open up the BGC file and read the header information
print('mag_file in bgc_magic', mag_file)
pre_data = open(mag_file, 'r')
line = pre_data.readline()
line_items = line.split(' ')
sample_name = line_items[2]
sample_name = sample_name.replace('\n', '')
line = pre_data.readline()
line = pre_data.readline()
line_items = line.split('\t')
sample_azimuth = float(line_items[1])
sample_dip = float(line_items[2])
sample_bed_dip = line_items[3]
sample_bed_azimuth = line_items[4]
sample_lon = line_items[5]
sample_lat = line_items[6]
tmp_volume = line_items[7]
if tmp_volume != 0.0:
volume = float(tmp_volume) * 1e-6
pre_data.close()
data = pd.read_csv(mag_file, sep='\t', header=3, index_col=False)
cart = np.array([data['X'], data['Y'], data['Z']]).transpose()
direction = pmag.cart2dir(cart).transpose()
data['measurement_dec'] = direction[0]
data['measurement_inc'] = direction[1]
data['measurement_magn_moment'] = old_div(direction[2], 1000) # the data are in EMU - this converts to Am^2
data['measurement_magn_volume'] = old_div((old_div(direction[2], 1000)), volume) # EMU - data converted to A/m
# Configure the er_sample table
ErSampRec['er_sample_name'] = sample_name
ErSampRec['sample_azimuth'] = sample_azimuth
ErSampRec['sample_dip'] = sample_dip
ErSampRec['sample_bed_dip_direction'] = sample_bed_azimuth
ErSampRec['sample_bed_dip'] = sample_bed_dip
ErSampRec['sample_lat'] = sample_lat
ErSampRec['sample_lon'] = sample_lon
ErSampRec['magic_method_codes'] = meth_code
ErSampRec['er_location_name'] = er_location_name
ErSampRec['er_site_name'] = er_site_name
ErSampRec['er_citation_names'] = 'This study'
SampOuts.append(ErSampRec.copy())
# Configure the magic_measurements table
for rowNum, row in data.iterrows():
MagRec = {}
MagRec['measurement_description'] = 'Date: ' + str(row['Date']) + ' Time: ' + str(row['Time'])
MagRec["er_citation_names"] = "This study"
MagRec['er_location_name'] = er_location_name
MagRec['er_site_name'] = er_site_name
MagRec['er_sample_name'] = sample_name
MagRec['magic_software_packages'] = version_num
MagRec["treatment_temp"] = '%8.3e' % (273) # room temp in kelvin
MagRec["measurement_temp"] = '%8.3e' % (273) # room temp in kelvin
MagRec["measurement_flag"] = 'g'
MagRec["measurement_standard"] = 'u'
MagRec["measurement_number"] = rowNum
MagRec["er_specimen_name"] = sample_name
MagRec["treatment_ac_field"] = '0'
if row['DM Val'] == '0':
meas_type = "LT-NO"
elif int(row['DM Type']) > 0.0:
meas_type = "LT-AF-Z"
treat = float(row['DM Val'])
MagRec["treatment_ac_field"] = '%8.3e' %(treat*1e-3) # convert from mT to tesla
elif int(row['DM Type']) == -1:
meas_type = "LT-T-Z"
treat = float(row['DM Val'])
MagRec["treatment_temp"] = '%8.3e' % (treat+273.) # temp in kelvin
else:
print("measurement type unknown:", row['DM Type'], " in row ", rowNum)
MagRec["measurement_magn_moment"] = str(row['measurement_magn_moment'])
MagRec["measurement_magn_volume"] = str(row['measurement_magn_volume'])
MagRec["measurement_dec"] = str(row['measurement_dec'])
MagRec["measurement_inc"] = str(row['measurement_inc'])
MagRec['magic_method_codes'] = meas_type
MagRec['measurement_csd'] = '0.0' # added due to magic.write error
MagRec['measurement_positions'] = '1' # added due to magic.write error
MagRecs.append(MagRec.copy())
pmag.magic_write(samp_file, SampOuts, 'er_samples')
print("sample orientations put in ", samp_file)
MagOuts = pmag.measurements_methods(MagRecs, noave)
pmag.magic_write(meas_file, MagOuts, 'magic_measurements')
print("results put in ", meas_file)
return True, meas_file | python | def main(command_line=True, **kwargs):
"""
NAME
bgc_magic.py
DESCRIPTION
converts Berkeley Geochronology Center (BGC) format files to magic_measurements format files
SYNTAX
bgc_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input file, or
-F FILE: specify output file, default is magic_measurements.txt
-Fsa: specify er_samples format file for appending, default is new er_samples.txt (Not working yet)
-loc LOCNAME : specify location/study name
-site SITENAME : specify site name
-A: don't average replicate measurements
-mcd [SO-MAG,SO-SUN,SO-SIGHT...] supply how these samples were oriented
-v NUM : specify the volume in cc of the sample, default 2.5^3cc. Will use vol in data file if volume!=0 in file.
INPUT
BGC paleomag format file
"""
# initialize some stuff
noave = 0
volume = 0.025**3 #default volume is a 2.5cm cube
#inst=""
#samp_con,Z='1',""
#missing=1
#demag="N"
er_location_name = "unknown"
er_site_name = "unknown"
#citation='This study'
args = sys.argv
meth_code = "LP-NO"
#specnum=1
version_num = pmag.get_version()
mag_file = ""
dir_path = '.'
MagRecs = []
SampOuts = []
samp_file = 'er_samples.txt'
meas_file = 'magic_measurements.txt'
meth_code = ""
#
# get command line arguments
#
if command_line:
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path = sys.argv[ind+1]
if '-ID' in sys.argv:
ind = sys.argv.index('-ID')
input_dir_path = sys.argv[ind+1]
else:
input_dir_path = dir_path
output_dir_path = dir_path
if "-h" in args:
print(main.__doc__)
return False
if '-F' in args:
ind = args.index("-F")
meas_file = args[ind+1]
if '-Fsa' in args:
ind = args.index("-Fsa")
samp_file = args[ind+1]
#try:
# open(samp_file,'r')
# ErSamps,file_type=pmag.magic_read(samp_file)
# print 'sample information will be appended to ', samp_file
#except:
# print samp_file,' not found: sample information will be stored in new er_samples.txt file'
# samp_file = output_dir_path+'/er_samples.txt'
if '-f' in args:
ind = args.index("-f")
mag_file = args[ind+1]
if "-loc" in args:
ind = args.index("-loc")
er_location_name = args[ind+1]
if "-site" in args:
ind = args.index("-site")
er_site_name = args[ind+1]
if "-A" in args:
noave = 1
if "-mcd" in args:
ind = args.index("-mcd")
meth_code = args[ind+1]
#samp_con='5'
if "-v" in args:
ind = args.index("-v")
volume = float(args[ind+1]) * 1e-6 # enter volume in cc, convert to m^3
if not command_line:
dir_path = kwargs.get('dir_path', '.')
input_dir_path = kwargs.get('input_dir_path', dir_path)
output_dir_path = dir_path
meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
mag_file = kwargs.get('mag_file')
samp_file = kwargs.get('samp_file', 'er_samples.txt')
er_location_name = kwargs.get('er_location_name', '')
er_site_name = kwargs.get('er_site_name', '')
noave = kwargs.get('noave', 0) # default (0) means DO average
meth_code = kwargs.get('meth_code', "LP-NO")
volume = float(kwargs.get('volume', 0))
if not volume:
volume = 0.025**3 #default volume is a 2.5 cm cube, translated to meters cubed
else:
#convert cm^3 to m^3
volume *= 1e-6
# format variables
if not mag_file:
return False, 'You must provide a BCG format file'
mag_file = os.path.join(input_dir_path, mag_file)
meas_file = os.path.join(output_dir_path, meas_file)
samp_file = os.path.join(output_dir_path, samp_file)
ErSampRec = {}
# parse data
# Open up the BGC file and read the header information
print('mag_file in bgc_magic', mag_file)
pre_data = open(mag_file, 'r')
line = pre_data.readline()
line_items = line.split(' ')
sample_name = line_items[2]
sample_name = sample_name.replace('\n', '')
line = pre_data.readline()
line = pre_data.readline()
line_items = line.split('\t')
sample_azimuth = float(line_items[1])
sample_dip = float(line_items[2])
sample_bed_dip = line_items[3]
sample_bed_azimuth = line_items[4]
sample_lon = line_items[5]
sample_lat = line_items[6]
tmp_volume = line_items[7]
if tmp_volume != 0.0:
volume = float(tmp_volume) * 1e-6
pre_data.close()
data = pd.read_csv(mag_file, sep='\t', header=3, index_col=False)
cart = np.array([data['X'], data['Y'], data['Z']]).transpose()
direction = pmag.cart2dir(cart).transpose()
data['measurement_dec'] = direction[0]
data['measurement_inc'] = direction[1]
data['measurement_magn_moment'] = old_div(direction[2], 1000) # the data are in EMU - this converts to Am^2
data['measurement_magn_volume'] = old_div((old_div(direction[2], 1000)), volume) # EMU - data converted to A/m
# Configure the er_sample table
ErSampRec['er_sample_name'] = sample_name
ErSampRec['sample_azimuth'] = sample_azimuth
ErSampRec['sample_dip'] = sample_dip
ErSampRec['sample_bed_dip_direction'] = sample_bed_azimuth
ErSampRec['sample_bed_dip'] = sample_bed_dip
ErSampRec['sample_lat'] = sample_lat
ErSampRec['sample_lon'] = sample_lon
ErSampRec['magic_method_codes'] = meth_code
ErSampRec['er_location_name'] = er_location_name
ErSampRec['er_site_name'] = er_site_name
ErSampRec['er_citation_names'] = 'This study'
SampOuts.append(ErSampRec.copy())
# Configure the magic_measurements table
for rowNum, row in data.iterrows():
MagRec = {}
MagRec['measurement_description'] = 'Date: ' + str(row['Date']) + ' Time: ' + str(row['Time'])
MagRec["er_citation_names"] = "This study"
MagRec['er_location_name'] = er_location_name
MagRec['er_site_name'] = er_site_name
MagRec['er_sample_name'] = sample_name
MagRec['magic_software_packages'] = version_num
MagRec["treatment_temp"] = '%8.3e' % (273) # room temp in kelvin
MagRec["measurement_temp"] = '%8.3e' % (273) # room temp in kelvin
MagRec["measurement_flag"] = 'g'
MagRec["measurement_standard"] = 'u'
MagRec["measurement_number"] = rowNum
MagRec["er_specimen_name"] = sample_name
MagRec["treatment_ac_field"] = '0'
if row['DM Val'] == '0':
meas_type = "LT-NO"
elif int(row['DM Type']) > 0.0:
meas_type = "LT-AF-Z"
treat = float(row['DM Val'])
MagRec["treatment_ac_field"] = '%8.3e' %(treat*1e-3) # convert from mT to tesla
elif int(row['DM Type']) == -1:
meas_type = "LT-T-Z"
treat = float(row['DM Val'])
MagRec["treatment_temp"] = '%8.3e' % (treat+273.) # temp in kelvin
else:
print("measurement type unknown:", row['DM Type'], " in row ", rowNum)
MagRec["measurement_magn_moment"] = str(row['measurement_magn_moment'])
MagRec["measurement_magn_volume"] = str(row['measurement_magn_volume'])
MagRec["measurement_dec"] = str(row['measurement_dec'])
MagRec["measurement_inc"] = str(row['measurement_inc'])
MagRec['magic_method_codes'] = meas_type
MagRec['measurement_csd'] = '0.0' # added due to magic.write error
MagRec['measurement_positions'] = '1' # added due to magic.write error
MagRecs.append(MagRec.copy())
pmag.magic_write(samp_file, SampOuts, 'er_samples')
print("sample orientations put in ", samp_file)
MagOuts = pmag.measurements_methods(MagRecs, noave)
pmag.magic_write(meas_file, MagOuts, 'magic_measurements')
print("results put in ", meas_file)
return True, meas_file | NAME
bgc_magic.py
DESCRIPTION
converts Berkeley Geochronology Center (BGC) format files to magic_measurements format files
SYNTAX
bgc_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input file, or
-F FILE: specify output file, default is magic_measurements.txt
-Fsa: specify er_samples format file for appending, default is new er_samples.txt (Not working yet)
-loc LOCNAME : specify location/study name
-site SITENAME : specify site name
-A: don't average replicate measurements
-mcd [SO-MAG,SO-SUN,SO-SIGHT...] supply how these samples were oriented
-v NUM : specify the volume in cc of the sample, default 2.5^3cc. Will use vol in data file if volume!=0 in file.
INPUT
BGC paleomag format file | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/conversion_scripts2/bgc_magic2.py#L13-L226 |
PmagPy/PmagPy | programs/deprecated/change_case_magic.py | main | def main():
"""
NAME
change_case_magic.py
DESCRIPTION
picks out key and converts to upper or lower case
SYNTAX
change_case_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input magic format file
-F FILE: specify output magic format file , default is to overwrite input file
-keys KEY1:KEY2 specify colon delimited list of keys to convert
-[U,l] : specify [U]PPER or [l]ower case, default is lower
"""
dir_path="./"
change='l'
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
magic_file=dir_path+'/'+sys.argv[ind+1]
else:
print(main.__doc__)
sys.exit()
if '-F' in sys.argv:
ind=sys.argv.index('-F')
out_file=dir_path+'/'+sys.argv[ind+1]
else: out_file=magic_file
if '-keys' in sys.argv:
ind=sys.argv.index('-keys')
grab_keys=sys.argv[ind+1].split(":")
else:
print(main.__doc__)
sys.exit()
if '-U' in sys.argv: change='U'
#
#
# get data read in
Data,file_type=pmag.magic_read(magic_file)
if len(Data)>0:
for grab_key in grab_keys:
for rec in Data:
if change=='l':
rec[grab_key]=rec[grab_key].lower()
else:
rec[grab_key]=rec[grab_key].upper()
else:
print('bad file name')
pmag.magic_write(out_file,Data,file_type) | python | def main():
"""
NAME
change_case_magic.py
DESCRIPTION
picks out key and converts to upper or lower case
SYNTAX
change_case_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input magic format file
-F FILE: specify output magic format file , default is to overwrite input file
-keys KEY1:KEY2 specify colon delimited list of keys to convert
-[U,l] : specify [U]PPER or [l]ower case, default is lower
"""
dir_path="./"
change='l'
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
magic_file=dir_path+'/'+sys.argv[ind+1]
else:
print(main.__doc__)
sys.exit()
if '-F' in sys.argv:
ind=sys.argv.index('-F')
out_file=dir_path+'/'+sys.argv[ind+1]
else: out_file=magic_file
if '-keys' in sys.argv:
ind=sys.argv.index('-keys')
grab_keys=sys.argv[ind+1].split(":")
else:
print(main.__doc__)
sys.exit()
if '-U' in sys.argv: change='U'
#
#
# get data read in
Data,file_type=pmag.magic_read(magic_file)
if len(Data)>0:
for grab_key in grab_keys:
for rec in Data:
if change=='l':
rec[grab_key]=rec[grab_key].lower()
else:
rec[grab_key]=rec[grab_key].upper()
else:
print('bad file name')
pmag.magic_write(out_file,Data,file_type) | NAME
change_case_magic.py
DESCRIPTION
picks out key and converts to upper or lower case
SYNTAX
change_case_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE: specify input magic format file
-F FILE: specify output magic format file , default is to overwrite input file
-keys KEY1:KEY2 specify colon delimited list of keys to convert
-[U,l] : specify [U]PPER or [l]ower case, default is lower | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/deprecated/change_case_magic.py#L6-L63 |
PmagPy/PmagPy | pmagpy/frp.py | get_pole | def get_pole(continent,age):
"""
returns rotation poles and angles for specified continents and ages
assumes fixed Africa.
Parameters
__________
continent :
aus : Australia
eur : Eurasia
mad : Madacascar
[nwaf,congo] : NW Africa [choose one]
col : Colombia
grn : Greenland
nam : North America
par : Paraguay
eant : East Antarctica
ind : India
[neaf,kala] : NE Africa [choose one]
[sac,sam] : South America [choose one]
ib : Iberia
saf : South Africa
Returns
_______
[pole longitude, pole latitude, rotation angle] : for the continent at specified age
"""
age=int(age)
if continent=='congo':continent='nwaf'
if continent=='kala':continent='neaf'
if continent=='sam':continent='sac'
if continent=='ant':continent='eant'
if continent=='af':
return [0,0,0] # assume africa fixed
if continent=='aus':
cont= [[5,9.7,54.3,-3.3],[10,10.4,52.8,-6.2],[15,11.5,49.8,-9.0],[20,12.4,48.0,-11.8],[25,12.9,48.3,-15.0],[30,12.8,49.9,-18.1],[35,13.5,50.8,-20.9],[40,14.1,52.7,-22.1],[45,14.4,54.7,-22.9],[50,14.7,56.5,-23.6],[55,14.0,57.3,-24.7],[60,12.9,57.9,-25.7],[65,13.6,58.8,-26.3],[70,17.3,60.2,-26.3],[75,19.8,63.3,-26.7],[80,20.5,68.5,-26.6],[85,19.8,74.6,-26.9],[90,17.7,80.9,-28.9],[95,15.9,86.2,-31.1],[100,18.4,89.3,-30.7],[105,17.9,95.6,-32.6],[110,17.3,101.0,-34.8],[115,16.8,105.6,-37.4],[120,16.4,109.4,-40.3],[125,15.7,110.3,-42.3],[130,15.9,111.6,-44.4],[135,15.9,113.1,-46.6],[140,15.6,113.7,-48.3],[145,15.0,113.1,-50.5],[150,15.5,113.5,-52.5],[155,17.6,115.7,-54.3],[160,19.5,117.8,-56.2],[165,19.5,117.8,-56.2],[170,19.5,117.8,-56.2],[175,19.5,117.8,-56.2],[180,19.5,117.8,-56.2],[185,19.5,117.8,-56.2],[190,19.5,117.8,-56.2],[195,19.5,117.8,-56.2],[200,19.5,117.8,-56.2],[205,19.5,117.8,-56.2],[210,19.5,117.8,-56.2],[215,19.5,117.8,-56.2],[220,19.5,117.8,-56.2],[225,19.5,117.8,-56.2],[230,19.5,117.8,-56.2],[235,19.5,117.8,-56.2],[240,19.5,117.8,-56.2],[245,19.5,117.8,-56.2],[250,19.5,117.8,-56.2],[255,19.5,117.8,-56.2],[260,19.5,117.8,-56.2],[265,19.5,117.8,-56.2],[270,19.5,117.8,-56.2],[275,19.5,117.8,-56.2],[280,19.5,117.8,-56.2],[285,19.5,117.8,-56.2],[290,19.5,117.8,-56.2],[295,19.5,117.8,-56.2],[300,19.5,117.8,-56.2],[305,19.5,117.8,-56.2],[310,19.5,117.8,-56.2],[315,19.5,117.8,-56.2],[320,19.5,117.8,-56.2]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='eur':
cont= [[5,17.9,-27.1,0.6],[10,18.4,-26.3,1.2],[15,18.9,-24.6,1.8],[20,17.2,-22.7,2.4],[25,20.7,-19.0,3.0],[30,24.9,-19.5,4.3],[35,27.2,-19.3,5.8],[40,28.7,-18.5,7.5],[45,30.3,-18.2,9.0],[50,30.8,-16.7,10.0],[55,32.7,-15.4,11.3],[60,34.8,-15.7,12.6],[65,36.0,-15.8,13.6],[70,35.4,-16.1,14.9],[75,35.5,-15.7,15.5],[80,36.1,-15.2,16.9],[85,37.0,-14.2,18.8],[90,39.6,-13.7,21.9],[95,39.8,-13.7,25.2],[100,40.2,-12.5,28.5],[105,41.6,-11.2,31.7],[110,42.6,-9.8,34.5],[115,43.4,-8.5,37.3],[120,44.5,-6.9,40.3],[125,45.3,-6.3,42.0],[130,45.9,-5.7,43.0],[135,46.6,-5.3,44.0],[140,47.3,-4.9,45.2],[145,47.8,-4.8,46.4],[150,48.6,-4.0,47.9],[155,49.8,-2.2,50.0],[160,50.6,-1.2,52.1],[165,51.4,-0.3,54.2],[170,52.1,0.6,56.3],[175,52.9,1.9,59.6],[180,53.0,2.0,60.0],[185,53.0,2.0,60.4],[190,53.1,2.1,60.8],[195,53.2,2.2,61.1],[200,53.3,2.2,61.5],[205,53.2,2.6,59.7],[210,53.1,2.9,57.8],[215,53.1,3.3,55.9],[220,52.9,3.6,53.6],[225,52.7,4.0,51.4],[230,52.4,4.4,49.1],[235,52.2,4.8,46.8],[240,51.9,5.3,44.5],[245,51.9,5.3,44.5],[250,51.9,5.3,44.5],[255,51.9,5.3,44.5],[260,51.9,5.3,44.5],[265,51.9,5.3,44.5],[270,51.9,5.3,44.5],[275,51.9,5.3,44.5],[280,51.9,5.3,44.5],[285,51.9,5.3,44.5],[290,51.9,5.3,44.5],[295,51.9,5.3,44.5],[300,51.9,5.3,44.5],[305,51.9,5.3,44.5],[310,51.9,5.3,44.5],[315,51.9,5.3,44.5],[320,51.9,5.3,44.5]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='mad':
cont= [[5,90.0,0.0,0.0],[10,90.0,0.0,0.0],[15,90.0,0.0,0.0],[20,90.0,0.0,0.0],[25,90.0,0.0,0.0],[30,90.0,0.0,0.0],[35,90.0,0.0,0.0],[40,90.0,0.0,0.0],[45,90.0,0.0,0.0],[50,90.0,0.0,0.0],[55,90.0,0.0,0.0],[60,90.0,0.0,0.0],[65,90.0,0.0,0.0],[70,90.0,0.0,0.0],[75,90.0,0.0,0.0],[80,90.0,0.0,0.0],[85,90.0,0.0,0.0],[90,90.0,0.0,0.0],[95,90.0,0.0,0.0],[100,90.0,0.0,0.0],[105,90.0,0.0,0.0],[110,90.0,0.0,0.0],[115,90.0,0.0,0.0],[120,90.0,0.0,0.0],[125,2.6,-63.3,1.8],[130,2.6,-63.3,3.9],[135,1.5,-57.6,5.7],[140,1.0,-55.9,7.2],[145,0.6,118.3,-8.9],[150,4.5,119.8,-10.9],[155,10.6,130.1,-13.0],[160,14.8,137.5,-15.4],[165,14.8,137.5,-15.4],[170,14.8,137.5,-15.4],[175,14.8,137.5,-15.4],[180,14.8,137.5,-15.4],[185,14.8,137.5,-15.4],[190,14.8,137.5,-15.4],[195,14.8,137.5,-15.4],[200,14.8,137.5,-15.4],[205,14.8,137.5,-15.4],[210,14.8,137.5,-15.4],[215,14.8,137.5,-15.4],[220,14.8,137.5,-15.4],[225,14.8,137.5,-15.4],[230,14.8,137.5,-15.4],[235,14.8,137.5,-15.4],[240,14.8,137.5,-15.4],[245,14.8,137.5,-15.4],[250,14.8,137.5,-15.4],[255,14.8,137.5,-15.4],[260,14.8,137.5,-15.4],[265,14.8,137.5,-15.4],[270,14.8,137.5,-15.4],[275,14.8,137.5,-15.4],[280,14.8,137.5,-15.4],[285,14.8,137.5,-15.4],[290,14.8,137.5,-15.4],[295,14.8,137.5,-15.4],[300,14.8,137.5,-15.4],[305,14.8,137.5,-15.4],[310,14.8,137.5,-15.4],[315,14.8,137.5,-15.4],[320,14.8,137.5,-15.4]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='nwaf':
cont= [[5,90.0,0.0,0.0],[10,90.0,0.0,0.0],[15,90.0,0.0,0.0],[20,90.0,0.0,0.0],[25,90.0,0.0,0.0],[30,90.0,0.0,0.0],[35,90.0,0.0,0.0],[40,90.0,0.0,0.0],[45,90.0,0.0,0.0],[50,90.0,0.0,0.0],[55,90.0,0.0,0.0],[60,90.0,0.0,0.0],[65,90.0,0.0,0.0],[70,90.0,0.0,0.0],[75,90.0,0.0,0.0],[80,90.0,0.0,0.0],[85,19.6,6.7,0.0],[90,16.6,6.7,-0.2],[95,16.5,6.7,-0.4],[100,16.5,6.7,-0.5],[105,16.5,6.7,-0.7],[110,16.5,6.7,-0.8],[115,16.5,6.7,-1.0],[120,16.5,6.7,-1.1],[125,16.5,6.7,-1.2],[130,16.5,6.7,-1.2],[135,16.5,6.7,-1.2],[140,16.5,6.7,-1.2],[145,16.5,6.7,-1.2],[150,16.5,6.7,-1.2],[155,16.5,6.7,-1.2],[160,16.5,6.7,-1.2],[165,16.5,6.7,-1.2],[170,16.5,6.7,-1.2],[175,16.5,6.7,-1.2],[180,16.5,6.7,-1.2],[185,16.5,6.7,-1.2],[190,16.5,6.7,-1.2],[195,16.5,6.7,-1.2],[200,16.5,6.7,-1.2],[205,16.5,6.7,-1.2],[210,16.5,6.7,-1.2],[215,16.5,6.7,-1.2],[220,16.5,6.7,-1.2],[225,16.5,6.7,-1.2],[230,16.5,6.7,-1.2],[235,16.5,6.7,-1.2],[240,16.5,6.7,-1.2],[245,16.5,6.7,-1.2],[250,16.5,6.7,-1.2],[255,16.5,6.7,-1.2],[260,16.5,6.7,-1.2],[265,16.5,6.7,-1.2],[270,16.5,6.7,-1.2],[275,16.5,6.7,-1.2],[280,16.5,6.7,-1.2],[285,16.5,6.7,-1.2],[290,16.5,6.7,-1.2],[295,16.5,6.7,-1.2],[300,16.5,6.7,-1.2],[305,16.5,6.7,-1.2],[310,16.5,6.7,-1.2],[315,16.5,6.7,-1.2],[320,16.5,6.7,-1.2]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='col':
cont= [[5,62.1,-40.2,1.6],[10,61.8,-40.3,3.3],[15,59.6,-38.1,5.4],[20,58.5,-37.1,7.5],[25,57.7,-36.4,9.6],[30,56.7,-34.5,11.9],[35,56.5,-33.4,14.3],[40,57.1,-32.6,16.6],[45,57.0,-31.4,18.6],[50,58.2,-31.2,20.5],[55,60.7,-31.9,22.0],[60,62.5,-32.8,23.3],[65,63.7,-33.5,24.6],[70,63.5,-33.4,26.1],[75,63.2,-33.9,28.6],[80,62.7,-34.3,31.5],[85,61.2,-34.3,34.4],[90,59.1,-34.5,37.3],[95,57.2,-34.7,40.3],[100,55.7,-34.8,43.3],[105,54.3,-34.9,46.4],[110,53.1,-35.0,49.5],[115,52.2,-35.0,51.7],[120,51.6,-35.0,52.8],[125,50.7,-33.9,54.0],[130,48.5,-33.4,55.4],[135,47.5,-33.3,56.0],[140,47.5,-33.3,56.1],[145,47.5,-33.3,56.1],[150,47.5,-33.3,56.2],[155,47.5,-33.3,56.2],[160,47.5,-33.3,56.2],[165,47.5,-33.3,56.2],[170,47.5,-33.3,56.2],[175,47.5,-33.3,56.2],[180,47.5,-33.3,56.2],[185,47.5,-33.3,56.2],[190,47.5,-33.3,56.2],[195,47.5,-33.3,56.2],[200,47.5,-33.3,56.2],[205,47.5,-33.3,56.2],[210,47.5,-33.3,56.2],[215,47.5,-33.3,56.2],[220,47.5,-33.3,56.2],[225,47.5,-33.3,56.2],[230,47.5,-33.3,56.2],[235,47.5,-33.3,56.2],[240,47.5,-33.3,56.2],[245,47.5,-33.3,56.2],[250,47.5,-33.3,56.2],[255,47.5,-33.3,56.2],[260,47.5,-33.3,56.2],[265,47.5,-33.3,56.2],[270,47.5,-33.3,56.2],[275,47.5,-33.3,56.2],[280,47.5,-33.3,56.2],[285,47.5,-33.3,56.2],[290,47.5,-33.3,56.2],[295,47.5,-33.3,56.2],[300,47.5,-33.3,56.2],[305,47.5,-33.3,56.2],[310,47.5,-33.3,56.2],[315,47.5,-33.3,56.2],[320,47.5,-33.3,56.2]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='grn':
cont= [[5,80.9,22.8,1.3],[10,80.9,22.9,2.6],[15,80.9,23.2,4.1],[20,80.6,24.4,5.5],[25,79.5,28.1,6.8],[30,77.3,12.5,8.6],[35,74.8,7.2,10.2],[40,72.6,9.5,11.5],[45,71.4,11.4,12.7],[50,71.0,20.7,14.2],[55,71.8,29.6,16.8],[60,71.9,30.5,17.5],[65,71.3,32.9,17.6],[70,69.8,29.0,17.9],[75,69.0,26.6,18.5],[80,67.6,21.0,19.8],[85,66.3,16.4,21.5],[90,65.9,11.5,24.2],[95,64.2,5.5,26.9],[100,62.7,2.8,30.1],[105,62.4,1.6,33.3],[110,62.1,0.9,36.5],[115,61.8,0.5,39.7],[120,61.8,0.8,43.1],[125,61.9,1.0,44.9],[130,62.2,1.3,46.0],[135,62.4,1.6,47.1],[140,62.7,1.6,48.4],[145,62.9,1.3,49.7],[150,63.2,1.8,51.4],[155,63.7,3.6,53.8],[160,64.1,4.2,56.0],[165,64.4,4.8,58.3],[170,64.7,5.3,60.6],[175,64.8,6.0,64.1],[180,64.9,6.0,64.5],[185,64.9,5.9,64.9],[190,65.0,5.9,65.4],[195,65.0,5.8,65.8],[200,65.1,5.8,66.2],[205,65.1,5.7,66.7],[210,65.2,5.7,67.1],[215,65.2,5.6,67.5],[220,65.2,5.6,67.5],[225,65.2,5.6,67.5],[230,65.2,5.6,67.5],[235,65.2,5.6,67.5],[240,65.2,5.6,67.5],[245,65.2,5.6,67.5],[250,65.2,5.6,67.5],[255,65.2,5.6,67.5],[260,65.2,5.6,67.5],[265,65.2,5.6,67.5],[270,65.2,5.6,67.5],[275,65.2,5.6,67.5],[280,65.2,5.6,67.5],[285,65.2,5.6,67.5],[290,65.2,5.6,67.5],[295,65.2,5.6,67.5],[300,65.2,5.6,67.5],[305,65.2,5.6,67.5],[310,65.2,5.6,67.5],[315,65.2,5.6,67.5],[320,65.2,5.6,67.5]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='nam':
cont= [[5,80.9,22.8,1.3],[10,80.9,22.9,2.6],[15,80.9,23.2,4.1],[20,80.6,24.4,5.5],[25,79.5,28.1,6.8],[30,77.3,12.5,8.6],[35,75.4,3.5,10.5],[40,74.5,-1.1,12.6],[45,74.3,-4.3,14.6],[50,75.9,-3.5,16.2],[55,79.8,4.1,17.6],[60,81.6,5.1,19.1],[65,82.6,3.2,20.7],[70,81.6,-6.5,22.4],[75,80.4,-13.1,24.6],[80,78.2,-18.8,27.5],[85,76.2,-21.3,30.5],[90,74.6,-23.0,33.8],[95,72.0,-24.7,36.9],[100,70.0,-24.0,40.2],[105,69.1,-23.3,43.6],[110,68.3,-22.6,47.0],[115,67.6,-21.8,50.4],[120,67.1,-20.4,53.9],[125,67.0,-19.7,55.6],[130,67.0,-19.1,56.7],[135,67.1,-18.7,57.9],[140,67.2,-18.4,59.2],[145,67.1,-18.3,60.5],[150,67.3,-17.6,62.2],[155,67.6,-15.5,64.6],[160,67.6,-14.5,66.8],[165,67.7,-13.6,69.1],[170,67.8,-12.8,71.4],[175,67.7,-11.5,74.8],[180,67.7,-11.5,75.3],[185,67.7,-11.5,75.7],[190,67.7,-11.5,76.1],[195,67.7,-11.5,76.6],[200,67.7,-11.5,77.0],[205,67.7,-11.5,77.4],[210,67.7,-11.5,77.9],[215,67.7,-11.5,78.3],[220,67.7,-11.5,78.3],[225,67.7,-11.5,78.3],[230,67.7,-11.5,78.3],[235,67.7,-11.5,78.3],[240,67.7,-11.5,78.3],[245,67.7,-11.5,78.3],[250,67.7,-11.5,78.3],[255,67.7,-11.5,78.3],[260,67.7,-11.5,78.3],[265,67.7,-11.5,78.3],[270,67.7,-11.5,78.3],[275,67.7,-11.5,78.3],[280,67.7,-11.5,78.3],[285,67.7,-11.5,78.3],[290,67.7,-11.5,78.3],[295,67.7,-11.5,78.3],[300,67.7,-11.5,78.3],[305,67.7,-11.5,78.3],[310,67.7,-11.5,78.3],[315,67.7,-11.5,78.3],[320,67.7,-11.5,78.3]]
for rec in cont:
if int(age)==int(rec[0]):
pole= [rec[1],rec[2],rec[3]]
return pole
if continent=='par':
cont= [[5,62.1,-40.2,1.6],[10,61.8,-40.3,3.3],[15,59.6,-38.1,5.4],[20,58.5,-37.1,7.5],[25,57.7,-36.4,9.6],[30,56.7,-34.5,11.9],[35,56.5,-33.4,14.3],[40,57.1,-32.6,16.6],[45,57.0,-31.4,18.6],[50,58.2,-31.2,20.5],[55,60.7,-31.9,22.0],[60,62.5,-32.8,23.3],[65,63.7,-33.5,24.6],[70,63.5,-33.4,26.1],[75,63.2,-33.9,28.6],[80,62.7,-34.3,31.5],[85,61.2,-34.3,34.4],[90,59.1,-34.5,37.3],[95,57.2,-34.7,40.3],[100,55.7,-34.8,43.3],[105,54.3,-34.9,46.4],[110,53.1,-35.0,49.5],[115,52.2,-35.0,51.7],[120,51.6,-35.0,52.8],[125,50.7,-33.9,54.0],[130,48.5,-33.4,55.4],[135,47.5,-33.3,56.0],[140,47.5,-33.3,56.1],[145,47.5,-33.3,56.1],[150,47.5,-33.3,56.2],[155,47.5,-33.3,56.2],[160,47.5,-33.3,56.2],[165,47.5,-33.3,56.2],[170,47.5,-33.3,56.2],[175,47.5,-33.3,56.2],[180,47.5,-33.3,56.2],[185,47.5,-33.3,56.2],[190,47.5,-33.3,56.2],[195,47.5,-33.3,56.2],[200,47.5,-33.3,56.2],[205,47.5,-33.3,56.2],[210,47.5,-33.3,56.2],[215,47.5,-33.3,56.2],[220,47.5,-33.3,56.2],[225,47.5,-33.3,56.2],[230,47.5,-33.3,56.2],[235,47.5,-33.3,56.2],[240,47.5,-33.3,56.2],[245,47.5,-33.3,56.2],[250,47.5,-33.3,56.2],[255,47.5,-33.3,56.2],[260,47.5,-33.3,56.2],[265,47.5,-33.3,56.2],[270,47.5,-33.3,56.2],[275,47.5,-33.3,56.2],[280,47.5,-33.3,56.2],[285,47.5,-33.3,56.2],[290,47.5,-33.3,56.2],[295,47.5,-33.3,56.2],[300,47.5,-33.3,56.2],[305,47.5,-33.3,56.2],[310,47.5,-33.3,56.2],[315,47.5,-33.3,56.2],[320,47.5,-33.3,56.2]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='eant':
cont= [[5,8.2,-49.4,0.8],[10,8.2,-49.4,1.5],[15,9.8,-48.4,2.1],[20,10.7,-47.9,2.8],[25,11.4,-48.2,3.8],[30,11.8,-48.3,4.8],[35,12.5,-46.1,6.0],[40,13.6,-41.5,7.4],[45,11.1,-41.1,8.5],[50,9.1,-40.9,9.6],[55,9.4,-43.5,10.3],[60,10.6,-47.4,10.8],[65,8.1,-47.7,11.3],[70,0.4,-43.3,12.2],[75,3.7,138.9,-13.8],[80,2.7,142.7,-16.1],[85,0.6,144.7,-18.8],[90,1.4,-37.0,22.3],[95,2.9,-38.3,25.8],[100,3.1,146.5,-26.8],[105,5.5,148.9,-30.3],[110,7.4,150.7,-33.9],[115,9.0,152.3,-37.6],[120,10.3,153.6,-41.3],[125,9.4,152.4,-43.0],[130,9.1,151.5,-45.3],[135,8.6,150.9,-47.6],[140,8.0,150.1,-49.2],[145,7.3,148.1,-50.7],[150,7.4,147.1,-52.6],[155,9.0,148.0,-55.4],[160,10.5,148.8,-58.2],[165,10.5,148.8,-58.2],[170,10.5,148.8,-58.2],[175,10.5,148.8,-58.2],[180,10.5,148.8,-58.2],[185,10.5,148.8,-58.2],[190,10.5,148.8,-58.2],[195,10.5,148.8,-58.2],[200,10.5,148.8,-58.2],[205,10.5,148.8,-58.2],[210,10.5,148.8,-58.2],[215,10.5,148.8,-58.2],[220,10.5,148.8,-58.2],[225,10.5,148.8,-58.2],[230,10.5,148.8,-58.2],[235,10.5,148.8,-58.2],[240,10.5,148.8,-58.2],[245,10.5,148.8,-58.2],[250,10.5,148.8,-58.2],[255,10.5,148.8,-58.2],[260,10.5,148.8,-58.2],[265,10.5,148.8,-58.2],[270,10.5,148.8,-58.2],[275,10.5,148.8,-58.2],[280,10.4,148.8,-58.2],[285,10.5,148.8,-58.2],[290,10.5,148.8,-58.2],[295,10.5,148.8,-58.2],[300,10.5,148.8,-58.2],[305,10.4,148.8,-58.2],[310,10.5,148.8,-58.2],[315,10.5,148.8,-58.2],[320,10.5,148.8,-58.2]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='ind':
cont= [[5,22.7,32.9,-2.3],[10,23.8,33.1,-4.6],[15,27.1,27.4,-6.0],[20,29.6,23.9,-7.5],[25,25.1,33.2,-10.3],[30,22.5,38.5,-13.3],[35,22.6,41.3,-15.9],[40,25.5,42.7,-17.4],[45,24.2,40.1,-19.7],[50,24.0,34.2,-23.5],[55,22.1,29.2,-28.3],[60,19.5,25.2,-34.4],[65,19.0,21.9,-40.2],[70,20.5,18.9,-44.4],[75,21.8,18.2,-47.3],[80,22.3,18.2,-49.1],[85,21.8,22.1,-53.8],[90,20.0,27.5,-58.8],[95,20.7,28.1,-57.8],[100,21.3,28.8,-56.8],[105,21.9,29.6,-55.9],[110,22.6,30.3,-54.9],[115,23.3,31.1,-54.0],[120,24.0,32.0,-53.1],[125,23.4,34.8,-55.2],[130,21.2,36.2,-60.1],[135,21.2,36.2,-61.6],[140,21.9,37.5,-61.5],[145,22.6,39.0,-62.5],[150,24.1,40.4,-62.9],[155,26.9,41.2,-61.6],[160,29.8,42.1,-60.5],[165,29.8,42.1,-60.5],[170,29.8,42.1,-60.5],[175,29.8,42.1,-60.5],[180,29.8,42.1,-60.5],[185,29.8,42.1,-60.5],[190,29.8,42.1,-60.5],[195,29.8,42.1,-60.5],[200,29.8,42.1,-60.5],[205,29.8,42.1,-60.5],[210,29.8,42.1,-60.5],[215,29.8,42.1,-60.5],[220,29.8,42.1,-60.5],[225,29.8,42.1,-60.5],[230,29.8,42.1,-60.5],[235,29.8,42.1,-60.5],[240,29.8,42.1,-60.5],[245,29.8,42.1,-60.5],[250,29.8,42.1,-60.5],[255,29.8,42.1,-60.5],[260,29.8,42.1,-60.5],[265,29.8,42.1,-60.5],[270,29.8,42.1,-60.5],[275,29.8,42.1,-60.5],[280,29.8,42.1,-60.5],[285,29.8,42.1,-60.5],[290,29.8,42.1,-60.5],[295,29.8,42.1,-60.5],[300,29.8,42.1,-60.5],[305,29.8,42.1,-60.5],[310,29.8,42.1,-60.5],[315,29.8,42.1,-60.5],[320,29.8,42.1,-60.5]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='neaf':
cont= [[5,90.0,0.0,0.0],[10,90.0,0.0,0.0],[15,90.0,0.0,0.0],[20,90.0,0.0,0.0],[25,90.0,0.0,0.0],[30,90.0,0.0,0.0],[35,90.0,0.0,0.0],[40,90.0,0.0,0.0],[45,90.0,0.0,0.0],[50,90.0,0.0,0.0],[55,90.0,0.0,0.0],[60,90.0,0.0,0.0],[65,90.0,0.0,0.0],[70,90.0,0.0,0.0],[75,90.0,0.0,0.0],[80,90.0,0.0,0.0],[85,27.9,-61.4,0.0],[90,39.8,-61.4,-0.1],[95,40.8,-61.4,-0.2],[100,40.3,-61.4,-0.3],[105,40.6,-61.4,-0.4],[110,40.6,-61.4,-0.5],[115,40.5,-61.4,-0.6],[120,40.5,-61.4,-0.7],[125,40.5,-61.4,-0.7],[130,40.5,-61.4,-0.7],[135,40.5,-61.4,-0.7],[140,40.5,-61.4,-0.7],[145,40.5,-61.4,-0.7],[150,40.5,-61.4,-0.7],[155,40.5,-61.4,-0.7],[160,40.5,-61.4,-0.7],[165,40.5,-61.4,-0.7],[170,40.5,-61.4,-0.7],[175,40.5,-61.4,-0.7],[180,40.5,-61.4,-0.7],[185,40.5,-61.4,-0.7],[190,40.5,-61.4,-0.7],[195,40.5,-61.4,-0.7],[200,40.5,-61.4,-0.7],[205,40.5,-61.4,-0.7],[210,40.5,-61.4,-0.7],[215,40.5,-61.4,-0.7],[220,40.5,-61.4,-0.7],[225,40.5,-61.4,-0.7],[230,40.5,-61.4,-0.7],[235,40.5,-61.4,-0.7],[240,40.5,-61.4,-0.7],[245,40.4,-61.4,-0.7],[250,40.4,-61.4,-0.7],[255,40.4,-61.4,-0.7],[260,40.4,-61.4,-0.7],[265,40.4,-61.4,-0.7],[270,40.4,-61.4,-0.7],[275,40.4,-61.4,-0.7],[280,40.4,-61.4,-0.7],[285,40.4,-61.4,-0.7],[290,40.4,-61.4,-0.7],[295,40.4,-61.4,-0.7],[300,40.4,-61.4,-0.7],[305,40.4,-61.4,-0.7],[310,40.4,-61.4,-0.7],[315,40.4,-61.4,-0.7],[320,40.4,-61.4,-0.7]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='sac':
cont= [[5,62.1,-40.2,1.6],[10,61.8,-40.3,3.3],[15,59.6,-38.1,5.4],[20,58.5,-37.1,7.5],[25,57.7,-36.4,9.6],[30,56.7,-34.5,11.9],[35,56.5,-33.4,14.3],[40,57.1,-32.6,16.6],[45,57.0,-31.4,18.6],[50,58.2,-31.2,20.5],[55,60.7,-31.9,22.0],[60,62.5,-32.8,23.3],[65,63.7,-33.5,24.6],[70,63.5,-33.4,26.1],[75,63.2,-33.9,28.6],[80,62.7,-34.3,31.5],[85,61.2,-34.3,34.4],[90,59.1,-34.5,37.3],[95,57.2,-34.7,40.3],[100,55.7,-34.8,43.3],[105,54.3,-34.9,46.4],[110,53.1,-35.0,49.5],[115,52.2,-35.0,51.7],[120,51.6,-35.0,52.8],[125,50.7,-33.9,54.0],[130,50.1,-32.8,54.9],[135,50.0,-32.5,55.1],[140,50.0,-32.5,55.1],[145,50.0,-32.5,55.1],[150,50.0,-32.5,55.1],[155,50.0,-32.5,55.1],[160,50.0,-32.5,55.1],[165,50.0,-32.5,55.1],[170,50.0,-32.5,55.1],[175,50.0,-32.5,55.1],[180,50.0,-32.5,55.1],[185,50.0,-32.5,55.1],[190,50.0,-32.5,55.1],[195,50.0,-32.5,55.1],[200,50.0,-32.5,55.1],[205,50.0,-32.5,55.1],[210,50.0,-32.5,55.1],[215,50.0,-32.5,55.1],[220,50.0,-32.5,55.1],[225,50.0,-32.5,55.1],[230,50.0,-32.5,55.1],[235,50.0,-32.5,55.1],[240,50.0,-32.5,55.1],[245,50.0,-32.5,55.1],[250,50.0,-32.5,55.1],[255,50.0,-32.5,55.1],[260,50.0,-32.5,55.1],[265,50.0,-32.5,55.1],[270,50.0,-32.5,55.1],[275,50.0,-32.5,55.1],[280,50.0,-32.5,55.1],[285,50.0,-32.5,55.1],[290,50.0,-32.5,55.1],[295,50.0,-32.5,55.1],[300,50.0,-32.5,55.1],[305,50.0,-32.5,55.1],[310,50.0,-32.5,55.1],[315,50.0,-32.5,55.1],[320,50.0,-32.5,55.1]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='ib':
cont= [[5,0,0,0],[10,0,0,0],[15,77.93,59.14,.12],[20,77.93,59.14,.24],[25,77.93,59.14,.24],[30,-31.21,166.79,1.73],[35,-31.21,166.79,1.73],[40,-27,160,1.73],[45,-23.85,157.12,1.72],[50,-20.6,157.88,2.1],[55,-20.72,162.4,2.61],[60,-16,164,3],[65,-12.95,165.77,3.1],[70,-16.45,167.49,3.1],[75,-16.45,167.49,3.1],[80,-37.17,169,8.04],[85,-38.86,169.85,10.28],[90,-42.64,173.2,16.56],[95,-43.,174.,20],[100,-43.,174.,25],[105,-43.,174.,30],[110,-43.,174.,35],[115,-43.,174.,35],[120,-43.,174.,44.77],[120,-43.86,174.17,44.77],[125,-43.86,174.17,44.77],[130,-46.19,177.47,45.91],[135,-46.19,177.47,45.91],[140,-46.19,177.47,45.91],[145,-46.19,177.47,45.91],[150,-46.19,177.47,45.91],[155,-47.12,179.45,46.29],[160,-47.12,179.45,46.29],[165,-47.12,179.45,46.29],[170,-47.55,180.35,50.62],[175,-46.8,181.1,50.33],[180,-46.8,181.1,50.33],[185,-46.8,181.1,50.33],[190,-46.8,181.1,50.33],[195,-46.8,181.1,50.33],[200,-46.8,181.1,50.33],[205,-46.8,181.1,50.33],[210,-46.8,181.1,50.33],[215,-46.8,181.1,50.33],[220,-46.8,181.1,50.33],[225,-46.8,181.1,50.33],[230,-46.8,181.1,50.33],[235,-46.8,181.1,50.33],[240,-46.8,181.1,50.33],[245,-46.8,181.1,50.33],[250,-46.8,181.1,50.33],[255,-46.8,181.1,50.33],[260,-46.8,181.1,50.33],[265,-46.8,181.1,50.33],[270,-46.8,181.1,50.33],[275,-46.8,181.1,50.33],[280,-46.8,181.1,50.33],[285,-46.8,181.1,50.33],[290,-46.8,181.1,50.33],[295,-46.8,181.1,50.33],[300,-46.8,181.1,50.33],[305,-46.8,181.1,50.33],[310,-46.8,181.1,50.33],[315,-46.8,181.1,50.33],[320,-46.8,181.1,50.33]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='saf':
cont= [[0,0,56.0,2.2],[5,0,57.6,2.5],[10,0,53.9,2.5],[15,0,66.5,3.0],[20,0,75.5,4.7],[25,0,84.1,6.8],[30,0,95.8,7.9],[35,0,98.8,8.7],[40,0,107.5,9.2],[45,0,110.9,10.3],[50,0,111.6,13.2],[55,0,115.7,13.9],[60,0,123.5,15.7],[65,0,127.8,17.5],[70,0,137.2,17.5],[75,0,140.3,19.2],[80,0,138.1,19.3],[85,0,142.9,19.6],[90,0,144.7,20.5],[95,0,144.3,20.8],[100,0,150.8,22.3],[105,0,160.2,26.9],[110,0,169.2,32.1],[115,0,170.3,35.6],[120,0,171.3,36.2],[125,0,172.1,37.5],[130,0,170.0,39.4],[135,0,172.6,42.1],[140,0,163.1,40.8],[145,0,155.2,38.1],[150,0,155.0,34.8],[155,0,155.0,33.2],[160,0,157.0,30.7],[165,0,159.5,32.5],[170,0,167.6,28.8],[175,0,167.8,27.7],[180,0,167.4,25.9],[185,0,168.4,21.6],[190,0,158.8,18.2],[195,0,147.9,17.8],[200,0,144.4,19.2],[205,0,137.4,20.7],[210,0,133.6,23.1],[215,0,129.9,26.4],[220,0,127.2,27.2],[225,0,128.0,29.4],[230,0,130.0,31.4],[235,0,133.6,35.3],[240,0,137.4,36.5],[245,0,143.1,39.6],[250,0,145.4,40.4],[255,0,145.6,41.8],[260,0,144.8,41.9],[265,0,141.6,47.1],[270,0,140.3,46.8],[275,0,138.2,51.1],[280,0,138.6,51.6],[285,0,136.5,51.8],[290,0,135.8,52.8],[295,0,136.8,53.5],[300,0,136.9,55.4],[305,0,138.9,56.3],[310,0,139.9,59.5],[315,0,138.9,60.8],[320,0,132.5,61.6]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
return 'NONE' | python | def get_pole(continent,age):
"""
returns rotation poles and angles for specified continents and ages
assumes fixed Africa.
Parameters
__________
continent :
aus : Australia
eur : Eurasia
mad : Madacascar
[nwaf,congo] : NW Africa [choose one]
col : Colombia
grn : Greenland
nam : North America
par : Paraguay
eant : East Antarctica
ind : India
[neaf,kala] : NE Africa [choose one]
[sac,sam] : South America [choose one]
ib : Iberia
saf : South Africa
Returns
_______
[pole longitude, pole latitude, rotation angle] : for the continent at specified age
"""
age=int(age)
if continent=='congo':continent='nwaf'
if continent=='kala':continent='neaf'
if continent=='sam':continent='sac'
if continent=='ant':continent='eant'
if continent=='af':
return [0,0,0] # assume africa fixed
if continent=='aus':
cont= [[5,9.7,54.3,-3.3],[10,10.4,52.8,-6.2],[15,11.5,49.8,-9.0],[20,12.4,48.0,-11.8],[25,12.9,48.3,-15.0],[30,12.8,49.9,-18.1],[35,13.5,50.8,-20.9],[40,14.1,52.7,-22.1],[45,14.4,54.7,-22.9],[50,14.7,56.5,-23.6],[55,14.0,57.3,-24.7],[60,12.9,57.9,-25.7],[65,13.6,58.8,-26.3],[70,17.3,60.2,-26.3],[75,19.8,63.3,-26.7],[80,20.5,68.5,-26.6],[85,19.8,74.6,-26.9],[90,17.7,80.9,-28.9],[95,15.9,86.2,-31.1],[100,18.4,89.3,-30.7],[105,17.9,95.6,-32.6],[110,17.3,101.0,-34.8],[115,16.8,105.6,-37.4],[120,16.4,109.4,-40.3],[125,15.7,110.3,-42.3],[130,15.9,111.6,-44.4],[135,15.9,113.1,-46.6],[140,15.6,113.7,-48.3],[145,15.0,113.1,-50.5],[150,15.5,113.5,-52.5],[155,17.6,115.7,-54.3],[160,19.5,117.8,-56.2],[165,19.5,117.8,-56.2],[170,19.5,117.8,-56.2],[175,19.5,117.8,-56.2],[180,19.5,117.8,-56.2],[185,19.5,117.8,-56.2],[190,19.5,117.8,-56.2],[195,19.5,117.8,-56.2],[200,19.5,117.8,-56.2],[205,19.5,117.8,-56.2],[210,19.5,117.8,-56.2],[215,19.5,117.8,-56.2],[220,19.5,117.8,-56.2],[225,19.5,117.8,-56.2],[230,19.5,117.8,-56.2],[235,19.5,117.8,-56.2],[240,19.5,117.8,-56.2],[245,19.5,117.8,-56.2],[250,19.5,117.8,-56.2],[255,19.5,117.8,-56.2],[260,19.5,117.8,-56.2],[265,19.5,117.8,-56.2],[270,19.5,117.8,-56.2],[275,19.5,117.8,-56.2],[280,19.5,117.8,-56.2],[285,19.5,117.8,-56.2],[290,19.5,117.8,-56.2],[295,19.5,117.8,-56.2],[300,19.5,117.8,-56.2],[305,19.5,117.8,-56.2],[310,19.5,117.8,-56.2],[315,19.5,117.8,-56.2],[320,19.5,117.8,-56.2]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='eur':
cont= [[5,17.9,-27.1,0.6],[10,18.4,-26.3,1.2],[15,18.9,-24.6,1.8],[20,17.2,-22.7,2.4],[25,20.7,-19.0,3.0],[30,24.9,-19.5,4.3],[35,27.2,-19.3,5.8],[40,28.7,-18.5,7.5],[45,30.3,-18.2,9.0],[50,30.8,-16.7,10.0],[55,32.7,-15.4,11.3],[60,34.8,-15.7,12.6],[65,36.0,-15.8,13.6],[70,35.4,-16.1,14.9],[75,35.5,-15.7,15.5],[80,36.1,-15.2,16.9],[85,37.0,-14.2,18.8],[90,39.6,-13.7,21.9],[95,39.8,-13.7,25.2],[100,40.2,-12.5,28.5],[105,41.6,-11.2,31.7],[110,42.6,-9.8,34.5],[115,43.4,-8.5,37.3],[120,44.5,-6.9,40.3],[125,45.3,-6.3,42.0],[130,45.9,-5.7,43.0],[135,46.6,-5.3,44.0],[140,47.3,-4.9,45.2],[145,47.8,-4.8,46.4],[150,48.6,-4.0,47.9],[155,49.8,-2.2,50.0],[160,50.6,-1.2,52.1],[165,51.4,-0.3,54.2],[170,52.1,0.6,56.3],[175,52.9,1.9,59.6],[180,53.0,2.0,60.0],[185,53.0,2.0,60.4],[190,53.1,2.1,60.8],[195,53.2,2.2,61.1],[200,53.3,2.2,61.5],[205,53.2,2.6,59.7],[210,53.1,2.9,57.8],[215,53.1,3.3,55.9],[220,52.9,3.6,53.6],[225,52.7,4.0,51.4],[230,52.4,4.4,49.1],[235,52.2,4.8,46.8],[240,51.9,5.3,44.5],[245,51.9,5.3,44.5],[250,51.9,5.3,44.5],[255,51.9,5.3,44.5],[260,51.9,5.3,44.5],[265,51.9,5.3,44.5],[270,51.9,5.3,44.5],[275,51.9,5.3,44.5],[280,51.9,5.3,44.5],[285,51.9,5.3,44.5],[290,51.9,5.3,44.5],[295,51.9,5.3,44.5],[300,51.9,5.3,44.5],[305,51.9,5.3,44.5],[310,51.9,5.3,44.5],[315,51.9,5.3,44.5],[320,51.9,5.3,44.5]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='mad':
cont= [[5,90.0,0.0,0.0],[10,90.0,0.0,0.0],[15,90.0,0.0,0.0],[20,90.0,0.0,0.0],[25,90.0,0.0,0.0],[30,90.0,0.0,0.0],[35,90.0,0.0,0.0],[40,90.0,0.0,0.0],[45,90.0,0.0,0.0],[50,90.0,0.0,0.0],[55,90.0,0.0,0.0],[60,90.0,0.0,0.0],[65,90.0,0.0,0.0],[70,90.0,0.0,0.0],[75,90.0,0.0,0.0],[80,90.0,0.0,0.0],[85,90.0,0.0,0.0],[90,90.0,0.0,0.0],[95,90.0,0.0,0.0],[100,90.0,0.0,0.0],[105,90.0,0.0,0.0],[110,90.0,0.0,0.0],[115,90.0,0.0,0.0],[120,90.0,0.0,0.0],[125,2.6,-63.3,1.8],[130,2.6,-63.3,3.9],[135,1.5,-57.6,5.7],[140,1.0,-55.9,7.2],[145,0.6,118.3,-8.9],[150,4.5,119.8,-10.9],[155,10.6,130.1,-13.0],[160,14.8,137.5,-15.4],[165,14.8,137.5,-15.4],[170,14.8,137.5,-15.4],[175,14.8,137.5,-15.4],[180,14.8,137.5,-15.4],[185,14.8,137.5,-15.4],[190,14.8,137.5,-15.4],[195,14.8,137.5,-15.4],[200,14.8,137.5,-15.4],[205,14.8,137.5,-15.4],[210,14.8,137.5,-15.4],[215,14.8,137.5,-15.4],[220,14.8,137.5,-15.4],[225,14.8,137.5,-15.4],[230,14.8,137.5,-15.4],[235,14.8,137.5,-15.4],[240,14.8,137.5,-15.4],[245,14.8,137.5,-15.4],[250,14.8,137.5,-15.4],[255,14.8,137.5,-15.4],[260,14.8,137.5,-15.4],[265,14.8,137.5,-15.4],[270,14.8,137.5,-15.4],[275,14.8,137.5,-15.4],[280,14.8,137.5,-15.4],[285,14.8,137.5,-15.4],[290,14.8,137.5,-15.4],[295,14.8,137.5,-15.4],[300,14.8,137.5,-15.4],[305,14.8,137.5,-15.4],[310,14.8,137.5,-15.4],[315,14.8,137.5,-15.4],[320,14.8,137.5,-15.4]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='nwaf':
cont= [[5,90.0,0.0,0.0],[10,90.0,0.0,0.0],[15,90.0,0.0,0.0],[20,90.0,0.0,0.0],[25,90.0,0.0,0.0],[30,90.0,0.0,0.0],[35,90.0,0.0,0.0],[40,90.0,0.0,0.0],[45,90.0,0.0,0.0],[50,90.0,0.0,0.0],[55,90.0,0.0,0.0],[60,90.0,0.0,0.0],[65,90.0,0.0,0.0],[70,90.0,0.0,0.0],[75,90.0,0.0,0.0],[80,90.0,0.0,0.0],[85,19.6,6.7,0.0],[90,16.6,6.7,-0.2],[95,16.5,6.7,-0.4],[100,16.5,6.7,-0.5],[105,16.5,6.7,-0.7],[110,16.5,6.7,-0.8],[115,16.5,6.7,-1.0],[120,16.5,6.7,-1.1],[125,16.5,6.7,-1.2],[130,16.5,6.7,-1.2],[135,16.5,6.7,-1.2],[140,16.5,6.7,-1.2],[145,16.5,6.7,-1.2],[150,16.5,6.7,-1.2],[155,16.5,6.7,-1.2],[160,16.5,6.7,-1.2],[165,16.5,6.7,-1.2],[170,16.5,6.7,-1.2],[175,16.5,6.7,-1.2],[180,16.5,6.7,-1.2],[185,16.5,6.7,-1.2],[190,16.5,6.7,-1.2],[195,16.5,6.7,-1.2],[200,16.5,6.7,-1.2],[205,16.5,6.7,-1.2],[210,16.5,6.7,-1.2],[215,16.5,6.7,-1.2],[220,16.5,6.7,-1.2],[225,16.5,6.7,-1.2],[230,16.5,6.7,-1.2],[235,16.5,6.7,-1.2],[240,16.5,6.7,-1.2],[245,16.5,6.7,-1.2],[250,16.5,6.7,-1.2],[255,16.5,6.7,-1.2],[260,16.5,6.7,-1.2],[265,16.5,6.7,-1.2],[270,16.5,6.7,-1.2],[275,16.5,6.7,-1.2],[280,16.5,6.7,-1.2],[285,16.5,6.7,-1.2],[290,16.5,6.7,-1.2],[295,16.5,6.7,-1.2],[300,16.5,6.7,-1.2],[305,16.5,6.7,-1.2],[310,16.5,6.7,-1.2],[315,16.5,6.7,-1.2],[320,16.5,6.7,-1.2]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='col':
cont= [[5,62.1,-40.2,1.6],[10,61.8,-40.3,3.3],[15,59.6,-38.1,5.4],[20,58.5,-37.1,7.5],[25,57.7,-36.4,9.6],[30,56.7,-34.5,11.9],[35,56.5,-33.4,14.3],[40,57.1,-32.6,16.6],[45,57.0,-31.4,18.6],[50,58.2,-31.2,20.5],[55,60.7,-31.9,22.0],[60,62.5,-32.8,23.3],[65,63.7,-33.5,24.6],[70,63.5,-33.4,26.1],[75,63.2,-33.9,28.6],[80,62.7,-34.3,31.5],[85,61.2,-34.3,34.4],[90,59.1,-34.5,37.3],[95,57.2,-34.7,40.3],[100,55.7,-34.8,43.3],[105,54.3,-34.9,46.4],[110,53.1,-35.0,49.5],[115,52.2,-35.0,51.7],[120,51.6,-35.0,52.8],[125,50.7,-33.9,54.0],[130,48.5,-33.4,55.4],[135,47.5,-33.3,56.0],[140,47.5,-33.3,56.1],[145,47.5,-33.3,56.1],[150,47.5,-33.3,56.2],[155,47.5,-33.3,56.2],[160,47.5,-33.3,56.2],[165,47.5,-33.3,56.2],[170,47.5,-33.3,56.2],[175,47.5,-33.3,56.2],[180,47.5,-33.3,56.2],[185,47.5,-33.3,56.2],[190,47.5,-33.3,56.2],[195,47.5,-33.3,56.2],[200,47.5,-33.3,56.2],[205,47.5,-33.3,56.2],[210,47.5,-33.3,56.2],[215,47.5,-33.3,56.2],[220,47.5,-33.3,56.2],[225,47.5,-33.3,56.2],[230,47.5,-33.3,56.2],[235,47.5,-33.3,56.2],[240,47.5,-33.3,56.2],[245,47.5,-33.3,56.2],[250,47.5,-33.3,56.2],[255,47.5,-33.3,56.2],[260,47.5,-33.3,56.2],[265,47.5,-33.3,56.2],[270,47.5,-33.3,56.2],[275,47.5,-33.3,56.2],[280,47.5,-33.3,56.2],[285,47.5,-33.3,56.2],[290,47.5,-33.3,56.2],[295,47.5,-33.3,56.2],[300,47.5,-33.3,56.2],[305,47.5,-33.3,56.2],[310,47.5,-33.3,56.2],[315,47.5,-33.3,56.2],[320,47.5,-33.3,56.2]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='grn':
cont= [[5,80.9,22.8,1.3],[10,80.9,22.9,2.6],[15,80.9,23.2,4.1],[20,80.6,24.4,5.5],[25,79.5,28.1,6.8],[30,77.3,12.5,8.6],[35,74.8,7.2,10.2],[40,72.6,9.5,11.5],[45,71.4,11.4,12.7],[50,71.0,20.7,14.2],[55,71.8,29.6,16.8],[60,71.9,30.5,17.5],[65,71.3,32.9,17.6],[70,69.8,29.0,17.9],[75,69.0,26.6,18.5],[80,67.6,21.0,19.8],[85,66.3,16.4,21.5],[90,65.9,11.5,24.2],[95,64.2,5.5,26.9],[100,62.7,2.8,30.1],[105,62.4,1.6,33.3],[110,62.1,0.9,36.5],[115,61.8,0.5,39.7],[120,61.8,0.8,43.1],[125,61.9,1.0,44.9],[130,62.2,1.3,46.0],[135,62.4,1.6,47.1],[140,62.7,1.6,48.4],[145,62.9,1.3,49.7],[150,63.2,1.8,51.4],[155,63.7,3.6,53.8],[160,64.1,4.2,56.0],[165,64.4,4.8,58.3],[170,64.7,5.3,60.6],[175,64.8,6.0,64.1],[180,64.9,6.0,64.5],[185,64.9,5.9,64.9],[190,65.0,5.9,65.4],[195,65.0,5.8,65.8],[200,65.1,5.8,66.2],[205,65.1,5.7,66.7],[210,65.2,5.7,67.1],[215,65.2,5.6,67.5],[220,65.2,5.6,67.5],[225,65.2,5.6,67.5],[230,65.2,5.6,67.5],[235,65.2,5.6,67.5],[240,65.2,5.6,67.5],[245,65.2,5.6,67.5],[250,65.2,5.6,67.5],[255,65.2,5.6,67.5],[260,65.2,5.6,67.5],[265,65.2,5.6,67.5],[270,65.2,5.6,67.5],[275,65.2,5.6,67.5],[280,65.2,5.6,67.5],[285,65.2,5.6,67.5],[290,65.2,5.6,67.5],[295,65.2,5.6,67.5],[300,65.2,5.6,67.5],[305,65.2,5.6,67.5],[310,65.2,5.6,67.5],[315,65.2,5.6,67.5],[320,65.2,5.6,67.5]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='nam':
cont= [[5,80.9,22.8,1.3],[10,80.9,22.9,2.6],[15,80.9,23.2,4.1],[20,80.6,24.4,5.5],[25,79.5,28.1,6.8],[30,77.3,12.5,8.6],[35,75.4,3.5,10.5],[40,74.5,-1.1,12.6],[45,74.3,-4.3,14.6],[50,75.9,-3.5,16.2],[55,79.8,4.1,17.6],[60,81.6,5.1,19.1],[65,82.6,3.2,20.7],[70,81.6,-6.5,22.4],[75,80.4,-13.1,24.6],[80,78.2,-18.8,27.5],[85,76.2,-21.3,30.5],[90,74.6,-23.0,33.8],[95,72.0,-24.7,36.9],[100,70.0,-24.0,40.2],[105,69.1,-23.3,43.6],[110,68.3,-22.6,47.0],[115,67.6,-21.8,50.4],[120,67.1,-20.4,53.9],[125,67.0,-19.7,55.6],[130,67.0,-19.1,56.7],[135,67.1,-18.7,57.9],[140,67.2,-18.4,59.2],[145,67.1,-18.3,60.5],[150,67.3,-17.6,62.2],[155,67.6,-15.5,64.6],[160,67.6,-14.5,66.8],[165,67.7,-13.6,69.1],[170,67.8,-12.8,71.4],[175,67.7,-11.5,74.8],[180,67.7,-11.5,75.3],[185,67.7,-11.5,75.7],[190,67.7,-11.5,76.1],[195,67.7,-11.5,76.6],[200,67.7,-11.5,77.0],[205,67.7,-11.5,77.4],[210,67.7,-11.5,77.9],[215,67.7,-11.5,78.3],[220,67.7,-11.5,78.3],[225,67.7,-11.5,78.3],[230,67.7,-11.5,78.3],[235,67.7,-11.5,78.3],[240,67.7,-11.5,78.3],[245,67.7,-11.5,78.3],[250,67.7,-11.5,78.3],[255,67.7,-11.5,78.3],[260,67.7,-11.5,78.3],[265,67.7,-11.5,78.3],[270,67.7,-11.5,78.3],[275,67.7,-11.5,78.3],[280,67.7,-11.5,78.3],[285,67.7,-11.5,78.3],[290,67.7,-11.5,78.3],[295,67.7,-11.5,78.3],[300,67.7,-11.5,78.3],[305,67.7,-11.5,78.3],[310,67.7,-11.5,78.3],[315,67.7,-11.5,78.3],[320,67.7,-11.5,78.3]]
for rec in cont:
if int(age)==int(rec[0]):
pole= [rec[1],rec[2],rec[3]]
return pole
if continent=='par':
cont= [[5,62.1,-40.2,1.6],[10,61.8,-40.3,3.3],[15,59.6,-38.1,5.4],[20,58.5,-37.1,7.5],[25,57.7,-36.4,9.6],[30,56.7,-34.5,11.9],[35,56.5,-33.4,14.3],[40,57.1,-32.6,16.6],[45,57.0,-31.4,18.6],[50,58.2,-31.2,20.5],[55,60.7,-31.9,22.0],[60,62.5,-32.8,23.3],[65,63.7,-33.5,24.6],[70,63.5,-33.4,26.1],[75,63.2,-33.9,28.6],[80,62.7,-34.3,31.5],[85,61.2,-34.3,34.4],[90,59.1,-34.5,37.3],[95,57.2,-34.7,40.3],[100,55.7,-34.8,43.3],[105,54.3,-34.9,46.4],[110,53.1,-35.0,49.5],[115,52.2,-35.0,51.7],[120,51.6,-35.0,52.8],[125,50.7,-33.9,54.0],[130,48.5,-33.4,55.4],[135,47.5,-33.3,56.0],[140,47.5,-33.3,56.1],[145,47.5,-33.3,56.1],[150,47.5,-33.3,56.2],[155,47.5,-33.3,56.2],[160,47.5,-33.3,56.2],[165,47.5,-33.3,56.2],[170,47.5,-33.3,56.2],[175,47.5,-33.3,56.2],[180,47.5,-33.3,56.2],[185,47.5,-33.3,56.2],[190,47.5,-33.3,56.2],[195,47.5,-33.3,56.2],[200,47.5,-33.3,56.2],[205,47.5,-33.3,56.2],[210,47.5,-33.3,56.2],[215,47.5,-33.3,56.2],[220,47.5,-33.3,56.2],[225,47.5,-33.3,56.2],[230,47.5,-33.3,56.2],[235,47.5,-33.3,56.2],[240,47.5,-33.3,56.2],[245,47.5,-33.3,56.2],[250,47.5,-33.3,56.2],[255,47.5,-33.3,56.2],[260,47.5,-33.3,56.2],[265,47.5,-33.3,56.2],[270,47.5,-33.3,56.2],[275,47.5,-33.3,56.2],[280,47.5,-33.3,56.2],[285,47.5,-33.3,56.2],[290,47.5,-33.3,56.2],[295,47.5,-33.3,56.2],[300,47.5,-33.3,56.2],[305,47.5,-33.3,56.2],[310,47.5,-33.3,56.2],[315,47.5,-33.3,56.2],[320,47.5,-33.3,56.2]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='eant':
cont= [[5,8.2,-49.4,0.8],[10,8.2,-49.4,1.5],[15,9.8,-48.4,2.1],[20,10.7,-47.9,2.8],[25,11.4,-48.2,3.8],[30,11.8,-48.3,4.8],[35,12.5,-46.1,6.0],[40,13.6,-41.5,7.4],[45,11.1,-41.1,8.5],[50,9.1,-40.9,9.6],[55,9.4,-43.5,10.3],[60,10.6,-47.4,10.8],[65,8.1,-47.7,11.3],[70,0.4,-43.3,12.2],[75,3.7,138.9,-13.8],[80,2.7,142.7,-16.1],[85,0.6,144.7,-18.8],[90,1.4,-37.0,22.3],[95,2.9,-38.3,25.8],[100,3.1,146.5,-26.8],[105,5.5,148.9,-30.3],[110,7.4,150.7,-33.9],[115,9.0,152.3,-37.6],[120,10.3,153.6,-41.3],[125,9.4,152.4,-43.0],[130,9.1,151.5,-45.3],[135,8.6,150.9,-47.6],[140,8.0,150.1,-49.2],[145,7.3,148.1,-50.7],[150,7.4,147.1,-52.6],[155,9.0,148.0,-55.4],[160,10.5,148.8,-58.2],[165,10.5,148.8,-58.2],[170,10.5,148.8,-58.2],[175,10.5,148.8,-58.2],[180,10.5,148.8,-58.2],[185,10.5,148.8,-58.2],[190,10.5,148.8,-58.2],[195,10.5,148.8,-58.2],[200,10.5,148.8,-58.2],[205,10.5,148.8,-58.2],[210,10.5,148.8,-58.2],[215,10.5,148.8,-58.2],[220,10.5,148.8,-58.2],[225,10.5,148.8,-58.2],[230,10.5,148.8,-58.2],[235,10.5,148.8,-58.2],[240,10.5,148.8,-58.2],[245,10.5,148.8,-58.2],[250,10.5,148.8,-58.2],[255,10.5,148.8,-58.2],[260,10.5,148.8,-58.2],[265,10.5,148.8,-58.2],[270,10.5,148.8,-58.2],[275,10.5,148.8,-58.2],[280,10.4,148.8,-58.2],[285,10.5,148.8,-58.2],[290,10.5,148.8,-58.2],[295,10.5,148.8,-58.2],[300,10.5,148.8,-58.2],[305,10.4,148.8,-58.2],[310,10.5,148.8,-58.2],[315,10.5,148.8,-58.2],[320,10.5,148.8,-58.2]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='ind':
cont= [[5,22.7,32.9,-2.3],[10,23.8,33.1,-4.6],[15,27.1,27.4,-6.0],[20,29.6,23.9,-7.5],[25,25.1,33.2,-10.3],[30,22.5,38.5,-13.3],[35,22.6,41.3,-15.9],[40,25.5,42.7,-17.4],[45,24.2,40.1,-19.7],[50,24.0,34.2,-23.5],[55,22.1,29.2,-28.3],[60,19.5,25.2,-34.4],[65,19.0,21.9,-40.2],[70,20.5,18.9,-44.4],[75,21.8,18.2,-47.3],[80,22.3,18.2,-49.1],[85,21.8,22.1,-53.8],[90,20.0,27.5,-58.8],[95,20.7,28.1,-57.8],[100,21.3,28.8,-56.8],[105,21.9,29.6,-55.9],[110,22.6,30.3,-54.9],[115,23.3,31.1,-54.0],[120,24.0,32.0,-53.1],[125,23.4,34.8,-55.2],[130,21.2,36.2,-60.1],[135,21.2,36.2,-61.6],[140,21.9,37.5,-61.5],[145,22.6,39.0,-62.5],[150,24.1,40.4,-62.9],[155,26.9,41.2,-61.6],[160,29.8,42.1,-60.5],[165,29.8,42.1,-60.5],[170,29.8,42.1,-60.5],[175,29.8,42.1,-60.5],[180,29.8,42.1,-60.5],[185,29.8,42.1,-60.5],[190,29.8,42.1,-60.5],[195,29.8,42.1,-60.5],[200,29.8,42.1,-60.5],[205,29.8,42.1,-60.5],[210,29.8,42.1,-60.5],[215,29.8,42.1,-60.5],[220,29.8,42.1,-60.5],[225,29.8,42.1,-60.5],[230,29.8,42.1,-60.5],[235,29.8,42.1,-60.5],[240,29.8,42.1,-60.5],[245,29.8,42.1,-60.5],[250,29.8,42.1,-60.5],[255,29.8,42.1,-60.5],[260,29.8,42.1,-60.5],[265,29.8,42.1,-60.5],[270,29.8,42.1,-60.5],[275,29.8,42.1,-60.5],[280,29.8,42.1,-60.5],[285,29.8,42.1,-60.5],[290,29.8,42.1,-60.5],[295,29.8,42.1,-60.5],[300,29.8,42.1,-60.5],[305,29.8,42.1,-60.5],[310,29.8,42.1,-60.5],[315,29.8,42.1,-60.5],[320,29.8,42.1,-60.5]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='neaf':
cont= [[5,90.0,0.0,0.0],[10,90.0,0.0,0.0],[15,90.0,0.0,0.0],[20,90.0,0.0,0.0],[25,90.0,0.0,0.0],[30,90.0,0.0,0.0],[35,90.0,0.0,0.0],[40,90.0,0.0,0.0],[45,90.0,0.0,0.0],[50,90.0,0.0,0.0],[55,90.0,0.0,0.0],[60,90.0,0.0,0.0],[65,90.0,0.0,0.0],[70,90.0,0.0,0.0],[75,90.0,0.0,0.0],[80,90.0,0.0,0.0],[85,27.9,-61.4,0.0],[90,39.8,-61.4,-0.1],[95,40.8,-61.4,-0.2],[100,40.3,-61.4,-0.3],[105,40.6,-61.4,-0.4],[110,40.6,-61.4,-0.5],[115,40.5,-61.4,-0.6],[120,40.5,-61.4,-0.7],[125,40.5,-61.4,-0.7],[130,40.5,-61.4,-0.7],[135,40.5,-61.4,-0.7],[140,40.5,-61.4,-0.7],[145,40.5,-61.4,-0.7],[150,40.5,-61.4,-0.7],[155,40.5,-61.4,-0.7],[160,40.5,-61.4,-0.7],[165,40.5,-61.4,-0.7],[170,40.5,-61.4,-0.7],[175,40.5,-61.4,-0.7],[180,40.5,-61.4,-0.7],[185,40.5,-61.4,-0.7],[190,40.5,-61.4,-0.7],[195,40.5,-61.4,-0.7],[200,40.5,-61.4,-0.7],[205,40.5,-61.4,-0.7],[210,40.5,-61.4,-0.7],[215,40.5,-61.4,-0.7],[220,40.5,-61.4,-0.7],[225,40.5,-61.4,-0.7],[230,40.5,-61.4,-0.7],[235,40.5,-61.4,-0.7],[240,40.5,-61.4,-0.7],[245,40.4,-61.4,-0.7],[250,40.4,-61.4,-0.7],[255,40.4,-61.4,-0.7],[260,40.4,-61.4,-0.7],[265,40.4,-61.4,-0.7],[270,40.4,-61.4,-0.7],[275,40.4,-61.4,-0.7],[280,40.4,-61.4,-0.7],[285,40.4,-61.4,-0.7],[290,40.4,-61.4,-0.7],[295,40.4,-61.4,-0.7],[300,40.4,-61.4,-0.7],[305,40.4,-61.4,-0.7],[310,40.4,-61.4,-0.7],[315,40.4,-61.4,-0.7],[320,40.4,-61.4,-0.7]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='sac':
cont= [[5,62.1,-40.2,1.6],[10,61.8,-40.3,3.3],[15,59.6,-38.1,5.4],[20,58.5,-37.1,7.5],[25,57.7,-36.4,9.6],[30,56.7,-34.5,11.9],[35,56.5,-33.4,14.3],[40,57.1,-32.6,16.6],[45,57.0,-31.4,18.6],[50,58.2,-31.2,20.5],[55,60.7,-31.9,22.0],[60,62.5,-32.8,23.3],[65,63.7,-33.5,24.6],[70,63.5,-33.4,26.1],[75,63.2,-33.9,28.6],[80,62.7,-34.3,31.5],[85,61.2,-34.3,34.4],[90,59.1,-34.5,37.3],[95,57.2,-34.7,40.3],[100,55.7,-34.8,43.3],[105,54.3,-34.9,46.4],[110,53.1,-35.0,49.5],[115,52.2,-35.0,51.7],[120,51.6,-35.0,52.8],[125,50.7,-33.9,54.0],[130,50.1,-32.8,54.9],[135,50.0,-32.5,55.1],[140,50.0,-32.5,55.1],[145,50.0,-32.5,55.1],[150,50.0,-32.5,55.1],[155,50.0,-32.5,55.1],[160,50.0,-32.5,55.1],[165,50.0,-32.5,55.1],[170,50.0,-32.5,55.1],[175,50.0,-32.5,55.1],[180,50.0,-32.5,55.1],[185,50.0,-32.5,55.1],[190,50.0,-32.5,55.1],[195,50.0,-32.5,55.1],[200,50.0,-32.5,55.1],[205,50.0,-32.5,55.1],[210,50.0,-32.5,55.1],[215,50.0,-32.5,55.1],[220,50.0,-32.5,55.1],[225,50.0,-32.5,55.1],[230,50.0,-32.5,55.1],[235,50.0,-32.5,55.1],[240,50.0,-32.5,55.1],[245,50.0,-32.5,55.1],[250,50.0,-32.5,55.1],[255,50.0,-32.5,55.1],[260,50.0,-32.5,55.1],[265,50.0,-32.5,55.1],[270,50.0,-32.5,55.1],[275,50.0,-32.5,55.1],[280,50.0,-32.5,55.1],[285,50.0,-32.5,55.1],[290,50.0,-32.5,55.1],[295,50.0,-32.5,55.1],[300,50.0,-32.5,55.1],[305,50.0,-32.5,55.1],[310,50.0,-32.5,55.1],[315,50.0,-32.5,55.1],[320,50.0,-32.5,55.1]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='ib':
cont= [[5,0,0,0],[10,0,0,0],[15,77.93,59.14,.12],[20,77.93,59.14,.24],[25,77.93,59.14,.24],[30,-31.21,166.79,1.73],[35,-31.21,166.79,1.73],[40,-27,160,1.73],[45,-23.85,157.12,1.72],[50,-20.6,157.88,2.1],[55,-20.72,162.4,2.61],[60,-16,164,3],[65,-12.95,165.77,3.1],[70,-16.45,167.49,3.1],[75,-16.45,167.49,3.1],[80,-37.17,169,8.04],[85,-38.86,169.85,10.28],[90,-42.64,173.2,16.56],[95,-43.,174.,20],[100,-43.,174.,25],[105,-43.,174.,30],[110,-43.,174.,35],[115,-43.,174.,35],[120,-43.,174.,44.77],[120,-43.86,174.17,44.77],[125,-43.86,174.17,44.77],[130,-46.19,177.47,45.91],[135,-46.19,177.47,45.91],[140,-46.19,177.47,45.91],[145,-46.19,177.47,45.91],[150,-46.19,177.47,45.91],[155,-47.12,179.45,46.29],[160,-47.12,179.45,46.29],[165,-47.12,179.45,46.29],[170,-47.55,180.35,50.62],[175,-46.8,181.1,50.33],[180,-46.8,181.1,50.33],[185,-46.8,181.1,50.33],[190,-46.8,181.1,50.33],[195,-46.8,181.1,50.33],[200,-46.8,181.1,50.33],[205,-46.8,181.1,50.33],[210,-46.8,181.1,50.33],[215,-46.8,181.1,50.33],[220,-46.8,181.1,50.33],[225,-46.8,181.1,50.33],[230,-46.8,181.1,50.33],[235,-46.8,181.1,50.33],[240,-46.8,181.1,50.33],[245,-46.8,181.1,50.33],[250,-46.8,181.1,50.33],[255,-46.8,181.1,50.33],[260,-46.8,181.1,50.33],[265,-46.8,181.1,50.33],[270,-46.8,181.1,50.33],[275,-46.8,181.1,50.33],[280,-46.8,181.1,50.33],[285,-46.8,181.1,50.33],[290,-46.8,181.1,50.33],[295,-46.8,181.1,50.33],[300,-46.8,181.1,50.33],[305,-46.8,181.1,50.33],[310,-46.8,181.1,50.33],[315,-46.8,181.1,50.33],[320,-46.8,181.1,50.33]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
if continent=='saf':
cont= [[0,0,56.0,2.2],[5,0,57.6,2.5],[10,0,53.9,2.5],[15,0,66.5,3.0],[20,0,75.5,4.7],[25,0,84.1,6.8],[30,0,95.8,7.9],[35,0,98.8,8.7],[40,0,107.5,9.2],[45,0,110.9,10.3],[50,0,111.6,13.2],[55,0,115.7,13.9],[60,0,123.5,15.7],[65,0,127.8,17.5],[70,0,137.2,17.5],[75,0,140.3,19.2],[80,0,138.1,19.3],[85,0,142.9,19.6],[90,0,144.7,20.5],[95,0,144.3,20.8],[100,0,150.8,22.3],[105,0,160.2,26.9],[110,0,169.2,32.1],[115,0,170.3,35.6],[120,0,171.3,36.2],[125,0,172.1,37.5],[130,0,170.0,39.4],[135,0,172.6,42.1],[140,0,163.1,40.8],[145,0,155.2,38.1],[150,0,155.0,34.8],[155,0,155.0,33.2],[160,0,157.0,30.7],[165,0,159.5,32.5],[170,0,167.6,28.8],[175,0,167.8,27.7],[180,0,167.4,25.9],[185,0,168.4,21.6],[190,0,158.8,18.2],[195,0,147.9,17.8],[200,0,144.4,19.2],[205,0,137.4,20.7],[210,0,133.6,23.1],[215,0,129.9,26.4],[220,0,127.2,27.2],[225,0,128.0,29.4],[230,0,130.0,31.4],[235,0,133.6,35.3],[240,0,137.4,36.5],[245,0,143.1,39.6],[250,0,145.4,40.4],[255,0,145.6,41.8],[260,0,144.8,41.9],[265,0,141.6,47.1],[270,0,140.3,46.8],[275,0,138.2,51.1],[280,0,138.6,51.6],[285,0,136.5,51.8],[290,0,135.8,52.8],[295,0,136.8,53.5],[300,0,136.9,55.4],[305,0,138.9,56.3],[310,0,139.9,59.5],[315,0,138.9,60.8],[320,0,132.5,61.6]]
for rec in cont:
if age==int(rec[0]): return [rec[1],rec[2],rec[3]]
return 'NONE' | returns rotation poles and angles for specified continents and ages
assumes fixed Africa.
Parameters
__________
continent :
aus : Australia
eur : Eurasia
mad : Madacascar
[nwaf,congo] : NW Africa [choose one]
col : Colombia
grn : Greenland
nam : North America
par : Paraguay
eant : East Antarctica
ind : India
[neaf,kala] : NE Africa [choose one]
[sac,sam] : South America [choose one]
ib : Iberia
saf : South Africa
Returns
_______
[pole longitude, pole latitude, rotation angle] : for the continent at specified age | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/frp.py#L1-L91 |
PmagPy/PmagPy | programs/plot_magmap.py | main | def main():
"""
NAME
plot_magmap.py
DESCRIPTION
makes a color contour map of desired field model
SYNTAX
plot_magmap.py [command line options]
OPTIONS
-h prints help and quits
-f FILE specify field model file with format: l m g h
-fmt [pdf,eps,svg,png] specify format for output figure (default is png)
-mod [arch3k,cals3k,pfm9k,hfm10k,cals10k_2,shadif14k,cals10k] specify model for 3ka to 1900 CE, default is cals10k
-alt ALT; specify altitude in km, default is sealevel (0)
-age specify date in decimal year, default is 2016
-lon0: 0 longitude for map, default is 0
-el: [D,I,B,Br] specify element for plotting
-cm: [see https://matplotlib.org/users/colormaps.html] specify color map for plotting (default is RdYlBu)
"""
cmap = 'RdYlBu'
date = 2016.
if not ccrs:
print("-W- You must intstall the cartopy module to run plot_magmap.py")
sys.exit()
dir_path = '.'
lincr = 1 # level increment for contours
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path = sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-fmt' in sys.argv:
ind = sys.argv.index('-fmt')
fmt = sys.argv[ind+1]
if fmt == 'jpg':
print('jpg not a supported option')
print(main.__doc__)
sys.exit()
else:
fmt = 'png'
if '-cm' in sys.argv:
ind = sys.argv.index('-cm')
cmap = sys.argv[ind+1]
if '-el' in sys.argv:
ind = sys.argv.index('-el')
el = sys.argv[ind+1]
else:
el = 'B'
if '-alt' in sys.argv:
ind = sys.argv.index('-alt')
alt = sys.argv[ind+1]
else:
alt = 0
if '-lon0' in sys.argv:
ind = sys.argv.index('-lon0')
lon_0 = float(sys.argv[ind+1])
else:
lon_0 = 0
if '-mod' in sys.argv:
ind = sys.argv.index('-mod')
mod = sys.argv[ind+1]
ghfile = ''
elif '-f' in sys.argv:
ind = sys.argv.index('-f')
ghfile = sys.argv[ind+1]
mod = 'custom'
date = ''
else:
mod, ghfile = 'cals10k', ''
if '-age' in sys.argv:
ind = sys.argv.index('-age')
date = float(sys.argv[ind+1])
if '-alt' in sys.argv:
ind = sys.argv.index('-alt')
alt = float(sys.argv[ind+1])
else:
alt = 0
# doesn't work correctly with mod other than default
Ds, Is, Bs, Brs, lons, lats = pmag.do_mag_map(
date, mod=mod, lon_0=lon_0, alt=alt, file=ghfile)
ax = plt.axes(projection=ccrs.Mollweide(central_longitude=lon_0))
ax.coastlines()
xx, yy = meshgrid(lons, lats)
if mod == 'custom':
str_date = 'Custom'
else:
str_date = str(date)
if el == 'B':
levmax = Bs.max()+lincr
levmin = round(Bs.min()-lincr)
levels = np.arange(levmin, levmax, lincr)
plt.contourf(xx, yy, Bs, levels=levels, cmap=cmap,
transform=ccrs.PlateCarree(central_longitude=lon_0))
plt.title('Field strength ($\mu$T): '+ str_date)
if el == 'Br':
levmax = Brs.max()+lincr
levmin = round(Brs.min()-lincr)
plt.contourf(xx, yy, Brs,
levels=np.arange(levmin, levmax, lincr),
cmap=cmap, transform=ccrs.PlateCarree(central_longitude=lon_0))
plt.title('Radial field strength ($\mu$T): '+ str_date)
if el == 'I':
levmax = Is.max()+lincr
levmin = round(Is.min()-lincr)
plt.contourf(xx, yy, Is,
levels=np.arange(levmin, levmax, lincr),
cmap=cmap, transform=ccrs.PlateCarree(central_longitude=lon_0))
plt.contour(xx, yy, Is, levels=np.arange(-80, 90, 10),
colors='black', transform=ccrs.PlateCarree(central_longitude=lon_0))
plt.title('Field inclination: '+ str_date)
if el == 'D':
plt.contourf(xx, yy, Ds,
levels=np.arange(-180, 180, 10),
cmap=cmap, transform=ccrs.PlateCarree(central_longitude=lon_0))
plt.contour(xx, yy, Ds, levels=np.arange(-180,
180, 10), colors='black')
# cs=m.contourf(x,y,Ds,levels=np.arange(-180,180,10),cmap=cmap)
# cs=m.contourf(x,y,Ds,levels=np.arange(-180,180,10),cmap=cmap)
# m.contour(x,y,Ds,levels=np.arange(-180,180,10),colors='black')
plt.title('Field declination: '+ str_date)
cbar = plt.colorbar(orientation='horizontal')
figname = 'geomagnetic_field_' + str_date + '.'+fmt
plt.savefig(figname)
print('Figure saved as: ', figname) | python | def main():
"""
NAME
plot_magmap.py
DESCRIPTION
makes a color contour map of desired field model
SYNTAX
plot_magmap.py [command line options]
OPTIONS
-h prints help and quits
-f FILE specify field model file with format: l m g h
-fmt [pdf,eps,svg,png] specify format for output figure (default is png)
-mod [arch3k,cals3k,pfm9k,hfm10k,cals10k_2,shadif14k,cals10k] specify model for 3ka to 1900 CE, default is cals10k
-alt ALT; specify altitude in km, default is sealevel (0)
-age specify date in decimal year, default is 2016
-lon0: 0 longitude for map, default is 0
-el: [D,I,B,Br] specify element for plotting
-cm: [see https://matplotlib.org/users/colormaps.html] specify color map for plotting (default is RdYlBu)
"""
cmap = 'RdYlBu'
date = 2016.
if not ccrs:
print("-W- You must intstall the cartopy module to run plot_magmap.py")
sys.exit()
dir_path = '.'
lincr = 1 # level increment for contours
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path = sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-fmt' in sys.argv:
ind = sys.argv.index('-fmt')
fmt = sys.argv[ind+1]
if fmt == 'jpg':
print('jpg not a supported option')
print(main.__doc__)
sys.exit()
else:
fmt = 'png'
if '-cm' in sys.argv:
ind = sys.argv.index('-cm')
cmap = sys.argv[ind+1]
if '-el' in sys.argv:
ind = sys.argv.index('-el')
el = sys.argv[ind+1]
else:
el = 'B'
if '-alt' in sys.argv:
ind = sys.argv.index('-alt')
alt = sys.argv[ind+1]
else:
alt = 0
if '-lon0' in sys.argv:
ind = sys.argv.index('-lon0')
lon_0 = float(sys.argv[ind+1])
else:
lon_0 = 0
if '-mod' in sys.argv:
ind = sys.argv.index('-mod')
mod = sys.argv[ind+1]
ghfile = ''
elif '-f' in sys.argv:
ind = sys.argv.index('-f')
ghfile = sys.argv[ind+1]
mod = 'custom'
date = ''
else:
mod, ghfile = 'cals10k', ''
if '-age' in sys.argv:
ind = sys.argv.index('-age')
date = float(sys.argv[ind+1])
if '-alt' in sys.argv:
ind = sys.argv.index('-alt')
alt = float(sys.argv[ind+1])
else:
alt = 0
# doesn't work correctly with mod other than default
Ds, Is, Bs, Brs, lons, lats = pmag.do_mag_map(
date, mod=mod, lon_0=lon_0, alt=alt, file=ghfile)
ax = plt.axes(projection=ccrs.Mollweide(central_longitude=lon_0))
ax.coastlines()
xx, yy = meshgrid(lons, lats)
if mod == 'custom':
str_date = 'Custom'
else:
str_date = str(date)
if el == 'B':
levmax = Bs.max()+lincr
levmin = round(Bs.min()-lincr)
levels = np.arange(levmin, levmax, lincr)
plt.contourf(xx, yy, Bs, levels=levels, cmap=cmap,
transform=ccrs.PlateCarree(central_longitude=lon_0))
plt.title('Field strength ($\mu$T): '+ str_date)
if el == 'Br':
levmax = Brs.max()+lincr
levmin = round(Brs.min()-lincr)
plt.contourf(xx, yy, Brs,
levels=np.arange(levmin, levmax, lincr),
cmap=cmap, transform=ccrs.PlateCarree(central_longitude=lon_0))
plt.title('Radial field strength ($\mu$T): '+ str_date)
if el == 'I':
levmax = Is.max()+lincr
levmin = round(Is.min()-lincr)
plt.contourf(xx, yy, Is,
levels=np.arange(levmin, levmax, lincr),
cmap=cmap, transform=ccrs.PlateCarree(central_longitude=lon_0))
plt.contour(xx, yy, Is, levels=np.arange(-80, 90, 10),
colors='black', transform=ccrs.PlateCarree(central_longitude=lon_0))
plt.title('Field inclination: '+ str_date)
if el == 'D':
plt.contourf(xx, yy, Ds,
levels=np.arange(-180, 180, 10),
cmap=cmap, transform=ccrs.PlateCarree(central_longitude=lon_0))
plt.contour(xx, yy, Ds, levels=np.arange(-180,
180, 10), colors='black')
# cs=m.contourf(x,y,Ds,levels=np.arange(-180,180,10),cmap=cmap)
# cs=m.contourf(x,y,Ds,levels=np.arange(-180,180,10),cmap=cmap)
# m.contour(x,y,Ds,levels=np.arange(-180,180,10),colors='black')
plt.title('Field declination: '+ str_date)
cbar = plt.colorbar(orientation='horizontal')
figname = 'geomagnetic_field_' + str_date + '.'+fmt
plt.savefig(figname)
print('Figure saved as: ', figname) | NAME
plot_magmap.py
DESCRIPTION
makes a color contour map of desired field model
SYNTAX
plot_magmap.py [command line options]
OPTIONS
-h prints help and quits
-f FILE specify field model file with format: l m g h
-fmt [pdf,eps,svg,png] specify format for output figure (default is png)
-mod [arch3k,cals3k,pfm9k,hfm10k,cals10k_2,shadif14k,cals10k] specify model for 3ka to 1900 CE, default is cals10k
-alt ALT; specify altitude in km, default is sealevel (0)
-age specify date in decimal year, default is 2016
-lon0: 0 longitude for map, default is 0
-el: [D,I,B,Br] specify element for plotting
-cm: [see https://matplotlib.org/users/colormaps.html] specify color map for plotting (default is RdYlBu) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/plot_magmap.py#L24-L153 |
PmagPy/PmagPy | programs/download_magic.py | main | def main():
"""
NAME
download_magic.py
DESCRIPTION
unpacks a magic formatted smartbook .txt file from the MagIC database into the
tab delimited MagIC format txt files for use with the MagIC-Py programs.
SYNTAX
download_magic.py command line options]
INPUT
takes either the upload.txt file created by upload_magic.py or a file
downloaded from the MagIC database (http://earthref.org/MagIC)
OPTIONS
-h prints help message and quits
-i allows interactive entry of filename
-f FILE specifies input file name
-sep write location data to separate subdirectories (Location_*), (default False)
-O do not overwrite duplicate Location_* directories while downloading
-DM data model (2 or 3, default 3)
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
# interactive entry
if '-i' in sys.argv:
infile=input("Magic txt file for unpacking? ")
dir_path = '.'
input_dir_path = '.'
# non-interactive
else:
infile = pmag.get_named_arg("-f", reqd=True)
# if -O flag is present, overwrite is False
overwrite = pmag.get_flag_arg_from_sys("-O", true=False, false=True)
# if -sep flag is present, sep is True
sep = pmag.get_flag_arg_from_sys("-sep", true=True, false=False)
data_model = pmag.get_named_arg("-DM", default_val=3, reqd=False)
dir_path = pmag.get_named_arg("-WD", default_val=".", reqd=False)
input_dir_path = pmag.get_named_arg("-ID", default_val=".", reqd=False)
#if '-ID' not in sys.argv and '-WD' in sys.argv:
# input_dir_path = dir_path
if "-WD" not in sys.argv and "-ID" not in sys.argv:
input_dir_path = os.path.split(infile)[0]
if not input_dir_path:
input_dir_path = "."
ipmag.download_magic(infile, dir_path, input_dir_path, overwrite, True, data_model, sep) | python | def main():
"""
NAME
download_magic.py
DESCRIPTION
unpacks a magic formatted smartbook .txt file from the MagIC database into the
tab delimited MagIC format txt files for use with the MagIC-Py programs.
SYNTAX
download_magic.py command line options]
INPUT
takes either the upload.txt file created by upload_magic.py or a file
downloaded from the MagIC database (http://earthref.org/MagIC)
OPTIONS
-h prints help message and quits
-i allows interactive entry of filename
-f FILE specifies input file name
-sep write location data to separate subdirectories (Location_*), (default False)
-O do not overwrite duplicate Location_* directories while downloading
-DM data model (2 or 3, default 3)
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
# interactive entry
if '-i' in sys.argv:
infile=input("Magic txt file for unpacking? ")
dir_path = '.'
input_dir_path = '.'
# non-interactive
else:
infile = pmag.get_named_arg("-f", reqd=True)
# if -O flag is present, overwrite is False
overwrite = pmag.get_flag_arg_from_sys("-O", true=False, false=True)
# if -sep flag is present, sep is True
sep = pmag.get_flag_arg_from_sys("-sep", true=True, false=False)
data_model = pmag.get_named_arg("-DM", default_val=3, reqd=False)
dir_path = pmag.get_named_arg("-WD", default_val=".", reqd=False)
input_dir_path = pmag.get_named_arg("-ID", default_val=".", reqd=False)
#if '-ID' not in sys.argv and '-WD' in sys.argv:
# input_dir_path = dir_path
if "-WD" not in sys.argv and "-ID" not in sys.argv:
input_dir_path = os.path.split(infile)[0]
if not input_dir_path:
input_dir_path = "."
ipmag.download_magic(infile, dir_path, input_dir_path, overwrite, True, data_model, sep) | NAME
download_magic.py
DESCRIPTION
unpacks a magic formatted smartbook .txt file from the MagIC database into the
tab delimited MagIC format txt files for use with the MagIC-Py programs.
SYNTAX
download_magic.py command line options]
INPUT
takes either the upload.txt file created by upload_magic.py or a file
downloaded from the MagIC database (http://earthref.org/MagIC)
OPTIONS
-h prints help message and quits
-i allows interactive entry of filename
-f FILE specifies input file name
-sep write location data to separate subdirectories (Location_*), (default False)
-O do not overwrite duplicate Location_* directories while downloading
-DM data model (2 or 3, default 3) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/download_magic.py#L8-L61 |
PmagPy/PmagPy | programs/quick_hyst.py | main | def main():
"""
NAME
quick_hyst.py
DESCRIPTION
makes plots of hysteresis data
SYNTAX
quick_hyst.py [command line options]
OPTIONS
-h prints help message and quits
-f: specify input file, default is measurements.txt
-spc SPEC: specify specimen name to plot and quit
-sav save all plots and quit
-fmt [png,svg,eps,jpg]
"""
args = sys.argv
if "-h" in args:
print(main.__doc__)
sys.exit()
pltspec = ""
verbose = pmagplotlib.verbose
dir_path = pmag.get_named_arg('-WD', '.')
dir_path = os.path.realpath(dir_path)
meas_file = pmag.get_named_arg('-f', 'measurements.txt')
fmt = pmag.get_named_arg('-fmt', 'png')
save_plots = False
interactive = True
if '-sav' in args:
verbose = False
save_plots = True
interactive = False
if '-spc' in args:
ind = args.index("-spc")
pltspec = args[ind+1]
verbose = False
save_plots = True
ipmag.quick_hyst(dir_path, meas_file, save_plots,
interactive, fmt, pltspec, verbose) | python | def main():
"""
NAME
quick_hyst.py
DESCRIPTION
makes plots of hysteresis data
SYNTAX
quick_hyst.py [command line options]
OPTIONS
-h prints help message and quits
-f: specify input file, default is measurements.txt
-spc SPEC: specify specimen name to plot and quit
-sav save all plots and quit
-fmt [png,svg,eps,jpg]
"""
args = sys.argv
if "-h" in args:
print(main.__doc__)
sys.exit()
pltspec = ""
verbose = pmagplotlib.verbose
dir_path = pmag.get_named_arg('-WD', '.')
dir_path = os.path.realpath(dir_path)
meas_file = pmag.get_named_arg('-f', 'measurements.txt')
fmt = pmag.get_named_arg('-fmt', 'png')
save_plots = False
interactive = True
if '-sav' in args:
verbose = False
save_plots = True
interactive = False
if '-spc' in args:
ind = args.index("-spc")
pltspec = args[ind+1]
verbose = False
save_plots = True
ipmag.quick_hyst(dir_path, meas_file, save_plots,
interactive, fmt, pltspec, verbose) | NAME
quick_hyst.py
DESCRIPTION
makes plots of hysteresis data
SYNTAX
quick_hyst.py [command line options]
OPTIONS
-h prints help message and quits
-f: specify input file, default is measurements.txt
-spc SPEC: specify specimen name to plot and quit
-sav save all plots and quit
-fmt [png,svg,eps,jpg] | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/quick_hyst.py#L15-L55 |
def smooth(x, window_len, window='bartlett'):
    """Smooth *x* using a sliding window with the requested size.

    The smoothing is the convolution of a scaled window with the signal.
    The signal is padded at both ends with the average of its first
    (respectively last) ten values, to avoid jumps at the edges.

    Parameters
    ----------
    x : 1-D numpy array
        The input signal, equally spaced.
    window_len : int
        The width of the smoothing window.
    window : str
        One of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'.
        'flat' produces a moving average; 'bartlett' (the default, used
        for Curie-temperature calculation) is triangular but always ends
        with zeros; the others are typically used for smoothing a
        Fourier transform.

    Returns
    -------
    numpy array of the smoothed signal, same length as *x*.

    Raises
    ------
    ValueError
        If *x* is not 1-D, is shorter than *window_len*, or *window* is
        not a recognized window name.
    """
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        # window too narrow to do anything useful; return input unchanged
        return x
    if window not in ('flat', 'hanning', 'hamming', 'bartlett', 'blackman'):
        raise ValueError(
            "Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # pad the beginning and the end of the signal with an average value
    # to avoid edge effects
    start = [numpy.average(x[0:10])] * window_len
    end = [numpy.average(x[-10:])] * window_len
    s = start + list(x) + end
    if window == 'flat':  # moving average
        w = numpy.ones(window_len, 'd')
    else:
        # look the window function up on the numpy module by name
        # instead of eval()-ing a constructed string
        w = getattr(numpy, window)(window_len)
    y = numpy.convolve(w / w.sum(), s, mode='same')
    # strip the padding so the result lines up with the original signal
    return numpy.array(y[window_len:-window_len])
def deriv1(x, y, i, n):
    """Estimate the slope of y(x) at position *i* by least-squares fit.

    An alternative way to smooth the derivative of a noisy signal: the
    slope is obtained from a least-squares linear fit through the *n*
    points x[i:i+n], y[i:i+n].

    Parameters
    ----------
    x : sequence of abscissa values.
    y : sequence of ordinate values.
    i : int, starting index of the fitted window.
    n : int, number of points in the fitted window (smoothing factor).

    Returns
    -------
    float : the fitted slope.
    """
    sum_x, sum_y, sum_xy, sum_x2 = 0., 0., 0., 0.
    for k in range(i, i + n):
        sum_x += x[k]
        sum_y += y[k]
        sum_xy += x[k] * y[k]
        sum_x2 += x[k] ** 2
    # standard least-squares slope: (n*Sxy - Sx*Sy) / (n*Sx2 - Sx^2)
    return (n * sum_xy - sum_x * sum_y) / (n * sum_x2 - sum_x ** 2)
def main():
    """
    NAME
        curie.py
    DESCRIPTION
        plots and interprets curie temperature data.
        the 1st derivative is calculated from the smoothed M-T curve
        (convolution with a triangular window with width= <-w> degrees)
        the 2nd derivative is calculated from the smoothed 1st derivative curve
        (using the same sliding window width)
        the estimated curie temp. is the maximum of the 2nd derivative
        - the temperature steps should be in multiples of 1.0 degrees
    INPUT
        T,M
    SYNTAX
        curie.py [command line options]
    OPTIONS
        -h prints help message and quits
        -f FILE, sets M,T input file (required)
        -w size of sliding window in degrees (default - 3 degrees)
        -t <min> <max> temperature range (optional)
        -sav save figures and quit
        -fmt [svg,jpg,eps,png,pdf] set format for figure output [default: svg]
    example:
        curie.py -f ex2.1 -w 30 -t 300 700
    """
    plot, fmt = 0, 'svg'
    # --- parse the command line ---
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-f' in sys.argv:
        ind = sys.argv.index('-f')
        meas_file = sys.argv[ind + 1]
    else:
        print("missing -f\n")
        sys.exit()
    if '-w' in sys.argv:
        ind = sys.argv.index('-w')
        window_len = int(sys.argv[ind + 1])
    else:
        window_len = 3
    if '-t' in sys.argv:
        ind = sys.argv.index('-t')
        t_begin = int(sys.argv[ind + 1])
        t_end = int(sys.argv[ind + 2])
    else:
        t_begin = ''
        t_end = ''
    if '-sav' in sys.argv:
        plot = 1
    if '-fmt' in sys.argv:
        ind = sys.argv.index('-fmt')
        fmt = sys.argv[ind + 1]
    # --- read data from file ---
    # numpy.float was removed in NumPy 1.24; the builtin float is equivalent
    Data = numpy.loadtxt(meas_file, dtype=float)
    T = list(Data.transpose()[0])
    M = list(Data.transpose()[1])
    # cut the data if -t is one of the flags
    if t_begin:
        while T[0] < t_begin:
            M.pop(0)
            T.pop(0)
        while T[-1] > t_end:
            M.pop(-1)
            T.pop(-1)
    # --- resample the signal to deltaT = 1 degree ---
    # if deltaT is larger, points are added by linear interpolation between
    # consecutive data points; exit if deltaT is not an integer
    i = 0
    while i < (len(T) - 1):
        if (T[i + 1] - T[i]) % 1 > 0.001:
            print("delta T should be integer, this program will not work!")
            print("temperature range:", T[i], T[i + 1])
            sys.exit()
        if (T[i + 1] - T[i]) == 0.:
            # duplicate temperature: average the two measurements
            M[i] = numpy.average([M[i], M[i + 1]])
            M.pop(i + 1)
            T.pop(i + 1)
        elif (T[i + 1] - T[i]) < 0.:
            # temperature went backwards: drop the offending point
            M.pop(i + 1)
            T.pop(i + 1)
            print("check data in T=%.0f ,M[T] is ignored" % (T[i]))
        elif (T[i + 1] - T[i]) > 1.:
            # gap larger than 1 degree: fill by linear interpolation
            slope, b = numpy.polyfit([T[i], T[i + 1]], [M[i], M[i + 1]], 1)
            for j in range(int(T[i + 1]) - int(T[i]) - 1):
                M.insert(i + 1, slope * (T[i] + 1.) + b)
                T.insert(i + 1, (T[i] + 1.))
                i = i + 1
        i = i + 1
    # --- smooth the signal and plot M-T ---
    M = numpy.array(M, 'f')
    T = numpy.array(T, 'f')
    M_smooth = smooth(M, window_len)
    PLT = {'M_T': 1, 'der1': 2, 'der2': 3, 'Curie': 4}
    pmagplotlib.plot_init(PLT['M_T'], 5, 5)
    string = 'M-T (sliding window=%i)' % int(window_len)
    pmagplotlib.plot_xy(PLT['M_T'], T, M_smooth, sym='-')
    pmagplotlib.plot_xy(PLT['M_T'], T, M, sym='--',
                        xlab='Temperature C', ylab='Magnetization', title=string)
    # --- first derivative (central differences) ---
    # NOTE(review): at i=0 the i-1 index wraps to the LAST element, so the
    # first derivative point is unreliable; kept for backward compatibility
    d1 = []
    for i in range(len(M_smooth) - 1):
        Dy = M_smooth[i - 1] - M_smooth[i + 1]
        Dx = T[i - 1] - T[i + 1]
        d1.append(Dy / Dx)
    # equivalent to the original T[1:len(T-1)] (len(T-1) == len(T))
    T_d1 = T[1:]
    d1 = numpy.array(d1, 'f')
    d1_smooth = smooth(d1, window_len)
    # plot the first derivative
    pmagplotlib.plot_init(PLT['der1'], 5, 5)
    string = '1st derivative (sliding window=%i)' % int(window_len)
    pmagplotlib.plot_xy(PLT['der1'], T_d1, d1_smooth, sym='-',
                        xlab='Temperature C', title=string)
    pmagplotlib.plot_xy(PLT['der1'], T_d1, d1, sym='b--')
    # --- second derivative ---
    d2 = []
    for i in range(len(d1_smooth) - 1):
        Dy = d1_smooth[i - 1] - d1_smooth[i + 1]
        Dx = T[i - 1] - T[i + 1]
        d2.append(Dy / Dx)
    # equivalent to the original T[2:len(T-2)]
    T_d2 = T[2:]
    d2 = numpy.array(d2, 'f')
    d2_smooth = smooth(d2, window_len)
    # plot the second derivative
    pmagplotlib.plot_init(PLT['der2'], 5, 5)
    string = '2nd derivative (sliding window=%i)' % int(window_len)
    pmagplotlib.plot_xy(PLT['der2'], T_d2, d2, sym='-',
                        xlab='Temperature C', title=string)
    d2 = list(d2)
    print('second derivative maximum is at T=%i' % int(T_d2[d2.index(max(d2))]))
    # --- Curie temperature for different sliding-window widths ---
    curie, curie_1 = [], []
    wn = list(range(5, 50, 1))
    for win in wn:
        # recompute the smoothed signal and both derivatives with this window
        M_smooth = smooth(M, win)
        d1 = []
        for i in range(len(M_smooth) - 1):
            Dy = M_smooth[i - 1] - M_smooth[i + 1]
            Dx = T[i - 1] - T[i + 1]
            d1.append(Dy / Dx)
        d1 = numpy.array(d1, 'f')
        d1_smooth = smooth(d1, win)
        d2 = []
        for i in range(len(d1_smooth) - 1):
            Dy = d1_smooth[i - 1] - d1_smooth[i + 1]
            Dx = T[i - 1] - T[i + 1]
            d2.append(Dy / Dx)
        T_d2 = T[2:]
        d2 = numpy.array(d2, 'f')
        d2_smooth = smooth(d2, win)
        d2 = list(d2)
        d2_smooth = list(d2_smooth)
        curie.append(T_d2[d2.index(max(d2))])
        curie_1.append(T_d2[d2_smooth.index(max(d2_smooth))])
    # plot Curie temp for different sliding window lengths
    pmagplotlib.plot_init(PLT['Curie'], 5, 5)
    pmagplotlib.plot_xy(PLT['Curie'], wn, curie, sym='.',
                        xlab='Sliding Window Width (degrees)',
                        ylab='Curie Temp', title='Curie Statistics')
    # --- save or display ---
    files = {}
    for key in list(PLT.keys()):
        files[key] = str(key) + "." + fmt
    if plot == 0:
        pmagplotlib.draw_figs(PLT)
        ans = input(" S[a]ve to save plot, [q]uit, Return to continue: ")
        if ans == "q":
            sys.exit()
        if ans == "a":
            pmagplotlib.save_plots(PLT, files)
    else:
        pmagplotlib.save_plots(PLT, files)
    sys.exit()
def main(command_line=True, **kwargs):
    """
    NAME
        jr6_jr6_magic.py
    DESCRIPTION
        converts JR6 .jr6 format files to magic_measurements format files
    SYNTAX
        jr6_jr6_magic.py [command line options]
    OPTIONS
        -h: prints the help message and quits.
        -f FILE: specify input file, or
        -F FILE: specify output file, default is magic_measurements.txt
        -Fsa: specify er_samples format file for appending, default is new er_samples.txt (Not working yet)
        -spc NUM : specify number of characters to designate a specimen, default = 1
        -loc LOCNAME : specify location/study name
        -A: don't average replicate measurements
        -ncn NCON: specify sample naming convention (6 and 7 not yet implemented)
        -mcd [SO-MAG,SO-SUN,SO-SIGHT...] supply how these samples were oriented
        -JR IODP samples measured on the JOIDES RESOLUTION
        -v NUM : specify the volume in cc of the sample, default 2.5^3cc
    Sample naming convention:
        [1] XXXXY: where XXXX is an arbitrary length site designation and Y
            is the single character sample designation.  e.g., TG001a is the
            first sample from site TG001.  [default]
        [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
        [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
        [5] site name same as sample
        [6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
        [7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
        NB: all others you will have to customize your self
            or e-mail [email protected] for help.
    INPUT
        JR6 .jr6 format file
    """
    # defaults
    noave = 0
    volume = 2.5 * 1e-6  # default volume is a 2.5 cm cube, translated to meters cubed
    samp_con, Z = '1', ""
    er_location_name = "unknown"
    args = sys.argv
    specnum = 1
    version_num = pmag.get_version()
    mag_file = ""
    dir_path = '.'
    MagRecs = []
    SampOuts = []
    samp_file = 'er_samples.txt'
    meas_file = 'magic_measurements.txt'
    tmp_file = "fixed.jr6"
    meth_code, JR = "", 0
    #
    # get command line arguments
    #
    if command_line:
        if '-WD' in sys.argv:
            ind = sys.argv.index('-WD')
            dir_path = sys.argv[ind + 1]
        if '-ID' in sys.argv:
            ind = sys.argv.index('-ID')
            input_dir_path = sys.argv[ind + 1]
        else:
            input_dir_path = dir_path
        output_dir_path = dir_path
        if "-h" in args:
            print(main.__doc__)
            return False
        if '-F' in args:
            ind = args.index("-F")
            meas_file = args[ind + 1]
        if '-Fsa' in args:
            ind = args.index("-Fsa")
            samp_file = args[ind + 1]
        if '-f' in args:
            ind = args.index("-f")
            mag_file = args[ind + 1]
        if "-spc" in args:
            ind = args.index("-spc")
            specnum = int(args[ind + 1])
        if "-ncn" in args:
            ind = args.index("-ncn")
            samp_con = sys.argv[ind + 1]
        if "-loc" in args:
            ind = args.index("-loc")
            er_location_name = args[ind + 1]
        if "-A" in args:
            noave = 1
        if "-mcd" in args:
            ind = args.index("-mcd")
            meth_code = args[ind + 1]
        if "-JR" in args:
            meth_code = meth_code + ":FS-C-DRILL-IODP:SP-SS-C:SO-V"
            meth_code = meth_code.strip(":")
            JR = 1
            samp_con = '5'
        if "-v" in args:
            ind = args.index("-v")
            # enter volume in cc, convert to m^3
            volume = float(args[ind + 1]) * 1e-6
    if not command_line:
        dir_path = kwargs.get('dir_path', '.')
        input_dir_path = kwargs.get('input_dir_path', dir_path)
        output_dir_path = dir_path
        meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
        mag_file = kwargs.get('mag_file')
        samp_file = kwargs.get('samp_file', 'er_samples.txt')
        specnum = kwargs.get('specnum', 1)
        samp_con = kwargs.get('samp_con', '1')
        er_location_name = kwargs.get('er_location_name', '')
        noave = kwargs.get('noave', 0)  # default (0) means DO average
        meth_code = kwargs.get('meth_code', "LP-NO")
        volume = float(kwargs.get('volume', 0))
        if not volume:
            # default volume is a 2.5 cm cube, translated to meters cubed
            volume = 2.5 * 1e-6
        else:
            volume *= 1e-6  # convert cm^3 to m^3
        JR = kwargs.get('JR', 0)
        if JR:
            if meth_code == "LP-NO":
                meth_code = ""
            meth_code = meth_code + ":FS-C-DRILL-IODP:SP-SS-C:SO-V"
            meth_code = meth_code.strip(":")
            samp_con = '5'
    # format variables
    mag_file = input_dir_path + "/" + mag_file
    meas_file = output_dir_path + "/" + meas_file
    samp_file = output_dir_path + "/" + samp_file
    tmp_file = output_dir_path + "/" + tmp_file
    if specnum != 0:
        specnum = -specnum
    if "4" in samp_con:
        if "-" not in samp_con:
            print("option [4] must be in form 4-Z where Z is an integer")
            return False, "option [4] must be in form 4-Z where Z is an integer"
        else:
            Z = samp_con.split("-")[1]
            samp_con = "4"
    if "7" in samp_con:
        if "-" not in samp_con:
            print("option [7] must be in form 7-Z where Z is an integer")
            return False, "option [7] must be in form 7-Z where Z is an integer"
        else:
            Z = samp_con.split("-")[1]
            samp_con = "7"
    ErSampRec = {}
    # fix the .jr6 file so that there are spaces between all the columns
    pre_data = open(mag_file, 'r')
    tmp_data = open(tmp_file, 'w')
    line = pre_data.readline()
    while line != '':
        line = line.replace('-', ' -')
        tmp_data.write(line)
        line = pre_data.readline()
    tmp_data.close()
    pre_data.close()
    data = pd.read_csv(tmp_file, delim_whitespace=True, header=None)
    if JR == 0:
        data.columns = ['er_specimen_name', 'step', 'x', 'y', 'z', 'expon',
                        'sample_azimuth', 'sample_dip',
                        'sample_bed_dip_direction', 'sample_bed_dip',
                        'bed_dip_dir2', 'bed_dip2', 'param1', 'param2',
                        'param3', 'param4', 'measurement_csd']
        cart = np.array([data['x'], data['y'], data['z']]).transpose()
    else:  # measured on the Joides Resolution JR6
        data.columns = ['er_specimen_name', 'step', 'negz', 'y', 'x', 'expon',
                        'sample_azimuth', 'sample_dip',
                        'sample_bed_dip_direction', 'sample_bed_dip',
                        'bed_dip_dir2', 'bed_dip2', 'param1', 'param2',
                        'param3', 'param4', 'measurement_csd']
        cart = np.array([data['x'], data['y'], -data['negz']]).transpose()
    dirs = pmag.cart2dir(cart).transpose()  # avoid shadowing the builtin dir
    data['measurement_dec'] = dirs[0]
    data['measurement_inc'] = dirs[1]
    # the data are in A/m - this converts to Am^2
    data['measurement_magn_moment'] = dirs[2] * (10.0 ** data['expon']) * volume
    data['measurement_magn_volume'] = dirs[2] * (10.0 ** data['expon'])  # A/m
    data['sample_dip'] = -data['sample_dip']
    DGEOs, IGEOs = [], []
    for ind in range(len(data)):
        dgeo, igeo = pmag.dogeo(data.iloc[ind]['measurement_dec'],
                                data.iloc[ind]['measurement_inc'],
                                data.iloc[ind]['sample_azimuth'],
                                data.iloc[ind]['sample_dip'])
        DGEOs.append(dgeo)
        IGEOs.append(igeo)
    data['specimen_dec'] = DGEOs
    data['specimen_inc'] = IGEOs
    data['specimen_tilt'] = '1'
    if specnum != 0:
        # BUGFIX: truncate each specimen NAME (string accessor); the original
        # [:specnum] sliced the Series rows, misaligning the column
        data['er_sample_name'] = data['er_specimen_name'].str[:specnum]
    else:
        data['er_sample_name'] = data['er_specimen_name']
    if int(samp_con) in [1, 2, 3, 4, 5, 7]:
        data['er_site_name'] = pmag.parse_site(data['er_sample_name'], samp_con, Z)
    # Configure the er_sample table (one record per unique sample)
    for rowNum, row in data.iterrows():
        sampleFlag = 0
        for sampRec in SampOuts:
            if sampRec['er_sample_name'] == row['er_sample_name']:
                sampleFlag = 1
                break
        if sampleFlag == 0:
            ErSampRec['er_sample_name'] = row['er_sample_name']
            ErSampRec['sample_azimuth'] = str(row['sample_azimuth'])
            ErSampRec['sample_dip'] = str(row['sample_dip'])
            ErSampRec['magic_method_codes'] = meth_code
            ErSampRec['er_location_name'] = er_location_name
            ErSampRec['er_site_name'] = row['er_site_name']
            ErSampRec['er_citation_names'] = 'This study'
            SampOuts.append(ErSampRec.copy())
    # Configure the magic_measurements table
    for rowNum, row in data.iterrows():
        MagRec = {}
        MagRec["er_citation_names"] = "This study"
        MagRec['er_location_name'] = er_location_name
        MagRec['er_site_name'] = row['er_site_name']
        MagRec['er_sample_name'] = row['er_sample_name']
        MagRec['magic_software_packages'] = version_num
        MagRec["treatment_temp"] = '%8.3e' % (273)  # room temp in kelvin
        MagRec["measurement_temp"] = '%8.3e' % (273)  # room temp in kelvin
        MagRec["measurement_flag"] = 'g'
        MagRec["measurement_standard"] = 'u'
        MagRec["measurement_number"] = '1'
        MagRec["er_specimen_name"] = row['er_specimen_name']
        MagRec["treatment_ac_field"] = '0'
        if row['step'] == 'NRM':
            meas_type = "LT-NO"
        elif row['step'][0:2] == 'AD':
            meas_type = "LT-AF-Z"
            treat = float(row['step'][2:])
            # convert from mT to tesla
            MagRec["treatment_ac_field"] = '%8.3e' % (treat * 1e-3)
        elif row['step'][0:2] == 'TD':
            # BUGFIX: compare the two-character code; row['step'][0] could
            # never equal 'TD', so thermal steps always aborted the run
            meas_type = "LT-T-Z"
            treat = float(row['step'][2:])
            MagRec["treatment_temp"] = '%8.3e' % (treat + 273.)  # temp in kelvin
        else:  # need to add IRM, and ARM options
            print("measurement type unknown", row['step'])
            return False, "measurement type unknown"
        MagRec["measurement_magn_moment"] = str(row['measurement_magn_moment'])
        MagRec["measurement_magn_volume"] = str(row['measurement_magn_volume'])
        MagRec["measurement_dec"] = str(row['measurement_dec'])
        MagRec["measurement_inc"] = str(row['measurement_inc'])
        MagRec['magic_method_codes'] = meas_type
        MagRecs.append(MagRec.copy())
    pmag.magic_write(samp_file, SampOuts, 'er_samples')
    print("sample orientations put in ", samp_file)
    MagOuts = pmag.measurements_methods(MagRecs, noave)
    pmag.magic_write(meas_file, MagOuts, 'magic_measurements')
    print("results put in ", meas_file)
    print("exit!")
    return True, meas_file
"""
NAME
jr6_jr6_magic.py
DESCRIPTION
converts JR6 .jr6 format files to magic_measurements format files
SYNTAX
jr6_jr6_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input file, or
-F FILE: specify output file, default is magic_measurements.txt
-Fsa: specify er_samples format file for appending, default is new er_samples.txt (Not working yet)
-spc NUM : specify number of characters to designate a specimen, default = 1
-loc LOCNAME : specify location/study name
-A: don't average replicate measurements
-ncn NCON: specify sample naming convention (6 and 7 not yet implemented)
-mcd [SO-MAG,SO-SUN,SO-SIGHT...] supply how these samples were oriented
-JR IODP samples measured on the JOIDES RESOLUTION
-v NUM : specify the volume in cc of the sample, default 2.5^3cc
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail [email protected] for help.
INPUT
JR6 .jr6 format file
"""
# initialize some stuff
noave=0
#volume=2.5**3 #default volume is a 2.5cm cube
volume = 2.5 * 1e-6 #default volume is a 2.5 cm cube, translated to meters cubed
inst=""
samp_con,Z='1',""
missing=1
demag="N"
er_location_name="unknown"
citation='This study'
args=sys.argv
meth_code="LP-NO"
specnum=1
version_num=pmag.get_version()
Samps=[] # keeps track of sample orientations
user=""
mag_file=""
dir_path='.'
MagRecs=[]
ErSamps=[]
SampOuts=[]
samp_file = 'er_samples.txt'
meas_file = 'magic_measurements.txt'
tmp_file= "fixed.jr6"
meth_code,JR="",0
#
# get command line arguments
#
if command_line:
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
if '-ID' in sys.argv:
ind = sys.argv.index('-ID')
input_dir_path = sys.argv[ind+1]
else:
input_dir_path = dir_path
output_dir_path = dir_path
if "-h" in args:
print(main.__doc__)
return False
if '-F' in args:
ind=args.index("-F")
meas_file = args[ind+1]
if '-Fsa' in args:
ind = args.index("-Fsa")
samp_file = args[ind+1]
#try:
# open(samp_file,'r')
# ErSamps,file_type=pmag.magic_read(samp_file)
# print 'sample information will be appended to ', samp_file
#except:
# print samp_file,' not found: sample information will be stored in new er_samples.txt file'
# samp_file = output_dir_path+'/er_samples.txt'
if '-f' in args:
ind = args.index("-f")
mag_file= args[ind+1]
if "-spc" in args:
ind = args.index("-spc")
specnum = int(args[ind+1])
if "-ncn" in args:
ind=args.index("-ncn")
samp_con=sys.argv[ind+1]
if "-loc" in args:
ind=args.index("-loc")
er_location_name=args[ind+1]
if "-A" in args: noave=1
if "-mcd" in args:
ind=args.index("-mcd")
meth_code=args[ind+1]
if "-JR" in args:
meth_code=meth_code+":FS-C-DRILL-IODP:SP-SS-C:SO-V"
meth_code=meth_code.strip(":")
JR=1
samp_con='5'
if "-v" in args:
ind=args.index("-v")
volume=float(args[ind+1])*1e-6 # enter volume in cc, convert to m^3
if not command_line:
dir_path = kwargs.get('dir_path', '.')
input_dir_path = kwargs.get('input_dir_path', dir_path)
output_dir_path = dir_path
meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
mag_file = kwargs.get('mag_file')
samp_file = kwargs.get('samp_file', 'er_samples.txt')
specnum = kwargs.get('specnum', 1)
samp_con = kwargs.get('samp_con', '1')
er_location_name = kwargs.get('er_location_name', '')
noave = kwargs.get('noave', 0) # default (0) means DO average
meth_code = kwargs.get('meth_code', "LP-NO")
volume = float(kwargs.get('volume', 0))
if not volume:
volume = 2.5 * 1e-6 #default volume is a 2.5 cm cube, translated to meters cubed
else:
#convert cm^3 to m^3
volume *= 1e-6
JR = kwargs.get('JR', 0)
if JR:
if meth_code == "LP-NO":
meth_code = ""
meth_code=meth_code+":FS-C-DRILL-IODP:SP-SS-C:SO-V"
meth_code=meth_code.strip(":")
samp_con='5'
# format variables
mag_file = input_dir_path+"/" + mag_file
meas_file = output_dir_path+"/" + meas_file
samp_file = output_dir_path+"/" + samp_file
tmp_file = output_dir_path+"/" + tmp_file
if specnum!=0:
specnum=-specnum
if "4" in samp_con:
if "-" not in samp_con:
print("option [4] must be in form 4-Z where Z is an integer")
return False, "option [4] must be in form 4-Z where Z is an integer"
else:
Z=samp_con.split("-")[1]
samp_con="4"
if "7" in samp_con:
if "-" not in samp_con:
print("option [7] must be in form 7-Z where Z is an integer")
return False, "option [7] must be in form 7-Z where Z is an integer"
else:
Z=samp_con.split("-")[1]
samp_con="7"
ErSampRec,ErSiteRec={},{}
# parse data
# fix .jr6 file so that there are spaces between all the columns.
pre_data=open(mag_file, 'r')
tmp_data=open(tmp_file, 'w')
line=pre_data.readline()
while line !='':
line=line.replace('-',' -')
#print "line=", line
tmp_data.write(line)
line=pre_data.readline()
tmp_data.close()
pre_data.close()
data=pd.read_csv(tmp_file, delim_whitespace=True,header=None)
if JR==0: #
data.columns=['er_specimen_name','step','x','y','z','expon','sample_azimuth','sample_dip', 'sample_bed_dip_direction','sample_bed_dip','bed_dip_dir2','bed_dip2','param1','param2','param3','param4','measurement_csd']
cart=np.array([data['x'],data['y'],data['z']]).transpose()
else: # measured on the Joides Resolution JR6
data.columns=['er_specimen_name','step','negz','y','x','expon','sample_azimuth','sample_dip', 'sample_bed_dip_direction','sample_bed_dip','bed_dip_dir2','bed_dip2','param1','param2','param3','param4','measurement_csd']
cart=np.array([data['x'],data['y'],-data['negz']]).transpose()
dir= pmag.cart2dir(cart).transpose()
data['measurement_dec']=dir[0]
data['measurement_inc']=dir[1]
data['measurement_magn_moment']=dir[2]*(10.0**data['expon'])*volume # the data are in A/m - this converts to Am^2
data['measurement_magn_volume']=dir[2]*(10.0**data['expon']) # A/m - data in A/m
data['sample_dip']=-data['sample_dip']
DGEOs,IGEOs=[],[]
for ind in range(len(data)):
dgeo,igeo=pmag.dogeo(data.iloc[ind]['measurement_dec'],data.iloc[ind]['measurement_inc'],data.iloc[ind]['sample_azimuth'],data.iloc[ind]['sample_dip'])
DGEOs.append(dgeo)
IGEOs.append(igeo)
data['specimen_dec']=DGEOs
data['specimen_inc']=IGEOs
data['specimen_tilt']='1'
if specnum!=0:
data['er_sample_name']=data['er_specimen_name'][:specnum]
else:
data['er_sample_name']=data['er_specimen_name']
if int(samp_con) in [1, 2, 3, 4, 5, 7]:
data['er_site_name']=pmag.parse_site(data['er_sample_name'],samp_con,Z)
# else:
# if 'er_site_name' in ErSampRec.keys():er_site_name=ErSampRec['er_site_name']
# if 'er_location_name' in ErSampRec.keys():er_location_name=ErSampRec['er_location_name']
# Configure the er_sample table
for rowNum, row in data.iterrows():
sampleFlag=0
for sampRec in SampOuts:
if sampRec['er_sample_name'] == row['er_sample_name']:
sampleFlag=1
break
if sampleFlag == 0:
ErSampRec['er_sample_name']=row['er_sample_name']
ErSampRec['sample_azimuth']=str(row['sample_azimuth'])
ErSampRec['sample_dip']=str(row['sample_dip'])
ErSampRec['magic_method_codes']=meth_code
ErSampRec['er_location_name']=er_location_name
ErSampRec['er_site_name']=row['er_site_name']
ErSampRec['er_citation_names']='This study'
SampOuts.append(ErSampRec.copy())
# Configure the magic_measurements table
for rowNum, row in data.iterrows():
MagRec={}
# MagRec['measurement_description']='Date: '+date
MagRec["er_citation_names"]="This study"
MagRec['er_location_name']=er_location_name
MagRec['er_site_name']=row['er_site_name']
MagRec['er_sample_name']=row['er_sample_name']
MagRec['magic_software_packages']=version_num
MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_flag"]='g'
MagRec["measurement_standard"]='u'
MagRec["measurement_number"]='1'
MagRec["er_specimen_name"]=row['er_specimen_name']
MagRec["treatment_ac_field"]='0'
if row['step'] == 'NRM':
meas_type="LT-NO"
elif row['step'][0:2] == 'AD':
meas_type="LT-AF-Z"
treat=float(row['step'][2:])
MagRec["treatment_ac_field"]='%8.3e' %(treat*1e-3) # convert from mT to tesla
elif row['step'][0] == 'TD':
meas_type="LT-T-Z"
treat=float(row['step'][2:])
MagRec["treatment_temp"]='%8.3e' % (treat+273.) # temp in kelvin
else: # need to add IRM, and ARM options
print("measurement type unknown", row['step'])
return False, "measurement type unknown"
MagRec["measurement_magn_moment"]=str(row['measurement_magn_moment'])
MagRec["measurement_magn_volume"]=str(row['measurement_magn_volume'])
MagRec["measurement_dec"]=str(row['measurement_dec'])
MagRec["measurement_inc"]=str(row['measurement_inc'])
MagRec['magic_method_codes']=meas_type
MagRecs.append(MagRec.copy())
pmag.magic_write(samp_file,SampOuts,'er_samples')
print("sample orientations put in ",samp_file)
MagOuts=pmag.measurements_methods(MagRecs,noave)
pmag.magic_write(meas_file,MagOuts,'magic_measurements')
print("results put in ",meas_file)
print("exit!")
return True, meas_file | NAME
jr6_jr6_magic.py
DESCRIPTION
converts JR6 .jr6 format files to magic_measurements format files
SYNTAX
jr6_jr6_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input file, or
-F FILE: specify output file, default is magic_measurements.txt
-Fsa: specify er_samples format file for appending, default is new er_samples.txt (Not working yet)
-spc NUM : specify number of characters to designate a specimen, default = 1
-loc LOCNAME : specify location/study name
-A: don't average replicate measurements
-ncn NCON: specify sample naming convention (6 and 7 not yet implemented)
-mcd [SO-MAG,SO-SUN,SO-SIGHT...] supply how these samples were oriented
-JR IODP samples measured on the JOIDES RESOLUTION
-v NUM : specify the volume in cc of the sample, default 2.5^3cc
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
        [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail [email protected] for help.
INPUT
JR6 .jr6 format file | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/conversion_scripts2/jr6_jr6_magic2.py#L10-L287 |
def main():
    """
    NAME
        extract_methods.py

    DESCRIPTION
        reads in a magic table and creates a file with method codes

    SYNTAX
        extract_methods.py [command line options]

    OPTIONS
        -h: prints the help message and quits.
        -f FILE: specify magic format input file, default is magic_measurements.txt
        -F FILE: specify method code output file, default is magic_methods.txt
    """
    args = sys.argv
    outfile = 'magic_methods.txt'
    infile = 'magic_measurements.txt'
    dir_path = '.'
    #
    # get command line arguments
    #
    if '-WD' in args:
        ind = args.index("-WD")
        dir_path = args[ind + 1]
    if "-h" in args:
        print(main.__doc__)
        sys.exit()
    if '-F' in args:
        ind = args.index("-F")
        outfile = args[ind + 1]
    if '-f' in args:
        ind = args.index("-f")
        infile = args[ind + 1]
    infile = dir_path + '/' + infile
    outfile = dir_path + '/' + outfile
    data, file_type = pmag.magic_read(infile)
    MethRecs = []
    # a set gives O(1) de-duplication; the original list scan was O(n) per code
    seen = set()
    for rec in data:
        for meth in rec['magic_method_codes'].split(":"):
            meth = meth.strip()
            # skip empty entries (e.g. from an empty magic_method_codes field)
            if meth and meth not in seen:
                seen.add(meth)
                MethRecs.append({'magic_method_code': meth})
    pmag.magic_write(outfile, MethRecs, 'magic_methods')
"""
NAME
extract_methods.py
DESCRIPTION
reads in a magic table and creates a file with method codes
SYNTAX
extract_methods.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify magic format input file, default is magic_measurements.txt
-F FILE: specify method code output file, default is magic_methods.txt
"""
citation='This study'
args=sys.argv
outfile='magic_methods.txt'
infile='magic_measurements.txt'
#
# get command line arguments
#
dir_path='.'
if '-WD' in args:
ind=args.index("-WD")
dir_path=args[ind+1]
if "-h" in args:
print(main.__doc__)
sys.exit()
if '-F' in args:
ind=args.index("-F")
outfile=args[ind+1]
if '-f' in args:
ind=args.index("-f")
infile=args[ind+1]
infile=dir_path+'/'+infile
outfile=dir_path+'/'+outfile
data,file_type=pmag.magic_read(infile)
MethRecs=[]
methods=[]
for rec in data:
meths=rec['magic_method_codes'].split(":")
for meth in meths:
if meth not in methods:
MethRec={}
methods.append(meth)
MethRec['magic_method_code']=meth
MethRecs.append(MethRec)
pmag.magic_write(outfile,MethRecs,'magic_methods') | NAME
extract_methods.py
DESCRIPTION
reads in a magic table and creates a file with method codes
SYNTAX
extract_methods.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify magic format input file, default is magic_measurements.txt
-F FILE: specify method code output file, default is magic_methods.txt | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/deprecated/extract_methods.py#L6-L55 |
def main():
    """
    NAME
        gofish.py

    DESCRIPTION
       calculates fisher parameters from dec inc data

    INPUT FORMAT
        takes dec/inc as first two columns in space delimited file

    SYNTAX
        gofish.py [options] [< filename]

    OPTIONS
        -h prints help message and quits
        -i for interactive filename entry
        -f FILE, specify input file
        -F FILE, specifies output file name
        < filename for reading from standard input

    OUTPUT
        mean dec, mean inc, N, R, k, a95, csd
    """
    if '-h' in sys.argv:  # check if help is needed
        print(main.__doc__)
        sys.exit()  # graceful quit
    if '-i' in sys.argv:  # ask for filename
        file = input("Enter file name with dec, inc data: ")
        # context manager closes the handle (original leaked it)
        with open(file, 'r') as f:
            data = f.readlines()
    elif '-f' in sys.argv:
        ind = sys.argv.index('-f')
        file = sys.argv[ind + 1]
        with open(file, 'r') as f:
            data = f.readlines()
    else:
        data = sys.stdin.readlines()  # read from standard input
    ofile = ""
    if '-F' in sys.argv:
        ind = sys.argv.index('-F')
        ofile = sys.argv[ind + 1]
    DIs = []  # set up list for dec inc data
    for line in data:
        # accept either tab- or generic whitespace-delimited records
        rec = line.split('\t') if '\t' in line else line.split()
        DIs.append((float(rec[0]), float(rec[1])))
    fpars = pmag.fisher_mean(DIs)
    outstring = '%7.1f %7.1f   %i %10.4f %8.1f %7.1f %7.1f' % (
        fpars['dec'], fpars['inc'], fpars['n'], fpars['r'], fpars['k'],
        fpars['alpha95'], fpars['csd'])
    if ofile == "":
        print(outstring)
    else:
        # BUG FIX: the original called open(ofile, 'w + a'), which is an
        # invalid mode string and raises ValueError before any data is read.
        # Open for writing only when an output file was actually requested.
        with open(ofile, 'w') as out:
            out.write(outstring + '\n')
"""
NAME
gofish.py
DESCRIPTION
calculates fisher parameters from dec inc data
INPUT FORMAT
takes dec/inc as first two columns in space delimited file
SYNTAX
gofish.py [options] [< filename]
OPTIONS
-h prints help message and quits
-i for interactive filename entry
-f FILE, specify input file
-F FILE, specifies output file name
< filename for reading from standard input
OUTPUT
mean dec, mean inc, N, R, k, a95, csd
"""
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-i' in sys.argv: # ask for filename
file=input("Enter file name with dec, inc data: ")
f=open(file,'r')
data=f.readlines()
elif '-f' in sys.argv:
dat=[]
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
data=f.readlines()
else:
data = sys.stdin.readlines() # read from standard input
ofile = ""
if '-F' in sys.argv:
ind = sys.argv.index('-F')
ofile= sys.argv[ind+1]
out = open(ofile, 'w + a')
DIs= [] # set up list for dec inc data
for line in data: # read in the data from standard input
if '\t' in line:
rec=line.split('\t') # split each line on space to get records
else:
rec=line.split() # split each line on space to get records
DIs.append((float(rec[0]),float(rec[1])))
#
fpars=pmag.fisher_mean(DIs)
outstring='%7.1f %7.1f %i %10.4f %8.1f %7.1f %7.1f'%(fpars['dec'],fpars['inc'],fpars['n'],fpars['r'],fpars['k'],fpars['alpha95'], fpars['csd'])
if ofile == "":
print(outstring)
else:
out.write(outstring+'\n') | NAME
gofish.py
DESCRIPTION
calculates fisher parameters from dec inc data
INPUT FORMAT
takes dec/inc as first two columns in space delimited file
SYNTAX
gofish.py [options] [< filename]
OPTIONS
-h prints help message and quits
-i for interactive filename entry
-f FILE, specify input file
-F FILE, specifies output file name
< filename for reading from standard input
OUTPUT
mean dec, mean inc, N, R, k, a95, csd | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/gofish.py#L7-L65 |
def main():
    """
    NAME
        trmaq_magic.py

    DESCRIPTION
        does non-linear trm acquisition correction

    SYNTAX
        trmaq_magic.py [-h][-i][command line options]

    OPTIONS
        -h prints help message and quits
        -i allows interactive setting of file names
        -f MFILE, sets magic_measurements input file
        -ft TSPEC, sets thellier_specimens input file
        -F OUT, sets output for non-linear TRM acquisition corrected data
        -sav save figures and quit
        -fmt [png, svg, pdf]
        -DM [2, 3] MagIC data model, default 3

    DEFAULTS
        MFILE: trmaq_measurements.txt
        TSPEC: thellier_specimens.txt
        OUT: NLT_specimens.txt
    """
    meas_file = 'trmaq_measurements.txt'
    tspec = "thellier_specimens.txt"
    output = 'NLT_specimens.txt'
    data_model_num = int(float(pmag.get_named_arg("-DM", 3)))
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-i' in sys.argv:
        meas_file = input(
            "Input magic_measurements file name? [trmaq_measurements.txt] ")
        if meas_file == "":
            meas_file = "trmaq_measurements.txt"
        tspec = input(
            " thellier_specimens file name? [thellier_specimens.txt] ")
        if tspec == "":
            tspec = "thellier_specimens.txt"
        output = input(
            "File for non-linear TRM adjusted specimen data: [NLTspecimens.txt] ")
        if output == "":
            output = "NLT_specimens.txt"
    if '-f' in sys.argv:
        ind = sys.argv.index('-f')
        meas_file = sys.argv[ind+1]
    if '-ft' in sys.argv:
        ind = sys.argv.index('-ft')
        tspec = sys.argv[ind+1]
    if '-F' in sys.argv:
        ind = sys.argv.index('-F')
        output = sys.argv[ind+1]
    save_plots = '-sav' in sys.argv
    fmt = pmag.get_named_arg("-fmt", "svg")
    PLT = {'aq': 1}
    if not save_plots:
        pmagplotlib.plot_init(PLT['aq'], 5, 5)
    meas_data, file_type = pmag.magic_read(meas_file)
    if 'measurements' not in file_type:
        print(file_type, "This is not a valid measurements file ")
        sys.exit()
    # column names differ between the MagIC 2.x and 3.0 data models
    if data_model_num == 2:
        spec_col = "er_specimen_name"
        lab_field_dc_col = "specimen_lab_field_dc"
        int_col = "specimen_int"
        meth_col = "magic_method_codes"
        treat_dc_col = "treatment_dc_field"
        magn_moment_col = "measurement_magn_moment"
        experiment_col = "magic_experiment_name"
        outfile_type = "pmag_specimens"
    else:
        spec_col = "specimen"
        lab_field_dc_col = "int_treat_dc_field"
        int_col = "int_abs"
        meth_col = "method_codes"
        treat_dc_col = "treat_dc_field"
        magn_moment_col = "magn_moment"
        experiment_col = "experiment"
        outfile_type = "specimens"
    sids = pmag.get_specs(meas_data)
    specimen = 0
    # read in thellier_specimen data
    nrm, file_type = pmag.magic_read(tspec)
    PmagSpecRecs = []
    while specimen < len(sids):
        # find corresponding paleointensity data for this specimen
        s = sids[specimen]
        blab, best = "", ""
        for nrec in nrm:  # pick out the Banc data for this spec
            if nrec[spec_col] == s:
                try:
                    blab = float(nrec[lab_field_dc_col])
                except ValueError:
                    continue
                best = float(nrec[int_col])
                TrmRec = nrec
                break
        if blab == "":
            print("skipping ", s, " : no best ")
            specimen += 1
        else:
            print(sids[specimen], specimen+1, 'of ',
                  len(sids), 'Best = ', best*1e6)
            MeasRecs = []
            # find the data from the meas_data file for this specimen
            for rec in meas_data:
                if rec[spec_col] == s:
                    methcodes = [meth.strip()
                                 for meth in rec[meth_col].split(":")]
                    if "LP-TRM" in methcodes:
                        MeasRecs.append(rec)
            if len(MeasRecs) < 2:
                specimen += 1
                print('skipping specimen - no trm acquisition data ', s)
            else:
                TRMs, Bs = [], []
                for rec in MeasRecs:
                    Bs.append(float(rec[treat_dc_col]))
                    TRMs.append(float(rec[magn_moment_col]))
                # calculate best fit parameters through TRM acquisition data,
                # and get new banc
                NLpars = nlt.NLtrm(Bs, TRMs, best, blab, 0)
                Mp, Bp = [], []
                for k in range(int(max(Bs)*1e6)):
                    Bp.append(float(k)*1e-6)
                    # predicted NRM for this field
                    npred = nlt.TRM(Bp[-1], NLpars['xopt'][0],
                                    NLpars['xopt'][1])
                    Mp.append(npred)
                pmagplotlib.plot_trm(
                    PLT['aq'], Bs, TRMs, Bp, Mp, NLpars, rec[experiment_col])
                if not save_plots:
                    pmagplotlib.draw_figs(PLT)
                print('Banc= ', float(NLpars['banc'])*1e6)
                # copy of info from thellier_specimens record
                trmTC = dict(TrmRec)
                trmTC[int_col] = '%8.3e' % (NLpars['banc'])
                trmTC[meth_col] = TrmRec[meth_col]+":DA-NL"
                PmagSpecRecs.append(trmTC)
                if not save_plots:
                    ans = input("Return for next specimen, s[a]ve plot ")
                    if ans == 'a':
                        Name = {'aq': rec[spec_col]+'_TRM.{}'.format(fmt)}
                        pmagplotlib.save_plots(PLT, Name)
                else:
                    Name = {'aq': rec[spec_col]+'_TRM.{}'.format(fmt)}
                    pmagplotlib.save_plots(PLT, Name)
                specimen += 1
    pmag.magic_write(output, PmagSpecRecs, outfile_type)
"""
NAME
trmaq_magic.py
DESCTIPTION
does non-linear trm acquisisiton correction
SYNTAX
trmaq_magic.py [-h][-i][command line options]
OPTIONS
-h prints help message and quits
-i allows interactive setting of file names
-f MFILE, sets magic_measurements input file
-ft TSPEC, sets thellier_specimens input file
-F OUT, sets output for non-linear TRM acquisition corrected data
-sav save figures and quit
-fmt [png, svg, pdf]
-DM [2, 3] MagIC data model, default 3
DEFAULTS
MFILE: trmaq_measurements.txt
TSPEC: thellier_specimens.txt
OUT: NLT_specimens.txt
"""
meas_file = 'trmaq_measurements.txt'
tspec = "thellier_specimens.txt"
output = 'NLT_specimens.txt'
data_model_num = int(float(pmag.get_named_arg("-DM", 3)))
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-i' in sys.argv:
meas_file = input(
"Input magic_measurements file name? [trmaq_measurements.txt] ")
if meas_file == "":
meas_file = "trmaq_measurements.txt"
tspec = input(
" thellier_specimens file name? [thellier_specimens.txt] ")
if tspec == "":
tspec = "thellier_specimens.txt"
output = input(
"File for non-linear TRM adjusted specimen data: [NLTspecimens.txt] ")
if output == "":
output = "NLT_specimens.txt"
if '-f' in sys.argv:
ind = sys.argv.index('-f')
meas_file = sys.argv[ind+1]
if '-ft' in sys.argv:
ind = sys.argv.index('-ft')
tspec = sys.argv[ind+1]
if '-F' in sys.argv:
ind = sys.argv.index('-F')
output = sys.argv[ind+1]
if '-sav' in sys.argv:
save_plots = True
else:
save_plots = False
fmt = pmag.get_named_arg("-fmt", "svg")
#
PLT = {'aq': 1}
if not save_plots:
pmagplotlib.plot_init(PLT['aq'], 5, 5)
#
# get name of file from command line
#
comment = ""
#
#
meas_data, file_type = pmag.magic_read(meas_file)
if 'measurements' not in file_type:
print(file_type, "This is not a valid measurements file ")
sys.exit()
if data_model_num == 2:
spec_col = "er_specimen_name"
lab_field_dc_col = "specimen_lab_field_dc"
int_col = "specimen_int"
meth_col = "magic_method_codes"
treat_dc_col = "treatment_dc_field"
magn_moment_col = "measurement_magn_moment"
experiment_col = "magic_experiment_name"
outfile_type = "pmag_specimens"
else:
spec_col = "specimen"
lab_field_dc_col = "int_treat_dc_field"
int_col = "int_abs"
meth_col = "method_codes"
treat_dc_col = "treat_dc_field"
magn_moment_col = "magn_moment"
experiment_col = "experiment"
outfile_type = "specimens"
sids = pmag.get_specs(meas_data)
specimen = 0
#
# read in thellier_specimen data
#
nrm, file_type = pmag.magic_read(tspec)
PmagSpecRecs= []
while specimen < len(sids):
#
# find corresoponding paleointensity data for this specimen
#
s = sids[specimen]
blab, best = "", ""
for nrec in nrm: # pick out the Banc data for this spec
if nrec[spec_col] == s:
try:
blab = float(nrec[lab_field_dc_col])
except ValueError:
continue
best = float(nrec[int_col])
TrmRec = nrec
break
if blab == "":
print("skipping ", s, " : no best ")
specimen += 1
else:
print(sids[specimen], specimen+1, 'of ',
len(sids), 'Best = ', best*1e6)
MeasRecs = []
#
# find the data from the meas_data file for this specimen
#
for rec in meas_data:
if rec[spec_col] == s:
meths = rec[meth_col].split(":")
methcodes = []
for meth in meths:
methcodes.append(meth.strip())
if "LP-TRM" in methcodes:
MeasRecs.append(rec)
if len(MeasRecs) < 2:
specimen += 1
print('skipping specimen - no trm acquisition data ', s)
#
# collect info for the PmagSpecRec dictionary
#
else:
TRMs, Bs = [], []
for rec in MeasRecs:
Bs.append(float(rec[treat_dc_col]))
TRMs.append(float(rec[magn_moment_col]))
# calculate best fit parameters through TRM acquisition data, and get new banc
NLpars = nlt.NLtrm(Bs, TRMs, best, blab, 0)
#
Mp, Bp = [], []
for k in range(int(max(Bs)*1e6)):
Bp.append(float(k)*1e-6)
# predicted NRM for this field
npred = nlt.TRM(Bp[-1], NLpars['xopt']
[0], NLpars['xopt'][1])
Mp.append(npred)
pmagplotlib.plot_trm(
PLT['aq'], Bs, TRMs, Bp, Mp, NLpars, rec[experiment_col])
if not save_plots:
pmagplotlib.draw_figs(PLT)
print('Banc= ', float(NLpars['banc'])*1e6)
trmTC = {}
for key in list(TrmRec.keys()):
# copy of info from thellier_specimens record
trmTC[key] = TrmRec[key]
trmTC[int_col] = '%8.3e' % (NLpars['banc'])
trmTC[meth_col] = TrmRec[meth_col]+":DA-NL"
PmagSpecRecs.append(trmTC)
if not save_plots:
ans = input("Return for next specimen, s[a]ve plot ")
if ans == 'a':
Name = {'aq': rec[spec_col]+'_TRM.{}'.format(fmt)}
pmagplotlib.save_plots(PLT, Name)
else:
Name = {'aq': rec[spec_col]+'_TRM.{}'.format(fmt)}
pmagplotlib.save_plots(PLT, Name)
specimen += 1
pmag.magic_write(output, PmagSpecRecs, outfile_type) | NAME
trmaq_magic.py
DESCTIPTION
does non-linear trm acquisisiton correction
SYNTAX
trmaq_magic.py [-h][-i][command line options]
OPTIONS
-h prints help message and quits
-i allows interactive setting of file names
-f MFILE, sets magic_measurements input file
-ft TSPEC, sets thellier_specimens input file
-F OUT, sets output for non-linear TRM acquisition corrected data
-sav save figures and quit
-fmt [png, svg, pdf]
-DM [2, 3] MagIC data model, default 3
DEFAULTS
MFILE: trmaq_measurements.txt
TSPEC: thellier_specimens.txt
OUT: NLT_specimens.txt | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/trmaq_magic.py#L8-L187 |
def main():
    """
    NAME
        huji_sample_magic.py

    DESCRIPTION
        takes tab delimited Hebrew University sample file and converts to MagIC formatted tables

    SYNTAX
        huji_sample_magic.py [command line options]

    OPTIONS
        -f FILE: specify input file
        -Fsa FILE: specify sample output file, default is: samples.txt
        -Fsi FILE: specify site output file, default is: sites.txt
        -Iso:  import sample orientation info - default is to set sample_az/dip to 0,0
        -ncn NCON:  specify naming convention: default is #1 below
        -mcd: specify sampling method codes as a colon delimited string:  [default is: FS-FD:SO-POM:SO-SUN]
             FS-FD field sampling done with a drill
             FS-H field sampling done with hand samples
             FS-LOC-GPS  field location done with GPS
             FS-LOC-MAP  field location done with map
             SO-POM   a Pomeroy orientation device was used
             SO-ASC   an ASC orientation device was used
             SO-MAG   orientation with magnetic compass
        -loc: location name, default="unknown"
        -DM: data model number (MagIC 2 or 3, default 3)

    INPUT FORMAT
        Input files must be tab delimited:
            Samp  Az Dip Dip_dir Dip
        Orientation convention:
             Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
                e.g. field_dip is degrees from horizontal of drill direction

         Magnetic declination convention:
             Az is already corrected in file

         Sample naming convention:
            [1] XXXXY: where XXXX is an arbitrary length site designation and Y
                is the single character sample designation.  e.g., TG001a is the
                first sample from site TG001.    [default]
            [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
            [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
            [4-Z] XXXX[YYY]:  YYY is sample designation with Z characters from site XXX
            [5] site name = sample name
            [6] site name entered in site_name column in the orient.txt format input file  -- NOT CURRENTLY SUPPORTED
            [7-Z] [XXX]YYY:  XXX is site designation with Z characters from samples  XXXYYY
            NB: all others you will have to either customize your
                self or e-mail [email protected] for help.

    OUTPUT
            output saved in samples will overwrite any existing files
    """
    args = sys.argv
    if "-h" in args:
        print(main.__doc__)
        sys.exit()
    #
    # initialize variables
    Z = 1
    # get arguments from the command line
    orient_file = pmag.get_named_arg("-f", reqd=True)
    data_model_num = int(float(pmag.get_named_arg("-DM", 3)))
    if data_model_num == 2:
        samp_file = pmag.get_named_arg("-Fsa", "er_samples.txt")
        site_file = pmag.get_named_arg("-Fsi", "er_sites.txt")
    else:
        samp_file = pmag.get_named_arg("-Fsa", "samples.txt")
        site_file = pmag.get_named_arg("-Fsi", "sites.txt")
    # NOTE(review): samp_file, site_file and Z are parsed but never passed to
    # convert.huji_sample below -- confirm against the convert.huji_sample
    # signature whether the output file names / Z are silently ignored.
    samp_con = pmag.get_named_arg("-ncn", "1")
    if "4" in samp_con:
        if "-" not in samp_con:
            # BUG FIX: message previously said "form 3-Z" for option [4]
            print("option [4] must be in form 4-Z where Z is an integer")
            sys.exit()
        else:
            Z = samp_con.split("-")[1]
    print(samp_con)
    meths = pmag.get_named_arg("-mcd", 'FS-FD:SO-POM:SO-SUN')
    location_name = pmag.get_named_arg("-loc", "unknown")
    # ignore orientation info unless -Iso was given
    if "-Iso" in args:
        ignore = 0
    else:
        ignore = 1
    convert.huji_sample(orient_file, meths, location_name, samp_con, ignore)
"""
NAME
huji_sample_magic.py
DESCRIPTION
takes tab delimited Hebrew University sample file and converts to MagIC formatted tables
SYNTAX
huji_sample_magic.py [command line options]
OPTIONS
-f FILE: specify input file
-Fsa FILE: specify sample output file, default is: samples.txt
-Fsi FILE: specify site output file, default is: sites.txt
-Iso: import sample orientation info - default is to set sample_az/dip to 0,0
-ncn NCON: specify naming convention: default is #1 below
-mcd: specify sampling method codes as a colon delimited string: [default is: FS-FD:SO-POM:SO-SUN]
FS-FD field sampling done with a drill
FS-H field sampling done with hand samples
FS-LOC-GPS field location done with GPS
FS-LOC-MAP field location done with map
SO-POM a Pomeroy orientation device was used
SO-ASC an ASC orientation device was used
SO-MAG orientation with magnetic compass
-loc: location name, default="unknown"
-DM: data model number (MagIC 2 or 3, default 3)
INPUT FORMAT
Input files must be tab delimited:
Samp Az Dip Dip_dir Dip
Orientation convention:
Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
e.g. field_dip is degrees from horizontal of drill direction
Magnetic declination convention:
Az is already corrected in file
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
NB: all others you will have to either customize your
self or e-mail [email protected] for help.
OUTPUT
output saved in samples will overwrite any existing files
"""
args = sys.argv
if "-h" in args:
print(main.__doc__)
sys.exit()
#
# initialize variables
Z = 1
# get arguments from the command line
orient_file = pmag.get_named_arg("-f", reqd=True)
data_model_num = int(float(pmag.get_named_arg("-DM", 3)))
if data_model_num == 2:
samp_file = pmag.get_named_arg("-Fsa", "er_samples.txt")
site_file = pmag.get_named_arg("-Fsi", "er_sites.txt")
else:
samp_file = pmag.get_named_arg("-Fsa", "samples.txt")
site_file = pmag.get_named_arg("-Fsi", "sites.txt")
samp_con = pmag.get_named_arg("-ncn", "1")
if "4" in samp_con:
if "-" not in samp_con:
print("option [4] must be in form 3-Z where Z is an integer")
sys.exit()
else:
Z = samp_con.split("-")[1]
#samp_con = "4"
print(samp_con)#, Z)
meths = pmag.get_named_arg("-mcd", 'FS-FD:SO-POM:SO-SUN')
location_name = pmag.get_named_arg("-loc", "unknown")
if "-Iso" in args:
ignore = 0
else:
ignore = 1
convert.huji_sample(orient_file, meths, location_name, samp_con, ignore) | NAME
huji_sample_magic.py
DESCRIPTION
takes tab delimited Hebrew University sample file and converts to MagIC formatted tables
SYNTAX
huji_sample_magic.py [command line options]
OPTIONS
-f FILE: specify input file
-Fsa FILE: specify sample output file, default is: samples.txt
-Fsi FILE: specify site output file, default is: sites.txt
-Iso: import sample orientation info - default is to set sample_az/dip to 0,0
-ncn NCON: specify naming convention: default is #1 below
-mcd: specify sampling method codes as a colon delimited string: [default is: FS-FD:SO-POM:SO-SUN]
FS-FD field sampling done with a drill
FS-H field sampling done with hand samples
FS-LOC-GPS field location done with GPS
FS-LOC-MAP field location done with map
SO-POM a Pomeroy orientation device was used
SO-ASC an ASC orientation device was used
SO-MAG orientation with magnetic compass
-loc: location name, default="unknown"
-DM: data model number (MagIC 2 or 3, default 3)
INPUT FORMAT
Input files must be tab delimited:
Samp Az Dip Dip_dir Dip
Orientation convention:
Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip
e.g. field_dip is degrees from horizontal of drill direction
Magnetic declination convention:
Az is already corrected in file
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
NB: all others you will have to either customize your
self or e-mail [email protected] for help.
OUTPUT
output saved in samples will overwrite any existing files | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/conversion_scripts/huji_sample_magic.py#L7-L93 |
PmagPy/PmagPy | programs/vector_mean.py | main | def main():
"""
NAME
vector_mean.py
DESCRIPTION
calculates vector mean of vector data
INPUT FORMAT
takes dec, inc, int from an input file
SYNTAX
vector_mean.py [command line options] [< filename]
OPTIONS
-h prints help message and quits
-f FILE, specify input file
-F FILE, specify output file
< filename for reading from standard input
OUTPUT
mean dec, mean inc, R, N
"""
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-f' in sys.argv:
dat=[]
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
else:
file = sys.stdin # read from standard input
ofile=""
if '-F' in sys.argv:
ind = sys.argv.index('-F')
ofile= sys.argv[ind+1]
out = open(ofile, 'w + a')
DIIs=numpy.loadtxt(file,dtype=numpy.float) # read in the data
#
vpars,R=pmag.vector_mean(DIIs)
outstring='%7.1f %7.1f %10.3e %i'%(vpars[0],vpars[1],R,len(DIIs))
if ofile == "":
print(outstring)
else:
out.write(outstring + "\n") | python | def main():
"""
NAME
vector_mean.py
DESCRIPTION
calculates vector mean of vector data
INPUT FORMAT
takes dec, inc, int from an input file
SYNTAX
vector_mean.py [command line options] [< filename]
OPTIONS
-h prints help message and quits
-f FILE, specify input file
-F FILE, specify output file
< filename for reading from standard input
OUTPUT
mean dec, mean inc, R, N
"""
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-f' in sys.argv:
dat=[]
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
else:
file = sys.stdin # read from standard input
ofile=""
if '-F' in sys.argv:
ind = sys.argv.index('-F')
ofile= sys.argv[ind+1]
out = open(ofile, 'w + a')
DIIs=numpy.loadtxt(file,dtype=numpy.float) # read in the data
#
vpars,R=pmag.vector_mean(DIIs)
outstring='%7.1f %7.1f %10.3e %i'%(vpars[0],vpars[1],R,len(DIIs))
if ofile == "":
print(outstring)
else:
out.write(outstring + "\n") | NAME
vector_mean.py
DESCRIPTION
calculates vector mean of vector data
INPUT FORMAT
takes dec, inc, int from an input file
SYNTAX
vector_mean.py [command line options] [< filename]
OPTIONS
-h prints help message and quits
-f FILE, specify input file
-F FILE, specify output file
< filename for reading from standard input
OUTPUT
mean dec, mean inc, R, N | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/vector_mean.py#L7-L52 |
PmagPy/PmagPy | programs/scalc.py | main | def main():
"""
NAME
scalc.py
DESCRIPTION
calculates Sb from VGP Long,VGP Lat,Directional kappa,Site latitude data
SYNTAX
scalc -h [command line options] [< standard input]
INPUT
takes space delimited files with PLong, PLat,[kappa, N_site, slat]
OPTIONS
-h prints help message and quits
-f FILE: specify input file
-c cutoff: specify VGP colatitude cutoff value
-k cutoff: specify kappa cutoff
-v : use the VanDammme criterion
-a: use antipodes of reverse data: default is to use only normal
-r use only reverse data, default is False
-b: do a bootstrap for confidence
-p: do relative to principle axis
-n: set minimum n for samples (specimens) per site
-mm97: correct for within site scatter (McElhinny & McFadden, 1997)
NOTES
if kappa, N_site, lat supplied, will consider within site scatter
OUTPUT
N Sb Sb_lower Sb_upper Co-lat. Cutoff
"""
kappa, cutoff = 0, 180
rev, anti, boot = 0, 0, 0
spin,n,v,mm97 = 0,0,0,0
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind = sys.argv.index("-f")
in_file = sys.argv[ind + 1]
vgp_df=pd.read_csv(in_file,delim_whitespace=True,header=None)
else:
vgp_df=pd.read_csv(sys.stdin,delim_whitespace=True,header=None)
if '-c' in sys.argv:
ind = sys.argv.index('-c')
cutoff = float(sys.argv[ind + 1])
if '-k' in sys.argv:
ind = sys.argv.index('-k')
kappa = float(sys.argv[ind + 1])
if '-n' in sys.argv:
ind = sys.argv.index('-n')
n = int(sys.argv[ind + 1])
if '-a' in sys.argv: anti = 1
if '-r' in sys.argv: rev=1
if '-b' in sys.argv: boot = 1
if '-v' in sys.argv: v = 1
if '-p' in sys.argv: spin = 1
if '-mm97' in sys.argv: mm97=1
#
#
if len(list(vgp_df.columns))==2:
vgp_df.columns=['vgp_lon','vgp_lat']
vgp_df['dir_k'],vgp_df['dir_n_samples'],vgp_df['lat']=0,0,0
else:
vgp_df.columns=['vgp_lon','vgp_lat','dir_k','dir_n_samples','lat']
N,S_B,low,high,cutoff=pmag.scalc_vgp_df(vgp_df,anti=anti,rev=rev,cutoff=cutoff,kappa=kappa,n=n,spin=spin,v=v,boot=boot,mm97=mm97)
if high!=0:
print(N, '%7.1f %7.1f %7.1f %7.1f ' % (S_B, low, high, cutoff))
else:
print(N, '%7.1f %7.1f ' % (S_B, cutoff)) | python | def main():
"""
NAME
scalc.py
DESCRIPTION
calculates Sb from VGP Long,VGP Lat,Directional kappa,Site latitude data
SYNTAX
scalc -h [command line options] [< standard input]
INPUT
takes space delimited files with PLong, PLat,[kappa, N_site, slat]
OPTIONS
-h prints help message and quits
-f FILE: specify input file
-c cutoff: specify VGP colatitude cutoff value
-k cutoff: specify kappa cutoff
-v : use the VanDammme criterion
-a: use antipodes of reverse data: default is to use only normal
-r use only reverse data, default is False
-b: do a bootstrap for confidence
-p: do relative to principle axis
-n: set minimum n for samples (specimens) per site
-mm97: correct for within site scatter (McElhinny & McFadden, 1997)
NOTES
if kappa, N_site, lat supplied, will consider within site scatter
OUTPUT
N Sb Sb_lower Sb_upper Co-lat. Cutoff
"""
kappa, cutoff = 0, 180
rev, anti, boot = 0, 0, 0
spin,n,v,mm97 = 0,0,0,0
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind = sys.argv.index("-f")
in_file = sys.argv[ind + 1]
vgp_df=pd.read_csv(in_file,delim_whitespace=True,header=None)
else:
vgp_df=pd.read_csv(sys.stdin,delim_whitespace=True,header=None)
if '-c' in sys.argv:
ind = sys.argv.index('-c')
cutoff = float(sys.argv[ind + 1])
if '-k' in sys.argv:
ind = sys.argv.index('-k')
kappa = float(sys.argv[ind + 1])
if '-n' in sys.argv:
ind = sys.argv.index('-n')
n = int(sys.argv[ind + 1])
if '-a' in sys.argv: anti = 1
if '-r' in sys.argv: rev=1
if '-b' in sys.argv: boot = 1
if '-v' in sys.argv: v = 1
if '-p' in sys.argv: spin = 1
if '-mm97' in sys.argv: mm97=1
#
#
if len(list(vgp_df.columns))==2:
vgp_df.columns=['vgp_lon','vgp_lat']
vgp_df['dir_k'],vgp_df['dir_n_samples'],vgp_df['lat']=0,0,0
else:
vgp_df.columns=['vgp_lon','vgp_lat','dir_k','dir_n_samples','lat']
N,S_B,low,high,cutoff=pmag.scalc_vgp_df(vgp_df,anti=anti,rev=rev,cutoff=cutoff,kappa=kappa,n=n,spin=spin,v=v,boot=boot,mm97=mm97)
if high!=0:
print(N, '%7.1f %7.1f %7.1f %7.1f ' % (S_B, low, high, cutoff))
else:
print(N, '%7.1f %7.1f ' % (S_B, cutoff)) | NAME
scalc.py
DESCRIPTION
calculates Sb from VGP Long,VGP Lat,Directional kappa,Site latitude data
SYNTAX
scalc -h [command line options] [< standard input]
INPUT
takes space delimited files with PLong, PLat,[kappa, N_site, slat]
OPTIONS
-h prints help message and quits
-f FILE: specify input file
-c cutoff: specify VGP colatitude cutoff value
-k cutoff: specify kappa cutoff
-v : use the VanDammme criterion
-a: use antipodes of reverse data: default is to use only normal
-r use only reverse data, default is False
-b: do a bootstrap for confidence
-p: do relative to principle axis
-n: set minimum n for samples (specimens) per site
-mm97: correct for within site scatter (McElhinny & McFadden, 1997)
NOTES
if kappa, N_site, lat supplied, will consider within site scatter
OUTPUT
N Sb Sb_lower Sb_upper Co-lat. Cutoff | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/scalc.py#L11-L76 |
PmagPy/PmagPy | programs/irmaq_magic.py | main | def main():
"""
NAME
irmaq_magic.py
DESCRIPTION
plots IRM acquisition curves from measurements file
SYNTAX
irmaq_magic [command line options]
INPUT
takes magic formatted magic_measurements.txt files
OPTIONS
-h prints help message and quits
-f FILE: specify input file, default is: magic_measurements.txt/measurements.txt
-obj OBJ: specify object [loc, sit, sam, spc] for plot, default is by location
-N ; do not normalize by last point - use original units
-fmt [png,jpg,eps,pdf] set plot file format [default is svg]
-sav save plot[s] and quit
-DM MagIC data model number, default is 3
NOTE
loc: location (study); sit: site; sam: sample; spc: specimen
"""
FIG = {} # plot dictionary
FIG['exp'] = 1 # exp is figure 1
dir_path = './'
plot, fmt = 0, 'svg'
units = 'T',
XLP = []
norm = 1
LP = "LP-IRM"
if len(sys.argv) > 1:
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
data_model = int(pmag.get_named_arg("-DM", 3))
if '-N' in sys.argv:
norm = 0
if '-sav' in sys.argv:
plot = 1
if '-fmt' in sys.argv:
ind = sys.argv.index("-fmt")
fmt = sys.argv[ind + 1]
if data_model == 3:
in_file = pmag.get_named_arg("-f", 'measurements.txt')
else:
in_file = pmag.get_named_arg("-f", 'magic_measurements.txt')
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path = sys.argv[ind + 1]
dir_path = os.path.realpath(dir_path)
in_file = pmag.resolve_file_name(in_file, dir_path)
if '-WD' not in sys.argv:
dir_path = os.path.split(in_file)[0]
plot_by = pmag.get_named_arg("-obj", "loc")
if data_model == 3:
plot_key = 'location'
if plot_by == 'sit':
plot_key = 'site'
if plot_by == 'sam':
plot_key = 'sample'
if plot_by == 'spc':
plot_key = 'specimen'
else:
plot_key = 'er_location_name'
if plot_by == 'sit':
plot_key = 'er_site_name'
if plot_by == 'sam':
plot_key = 'er_sample_name'
if plot_by == 'spc':
plot_key = 'er_specimen_name'
# set defaults and get more information if needed
if data_model == 3:
dmag_key = 'treat_dc_field'
else:
dmag_key = 'treatment_dc_field'
#
if data_model == 3 and plot_key != 'specimen':
# gonna need to read in more files
print('-W- You are trying to plot measurements by {}'.format(plot_key))
print(' By default, this information is not available in your measurement file.')
print(' Trying to acquire this information from {}'.format(dir_path))
con = cb.Contribution(dir_path)
meas_df = con.propagate_location_to_measurements()
if meas_df is None:
print('-W- No data found in {}'.format(dir_path))
return
if plot_key not in meas_df.columns:
print('-W- Could not find required data.')
print(' Try a different plot key.')
return
else:
print('-I- Found {} information, continuing with plotting'.format(plot_key))
# need to take the data directly from the contribution here, to keep
# location/site/sample columns in the measurements table
data = con.tables['measurements'].convert_to_pmag_data_list()
file_type = "measurements"
else:
data, file_type = pmag.magic_read(in_file)
# read in data
sids = pmag.get_specs(data)
pmagplotlib.plot_init(FIG['exp'], 6, 6)
#
#
# find desired intensity data
#
# get plotlist
#
plotlist = []
if data_model == 3:
intlist = ['magn_moment', 'magn_volume', 'magn_mass', 'magnitude']
else:
intlist = ['measurement_magnitude', 'measurement_magn_moment',
'measurement_magn_volume', 'measurement_magn_mass']
IntMeths = []
# get all the records with this lab protocol
#print('data', len(data))
#print('data[0]', data[0])
if data_model == 3:
data = pmag.get_dictitem(data, 'method_codes', LP, 'has')
else:
data = pmag.get_dictitem(data, 'magic_method_codes', LP, 'has')
Ints = {}
NoInts, int_key = 1, ""
for key in intlist:
# get all non-blank data for intensity type
Ints[key] = pmag.get_dictitem(data, key, '', 'F')
if len(Ints[key]) > 0:
NoInts = 0
if int_key == "":
int_key = key
if NoInts == 1:
print('No intensity information found')
sys.exit()
for rec in Ints[int_key]:
if rec[plot_key] not in plotlist:
plotlist.append(rec[plot_key])
plotlist.sort()
for plt in plotlist:
print(plt)
INTblock = []
# get data with right intensity info whose plot_key matches plot
data = pmag.get_dictitem(Ints[int_key], plot_key, plt, 'T')
# get a list of specimens with appropriate data
sids = pmag.get_specs(data)
if len(sids) > 0:
title = data[0][plot_key]
for s in sids:
INTblock = []
# get data for each specimen
if data_model == 3:
sdata = pmag.get_dictitem(data, 'specimen', s, 'T')
else:
sdata = pmag.get_dictitem(data, 'er_specimen_name', s, 'T')
for rec in sdata:
INTblock.append([float(rec[dmag_key]), 0, 0,
float(rec[int_key]), 1, 'g'])
pmagplotlib.plot_mag(FIG['exp'], INTblock, title, 0, units, norm)
files = {}
for key in list(FIG.keys()):
files[key] = title + '_' + LP + '.' + fmt
if plot == 0:
pmagplotlib.draw_figs(FIG)
ans = input(" S[a]ve to save plot, [q]uit, Return to continue: ")
if ans == 'q':
sys.exit()
if ans == "a":
pmagplotlib.save_plots(FIG, files)
if plt != plotlist[-1]: # if it isn't the last plot, init the next one
pmagplotlib.plot_init(FIG['exp'], 6, 6)
else:
pmagplotlib.save_plots(FIG, files)
pmagplotlib.clearFIG(FIG['exp']) | python | def main():
"""
NAME
irmaq_magic.py
DESCRIPTION
plots IRM acquisition curves from measurements file
SYNTAX
irmaq_magic [command line options]
INPUT
takes magic formatted magic_measurements.txt files
OPTIONS
-h prints help message and quits
-f FILE: specify input file, default is: magic_measurements.txt/measurements.txt
-obj OBJ: specify object [loc, sit, sam, spc] for plot, default is by location
-N ; do not normalize by last point - use original units
-fmt [png,jpg,eps,pdf] set plot file format [default is svg]
-sav save plot[s] and quit
-DM MagIC data model number, default is 3
NOTE
loc: location (study); sit: site; sam: sample; spc: specimen
"""
FIG = {} # plot dictionary
FIG['exp'] = 1 # exp is figure 1
dir_path = './'
plot, fmt = 0, 'svg'
units = 'T',
XLP = []
norm = 1
LP = "LP-IRM"
if len(sys.argv) > 1:
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
data_model = int(pmag.get_named_arg("-DM", 3))
if '-N' in sys.argv:
norm = 0
if '-sav' in sys.argv:
plot = 1
if '-fmt' in sys.argv:
ind = sys.argv.index("-fmt")
fmt = sys.argv[ind + 1]
if data_model == 3:
in_file = pmag.get_named_arg("-f", 'measurements.txt')
else:
in_file = pmag.get_named_arg("-f", 'magic_measurements.txt')
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path = sys.argv[ind + 1]
dir_path = os.path.realpath(dir_path)
in_file = pmag.resolve_file_name(in_file, dir_path)
if '-WD' not in sys.argv:
dir_path = os.path.split(in_file)[0]
plot_by = pmag.get_named_arg("-obj", "loc")
if data_model == 3:
plot_key = 'location'
if plot_by == 'sit':
plot_key = 'site'
if plot_by == 'sam':
plot_key = 'sample'
if plot_by == 'spc':
plot_key = 'specimen'
else:
plot_key = 'er_location_name'
if plot_by == 'sit':
plot_key = 'er_site_name'
if plot_by == 'sam':
plot_key = 'er_sample_name'
if plot_by == 'spc':
plot_key = 'er_specimen_name'
# set defaults and get more information if needed
if data_model == 3:
dmag_key = 'treat_dc_field'
else:
dmag_key = 'treatment_dc_field'
#
if data_model == 3 and plot_key != 'specimen':
# gonna need to read in more files
print('-W- You are trying to plot measurements by {}'.format(plot_key))
print(' By default, this information is not available in your measurement file.')
print(' Trying to acquire this information from {}'.format(dir_path))
con = cb.Contribution(dir_path)
meas_df = con.propagate_location_to_measurements()
if meas_df is None:
print('-W- No data found in {}'.format(dir_path))
return
if plot_key not in meas_df.columns:
print('-W- Could not find required data.')
print(' Try a different plot key.')
return
else:
print('-I- Found {} information, continuing with plotting'.format(plot_key))
# need to take the data directly from the contribution here, to keep
# location/site/sample columns in the measurements table
data = con.tables['measurements'].convert_to_pmag_data_list()
file_type = "measurements"
else:
data, file_type = pmag.magic_read(in_file)
# read in data
sids = pmag.get_specs(data)
pmagplotlib.plot_init(FIG['exp'], 6, 6)
#
#
# find desired intensity data
#
# get plotlist
#
plotlist = []
if data_model == 3:
intlist = ['magn_moment', 'magn_volume', 'magn_mass', 'magnitude']
else:
intlist = ['measurement_magnitude', 'measurement_magn_moment',
'measurement_magn_volume', 'measurement_magn_mass']
IntMeths = []
# get all the records with this lab protocol
#print('data', len(data))
#print('data[0]', data[0])
if data_model == 3:
data = pmag.get_dictitem(data, 'method_codes', LP, 'has')
else:
data = pmag.get_dictitem(data, 'magic_method_codes', LP, 'has')
Ints = {}
NoInts, int_key = 1, ""
for key in intlist:
# get all non-blank data for intensity type
Ints[key] = pmag.get_dictitem(data, key, '', 'F')
if len(Ints[key]) > 0:
NoInts = 0
if int_key == "":
int_key = key
if NoInts == 1:
print('No intensity information found')
sys.exit()
for rec in Ints[int_key]:
if rec[plot_key] not in plotlist:
plotlist.append(rec[plot_key])
plotlist.sort()
for plt in plotlist:
print(plt)
INTblock = []
# get data with right intensity info whose plot_key matches plot
data = pmag.get_dictitem(Ints[int_key], plot_key, plt, 'T')
# get a list of specimens with appropriate data
sids = pmag.get_specs(data)
if len(sids) > 0:
title = data[0][plot_key]
for s in sids:
INTblock = []
# get data for each specimen
if data_model == 3:
sdata = pmag.get_dictitem(data, 'specimen', s, 'T')
else:
sdata = pmag.get_dictitem(data, 'er_specimen_name', s, 'T')
for rec in sdata:
INTblock.append([float(rec[dmag_key]), 0, 0,
float(rec[int_key]), 1, 'g'])
pmagplotlib.plot_mag(FIG['exp'], INTblock, title, 0, units, norm)
files = {}
for key in list(FIG.keys()):
files[key] = title + '_' + LP + '.' + fmt
if plot == 0:
pmagplotlib.draw_figs(FIG)
ans = input(" S[a]ve to save plot, [q]uit, Return to continue: ")
if ans == 'q':
sys.exit()
if ans == "a":
pmagplotlib.save_plots(FIG, files)
if plt != plotlist[-1]: # if it isn't the last plot, init the next one
pmagplotlib.plot_init(FIG['exp'], 6, 6)
else:
pmagplotlib.save_plots(FIG, files)
pmagplotlib.clearFIG(FIG['exp']) | NAME
irmaq_magic.py
DESCRIPTION
plots IRM acquisition curves from measurements file
SYNTAX
irmaq_magic [command line options]
INPUT
takes magic formatted magic_measurements.txt files
OPTIONS
-h prints help message and quits
-f FILE: specify input file, default is: magic_measurements.txt/measurements.txt
-obj OBJ: specify object [loc, sit, sam, spc] for plot, default is by location
-N ; do not normalize by last point - use original units
-fmt [png,jpg,eps,pdf] set plot file format [default is svg]
-sav save plot[s] and quit
-DM MagIC data model number, default is 3
NOTE
loc: location (study); sit: site; sam: sample; spc: specimen | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/irmaq_magic.py#L13-L188 |
PmagPy/PmagPy | pmagpy/func.py | all_but_axis | def all_but_axis(i, axis, num_axes):
"""
Return a slice covering all combinations with coordinate i along
axis. (Effectively the hyperplane perpendicular to axis at i.)
"""
the_slice = ()
for j in range(num_axes):
if j == axis:
the_slice = the_slice + (i,)
else:
the_slice = the_slice + (slice(None),)
return the_slice | python | def all_but_axis(i, axis, num_axes):
"""
Return a slice covering all combinations with coordinate i along
axis. (Effectively the hyperplane perpendicular to axis at i.)
"""
the_slice = ()
for j in range(num_axes):
if j == axis:
the_slice = the_slice + (i,)
else:
the_slice = the_slice + (slice(None),)
return the_slice | Return a slice covering all combinations with coordinate i along
axis. (Effectively the hyperplane perpendicular to axis at i.) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/func.py#L188-L199 |
PmagPy/PmagPy | pmagpy/func.py | array_map | def array_map(f, ar):
"Apply an ordinary function to all values in an array."
flat_ar = ravel(ar)
out = zeros(len(flat_ar), flat_ar.typecode())
for i in range(len(flat_ar)):
out[i] = f(flat_ar[i])
out.shape = ar.shape
return out | python | def array_map(f, ar):
"Apply an ordinary function to all values in an array."
flat_ar = ravel(ar)
out = zeros(len(flat_ar), flat_ar.typecode())
for i in range(len(flat_ar)):
out[i] = f(flat_ar[i])
out.shape = ar.shape
return out | Apply an ordinary function to all values in an array. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/func.py#L294-L301 |
PmagPy/PmagPy | dialogs/demag_dialogs.py | VGP_Dialog.on_plot_select | def on_plot_select(self,event):
"""
Select data point if cursor is in range of a data point
@param: event -> the wx Mouseevent for that click
"""
if not self.xdata or not self.ydata: return
pos=event.GetPosition()
width, height = self.canvas.get_width_height()
pos[1] = height - pos[1]
xpick_data,ypick_data = pos
xdata_org = self.xdata
ydata_org = self.ydata
data_corrected = self.map.transData.transform(vstack([xdata_org,ydata_org]).T)
xdata,ydata = data_corrected.T
xdata = list(map(float,xdata))
ydata = list(map(float,ydata))
e = 4e0
index = None
for i,(x,y) in enumerate(zip(xdata,ydata)):
if 0 < sqrt((x-xpick_data)**2. + (y-ypick_data)**2.) < e:
index = i
break
if index==None: print("Couldn't find point %.1f,%.1f"%(xpick_data,ypick_data))
self.change_selected(index) | python | def on_plot_select(self,event):
"""
Select data point if cursor is in range of a data point
@param: event -> the wx Mouseevent for that click
"""
if not self.xdata or not self.ydata: return
pos=event.GetPosition()
width, height = self.canvas.get_width_height()
pos[1] = height - pos[1]
xpick_data,ypick_data = pos
xdata_org = self.xdata
ydata_org = self.ydata
data_corrected = self.map.transData.transform(vstack([xdata_org,ydata_org]).T)
xdata,ydata = data_corrected.T
xdata = list(map(float,xdata))
ydata = list(map(float,ydata))
e = 4e0
index = None
for i,(x,y) in enumerate(zip(xdata,ydata)):
if 0 < sqrt((x-xpick_data)**2. + (y-ypick_data)**2.) < e:
index = i
break
if index==None: print("Couldn't find point %.1f,%.1f"%(xpick_data,ypick_data))
self.change_selected(index) | Select data point if cursor is in range of a data point
@param: event -> the wx Mouseevent for that click | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/demag_dialogs.py#L155-L180 |
PmagPy/PmagPy | dialogs/demag_dialogs.py | VGP_Dialog.on_change_plot_cursor | def on_change_plot_cursor(self,event):
"""
If mouse is over data point making it selectable change the shape of the cursor
@param: event -> the wx Mouseevent for that click
"""
if not self.xdata or not self.ydata: return
pos=event.GetPosition()
width, height = self.canvas.get_width_height()
pos[1] = height - pos[1]
xpick_data,ypick_data = pos
xdata_org = self.xdata
ydata_org = self.ydata
data_corrected = self.map.transData.transform(vstack([xdata_org,ydata_org]).T)
xdata,ydata = data_corrected.T
xdata = list(map(float,xdata))
ydata = list(map(float,ydata))
e = 4e0
if self.plot_setting == "Zoom":
self.canvas.SetCursor(wx.Cursor(wx.CURSOR_CROSS))
else:
self.canvas.SetCursor(wx.Cursor(wx.CURSOR_ARROW))
for i,(x,y) in enumerate(zip(xdata,ydata)):
if 0 < sqrt((x-xpick_data)**2. + (y-ypick_data)**2.) < e:
self.canvas.SetCursor(wx.Cursor(wx.CURSOR_HAND))
break
event.Skip() | python | def on_change_plot_cursor(self,event):
"""
If mouse is over data point making it selectable change the shape of the cursor
@param: event -> the wx Mouseevent for that click
"""
if not self.xdata or not self.ydata: return
pos=event.GetPosition()
width, height = self.canvas.get_width_height()
pos[1] = height - pos[1]
xpick_data,ypick_data = pos
xdata_org = self.xdata
ydata_org = self.ydata
data_corrected = self.map.transData.transform(vstack([xdata_org,ydata_org]).T)
xdata,ydata = data_corrected.T
xdata = list(map(float,xdata))
ydata = list(map(float,ydata))
e = 4e0
if self.plot_setting == "Zoom":
self.canvas.SetCursor(wx.Cursor(wx.CURSOR_CROSS))
else:
self.canvas.SetCursor(wx.Cursor(wx.CURSOR_ARROW))
for i,(x,y) in enumerate(zip(xdata,ydata)):
if 0 < sqrt((x-xpick_data)**2. + (y-ypick_data)**2.) < e:
self.canvas.SetCursor(wx.Cursor(wx.CURSOR_HAND))
break
event.Skip() | If mouse is over data point making it selectable change the shape of the cursor
@param: event -> the wx Mouseevent for that click | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/demag_dialogs.py#L182-L208 |
PmagPy/PmagPy | dialogs/demag_dialogs.py | user_input.get_values | def get_values(self):
"""
Applies parsing functions to each input as specified in init before returning a tuple with first entry being a boolean which specifies if the user entered all values and a second entry which is a dictionary of input names to parsed values.
"""
return_dict = {}
for i,ctrl in enumerate(self.list_ctrls):
if hasattr(self.parse_funcs,'__getitem__') and len(self.parse_funcs)>i and hasattr(self.parse_funcs[i],'__call__'):
try: return_dict[self.inputs[i]] = self.parse_funcs[i](ctrl.GetValue())
except: return_dict[self.inputs[i]] = ctrl.GetValue()
else:
return_dict[self.inputs[i]] = ctrl.GetValue()
return ('' not in list(return_dict.values()), return_dict) | python | def get_values(self):
"""
Applies parsing functions to each input as specified in init before returning a tuple with first entry being a boolean which specifies if the user entered all values and a second entry which is a dictionary of input names to parsed values.
"""
return_dict = {}
for i,ctrl in enumerate(self.list_ctrls):
if hasattr(self.parse_funcs,'__getitem__') and len(self.parse_funcs)>i and hasattr(self.parse_funcs[i],'__call__'):
try: return_dict[self.inputs[i]] = self.parse_funcs[i](ctrl.GetValue())
except: return_dict[self.inputs[i]] = ctrl.GetValue()
else:
return_dict[self.inputs[i]] = ctrl.GetValue()
return ('' not in list(return_dict.values()), return_dict) | Applies parsing functions to each input as specified in init before returning a tuple with first entry being a boolean which specifies if the user entered all values and a second entry which is a dictionary of input names to parsed values. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/demag_dialogs.py#L545-L556 |
PmagPy/PmagPy | programs/upload_magic.py | main | def main():
"""
NAME
upload_magic.py
DESCRIPTION
This program will prepare your MagIC text files for uploading to the MagIC database
it will check for all the MagIC text files and skip the missing ones
SYNTAX
upload_magic.py
INPUT
MagIC txt files
OPTIONS
-h prints help message and quits
-all include all the measurement data, default is only those used in interpretations
-DM specify which MagIC data model number to use (2 or 3). Default is 3.
OUTPUT
upload file: file for uploading to MagIC database
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
else:
data_model_num = pmag.get_named_arg("-DM", 3)
dataframe = extractor.command_line_dataframe([['cat', False, 0], ['F', False, ''], ['f', False, '']])
checked_args = extractor.extract_and_check_args(sys.argv, dataframe)
dir_path, concat = extractor.get_vars(['WD', 'cat'], checked_args)
data_model_num = int(float(data_model_num))
if data_model_num == 2:
ipmag.upload_magic2(concat, dir_path)
else:
ipmag.upload_magic(concat, dir_path) | python | def main():
"""
NAME
upload_magic.py
DESCRIPTION
This program will prepare your MagIC text files for uploading to the MagIC database
it will check for all the MagIC text files and skip the missing ones
SYNTAX
upload_magic.py
INPUT
MagIC txt files
OPTIONS
-h prints help message and quits
-all include all the measurement data, default is only those used in interpretations
-DM specify which MagIC data model number to use (2 or 3). Default is 3.
OUTPUT
upload file: file for uploading to MagIC database
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
else:
data_model_num = pmag.get_named_arg("-DM", 3)
dataframe = extractor.command_line_dataframe([['cat', False, 0], ['F', False, ''], ['f', False, '']])
checked_args = extractor.extract_and_check_args(sys.argv, dataframe)
dir_path, concat = extractor.get_vars(['WD', 'cat'], checked_args)
data_model_num = int(float(data_model_num))
if data_model_num == 2:
ipmag.upload_magic2(concat, dir_path)
else:
ipmag.upload_magic(concat, dir_path) | NAME
upload_magic.py
DESCRIPTION
This program will prepare your MagIC text files for uploading to the MagIC database
it will check for all the MagIC text files and skip the missing ones
SYNTAX
upload_magic.py
INPUT
MagIC txt files
OPTIONS
-h prints help message and quits
-all include all the measurement data, default is only those used in interpretations
-DM specify which MagIC data model number to use (2 or 3). Default is 3.
OUTPUT
upload file: file for uploading to MagIC database | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/upload_magic.py#L7-L42 |
PmagPy/PmagPy | programs/forc_diagram.py | d2_func | def d2_func(x, y, z):
'''
#=================================================
/poly fit for every SF grid data
#=================================================
'''
X, Y = np.meshgrid(x, y, copy=False)
X = X.flatten()
Y = Y.flatten()
A = np.array([np.ones(len(X)), X, X**2, Y, Y**2, X*Y]).T
Z = np.array(z)
B = Z.flatten()
# print(A.shape,B.shape)
coeff, r, rank, s = np.linalg.lstsq(A, B, rcond=None)
return -coeff[5] | python | def d2_func(x, y, z):
'''
#=================================================
/poly fit for every SF grid data
#=================================================
'''
X, Y = np.meshgrid(x, y, copy=False)
X = X.flatten()
Y = Y.flatten()
A = np.array([np.ones(len(X)), X, X**2, Y, Y**2, X*Y]).T
Z = np.array(z)
B = Z.flatten()
# print(A.shape,B.shape)
coeff, r, rank, s = np.linalg.lstsq(A, B, rcond=None)
return -coeff[5] | #=================================================
/poly fit for every SF grid data
#================================================= | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/forc_diagram.py#L271-L285 |
PmagPy/PmagPy | programs/forc_diagram.py | grid_list | def grid_list(data):
'''
#=================================================
/process the grid data
/convert to list data for poly fitting
#=================================================
'''
a = []
b = []
M = []
for i in data:
a.append(i[0]) # np.array([i[1] for i in data], dtype=np.float64)
b.append(i[1]) # np.array([i[0] for i in data], dtype=np.float64)
M.append(i[2]) # np.array([i[2] for i in data], dtype=np.float64)
a = np.array(a, dtype=np.float64).tolist()
b = np.array(b, dtype=np.float64).tolist()
M = np.array(M, dtype=np.float64).tolist()
a = list(set(a))
b = list(set(b))
return a, b, M | python | def grid_list(data):
'''
#=================================================
/process the grid data
/convert to list data for poly fitting
#=================================================
'''
a = []
b = []
M = []
for i in data:
a.append(i[0]) # np.array([i[1] for i in data], dtype=np.float64)
b.append(i[1]) # np.array([i[0] for i in data], dtype=np.float64)
M.append(i[2]) # np.array([i[2] for i in data], dtype=np.float64)
a = np.array(a, dtype=np.float64).tolist()
b = np.array(b, dtype=np.float64).tolist()
M = np.array(M, dtype=np.float64).tolist()
a = list(set(a))
b = list(set(b))
return a, b, M | #=================================================
/process the grid data
/convert to list data for poly fitting
#================================================= | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/forc_diagram.py#L288-L307 |
PmagPy/PmagPy | programs/forc_diagram.py | Forc.fit | def fit(self, SF, x_range, y_range, matrix_z):
'''
#=================================================
/the main fitting process
/xx,yy,zz = Hb,Ha,p
/p is the FORC distribution
/m0,n0 is the index of values on Ha = Hb
/then loop m0 and n0
/based on smooth factor(SF)
/select data grid from the matrix_z for curve fitting
#=================================================
'''
xx, yy, zz = [], [], []
m0, n0 = [], []
for m, n in itertools.product(np.arange(0, len(x_range), step=SF), np.arange(0, len(y_range), step=SF)):
if x_range[m] > y_range[n]: # Ha nearly equal Hb
m0.append(m)
n0.append(n)
aa, bb, cc = [], [], []
for m, n in zip(m0, n0):
s = 0
try:
grid_data = []
a_ = x_range[m+s]
b_ = y_range[n-s]
for i, j in itertools.product(np.arange(3*SF+1), np.arange(3*SF+1)):
try:
grid_data.append(
[x_range[m+s+i], y_range[n-s-j], matrix_z.item(n-s-j, m+s+i)])
except:
try:
for i, j in itertools.product(np.arange(3), np.arange(3)):
grid_data.append(
[x_range[m+i], y_range[n-j], matrix_z.item(n-j, m+i)])
except:
pass
# print(grid_data)
'''
#=================================================
/when SF = n
/data grid as (2*n+1)x(2*n+1)
/grid_list: convert grid to list
/every grid produce on FORC distritution p
/the poly fitting use d2_func
#=================================================
'''
x, y, z = grid_list(grid_data)
try:
p = d2_func(x, y, z)
# print(p)
xx.append((a_-b_)/2)
yy.append((a_+b_)/2)
zz.append(p)
except Exception as e:
# print(e)
pass
except:
pass
'''
#=================================================
/the data will be save as pandas dataframe
/all the data with nan values will be delete be dropna()
#=================================================
'''
# print(zz)
df = pd.DataFrame({'x': xx, 'y': yy, 'z': zz})
#df = df.replace(0,np.nan)
df = df.dropna()
'''
#=================================================
/due to the space near Bc = zero
/the Bi values when Bc <0.003 will be mirrored to -Bc
#=================================================
'''
df_negative = df[(df.x < 0.03)].copy()
df_negative.x = df_negative.x*-1
df = df.append(df_negative)
df = df.drop_duplicates(['x', 'y'])
df = df.sort_values('x')
# plt.scatter(df.x,df.y,c=df.z)
# plt.show()
'''
#=================================================
/reset the Bc and Bi range by X,Y
/use linear interpolate to obtain FORC distribution
#=================================================
'''
xrange = [0, int((np.max(df.x)+0.05)*10)/10]
yrange = [int((np.min(df.y)-0.05)*10)/10,
int((np.max(df.y)+0.05)*10)/10]
X = np.linspace(xrange[0], xrange[1], 200)
Y = np.linspace(yrange[0], yrange[1], 200)
self.yi, self.xi = np.mgrid[yrange[0]:yrange[1]:200j, xrange[0]:xrange[1]:200j]
#self.xi,self.yi = np.mgrid[0:0.2:400j,-0.15:0.15:400j]
z = df.z/np.max(df.z)
z = np.asarray(z.tolist())
self.zi = griddata((df.x, df.y), z, (self.xi, self.yi), method='cubic') | python | def fit(self, SF, x_range, y_range, matrix_z):
'''
#=================================================
/the main fitting process
/xx,yy,zz = Hb,Ha,p
/p is the FORC distribution
/m0,n0 is the index of values on Ha = Hb
/then loop m0 and n0
/based on smooth factor(SF)
/select data grid from the matrix_z for curve fitting
#=================================================
'''
xx, yy, zz = [], [], []
m0, n0 = [], []
for m, n in itertools.product(np.arange(0, len(x_range), step=SF), np.arange(0, len(y_range), step=SF)):
if x_range[m] > y_range[n]: # Ha nearly equal Hb
m0.append(m)
n0.append(n)
aa, bb, cc = [], [], []
for m, n in zip(m0, n0):
s = 0
try:
grid_data = []
a_ = x_range[m+s]
b_ = y_range[n-s]
for i, j in itertools.product(np.arange(3*SF+1), np.arange(3*SF+1)):
try:
grid_data.append(
[x_range[m+s+i], y_range[n-s-j], matrix_z.item(n-s-j, m+s+i)])
except:
try:
for i, j in itertools.product(np.arange(3), np.arange(3)):
grid_data.append(
[x_range[m+i], y_range[n-j], matrix_z.item(n-j, m+i)])
except:
pass
# print(grid_data)
'''
#=================================================
/when SF = n
/data grid as (2*n+1)x(2*n+1)
/grid_list: convert grid to list
/every grid produce on FORC distritution p
/the poly fitting use d2_func
#=================================================
'''
x, y, z = grid_list(grid_data)
try:
p = d2_func(x, y, z)
# print(p)
xx.append((a_-b_)/2)
yy.append((a_+b_)/2)
zz.append(p)
except Exception as e:
# print(e)
pass
except:
pass
'''
#=================================================
/the data will be save as pandas dataframe
/all the data with nan values will be delete be dropna()
#=================================================
'''
# print(zz)
df = pd.DataFrame({'x': xx, 'y': yy, 'z': zz})
#df = df.replace(0,np.nan)
df = df.dropna()
'''
#=================================================
/due to the space near Bc = zero
/the Bi values when Bc <0.003 will be mirrored to -Bc
#=================================================
'''
df_negative = df[(df.x < 0.03)].copy()
df_negative.x = df_negative.x*-1
df = df.append(df_negative)
df = df.drop_duplicates(['x', 'y'])
df = df.sort_values('x')
# plt.scatter(df.x,df.y,c=df.z)
# plt.show()
'''
#=================================================
/reset the Bc and Bi range by X,Y
/use linear interpolate to obtain FORC distribution
#=================================================
'''
xrange = [0, int((np.max(df.x)+0.05)*10)/10]
yrange = [int((np.min(df.y)-0.05)*10)/10,
int((np.max(df.y)+0.05)*10)/10]
X = np.linspace(xrange[0], xrange[1], 200)
Y = np.linspace(yrange[0], yrange[1], 200)
self.yi, self.xi = np.mgrid[yrange[0]:yrange[1]:200j, xrange[0]:xrange[1]:200j]
#self.xi,self.yi = np.mgrid[0:0.2:400j,-0.15:0.15:400j]
z = df.z/np.max(df.z)
z = np.asarray(z.tolist())
self.zi = griddata((df.x, df.y), z, (self.xi, self.yi), method='cubic') | #=================================================
/the main fitting process
/xx,yy,zz = Hb,Ha,p
/p is the FORC distribution
/m0,n0 is the index of values on Ha = Hb
/then loop m0 and n0
/based on smooth factor(SF)
/select data grid from the matrix_z for curve fitting
#================================================= | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/forc_diagram.py#L46-L145 |
PmagPy/PmagPy | programs/forc_diagram.py | dataLoad.rawData | def rawData(self, fileAdres=None):
# skip skiprows
skiprows = None
skip_from = [b'Field', b'Moment']
with open(fileAdres, 'rb') as fr:
#f = fr.read()
for i, line in enumerate(fr, 1):
# print(line.split())
if skip_from == line.split():
skiprows = i+2
break
# else:
# print('file format wrong, cannot find the data row.')
skiprows = 34 if skiprows == None else skiprows
df = pd.read_csv(fileAdres, skiprows=skiprows, sep='\s+',
delimiter=',', names=['H', 'M'], skipfooter=1,
engine='python')
H = df.H # measured field
M = df.M # measured magnetic moment
'''
#=================================================
/datainterval_H/_M
/slice the measured data into pieces
/for every measured FORC
#=================================================
'''
dataInterval_H = []
dataInterval_M = []
# print(H)
cretia = df.H.mean() # edge of linear programing for selecting data
H0 = df.H.max() # the maximum field
self.x, self.y, self.z = [[], [], []]
for i in np.arange(1, len(H)):
dataInterval_H.append(H[i])
dataInterval_M.append(M[i])
if abs(H[i]-H0) <= 0.001: # when the filed reach the max, a new forc
if len(dataInterval_H) >= 0 and len(dataInterval_H) <= 200:
# print(dataInterval_H)
Ha = dataInterval_H[0]
dataInterval_H.pop(-1)
dataInterval_M.pop(-1)
Hb = dataInterval_H[1:-1]
Hm = dataInterval_M[1:-1]
for t in np.arange(len(Hb)):
self.x.append(Hb[t])
self.y.append(Ha)
self.z.append(Hm[t])
# print(Ha)
dataInterval_H = []
dataInterval_M = []
self.rawdf = df
'''
#=================================================
transfer the data set to matrix as len(x)*len(y) with z value
/mesh up the rawdata
/select the data area by X,Y ranges
/obtain regular spaced data potins by np.linspace
/use interplote to caculate the Hm values
/loop Ha(Y),Hb(X)
/fill every position with Hm, else with np.nan
#=================================================
'''
self.z = self.z/np.max(self.z)
# print(int(np.min(self.x)*100)/100,np.max(self.x))
xrange = [int((np.min(self.x)-0.1)*10)/10,
int((np.max(self.x)+0.1)*10)/10]
yrange = [int((np.min(self.y)-0.1)*10)/10,
int((np.max(self.y)+0.1)*10)/10]
X = np.linspace(xrange[0], xrange[1], 200)
Y = np.linspace(yrange[0], yrange[1], 200)
yi, xi = np.mgrid[yrange[0]:yrange[1]:200j, xrange[0]:xrange[1]:200j]
#X = np.linspace(-0.2,0.3,200)
#Y = np.linspace(-0.2,0.3,200)
#xi,yi = np.mgrid[-0.2:0.3:200j,-0.2:0.3:200j]
zi = griddata((self.x, self.y), self.z, (xi, yi),
method='linear') # !!! must linear
self.matrix_z = zi
self.x_range = X
self.y_range = Y | python | def rawData(self, fileAdres=None):
# skip skiprows
skiprows = None
skip_from = [b'Field', b'Moment']
with open(fileAdres, 'rb') as fr:
#f = fr.read()
for i, line in enumerate(fr, 1):
# print(line.split())
if skip_from == line.split():
skiprows = i+2
break
# else:
# print('file format wrong, cannot find the data row.')
skiprows = 34 if skiprows == None else skiprows
df = pd.read_csv(fileAdres, skiprows=skiprows, sep='\s+',
delimiter=',', names=['H', 'M'], skipfooter=1,
engine='python')
H = df.H # measured field
M = df.M # measured magnetic moment
'''
#=================================================
/datainterval_H/_M
/slice the measured data into pieces
/for every measured FORC
#=================================================
'''
dataInterval_H = []
dataInterval_M = []
# print(H)
cretia = df.H.mean() # edge of linear programing for selecting data
H0 = df.H.max() # the maximum field
self.x, self.y, self.z = [[], [], []]
for i in np.arange(1, len(H)):
dataInterval_H.append(H[i])
dataInterval_M.append(M[i])
if abs(H[i]-H0) <= 0.001: # when the filed reach the max, a new forc
if len(dataInterval_H) >= 0 and len(dataInterval_H) <= 200:
# print(dataInterval_H)
Ha = dataInterval_H[0]
dataInterval_H.pop(-1)
dataInterval_M.pop(-1)
Hb = dataInterval_H[1:-1]
Hm = dataInterval_M[1:-1]
for t in np.arange(len(Hb)):
self.x.append(Hb[t])
self.y.append(Ha)
self.z.append(Hm[t])
# print(Ha)
dataInterval_H = []
dataInterval_M = []
self.rawdf = df
'''
#=================================================
transfer the data set to matrix as len(x)*len(y) with z value
/mesh up the rawdata
/select the data area by X,Y ranges
/obtain regular spaced data potins by np.linspace
/use interplote to caculate the Hm values
/loop Ha(Y),Hb(X)
/fill every position with Hm, else with np.nan
#=================================================
'''
self.z = self.z/np.max(self.z)
# print(int(np.min(self.x)*100)/100,np.max(self.x))
xrange = [int((np.min(self.x)-0.1)*10)/10,
int((np.max(self.x)+0.1)*10)/10]
yrange = [int((np.min(self.y)-0.1)*10)/10,
int((np.max(self.y)+0.1)*10)/10]
X = np.linspace(xrange[0], xrange[1], 200)
Y = np.linspace(yrange[0], yrange[1], 200)
yi, xi = np.mgrid[yrange[0]:yrange[1]:200j, xrange[0]:xrange[1]:200j]
#X = np.linspace(-0.2,0.3,200)
#Y = np.linspace(-0.2,0.3,200)
#xi,yi = np.mgrid[-0.2:0.3:200j,-0.2:0.3:200j]
zi = griddata((self.x, self.y), self.z, (xi, yi),
method='linear') # !!! must linear
self.matrix_z = zi
self.x_range = X
self.y_range = Y | #=================================================
/datainterval_H/_M
/slice the measured data into pieces
/for every measured FORC
#================================================= | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/forc_diagram.py#L187-L268 |
PmagPy/PmagPy | pmagpy/validate_upload2.py | get_data_model | def get_data_model():
"""
try to grab the up to date data model document from the EarthRef site.
if that fails, try to get the data model document from the PmagPy directory on the user's computer.
if that fails, return False.
data_model is a set of nested dictionaries that looks like this:
{'magic_contributions':
{'group_userid': {'data_status': 'Optional', 'data_type': 'String(10)'}, 'activate': {'data_status': 'Optional', 'data_type': 'String(1)'}, ....},
'er_synthetics':
{'synthetic_type': {'data_status': 'Required', 'data_type': 'String(50)'}, 'er_citation_names': {'data_status': 'Required', 'data_type': 'List(500)'}, ...},
....
}
the top level keys are the file types.
the second level keys are the possible headers for that file type.
the third level keys are data_type and data_status for that header.
"""
#print("-I- getting data model, please be patient!!!!")
url = 'http://earthref.org/services/MagIC-data-model.txt'
offline = True # always get cached data model, as 2.5 is now static
#try:
# data = urllib2.urlopen(url)
#except urllib2.URLError:
# print '-W- Unable to fetch data model online\nTrying to use cached data model instead'
# offline = True
#except httplib.BadStatusLine:
# print '-W- Website: {} not responding\nTrying to use cached data model instead'.format(url)
# offline = True
if offline:
data = get_data_offline()
data_model, file_type = pmag.magic_read(None, data)
if file_type in ('bad file', 'empty_file'):
print('-W- Unable to read online data model.\nTrying to use cached data model instead')
data = get_data_offline()
data_model, file_type = pmag.magic_read(None, data)
ref_dicts = [d for d in data_model if d['column_nmb'] != '>>>>>>>>>>']
file_types = [d['field_name'] for d in data_model if d['column_nmb'] == 'tab delimited']
file_types.insert(0, file_type)
complete_ref = {}
dictionary = {}
n = 0
for d in ref_dicts:
if d['field_name'] in file_types:
complete_ref[file_types[n]] = dictionary
n += 1
dictionary = {}
else:
dictionary[d['field_name_oracle']] = {'data_type': d['data_type'], 'data_status': d['data_status']}
return complete_ref | python | def get_data_model():
"""
try to grab the up to date data model document from the EarthRef site.
if that fails, try to get the data model document from the PmagPy directory on the user's computer.
if that fails, return False.
data_model is a set of nested dictionaries that looks like this:
{'magic_contributions':
{'group_userid': {'data_status': 'Optional', 'data_type': 'String(10)'}, 'activate': {'data_status': 'Optional', 'data_type': 'String(1)'}, ....},
'er_synthetics':
{'synthetic_type': {'data_status': 'Required', 'data_type': 'String(50)'}, 'er_citation_names': {'data_status': 'Required', 'data_type': 'List(500)'}, ...},
....
}
the top level keys are the file types.
the second level keys are the possible headers for that file type.
the third level keys are data_type and data_status for that header.
"""
#print("-I- getting data model, please be patient!!!!")
url = 'http://earthref.org/services/MagIC-data-model.txt'
offline = True # always get cached data model, as 2.5 is now static
#try:
# data = urllib2.urlopen(url)
#except urllib2.URLError:
# print '-W- Unable to fetch data model online\nTrying to use cached data model instead'
# offline = True
#except httplib.BadStatusLine:
# print '-W- Website: {} not responding\nTrying to use cached data model instead'.format(url)
# offline = True
if offline:
data = get_data_offline()
data_model, file_type = pmag.magic_read(None, data)
if file_type in ('bad file', 'empty_file'):
print('-W- Unable to read online data model.\nTrying to use cached data model instead')
data = get_data_offline()
data_model, file_type = pmag.magic_read(None, data)
ref_dicts = [d for d in data_model if d['column_nmb'] != '>>>>>>>>>>']
file_types = [d['field_name'] for d in data_model if d['column_nmb'] == 'tab delimited']
file_types.insert(0, file_type)
complete_ref = {}
dictionary = {}
n = 0
for d in ref_dicts:
if d['field_name'] in file_types:
complete_ref[file_types[n]] = dictionary
n += 1
dictionary = {}
else:
dictionary[d['field_name_oracle']] = {'data_type': d['data_type'], 'data_status': d['data_status']}
return complete_ref | try to grab the up to date data model document from the EarthRef site.
if that fails, try to get the data model document from the PmagPy directory on the user's computer.
if that fails, return False.
data_model is a set of nested dictionaries that looks like this:
{'magic_contributions':
{'group_userid': {'data_status': 'Optional', 'data_type': 'String(10)'}, 'activate': {'data_status': 'Optional', 'data_type': 'String(1)'}, ....},
'er_synthetics':
{'synthetic_type': {'data_status': 'Required', 'data_type': 'String(50)'}, 'er_citation_names': {'data_status': 'Required', 'data_type': 'List(500)'}, ...},
....
}
the top level keys are the file types.
the second level keys are the possible headers for that file type.
the third level keys are data_type and data_status for that header. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/validate_upload2.py#L27-L75 |
PmagPy/PmagPy | pmagpy/validate_upload2.py | read_upload | def read_upload(up_file, data_model=None):
"""
take a file that should be ready for upload
using the data model, check that all required columns are full,
and that all numeric data is in fact numeric.
print out warnings for any validation problems
return True if there were no problems, otherwise return False
"""
print("-I- Running validation for your upload file")
## Read file
f = open(up_file)
lines = f.readlines()
f.close()
data = split_lines(lines)
data_dicts = get_dicts(data)
## initialize
invalid_data = {}
missing_data = {}
non_numeric = {}
bad_vocab = {}
bad_coords = {}
invalid_col_names = {}
missing_file_type = False
## make sure you have the data model
if not data_model:
data_model = get_data_model()
reqd_file_types = ['er_locations']
provided_file_types = set()
if not data_model:
return False, None
## Iterate through data
# each dictionary is one tab delimited line in a csv file
for dictionary in data_dicts:
for k, v in list(dictionary.items()):
if k == "file_type": # meta data
provided_file_types.add(v)
continue
file_type = dictionary['file_type']
# need to deal with pmag_criteria type file, too
item_type = file_type.split('_')[1][:-1]
if item_type == 'criteria':
item_name = dictionary.get('criteria_definition')
elif item_type == 'result':
item_name = dictionary.get('pmag_result_name', None)
elif item_type in ('specimen', 'sample', 'site', 'location'):
item_name = dictionary.get('er_' + item_type + '_name', None)
elif item_type == 'age':
# get the lowest level er_*_name column that is filled in
for dtype in ('specimen', 'sample', 'site', 'location'):
item_name = dictionary.get('er_' + dtype + '_name', None)
if item_name:
break
elif item_type == 'measurement':
exp_name = dictionary.get('magic_experiment_name')
meas_num = dictionary.get('measurement_number')
item_name = exp_name + '_' + str(meas_num)
else:
item_name = None
if file_type not in list(data_model.keys()):
continue
specific_data_model = data_model[file_type]
## Function for building problems list
def add_to_invalid_data(item_name, item_type, invalid_data,
validation, problem_type):
"""
correctly create or add to the dictionary of invalid values
"""
if item_name:
if item_type not in invalid_data:
invalid_data[item_type] = {}
if item_name not in invalid_data[item_type]:
invalid_data[item_type][item_name] = {}
if problem_type not in invalid_data[item_type][item_name]:
invalid_data[item_type][item_name][problem_type] = []
invalid_data[item_type][item_name][problem_type].append(validation)
## Validate for each problem type
# check if column header is in the data model
invalid_col_name = validate_for_recognized_column(k, v, specific_data_model)
if invalid_col_name:
if item_type not in list(invalid_col_names.keys()):
invalid_col_names[item_type] = set()
invalid_col_names[item_type].add(invalid_col_name)
# skip to next item, as additional validations won't work
# (key is not in the data model)
## new style
add_to_invalid_data(item_name, item_type, invalid_data,
invalid_col_name, 'invalid_col')
# skip to next item, as additional validations won't work
# (key is not in the data model)
continue
# make a list of missing, required data
missing_item = validate_for_presence(k, v, specific_data_model)
#print 'k, v', k, v
if missing_item:
if item_type not in list(missing_data.keys()):
missing_data[item_type] = set()
missing_data[item_type].add(missing_item)
if item_name:
# don't double count if a site is missing its parent location
if item_type == 'age' and missing_item == 'er_location_name':
pass
# ignore er_synthetic_name (data model is incorrect here)
if missing_item == 'er_synthetic_name':
pass
else:
add_to_invalid_data(item_name, item_type, invalid_data,
missing_item, 'missing_data')
# vocabulary problems
vocab_problem = validate_for_controlled_vocab(k, v, specific_data_model)
if vocab_problem:
if item_type not in list(bad_vocab.keys()):
bad_vocab[item_type] = set()
bad_vocab[item_type].add(vocab_problem)
add_to_invalid_data(item_name, item_type, invalid_data,
vocab_problem, 'vocab_problem')
# illegal coordinates
coord_problem = validate_for_coordinates(k, v, specific_data_model)
if coord_problem:
if item_type not in list(bad_coords.keys()):
bad_coords[item_type] = set()
bad_coords[item_type].add(coord_problem)
add_to_invalid_data(item_name, item_type, invalid_data,
coord_problem, 'coordinates')
# make a list of data that should be numeric, but aren't
number_fail = validate_for_numericality(k, v, specific_data_model)
if number_fail:
if item_type not in list(non_numeric.keys()):
non_numeric[item_type] = set()
non_numeric[item_type].add(number_fail)
add_to_invalid_data(item_name, item_type, invalid_data,
number_fail, 'number_fail')
## Print out all issues
for file_type, invalid_names in list(invalid_col_names.items()):
print("-W- In your {} file, you are using the following unrecognized columns: {}".format(file_type, ', '.join(invalid_names)))
for file_type, wrong_cols in list(non_numeric.items()):
print("-W- In your {} file, you must provide only valid numbers, in the following columns: {}".format(file_type, ', '.join(wrong_cols)))
for file_type, empty_cols in list(missing_data.items()):
print("-W- In your {} file, you are missing data in the following required columns: {}".format(file_type, ', '.join(empty_cols)))
for file_type in reqd_file_types:
if file_type not in provided_file_types:
print("-W- You have not provided a(n) {} type file, which is required data".format(file_type))
missing_file_type = True
for file_type, vocab_types in list(bad_vocab.items()):
print("-W- In your {} file, you are using an unrecognized value for these controlled vocabularies: {}".format(file_type, ', '.join(vocab_types)))
for file_type, coords in list(bad_coords.items()):
print("-W- In your {} file, you are using an illegal value for these columns: {}. (Latitude must be between -90 and +90)".format(file_type, ', '.join(coords)))
if any((invalid_col_names, non_numeric, missing_data, missing_file_type, bad_vocab, bad_coords)):
return False, invalid_data
else:
print("-I- validation was successful")
return True, None | python | def read_upload(up_file, data_model=None):
"""
take a file that should be ready for upload
using the data model, check that all required columns are full,
and that all numeric data is in fact numeric.
print out warnings for any validation problems
return True if there were no problems, otherwise return False
"""
print("-I- Running validation for your upload file")
## Read file
f = open(up_file)
lines = f.readlines()
f.close()
data = split_lines(lines)
data_dicts = get_dicts(data)
## initialize
invalid_data = {}
missing_data = {}
non_numeric = {}
bad_vocab = {}
bad_coords = {}
invalid_col_names = {}
missing_file_type = False
## make sure you have the data model
if not data_model:
data_model = get_data_model()
reqd_file_types = ['er_locations']
provided_file_types = set()
if not data_model:
return False, None
## Iterate through data
# each dictionary is one tab delimited line in a csv file
for dictionary in data_dicts:
for k, v in list(dictionary.items()):
if k == "file_type": # meta data
provided_file_types.add(v)
continue
file_type = dictionary['file_type']
# need to deal with pmag_criteria type file, too
item_type = file_type.split('_')[1][:-1]
if item_type == 'criteria':
item_name = dictionary.get('criteria_definition')
elif item_type == 'result':
item_name = dictionary.get('pmag_result_name', None)
elif item_type in ('specimen', 'sample', 'site', 'location'):
item_name = dictionary.get('er_' + item_type + '_name', None)
elif item_type == 'age':
# get the lowest level er_*_name column that is filled in
for dtype in ('specimen', 'sample', 'site', 'location'):
item_name = dictionary.get('er_' + dtype + '_name', None)
if item_name:
break
elif item_type == 'measurement':
exp_name = dictionary.get('magic_experiment_name')
meas_num = dictionary.get('measurement_number')
item_name = exp_name + '_' + str(meas_num)
else:
item_name = None
if file_type not in list(data_model.keys()):
continue
specific_data_model = data_model[file_type]
## Function for building problems list
def add_to_invalid_data(item_name, item_type, invalid_data,
validation, problem_type):
"""
correctly create or add to the dictionary of invalid values
"""
if item_name:
if item_type not in invalid_data:
invalid_data[item_type] = {}
if item_name not in invalid_data[item_type]:
invalid_data[item_type][item_name] = {}
if problem_type not in invalid_data[item_type][item_name]:
invalid_data[item_type][item_name][problem_type] = []
invalid_data[item_type][item_name][problem_type].append(validation)
## Validate for each problem type
# check if column header is in the data model
invalid_col_name = validate_for_recognized_column(k, v, specific_data_model)
if invalid_col_name:
if item_type not in list(invalid_col_names.keys()):
invalid_col_names[item_type] = set()
invalid_col_names[item_type].add(invalid_col_name)
# skip to next item, as additional validations won't work
# (key is not in the data model)
## new style
add_to_invalid_data(item_name, item_type, invalid_data,
invalid_col_name, 'invalid_col')
# skip to next item, as additional validations won't work
# (key is not in the data model)
continue
# make a list of missing, required data
missing_item = validate_for_presence(k, v, specific_data_model)
#print 'k, v', k, v
if missing_item:
if item_type not in list(missing_data.keys()):
missing_data[item_type] = set()
missing_data[item_type].add(missing_item)
if item_name:
# don't double count if a site is missing its parent location
if item_type == 'age' and missing_item == 'er_location_name':
pass
# ignore er_synthetic_name (data model is incorrect here)
if missing_item == 'er_synthetic_name':
pass
else:
add_to_invalid_data(item_name, item_type, invalid_data,
missing_item, 'missing_data')
# vocabulary problems
vocab_problem = validate_for_controlled_vocab(k, v, specific_data_model)
if vocab_problem:
if item_type not in list(bad_vocab.keys()):
bad_vocab[item_type] = set()
bad_vocab[item_type].add(vocab_problem)
add_to_invalid_data(item_name, item_type, invalid_data,
vocab_problem, 'vocab_problem')
# illegal coordinates
coord_problem = validate_for_coordinates(k, v, specific_data_model)
if coord_problem:
if item_type not in list(bad_coords.keys()):
bad_coords[item_type] = set()
bad_coords[item_type].add(coord_problem)
add_to_invalid_data(item_name, item_type, invalid_data,
coord_problem, 'coordinates')
# make a list of data that should be numeric, but aren't
number_fail = validate_for_numericality(k, v, specific_data_model)
if number_fail:
if item_type not in list(non_numeric.keys()):
non_numeric[item_type] = set()
non_numeric[item_type].add(number_fail)
add_to_invalid_data(item_name, item_type, invalid_data,
number_fail, 'number_fail')
## Print out all issues
for file_type, invalid_names in list(invalid_col_names.items()):
print("-W- In your {} file, you are using the following unrecognized columns: {}".format(file_type, ', '.join(invalid_names)))
for file_type, wrong_cols in list(non_numeric.items()):
print("-W- In your {} file, you must provide only valid numbers, in the following columns: {}".format(file_type, ', '.join(wrong_cols)))
for file_type, empty_cols in list(missing_data.items()):
print("-W- In your {} file, you are missing data in the following required columns: {}".format(file_type, ', '.join(empty_cols)))
for file_type in reqd_file_types:
if file_type not in provided_file_types:
print("-W- You have not provided a(n) {} type file, which is required data".format(file_type))
missing_file_type = True
for file_type, vocab_types in list(bad_vocab.items()):
print("-W- In your {} file, you are using an unrecognized value for these controlled vocabularies: {}".format(file_type, ', '.join(vocab_types)))
for file_type, coords in list(bad_coords.items()):
print("-W- In your {} file, you are using an illegal value for these columns: {}. (Latitude must be between -90 and +90)".format(file_type, ', '.join(coords)))
if any((invalid_col_names, non_numeric, missing_data, missing_file_type, bad_vocab, bad_coords)):
return False, invalid_data
else:
print("-I- validation was successful")
return True, None | take a file that should be ready for upload
using the data model, check that all required columns are full,
and that all numeric data is in fact numeric.
print out warnings for any validation problems
return True if there were no problems, otherwise return False | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/validate_upload2.py#L78-L247 |
PmagPy/PmagPy | pmagpy/validate_upload2.py | split_lines | def split_lines(lines):
"""
split a MagIC upload format file into lists.
the lists are split by the '>>>' lines between file_types.
"""
container = []
new_list = []
for line in lines:
if '>>>' in line:
container.append(new_list)
new_list = []
else:
new_list.append(line)
container.append(new_list)
return container | python | def split_lines(lines):
"""
split a MagIC upload format file into lists.
the lists are split by the '>>>' lines between file_types.
"""
container = []
new_list = []
for line in lines:
if '>>>' in line:
container.append(new_list)
new_list = []
else:
new_list.append(line)
container.append(new_list)
return container | split a MagIC upload format file into lists.
the lists are split by the '>>>' lines between file_types. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/validate_upload2.py#L250-L264 |
PmagPy/PmagPy | pmagpy/validate_upload2.py | get_dicts | def get_dicts(data):
"""
data must be a list of lists, from a tab delimited file.
in each list:
the first list item will be the type of data.
the second list item will be a tab delimited list of headers.
the remaining items will be a tab delimited list following the list of headers.
"""
data_dictionaries = []
for chunk in data[:-1]:
if not chunk:
continue
data1 = data[0]
file_type = chunk[0].split('\t')[1].strip('\n').strip('\r')
keys = chunk[1].split('\t')
clean_keys = []
# remove new-line characters, and any empty string keys
for key in keys:
clean_key = key.strip('\n').strip('\r')
if clean_key:
clean_keys.append(clean_key)
for line in chunk[2:]:
data_dict = {}
for key in clean_keys:
data_dict[key] = ""
line = line.split('\t')
for n, key in enumerate(clean_keys):
data_dict[key] = line[n].strip('\n').strip('\r')
data_dict['file_type'] = file_type
data_dictionaries.append(data_dict)
return data_dictionaries | python | def get_dicts(data):
"""
data must be a list of lists, from a tab delimited file.
in each list:
the first list item will be the type of data.
the second list item will be a tab delimited list of headers.
the remaining items will be a tab delimited list following the list of headers.
"""
data_dictionaries = []
for chunk in data[:-1]:
if not chunk:
continue
data1 = data[0]
file_type = chunk[0].split('\t')[1].strip('\n').strip('\r')
keys = chunk[1].split('\t')
clean_keys = []
# remove new-line characters, and any empty string keys
for key in keys:
clean_key = key.strip('\n').strip('\r')
if clean_key:
clean_keys.append(clean_key)
for line in chunk[2:]:
data_dict = {}
for key in clean_keys:
data_dict[key] = ""
line = line.split('\t')
for n, key in enumerate(clean_keys):
data_dict[key] = line[n].strip('\n').strip('\r')
data_dict['file_type'] = file_type
data_dictionaries.append(data_dict)
return data_dictionaries | data must be a list of lists, from a tab delimited file.
in each list:
the first list item will be the type of data.
the second list item will be a tab delimited list of headers.
the remaining items will be a tab delimited list following the list of headers. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/validate_upload2.py#L267-L298 |
PmagPy/PmagPy | programs/lsq_redo.py | main | def main():
"""
NAME
lsq_redo.py
DESCRIPTION
converts a tab delimited LSQ format to PmagPy redo file and edits the magic_measurements table to mark "bad" measurements.
SYNTAX
lsq_redo.py [-h] [command line options]
OPTIONS
-h: prints help message and quits
-f FILE: specify LSQ input file
-fm MFILE: specify measurements file for editting, default is
magic_measurements.txt
-F FILE: specify output file, default is 'zeq_redo'
"""
letters=string.ascii_uppercase
for l in string.ascii_lowercase: letters=letters+l
dir_path='.'
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
inspec=dir_path+'/'+sys.argv[ind+1]
else:
zfile=dir_path+'/zeq_redo'
if '-fm' in sys.argv:
ind=sys.argv.index('-f')
meas_file=dir_path+'/'+sys.argv[ind+1]
else:
meas_file=dir_path+'/magic_measurements.txt'
if '-F' in sys.argv:
ind=sys.argv.index('-F')
zfile=dir_path+'/'+sys.argv[ind+1]
else:
zfile=dir_path+'/zeq_redo'
try:
open(meas_file,"r")
meas_data,file_type=pmag.magic_read(meas_file)
except IOError:
print(main.__doc__)
print("""You must have a valid measurements file prior to converting
this LSQ file""")
sys.exit()
zredo=open(zfile,"w")
MeasRecs=[]
#
# read in LSQ file
#
specs,MeasOuts=[],[]
prior_spec_data=open(inspec,'r').readlines()
for line in prior_spec_data:
if len(line)<2:
sys.exit()
# spec=line[0:14].strip().replace(" ","") # get out the specimen name = collapsing spaces
# rec=line[14:].split() # split up the rest of the line
rec=line.split('\t')
spec=rec[0].lower()
specs.append(spec)
comp_name=rec[2] # assign component name
calculation_type="DE-FM"
if rec[1][0]=="L":
calculation_type="DE-BFL" # best-fit line
else:
calculation_type="DE-BFP" # best-fit line
lists=rec[7].split('-') # get list of data used
incl=[]
for l in lists[0]:
incl.append(letters.index(l))
for l in letters[letters.index(lists[0][-1])+1:letters.index(lists[1][0])]:
incl.append(letters.index(l)) # add in the in between parts
for l in lists[1]:
incl.append(letters.index(l))
if len(lists)>2:
for l in letters[letters.index(lists[1][-1])+1:letters.index(lists[2][0])]:
incl.append(letters.index(l)) # add in the in between parts
for l in lists[2]:
incl.append(letters.index(l))
# now find all the data for this specimen in measurements
datablock,min,max=[],"",""
demag='N'
for s in meas_data:
if s['er_specimen_name'].lower()==spec.lower():
meths=s['magic_method_codes'].replace(" ","").split(":")
if 'LT-NO' in meths or 'LT-AF-Z' in meths or 'LT-T-Z' in meths:
datablock.append(s)
if len(datablock)>0:
for t in datablock:print(t['magic_method_codes'])
incl_int=len(incl)
while incl[-1]>len(datablock)-1:
del incl[-1] # don't include measurements beyond what is in file
if len(incl)!=incl_int:
'converting calculation type to best-fit line'
meths0= datablock[incl[0]]['magic_method_codes'].replace(" ","").split(':')
meths1= datablock[incl[-1]]['magic_method_codes'].replace(" ","").split(':')
H0=datablock[incl[0]]['treatment_ac_field']
T0=datablock[incl[0]]['treatment_temp']
H1=datablock[incl[-1]]['treatment_ac_field']
T1=datablock[incl[-1]]['treatment_temp']
if 'LT-T-Z' in meths1:
max=T1
demag="T"
elif 'LT-AF-Z' in meths1:
demag="AF"
max=H1
if 'LT-NO' in meths0:
if demag=='T':
min=273
else:
min=0
elif 'LT-T-Z' in meths0:
min=T0
else:
min=H0
for ind in range(incl[0]):
MeasRecs.append(datablock[ind])
for ind in range(incl[0],incl[-1]):
if ind not in incl: # datapoint not used in calculation
datablock[ind]['measurement_flag']='b'
MeasRecs.append(datablock[ind])
for ind in range(incl[-1],len(datablock)):
MeasRecs.append(datablock[ind])
outstring='%s %s %s %s %s \n'%(spec,calculation_type,min,max,comp_name)
zredo.write(outstring)
for s in meas_data: # collect the rest of the measurement data not already included
if s['er_specimen_name'] not in specs:
MeasRecs.append(s)
pmag.magic_write(meas_file,MeasRecs,'magic_measurements') | python | def main():
"""
NAME
lsq_redo.py
DESCRIPTION
converts a tab delimited LSQ format to PmagPy redo file and edits the magic_measurements table to mark "bad" measurements.
SYNTAX
lsq_redo.py [-h] [command line options]
OPTIONS
-h: prints help message and quits
-f FILE: specify LSQ input file
-fm MFILE: specify measurements file for editting, default is
magic_measurements.txt
-F FILE: specify output file, default is 'zeq_redo'
"""
letters=string.ascii_uppercase
for l in string.ascii_lowercase: letters=letters+l
dir_path='.'
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
inspec=dir_path+'/'+sys.argv[ind+1]
else:
zfile=dir_path+'/zeq_redo'
if '-fm' in sys.argv:
ind=sys.argv.index('-f')
meas_file=dir_path+'/'+sys.argv[ind+1]
else:
meas_file=dir_path+'/magic_measurements.txt'
if '-F' in sys.argv:
ind=sys.argv.index('-F')
zfile=dir_path+'/'+sys.argv[ind+1]
else:
zfile=dir_path+'/zeq_redo'
try:
open(meas_file,"r")
meas_data,file_type=pmag.magic_read(meas_file)
except IOError:
print(main.__doc__)
print("""You must have a valid measurements file prior to converting
this LSQ file""")
sys.exit()
zredo=open(zfile,"w")
MeasRecs=[]
#
# read in LSQ file
#
specs,MeasOuts=[],[]
prior_spec_data=open(inspec,'r').readlines()
for line in prior_spec_data:
if len(line)<2:
sys.exit()
# spec=line[0:14].strip().replace(" ","") # get out the specimen name = collapsing spaces
# rec=line[14:].split() # split up the rest of the line
rec=line.split('\t')
spec=rec[0].lower()
specs.append(spec)
comp_name=rec[2] # assign component name
calculation_type="DE-FM"
if rec[1][0]=="L":
calculation_type="DE-BFL" # best-fit line
else:
calculation_type="DE-BFP" # best-fit line
lists=rec[7].split('-') # get list of data used
incl=[]
for l in lists[0]:
incl.append(letters.index(l))
for l in letters[letters.index(lists[0][-1])+1:letters.index(lists[1][0])]:
incl.append(letters.index(l)) # add in the in between parts
for l in lists[1]:
incl.append(letters.index(l))
if len(lists)>2:
for l in letters[letters.index(lists[1][-1])+1:letters.index(lists[2][0])]:
incl.append(letters.index(l)) # add in the in between parts
for l in lists[2]:
incl.append(letters.index(l))
# now find all the data for this specimen in measurements
datablock,min,max=[],"",""
demag='N'
for s in meas_data:
if s['er_specimen_name'].lower()==spec.lower():
meths=s['magic_method_codes'].replace(" ","").split(":")
if 'LT-NO' in meths or 'LT-AF-Z' in meths or 'LT-T-Z' in meths:
datablock.append(s)
if len(datablock)>0:
for t in datablock:print(t['magic_method_codes'])
incl_int=len(incl)
while incl[-1]>len(datablock)-1:
del incl[-1] # don't include measurements beyond what is in file
if len(incl)!=incl_int:
'converting calculation type to best-fit line'
meths0= datablock[incl[0]]['magic_method_codes'].replace(" ","").split(':')
meths1= datablock[incl[-1]]['magic_method_codes'].replace(" ","").split(':')
H0=datablock[incl[0]]['treatment_ac_field']
T0=datablock[incl[0]]['treatment_temp']
H1=datablock[incl[-1]]['treatment_ac_field']
T1=datablock[incl[-1]]['treatment_temp']
if 'LT-T-Z' in meths1:
max=T1
demag="T"
elif 'LT-AF-Z' in meths1:
demag="AF"
max=H1
if 'LT-NO' in meths0:
if demag=='T':
min=273
else:
min=0
elif 'LT-T-Z' in meths0:
min=T0
else:
min=H0
for ind in range(incl[0]):
MeasRecs.append(datablock[ind])
for ind in range(incl[0],incl[-1]):
if ind not in incl: # datapoint not used in calculation
datablock[ind]['measurement_flag']='b'
MeasRecs.append(datablock[ind])
for ind in range(incl[-1],len(datablock)):
MeasRecs.append(datablock[ind])
outstring='%s %s %s %s %s \n'%(spec,calculation_type,min,max,comp_name)
zredo.write(outstring)
for s in meas_data: # collect the rest of the measurement data not already included
if s['er_specimen_name'] not in specs:
MeasRecs.append(s)
pmag.magic_write(meas_file,MeasRecs,'magic_measurements') | NAME
lsq_redo.py
DESCRIPTION
converts a tab delimited LSQ format to PmagPy redo file and edits the magic_measurements table to mark "bad" measurements.
SYNTAX
lsq_redo.py [-h] [command line options]
OPTIONS
-h: prints help message and quits
-f FILE: specify LSQ input file
-fm MFILE: specify measurements file for editting, default is
magic_measurements.txt
-F FILE: specify output file, default is 'zeq_redo' | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/lsq_redo.py#L8-L141 |
PmagPy/PmagPy | pmagpy/ipmag.py | igrf | def igrf(input_list, mod='', ghfile=""):
"""
Determine Declination, Inclination and Intensity from the IGRF model.
(http://www.ngdc.noaa.gov/IAGA/vmod/igrf.html)
Parameters
----------
input_list : list with format [Date, Altitude, Latitude, Longitude]
date must be in decimal year format XXXX.XXXX (Common Era)
mod : desired model
"" : Use the IGRF
custom : use values supplied in ghfile
or choose from this list
['arch3k','cals3k','pfm9k','hfm10k','cals10k.2','cals10k.1b']
where:
arch3k (Korte et al., 2009)
cals3k (Korte and Constable, 2011)
cals10k.1b (Korte et al., 2011)
pfm9k (Nilsson et al., 2014)
hfm10k is the hfm.OL1.A1 of Constable et al. (2016)
cals10k.2 (Constable et al., 2016)
the first four of these models, are constrained to agree
with gufm1 (Jackson et al., 2000) for the past four centuries
gh : path to file with l m g h data
Returns
-------
igrf_array : array of IGRF values (0: dec; 1: inc; 2: intensity (in nT))
Examples
--------
>>> local_field = ipmag.igrf([2013.6544, .052, 37.87, -122.27])
>>> local_field
array([ 1.39489916e+01, 6.13532008e+01, 4.87452644e+04])
>>> ipmag.igrf_print(local_field)
Declination: 13.949
Inclination: 61.353
Intensity: 48745.264 nT
"""
if ghfile != "":
lmgh = np.loadtxt(ghfile)
gh = []
lmgh = np.loadtxt(ghfile).transpose()
gh.append(lmgh[2][0])
for i in range(1, lmgh.shape[1]):
gh.append(lmgh[2][i])
gh.append(lmgh[3][i])
if len(gh) == 0:
print('no valid gh file')
return
mod = 'custom'
if mod == "":
x, y, z, f = pmag.doigrf(
input_list[3] % 360., input_list[2], input_list[1], input_list[0])
elif mod != 'custom':
x, y, z, f = pmag.doigrf(
input_list[3] % 360., input_list[2], input_list[1], input_list[0], mod=mod)
else:
x, y, z, f = pmag.docustom(
input_list[3] % 360., input_list[2], input_list[1], gh)
igrf_array = pmag.cart2dir((x, y, z))
return igrf_array | python | def igrf(input_list, mod='', ghfile=""):
"""
Determine Declination, Inclination and Intensity from the IGRF model.
(http://www.ngdc.noaa.gov/IAGA/vmod/igrf.html)
Parameters
----------
input_list : list with format [Date, Altitude, Latitude, Longitude]
date must be in decimal year format XXXX.XXXX (Common Era)
mod : desired model
"" : Use the IGRF
custom : use values supplied in ghfile
or choose from this list
['arch3k','cals3k','pfm9k','hfm10k','cals10k.2','cals10k.1b']
where:
arch3k (Korte et al., 2009)
cals3k (Korte and Constable, 2011)
cals10k.1b (Korte et al., 2011)
pfm9k (Nilsson et al., 2014)
hfm10k is the hfm.OL1.A1 of Constable et al. (2016)
cals10k.2 (Constable et al., 2016)
the first four of these models, are constrained to agree
with gufm1 (Jackson et al., 2000) for the past four centuries
gh : path to file with l m g h data
Returns
-------
igrf_array : array of IGRF values (0: dec; 1: inc; 2: intensity (in nT))
Examples
--------
>>> local_field = ipmag.igrf([2013.6544, .052, 37.87, -122.27])
>>> local_field
array([ 1.39489916e+01, 6.13532008e+01, 4.87452644e+04])
>>> ipmag.igrf_print(local_field)
Declination: 13.949
Inclination: 61.353
Intensity: 48745.264 nT
"""
if ghfile != "":
lmgh = np.loadtxt(ghfile)
gh = []
lmgh = np.loadtxt(ghfile).transpose()
gh.append(lmgh[2][0])
for i in range(1, lmgh.shape[1]):
gh.append(lmgh[2][i])
gh.append(lmgh[3][i])
if len(gh) == 0:
print('no valid gh file')
return
mod = 'custom'
if mod == "":
x, y, z, f = pmag.doigrf(
input_list[3] % 360., input_list[2], input_list[1], input_list[0])
elif mod != 'custom':
x, y, z, f = pmag.doigrf(
input_list[3] % 360., input_list[2], input_list[1], input_list[0], mod=mod)
else:
x, y, z, f = pmag.docustom(
input_list[3] % 360., input_list[2], input_list[1], gh)
igrf_array = pmag.cart2dir((x, y, z))
return igrf_array | Determine Declination, Inclination and Intensity from the IGRF model.
(http://www.ngdc.noaa.gov/IAGA/vmod/igrf.html)
Parameters
----------
input_list : list with format [Date, Altitude, Latitude, Longitude]
date must be in decimal year format XXXX.XXXX (Common Era)
mod : desired model
"" : Use the IGRF
custom : use values supplied in ghfile
or choose from this list
['arch3k','cals3k','pfm9k','hfm10k','cals10k.2','cals10k.1b']
where:
arch3k (Korte et al., 2009)
cals3k (Korte and Constable, 2011)
cals10k.1b (Korte et al., 2011)
pfm9k (Nilsson et al., 2014)
hfm10k is the hfm.OL1.A1 of Constable et al. (2016)
cals10k.2 (Constable et al., 2016)
the first four of these models, are constrained to agree
with gufm1 (Jackson et al., 2000) for the past four centuries
gh : path to file with l m g h data
Returns
-------
igrf_array : array of IGRF values (0: dec; 1: inc; 2: intensity (in nT))
Examples
--------
>>> local_field = ipmag.igrf([2013.6544, .052, 37.87, -122.27])
>>> local_field
array([ 1.39489916e+01, 6.13532008e+01, 4.87452644e+04])
>>> ipmag.igrf_print(local_field)
Declination: 13.949
Inclination: 61.353
Intensity: 48745.264 nT | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L35-L99 |
PmagPy/PmagPy | pmagpy/ipmag.py | dms2dd | def dms2dd(degrees, minutes, seconds):
"""
Convert latitude/longitude of a location that is in degrees, minutes, seconds to decimal degrees
Parameters
----------
degrees : degrees of latitude/longitude
minutes : minutes of latitude/longitude
seconds : seconds of latitude/longitude
Returns
-------
degrees : decimal degrees of location
Examples
--------
Convert 180 degrees 4 minutes 23 seconds to decimal degrees:
>>> ipmag.dms2dd(180,4,23)
180.07305555555556
"""
dd = float(degrees) + old_div(float(minutes), 60) + \
old_div(float(seconds), (60 * 60))
return dd | python | def dms2dd(degrees, minutes, seconds):
"""
Convert latitude/longitude of a location that is in degrees, minutes, seconds to decimal degrees
Parameters
----------
degrees : degrees of latitude/longitude
minutes : minutes of latitude/longitude
seconds : seconds of latitude/longitude
Returns
-------
degrees : decimal degrees of location
Examples
--------
Convert 180 degrees 4 minutes 23 seconds to decimal degrees:
>>> ipmag.dms2dd(180,4,23)
180.07305555555556
"""
dd = float(degrees) + old_div(float(minutes), 60) + \
old_div(float(seconds), (60 * 60))
return dd | Convert latitude/longitude of a location that is in degrees, minutes, seconds to decimal degrees
Parameters
----------
degrees : degrees of latitude/longitude
minutes : minutes of latitude/longitude
seconds : seconds of latitude/longitude
Returns
-------
degrees : decimal degrees of location
Examples
--------
Convert 180 degrees 4 minutes 23 seconds to decimal degrees:
>>> ipmag.dms2dd(180,4,23)
180.07305555555556 | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L127-L150 |
PmagPy/PmagPy | pmagpy/ipmag.py | fisher_mean | def fisher_mean(dec=None, inc=None, di_block=None):
"""
Calculates the Fisher mean and associated parameters from either a list of
declination values and a separate list of inclination values or from a
di_block (a nested list a nested list of [dec,inc,1.0]). Returns a
dictionary with the Fisher mean and statistical parameters.
Parameters
----------
dec : list of declinations or longitudes
inc : list of inclinations or latitudes
di_block : a nested list of [dec,inc,1.0]
A di_block can be provided instead of dec, inc lists in which case it
will be used. Either dec, inc lists or a di_block need to be provided.
Returns
-------
fisher_mean : dictionary containing the Fisher mean parameters
Examples
--------
Use lists of declination and inclination to calculate a Fisher mean:
>>> ipmag.fisher_mean(dec=[140,127,142,136],inc=[21,23,19,22])
{'alpha95': 7.292891411309177,
'csd': 6.4097743211340896,
'dec': 136.30838974272072,
'inc': 21.347784026899987,
'k': 159.69251473636305,
'n': 4,
'r': 3.9812138971889026}
Use a di_block to calculate a Fisher mean (will give the same output as the
example with the lists):
>>> ipmag.fisher_mean(di_block=[[140,21],[127,23],[142,19],[136,22]])
"""
if di_block is None:
di_block = make_di_block(dec, inc)
return pmag.fisher_mean(di_block)
else:
return pmag.fisher_mean(di_block) | python | def fisher_mean(dec=None, inc=None, di_block=None):
"""
Calculates the Fisher mean and associated parameters from either a list of
declination values and a separate list of inclination values or from a
di_block (a nested list a nested list of [dec,inc,1.0]). Returns a
dictionary with the Fisher mean and statistical parameters.
Parameters
----------
dec : list of declinations or longitudes
inc : list of inclinations or latitudes
di_block : a nested list of [dec,inc,1.0]
A di_block can be provided instead of dec, inc lists in which case it
will be used. Either dec, inc lists or a di_block need to be provided.
Returns
-------
fisher_mean : dictionary containing the Fisher mean parameters
Examples
--------
Use lists of declination and inclination to calculate a Fisher mean:
>>> ipmag.fisher_mean(dec=[140,127,142,136],inc=[21,23,19,22])
{'alpha95': 7.292891411309177,
'csd': 6.4097743211340896,
'dec': 136.30838974272072,
'inc': 21.347784026899987,
'k': 159.69251473636305,
'n': 4,
'r': 3.9812138971889026}
Use a di_block to calculate a Fisher mean (will give the same output as the
example with the lists):
>>> ipmag.fisher_mean(di_block=[[140,21],[127,23],[142,19],[136,22]])
"""
if di_block is None:
di_block = make_di_block(dec, inc)
return pmag.fisher_mean(di_block)
else:
return pmag.fisher_mean(di_block) | Calculates the Fisher mean and associated parameters from either a list of
declination values and a separate list of inclination values or from a
di_block (a nested list a nested list of [dec,inc,1.0]). Returns a
dictionary with the Fisher mean and statistical parameters.
Parameters
----------
dec : list of declinations or longitudes
inc : list of inclinations or latitudes
di_block : a nested list of [dec,inc,1.0]
A di_block can be provided instead of dec, inc lists in which case it
will be used. Either dec, inc lists or a di_block need to be provided.
Returns
-------
fisher_mean : dictionary containing the Fisher mean parameters
Examples
--------
Use lists of declination and inclination to calculate a Fisher mean:
>>> ipmag.fisher_mean(dec=[140,127,142,136],inc=[21,23,19,22])
{'alpha95': 7.292891411309177,
'csd': 6.4097743211340896,
'dec': 136.30838974272072,
'inc': 21.347784026899987,
'k': 159.69251473636305,
'n': 4,
'r': 3.9812138971889026}
Use a di_block to calculate a Fisher mean (will give the same output as the
example with the lists):
>>> ipmag.fisher_mean(di_block=[[140,21],[127,23],[142,19],[136,22]]) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L153-L194 |
PmagPy/PmagPy | pmagpy/ipmag.py | fisher_angular_deviation | def fisher_angular_deviation(dec=None, inc=None, di_block=None, confidence=95):
'''
The angle from the true mean within which a chosen percentage of directions
lie can be calculated from the Fisher distribution. This function uses the
calculated Fisher concentration parameter to estimate this angle from
directional data. The 63 percent confidence interval is often called the
angular standard deviation.
Parameters
----------
dec : list of declinations or longitudes
inc : list of inclinations or latitudes
di_block : a nested list of [dec,inc,1.0]
A di_block can be provided instead of dec, inc lists in which case it
will be used. Either dec, inc lists or a di_block need to be provided.
confidence : 50 percent, 63 percent or 95 percent
Returns
-------
theta : critical angle of interest from the mean which contains the
percentage of directions specified by the confidence parameter
'''
if di_block is None:
di_block = make_di_block(dec, inc)
mean = pmag.fisher_mean(di_block)
else:
mean = pmag.fisher_mean(di_block)
if confidence == 50:
theta = old_div(67.5, np.sqrt(mean['k']))
if confidence == 63:
theta = old_div(81, np.sqrt(mean['k']))
if confidence == 95:
theta = old_div(140, np.sqrt(mean['k']))
return theta | python | def fisher_angular_deviation(dec=None, inc=None, di_block=None, confidence=95):
'''
The angle from the true mean within which a chosen percentage of directions
lie can be calculated from the Fisher distribution. This function uses the
calculated Fisher concentration parameter to estimate this angle from
directional data. The 63 percent confidence interval is often called the
angular standard deviation.
Parameters
----------
dec : list of declinations or longitudes
inc : list of inclinations or latitudes
di_block : a nested list of [dec,inc,1.0]
A di_block can be provided instead of dec, inc lists in which case it
will be used. Either dec, inc lists or a di_block need to be provided.
confidence : 50 percent, 63 percent or 95 percent
Returns
-------
theta : critical angle of interest from the mean which contains the
percentage of directions specified by the confidence parameter
'''
if di_block is None:
di_block = make_di_block(dec, inc)
mean = pmag.fisher_mean(di_block)
else:
mean = pmag.fisher_mean(di_block)
if confidence == 50:
theta = old_div(67.5, np.sqrt(mean['k']))
if confidence == 63:
theta = old_div(81, np.sqrt(mean['k']))
if confidence == 95:
theta = old_div(140, np.sqrt(mean['k']))
return theta | The angle from the true mean within which a chosen percentage of directions
lie can be calculated from the Fisher distribution. This function uses the
calculated Fisher concentration parameter to estimate this angle from
directional data. The 63 percent confidence interval is often called the
angular standard deviation.
Parameters
----------
dec : list of declinations or longitudes
inc : list of inclinations or latitudes
di_block : a nested list of [dec,inc,1.0]
A di_block can be provided instead of dec, inc lists in which case it
will be used. Either dec, inc lists or a di_block need to be provided.
confidence : 50 percent, 63 percent or 95 percent
Returns
-------
theta : critical angle of interest from the mean which contains the
percentage of directions specified by the confidence parameter | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L197-L230 |
PmagPy/PmagPy | pmagpy/ipmag.py | bingham_mean | def bingham_mean(dec=None, inc=None, di_block=None):
"""
Calculates the Bingham mean and associated statistical parameters from
either a list of declination values and a separate list of inclination
values or from a di_block (a nested list a nested list of [dec,inc,1.0]).
Returns a dictionary with the Bingham mean and statistical parameters.
Parameters
----------
dec: list of declinations
inc: list of inclinations
or
di_block: a nested list of [dec,inc,1.0]
A di_block can be provided instead of dec, inc lists in which case it will
be used. Either dec, inc lists or a di_block need to passed to the function.
Returns
---------
bpars : dictionary containing the Bingham mean and associated statistics.
Examples
--------
Use lists of declination and inclination to calculate a Bingham mean:
>>> ipmag.bingham_mean(dec=[140,127,142,136],inc=[21,23,19,22])
{'Edec': 220.84075754194598,
'Einc': -13.745780972597291,
'Eta': 9.9111522306938742,
'Zdec': 280.38894136954474,
'Zeta': 9.8653370276451113,
'Zinc': 64.23509410796224,
'dec': 136.32637167111312,
'inc': 21.34518678073179,
'n': 4}
Use a di_block to calculate a Bingham mean (will give the same output as the
example with the lists):
>>> ipmag.bingham_mean(di_block=[[140,21],[127,23],[142,19],[136,22]])
"""
if di_block is None:
di_block = make_di_block(dec, inc)
return pmag.dobingham(di_block)
else:
return pmag.dobingham(di_block) | python | def bingham_mean(dec=None, inc=None, di_block=None):
"""
Calculates the Bingham mean and associated statistical parameters from
either a list of declination values and a separate list of inclination
values or from a di_block (a nested list a nested list of [dec,inc,1.0]).
Returns a dictionary with the Bingham mean and statistical parameters.
Parameters
----------
dec: list of declinations
inc: list of inclinations
or
di_block: a nested list of [dec,inc,1.0]
A di_block can be provided instead of dec, inc lists in which case it will
be used. Either dec, inc lists or a di_block need to passed to the function.
Returns
---------
bpars : dictionary containing the Bingham mean and associated statistics.
Examples
--------
Use lists of declination and inclination to calculate a Bingham mean:
>>> ipmag.bingham_mean(dec=[140,127,142,136],inc=[21,23,19,22])
{'Edec': 220.84075754194598,
'Einc': -13.745780972597291,
'Eta': 9.9111522306938742,
'Zdec': 280.38894136954474,
'Zeta': 9.8653370276451113,
'Zinc': 64.23509410796224,
'dec': 136.32637167111312,
'inc': 21.34518678073179,
'n': 4}
Use a di_block to calculate a Bingham mean (will give the same output as the
example with the lists):
>>> ipmag.bingham_mean(di_block=[[140,21],[127,23],[142,19],[136,22]])
"""
if di_block is None:
di_block = make_di_block(dec, inc)
return pmag.dobingham(di_block)
else:
return pmag.dobingham(di_block) | Calculates the Bingham mean and associated statistical parameters from
either a list of declination values and a separate list of inclination
values or from a di_block (a nested list a nested list of [dec,inc,1.0]).
Returns a dictionary with the Bingham mean and statistical parameters.
Parameters
----------
dec: list of declinations
inc: list of inclinations
or
di_block: a nested list of [dec,inc,1.0]
A di_block can be provided instead of dec, inc lists in which case it will
be used. Either dec, inc lists or a di_block need to passed to the function.
Returns
---------
bpars : dictionary containing the Bingham mean and associated statistics.
Examples
--------
Use lists of declination and inclination to calculate a Bingham mean:
>>> ipmag.bingham_mean(dec=[140,127,142,136],inc=[21,23,19,22])
{'Edec': 220.84075754194598,
'Einc': -13.745780972597291,
'Eta': 9.9111522306938742,
'Zdec': 280.38894136954474,
'Zeta': 9.8653370276451113,
'Zinc': 64.23509410796224,
'dec': 136.32637167111312,
'inc': 21.34518678073179,
'n': 4}
Use a di_block to calculate a Bingham mean (will give the same output as the
example with the lists):
>>> ipmag.bingham_mean(di_block=[[140,21],[127,23],[142,19],[136,22]]) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L233-L280 |
PmagPy/PmagPy | pmagpy/ipmag.py | kent_mean | def kent_mean(dec=None, inc=None, di_block=None):
"""
Calculates the Kent mean and associated statistical parameters from either a list of
declination values and a separate list of inclination values or from a
di_block (a nested list a nested list of [dec,inc,1.0]). Returns a
dictionary with the Kent mean and statistical parameters.
Parameters
----------
dec: list of declinations
inc: list of inclinations
or
di_block: a nested list of [dec,inc,1.0]
A di_block can be provided instead of dec, inc lists in which case it will
be used. Either dec, inc lists or a di_block need to passed to the function.
Returns
----------
kpars : dictionary containing Kent mean and associated statistics.
Examples
--------
Use lists of declination and inclination to calculate a Kent mean:
>>> ipmag.kent_mean(dec=[140,127,142,136],inc=[21,23,19,22])
{'Edec': 280.38683553668795,
'Einc': 64.236598921744289,
'Eta': 0.72982112760919715,
'Zdec': 40.824690028412761,
'Zeta': 6.7896823241008795,
'Zinc': 13.739412321974067,
'dec': 136.30838974272072,
'inc': 21.347784026899987,
'n': 4}
Use a di_block to calculate a Kent mean (will give the same output as the
example with the lists):
>>> ipmag.kent_mean(di_block=[[140,21],[127,23],[142,19],[136,22]])
"""
if di_block is None:
di_block = make_di_block(dec, inc)
return pmag.dokent(di_block, len(di_block))
else:
return pmag.dokent(di_block, len(di_block)) | python | def kent_mean(dec=None, inc=None, di_block=None):
"""
Calculates the Kent mean and associated statistical parameters from either a list of
declination values and a separate list of inclination values or from a
di_block (a nested list a nested list of [dec,inc,1.0]). Returns a
dictionary with the Kent mean and statistical parameters.
Parameters
----------
dec: list of declinations
inc: list of inclinations
or
di_block: a nested list of [dec,inc,1.0]
A di_block can be provided instead of dec, inc lists in which case it will
be used. Either dec, inc lists or a di_block need to passed to the function.
Returns
----------
kpars : dictionary containing Kent mean and associated statistics.
Examples
--------
Use lists of declination and inclination to calculate a Kent mean:
>>> ipmag.kent_mean(dec=[140,127,142,136],inc=[21,23,19,22])
{'Edec': 280.38683553668795,
'Einc': 64.236598921744289,
'Eta': 0.72982112760919715,
'Zdec': 40.824690028412761,
'Zeta': 6.7896823241008795,
'Zinc': 13.739412321974067,
'dec': 136.30838974272072,
'inc': 21.347784026899987,
'n': 4}
Use a di_block to calculate a Kent mean (will give the same output as the
example with the lists):
>>> ipmag.kent_mean(di_block=[[140,21],[127,23],[142,19],[136,22]])
"""
if di_block is None:
di_block = make_di_block(dec, inc)
return pmag.dokent(di_block, len(di_block))
else:
return pmag.dokent(di_block, len(di_block)) | Calculates the Kent mean and associated statistical parameters from either a list of
declination values and a separate list of inclination values or from a
di_block (a nested list a nested list of [dec,inc,1.0]). Returns a
dictionary with the Kent mean and statistical parameters.
Parameters
----------
dec: list of declinations
inc: list of inclinations
or
di_block: a nested list of [dec,inc,1.0]
A di_block can be provided instead of dec, inc lists in which case it will
be used. Either dec, inc lists or a di_block need to passed to the function.
Returns
----------
kpars : dictionary containing Kent mean and associated statistics.
Examples
--------
Use lists of declination and inclination to calculate a Kent mean:
>>> ipmag.kent_mean(dec=[140,127,142,136],inc=[21,23,19,22])
{'Edec': 280.38683553668795,
'Einc': 64.236598921744289,
'Eta': 0.72982112760919715,
'Zdec': 40.824690028412761,
'Zeta': 6.7896823241008795,
'Zinc': 13.739412321974067,
'dec': 136.30838974272072,
'inc': 21.347784026899987,
'n': 4}
Use a di_block to calculate a Kent mean (will give the same output as the
example with the lists):
>>> ipmag.kent_mean(di_block=[[140,21],[127,23],[142,19],[136,22]]) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L283-L330 |
PmagPy/PmagPy | pmagpy/ipmag.py | print_direction_mean | def print_direction_mean(mean_dictionary):
"""
Does a pretty job printing a Fisher mean and associated statistics for
directional data.
Parameters
----------
mean_dictionary: output dictionary of pmag.fisher_mean
Examples
--------
Generate a Fisher mean using ``ipmag.fisher_mean`` and then print it nicely
using ``ipmag.print_direction_mean``
>>> my_mean = ipmag.fisher_mean(di_block=[[140,21],[127,23],[142,19],[136,22]])
>>> ipmag.print_direction_mean(my_mean)
Dec: 136.3 Inc: 21.3
Number of directions in mean (n): 4
Angular radius of 95% confidence (a_95): 7.3
Precision parameter (k) estimate: 159.7
"""
print('Dec: ' + str(round(mean_dictionary['dec'], 1)) +
' Inc: ' + str(round(mean_dictionary['inc'], 1)))
print('Number of directions in mean (n): ' + str(mean_dictionary['n']))
print('Angular radius of 95% confidence (a_95): ' +
str(round(mean_dictionary['alpha95'], 1)))
print('Precision parameter (k) estimate: ' +
str(round(mean_dictionary['k'], 1))) | python | def print_direction_mean(mean_dictionary):
"""
Does a pretty job printing a Fisher mean and associated statistics for
directional data.
Parameters
----------
mean_dictionary: output dictionary of pmag.fisher_mean
Examples
--------
Generate a Fisher mean using ``ipmag.fisher_mean`` and then print it nicely
using ``ipmag.print_direction_mean``
>>> my_mean = ipmag.fisher_mean(di_block=[[140,21],[127,23],[142,19],[136,22]])
>>> ipmag.print_direction_mean(my_mean)
Dec: 136.3 Inc: 21.3
Number of directions in mean (n): 4
Angular radius of 95% confidence (a_95): 7.3
Precision parameter (k) estimate: 159.7
"""
print('Dec: ' + str(round(mean_dictionary['dec'], 1)) +
' Inc: ' + str(round(mean_dictionary['inc'], 1)))
print('Number of directions in mean (n): ' + str(mean_dictionary['n']))
print('Angular radius of 95% confidence (a_95): ' +
str(round(mean_dictionary['alpha95'], 1)))
print('Precision parameter (k) estimate: ' +
str(round(mean_dictionary['k'], 1))) | Does a pretty job printing a Fisher mean and associated statistics for
directional data.
Parameters
----------
mean_dictionary: output dictionary of pmag.fisher_mean
Examples
--------
Generate a Fisher mean using ``ipmag.fisher_mean`` and then print it nicely
using ``ipmag.print_direction_mean``
>>> my_mean = ipmag.fisher_mean(di_block=[[140,21],[127,23],[142,19],[136,22]])
>>> ipmag.print_direction_mean(my_mean)
Dec: 136.3 Inc: 21.3
Number of directions in mean (n): 4
Angular radius of 95% confidence (a_95): 7.3
Precision parameter (k) estimate: 159.7 | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L333-L360 |
PmagPy/PmagPy | pmagpy/ipmag.py | print_pole_mean | def print_pole_mean(mean_dictionary):
"""
Does a pretty job printing a Fisher mean and associated statistics for
mean paleomagnetic poles.
Parameters
----------
mean_dictionary: output dictionary of pmag.fisher_mean
Examples
--------
Generate a Fisher mean using ``ipmag.fisher_mean`` and then print it nicely
using ``ipmag.print_pole_mean``
>>> my_mean = ipmag.fisher_mean(di_block=[[140,21],[127,23],[142,19],[136,22]])
>>> ipmag.print_pole_mean(my_mean)
Plon: 136.3 Plat: 21.3
Number of directions in mean (n): 4
Angular radius of 95% confidence (A_95): 7.3
Precision parameter (k) estimate: 159.7
"""
print('Plon: ' + str(round(mean_dictionary['dec'], 1)) +
' Plat: ' + str(round(mean_dictionary['inc'], 1)))
print('Number of directions in mean (n): ' + str(mean_dictionary['n']))
print('Angular radius of 95% confidence (A_95): ' +
str(round(mean_dictionary['alpha95'], 1)))
print('Precision parameter (k) estimate: ' +
str(round(mean_dictionary['k'], 1))) | python | def print_pole_mean(mean_dictionary):
"""
Does a pretty job printing a Fisher mean and associated statistics for
mean paleomagnetic poles.
Parameters
----------
mean_dictionary: output dictionary of pmag.fisher_mean
Examples
--------
Generate a Fisher mean using ``ipmag.fisher_mean`` and then print it nicely
using ``ipmag.print_pole_mean``
>>> my_mean = ipmag.fisher_mean(di_block=[[140,21],[127,23],[142,19],[136,22]])
>>> ipmag.print_pole_mean(my_mean)
Plon: 136.3 Plat: 21.3
Number of directions in mean (n): 4
Angular radius of 95% confidence (A_95): 7.3
Precision parameter (k) estimate: 159.7
"""
print('Plon: ' + str(round(mean_dictionary['dec'], 1)) +
' Plat: ' + str(round(mean_dictionary['inc'], 1)))
print('Number of directions in mean (n): ' + str(mean_dictionary['n']))
print('Angular radius of 95% confidence (A_95): ' +
str(round(mean_dictionary['alpha95'], 1)))
print('Precision parameter (k) estimate: ' +
str(round(mean_dictionary['k'], 1))) | Does a pretty job printing a Fisher mean and associated statistics for
mean paleomagnetic poles.
Parameters
----------
mean_dictionary: output dictionary of pmag.fisher_mean
Examples
--------
Generate a Fisher mean using ``ipmag.fisher_mean`` and then print it nicely
using ``ipmag.print_pole_mean``
>>> my_mean = ipmag.fisher_mean(di_block=[[140,21],[127,23],[142,19],[136,22]])
>>> ipmag.print_pole_mean(my_mean)
Plon: 136.3 Plat: 21.3
Number of directions in mean (n): 4
Angular radius of 95% confidence (A_95): 7.3
Precision parameter (k) estimate: 159.7 | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L363-L390 |
PmagPy/PmagPy | pmagpy/ipmag.py | fishrot | def fishrot(k=20, n=100, dec=0, inc=90, di_block=True):
"""
Generates Fisher distributed unit vectors from a specified distribution
using the pmag.py fshdev and dodirot functions.
Parameters
----------
k : kappa precision parameter (default is 20)
n : number of vectors to determine (default is 100)
dec : mean declination of distribution (default is 0)
inc : mean inclination of distribution (default is 90)
di_block : this function returns a nested list of [dec,inc,1.0] as the default
if di_block = False it will return a list of dec and a list of inc
Returns
---------
di_block : a nested list of [dec,inc,1.0] (default)
dec, inc : a list of dec and a list of inc (if di_block = False)
Examples
--------
>>> ipmag.fishrot(k=20, n=5, dec=40, inc=60)
[[44.766285502555775, 37.440866867657235, 1.0],
[33.866315796883725, 64.732532250463436, 1.0],
[47.002912770597163, 54.317853800896977, 1.0],
[36.762165614432547, 56.857240672884252, 1.0],
[71.43950604474395, 59.825830945715431, 1.0]]
"""
directions = []
declinations = []
inclinations = []
if di_block == True:
for data in range(n):
d, i = pmag.fshdev(k)
drot, irot = pmag.dodirot(d, i, dec, inc)
directions.append([drot, irot, 1.])
return directions
else:
for data in range(n):
d, i = pmag.fshdev(k)
drot, irot = pmag.dodirot(d, i, dec, inc)
declinations.append(drot)
inclinations.append(irot)
return declinations, inclinations | python | def fishrot(k=20, n=100, dec=0, inc=90, di_block=True):
"""
Generates Fisher distributed unit vectors from a specified distribution
using the pmag.py fshdev and dodirot functions.
Parameters
----------
k : kappa precision parameter (default is 20)
n : number of vectors to determine (default is 100)
dec : mean declination of distribution (default is 0)
inc : mean inclination of distribution (default is 90)
di_block : this function returns a nested list of [dec,inc,1.0] as the default
if di_block = False it will return a list of dec and a list of inc
Returns
---------
di_block : a nested list of [dec,inc,1.0] (default)
dec, inc : a list of dec and a list of inc (if di_block = False)
Examples
--------
>>> ipmag.fishrot(k=20, n=5, dec=40, inc=60)
[[44.766285502555775, 37.440866867657235, 1.0],
[33.866315796883725, 64.732532250463436, 1.0],
[47.002912770597163, 54.317853800896977, 1.0],
[36.762165614432547, 56.857240672884252, 1.0],
[71.43950604474395, 59.825830945715431, 1.0]]
"""
directions = []
declinations = []
inclinations = []
if di_block == True:
for data in range(n):
d, i = pmag.fshdev(k)
drot, irot = pmag.dodirot(d, i, dec, inc)
directions.append([drot, irot, 1.])
return directions
else:
for data in range(n):
d, i = pmag.fshdev(k)
drot, irot = pmag.dodirot(d, i, dec, inc)
declinations.append(drot)
inclinations.append(irot)
return declinations, inclinations | Generates Fisher distributed unit vectors from a specified distribution
using the pmag.py fshdev and dodirot functions.
Parameters
----------
k : kappa precision parameter (default is 20)
n : number of vectors to determine (default is 100)
dec : mean declination of distribution (default is 0)
inc : mean inclination of distribution (default is 90)
di_block : this function returns a nested list of [dec,inc,1.0] as the default
if di_block = False it will return a list of dec and a list of inc
Returns
---------
di_block : a nested list of [dec,inc,1.0] (default)
dec, inc : a list of dec and a list of inc (if di_block = False)
Examples
--------
>>> ipmag.fishrot(k=20, n=5, dec=40, inc=60)
[[44.766285502555775, 37.440866867657235, 1.0],
[33.866315796883725, 64.732532250463436, 1.0],
[47.002912770597163, 54.317853800896977, 1.0],
[36.762165614432547, 56.857240672884252, 1.0],
[71.43950604474395, 59.825830945715431, 1.0]] | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L393-L436 |
PmagPy/PmagPy | pmagpy/ipmag.py | tk03 | def tk03(n=100, dec=0, lat=0, rev='no', G2=0, G3=0):
"""
Generates vectors drawn from the TK03.gad model of secular
variation (Tauxe and Kent, 2004) at given latitude and rotated
about a vertical axis by the given declination. Return a nested list of
of [dec,inc,intensity].
Parameters
----------
n : number of vectors to determine (default is 100)
dec : mean declination of data set (default is 0)
lat : latitude at which secular variation is simulated (default is 0)
rev : if reversals are to be included this should be 'yes' (default is 'no')
G2 : specify average g_2^0 fraction (default is 0)
G3 : specify average g_3^0 fraction (default is 0)
Returns
----------
tk_03_output : a nested list of declination, inclination, and intensity (in nT)
Examples
--------
>>> ipmag.tk03(n=5, dec=0, lat=0)
[[14.752502674158681, -36.189370642603834, 16584.848620957589],
[9.2859465437113311, -10.064247301056071, 17383.950391596223],
[2.4278460589582913, 4.8079990844938019, 18243.679003572055],
[352.93759572283585, 0.086693343935840397, 18524.551174838372],
[352.48366219759953, 11.579098286352332, 24928.412830772766]]
"""
tk_03_output = []
for k in range(n):
gh = pmag.mktk03(8, k, G2, G3) # terms and random seed
# get a random longitude, between 0 and 359
lon = random.randint(0, 360)
vec = pmag.getvec(gh, lat, lon) # send field model and lat to getvec
vec[0] += dec
if vec[0] >= 360.:
vec[0] -= 360.
if k % 2 == 0 and rev == 'yes':
vec[0] += 180.
vec[1] = -vec[1]
tk_03_output.append([vec[0], vec[1], vec[2]])
return tk_03_output | python | def tk03(n=100, dec=0, lat=0, rev='no', G2=0, G3=0):
"""
Generates vectors drawn from the TK03.gad model of secular
variation (Tauxe and Kent, 2004) at given latitude and rotated
about a vertical axis by the given declination. Return a nested list of
of [dec,inc,intensity].
Parameters
----------
n : number of vectors to determine (default is 100)
dec : mean declination of data set (default is 0)
lat : latitude at which secular variation is simulated (default is 0)
rev : if reversals are to be included this should be 'yes' (default is 'no')
G2 : specify average g_2^0 fraction (default is 0)
G3 : specify average g_3^0 fraction (default is 0)
Returns
----------
tk_03_output : a nested list of declination, inclination, and intensity (in nT)
Examples
--------
>>> ipmag.tk03(n=5, dec=0, lat=0)
[[14.752502674158681, -36.189370642603834, 16584.848620957589],
[9.2859465437113311, -10.064247301056071, 17383.950391596223],
[2.4278460589582913, 4.8079990844938019, 18243.679003572055],
[352.93759572283585, 0.086693343935840397, 18524.551174838372],
[352.48366219759953, 11.579098286352332, 24928.412830772766]]
"""
tk_03_output = []
for k in range(n):
gh = pmag.mktk03(8, k, G2, G3) # terms and random seed
# get a random longitude, between 0 and 359
lon = random.randint(0, 360)
vec = pmag.getvec(gh, lat, lon) # send field model and lat to getvec
vec[0] += dec
if vec[0] >= 360.:
vec[0] -= 360.
if k % 2 == 0 and rev == 'yes':
vec[0] += 180.
vec[1] = -vec[1]
tk_03_output.append([vec[0], vec[1], vec[2]])
return tk_03_output | Generates vectors drawn from the TK03.gad model of secular
variation (Tauxe and Kent, 2004) at given latitude and rotated
about a vertical axis by the given declination. Return a nested list of
of [dec,inc,intensity].
Parameters
----------
n : number of vectors to determine (default is 100)
dec : mean declination of data set (default is 0)
lat : latitude at which secular variation is simulated (default is 0)
rev : if reversals are to be included this should be 'yes' (default is 'no')
G2 : specify average g_2^0 fraction (default is 0)
G3 : specify average g_3^0 fraction (default is 0)
Returns
----------
tk_03_output : a nested list of declination, inclination, and intensity (in nT)
Examples
--------
>>> ipmag.tk03(n=5, dec=0, lat=0)
[[14.752502674158681, -36.189370642603834, 16584.848620957589],
[9.2859465437113311, -10.064247301056071, 17383.950391596223],
[2.4278460589582913, 4.8079990844938019, 18243.679003572055],
[352.93759572283585, 0.086693343935840397, 18524.551174838372],
[352.48366219759953, 11.579098286352332, 24928.412830772766]] | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L439-L481 |
PmagPy/PmagPy | pmagpy/ipmag.py | unsquish | def unsquish(incs, f):
"""
This function applies uses a flattening factor (f) to unflatten inclination
data (incs) and returns 'unsquished' values.
Parameters
----------
incs : list of inclination values or a single value
f : unflattening factor (between 0.0 and 1.0)
Returns
----------
incs_unsquished : List of unflattened inclinations (in degrees)
Examples
--------
Take a list of inclinations, flatten them using ``ipmag.squish`` and then
use ``ipmag.squish`` and the flattening factor to unflatten them.
>>> inclinations = [43,47,41]
>>> squished_incs = ipmag.squish(inclinations,0.4)
>>> ipmag.unsquish(squished_incs,0.4)
[43.0, 47.0, 41.0]
"""
try:
length = len(incs)
incs_unsquished = []
for n in range(0, length):
inc_rad = np.deg2rad(incs[n]) # convert to radians
inc_new_rad = (old_div(1., f)) * np.tan(inc_rad)
# convert back to degrees
inc_new = np.rad2deg(np.arctan(inc_new_rad))
incs_unsquished.append(inc_new)
return incs_unsquished
except:
inc_rad = np.deg2rad(incs) # convert to radians
inc_new_rad = (old_div(1., f)) * np.tan(inc_rad)
inc_new = np.rad2deg(np.arctan(inc_new_rad)) # convert back to degrees
return inc_new | python | def unsquish(incs, f):
"""
This function applies uses a flattening factor (f) to unflatten inclination
data (incs) and returns 'unsquished' values.
Parameters
----------
incs : list of inclination values or a single value
f : unflattening factor (between 0.0 and 1.0)
Returns
----------
incs_unsquished : List of unflattened inclinations (in degrees)
Examples
--------
Take a list of inclinations, flatten them using ``ipmag.squish`` and then
use ``ipmag.squish`` and the flattening factor to unflatten them.
>>> inclinations = [43,47,41]
>>> squished_incs = ipmag.squish(inclinations,0.4)
>>> ipmag.unsquish(squished_incs,0.4)
[43.0, 47.0, 41.0]
"""
try:
length = len(incs)
incs_unsquished = []
for n in range(0, length):
inc_rad = np.deg2rad(incs[n]) # convert to radians
inc_new_rad = (old_div(1., f)) * np.tan(inc_rad)
# convert back to degrees
inc_new = np.rad2deg(np.arctan(inc_new_rad))
incs_unsquished.append(inc_new)
return incs_unsquished
except:
inc_rad = np.deg2rad(incs) # convert to radians
inc_new_rad = (old_div(1., f)) * np.tan(inc_rad)
inc_new = np.rad2deg(np.arctan(inc_new_rad)) # convert back to degrees
return inc_new | This function applies uses a flattening factor (f) to unflatten inclination
data (incs) and returns 'unsquished' values.
Parameters
----------
incs : list of inclination values or a single value
f : unflattening factor (between 0.0 and 1.0)
Returns
----------
incs_unsquished : List of unflattened inclinations (in degrees)
Examples
--------
Take a list of inclinations, flatten them using ``ipmag.squish`` and then
use ``ipmag.squish`` and the flattening factor to unflatten them.
>>> inclinations = [43,47,41]
>>> squished_incs = ipmag.squish(inclinations,0.4)
>>> ipmag.unsquish(squished_incs,0.4)
[43.0, 47.0, 41.0] | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L484-L522 |
PmagPy/PmagPy | pmagpy/ipmag.py | squish | def squish(incs, f):
"""
This function applies an flattening factor (f) to inclination data
(incs) and returns 'squished' values.
Parameters
----------
incs : list of inclination values or a single value
f : flattening factor (between 0.0 and 1.0)
Returns
---------
incs_squished : List of flattened directions (in degrees)
Examples
--------
Take a list of inclinations, flatten them.
>>> inclinations = [43,47,41]
>>> ipmag.squish(inclinations,0.4)
[20.455818908027187, 23.216791019112204, 19.173314360172309]
"""
try:
length = len(incs)
incs_squished = []
for n in range(0, length):
inc_rad = incs[n] * np.pi / 180. # convert to radians
inc_new_rad = f * np.tan(inc_rad)
inc_new = np.arctan(inc_new_rad) * 180. / \
np.pi # convert back to degrees
incs_squished.append(inc_new)
return incs_squished
except:
inc_rad = incs * np.pi / 180. # convert to radians
inc_new_rad = f * np.tan(inc_rad)
inc_new = np.arctan(inc_new_rad) * 180. / \
np.pi # convert back to degrees
return inc_new | python | def squish(incs, f):
"""
This function applies an flattening factor (f) to inclination data
(incs) and returns 'squished' values.
Parameters
----------
incs : list of inclination values or a single value
f : flattening factor (between 0.0 and 1.0)
Returns
---------
incs_squished : List of flattened directions (in degrees)
Examples
--------
Take a list of inclinations, flatten them.
>>> inclinations = [43,47,41]
>>> ipmag.squish(inclinations,0.4)
[20.455818908027187, 23.216791019112204, 19.173314360172309]
"""
try:
length = len(incs)
incs_squished = []
for n in range(0, length):
inc_rad = incs[n] * np.pi / 180. # convert to radians
inc_new_rad = f * np.tan(inc_rad)
inc_new = np.arctan(inc_new_rad) * 180. / \
np.pi # convert back to degrees
incs_squished.append(inc_new)
return incs_squished
except:
inc_rad = incs * np.pi / 180. # convert to radians
inc_new_rad = f * np.tan(inc_rad)
inc_new = np.arctan(inc_new_rad) * 180. / \
np.pi # convert back to degrees
return inc_new | This function applies an flattening factor (f) to inclination data
(incs) and returns 'squished' values.
Parameters
----------
incs : list of inclination values or a single value
f : flattening factor (between 0.0 and 1.0)
Returns
---------
incs_squished : List of flattened directions (in degrees)
Examples
--------
Take a list of inclinations, flatten them.
>>> inclinations = [43,47,41]
>>> ipmag.squish(inclinations,0.4)
[20.455818908027187, 23.216791019112204, 19.173314360172309] | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L525-L562 |
PmagPy/PmagPy | pmagpy/ipmag.py | do_flip | def do_flip(dec=None, inc=None, di_block=None):
"""
This function returns the antipode (i.e. it flips) of directions.
The function can take dec and inc as seperate lists if they are of equal
length and explicitly specified or are the first two arguments. It will then
return a list of flipped decs and a list of flipped incs. If a di_block (a
nested list of [dec, inc, 1.0]) is specified then it is used and the function
returns a di_block with the flipped directions.
Parameters
----------
dec: list of declinations
inc: list of inclinations
or
di_block: a nested list of [dec, inc, 1.0]
A di_block can be provided instead of dec, inc lists in which case it will
be used. Either dec, inc lists or a di_block need to passed to the function.
Returns
----------
dec_flip, inc_flip : list of flipped declinations and inclinations
or
dflip : a nested list of [dec, inc, 1.0]
Examples
----------
Lists of declination and inclination can be flipped to their antipodes:
>>> decs = [1.0, 358.0, 2.0]
>>> incs = [10.0, 12.0, 8.0]
>>> ipmag.do_flip(decs, incs)
([181.0, 178.0, 182.0], [-10.0, -12.0, -8.0])
The function can also take a di_block and returns a flipped di_block:
>>> directions = [[1.0,10.0],[358.0,12.0,],[2.0,8.0]]
>>> ipmag.do_flip(di_block=directions)
[[181.0, -10.0, 1.0], [178.0, -12.0, 1.0], [182.0, -8.0, 1.0]]
"""
if di_block is None:
dec_flip = []
inc_flip = []
for n in range(0, len(dec)):
dec_flip.append((dec[n] - 180.) % 360.0)
inc_flip.append(-inc[n])
return dec_flip, inc_flip
else:
dflip = []
for rec in di_block:
d, i = (rec[0] - 180.) % 360., -rec[1]
dflip.append([d, i, 1.0])
return dflip | python | def do_flip(dec=None, inc=None, di_block=None):
"""
This function returns the antipode (i.e. it flips) of directions.
The function can take dec and inc as seperate lists if they are of equal
length and explicitly specified or are the first two arguments. It will then
return a list of flipped decs and a list of flipped incs. If a di_block (a
nested list of [dec, inc, 1.0]) is specified then it is used and the function
returns a di_block with the flipped directions.
Parameters
----------
dec: list of declinations
inc: list of inclinations
or
di_block: a nested list of [dec, inc, 1.0]
A di_block can be provided instead of dec, inc lists in which case it will
be used. Either dec, inc lists or a di_block need to passed to the function.
Returns
----------
dec_flip, inc_flip : list of flipped declinations and inclinations
or
dflip : a nested list of [dec, inc, 1.0]
Examples
----------
Lists of declination and inclination can be flipped to their antipodes:
>>> decs = [1.0, 358.0, 2.0]
>>> incs = [10.0, 12.0, 8.0]
>>> ipmag.do_flip(decs, incs)
([181.0, 178.0, 182.0], [-10.0, -12.0, -8.0])
The function can also take a di_block and returns a flipped di_block:
>>> directions = [[1.0,10.0],[358.0,12.0,],[2.0,8.0]]
>>> ipmag.do_flip(di_block=directions)
[[181.0, -10.0, 1.0], [178.0, -12.0, 1.0], [182.0, -8.0, 1.0]]
"""
if di_block is None:
dec_flip = []
inc_flip = []
for n in range(0, len(dec)):
dec_flip.append((dec[n] - 180.) % 360.0)
inc_flip.append(-inc[n])
return dec_flip, inc_flip
else:
dflip = []
for rec in di_block:
d, i = (rec[0] - 180.) % 360., -rec[1]
dflip.append([d, i, 1.0])
return dflip | This function returns the antipode (i.e. it flips) of directions.
The function can take dec and inc as seperate lists if they are of equal
length and explicitly specified or are the first two arguments. It will then
return a list of flipped decs and a list of flipped incs. If a di_block (a
nested list of [dec, inc, 1.0]) is specified then it is used and the function
returns a di_block with the flipped directions.
Parameters
----------
dec: list of declinations
inc: list of inclinations
or
di_block: a nested list of [dec, inc, 1.0]
A di_block can be provided instead of dec, inc lists in which case it will
be used. Either dec, inc lists or a di_block need to passed to the function.
Returns
----------
dec_flip, inc_flip : list of flipped declinations and inclinations
or
dflip : a nested list of [dec, inc, 1.0]
Examples
----------
Lists of declination and inclination can be flipped to their antipodes:
>>> decs = [1.0, 358.0, 2.0]
>>> incs = [10.0, 12.0, 8.0]
>>> ipmag.do_flip(decs, incs)
([181.0, 178.0, 182.0], [-10.0, -12.0, -8.0])
The function can also take a di_block and returns a flipped di_block:
>>> directions = [[1.0,10.0],[358.0,12.0,],[2.0,8.0]]
>>> ipmag.do_flip(di_block=directions)
[[181.0, -10.0, 1.0], [178.0, -12.0, 1.0], [182.0, -8.0, 1.0]] | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L565-L620 |
PmagPy/PmagPy | pmagpy/ipmag.py | common_mean_bootstrap | def common_mean_bootstrap(Data1, Data2, NumSims=1000, save=False, save_folder='.', fmt='svg', figsize=(7, 2.3), x_tick_bins=4):
"""
Conduct a bootstrap test (Tauxe, 2010) for a common mean on two declination,
inclination data sets. Plots are generated of the cumulative distributions
of the Cartesian coordinates of the means of the pseudo-samples (one for x,
one for y and one for z). If the 95 percent confidence bounds for each
component overlap, the two directions are not significantly different.
Parameters
----------
Data1 : a nested list of directional data [dec,inc] (a di_block)
Data2 : a nested list of directional data [dec,inc] (a di_block)
if Data2 is length of 1, treat as single direction
NumSims : number of bootstrap samples (default is 1000)
save : optional save of plots (default is False)
save_folder : path to directory where plots should be saved
fmt : format of figures to be saved (default is 'svg')
figsize : optionally adjust figure size (default is (7, 2.3))
x_tick_bins : because they occasionally overlap depending on the data, this
argument allows you adjust number of tick marks on the x axis of graphs
(default is 4)
Returns
-------
three plots : cumulative distributions of the X, Y, Z of bootstrapped means
Examples
--------
Develop two populations of directions using ``ipmag.fishrot``. Use the
function to determine if they share a common mean (through visual inspection
of resulting plots).
>>> directions_A = ipmag.fishrot(k=20, n=30, dec=40, inc=60)
>>> directions_B = ipmag.fishrot(k=35, n=25, dec=42, inc=57)
>>> ipmag.common_mean_bootstrap(directions_A, directions_B)
"""
counter = 0
BDI1 = pmag.di_boot(Data1)
cart1 = pmag.dir2cart(BDI1).transpose()
X1, Y1, Z1 = cart1[0], cart1[1], cart1[2]
if np.array(Data2).shape[0] > 2:
BDI2 = pmag.di_boot(Data2)
cart2 = pmag.dir2cart(BDI2).transpose()
X2, Y2, Z2 = cart2[0], cart2[1], cart2[2]
else:
cart = pmag.dir2cart(Data2).transpose()
fignum = 1
fig = plt.figure(figsize=figsize)
fig = plt.subplot(1, 3, 1)
minimum = int(0.025 * len(X1))
maximum = int(0.975 * len(X1))
X1, y = pmagplotlib.plot_cdf(fignum, X1, "X component", 'r', "")
bounds1 = [X1[minimum], X1[maximum]]
pmagplotlib.plot_vs(fignum, bounds1, 'r', '-')
if np.array(Data2).shape[0] > 2:
X2, y = pmagplotlib.plot_cdf(fignum, X2, "X component", 'b', "")
bounds2 = [X2[minimum], X2[maximum]]
pmagplotlib.plot_vs(fignum, bounds2, 'b', '--')
else:
pmagplotlib.plot_vs(fignum, [cart[0]], 'k', '--')
plt.ylim(0, 1)
plt.locator_params(nbins=x_tick_bins)
plt.subplot(1, 3, 2)
Y1, y = pmagplotlib.plot_cdf(fignum, Y1, "Y component", 'r', "")
bounds1 = [Y1[minimum], Y1[maximum]]
pmagplotlib.plot_vs(fignum, bounds1, 'r', '-')
if np.array(Data2).shape[0] > 2:
Y2, y = pmagplotlib.plot_cdf(fignum, Y2, "Y component", 'b', "")
bounds2 = [Y2[minimum], Y2[maximum]]
pmagplotlib.plot_vs(fignum, bounds2, 'b', '--')
else:
pmagplotlib.plot_vs(fignum, [cart[1]], 'k', '--')
plt.ylim(0, 1)
plt.subplot(1, 3, 3)
Z1, y = pmagplotlib.plot_cdf(fignum, Z1, "Z component", 'r', "")
bounds1 = [Z1[minimum], Z1[maximum]]
pmagplotlib.plot_vs(fignum, bounds1, 'r', '-')
if np.array(Data2).shape[0] > 2:
Z2, y = pmagplotlib.plot_cdf(fignum, Z2, "Z component", 'b', "")
bounds2 = [Z2[minimum], Z2[maximum]]
pmagplotlib.plot_vs(fignum, bounds2, 'b', '--')
else:
pmagplotlib.plot_vs(fignum, [cart[2]], 'k', '--')
plt.ylim(0, 1)
plt.locator_params(nbins=x_tick_bins)
plt.tight_layout()
if save == True:
plt.savefig(os.path.join(
save_folder, 'common_mean_bootstrap') + '.' + fmt)
plt.show() | python | def common_mean_bootstrap(Data1, Data2, NumSims=1000, save=False, save_folder='.', fmt='svg', figsize=(7, 2.3), x_tick_bins=4):
"""
Conduct a bootstrap test (Tauxe, 2010) for a common mean on two declination,
inclination data sets. Plots are generated of the cumulative distributions
of the Cartesian coordinates of the means of the pseudo-samples (one for x,
one for y and one for z). If the 95 percent confidence bounds for each
component overlap, the two directions are not significantly different.
Parameters
----------
Data1 : a nested list of directional data [dec,inc] (a di_block)
Data2 : a nested list of directional data [dec,inc] (a di_block)
if Data2 is length of 1, treat as single direction
NumSims : number of bootstrap samples (default is 1000)
save : optional save of plots (default is False)
save_folder : path to directory where plots should be saved
fmt : format of figures to be saved (default is 'svg')
figsize : optionally adjust figure size (default is (7, 2.3))
x_tick_bins : because they occasionally overlap depending on the data, this
argument allows you adjust number of tick marks on the x axis of graphs
(default is 4)
Returns
-------
three plots : cumulative distributions of the X, Y, Z of bootstrapped means
Examples
--------
Develop two populations of directions using ``ipmag.fishrot``. Use the
function to determine if they share a common mean (through visual inspection
of resulting plots).
>>> directions_A = ipmag.fishrot(k=20, n=30, dec=40, inc=60)
>>> directions_B = ipmag.fishrot(k=35, n=25, dec=42, inc=57)
>>> ipmag.common_mean_bootstrap(directions_A, directions_B)
"""
counter = 0
BDI1 = pmag.di_boot(Data1)
cart1 = pmag.dir2cart(BDI1).transpose()
X1, Y1, Z1 = cart1[0], cart1[1], cart1[2]
if np.array(Data2).shape[0] > 2:
BDI2 = pmag.di_boot(Data2)
cart2 = pmag.dir2cart(BDI2).transpose()
X2, Y2, Z2 = cart2[0], cart2[1], cart2[2]
else:
cart = pmag.dir2cart(Data2).transpose()
fignum = 1
fig = plt.figure(figsize=figsize)
fig = plt.subplot(1, 3, 1)
minimum = int(0.025 * len(X1))
maximum = int(0.975 * len(X1))
X1, y = pmagplotlib.plot_cdf(fignum, X1, "X component", 'r', "")
bounds1 = [X1[minimum], X1[maximum]]
pmagplotlib.plot_vs(fignum, bounds1, 'r', '-')
if np.array(Data2).shape[0] > 2:
X2, y = pmagplotlib.plot_cdf(fignum, X2, "X component", 'b', "")
bounds2 = [X2[minimum], X2[maximum]]
pmagplotlib.plot_vs(fignum, bounds2, 'b', '--')
else:
pmagplotlib.plot_vs(fignum, [cart[0]], 'k', '--')
plt.ylim(0, 1)
plt.locator_params(nbins=x_tick_bins)
plt.subplot(1, 3, 2)
Y1, y = pmagplotlib.plot_cdf(fignum, Y1, "Y component", 'r', "")
bounds1 = [Y1[minimum], Y1[maximum]]
pmagplotlib.plot_vs(fignum, bounds1, 'r', '-')
if np.array(Data2).shape[0] > 2:
Y2, y = pmagplotlib.plot_cdf(fignum, Y2, "Y component", 'b', "")
bounds2 = [Y2[minimum], Y2[maximum]]
pmagplotlib.plot_vs(fignum, bounds2, 'b', '--')
else:
pmagplotlib.plot_vs(fignum, [cart[1]], 'k', '--')
plt.ylim(0, 1)
plt.subplot(1, 3, 3)
Z1, y = pmagplotlib.plot_cdf(fignum, Z1, "Z component", 'r', "")
bounds1 = [Z1[minimum], Z1[maximum]]
pmagplotlib.plot_vs(fignum, bounds1, 'r', '-')
if np.array(Data2).shape[0] > 2:
Z2, y = pmagplotlib.plot_cdf(fignum, Z2, "Z component", 'b', "")
bounds2 = [Z2[minimum], Z2[maximum]]
pmagplotlib.plot_vs(fignum, bounds2, 'b', '--')
else:
pmagplotlib.plot_vs(fignum, [cart[2]], 'k', '--')
plt.ylim(0, 1)
plt.locator_params(nbins=x_tick_bins)
plt.tight_layout()
if save == True:
plt.savefig(os.path.join(
save_folder, 'common_mean_bootstrap') + '.' + fmt)
plt.show() | Conduct a bootstrap test (Tauxe, 2010) for a common mean on two declination,
inclination data sets. Plots are generated of the cumulative distributions
of the Cartesian coordinates of the means of the pseudo-samples (one for x,
one for y and one for z). If the 95 percent confidence bounds for each
component overlap, the two directions are not significantly different.
Parameters
----------
Data1 : a nested list of directional data [dec,inc] (a di_block)
Data2 : a nested list of directional data [dec,inc] (a di_block)
if Data2 is length of 1, treat as single direction
NumSims : number of bootstrap samples (default is 1000)
save : optional save of plots (default is False)
save_folder : path to directory where plots should be saved
fmt : format of figures to be saved (default is 'svg')
figsize : optionally adjust figure size (default is (7, 2.3))
x_tick_bins : because they occasionally overlap depending on the data, this
argument allows you adjust number of tick marks on the x axis of graphs
(default is 4)
Returns
-------
three plots : cumulative distributions of the X, Y, Z of bootstrapped means
Examples
--------
Develop two populations of directions using ``ipmag.fishrot``. Use the
function to determine if they share a common mean (through visual inspection
of resulting plots).
>>> directions_A = ipmag.fishrot(k=20, n=30, dec=40, inc=60)
>>> directions_B = ipmag.fishrot(k=35, n=25, dec=42, inc=57)
>>> ipmag.common_mean_bootstrap(directions_A, directions_B) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L749-L847 |
PmagPy/PmagPy | pmagpy/ipmag.py | common_mean_watson | def common_mean_watson(Data1, Data2, NumSims=5000, print_result=True, plot='no', save=False, save_folder='.', fmt='svg'):
"""
Conduct a Watson V test for a common mean on two directional data sets.
This function calculates Watson's V statistic from input files through
Monte Carlo simulation in order to test whether two populations of
directional data could have been drawn from a common mean. The critical
angle between the two sample mean directions and the corresponding
McFadden and McElhinny (1990) classification is printed.
Parameters
----------
Data1 : a nested list of directional data [dec,inc] (a di_block)
Data2 : a nested list of directional data [dec,inc] (a di_block)
NumSims : number of Monte Carlo simulations (default is 5000)
print_result : default is to print the test result (True)
plot : the default is no plot ('no'). Putting 'yes' will the plot the CDF
from the Monte Carlo simulations.
save : optional save of plots (default is False)
save_folder : path to where plots will be saved (default is current)
fmt : format of figures to be saved (default is 'svg')
Returns
-------
printed text : text describing the test result is printed
result : a boolean where 0 is fail and 1 is pass
angle : angle between the Fisher means of the two data sets
critical_angle : critical angle for the test to pass
Examples
--------
Develop two populations of directions using ``ipmag.fishrot``. Use the
function to determine if they share a common mean.
>>> directions_A = ipmag.fishrot(k=20, n=30, dec=40, inc=60)
>>> directions_B = ipmag.fishrot(k=35, n=25, dec=42, inc=57)
>>> ipmag.common_mean_watson(directions_A, directions_B)
"""
pars_1 = pmag.fisher_mean(Data1)
pars_2 = pmag.fisher_mean(Data2)
cart_1 = pmag.dir2cart([pars_1["dec"], pars_1["inc"], pars_1["r"]])
cart_2 = pmag.dir2cart([pars_2['dec'], pars_2['inc'], pars_2["r"]])
Sw = pars_1['k'] * pars_1['r'] + pars_2['k'] * pars_2['r'] # k1*r1+k2*r2
xhat_1 = pars_1['k'] * cart_1[0] + pars_2['k'] * cart_2[0] # k1*x1+k2*x2
xhat_2 = pars_1['k'] * cart_1[1] + pars_2['k'] * cart_2[1] # k1*y1+k2*y2
xhat_3 = pars_1['k'] * cart_1[2] + pars_2['k'] * cart_2[2] # k1*z1+k2*z2
Rw = np.sqrt(xhat_1**2 + xhat_2**2 + xhat_3**2)
V = 2 * (Sw - Rw)
# keep weighted sum for later when determining the "critical angle"
# let's save it as Sr (notation of McFadden and McElhinny, 1990)
Sr = Sw
# do monte carlo simulation of datasets with same kappas as data,
# but a common mean
counter = 0
Vp = [] # set of Vs from simulations
for k in range(NumSims):
# get a set of N1 fisher distributed vectors with k1,
# calculate fisher stats
Dirp = []
for i in range(pars_1["n"]):
Dirp.append(pmag.fshdev(pars_1["k"]))
pars_p1 = pmag.fisher_mean(Dirp)
# get a set of N2 fisher distributed vectors with k2,
# calculate fisher stats
Dirp = []
for i in range(pars_2["n"]):
Dirp.append(pmag.fshdev(pars_2["k"]))
pars_p2 = pmag.fisher_mean(Dirp)
# get the V for these
Vk = pmag.vfunc(pars_p1, pars_p2)
Vp.append(Vk)
# sort the Vs, get Vcrit (95th percentile one)
Vp.sort()
k = int(.95 * NumSims)
Vcrit = Vp[k]
# equation 18 of McFadden and McElhinny, 1990 calculates the critical
# value of R (Rwc)
Rwc = Sr - (old_div(Vcrit, 2))
# following equation 19 of McFadden and McElhinny (1990) the critical
# angle is calculated. If the observed angle (also calculated below)
# between the data set means exceeds the critical angle the hypothesis
# of a common mean direction may be rejected at the 95% confidence
# level. The critical angle is simply a different way to present
# Watson's V parameter so it makes sense to use the Watson V parameter
# in comparison with the critical value of V for considering the test
# results. What calculating the critical angle allows for is the
# classification of McFadden and McElhinny (1990) to be made
# for data sets that are consistent with sharing a common mean.
k1 = pars_1['k']
k2 = pars_2['k']
R1 = pars_1['r']
R2 = pars_2['r']
critical_angle = np.degrees(np.arccos(old_div(((Rwc**2) - ((k1 * R1)**2)
- ((k2 * R2)**2)),
(2 * k1 * R1 * k2 * R2))))
D1 = (pars_1['dec'], pars_1['inc'])
D2 = (pars_2['dec'], pars_2['inc'])
angle = pmag.angle(D1, D2)
if print_result == True:
print("Results of Watson V test: ")
print("")
print("Watson's V: " '%.1f' % (V))
print("Critical value of V: " '%.1f' % (Vcrit))
if V < Vcrit:
if print_result == True:
print('"Pass": Since V is less than Vcrit, the null hypothesis')
print('that the two populations are drawn from distributions')
print('that share a common mean direction can not be rejected.')
result = 1
elif V > Vcrit:
if print_result == True:
print('"Fail": Since V is greater than Vcrit, the two means can')
print('be distinguished at the 95% confidence level.')
result = 0
if print_result == True:
print("")
print("M&M1990 classification:")
print("")
print("Angle between data set means: " '%.1f' % (angle))
print("Critical angle for M&M1990: " '%.1f' % (critical_angle))
if print_result == True:
if V > Vcrit:
print("")
elif V < Vcrit:
if critical_angle < 5:
print("The McFadden and McElhinny (1990) classification for")
print("this test is: 'A'")
elif critical_angle < 10:
print("The McFadden and McElhinny (1990) classification for")
print("this test is: 'B'")
elif critical_angle < 20:
print("The McFadden and McElhinny (1990) classification for")
print("this test is: 'C'")
else:
print("The McFadden and McElhinny (1990) classification for")
print("this test is: 'INDETERMINATE;")
if plot == 'yes':
CDF = {'cdf': 1}
# pmagplotlib.plot_init(CDF['cdf'],5,5)
plt.figure(figsize=(3.5, 2.5))
p1 = pmagplotlib.plot_cdf(CDF['cdf'], Vp, "Watson's V", 'r', "")
p2 = pmagplotlib.plot_vs(CDF['cdf'], [V], 'g', '-')
p3 = pmagplotlib.plot_vs(CDF['cdf'], [Vp[k]], 'b', '--')
# pmagplotlib.draw_figs(CDF)
if save == True:
plt.savefig(os.path.join(
save_folder, 'common_mean_watson') + '.' + fmt)
pmagplotlib.show_fig(CDF['cdf'])
return result, angle[0], critical_angle | python | def common_mean_watson(Data1, Data2, NumSims=5000, print_result=True, plot='no', save=False, save_folder='.', fmt='svg'):
"""
Conduct a Watson V test for a common mean on two directional data sets.
This function calculates Watson's V statistic from input files through
Monte Carlo simulation in order to test whether two populations of
directional data could have been drawn from a common mean. The critical
angle between the two sample mean directions and the corresponding
McFadden and McElhinny (1990) classification is printed.
Parameters
----------
Data1 : a nested list of directional data [dec,inc] (a di_block)
Data2 : a nested list of directional data [dec,inc] (a di_block)
NumSims : number of Monte Carlo simulations (default is 5000)
print_result : default is to print the test result (True)
plot : the default is no plot ('no'). Putting 'yes' will the plot the CDF
from the Monte Carlo simulations.
save : optional save of plots (default is False)
save_folder : path to where plots will be saved (default is current)
fmt : format of figures to be saved (default is 'svg')
Returns
-------
printed text : text describing the test result is printed
result : a boolean where 0 is fail and 1 is pass
angle : angle between the Fisher means of the two data sets
critical_angle : critical angle for the test to pass
Examples
--------
Develop two populations of directions using ``ipmag.fishrot``. Use the
function to determine if they share a common mean.
>>> directions_A = ipmag.fishrot(k=20, n=30, dec=40, inc=60)
>>> directions_B = ipmag.fishrot(k=35, n=25, dec=42, inc=57)
>>> ipmag.common_mean_watson(directions_A, directions_B)
"""
pars_1 = pmag.fisher_mean(Data1)
pars_2 = pmag.fisher_mean(Data2)
cart_1 = pmag.dir2cart([pars_1["dec"], pars_1["inc"], pars_1["r"]])
cart_2 = pmag.dir2cart([pars_2['dec'], pars_2['inc'], pars_2["r"]])
Sw = pars_1['k'] * pars_1['r'] + pars_2['k'] * pars_2['r'] # k1*r1+k2*r2
xhat_1 = pars_1['k'] * cart_1[0] + pars_2['k'] * cart_2[0] # k1*x1+k2*x2
xhat_2 = pars_1['k'] * cart_1[1] + pars_2['k'] * cart_2[1] # k1*y1+k2*y2
xhat_3 = pars_1['k'] * cart_1[2] + pars_2['k'] * cart_2[2] # k1*z1+k2*z2
Rw = np.sqrt(xhat_1**2 + xhat_2**2 + xhat_3**2)
V = 2 * (Sw - Rw)
# keep weighted sum for later when determining the "critical angle"
# let's save it as Sr (notation of McFadden and McElhinny, 1990)
Sr = Sw
# do monte carlo simulation of datasets with same kappas as data,
# but a common mean
counter = 0
Vp = [] # set of Vs from simulations
for k in range(NumSims):
# get a set of N1 fisher distributed vectors with k1,
# calculate fisher stats
Dirp = []
for i in range(pars_1["n"]):
Dirp.append(pmag.fshdev(pars_1["k"]))
pars_p1 = pmag.fisher_mean(Dirp)
# get a set of N2 fisher distributed vectors with k2,
# calculate fisher stats
Dirp = []
for i in range(pars_2["n"]):
Dirp.append(pmag.fshdev(pars_2["k"]))
pars_p2 = pmag.fisher_mean(Dirp)
# get the V for these
Vk = pmag.vfunc(pars_p1, pars_p2)
Vp.append(Vk)
# sort the Vs, get Vcrit (95th percentile one)
Vp.sort()
k = int(.95 * NumSims)
Vcrit = Vp[k]
# equation 18 of McFadden and McElhinny, 1990 calculates the critical
# value of R (Rwc)
Rwc = Sr - (old_div(Vcrit, 2))
# following equation 19 of McFadden and McElhinny (1990) the critical
# angle is calculated. If the observed angle (also calculated below)
# between the data set means exceeds the critical angle the hypothesis
# of a common mean direction may be rejected at the 95% confidence
# level. The critical angle is simply a different way to present
# Watson's V parameter so it makes sense to use the Watson V parameter
# in comparison with the critical value of V for considering the test
# results. What calculating the critical angle allows for is the
# classification of McFadden and McElhinny (1990) to be made
# for data sets that are consistent with sharing a common mean.
k1 = pars_1['k']
k2 = pars_2['k']
R1 = pars_1['r']
R2 = pars_2['r']
critical_angle = np.degrees(np.arccos(old_div(((Rwc**2) - ((k1 * R1)**2)
- ((k2 * R2)**2)),
(2 * k1 * R1 * k2 * R2))))
D1 = (pars_1['dec'], pars_1['inc'])
D2 = (pars_2['dec'], pars_2['inc'])
angle = pmag.angle(D1, D2)
if print_result == True:
print("Results of Watson V test: ")
print("")
print("Watson's V: " '%.1f' % (V))
print("Critical value of V: " '%.1f' % (Vcrit))
if V < Vcrit:
if print_result == True:
print('"Pass": Since V is less than Vcrit, the null hypothesis')
print('that the two populations are drawn from distributions')
print('that share a common mean direction can not be rejected.')
result = 1
elif V > Vcrit:
if print_result == True:
print('"Fail": Since V is greater than Vcrit, the two means can')
print('be distinguished at the 95% confidence level.')
result = 0
if print_result == True:
print("")
print("M&M1990 classification:")
print("")
print("Angle between data set means: " '%.1f' % (angle))
print("Critical angle for M&M1990: " '%.1f' % (critical_angle))
if print_result == True:
if V > Vcrit:
print("")
elif V < Vcrit:
if critical_angle < 5:
print("The McFadden and McElhinny (1990) classification for")
print("this test is: 'A'")
elif critical_angle < 10:
print("The McFadden and McElhinny (1990) classification for")
print("this test is: 'B'")
elif critical_angle < 20:
print("The McFadden and McElhinny (1990) classification for")
print("this test is: 'C'")
else:
print("The McFadden and McElhinny (1990) classification for")
print("this test is: 'INDETERMINATE;")
if plot == 'yes':
CDF = {'cdf': 1}
# pmagplotlib.plot_init(CDF['cdf'],5,5)
plt.figure(figsize=(3.5, 2.5))
p1 = pmagplotlib.plot_cdf(CDF['cdf'], Vp, "Watson's V", 'r', "")
p2 = pmagplotlib.plot_vs(CDF['cdf'], [V], 'g', '-')
p3 = pmagplotlib.plot_vs(CDF['cdf'], [Vp[k]], 'b', '--')
# pmagplotlib.draw_figs(CDF)
if save == True:
plt.savefig(os.path.join(
save_folder, 'common_mean_watson') + '.' + fmt)
pmagplotlib.show_fig(CDF['cdf'])
return result, angle[0], critical_angle | Conduct a Watson V test for a common mean on two directional data sets.
This function calculates Watson's V statistic from input files through
Monte Carlo simulation in order to test whether two populations of
directional data could have been drawn from a common mean. The critical
angle between the two sample mean directions and the corresponding
McFadden and McElhinny (1990) classification is printed.
Parameters
----------
Data1 : a nested list of directional data [dec,inc] (a di_block)
Data2 : a nested list of directional data [dec,inc] (a di_block)
NumSims : number of Monte Carlo simulations (default is 5000)
print_result : default is to print the test result (True)
plot : the default is no plot ('no'). Putting 'yes' will the plot the CDF
from the Monte Carlo simulations.
save : optional save of plots (default is False)
save_folder : path to where plots will be saved (default is current)
fmt : format of figures to be saved (default is 'svg')
Returns
-------
printed text : text describing the test result is printed
result : a boolean where 0 is fail and 1 is pass
angle : angle between the Fisher means of the two data sets
critical_angle : critical angle for the test to pass
Examples
--------
Develop two populations of directions using ``ipmag.fishrot``. Use the
function to determine if they share a common mean.
>>> directions_A = ipmag.fishrot(k=20, n=30, dec=40, inc=60)
>>> directions_B = ipmag.fishrot(k=35, n=25, dec=42, inc=57)
>>> ipmag.common_mean_watson(directions_A, directions_B) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L850-L1012 |
PmagPy/PmagPy | pmagpy/ipmag.py | fishqq | def fishqq(lon=None, lat=None, di_block=None):
"""
Test whether a distribution is Fisherian and make a corresponding Q-Q plot.
The Q-Q plot shows the data plotted against the value expected from a
Fisher distribution. The first plot is the uniform plot which is the
Fisher model distribution in terms of longitude (declination). The second
plot is the exponential plot which is the Fisher model distribution in terms
of latitude (inclination). In addition to the plots, the test statistics Mu
(uniform) and Me (exponential) are calculated and compared against the
critical test values. If Mu or Me are too large in comparision to the test
statistics, the hypothesis that the distribution is Fisherian is rejected
(see Fisher et al., 1987).
Parameters:
-----------
lon : longitude or declination of the data
lat : latitude or inclination of the data
or
di_block: a nested list of [dec,inc]
A di_block can be provided in which case it will be used instead of
dec, inc lists.
Output:
-----------
dictionary containing
lon : mean longitude (or declination)
lat : mean latitude (or inclination)
N : number of vectors
Mu : Mu test statistic value for the data
Mu_critical : critical value for Mu
Me : Me test statistic value for the data
Me_critical : critical value for Me
if the data has two modes with N >=10 (N and R)
two of these dictionaries will be returned
Examples
--------
In this example, directions are sampled from a Fisher distribution using
``ipmag.fishrot`` and then the ``ipmag.fishqq`` function is used to test
whether that distribution is Fisherian:
>>> directions = ipmag.fishrot(k=40, n=50, dec=200, inc=50)
>>> ipmag.fishqq(di_block = directions)
{'Dec': 199.73564290371894,
'Inc': 49.017612342358298,
'Me': 0.78330310031220352,
'Me_critical': 1.094,
'Mode': 'Mode 1',
'Mu': 0.69915926146177099,
'Mu_critical': 1.207,
'N': 50,
'Test_result': 'consistent with Fisherian model'}
The above example passed a di_block to the function as an input. Lists of
paired declination and inclination can also be used as inputs. Here the
directions di_block is unpacked to separate declination and inclination
lists using the ``ipmag.unpack_di_block`` functionwhich are then used as
input to fishqq:
>>> dec_list, inc_list = ipmag.unpack_di_block(directions)
>>> ipmag.fishqq(lon=dec_list, lat=inc_list)
"""
if di_block is None:
all_dirs = make_di_block(lon, lat)
else:
all_dirs = di_block
ppars = pmag.doprinc(all_dirs) # get principal directions
rDIs = []
nDIs = []
QQ_dict1 = {}
QQ_dict2 = {}
for rec in all_dirs:
angle = pmag.angle([rec[0], rec[1]], [ppars['dec'], ppars['inc']])
if angle > 90.:
rDIs.append(rec)
else:
nDIs.append(rec)
if len(rDIs) >= 10 or len(nDIs) >= 10:
D1, I1 = [], []
QQ = {'unf': 1, 'exp': 2}
if len(nDIs) < 10:
ppars = pmag.doprinc(rDIs) # get principal directions
Drbar, Irbar = ppars['dec'] - 180., -ppars['inc']
Nr = len(rDIs)
for di in rDIs:
d, irot = pmag.dotilt(
di[0], di[1], Drbar - 180., 90. - Irbar) # rotate to mean
drot = d - 180.
if drot < 0:
drot = drot + 360.
D1.append(drot)
I1.append(irot)
Dtit = 'Mode 2 Declinations'
Itit = 'Mode 2 Inclinations'
else:
ppars = pmag.doprinc(nDIs) # get principal directions
Dnbar, Inbar = ppars['dec'], ppars['inc']
Nn = len(nDIs)
for di in nDIs:
d, irot = pmag.dotilt(
di[0], di[1], Dnbar - 180., 90. - Inbar) # rotate to mean
drot = d - 180.
if drot < 0:
drot = drot + 360.
D1.append(drot)
I1.append(irot)
Dtit = 'Mode 1 Declinations'
Itit = 'Mode 1 Inclinations'
plt.figure(figsize=(6, 3))
Mu_n, Mu_ncr = pmagplotlib.plot_qq_unf(
QQ['unf'], D1, Dtit, subplot=True) # make plot
Me_n, Me_ncr = pmagplotlib.plot_qq_exp(
QQ['exp'], I1, Itit, subplot=True) # make plot
plt.tight_layout()
if Mu_n <= Mu_ncr and Me_n <= Me_ncr:
F_n = 'consistent with Fisherian model'
else:
F_n = 'Fisherian model rejected'
QQ_dict1['Mode'] = 'Mode 1'
QQ_dict1['Dec'] = Dnbar
QQ_dict1['Inc'] = Inbar
QQ_dict1['N'] = Nn
QQ_dict1['Mu'] = Mu_n
QQ_dict1['Mu_critical'] = Mu_ncr
QQ_dict1['Me'] = Me_n
QQ_dict1['Me_critical'] = Me_ncr
QQ_dict1['Test_result'] = F_n
if len(rDIs) > 10 and len(nDIs) > 10:
D2, I2 = [], []
ppars = pmag.doprinc(rDIs) # get principal directions
Drbar, Irbar = ppars['dec'] - 180., -ppars['inc']
Nr = len(rDIs)
for di in rDIs:
d, irot = pmag.dotilt(
di[0], di[1], Drbar - 180., 90. - Irbar) # rotate to mean
drot = d - 180.
if drot < 0:
drot = drot + 360.
D2.append(drot)
I2.append(irot)
Dtit = 'Mode 2 Declinations'
Itit = 'Mode 2 Inclinations'
plt.figure(figsize=(6, 3))
Mu_r, Mu_rcr = pmagplotlib.plot_qq_unf(
QQ['unf'], D2, Dtit, subplot=True) # make plot
Me_r, Me_rcr = pmagplotlib.plot_qq_exp(
QQ['exp'], I2, Itit, subplot=True) # make plot
plt.tight_layout()
if Mu_r <= Mu_rcr and Me_r <= Me_rcr:
F_r = 'consistent with Fisherian model'
else:
F_r = 'Fisherian model rejected'
QQ_dict2['Mode'] = 'Mode 2'
QQ_dict2['Dec'] = Drbar
QQ_dict2['Inc'] = Irbar
QQ_dict2['N'] = Nr
QQ_dict2['Mu'] = Mu_r
QQ_dict2['Mu_critical'] = Mu_rcr
QQ_dict2['Me'] = Me_r
QQ_dict2['Me_critical'] = Me_rcr
QQ_dict2['Test_result'] = F_r
if QQ_dict2:
return QQ_dict1, QQ_dict2
elif QQ_dict1:
return QQ_dict1
else:
print('you need N> 10 for at least one mode') | python | def fishqq(lon=None, lat=None, di_block=None):
"""
Test whether a distribution is Fisherian and make a corresponding Q-Q plot.
The Q-Q plot shows the data plotted against the value expected from a
Fisher distribution. The first plot is the uniform plot which is the
Fisher model distribution in terms of longitude (declination). The second
plot is the exponential plot which is the Fisher model distribution in terms
of latitude (inclination). In addition to the plots, the test statistics Mu
(uniform) and Me (exponential) are calculated and compared against the
critical test values. If Mu or Me are too large in comparision to the test
statistics, the hypothesis that the distribution is Fisherian is rejected
(see Fisher et al., 1987).
Parameters:
-----------
lon : longitude or declination of the data
lat : latitude or inclination of the data
or
di_block: a nested list of [dec,inc]
A di_block can be provided in which case it will be used instead of
dec, inc lists.
Output:
-----------
dictionary containing
lon : mean longitude (or declination)
lat : mean latitude (or inclination)
N : number of vectors
Mu : Mu test statistic value for the data
Mu_critical : critical value for Mu
Me : Me test statistic value for the data
Me_critical : critical value for Me
if the data has two modes with N >=10 (N and R)
two of these dictionaries will be returned
Examples
--------
In this example, directions are sampled from a Fisher distribution using
``ipmag.fishrot`` and then the ``ipmag.fishqq`` function is used to test
whether that distribution is Fisherian:
>>> directions = ipmag.fishrot(k=40, n=50, dec=200, inc=50)
>>> ipmag.fishqq(di_block = directions)
{'Dec': 199.73564290371894,
'Inc': 49.017612342358298,
'Me': 0.78330310031220352,
'Me_critical': 1.094,
'Mode': 'Mode 1',
'Mu': 0.69915926146177099,
'Mu_critical': 1.207,
'N': 50,
'Test_result': 'consistent with Fisherian model'}
The above example passed a di_block to the function as an input. Lists of
paired declination and inclination can also be used as inputs. Here the
directions di_block is unpacked to separate declination and inclination
lists using the ``ipmag.unpack_di_block`` functionwhich are then used as
input to fishqq:
>>> dec_list, inc_list = ipmag.unpack_di_block(directions)
>>> ipmag.fishqq(lon=dec_list, lat=inc_list)
"""
if di_block is None:
all_dirs = make_di_block(lon, lat)
else:
all_dirs = di_block
ppars = pmag.doprinc(all_dirs) # get principal directions
rDIs = []
nDIs = []
QQ_dict1 = {}
QQ_dict2 = {}
for rec in all_dirs:
angle = pmag.angle([rec[0], rec[1]], [ppars['dec'], ppars['inc']])
if angle > 90.:
rDIs.append(rec)
else:
nDIs.append(rec)
if len(rDIs) >= 10 or len(nDIs) >= 10:
D1, I1 = [], []
QQ = {'unf': 1, 'exp': 2}
if len(nDIs) < 10:
ppars = pmag.doprinc(rDIs) # get principal directions
Drbar, Irbar = ppars['dec'] - 180., -ppars['inc']
Nr = len(rDIs)
for di in rDIs:
d, irot = pmag.dotilt(
di[0], di[1], Drbar - 180., 90. - Irbar) # rotate to mean
drot = d - 180.
if drot < 0:
drot = drot + 360.
D1.append(drot)
I1.append(irot)
Dtit = 'Mode 2 Declinations'
Itit = 'Mode 2 Inclinations'
else:
ppars = pmag.doprinc(nDIs) # get principal directions
Dnbar, Inbar = ppars['dec'], ppars['inc']
Nn = len(nDIs)
for di in nDIs:
d, irot = pmag.dotilt(
di[0], di[1], Dnbar - 180., 90. - Inbar) # rotate to mean
drot = d - 180.
if drot < 0:
drot = drot + 360.
D1.append(drot)
I1.append(irot)
Dtit = 'Mode 1 Declinations'
Itit = 'Mode 1 Inclinations'
plt.figure(figsize=(6, 3))
Mu_n, Mu_ncr = pmagplotlib.plot_qq_unf(
QQ['unf'], D1, Dtit, subplot=True) # make plot
Me_n, Me_ncr = pmagplotlib.plot_qq_exp(
QQ['exp'], I1, Itit, subplot=True) # make plot
plt.tight_layout()
if Mu_n <= Mu_ncr and Me_n <= Me_ncr:
F_n = 'consistent with Fisherian model'
else:
F_n = 'Fisherian model rejected'
QQ_dict1['Mode'] = 'Mode 1'
QQ_dict1['Dec'] = Dnbar
QQ_dict1['Inc'] = Inbar
QQ_dict1['N'] = Nn
QQ_dict1['Mu'] = Mu_n
QQ_dict1['Mu_critical'] = Mu_ncr
QQ_dict1['Me'] = Me_n
QQ_dict1['Me_critical'] = Me_ncr
QQ_dict1['Test_result'] = F_n
if len(rDIs) > 10 and len(nDIs) > 10:
D2, I2 = [], []
ppars = pmag.doprinc(rDIs) # get principal directions
Drbar, Irbar = ppars['dec'] - 180., -ppars['inc']
Nr = len(rDIs)
for di in rDIs:
d, irot = pmag.dotilt(
di[0], di[1], Drbar - 180., 90. - Irbar) # rotate to mean
drot = d - 180.
if drot < 0:
drot = drot + 360.
D2.append(drot)
I2.append(irot)
Dtit = 'Mode 2 Declinations'
Itit = 'Mode 2 Inclinations'
plt.figure(figsize=(6, 3))
Mu_r, Mu_rcr = pmagplotlib.plot_qq_unf(
QQ['unf'], D2, Dtit, subplot=True) # make plot
Me_r, Me_rcr = pmagplotlib.plot_qq_exp(
QQ['exp'], I2, Itit, subplot=True) # make plot
plt.tight_layout()
if Mu_r <= Mu_rcr and Me_r <= Me_rcr:
F_r = 'consistent with Fisherian model'
else:
F_r = 'Fisherian model rejected'
QQ_dict2['Mode'] = 'Mode 2'
QQ_dict2['Dec'] = Drbar
QQ_dict2['Inc'] = Irbar
QQ_dict2['N'] = Nr
QQ_dict2['Mu'] = Mu_r
QQ_dict2['Mu_critical'] = Mu_rcr
QQ_dict2['Me'] = Me_r
QQ_dict2['Me_critical'] = Me_rcr
QQ_dict2['Test_result'] = F_r
if QQ_dict2:
return QQ_dict1, QQ_dict2
elif QQ_dict1:
return QQ_dict1
else:
print('you need N> 10 for at least one mode') | Test whether a distribution is Fisherian and make a corresponding Q-Q plot.
The Q-Q plot shows the data plotted against the value expected from a
Fisher distribution. The first plot is the uniform plot which is the
Fisher model distribution in terms of longitude (declination). The second
plot is the exponential plot which is the Fisher model distribution in terms
of latitude (inclination). In addition to the plots, the test statistics Mu
(uniform) and Me (exponential) are calculated and compared against the
critical test values. If Mu or Me are too large in comparision to the test
statistics, the hypothesis that the distribution is Fisherian is rejected
(see Fisher et al., 1987).
Parameters:
-----------
lon : longitude or declination of the data
lat : latitude or inclination of the data
or
di_block: a nested list of [dec,inc]
A di_block can be provided in which case it will be used instead of
dec, inc lists.
Output:
-----------
dictionary containing
lon : mean longitude (or declination)
lat : mean latitude (or inclination)
N : number of vectors
Mu : Mu test statistic value for the data
Mu_critical : critical value for Mu
Me : Me test statistic value for the data
Me_critical : critical value for Me
if the data has two modes with N >=10 (N and R)
two of these dictionaries will be returned
Examples
--------
In this example, directions are sampled from a Fisher distribution using
``ipmag.fishrot`` and then the ``ipmag.fishqq`` function is used to test
whether that distribution is Fisherian:
>>> directions = ipmag.fishrot(k=40, n=50, dec=200, inc=50)
>>> ipmag.fishqq(di_block = directions)
{'Dec': 199.73564290371894,
'Inc': 49.017612342358298,
'Me': 0.78330310031220352,
'Me_critical': 1.094,
'Mode': 'Mode 1',
'Mu': 0.69915926146177099,
'Mu_critical': 1.207,
'N': 50,
'Test_result': 'consistent with Fisherian model'}
The above example passed a di_block to the function as an input. Lists of
paired declination and inclination can also be used as inputs. Here the
directions di_block is unpacked to separate declination and inclination
lists using the ``ipmag.unpack_di_block`` functionwhich are then used as
input to fishqq:
>>> dec_list, inc_list = ipmag.unpack_di_block(directions)
>>> ipmag.fishqq(lon=dec_list, lat=inc_list) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L1201-L1376 |
PmagPy/PmagPy | pmagpy/ipmag.py | lat_from_inc | def lat_from_inc(inc, a95=None):
"""
Calculate paleolatitude from inclination using the dipole equation
Required Parameter
----------
inc: (paleo)magnetic inclination in degrees
Optional Parameter
----------
a95: 95% confidence interval from Fisher mean
Returns
----------
if a95 is provided paleo_lat, paleo_lat_max, paleo_lat_min are returned
otherwise, it just returns paleo_lat
"""
rad = old_div(np.pi, 180.)
paleo_lat = old_div(np.arctan(0.5 * np.tan(inc * rad)), rad)
if a95 is not None:
paleo_lat_max = old_div(
np.arctan(0.5 * np.tan((inc + a95) * rad)), rad)
paleo_lat_min = old_div(
np.arctan(0.5 * np.tan((inc - a95) * rad)), rad)
return paleo_lat, paleo_lat_max, paleo_lat_min
else:
return paleo_lat | python | def lat_from_inc(inc, a95=None):
"""
Calculate paleolatitude from inclination using the dipole equation
Required Parameter
----------
inc: (paleo)magnetic inclination in degrees
Optional Parameter
----------
a95: 95% confidence interval from Fisher mean
Returns
----------
if a95 is provided paleo_lat, paleo_lat_max, paleo_lat_min are returned
otherwise, it just returns paleo_lat
"""
rad = old_div(np.pi, 180.)
paleo_lat = old_div(np.arctan(0.5 * np.tan(inc * rad)), rad)
if a95 is not None:
paleo_lat_max = old_div(
np.arctan(0.5 * np.tan((inc + a95) * rad)), rad)
paleo_lat_min = old_div(
np.arctan(0.5 * np.tan((inc - a95) * rad)), rad)
return paleo_lat, paleo_lat_max, paleo_lat_min
else:
return paleo_lat | Calculate paleolatitude from inclination using the dipole equation
Required Parameter
----------
inc: (paleo)magnetic inclination in degrees
Optional Parameter
----------
a95: 95% confidence interval from Fisher mean
Returns
----------
if a95 is provided paleo_lat, paleo_lat_max, paleo_lat_min are returned
otherwise, it just returns paleo_lat | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L1379-L1405 |
PmagPy/PmagPy | pmagpy/ipmag.py | lat_from_pole | def lat_from_pole(ref_loc_lon, ref_loc_lat, pole_plon, pole_plat):
"""
Calculate paleolatitude for a reference location based on a paleomagnetic pole
Required Parameters
----------
ref_loc_lon: longitude of reference location in degrees
ref_loc_lat: latitude of reference location
pole_plon: paleopole longitude in degrees
pole_plat: paleopole latitude in degrees
"""
ref_loc = (ref_loc_lon, ref_loc_lat)
pole = (pole_plon, pole_plat)
paleo_lat = 90 - pmag.angle(pole, ref_loc)
return float(paleo_lat) | python | def lat_from_pole(ref_loc_lon, ref_loc_lat, pole_plon, pole_plat):
"""
Calculate paleolatitude for a reference location based on a paleomagnetic pole
Required Parameters
----------
ref_loc_lon: longitude of reference location in degrees
ref_loc_lat: latitude of reference location
pole_plon: paleopole longitude in degrees
pole_plat: paleopole latitude in degrees
"""
ref_loc = (ref_loc_lon, ref_loc_lat)
pole = (pole_plon, pole_plat)
paleo_lat = 90 - pmag.angle(pole, ref_loc)
return float(paleo_lat) | Calculate paleolatitude for a reference location based on a paleomagnetic pole
Required Parameters
----------
ref_loc_lon: longitude of reference location in degrees
ref_loc_lat: latitude of reference location
pole_plon: paleopole longitude in degrees
pole_plat: paleopole latitude in degrees | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L1408-L1423 |
PmagPy/PmagPy | pmagpy/ipmag.py | inc_from_lat | def inc_from_lat(lat):
"""
Calculate inclination predicted from latitude using the dipole equation
Parameter
----------
lat : latitude in degrees
Returns
-------
inc : inclination calculated using the dipole equation
"""
rad = old_div(np.pi, 180.)
inc = old_div(np.arctan(2 * np.tan(lat * rad)), rad)
return inc | python | def inc_from_lat(lat):
"""
Calculate inclination predicted from latitude using the dipole equation
Parameter
----------
lat : latitude in degrees
Returns
-------
inc : inclination calculated using the dipole equation
"""
rad = old_div(np.pi, 180.)
inc = old_div(np.arctan(2 * np.tan(lat * rad)), rad)
return inc | Calculate inclination predicted from latitude using the dipole equation
Parameter
----------
lat : latitude in degrees
Returns
-------
inc : inclination calculated using the dipole equation | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L1426-L1440 |
PmagPy/PmagPy | pmagpy/ipmag.py | plot_net | def plot_net(fignum):
"""
Draws circle and tick marks for equal area projection.
"""
# make the perimeter
plt.figure(num=fignum,)
plt.clf()
plt.axis("off")
Dcirc = np.arange(0, 361.)
Icirc = np.zeros(361, 'f')
Xcirc, Ycirc = [], []
for k in range(361):
XY = pmag.dimap(Dcirc[k], Icirc[k])
Xcirc.append(XY[0])
Ycirc.append(XY[1])
plt.plot(Xcirc, Ycirc, 'k')
# put on the tick marks
Xsym, Ysym = [], []
for I in range(10, 100, 10):
XY = pmag.dimap(0., I)
Xsym.append(XY[0])
Ysym.append(XY[1])
plt.plot(Xsym, Ysym, 'k+')
Xsym, Ysym = [], []
for I in range(10, 90, 10):
XY = pmag.dimap(90., I)
Xsym.append(XY[0])
Ysym.append(XY[1])
plt.plot(Xsym, Ysym, 'k+')
Xsym, Ysym = [], []
for I in range(10, 90, 10):
XY = pmag.dimap(180., I)
Xsym.append(XY[0])
Ysym.append(XY[1])
plt.plot(Xsym, Ysym, 'k+')
Xsym, Ysym = [], []
for I in range(10, 90, 10):
XY = pmag.dimap(270., I)
Xsym.append(XY[0])
Ysym.append(XY[1])
plt.plot(Xsym, Ysym, 'k+')
for D in range(0, 360, 10):
Xtick, Ytick = [], []
for I in range(4):
XY = pmag.dimap(D, I)
Xtick.append(XY[0])
Ytick.append(XY[1])
plt.plot(Xtick, Ytick, 'k')
plt.axis("equal")
plt.axis((-1.05, 1.05, -1.05, 1.05)) | python | def plot_net(fignum):
"""
Draws circle and tick marks for equal area projection.
"""
# make the perimeter
plt.figure(num=fignum,)
plt.clf()
plt.axis("off")
Dcirc = np.arange(0, 361.)
Icirc = np.zeros(361, 'f')
Xcirc, Ycirc = [], []
for k in range(361):
XY = pmag.dimap(Dcirc[k], Icirc[k])
Xcirc.append(XY[0])
Ycirc.append(XY[1])
plt.plot(Xcirc, Ycirc, 'k')
# put on the tick marks
Xsym, Ysym = [], []
for I in range(10, 100, 10):
XY = pmag.dimap(0., I)
Xsym.append(XY[0])
Ysym.append(XY[1])
plt.plot(Xsym, Ysym, 'k+')
Xsym, Ysym = [], []
for I in range(10, 90, 10):
XY = pmag.dimap(90., I)
Xsym.append(XY[0])
Ysym.append(XY[1])
plt.plot(Xsym, Ysym, 'k+')
Xsym, Ysym = [], []
for I in range(10, 90, 10):
XY = pmag.dimap(180., I)
Xsym.append(XY[0])
Ysym.append(XY[1])
plt.plot(Xsym, Ysym, 'k+')
Xsym, Ysym = [], []
for I in range(10, 90, 10):
XY = pmag.dimap(270., I)
Xsym.append(XY[0])
Ysym.append(XY[1])
plt.plot(Xsym, Ysym, 'k+')
for D in range(0, 360, 10):
Xtick, Ytick = [], []
for I in range(4):
XY = pmag.dimap(D, I)
Xtick.append(XY[0])
Ytick.append(XY[1])
plt.plot(Xtick, Ytick, 'k')
plt.axis("equal")
plt.axis((-1.05, 1.05, -1.05, 1.05)) | Draws circle and tick marks for equal area projection. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L1443-L1494 |
PmagPy/PmagPy | pmagpy/ipmag.py | plot_di | def plot_di(dec=None, inc=None, di_block=None, color='k', marker='o', markersize=20, legend='no', label='', title='', edge='',alpha=1):
"""
Plot declination, inclination data on an equal area plot.
Before this function is called a plot needs to be initialized with code that looks
something like:
>fignum = 1
>plt.figure(num=fignum,figsize=(10,10),dpi=160)
>ipmag.plot_net(fignum)
Required Parameters
-----------
dec : declination being plotted
inc : inclination being plotted
or
di_block: a nested list of [dec,inc,1.0]
(di_block can be provided instead of dec, inc in which case it will be used)
Optional Parameters (defaults are used if not specified)
-----------
color : the default color is black. Other colors can be chosen (e.g. 'r')
marker : the default marker is a circle ('o')
markersize : default size is 20
label : the default label is blank ('')
legend : the default is no legend ('no'). Putting 'yes' will plot a legend.
edge : marker edge color - if blank, is color of marker
alpha : opacity
"""
X_down = []
X_up = []
Y_down = []
Y_up = []
color_down = []
color_up = []
if di_block is not None:
di_lists = unpack_di_block(di_block)
if len(di_lists) == 3:
dec, inc, intensity = di_lists
if len(di_lists) == 2:
dec, inc = di_lists
try:
length = len(dec)
for n in range(len(dec)):
XY = pmag.dimap(dec[n], inc[n])
if inc[n] >= 0:
X_down.append(XY[0])
Y_down.append(XY[1])
if type(color) == list:
color_down.append(color[n])
else:
color_down.append(color)
else:
X_up.append(XY[0])
Y_up.append(XY[1])
if type(color) == list:
color_up.append(color[n])
else:
color_up.append(color)
except:
XY = pmag.dimap(dec, inc)
if inc >= 0:
X_down.append(XY[0])
Y_down.append(XY[1])
color_down.append(color)
else:
X_up.append(XY[0])
Y_up.append(XY[1])
color_up.append(color)
if len(X_up) > 0:
plt.scatter(X_up, Y_up, facecolors='none', edgecolors=color_up,
s=markersize, marker=marker, label=label,alpha=alpha)
if len(X_down) > 0:
plt.scatter(X_down, Y_down, facecolors=color_down, edgecolors=edge,
s=markersize, marker=marker, label=label,alpha=alpha)
if legend == 'yes':
plt.legend(loc=2)
plt.tight_layout()
if title != "":
plt.title(title) | python | def plot_di(dec=None, inc=None, di_block=None, color='k', marker='o', markersize=20, legend='no', label='', title='', edge='',alpha=1):
    """
    Plot declination, inclination data on an equal area plot.

    Before this function is called a plot needs to be initialized with code
    that looks something like:
    >fignum = 1
    >plt.figure(num=fignum,figsize=(10,10),dpi=160)
    >ipmag.plot_net(fignum)

    Required Parameters
    -----------
    dec : declination being plotted
    inc : inclination being plotted
    or
    di_block: a nested list of [dec,inc,1.0]
    (di_block can be provided instead of dec, inc in which case it will be used)

    Optional Parameters (defaults are used if not specified)
    -----------
    color : the default color is black. Other colors can be chosen (e.g. 'r');
        a list of colors (one per point) may also be given
    marker : the default marker is a circle ('o')
    markersize : default size is 20
    label : the default label is blank ('')
    legend : the default is no legend ('no'). Putting 'yes' will plot a legend.
    edge : marker edge color - if blank, is color of marker
    alpha : opacity
    """
    # "down" holds lower-hemisphere points (inc >= 0, plotted filled),
    # "up" holds upper-hemisphere points (inc < 0, plotted open)
    X_down = []
    X_up = []
    Y_down = []
    Y_up = []
    color_down = []
    color_up = []
    if di_block is not None:
        # di_block overrides dec/inc; it may carry a third intensity column
        di_lists = unpack_di_block(di_block)
        if len(di_lists) == 3:
            dec, inc, intensity = di_lists
        if len(di_lists) == 2:
            dec, inc = di_lists
    try:
        # sequence input: project every direction onto the equal area net
        length = len(dec)
        for n in range(len(dec)):
            XY = pmag.dimap(dec[n], inc[n])
            if inc[n] >= 0:
                X_down.append(XY[0])
                Y_down.append(XY[1])
                if type(color) == list:
                    color_down.append(color[n])
                else:
                    color_down.append(color)
            else:
                X_up.append(XY[0])
                Y_up.append(XY[1])
                if type(color) == list:
                    color_up.append(color[n])
                else:
                    color_up.append(color)
    except:
        # len() failed, so dec/inc are scalars -- plot the single direction
        XY = pmag.dimap(dec, inc)
        if inc >= 0:
            X_down.append(XY[0])
            Y_down.append(XY[1])
            color_down.append(color)
        else:
            X_up.append(XY[0])
            Y_up.append(XY[1])
            color_up.append(color)
    # upper-hemisphere points: open symbols (facecolors='none')
    if len(X_up) > 0:
        plt.scatter(X_up, Y_up, facecolors='none', edgecolors=color_up,
                    s=markersize, marker=marker, label=label,alpha=alpha)
    # lower-hemisphere points: filled symbols
    if len(X_down) > 0:
        plt.scatter(X_down, Y_down, facecolors=color_down, edgecolors=edge,
                    s=markersize, marker=marker, label=label,alpha=alpha)
    if legend == 'yes':
        plt.legend(loc=2)
    plt.tight_layout()
    if title != "":
plt.title(title) | Plot declination, inclination data on an equal area plot.
Before this function is called a plot needs to be initialized with code that looks
something like:
>fignum = 1
>plt.figure(num=fignum,figsize=(10,10),dpi=160)
>ipmag.plot_net(fignum)
Required Parameters
-----------
dec : declination being plotted
inc : inclination being plotted
or
di_block: a nested list of [dec,inc,1.0]
(di_block can be provided instead of dec, inc in which case it will be used)
Optional Parameters (defaults are used if not specified)
-----------
color : the default color is black. Other colors can be chosen (e.g. 'r')
marker : the default marker is a circle ('o')
markersize : default size is 20
label : the default label is blank ('')
legend : the default is no legend ('no'). Putting 'yes' will plot a legend.
edge : marker edge color - if blank, is color of marker
alpha : opacity | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L1501-L1584 |
PmagPy/PmagPy | pmagpy/ipmag.py | plot_di_mean | def plot_di_mean(dec, inc, a95, color='k', marker='o', markersize=20, label='', legend='no'):
"""
Plot a mean direction (declination, inclination) with alpha_95 ellipse on
an equal area plot.
Before this function is called, a plot needs to be initialized with code
that looks something like:
>fignum = 1
>plt.figure(num=fignum,figsize=(10,10),dpi=160)
>ipmag.plot_net(fignum)
Required Parameters
-----------
dec : declination of mean being plotted
inc : inclination of mean being plotted
a95 : a95 confidence ellipse of mean being plotted
Optional Parameters (defaults are used if not specified)
-----------
color : the default color is black. Other colors can be chosen (e.g. 'r').
marker : the default is a circle. Other symbols can be chosen (e.g. 's').
markersize : the default is 20. Other sizes can be chosen.
label : the default is no label. Labels can be assigned.
legend : the default is no legend ('no'). Putting 'yes' will plot a legend.
"""
DI_dimap = pmag.dimap(dec, inc)
if inc < 0:
plt.scatter(DI_dimap[0], DI_dimap[1],
edgecolors=color, facecolors='white',
marker=marker, s=markersize, label=label)
if inc >= 0:
plt.scatter(DI_dimap[0], DI_dimap[1],
edgecolors=color, facecolors=color,
marker=marker, s=markersize, label=label)
Xcirc, Ycirc = [], []
Da95, Ia95 = pmag.circ(dec, inc, a95)
if legend == 'yes':
plt.legend(loc=2)
for k in range(len(Da95)):
XY = pmag.dimap(Da95[k], Ia95[k])
Xcirc.append(XY[0])
Ycirc.append(XY[1])
plt.plot(Xcirc, Ycirc, c=color)
plt.tight_layout() | python | def plot_di_mean(dec, inc, a95, color='k', marker='o', markersize=20, label='', legend='no'):
"""
Plot a mean direction (declination, inclination) with alpha_95 ellipse on
an equal area plot.
Before this function is called, a plot needs to be initialized with code
that looks something like:
>fignum = 1
>plt.figure(num=fignum,figsize=(10,10),dpi=160)
>ipmag.plot_net(fignum)
Required Parameters
-----------
dec : declination of mean being plotted
inc : inclination of mean being plotted
a95 : a95 confidence ellipse of mean being plotted
Optional Parameters (defaults are used if not specified)
-----------
color : the default color is black. Other colors can be chosen (e.g. 'r').
marker : the default is a circle. Other symbols can be chosen (e.g. 's').
markersize : the default is 20. Other sizes can be chosen.
label : the default is no label. Labels can be assigned.
legend : the default is no legend ('no'). Putting 'yes' will plot a legend.
"""
DI_dimap = pmag.dimap(dec, inc)
if inc < 0:
plt.scatter(DI_dimap[0], DI_dimap[1],
edgecolors=color, facecolors='white',
marker=marker, s=markersize, label=label)
if inc >= 0:
plt.scatter(DI_dimap[0], DI_dimap[1],
edgecolors=color, facecolors=color,
marker=marker, s=markersize, label=label)
Xcirc, Ycirc = [], []
Da95, Ia95 = pmag.circ(dec, inc, a95)
if legend == 'yes':
plt.legend(loc=2)
for k in range(len(Da95)):
XY = pmag.dimap(Da95[k], Ia95[k])
Xcirc.append(XY[0])
Ycirc.append(XY[1])
plt.plot(Xcirc, Ycirc, c=color)
plt.tight_layout() | Plot a mean direction (declination, inclination) with alpha_95 ellipse on
an equal area plot.
Before this function is called, a plot needs to be initialized with code
that looks something like:
>fignum = 1
>plt.figure(num=fignum,figsize=(10,10),dpi=160)
>ipmag.plot_net(fignum)
Required Parameters
-----------
dec : declination of mean being plotted
inc : inclination of mean being plotted
a95 : a95 confidence ellipse of mean being plotted
Optional Parameters (defaults are used if not specified)
-----------
color : the default color is black. Other colors can be chosen (e.g. 'r').
marker : the default is a circle. Other symbols can be chosen (e.g. 's').
markersize : the default is 20. Other sizes can be chosen.
label : the default is no label. Labels can be assigned.
legend : the default is no legend ('no'). Putting 'yes' will plot a legend. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L1587-L1630 |
PmagPy/PmagPy | pmagpy/ipmag.py | plot_di_mean_bingham | def plot_di_mean_bingham(bingham_dictionary, fignum=1, color='k', marker='o', markersize=20, label='', legend='no'):
    """
    Plot a Bingham mean direction with its confidence ellipse.

    Thin wrapper around plot_di_mean_ellipse; see that function for
    parameter descriptions.
    """
    plot_di_mean_ellipse(bingham_dictionary, fignum=fignum, color=color,
marker=marker, markersize=markersize, label=label, legend=legend) | python | def plot_di_mean_bingham(bingham_dictionary, fignum=1, color='k', marker='o', markersize=20, label='', legend='no'):
"""
see plot_di_mean_ellipse
"""
plot_di_mean_ellipse(bingham_dictionary, fignum=fignum, color=color,
marker=marker, markersize=markersize, label=label, legend=legend) | see plot_di_mean_ellipse | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L1633-L1638 |
PmagPy/PmagPy | pmagpy/ipmag.py | plot_di_mean_ellipse | def plot_di_mean_ellipse(dictionary, fignum=1, color='k', marker='o', markersize=20, label='', legend='no'):
"""
Plot a mean direction (declination, inclination) confidence ellipse.
Parameters
-----------
dictionary : a dictionary generated by the pmag.dobingham or pmag.dokent funcitons
"""
pars = []
pars.append(dictionary['dec'])
pars.append(dictionary['inc'])
pars.append(dictionary['Zeta'])
pars.append(dictionary['Zdec'])
pars.append(dictionary['Zinc'])
pars.append(dictionary['Eta'])
pars.append(dictionary['Edec'])
pars.append(dictionary['Einc'])
DI_dimap = pmag.dimap(dictionary['dec'], dictionary['inc'])
if dictionary['inc'] < 0:
plt.scatter(DI_dimap[0], DI_dimap[1],
edgecolors=color, facecolors='white',
marker=marker, s=markersize, label=label)
if dictionary['inc'] >= 0:
plt.scatter(DI_dimap[0], DI_dimap[1],
edgecolors=color, facecolors=color,
marker=marker, s=markersize, label=label)
pmagplotlib.plot_ell(fignum, pars, color, 0, 1) | python | def plot_di_mean_ellipse(dictionary, fignum=1, color='k', marker='o', markersize=20, label='', legend='no'):
"""
Plot a mean direction (declination, inclination) confidence ellipse.
Parameters
-----------
dictionary : a dictionary generated by the pmag.dobingham or pmag.dokent funcitons
"""
pars = []
pars.append(dictionary['dec'])
pars.append(dictionary['inc'])
pars.append(dictionary['Zeta'])
pars.append(dictionary['Zdec'])
pars.append(dictionary['Zinc'])
pars.append(dictionary['Eta'])
pars.append(dictionary['Edec'])
pars.append(dictionary['Einc'])
DI_dimap = pmag.dimap(dictionary['dec'], dictionary['inc'])
if dictionary['inc'] < 0:
plt.scatter(DI_dimap[0], DI_dimap[1],
edgecolors=color, facecolors='white',
marker=marker, s=markersize, label=label)
if dictionary['inc'] >= 0:
plt.scatter(DI_dimap[0], DI_dimap[1],
edgecolors=color, facecolors=color,
marker=marker, s=markersize, label=label)
pmagplotlib.plot_ell(fignum, pars, color, 0, 1) | Plot a mean direction (declination, inclination) confidence ellipse.
Parameters
-----------
dictionary : a dictionary generated by the pmag.dobingham or pmag.dokent funcitons | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L1641-L1669 |
PmagPy/PmagPy | pmagpy/ipmag.py | make_orthographic_map | def make_orthographic_map(central_longitude=0, central_latitude=0, figsize=(8, 8),
add_land=True, land_color='tan', add_ocean=False, ocean_color='lightblue', grid_lines=True,
lat_grid=[-80., -60., -30.,
0., 30., 60., 80.],
lon_grid=[-180., -150., -120., -90., -60., -30., 0., 30., 60., 90., 120., 150., 180.]):
'''
Function creates and returns an orthographic map projection using cartopy
Example
-------
>>> map_axis = make_orthographic_map(central_longitude=200,central_latitude=30)
Optional Parameters
-----------
central_longitude : central longitude of projection (default is 0)
central_latitude : central latitude of projection (default is 0)
figsize : size of the figure (default is 8x8)
add_land : chose whether land is plotted on map (default is true)
land_color : specify land color (default is 'tan')
add_ocean : chose whether land is plotted on map (default is False, change to True to plot)
ocean_color : specify ocean color (default is 'lightblue')
grid_lines : chose whether gird lines are plotted on map (default is true)
lat_grid : specify the latitude grid (default is 30 degree spacing)
lon_grid : specify the longitude grid (default is 30 degree spacing)
'''
if not has_cartopy:
print('-W- cartopy must be installed to run ipmag.make_orthographic_map')
return
fig = plt.figure(figsize=figsize)
map_projection = ccrs.Orthographic(
central_longitude=central_longitude, central_latitude=central_latitude)
ax = plt.axes(projection=map_projection)
ax.set_global()
if add_ocean == True:
ax.add_feature(cartopy.feature.OCEAN, zorder=0, facecolor=ocean_color)
if add_land == True:
ax.add_feature(cartopy.feature.LAND, zorder=0,
facecolor=land_color, edgecolor='black')
if grid_lines == True:
ax.gridlines(xlocs=lon_grid, ylocs=lat_grid, linewidth=1,
color='black', linestyle='dotted')
return ax | python | def make_orthographic_map(central_longitude=0, central_latitude=0, figsize=(8, 8),
add_land=True, land_color='tan', add_ocean=False, ocean_color='lightblue', grid_lines=True,
lat_grid=[-80., -60., -30.,
0., 30., 60., 80.],
lon_grid=[-180., -150., -120., -90., -60., -30., 0., 30., 60., 90., 120., 150., 180.]):
    '''
    Function creates and returns an orthographic map projection using cartopy

    Example
    -------
    >>> map_axis = make_orthographic_map(central_longitude=200,central_latitude=30)

    Optional Parameters
    -----------
    central_longitude : central longitude of projection (default is 0)
    central_latitude : central latitude of projection (default is 0)
    figsize : size of the figure (default is 8x8)
    add_land : chose whether land is plotted on map (default is true)
    land_color : specify land color (default is 'tan')
    add_ocean : chose whether land is plotted on map (default is False, change to True to plot)
    ocean_color : specify ocean color (default is 'lightblue')
    grid_lines : chose whether gird lines are plotted on map (default is true)
    lat_grid : specify the latitude grid (default is 30 degree spacing)
    lon_grid : specify the longitude grid (default is 30 degree spacing)
    '''
    # cartopy is an optional dependency; warn and return None if absent
    if not has_cartopy:
        print('-W- cartopy must be installed to run ipmag.make_orthographic_map')
        return
    fig = plt.figure(figsize=figsize)
    map_projection = ccrs.Orthographic(
        central_longitude=central_longitude, central_latitude=central_latitude)
    ax = plt.axes(projection=map_projection)
    ax.set_global()
    # optional filled layers are drawn beneath any data (zorder=0)
    if add_ocean == True:
        ax.add_feature(cartopy.feature.OCEAN, zorder=0, facecolor=ocean_color)
    if add_land == True:
        ax.add_feature(cartopy.feature.LAND, zorder=0,
                       facecolor=land_color, edgecolor='black')
    if grid_lines == True:
        ax.gridlines(xlocs=lon_grid, ylocs=lat_grid, linewidth=1,
                     color='black', linestyle='dotted')
return ax | Function creates and returns an orthographic map projection using cartopy
Example
-------
>>> map_axis = make_orthographic_map(central_longitude=200,central_latitude=30)
Optional Parameters
-----------
central_longitude : central longitude of projection (default is 0)
central_latitude : central latitude of projection (default is 0)
figsize : size of the figure (default is 8x8)
add_land : chose whether land is plotted on map (default is true)
land_color : specify land color (default is 'tan')
add_ocean : chose whether land is plotted on map (default is False, change to True to plot)
ocean_color : specify ocean color (default is 'lightblue')
grid_lines : chose whether gird lines are plotted on map (default is true)
lat_grid : specify the latitude grid (default is 30 degree spacing)
lon_grid : specify the longitude grid (default is 30 degree spacing) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L1672-L1713 |
PmagPy/PmagPy | pmagpy/ipmag.py | plot_pole | def plot_pole(map_axis, plon, plat, A95, label='', color='k', edgecolor='k', marker='o', markersize=20, legend='no'):
"""
This function plots a paleomagnetic pole and A95 error ellipse on a cartopy map axis.
Before this function is called, a plot needs to be initialized with code
such as that in the make_orthographic_map function.
Example
-------
>>> plon = 200
>>> plat = 60
>>> A95 = 6
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200,central_latitude=30)
>>> ipmag.plot_pole(map_axis, plon, plat, A95 ,color='red',markersize=40)
Required Parameters
-----------
map_axis : the name of the current map axis that has been developed using cartopy
plon : the longitude of the paleomagnetic pole being plotted (in degrees E)
plat : the latitude of the paleomagnetic pole being plotted (in degrees)
A95 : the A_95 confidence ellipse of the paleomagnetic pole (in degrees)
Optional Parameters (defaults are used if not specified)
-----------
color : the default color is black. Other colors can be chosen (e.g. 'r')
marker : the default is a circle. Other symbols can be chosen (e.g. 's')
markersize : the default is 20. Other size can be chosen
label : the default is no label. Labels can be assigned.
legend : the default is no legend ('no'). Putting 'yes' will plot a legend.
"""
if not has_cartopy:
print('-W- cartopy must be installed to run ipmag.plot_pole')
return
A95_km = A95 * 111.32
map_axis.scatter(plon, plat, marker=marker,
color=color, edgecolors=edgecolor, s=markersize,
label=label, zorder=101, transform=ccrs.Geodetic())
equi(map_axis, plon, plat, A95_km, color)
if legend == 'yes':
plt.legend(loc=2) | python | def plot_pole(map_axis, plon, plat, A95, label='', color='k', edgecolor='k', marker='o', markersize=20, legend='no'):
    """
    This function plots a paleomagnetic pole and A95 error ellipse on a cartopy map axis.

    Before this function is called, a plot needs to be initialized with code
    such as that in the make_orthographic_map function.

    Example
    -------
    >>> plon = 200
    >>> plat = 60
    >>> A95 = 6
    >>> map_axis = ipmag.make_orthographic_map(central_longitude=200,central_latitude=30)
    >>> ipmag.plot_pole(map_axis, plon, plat, A95 ,color='red',markersize=40)

    Required Parameters
    -----------
    map_axis : the name of the current map axis that has been developed using cartopy
    plon : the longitude of the paleomagnetic pole being plotted (in degrees E)
    plat : the latitude of the paleomagnetic pole being plotted (in degrees)
    A95 : the A_95 confidence ellipse of the paleomagnetic pole (in degrees)

    Optional Parameters (defaults are used if not specified)
    -----------
    color : the default color is black. Other colors can be chosen (e.g. 'r')
    marker : the default is a circle. Other symbols can be chosen (e.g. 's')
    markersize : the default is 20. Other size can be chosen
    label : the default is no label. Labels can be assigned.
    legend : the default is no legend ('no'). Putting 'yes' will plot a legend.
    """
    # cartopy is an optional dependency; warn and bail out if absent
    if not has_cartopy:
        print('-W- cartopy must be installed to run ipmag.plot_pole')
        return
    # convert the A95 radius from degrees of arc to km (~111.32 km/degree)
    A95_km = A95 * 111.32
    # zorder=101 keeps the pole symbol above the map features
    map_axis.scatter(plon, plat, marker=marker,
                     color=color, edgecolors=edgecolor, s=markersize,
                     label=label, zorder=101, transform=ccrs.Geodetic())
    # draw the A95 confidence circle around the pole
    equi(map_axis, plon, plat, A95_km, color)
    if legend == 'yes':
plt.legend(loc=2) | This function plots a paleomagnetic pole and A95 error ellipse on a cartopy map axis.
Before this function is called, a plot needs to be initialized with code
such as that in the make_orthographic_map function.
Example
-------
>>> plon = 200
>>> plat = 60
>>> A95 = 6
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200,central_latitude=30)
>>> ipmag.plot_pole(map_axis, plon, plat, A95 ,color='red',markersize=40)
Required Parameters
-----------
map_axis : the name of the current map axis that has been developed using cartopy
plon : the longitude of the paleomagnetic pole being plotted (in degrees E)
plat : the latitude of the paleomagnetic pole being plotted (in degrees)
A95 : the A_95 confidence ellipse of the paleomagnetic pole (in degrees)
Optional Parameters (defaults are used if not specified)
-----------
color : the default color is black. Other colors can be chosen (e.g. 'r')
marker : the default is a circle. Other symbols can be chosen (e.g. 's')
markersize : the default is 20. Other size can be chosen
label : the default is no label. Labels can be assigned.
legend : the default is no legend ('no'). Putting 'yes' will plot a legend. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L1800-L1839 |
PmagPy/PmagPy | pmagpy/ipmag.py | plot_poles | def plot_poles(map_axis, plon, plat, A95, label='', color='k', edgecolor='k', marker='o', markersize=20, legend='no'):
"""
This function plots paleomagnetic poles and A95 error ellipses on a cartopy map axis.
Before this function is called, a plot needs to be initialized with code
such as that in the make_orthographic_map function.
Examples
-------
>>> plons = [200, 180, 210]
>>> plats = [60, 40, 35]
>>> A95 = [6, 3, 10]
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200, central_latitude=30)
>>> ipmag.plot_poles(map_axis, plons, plats, A95s, color='red', markersize=40)
>>> plons = [200, 180, 210]
>>> plats = [60, 40, 35]
>>> A95 = [6, 3, 10]
>>> colors = ['red','green','blue']
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200, central_latitude=30)
>>> ipmag.plot_poles(map_axis, plons, plats, A95s, color=colors, markersize=40)
Required Parameters
-----------
map_axis : the name of the current map axis that has been developed using cartopy
plon : the longitude of the paleomagnetic pole being plotted (in degrees E)
plat : the latitude of the paleomagnetic pole being plotted (in degrees)
A95 : the A_95 confidence ellipse of the paleomagnetic pole (in degrees)
Optional Parameters (defaults are used if not specified)
-----------
color : the default color is black. Other colors can be chosen (e.g. 'r')
a list of colors can also be given so that each pole has a distinct color
edgecolor : the default edgecolor is black. Other colors can be chosen (e.g. 'r')
marker : the default is a circle. Other symbols can be chosen (e.g. 's')
markersize : the default is 20. Other size can be chosen
label : the default is no label. Labels can be assigned.
legend : the default is no legend ('no'). Putting 'yes' will plot a legend.
"""
map_axis.scatter(plon, plat, marker=marker,
color=color, edgecolors=edgecolor, s=markersize,
label=label, zorder=101, transform=ccrs.Geodetic())
if isinstance(color,str)==True:
for n in range(0,len(A95)):
A95_km = A95[n] * 111.32
equi(map_axis, plon[n], plat[n], A95_km, color)
else:
for n in range(0,len(A95)):
A95_km = A95[n] * 111.32
equi(map_axis, plon[n], plat[n], A95_km, color[n])
if legend == 'yes':
plt.legend(loc=2) | python | def plot_poles(map_axis, plon, plat, A95, label='', color='k', edgecolor='k', marker='o', markersize=20, legend='no'):
"""
This function plots paleomagnetic poles and A95 error ellipses on a cartopy map axis.
Before this function is called, a plot needs to be initialized with code
such as that in the make_orthographic_map function.
Examples
-------
>>> plons = [200, 180, 210]
>>> plats = [60, 40, 35]
>>> A95 = [6, 3, 10]
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200, central_latitude=30)
>>> ipmag.plot_poles(map_axis, plons, plats, A95s, color='red', markersize=40)
>>> plons = [200, 180, 210]
>>> plats = [60, 40, 35]
>>> A95 = [6, 3, 10]
>>> colors = ['red','green','blue']
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200, central_latitude=30)
>>> ipmag.plot_poles(map_axis, plons, plats, A95s, color=colors, markersize=40)
Required Parameters
-----------
map_axis : the name of the current map axis that has been developed using cartopy
plon : the longitude of the paleomagnetic pole being plotted (in degrees E)
plat : the latitude of the paleomagnetic pole being plotted (in degrees)
A95 : the A_95 confidence ellipse of the paleomagnetic pole (in degrees)
Optional Parameters (defaults are used if not specified)
-----------
color : the default color is black. Other colors can be chosen (e.g. 'r')
a list of colors can also be given so that each pole has a distinct color
edgecolor : the default edgecolor is black. Other colors can be chosen (e.g. 'r')
marker : the default is a circle. Other symbols can be chosen (e.g. 's')
markersize : the default is 20. Other size can be chosen
label : the default is no label. Labels can be assigned.
legend : the default is no legend ('no'). Putting 'yes' will plot a legend.
"""
map_axis.scatter(plon, plat, marker=marker,
color=color, edgecolors=edgecolor, s=markersize,
label=label, zorder=101, transform=ccrs.Geodetic())
if isinstance(color,str)==True:
for n in range(0,len(A95)):
A95_km = A95[n] * 111.32
equi(map_axis, plon[n], plat[n], A95_km, color)
else:
for n in range(0,len(A95)):
A95_km = A95[n] * 111.32
equi(map_axis, plon[n], plat[n], A95_km, color[n])
if legend == 'yes':
plt.legend(loc=2) | This function plots paleomagnetic poles and A95 error ellipses on a cartopy map axis.
Before this function is called, a plot needs to be initialized with code
such as that in the make_orthographic_map function.
Examples
-------
>>> plons = [200, 180, 210]
>>> plats = [60, 40, 35]
>>> A95 = [6, 3, 10]
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200, central_latitude=30)
>>> ipmag.plot_poles(map_axis, plons, plats, A95s, color='red', markersize=40)
>>> plons = [200, 180, 210]
>>> plats = [60, 40, 35]
>>> A95 = [6, 3, 10]
>>> colors = ['red','green','blue']
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200, central_latitude=30)
>>> ipmag.plot_poles(map_axis, plons, plats, A95s, color=colors, markersize=40)
Required Parameters
-----------
map_axis : the name of the current map axis that has been developed using cartopy
plon : the longitude of the paleomagnetic pole being plotted (in degrees E)
plat : the latitude of the paleomagnetic pole being plotted (in degrees)
A95 : the A_95 confidence ellipse of the paleomagnetic pole (in degrees)
Optional Parameters (defaults are used if not specified)
-----------
color : the default color is black. Other colors can be chosen (e.g. 'r')
a list of colors can also be given so that each pole has a distinct color
edgecolor : the default edgecolor is black. Other colors can be chosen (e.g. 'r')
marker : the default is a circle. Other symbols can be chosen (e.g. 's')
markersize : the default is 20. Other size can be chosen
label : the default is no label. Labels can be assigned.
legend : the default is no legend ('no'). Putting 'yes' will plot a legend. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L1842-L1894 |
PmagPy/PmagPy | pmagpy/ipmag.py | plot_pole_basemap | def plot_pole_basemap(mapname, plon, plat, A95, label='', color='k', edgecolor='k', marker='o', markersize=20, legend='no'):
"""
This function plots a paleomagnetic pole and A95 error ellipse on whatever
current map projection has been set using the basemap plotting library.
Before this function is called, a plot needs to be initialized with code
that looks something like:
>from mpl_toolkits.basemap import Basemap
>mapname = Basemap(projection='ortho',lat_0=35,lon_0=200)
>plt.figure(figsize=(6, 6))
>mapname.drawcoastlines(linewidth=0.25)
>mapname.fillcontinents(color='bisque',lake_color='white',zorder=1)
>mapname.drawmapboundary(fill_color='white')
>mapname.drawmeridians(np.arange(0,360,30))
>mapname.drawparallels(np.arange(-90,90,30))
Required Parameters
-----------
mapname : the name of the current map that has been developed using basemap
plon : the longitude of the paleomagnetic pole being plotted (in degrees E)
plat : the latitude of the paleomagnetic pole being plotted (in degrees)
A95 : the A_95 confidence ellipse of the paleomagnetic pole (in degrees)
Optional Parameters (defaults are used if not specified)
-----------
color : the default color is black. Other colors can be chosen (e.g. 'r')
marker : the default is a circle. Other symbols can be chosen (e.g. 's')
markersize : the default is 20. Other size can be chosen
label : the default is no label. Labels can be assigned.
legend : the default is no legend ('no'). Putting 'yes' will plot a legend.
"""
centerlon, centerlat = mapname(plon, plat)
A95_km = A95 * 111.32
mapname.scatter(centerlon, centerlat, marker=marker,
color=color, edgecolors=edgecolor, s=markersize, label=label, zorder=101)
equi_basemap(mapname, plon, plat, A95_km, color)
if legend == 'yes':
plt.legend(loc=2) | python | def plot_pole_basemap(mapname, plon, plat, A95, label='', color='k', edgecolor='k', marker='o', markersize=20, legend='no'):
    """
    This function plots a paleomagnetic pole and A95 error ellipse on whatever
    current map projection has been set using the basemap plotting library.

    Before this function is called, a plot needs to be initialized with code
    that looks something like:
    >from mpl_toolkits.basemap import Basemap
    >mapname = Basemap(projection='ortho',lat_0=35,lon_0=200)
    >plt.figure(figsize=(6, 6))
    >mapname.drawcoastlines(linewidth=0.25)
    >mapname.fillcontinents(color='bisque',lake_color='white',zorder=1)
    >mapname.drawmapboundary(fill_color='white')
    >mapname.drawmeridians(np.arange(0,360,30))
    >mapname.drawparallels(np.arange(-90,90,30))

    Required Parameters
    -----------
    mapname : the name of the current map that has been developed using basemap
    plon : the longitude of the paleomagnetic pole being plotted (in degrees E)
    plat : the latitude of the paleomagnetic pole being plotted (in degrees)
    A95 : the A_95 confidence ellipse of the paleomagnetic pole (in degrees)

    Optional Parameters (defaults are used if not specified)
    -----------
    color : the default color is black. Other colors can be chosen (e.g. 'r')
    marker : the default is a circle. Other symbols can be chosen (e.g. 's')
    markersize : the default is 20. Other size can be chosen
    label : the default is no label. Labels can be assigned.
    legend : the default is no legend ('no'). Putting 'yes' will plot a legend.
    """
    # project the pole position into map (projection) coordinates
    centerlon, centerlat = mapname(plon, plat)
    # convert the A95 radius from degrees of arc to km (~111.32 km/degree)
    A95_km = A95 * 111.32
    # zorder=101 keeps the pole symbol above the map features
    mapname.scatter(centerlon, centerlat, marker=marker,
                    color=color, edgecolors=edgecolor, s=markersize, label=label, zorder=101)
    # draw the A95 confidence circle around the pole
    equi_basemap(mapname, plon, plat, A95_km, color)
    if legend == 'yes':
plt.legend(loc=2) | This function plots a paleomagnetic pole and A95 error ellipse on whatever
current map projection has been set using the basemap plotting library.
Before this function is called, a plot needs to be initialized with code
that looks something like:
>from mpl_toolkits.basemap import Basemap
>mapname = Basemap(projection='ortho',lat_0=35,lon_0=200)
>plt.figure(figsize=(6, 6))
>mapname.drawcoastlines(linewidth=0.25)
>mapname.fillcontinents(color='bisque',lake_color='white',zorder=1)
>mapname.drawmapboundary(fill_color='white')
>mapname.drawmeridians(np.arange(0,360,30))
>mapname.drawparallels(np.arange(-90,90,30))
Required Parameters
-----------
mapname : the name of the current map that has been developed using basemap
plon : the longitude of the paleomagnetic pole being plotted (in degrees E)
plat : the latitude of the paleomagnetic pole being plotted (in degrees)
A95 : the A_95 confidence ellipse of the paleomagnetic pole (in degrees)
Optional Parameters (defaults are used if not specified)
-----------
color : the default color is black. Other colors can be chosen (e.g. 'r')
marker : the default is a circle. Other symbols can be chosen (e.g. 's')
markersize : the default is 20. Other size can be chosen
label : the default is no label. Labels can be assigned.
legend : the default is no legend ('no'). Putting 'yes' will plot a legend. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L1897-L1931 |
PmagPy/PmagPy | pmagpy/ipmag.py | plot_pole_dp_dm | def plot_pole_dp_dm(map_axis, plon, plat, slon, slat, dp, dm, pole_label='pole', site_label='site',
pole_color='k', pole_edgecolor='k', pole_marker='o',
site_color='r', site_edgecolor='r', site_marker='s',
markersize=20, legend=True):
"""
This function plots a paleomagnetic pole and a dp/dm confidence ellipse on a cartopy map axis.
Before this function is called, a plot needs to be initialized with code
such as that in the make_orthographic_map function.
Example
-------
>>> dec = 280
>>> inc = 45
>>> a95 = 5
>>> site_lat = 45
>>> site_lon = -100
>>> pole = pmag.dia_vgp(dec, inc, a95, site_lat, site_lon)
>>> pole_lon = pole[0]
>>> pole_lat = pole[1]
>>> dp = pole[2]
>>> dm = pole[3]
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200,central_latitude=30)
>>> ipmag.plot_pole_dp_dm(map_axis,pole_lon,pole_lat,site_lon,site_lat,dp,dm)
Required Parameters
-----------
map_axis : the name of the current map axis that has been developed using cartopy
plon : the longitude of the paleomagnetic pole being plotted (in degrees E)
plat : the latitude of the paleomagnetic pole being plotted (in degrees)
slon : the longitude of the site (in degrees E)
slat : the latitude of the site (in degrees)
dp : the semi-minor axis of the confidence ellipse (in degrees)
dm : the semi-major axis of the confidence ellipse (in degrees)
Optional Parameters (defaults are used if not specified)
-----------
pole_color : the default color is black. Other colors can be chosen (e.g. 'g')
site_color : the default color is red. Other colors can be chosen (e.g. 'g')
pole_marker : the default is a circle. Other symbols can be chosen (e.g. 's')
site_marker : the default is a square. Other symbols can be chosen (e.g. '^')
markersize : the default is 20. Other size can be chosen
pole_label : string that labels the pole.
site_label : string that labels the site
legend : the default is a legend (True). Putting False will suppress legend plotting.
"""
if not has_cartopy:
print('-W- cartopy must be installed to run ipmag.plot_pole_dp_dm')
return
dp_km = dp*111.32
dm_km = dm*111.32
map_axis.scatter(plon, plat, marker=pole_marker,
color=pole_color, edgecolors=pole_edgecolor, s=markersize,
label=pole_label, zorder=101, transform=ccrs.Geodetic())
map_axis.scatter(slon, slat, marker=site_marker,
color=site_color, edgecolors=site_edgecolor, s=markersize,
label=site_label, zorder=101, transform=ccrs.Geodetic())
# the orientation of the ellipse needs to be determined using the
# two laws of cosines for spherical triangles where the triangle is
# A: site, B: north pole, C: paleomagnetic pole (see Fig. A.2 of Butler)
site_lon_rad = np.deg2rad(slon)
site_lat_rad = np.deg2rad(slat)
c_rad = np.deg2rad(90-slat)
pole_lon_rad = np.deg2rad(plon)
pole_lat_rad = np.deg2rad(plat)
a_rad = np.deg2rad(90-plat)
B_rad = np.abs(pole_lon_rad-site_lon_rad)
cos_b = np.cos(c_rad)*np.cos(a_rad) + np.sin(c_rad) * \
np.sin(a_rad)*np.cos(B_rad)
b_rad = np.arccos(cos_b)
sin_C = (np.sin(B_rad)/np.sin(b_rad))*np.sin(c_rad)
C_rad = np.arcsin(sin_C)
# need to make the rotation of the ellipse go the right way
if slon-plon > 180:
if plon >= slon and plat >= slat:
C_deg = -np.abs(np.rad2deg(C_rad))
elif plon <= slon and plat >= slat:
C_deg = np.abs(np.rad2deg(C_rad))
elif plon >= slon and plat <= slat:
C_deg = np.abs(np.rad2deg(C_rad))
elif plon <= slon and plat <= slat:
C_deg = -np.abs(np.rad2deg(C_rad))
elif slon-plon <= 180:
if plon >= slon and plat >= slat:
C_deg = np.abs(np.rad2deg(C_rad))
elif plon <= slon and plat >= slat:
C_deg = -np.abs(np.rad2deg(C_rad))
elif plon >= slon and plat <= slat:
C_deg = -np.abs(np.rad2deg(C_rad))
elif plon <= slon and plat <= slat:
C_deg = np.abs(np.rad2deg(C_rad))
print(C_deg)
ellipse(map_axis, plon, plat, dp_km, dm_km, C_deg)
if legend == True:
plt.legend(loc=2) | python | def plot_pole_dp_dm(map_axis, plon, plat, slon, slat, dp, dm, pole_label='pole', site_label='site',
pole_color='k', pole_edgecolor='k', pole_marker='o',
site_color='r', site_edgecolor='r', site_marker='s',
markersize=20, legend=True):
"""
This function plots a paleomagnetic pole and a dp/dm confidence ellipse on a cartopy map axis.
Before this function is called, a plot needs to be initialized with code
such as that in the make_orthographic_map function.
Example
-------
>>> dec = 280
>>> inc = 45
>>> a95 = 5
>>> site_lat = 45
>>> site_lon = -100
>>> pole = pmag.dia_vgp(dec, inc, a95, site_lat, site_lon)
>>> pole_lon = pole[0]
>>> pole_lat = pole[1]
>>> dp = pole[2]
>>> dm = pole[3]
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200,central_latitude=30)
>>> ipmag.plot_pole_dp_dm(map_axis,pole_lon,pole_lat,site_lon,site_lat,dp,dm)
Required Parameters
-----------
map_axis : the name of the current map axis that has been developed using cartopy
plon : the longitude of the paleomagnetic pole being plotted (in degrees E)
plat : the latitude of the paleomagnetic pole being plotted (in degrees)
slon : the longitude of the site (in degrees E)
slat : the latitude of the site (in degrees)
dp : the semi-minor axis of the confidence ellipse (in degrees)
dm : the semi-major axis of the confidence ellipse (in degrees)
Optional Parameters (defaults are used if not specified)
-----------
pole_color : the default color is black. Other colors can be chosen (e.g. 'g')
site_color : the default color is red. Other colors can be chosen (e.g. 'g')
pole_marker : the default is a circle. Other symbols can be chosen (e.g. 's')
site_marker : the default is a square. Other symbols can be chosen (e.g. '^')
markersize : the default is 20. Other size can be chosen
pole_label : string that labels the pole.
site_label : string that labels the site
legend : the default is a legend (True). Putting False will suppress legend plotting.
"""
if not has_cartopy:
print('-W- cartopy must be installed to run ipmag.plot_pole_dp_dm')
return
dp_km = dp*111.32
dm_km = dm*111.32
map_axis.scatter(plon, plat, marker=pole_marker,
color=pole_color, edgecolors=pole_edgecolor, s=markersize,
label=pole_label, zorder=101, transform=ccrs.Geodetic())
map_axis.scatter(slon, slat, marker=site_marker,
color=site_color, edgecolors=site_edgecolor, s=markersize,
label=site_label, zorder=101, transform=ccrs.Geodetic())
# the orientation of the ellipse needs to be determined using the
# two laws of cosines for spherical triangles where the triangle is
# A: site, B: north pole, C: paleomagnetic pole (see Fig. A.2 of Butler)
site_lon_rad = np.deg2rad(slon)
site_lat_rad = np.deg2rad(slat)
c_rad = np.deg2rad(90-slat)
pole_lon_rad = np.deg2rad(plon)
pole_lat_rad = np.deg2rad(plat)
a_rad = np.deg2rad(90-plat)
B_rad = np.abs(pole_lon_rad-site_lon_rad)
cos_b = np.cos(c_rad)*np.cos(a_rad) + np.sin(c_rad) * \
np.sin(a_rad)*np.cos(B_rad)
b_rad = np.arccos(cos_b)
sin_C = (np.sin(B_rad)/np.sin(b_rad))*np.sin(c_rad)
C_rad = np.arcsin(sin_C)
# need to make the rotation of the ellipse go the right way
if slon-plon > 180:
if plon >= slon and plat >= slat:
C_deg = -np.abs(np.rad2deg(C_rad))
elif plon <= slon and plat >= slat:
C_deg = np.abs(np.rad2deg(C_rad))
elif plon >= slon and plat <= slat:
C_deg = np.abs(np.rad2deg(C_rad))
elif plon <= slon and plat <= slat:
C_deg = -np.abs(np.rad2deg(C_rad))
elif slon-plon <= 180:
if plon >= slon and plat >= slat:
C_deg = np.abs(np.rad2deg(C_rad))
elif plon <= slon and plat >= slat:
C_deg = -np.abs(np.rad2deg(C_rad))
elif plon >= slon and plat <= slat:
C_deg = -np.abs(np.rad2deg(C_rad))
elif plon <= slon and plat <= slat:
C_deg = np.abs(np.rad2deg(C_rad))
print(C_deg)
ellipse(map_axis, plon, plat, dp_km, dm_km, C_deg)
if legend == True:
plt.legend(loc=2) | This function plots a paleomagnetic pole and a dp/dm confidence ellipse on a cartopy map axis.
Before this function is called, a plot needs to be initialized with code
such as that in the make_orthographic_map function.
Example
-------
>>> dec = 280
>>> inc = 45
>>> a95 = 5
>>> site_lat = 45
>>> site_lon = -100
>>> pole = pmag.dia_vgp(dec, inc, a95, site_lat, site_lon)
>>> pole_lon = pole[0]
>>> pole_lat = pole[1]
>>> dp = pole[2]
>>> dm = pole[3]
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200,central_latitude=30)
>>> ipmag.plot_pole_dp_dm(map_axis,pole_lon,pole_lat,site_lon,site_lat,dp,dm)
Required Parameters
-----------
map_axis : the name of the current map axis that has been developed using cartopy
plon : the longitude of the paleomagnetic pole being plotted (in degrees E)
plat : the latitude of the paleomagnetic pole being plotted (in degrees)
slon : the longitude of the site (in degrees E)
slat : the latitude of the site (in degrees)
dp : the semi-minor axis of the confidence ellipse (in degrees)
dm : the semi-major axis of the confidence ellipse (in degrees)
Optional Parameters (defaults are used if not specified)
-----------
pole_color : the default color is black. Other colors can be chosen (e.g. 'g')
site_color : the default color is red. Other colors can be chosen (e.g. 'g')
pole_marker : the default is a circle. Other symbols can be chosen (e.g. 's')
site_marker : the default is a square. Other symbols can be chosen (e.g. '^')
markersize : the default is 20. Other size can be chosen
pole_label : string that labels the pole.
site_label : string that labels the site
legend : the default is a legend (True). Putting False will suppress legend plotting. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L1934-L2037 |
PmagPy/PmagPy | pmagpy/ipmag.py | plot_poles_colorbar | def plot_poles_colorbar(map_axis, plons, plats, A95s, colorvalues, vmin, vmax,
colormap='viridis', edgecolor='k', marker='o', markersize='20',
alpha=1.0, colorbar=True, colorbar_label='pole age (Ma)'):
"""
This function plots multiple paleomagnetic pole and A95 error ellipse on a cartopy map axis.
The poles are colored by the defined colormap.
Before this function is called, a plot needs to be initialized with code
such as that in the make_orthographic_map function.
Example
-------
>>> plons = [200, 180, 210]
>>> plats = [60, 40, 35]
>>> A95s = [6, 3, 10]
>>> ages = [100,200,300]
>>> vmin = 0
>>> vmax = 300
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200, central_latitude=30)
>>> ipmag.plot_poles_colorbar(map_axis, plons, plats, A95s, ages, vmin, vmax)
Required Parameters
-----------
map_axis : the name of the current map axis that has been developed using cartopy
plons : the longitude of the paleomagnetic pole being plotted (in degrees E)
plats : the latitude of the paleomagnetic pole being plotted (in degrees)
A95s : the A_95 confidence ellipse of the paleomagnetic pole (in degrees)
colorvalues : what attribute is being used to determine the colors
vmin : what is the minimum range for the colormap
vmax : what is the maximum range for the colormap
Optional Parameters (defaults are used if not specified)
-----------
colormap : the colormap used (default is 'viridis'; others should be put as a string with quotes, e.g. 'plasma')
edgecolor : the color desired for the symbol outline
marker : the marker shape desired for the pole mean symbol (default is 'o' aka a circle)
colorbar : the default is to include a colorbar (True). Putting False will make it so no legend is plotted.
colorbar_label : label for the colorbar
"""
if not has_cartopy:
print('-W- cartopy must be installed to run ipmag.plot_poles_colorbar')
return
color_mapping = plt.cm.ScalarMappable(cmap=colormap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
colors = color_mapping.to_rgba(colorvalues).tolist()
plot_poles(map_axis, plons, plats, A95s,
label='', color=colors, edgecolor=edgecolor, marker=marker)
if colorbar == True:
sm = plt.cm.ScalarMappable(
cmap=colormap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
sm._A = []
plt.colorbar(sm, orientation='horizontal', shrink=0.8,
pad=0.05, label=colorbar_label) | python | def plot_poles_colorbar(map_axis, plons, plats, A95s, colorvalues, vmin, vmax,
colormap='viridis', edgecolor='k', marker='o', markersize='20',
alpha=1.0, colorbar=True, colorbar_label='pole age (Ma)'):
"""
This function plots multiple paleomagnetic pole and A95 error ellipse on a cartopy map axis.
The poles are colored by the defined colormap.
Before this function is called, a plot needs to be initialized with code
such as that in the make_orthographic_map function.
Example
-------
>>> plons = [200, 180, 210]
>>> plats = [60, 40, 35]
>>> A95s = [6, 3, 10]
>>> ages = [100,200,300]
>>> vmin = 0
>>> vmax = 300
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200, central_latitude=30)
>>> ipmag.plot_poles_colorbar(map_axis, plons, plats, A95s, ages, vmin, vmax)
Required Parameters
-----------
map_axis : the name of the current map axis that has been developed using cartopy
plons : the longitude of the paleomagnetic pole being plotted (in degrees E)
plats : the latitude of the paleomagnetic pole being plotted (in degrees)
A95s : the A_95 confidence ellipse of the paleomagnetic pole (in degrees)
colorvalues : what attribute is being used to determine the colors
vmin : what is the minimum range for the colormap
vmax : what is the maximum range for the colormap
Optional Parameters (defaults are used if not specified)
-----------
colormap : the colormap used (default is 'viridis'; others should be put as a string with quotes, e.g. 'plasma')
edgecolor : the color desired for the symbol outline
marker : the marker shape desired for the pole mean symbol (default is 'o' aka a circle)
colorbar : the default is to include a colorbar (True). Putting False will make it so no legend is plotted.
colorbar_label : label for the colorbar
"""
if not has_cartopy:
print('-W- cartopy must be installed to run ipmag.plot_poles_colorbar')
return
color_mapping = plt.cm.ScalarMappable(cmap=colormap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
colors = color_mapping.to_rgba(colorvalues).tolist()
plot_poles(map_axis, plons, plats, A95s,
label='', color=colors, edgecolor=edgecolor, marker=marker)
if colorbar == True:
sm = plt.cm.ScalarMappable(
cmap=colormap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
sm._A = []
plt.colorbar(sm, orientation='horizontal', shrink=0.8,
pad=0.05, label=colorbar_label) | This function plots multiple paleomagnetic pole and A95 error ellipse on a cartopy map axis.
The poles are colored by the defined colormap.
Before this function is called, a plot needs to be initialized with code
such as that in the make_orthographic_map function.
Example
-------
>>> plons = [200, 180, 210]
>>> plats = [60, 40, 35]
>>> A95s = [6, 3, 10]
>>> ages = [100,200,300]
>>> vmin = 0
>>> vmax = 300
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200, central_latitude=30)
>>> ipmag.plot_poles_colorbar(map_axis, plons, plats, A95s, ages, vmin, vmax)
Required Parameters
-----------
map_axis : the name of the current map axis that has been developed using cartopy
plons : the longitude of the paleomagnetic pole being plotted (in degrees E)
plats : the latitude of the paleomagnetic pole being plotted (in degrees)
A95s : the A_95 confidence ellipse of the paleomagnetic pole (in degrees)
colorvalues : what attribute is being used to determine the colors
vmin : what is the minimum range for the colormap
vmax : what is the maximum range for the colormap
Optional Parameters (defaults are used if not specified)
-----------
colormap : the colormap used (default is 'viridis'; others should be put as a string with quotes, e.g. 'plasma')
edgecolor : the color desired for the symbol outline
marker : the marker shape desired for the pole mean symbol (default is 'o' aka a circle)
colorbar : the default is to include a colorbar (True). Putting False will make it so no legend is plotted.
colorbar_label : label for the colorbar | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L2040-L2095 |
PmagPy/PmagPy | pmagpy/ipmag.py | plot_vgp | def plot_vgp(map_axis, vgp_lon=None, vgp_lat=None, di_block=None, label='', color='k', marker='o',
edge='black', markersize=20, legend=False):
"""
This function plots a paleomagnetic pole position on a cartopy map axis.
Before this function is called, a plot needs to be initialized with code
such as that in the make_orthographic_map function.
Example
-------
>>> vgps = ipmag.fishrot(dec=200,inc=30)
>>> vgp_lon_list,vgp_lat_list,intensities= ipmag.unpack_di_block(vgps)
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200,central_latitude=30)
>>> ipmag.plot_vgp(map_axis,vgp_lon=vgp_lon_list,vgp_lat=vgp_lat_list,color='red',markersize=40)
Required Parameters
-----------
map_axis : the name of the current map axis that has been developed using cartopy
plon : the longitude of the paleomagnetic pole being plotted (in degrees E)
plat : the latitude of the paleomagnetic pole being plotted (in degrees)
Optional Parameters (defaults are used if not specified)
-----------
color : the color desired for the symbol (default is 'k' aka black)
marker : the marker shape desired for the pole mean symbol (default is 'o' aka a circle)
edge : the color of the edge of the marker (default is black)
markersize : size of the marker in pt (default is 20)
label : the default is no label. Labels can be assigned.
legend : the default is no legend (False). Putting True will plot a legend.
"""
if not has_cartopy:
print('-W- cartopy must be installed to run ipmag.plot_vgp')
return
if di_block != None:
di_lists = unpack_di_block(di_block)
if len(di_lists) == 3:
vgp_lon, vgp_lat, intensity = di_lists
if len(di_lists) == 2:
vgp_lon, vgp_lat = di_lists
map_axis.scatter(vgp_lon, vgp_lat, marker=marker, edgecolors=[edge],
s=markersize, color=color, label=label, zorder=100, transform=ccrs.Geodetic())
map_axis.set_global()
if legend == True:
plt.legend(loc=2) | python | def plot_vgp(map_axis, vgp_lon=None, vgp_lat=None, di_block=None, label='', color='k', marker='o',
edge='black', markersize=20, legend=False):
"""
This function plots a paleomagnetic pole position on a cartopy map axis.
Before this function is called, a plot needs to be initialized with code
such as that in the make_orthographic_map function.
Example
-------
>>> vgps = ipmag.fishrot(dec=200,inc=30)
>>> vgp_lon_list,vgp_lat_list,intensities= ipmag.unpack_di_block(vgps)
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200,central_latitude=30)
>>> ipmag.plot_vgp(map_axis,vgp_lon=vgp_lon_list,vgp_lat=vgp_lat_list,color='red',markersize=40)
Required Parameters
-----------
map_axis : the name of the current map axis that has been developed using cartopy
plon : the longitude of the paleomagnetic pole being plotted (in degrees E)
plat : the latitude of the paleomagnetic pole being plotted (in degrees)
Optional Parameters (defaults are used if not specified)
-----------
color : the color desired for the symbol (default is 'k' aka black)
marker : the marker shape desired for the pole mean symbol (default is 'o' aka a circle)
edge : the color of the edge of the marker (default is black)
markersize : size of the marker in pt (default is 20)
label : the default is no label. Labels can be assigned.
legend : the default is no legend (False). Putting True will plot a legend.
"""
if not has_cartopy:
print('-W- cartopy must be installed to run ipmag.plot_vgp')
return
if di_block != None:
di_lists = unpack_di_block(di_block)
if len(di_lists) == 3:
vgp_lon, vgp_lat, intensity = di_lists
if len(di_lists) == 2:
vgp_lon, vgp_lat = di_lists
map_axis.scatter(vgp_lon, vgp_lat, marker=marker, edgecolors=[edge],
s=markersize, color=color, label=label, zorder=100, transform=ccrs.Geodetic())
map_axis.set_global()
if legend == True:
plt.legend(loc=2) | This function plots a paleomagnetic pole position on a cartopy map axis.
Before this function is called, a plot needs to be initialized with code
such as that in the make_orthographic_map function.
Example
-------
>>> vgps = ipmag.fishrot(dec=200,inc=30)
>>> vgp_lon_list,vgp_lat_list,intensities= ipmag.unpack_di_block(vgps)
>>> map_axis = ipmag.make_orthographic_map(central_longitude=200,central_latitude=30)
>>> ipmag.plot_vgp(map_axis,vgp_lon=vgp_lon_list,vgp_lat=vgp_lat_list,color='red',markersize=40)
Required Parameters
-----------
map_axis : the name of the current map axis that has been developed using cartopy
plon : the longitude of the paleomagnetic pole being plotted (in degrees E)
plat : the latitude of the paleomagnetic pole being plotted (in degrees)
Optional Parameters (defaults are used if not specified)
-----------
color : the color desired for the symbol (default is 'k' aka black)
marker : the marker shape desired for the pole mean symbol (default is 'o' aka a circle)
edge : the color of the edge of the marker (default is black)
markersize : size of the marker in pt (default is 20)
label : the default is no label. Labels can be assigned.
legend : the default is no legend (False). Putting True will plot a legend. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L2098-L2141 |
PmagPy/PmagPy | pmagpy/ipmag.py | plot_vgp_basemap | def plot_vgp_basemap(mapname, vgp_lon=None, vgp_lat=None, di_block=None, label='', color='k', marker='o', markersize=20, legend='no'):
"""
This function plots a paleomagnetic pole on whatever current map projection
has been set using the basemap plotting library.
Before this function is called, a plot needs to be initialized with code
that looks something like:
>from mpl_toolkits.basemap import Basemap
>mapname = Basemap(projection='ortho',lat_0=35,lon_0=200)
>plt.figure(figsize=(6, 6))
>mapname.drawcoastlines(linewidth=0.25)
>mapname.fillcontinents(color='bisque',lake_color='white',zorder=1)
>mapname.drawmapboundary(fill_color='white')
>mapname.drawmeridians(np.arange(0,360,30))
>mapname.drawparallels(np.arange(-90,90,30))
Required Parameters
-----------
mapname : the name of the current map that has been developed using basemap
plon : the longitude of the paleomagnetic pole being plotted (in degrees E)
plat : the latitude of the paleomagnetic pole being plotted (in degrees)
Optional Parameters (defaults are used if not specified)
-----------
color : the color desired for the symbol and its A95 ellipse (default is 'k' aka black)
marker : the marker shape desired for the pole mean symbol (default is 'o' aka a circle)
label : the default is no label. Labels can be assigned.
legend : the default is no legend ('no'). Putting 'yes' will plot a legend.
"""
if di_block != None:
di_lists = unpack_di_block(di_block)
if len(di_lists) == 3:
vgp_lon, vgp_lat, intensity = di_lists
if len(di_lists) == 2:
vgp_lon, vgp_lat = di_lists
centerlon, centerlat = mapname(vgp_lon, vgp_lat)
mapname.scatter(centerlon, centerlat, marker=marker,
s=markersize, color=color, label=label, zorder=100)
if legend == 'yes':
plt.legend(loc=2) | python | def plot_vgp_basemap(mapname, vgp_lon=None, vgp_lat=None, di_block=None, label='', color='k', marker='o', markersize=20, legend='no'):
"""
This function plots a paleomagnetic pole on whatever current map projection
has been set using the basemap plotting library.
Before this function is called, a plot needs to be initialized with code
that looks something like:
>from mpl_toolkits.basemap import Basemap
>mapname = Basemap(projection='ortho',lat_0=35,lon_0=200)
>plt.figure(figsize=(6, 6))
>mapname.drawcoastlines(linewidth=0.25)
>mapname.fillcontinents(color='bisque',lake_color='white',zorder=1)
>mapname.drawmapboundary(fill_color='white')
>mapname.drawmeridians(np.arange(0,360,30))
>mapname.drawparallels(np.arange(-90,90,30))
Required Parameters
-----------
mapname : the name of the current map that has been developed using basemap
plon : the longitude of the paleomagnetic pole being plotted (in degrees E)
plat : the latitude of the paleomagnetic pole being plotted (in degrees)
Optional Parameters (defaults are used if not specified)
-----------
color : the color desired for the symbol and its A95 ellipse (default is 'k' aka black)
marker : the marker shape desired for the pole mean symbol (default is 'o' aka a circle)
label : the default is no label. Labels can be assigned.
legend : the default is no legend ('no'). Putting 'yes' will plot a legend.
"""
if di_block != None:
di_lists = unpack_di_block(di_block)
if len(di_lists) == 3:
vgp_lon, vgp_lat, intensity = di_lists
if len(di_lists) == 2:
vgp_lon, vgp_lat = di_lists
centerlon, centerlat = mapname(vgp_lon, vgp_lat)
mapname.scatter(centerlon, centerlat, marker=marker,
s=markersize, color=color, label=label, zorder=100)
if legend == 'yes':
plt.legend(loc=2) | This function plots a paleomagnetic pole on whatever current map projection
has been set using the basemap plotting library.
Before this function is called, a plot needs to be initialized with code
that looks something like:
>from mpl_toolkits.basemap import Basemap
>mapname = Basemap(projection='ortho',lat_0=35,lon_0=200)
>plt.figure(figsize=(6, 6))
>mapname.drawcoastlines(linewidth=0.25)
>mapname.fillcontinents(color='bisque',lake_color='white',zorder=1)
>mapname.drawmapboundary(fill_color='white')
>mapname.drawmeridians(np.arange(0,360,30))
>mapname.drawparallels(np.arange(-90,90,30))
Required Parameters
-----------
mapname : the name of the current map that has been developed using basemap
plon : the longitude of the paleomagnetic pole being plotted (in degrees E)
plat : the latitude of the paleomagnetic pole being plotted (in degrees)
Optional Parameters (defaults are used if not specified)
-----------
color : the color desired for the symbol and its A95 ellipse (default is 'k' aka black)
marker : the marker shape desired for the pole mean symbol (default is 'o' aka a circle)
label : the default is no label. Labels can be assigned.
legend : the default is no legend ('no'). Putting 'yes' will plot a legend. | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L2144-L2180 |
PmagPy/PmagPy | pmagpy/ipmag.py | vgp_calc | def vgp_calc(dataframe, tilt_correction='yes', site_lon='site_lon', site_lat='site_lat', dec_is='dec_is', inc_is='inc_is', dec_tc='dec_tc', inc_tc='inc_tc'):
"""
This function calculates paleomagnetic poles using directional data and site
location data within a pandas.DataFrame. The function adds the columns
'paleolatitude', 'vgp_lat', 'vgp_lon', 'vgp_lat_rev', and 'vgp_lon_rev'
to the dataframe. The '_rev' columns allow for subsequent choice as to which
polarity will be used for the VGPs.
Parameters
-----------
dataframe : the name of the pandas.DataFrame containing the data
tilt-correction : 'yes' is the default and uses tilt-corrected data (dec_tc, inc_tc), 'no' uses data that is not tilt-corrected and is in geographic coordinates
dataframe['site_lat'] : the name of the Dataframe column containing the latitude of the site
dataframe['site_lon'] : the name of the Dataframe column containing the longitude of the site
dataframe['inc_tc'] : the name of the Dataframe column containing the tilt-corrected inclination (used by default tilt-correction='yes')
dataframe['dec_tc'] : the name of the Dataframe column containing the tilt-corrected declination (used by default tilt-correction='yes')
dataframe['inc_is'] : the name of the Dataframe column containing the insitu inclination (used when tilt-correction='no')
dataframe['dec_is'] : the name of the Dataframe column containing the insitu declination (used when tilt-correction='no')
Returns
-------
dataframe['paleolatitude']
dataframe['colatitude']
dataframe['vgp_lat']
dataframe['vgp_lon']
dataframe['vgp_lat_rev']
dataframe['vgp_lon_rev']
"""
dataframe.is_copy = False
if tilt_correction == 'yes':
# calculate the paleolatitude/colatitude
dataframe['paleolatitude'] = np.degrees(
np.arctan(0.5 * np.tan(np.radians(dataframe[inc_tc]))))
dataframe['colatitude'] = 90 - dataframe['paleolatitude']
# calculate the latitude of the pole
dataframe['vgp_lat'] = np.degrees(np.arcsin(np.sin(np.radians(dataframe[site_lat])) *
np.cos(np.radians(dataframe['colatitude'])) +
np.cos(np.radians(dataframe[site_lat])) *
np.sin(np.radians(dataframe['colatitude'])) *
np.cos(np.radians(dataframe[dec_tc]))))
# calculate the longitudinal difference between the pole and the site
# (beta)
dataframe['beta'] = np.degrees(np.arcsin(old_div((np.sin(np.radians(dataframe['colatitude'])) *
np.sin(np.radians(dataframe[dec_tc]))),
(np.cos(np.radians(dataframe['vgp_lat']))))))
# generate a boolean array (mask) to use to distinguish between the two possibilities for pole longitude
# and then calculate pole longitude using the site location and
# calculated beta
mask = np.cos(np.radians(dataframe['colatitude'])) > np.sin(
np.radians(dataframe[site_lat])) * np.sin(np.radians(dataframe['vgp_lat']))
dataframe['vgp_lon'] = np.where(mask, (dataframe[site_lon] + dataframe['beta']) %
360., (dataframe[site_lon] + 180 - dataframe['beta']) % 360.)
# calculate the antipode of the poles
dataframe['vgp_lat_rev'] = -dataframe['vgp_lat']
dataframe['vgp_lon_rev'] = (dataframe['vgp_lon'] - 180.) % 360.
# the 'colatitude' and 'beta' columns were created for the purposes of the pole calculations
# but aren't of further use and are deleted
del dataframe['colatitude']
del dataframe['beta']
if tilt_correction == 'no':
# calculate the paleolatitude/colatitude
dataframe['paleolatitude'] = np.degrees(
np.arctan(0.5 * np.tan(np.radians(dataframe[inc_is]))))
dataframe['colatitude'] = 90 - dataframe['paleolatitude']
# calculate the latitude of the pole
dataframe['vgp_lat'] = np.degrees(np.arcsin(np.sin(np.radians(dataframe[site_lat])) *
np.cos(np.radians(dataframe['colatitude'])) +
np.cos(np.radians(dataframe[site_lat])) *
np.sin(np.radians(dataframe['colatitude'])) *
np.cos(np.radians(dataframe[dec_is]))))
# calculate the longitudinal difference between the pole and the site
# (beta)
dataframe['beta'] = np.degrees(np.arcsin(old_div((np.sin(np.radians(dataframe['colatitude'])) *
np.sin(np.radians(dataframe[dec_is]))),
(np.cos(np.radians(dataframe['vgp_lat']))))))
# generate a boolean array (mask) to use to distinguish between the two possibilities for pole longitude
# and then calculate pole longitude using the site location and
# calculated beta
mask = np.cos(np.radians(dataframe['colatitude'])) > np.sin(
np.radians(dataframe[site_lat])) * np.sin(np.radians(dataframe['vgp_lat']))
dataframe['vgp_lon'] = np.where(mask, (dataframe[site_lon] + dataframe['beta']) %
360., (dataframe[site_lon] + 180 - dataframe['beta']) % 360.)
# calculate the antipode of the poles
dataframe['vgp_lat_rev'] = -dataframe['vgp_lat']
dataframe['vgp_lon_rev'] = (dataframe['vgp_lon'] - 180.) % 360.
# the 'colatitude' and 'beta' columns were created for the purposes of the pole calculations
# but aren't of further use and are deleted
del dataframe['colatitude']
del dataframe['beta']
return(dataframe) | python | def vgp_calc(dataframe, tilt_correction='yes', site_lon='site_lon', site_lat='site_lat', dec_is='dec_is', inc_is='inc_is', dec_tc='dec_tc', inc_tc='inc_tc'):
"""
This function calculates paleomagnetic poles using directional data and site
location data within a pandas.DataFrame. The function adds the columns
'paleolatitude', 'vgp_lat', 'vgp_lon', 'vgp_lat_rev', and 'vgp_lon_rev'
to the dataframe. The '_rev' columns allow for subsequent choice as to which
polarity will be used for the VGPs.
Parameters
-----------
dataframe : the name of the pandas.DataFrame containing the data
tilt-correction : 'yes' is the default and uses tilt-corrected data (dec_tc, inc_tc), 'no' uses data that is not tilt-corrected and is in geographic coordinates
dataframe['site_lat'] : the name of the Dataframe column containing the latitude of the site
dataframe['site_lon'] : the name of the Dataframe column containing the longitude of the site
dataframe['inc_tc'] : the name of the Dataframe column containing the tilt-corrected inclination (used by default tilt-correction='yes')
dataframe['dec_tc'] : the name of the Dataframe column containing the tilt-corrected declination (used by default tilt-correction='yes')
dataframe['inc_is'] : the name of the Dataframe column containing the insitu inclination (used when tilt-correction='no')
dataframe['dec_is'] : the name of the Dataframe column containing the insitu declination (used when tilt-correction='no')
Returns
-------
dataframe['paleolatitude']
dataframe['colatitude']
dataframe['vgp_lat']
dataframe['vgp_lon']
dataframe['vgp_lat_rev']
dataframe['vgp_lon_rev']
"""
dataframe.is_copy = False
if tilt_correction == 'yes':
# calculate the paleolatitude/colatitude
dataframe['paleolatitude'] = np.degrees(
np.arctan(0.5 * np.tan(np.radians(dataframe[inc_tc]))))
dataframe['colatitude'] = 90 - dataframe['paleolatitude']
# calculate the latitude of the pole
dataframe['vgp_lat'] = np.degrees(np.arcsin(np.sin(np.radians(dataframe[site_lat])) *
np.cos(np.radians(dataframe['colatitude'])) +
np.cos(np.radians(dataframe[site_lat])) *
np.sin(np.radians(dataframe['colatitude'])) *
np.cos(np.radians(dataframe[dec_tc]))))
# calculate the longitudinal difference between the pole and the site
# (beta)
dataframe['beta'] = np.degrees(np.arcsin(old_div((np.sin(np.radians(dataframe['colatitude'])) *
np.sin(np.radians(dataframe[dec_tc]))),
(np.cos(np.radians(dataframe['vgp_lat']))))))
# generate a boolean array (mask) to use to distinguish between the two possibilities for pole longitude
# and then calculate pole longitude using the site location and
# calculated beta
mask = np.cos(np.radians(dataframe['colatitude'])) > np.sin(
np.radians(dataframe[site_lat])) * np.sin(np.radians(dataframe['vgp_lat']))
dataframe['vgp_lon'] = np.where(mask, (dataframe[site_lon] + dataframe['beta']) %
360., (dataframe[site_lon] + 180 - dataframe['beta']) % 360.)
# calculate the antipode of the poles
dataframe['vgp_lat_rev'] = -dataframe['vgp_lat']
dataframe['vgp_lon_rev'] = (dataframe['vgp_lon'] - 180.) % 360.
# the 'colatitude' and 'beta' columns were created for the purposes of the pole calculations
# but aren't of further use and are deleted
del dataframe['colatitude']
del dataframe['beta']
if tilt_correction == 'no':
# calculate the paleolatitude/colatitude
dataframe['paleolatitude'] = np.degrees(
np.arctan(0.5 * np.tan(np.radians(dataframe[inc_is]))))
dataframe['colatitude'] = 90 - dataframe['paleolatitude']
# calculate the latitude of the pole
dataframe['vgp_lat'] = np.degrees(np.arcsin(np.sin(np.radians(dataframe[site_lat])) *
np.cos(np.radians(dataframe['colatitude'])) +
np.cos(np.radians(dataframe[site_lat])) *
np.sin(np.radians(dataframe['colatitude'])) *
np.cos(np.radians(dataframe[dec_is]))))
# calculate the longitudinal difference between the pole and the site
# (beta)
dataframe['beta'] = np.degrees(np.arcsin(old_div((np.sin(np.radians(dataframe['colatitude'])) *
np.sin(np.radians(dataframe[dec_is]))),
(np.cos(np.radians(dataframe['vgp_lat']))))))
# generate a boolean array (mask) to use to distinguish between the two possibilities for pole longitude
# and then calculate pole longitude using the site location and
# calculated beta
mask = np.cos(np.radians(dataframe['colatitude'])) > np.sin(
np.radians(dataframe[site_lat])) * np.sin(np.radians(dataframe['vgp_lat']))
dataframe['vgp_lon'] = np.where(mask, (dataframe[site_lon] + dataframe['beta']) %
360., (dataframe[site_lon] + 180 - dataframe['beta']) % 360.)
# calculate the antipode of the poles
dataframe['vgp_lat_rev'] = -dataframe['vgp_lat']
dataframe['vgp_lon_rev'] = (dataframe['vgp_lon'] - 180.) % 360.
# the 'colatitude' and 'beta' columns were created for the purposes of the pole calculations
# but aren't of further use and are deleted
del dataframe['colatitude']
del dataframe['beta']
return(dataframe) | This function calculates paleomagnetic poles using directional data and site
location data within a pandas.DataFrame. The function adds the columns
'paleolatitude', 'vgp_lat', 'vgp_lon', 'vgp_lat_rev', and 'vgp_lon_rev'
to the dataframe. The '_rev' columns allow for subsequent choice as to which
polarity will be used for the VGPs.
Parameters
-----------
dataframe : the name of the pandas.DataFrame containing the data
tilt-correction : 'yes' is the default and uses tilt-corrected data (dec_tc, inc_tc), 'no' uses data that is not tilt-corrected and is in geographic coordinates
dataframe['site_lat'] : the name of the Dataframe column containing the latitude of the site
dataframe['site_lon'] : the name of the Dataframe column containing the longitude of the site
dataframe['inc_tc'] : the name of the Dataframe column containing the tilt-corrected inclination (used by default tilt-correction='yes')
dataframe['dec_tc'] : the name of the Dataframe column containing the tilt-corrected declination (used by default tilt-correction='yes')
dataframe['inc_is'] : the name of the Dataframe column containing the insitu inclination (used when tilt-correction='no')
dataframe['dec_is'] : the name of the Dataframe column containing the insitu declination (used when tilt-correction='no')
Returns
-------
dataframe['paleolatitude']
dataframe['colatitude']
dataframe['vgp_lat']
dataframe['vgp_lon']
dataframe['vgp_lat_rev']
dataframe['vgp_lon_rev'] | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L2183-L2272 |
PmagPy/PmagPy | pmagpy/ipmag.py | sb_vgp_calc | def sb_vgp_calc(dataframe, site_correction='yes', dec_tc='dec_tc', inc_tc='inc_tc'):
"""
This function calculates the angular dispersion of VGPs and corrects
for within site dispersion (unless site_correction = 'no') to return
a value S_b. The input data needs to be within a pandas Dataframe.
Parameters
-----------
dataframe : the name of the pandas.DataFrame containing the data
the data frame needs to contain these columns:
dataframe['site_lat'] : latitude of the site
dataframe['site_lon'] : longitude of the site
dataframe['k'] : fisher precision parameter for directions
dataframe['vgp_lat'] : VGP latitude
dataframe['vgp_lon'] : VGP longitude
----- the following default parameters can be changes by keyword argument -----
dataframe['inc_tc'] : tilt-corrected inclination
dataframe['dec_tc'] : tilt-corrected declination
plot : default is 'no', will make a plot of poles if 'yes'
"""
# calculate the mean from the directional data
dataframe_dirs = []
for n in range(0, len(dataframe)):
dataframe_dirs.append([dataframe[dec_tc][n],
dataframe[inc_tc][n], 1.])
dataframe_dir_mean = pmag.fisher_mean(dataframe_dirs)
# calculate the mean from the vgp data
dataframe_poles = []
dataframe_pole_lats = []
dataframe_pole_lons = []
for n in range(0, len(dataframe)):
dataframe_poles.append([dataframe['vgp_lon'][n],
dataframe['vgp_lat'][n], 1.])
dataframe_pole_lats.append(dataframe['vgp_lat'][n])
dataframe_pole_lons.append(dataframe['vgp_lon'][n])
dataframe_pole_mean = pmag.fisher_mean(dataframe_poles)
# calculate mean paleolatitude from the directional data
dataframe['paleolatitude'] = lat_from_inc(dataframe_dir_mean['inc'])
angle_list = []
for n in range(0, len(dataframe)):
angle = pmag.angle([dataframe['vgp_lon'][n], dataframe['vgp_lat'][n]],
[dataframe_pole_mean['dec'], dataframe_pole_mean['inc']])
angle_list.append(angle[0])
dataframe['delta_mean_pole'] = angle_list
if site_correction == 'yes':
# use eq. 2 of Cox (1970) to translate the directional precision parameter
# into pole coordinates using the assumption of a Fisherian distribution in
# directional coordinates and the paleolatitude as calculated from mean
# inclination using the dipole equation
dataframe['K'] = old_div(dataframe['k'], (0.125 * (5 + 18 * np.sin(np.deg2rad(dataframe['paleolatitude']))**2
+ 9 * np.sin(np.deg2rad(dataframe['paleolatitude']))**4)))
dataframe['Sw'] = old_div(81, (dataframe['K']**0.5))
summation = 0
N = 0
for n in range(0, len(dataframe)):
quantity = dataframe['delta_mean_pole'][n]**2 - \
old_div(dataframe['Sw'][n]**2, dataframe['n'][n])
summation += quantity
N += 1
Sb = ((old_div(1.0, (N - 1.0))) * summation)**0.5
if site_correction == 'no':
summation = 0
N = 0
for n in range(0, len(dataframe)):
quantity = dataframe['delta_mean_pole'][n]**2
summation += quantity
N += 1
Sb = ((old_div(1.0, (N - 1.0))) * summation)**0.5
return Sb | python | def sb_vgp_calc(dataframe, site_correction='yes', dec_tc='dec_tc', inc_tc='inc_tc'):
"""
This function calculates the angular dispersion of VGPs and corrects
for within site dispersion (unless site_correction = 'no') to return
a value S_b. The input data needs to be within a pandas Dataframe.
Parameters
-----------
dataframe : the name of the pandas.DataFrame containing the data
the data frame needs to contain these columns:
dataframe['site_lat'] : latitude of the site
dataframe['site_lon'] : longitude of the site
dataframe['k'] : fisher precision parameter for directions
dataframe['vgp_lat'] : VGP latitude
dataframe['vgp_lon'] : VGP longitude
----- the following default parameters can be changes by keyword argument -----
dataframe['inc_tc'] : tilt-corrected inclination
dataframe['dec_tc'] : tilt-corrected declination
plot : default is 'no', will make a plot of poles if 'yes'
"""
# calculate the mean from the directional data
dataframe_dirs = []
for n in range(0, len(dataframe)):
dataframe_dirs.append([dataframe[dec_tc][n],
dataframe[inc_tc][n], 1.])
dataframe_dir_mean = pmag.fisher_mean(dataframe_dirs)
# calculate the mean from the vgp data
dataframe_poles = []
dataframe_pole_lats = []
dataframe_pole_lons = []
for n in range(0, len(dataframe)):
dataframe_poles.append([dataframe['vgp_lon'][n],
dataframe['vgp_lat'][n], 1.])
dataframe_pole_lats.append(dataframe['vgp_lat'][n])
dataframe_pole_lons.append(dataframe['vgp_lon'][n])
dataframe_pole_mean = pmag.fisher_mean(dataframe_poles)
# calculate mean paleolatitude from the directional data
dataframe['paleolatitude'] = lat_from_inc(dataframe_dir_mean['inc'])
angle_list = []
for n in range(0, len(dataframe)):
angle = pmag.angle([dataframe['vgp_lon'][n], dataframe['vgp_lat'][n]],
[dataframe_pole_mean['dec'], dataframe_pole_mean['inc']])
angle_list.append(angle[0])
dataframe['delta_mean_pole'] = angle_list
if site_correction == 'yes':
# use eq. 2 of Cox (1970) to translate the directional precision parameter
# into pole coordinates using the assumption of a Fisherian distribution in
# directional coordinates and the paleolatitude as calculated from mean
# inclination using the dipole equation
dataframe['K'] = old_div(dataframe['k'], (0.125 * (5 + 18 * np.sin(np.deg2rad(dataframe['paleolatitude']))**2
+ 9 * np.sin(np.deg2rad(dataframe['paleolatitude']))**4)))
dataframe['Sw'] = old_div(81, (dataframe['K']**0.5))
summation = 0
N = 0
for n in range(0, len(dataframe)):
quantity = dataframe['delta_mean_pole'][n]**2 - \
old_div(dataframe['Sw'][n]**2, dataframe['n'][n])
summation += quantity
N += 1
Sb = ((old_div(1.0, (N - 1.0))) * summation)**0.5
if site_correction == 'no':
summation = 0
N = 0
for n in range(0, len(dataframe)):
quantity = dataframe['delta_mean_pole'][n]**2
summation += quantity
N += 1
Sb = ((old_div(1.0, (N - 1.0))) * summation)**0.5
return Sb | This function calculates the angular dispersion of VGPs and corrects
for within site dispersion (unless site_correction = 'no') to return
a value S_b. The input data needs to be within a pandas Dataframe.
Parameters
-----------
dataframe : the name of the pandas.DataFrame containing the data
the data frame needs to contain these columns:
dataframe['site_lat'] : latitude of the site
dataframe['site_lon'] : longitude of the site
dataframe['k'] : fisher precision parameter for directions
dataframe['vgp_lat'] : VGP latitude
dataframe['vgp_lon'] : VGP longitude
----- the following default parameters can be changes by keyword argument -----
dataframe['inc_tc'] : tilt-corrected inclination
dataframe['dec_tc'] : tilt-corrected declination
plot : default is 'no', will make a plot of poles if 'yes' | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L2275-L2356 |
PmagPy/PmagPy | pmagpy/ipmag.py | make_di_block | def make_di_block(dec, inc):
"""
Some pmag.py and ipmag.py functions require or will take a list of unit
vectors [dec,inc,1.] as input. This function takes declination and
inclination data and make it into such a nest list of lists.
Parameters
-----------
dec : list of declinations
inc : list of inclinations
Returns
-----------
di_block : nested list of declination, inclination lists
Example
-----------
>>> decs = [180.3, 179.2, 177.2]
>>> incs = [12.1, 13.7, 11.9]
>>> ipmag.make_di_block(decs,incs)
[[180.3, 12.1, 1.0], [179.2, 13.7, 1.0], [177.2, 11.9, 1.0]]
"""
di_block = []
for n in range(0, len(dec)):
di_block.append([dec[n], inc[n], 1.0])
return di_block | python | def make_di_block(dec, inc):
"""
Some pmag.py and ipmag.py functions require or will take a list of unit
vectors [dec,inc,1.] as input. This function takes declination and
inclination data and make it into such a nest list of lists.
Parameters
-----------
dec : list of declinations
inc : list of inclinations
Returns
-----------
di_block : nested list of declination, inclination lists
Example
-----------
>>> decs = [180.3, 179.2, 177.2]
>>> incs = [12.1, 13.7, 11.9]
>>> ipmag.make_di_block(decs,incs)
[[180.3, 12.1, 1.0], [179.2, 13.7, 1.0], [177.2, 11.9, 1.0]]
"""
di_block = []
for n in range(0, len(dec)):
di_block.append([dec[n], inc[n], 1.0])
return di_block | Some pmag.py and ipmag.py functions require or will take a list of unit
vectors [dec,inc,1.] as input. This function takes declination and
inclination data and make it into such a nest list of lists.
Parameters
-----------
dec : list of declinations
inc : list of inclinations
Returns
-----------
di_block : nested list of declination, inclination lists
Example
-----------
>>> decs = [180.3, 179.2, 177.2]
>>> incs = [12.1, 13.7, 11.9]
>>> ipmag.make_di_block(decs,incs)
[[180.3, 12.1, 1.0], [179.2, 13.7, 1.0], [177.2, 11.9, 1.0]] | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L2359-L2384 |
PmagPy/PmagPy | pmagpy/ipmag.py | unpack_di_block | def unpack_di_block(di_block):
"""
This function unpacks a nested list of [dec,inc,mag_moment] into a list of
declination values, a list of inclination values and a list of magnetic
moment values. Mag_moment values are optional, while dec and inc values are
required.
Parameters
-----------
di_block : nested list of declination, inclination lists
Returns
-----------
dec : list of declinations
inc : list of inclinations
mag_moment : list of magnetic moment (if present in di_block)
Example
-----------
The di_block nested lists of lists can be unpacked using the function
>>> directions = [[180.3, 12.1, 1.0], [179.2, 13.7, 1.0], [177.2, 11.9, 1.0]]
>>> ipmag.unpack_di_block(directions)
([180.3, 179.2, 177.2], [12.1, 13.7, 11.9], [1.0, 1.0, 1.0])
These unpacked values can be assigned to variables:
>>> dec, inc, moment = ipmag.unpack_di_block(directions)
"""
dec_list = []
inc_list = []
moment_list = []
for n in range(0, len(di_block)):
dec = di_block[n][0]
inc = di_block[n][1]
dec_list.append(dec)
inc_list.append(inc)
if len(di_block[n]) > 2:
moment = di_block[n][2]
moment_list.append(moment)
return dec_list, inc_list, moment_list | python | def unpack_di_block(di_block):
"""
This function unpacks a nested list of [dec,inc,mag_moment] into a list of
declination values, a list of inclination values and a list of magnetic
moment values. Mag_moment values are optional, while dec and inc values are
required.
Parameters
-----------
di_block : nested list of declination, inclination lists
Returns
-----------
dec : list of declinations
inc : list of inclinations
mag_moment : list of magnetic moment (if present in di_block)
Example
-----------
The di_block nested lists of lists can be unpacked using the function
>>> directions = [[180.3, 12.1, 1.0], [179.2, 13.7, 1.0], [177.2, 11.9, 1.0]]
>>> ipmag.unpack_di_block(directions)
([180.3, 179.2, 177.2], [12.1, 13.7, 11.9], [1.0, 1.0, 1.0])
These unpacked values can be assigned to variables:
>>> dec, inc, moment = ipmag.unpack_di_block(directions)
"""
dec_list = []
inc_list = []
moment_list = []
for n in range(0, len(di_block)):
dec = di_block[n][0]
inc = di_block[n][1]
dec_list.append(dec)
inc_list.append(inc)
if len(di_block[n]) > 2:
moment = di_block[n][2]
moment_list.append(moment)
return dec_list, inc_list, moment_list | This function unpacks a nested list of [dec,inc,mag_moment] into a list of
declination values, a list of inclination values and a list of magnetic
moment values. Mag_moment values are optional, while dec and inc values are
required.
Parameters
-----------
di_block : nested list of declination, inclination lists
Returns
-----------
dec : list of declinations
inc : list of inclinations
mag_moment : list of magnetic moment (if present in di_block)
Example
-----------
The di_block nested lists of lists can be unpacked using the function
>>> directions = [[180.3, 12.1, 1.0], [179.2, 13.7, 1.0], [177.2, 11.9, 1.0]]
>>> ipmag.unpack_di_block(directions)
([180.3, 179.2, 177.2], [12.1, 13.7, 11.9], [1.0, 1.0, 1.0])
These unpacked values can be assigned to variables:
>>> dec, inc, moment = ipmag.unpack_di_block(directions) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L2387-L2429 |
PmagPy/PmagPy | pmagpy/ipmag.py | make_diddd_array | def make_diddd_array(dec, inc, dip_direction, dip):
"""
Some pmag.py functions such as the bootstrap fold test require a numpy array
of dec, inc, dip direction, dip [dec, inc, dd, dip] as input. This function
makes such an array.
Parameters
-----------
dec : paleomagnetic declination in degrees
inc : paleomagnetic inclination in degrees
dip_direction : the dip direction of bedding (in degrees between 0 and 360)
dip: dip of bedding (in degrees)
Returns
-------
array : an array of [dec, inc, dip_direction, dip]
Examples
--------
Data in separate lists of dec, inc, dip_direction, dip data can be made into
an array.
>>> dec = [132.5,124.3,142.7,130.3,163.2]
>>> inc = [12.1,23.2,34.2,37.7,32.6]
>>> dip_direction = [265.0,265.0,265.0,164.0,164.0]
>>> dip = [20.0,20.0,20.0,72.0,72.0]
>>> data_array = ipmag.make_diddd_array(dec,inc,dip_direction,dip)
>>> data_array
array([[ 132.5, 12.1, 265. , 20. ],
[ 124.3, 23.2, 265. , 20. ],
[ 142.7, 34.2, 265. , 20. ],
[ 130.3, 37.7, 164. , 72. ],
[ 163.2, 32.6, 164. , 72. ]])
"""
diddd_block = []
for n in range(0, len(dec)):
diddd_block.append([dec[n], inc[n], dip_direction[n], dip[n]])
diddd_array = np.array(diddd_block)
return diddd_array | python | def make_diddd_array(dec, inc, dip_direction, dip):
"""
Some pmag.py functions such as the bootstrap fold test require a numpy array
of dec, inc, dip direction, dip [dec, inc, dd, dip] as input. This function
makes such an array.
Parameters
-----------
dec : paleomagnetic declination in degrees
inc : paleomagnetic inclination in degrees
dip_direction : the dip direction of bedding (in degrees between 0 and 360)
dip: dip of bedding (in degrees)
Returns
-------
array : an array of [dec, inc, dip_direction, dip]
Examples
--------
Data in separate lists of dec, inc, dip_direction, dip data can be made into
an array.
>>> dec = [132.5,124.3,142.7,130.3,163.2]
>>> inc = [12.1,23.2,34.2,37.7,32.6]
>>> dip_direction = [265.0,265.0,265.0,164.0,164.0]
>>> dip = [20.0,20.0,20.0,72.0,72.0]
>>> data_array = ipmag.make_diddd_array(dec,inc,dip_direction,dip)
>>> data_array
array([[ 132.5, 12.1, 265. , 20. ],
[ 124.3, 23.2, 265. , 20. ],
[ 142.7, 34.2, 265. , 20. ],
[ 130.3, 37.7, 164. , 72. ],
[ 163.2, 32.6, 164. , 72. ]])
"""
diddd_block = []
for n in range(0, len(dec)):
diddd_block.append([dec[n], inc[n], dip_direction[n], dip[n]])
diddd_array = np.array(diddd_block)
return diddd_array | Some pmag.py functions such as the bootstrap fold test require a numpy array
of dec, inc, dip direction, dip [dec, inc, dd, dip] as input. This function
makes such an array.
Parameters
-----------
dec : paleomagnetic declination in degrees
inc : paleomagnetic inclination in degrees
dip_direction : the dip direction of bedding (in degrees between 0 and 360)
dip: dip of bedding (in degrees)
Returns
-------
array : an array of [dec, inc, dip_direction, dip]
Examples
--------
Data in separate lists of dec, inc, dip_direction, dip data can be made into
an array.
>>> dec = [132.5,124.3,142.7,130.3,163.2]
>>> inc = [12.1,23.2,34.2,37.7,32.6]
>>> dip_direction = [265.0,265.0,265.0,164.0,164.0]
>>> dip = [20.0,20.0,20.0,72.0,72.0]
>>> data_array = ipmag.make_diddd_array(dec,inc,dip_direction,dip)
>>> data_array
array([[ 132.5, 12.1, 265. , 20. ],
[ 124.3, 23.2, 265. , 20. ],
[ 142.7, 34.2, 265. , 20. ],
[ 130.3, 37.7, 164. , 72. ],
[ 163.2, 32.6, 164. , 72. ]]) | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L2432-L2470 |
PmagPy/PmagPy | pmagpy/ipmag.py | equi | def equi(map_axis, centerlon, centerlat, radius, color, alpha=1.0):
"""
This function enables A95 error ellipses to be drawn in cartopy around
paleomagnetic poles in conjunction with shoot
(modified from: http://www.geophysique.be/2011/02/20/matplotlib-basemap-tutorial-09-drawing-circles/).
"""
if not has_cartopy:
print('-W- cartopy must be installed to run ipmag.equi')
return
glon1 = centerlon
glat1 = centerlat
X = []
Y = []
for azimuth in range(0, 360):
glon2, glat2, baz = shoot(glon1, glat1, azimuth, radius)
X.append(glon2)
Y.append(glat2)
X.append(X[0])
Y.append(Y[0])
plt.plot(X[::-1], Y[::-1], color=color,
transform=ccrs.Geodetic(), alpha=alpha) | python | def equi(map_axis, centerlon, centerlat, radius, color, alpha=1.0):
"""
This function enables A95 error ellipses to be drawn in cartopy around
paleomagnetic poles in conjunction with shoot
(modified from: http://www.geophysique.be/2011/02/20/matplotlib-basemap-tutorial-09-drawing-circles/).
"""
if not has_cartopy:
print('-W- cartopy must be installed to run ipmag.equi')
return
glon1 = centerlon
glat1 = centerlat
X = []
Y = []
for azimuth in range(0, 360):
glon2, glat2, baz = shoot(glon1, glat1, azimuth, radius)
X.append(glon2)
Y.append(glat2)
X.append(X[0])
Y.append(Y[0])
plt.plot(X[::-1], Y[::-1], color=color,
transform=ccrs.Geodetic(), alpha=alpha) | This function enables A95 error ellipses to be drawn in cartopy around
paleomagnetic poles in conjunction with shoot
(modified from: http://www.geophysique.be/2011/02/20/matplotlib-basemap-tutorial-09-drawing-circles/). | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L2542-L2563 |
PmagPy/PmagPy | pmagpy/ipmag.py | equi_basemap | def equi_basemap(m, centerlon, centerlat, radius, color):
"""
This function enables A95 error ellipses to be drawn in basemap around
paleomagnetic poles in conjunction with shoot
(from: http://www.geophysique.be/2011/02/20/matplotlib-basemap-tutorial-09-drawing-circles/).
"""
glon1 = centerlon
glat1 = centerlat
X = []
Y = []
for azimuth in range(0, 360):
glon2, glat2, baz = shoot(glon1, glat1, azimuth, radius)
X.append(glon2)
Y.append(glat2)
X.append(X[0])
Y.append(Y[0])
X, Y = m(X, Y)
plt.plot(X, Y, color) | python | def equi_basemap(m, centerlon, centerlat, radius, color):
"""
This function enables A95 error ellipses to be drawn in basemap around
paleomagnetic poles in conjunction with shoot
(from: http://www.geophysique.be/2011/02/20/matplotlib-basemap-tutorial-09-drawing-circles/).
"""
glon1 = centerlon
glat1 = centerlat
X = []
Y = []
for azimuth in range(0, 360):
glon2, glat2, baz = shoot(glon1, glat1, azimuth, radius)
X.append(glon2)
Y.append(glat2)
X.append(X[0])
Y.append(Y[0])
X, Y = m(X, Y)
plt.plot(X, Y, color) | This function enables A95 error ellipses to be drawn in basemap around
paleomagnetic poles in conjunction with shoot
(from: http://www.geophysique.be/2011/02/20/matplotlib-basemap-tutorial-09-drawing-circles/). | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L2566-L2584 |
PmagPy/PmagPy | pmagpy/ipmag.py | ellipse | def ellipse(map_axis, centerlon, centerlat, major_axis, minor_axis, angle, n=360, filled=False, **kwargs):
"""
This function enables general error ellipses to be drawn on the cartopy projection of the input map axis
using a center and a set of major and minor axes and a rotation angle east of north.
(Adapted from equi).
Parameters
-----------
map_axis : cartopy axis
centerlon : longitude of the center of the ellipse
centerlat : latitude of the center of the ellipse
major_axis : Major axis of ellipse
minor_axis : Minor axis of ellipse
angle : angle of major axis in degrees east of north
n : number of points with which to apporximate the ellipse
filled : boolean specifying if the ellipse should be plotted as a filled polygon or
as a set of line segments (Doesn't work right now)
kwargs : any other key word arguments can be passed for the line
Returns
---------
The map object with the ellipse plotted on it
"""
if not has_cartopy:
print('-W- cartopy must be installed to run ipmag.ellipse')
return False
angle = angle*(np.pi/180)
glon1 = centerlon
glat1 = centerlat
X = []
Y = []
for azimuth in np.linspace(0, 360, n):
az_rad = azimuth*(np.pi/180)
radius = ((major_axis*minor_axis)/(((minor_axis*np.cos(az_rad-angle))
** 2 + (major_axis*np.sin(az_rad-angle))**2)**.5))
glon2, glat2, baz = shoot(glon1, glat1, azimuth, radius)
X.append((360+glon2) % 360)
Y.append(glat2)
X.append(X[0])
Y.append(Y[0])
if filled:
ellip = np.array((X, Y)).T
ellip = map_axis.projection.transform_points(
ccrs.PlateCarree(), ellip[:, 0], ellip[:, 1])
poly = Polygon(ellip[:, :2], **kwargs)
map_axis.add_patch(poly)
else:
try:
map_axis.plot(X, Y, transform=ccrs.Geodetic(), **kwargs)
return True
except ValueError:
return False | python | def ellipse(map_axis, centerlon, centerlat, major_axis, minor_axis, angle, n=360, filled=False, **kwargs):
"""
This function enables general error ellipses to be drawn on the cartopy projection of the input map axis
using a center and a set of major and minor axes and a rotation angle east of north.
(Adapted from equi).
Parameters
-----------
map_axis : cartopy axis
centerlon : longitude of the center of the ellipse
centerlat : latitude of the center of the ellipse
major_axis : Major axis of ellipse
minor_axis : Minor axis of ellipse
angle : angle of major axis in degrees east of north
n : number of points with which to apporximate the ellipse
filled : boolean specifying if the ellipse should be plotted as a filled polygon or
as a set of line segments (Doesn't work right now)
kwargs : any other key word arguments can be passed for the line
Returns
---------
The map object with the ellipse plotted on it
"""
if not has_cartopy:
print('-W- cartopy must be installed to run ipmag.ellipse')
return False
angle = angle*(np.pi/180)
glon1 = centerlon
glat1 = centerlat
X = []
Y = []
for azimuth in np.linspace(0, 360, n):
az_rad = azimuth*(np.pi/180)
radius = ((major_axis*minor_axis)/(((minor_axis*np.cos(az_rad-angle))
** 2 + (major_axis*np.sin(az_rad-angle))**2)**.5))
glon2, glat2, baz = shoot(glon1, glat1, azimuth, radius)
X.append((360+glon2) % 360)
Y.append(glat2)
X.append(X[0])
Y.append(Y[0])
if filled:
ellip = np.array((X, Y)).T
ellip = map_axis.projection.transform_points(
ccrs.PlateCarree(), ellip[:, 0], ellip[:, 1])
poly = Polygon(ellip[:, :2], **kwargs)
map_axis.add_patch(poly)
else:
try:
map_axis.plot(X, Y, transform=ccrs.Geodetic(), **kwargs)
return True
except ValueError:
return False | This function enables general error ellipses to be drawn on the cartopy projection of the input map axis
using a center and a set of major and minor axes and a rotation angle east of north.
(Adapted from equi).
Parameters
-----------
map_axis : cartopy axis
centerlon : longitude of the center of the ellipse
centerlat : latitude of the center of the ellipse
major_axis : Major axis of ellipse
minor_axis : Minor axis of ellipse
angle : angle of major axis in degrees east of north
n : number of points with which to apporximate the ellipse
filled : boolean specifying if the ellipse should be plotted as a filled polygon or
as a set of line segments (Doesn't work right now)
kwargs : any other key word arguments can be passed for the line
Returns
---------
The map object with the ellipse plotted on it | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L2587-L2640 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.