sentence1 | sentence2 | label
---|---|---|
def _get_data_info(self, data, file_format):
"""Support file writing by determiniing data type and other options
Parameters
----------
data : pandas object
Data to be written
file_format : basestring
String indicating netCDF3 or netCDF4
Returns
-------
data, data_type, datetime_flag
"""
# get type of data
data_type = data.dtype
# check if older file_format
# if file_format[:7] == 'NETCDF3':
if file_format != 'NETCDF4':
old_format = True
else:
old_format = False
# check for object type
if data_type != np.dtype('O'):
# simple data, not an object
# no 64bit ints in netCDF3
if (data_type == np.int64) & old_format:
data = data.astype(np.int32)
data_type = np.int32
if data_type == np.dtype('<M8[ns]'):
if not old_format:
data_type = np.int64
else:
data_type = np.float64
datetime_flag = True
else:
datetime_flag = False
else:
# dealing with a more complicated object
# iterate over elements until we find one that has content
# and is not NaN
data_type = type(data.iloc[0])
for i in np.arange(len(data)):
if len(data.iloc[i]) > 0:
data_type = type(data.iloc[i])
# NaN entries are floats; keep searching until a non-float
# element is found
if data_type is not float:
break
datetime_flag = False
return data, data_type, datetime_flag | Support file writing by determining data type and other options
Parameters
----------
data : pandas object
Data to be written
file_format : basestring
String indicating netCDF3 or netCDF4
Returns
-------
data, data_type, datetime_flag | entailment |
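A minimal standalone sketch of the dtype and datetime checks described above; the example Series and the old_format flag are invented for illustration and are not pysat output.
```python
import numpy as np
import pandas as pd

# hypothetical datetime column
series = pd.Series(pd.date_range('2020-01-01', periods=3))
old_format = False  # pretend we are writing NETCDF4

data_type = series.dtype
if data_type == np.dtype('<M8[ns]'):
    # datetimes are written as integer (or float, for netCDF3) epoch values
    data_type = np.int64 if not old_format else np.float64
    datetime_flag = True
else:
    datetime_flag = False

print(data_type, datetime_flag)  # <class 'numpy.int64'> True
```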
def _filter_netcdf4_metadata(self, mdata_dict, coltype, remove=False):
"""Filter metadata properties to be consistent with netCDF4.
Notes
-----
remove forced to True if coltype is consistent with a string type
Parameters
----------
mdata_dict : dict
Dictionary equivalent to Meta object info
coltype : type
Type provided by _get_data_info
remove : boolean (False)
Removes FillValue and associated parameters disallowed for strings
Returns
-------
dict
Modified as needed for netCDF4
"""
# Coerce boolean types to integers
for key in mdata_dict:
if type(mdata_dict[key]) == bool:
mdata_dict[key] = int(mdata_dict[key])
if (coltype == type(' ')) or (coltype == type(u' ')):
remove = True
# print ('coltype', coltype, remove, type(coltype), )
if u'_FillValue' in mdata_dict.keys():
# make sure _FillValue is the same type as the data
if remove:
mdata_dict.pop('_FillValue')
else:
mdata_dict['_FillValue'] = np.array(mdata_dict['_FillValue']).astype(coltype)
if u'FillVal' in mdata_dict.keys():
# make sure FillVal is the same type as the data
if remove:
mdata_dict.pop('FillVal')
else:
mdata_dict['FillVal'] = np.array(mdata_dict['FillVal']).astype(coltype)
return mdata_dict | Filter metadata properties to be consistent with netCDF4.
Notes
-----
remove forced to True if coltype is consistent with a string type
Parameters
----------
mdata_dict : dict
Dictionary equivalent to Meta object info
coltype : type
Type provided by _get_data_info
remove : boolean (False)
Removes FillValue and associated parameters disallowed for strings
Returns
-------
dict
Modified as needed for netCDF4 | entailment |
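As a rough, standalone illustration of the filtering rules above (booleans coerced to ints, fill values dropped for string columns); the metadata dict and coltype below are invented for the example.
```python
import numpy as np

mdata = {'units': '', 'scale': True, '_FillValue': -999, 'FillVal': -999}
coltype = str  # pretend the column holds strings

# coerce booleans to integers
filtered = {k: (int(v) if isinstance(v, bool) else v) for k, v in mdata.items()}
if coltype is str:
    # strings may not carry fill values in netCDF4
    filtered.pop('_FillValue', None)
    filtered.pop('FillVal', None)
else:
    filtered['_FillValue'] = np.array(filtered['_FillValue']).astype(coltype)

print(filtered)  # {'units': '', 'scale': 1}
```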
def generic_meta_translator(self, meta_to_translate):
'''Translates the metadata contained in an object into a dictionary
suitable for export.
Parameters
----------
meta_to_translate : Meta
The metadata object to translate
Returns
-------
dict
A dictionary of the metadata for each variable of an output file
e.g. netcdf4'''
export_dict = {}
if self._meta_translation_table is not None:
# Create a translation table for the actual values of the meta labels.
# The instrument specific translation table only stores the names of the
# attributes that hold the various meta labels
translation_table = {}
for key in self._meta_translation_table:
translation_table[getattr(self, key)] = self._meta_translation_table[key]
else:
translation_table = None
#First Order Data
for key in meta_to_translate.data.index:
if translation_table is None:
export_dict[key] = meta_to_translate.data.loc[key].to_dict()
else:
# Translate each key if a translation is provided
export_dict[key] = {}
meta_dict = meta_to_translate.data.loc[key].to_dict()
for original_key in meta_dict:
if original_key in translation_table:
for translated_key in translation_table[original_key]:
export_dict[key][translated_key] = meta_dict[original_key]
else:
export_dict[key][original_key] = meta_dict[original_key]
#Higher Order Data
for key in meta_to_translate.ho_data:
if key not in export_dict:
export_dict[key] = {}
for ho_key in meta_to_translate.ho_data[key].data.index:
if translation_table is None:
export_dict[key+'_'+ho_key] = meta_to_translate.ho_data[key].data.loc[ho_key].to_dict()
else:
#Translate each key if a translation is provided
export_dict[key+'_'+ho_key] = {}
meta_dict = meta_to_translate.ho_data[key].data.loc[ho_key].to_dict()
for original_key in meta_dict:
if original_key in translation_table:
for translated_key in translation_table[original_key]:
export_dict[key+'_'+ho_key][translated_key] = meta_dict[original_key]
else:
export_dict[key+'_'+ho_key][original_key] = meta_dict[original_key]
return export_dict | Translates the metadata contained in an object into a dictionary
suitable for export.
Parameters
----------
meta_to_translate : Meta
The metadata object to translate
Returns
-------
dict
A dictionary of the metadata for each variable of an output file
e.g. netcdf4 | entailment |
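The core of the translation step is a fan-out from one internal label to possibly several exported attribute names. A toy sketch with a made-up metadata row and translation table:
```python
meta_row = {'units_label': 'm/s', 'name_label': 'velocity'}
translation_table = {'units_label': ['UNITS', 'Units'],
                     'name_label': ['LABLAXIS']}

exported = {}
for original_key, value in meta_row.items():
    if original_key in translation_table:
        # one internal label may map to several exported names
        for translated_key in translation_table[original_key]:
            exported[translated_key] = value
    else:
        exported[original_key] = value

print(exported)  # {'UNITS': 'm/s', 'Units': 'm/s', 'LABLAXIS': 'velocity'}
```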
def to_netcdf4(self, fname=None, base_instrument=None, epoch_name='Epoch',
zlib=False, complevel=4, shuffle=True):
"""Stores loaded data into a netCDF4 file.
Parameters
----------
fname : string
full path to save instrument object to
base_instrument : pysat.Instrument
used as a comparison, only attributes that are present with
self and not on base_instrument are written to netCDF
epoch_name : str
Label in file for datetime index of Instrument object
zlib : boolean
Flag for engaging zlib compression (True - compression on)
complevel : int
an integer between 1 and 9 describing the level of compression
desired (default 4). Ignored if zlib=False
shuffle : boolean
the HDF5 shuffle filter will be applied before compressing the data (default True).
This significantly improves compression. Ignored if zlib=False.
Note
----
Stores 1-D data along dimension 'epoch' - the date time index.
Stores higher order data (e.g. dataframes within series) separately
- The name of the main variable column is used to prepend subvariable
names within netCDF, var_subvar_sub
- A netCDF4 dimension is created for each main variable column
with higher order data; first dimension Epoch
- The index organizing the data stored as a dimension variable
- from_netcdf4 uses the variable dimensions to reconstruct data structure
All attributes attached to instrument meta are written to netCDF attrs.
"""
import netCDF4
import pysat
file_format = 'NETCDF4'
# base_instrument used to define the standard attributes attached
# to the instrument object. Any additional attributes added
# to the main input Instrument will be written to the netCDF4
base_instrument = Instrument() if base_instrument is None else base_instrument
# begin processing metadata for writing to the file
# look to see if user supplied a list of export keys
# corresponding to internally tracked metadata within pysat
export_meta = self.generic_meta_translator(self.meta)
if self._meta_translation_table is None:
# didn't find a translation table, using the strings
# attached to the supplied pysat.Instrument object
export_name_labels = [self.name_label]
export_units_labels = [self.units_label]
export_desc_labels = [self.desc_label]
export_notes_labels = [self.notes_label]
else:
# user supplied labels in translation table
export_name_labels = self._meta_translation_table['name_label']
export_units_labels = self._meta_translation_table['units_label']
export_desc_labels = self._meta_translation_table['desc_label']
export_notes_labels = self._meta_translation_table['notes_label']
print('Using Metadata Translation Table: ', self._meta_translation_table)
# Apply instrument specific post-processing to the export_meta
if hasattr(self._export_meta_post_processing, '__call__'):
export_meta = self._export_meta_post_processing(export_meta)
# general process for writing data is this
# first, take care of the EPOCH information
# second, iterate over the variable columns in Instrument.data
# check the type of data
# if 1D column, do simple write (type is not an object)
# if it is an object, then check if writing strings, if not strings, then
# if column is a Series of Frames, write as 2D variables
# metadata must be filtered before writing to netCDF4, string variables
# can't have a fill value
with netCDF4.Dataset(fname, mode='w', format=file_format) as out_data:
# number of time samples in the loaded data
num = len(self.data.index)
# write out the datetime index
out_data.createDimension(epoch_name, num)
cdfkey = out_data.createVariable(epoch_name, 'i8',
dimensions=(epoch_name),
zlib=zlib,
complevel=complevel,
shuffle=shuffle)
# grab existing metadata for Epoch or create suitable info
if epoch_name in self.meta:
new_dict = export_meta[self.meta.var_case_name(epoch_name)]
else:
# create empty shell
new_dict = {}
# update required and basic information if not present
for export_name_label in export_name_labels:
if export_name_label not in new_dict:
new_dict[export_name_label] = epoch_name
for export_units_label in export_units_labels:
if export_units_label not in new_dict:
new_dict[export_units_label] = 'Milliseconds since 1970-1-1 00:00:00'
for export_desc_label in export_desc_labels:
if export_desc_label not in new_dict:
new_dict[export_desc_label] = 'Milliseconds since 1970-1-1 00:00:00'
for export_notes_label in export_notes_labels:
if export_notes_label not in new_dict:
new_dict[export_notes_label] = ''
new_dict['calendar'] = 'standard'
new_dict['Format'] = 'i8'
new_dict['Var_Type'] = 'data'
if self.data.index.is_monotonic_increasing:
new_dict['MonoTon'] = 'increase'
elif self.data.index.is_monotonic_decreasing:
new_dict['MonoTon'] = 'decrease'
new_dict['Time_Base'] = 'Milliseconds since 1970-1-1 00:00:00'
new_dict['Time_Scale'] = 'UTC'
new_dict = self._filter_netcdf4_metadata(new_dict, np.int64)
# attach metadata
cdfkey.setncatts(new_dict)
# attach data
cdfkey[:] = (self.data.index.values.astype(np.int64) *
1.E-6).astype(np.int64)
# iterate over all of the columns in the Instrument dataframe
# check what kind of data we are dealing with, then store
for key in self.data.columns:
# print (key)
# get information on type data we are dealing with
# data is data in proper type (multiformat support)
# coltype is the direct type, np.int64
# and datetime_flag lets you know if the data is full of time
# information
data, coltype, datetime_flag = self._get_data_info(self[key],
file_format)
# operate on data based upon type
if self[key].dtype != np.dtype('O'):
# not an object, normal basic 1D data
# print(key, coltype, file_format)
cdfkey = out_data.createVariable(key,
coltype,
dimensions=(epoch_name),
zlib=zlib,
complevel=complevel,
shuffle=shuffle) #, chunksizes=1)
# attach any meta data, after filtering for standards
try:
# attach dimension metadata
new_dict = export_meta[key]
new_dict['Depend_0'] = epoch_name
new_dict['Display_Type'] = 'Time Series'
new_dict['Format'] = self._get_var_type_code(coltype)
new_dict['Var_Type'] = 'data'
new_dict = self._filter_netcdf4_metadata(new_dict,
coltype)
cdfkey.setncatts(new_dict)
except KeyError:
print(', '.join(('Unable to find MetaData for', key)))
# assign data
if datetime_flag:
# datetime is in nanoseconds, storing milliseconds
cdfkey[:] = (data.values.astype(coltype)
* 1.E-6).astype(coltype)
else:
# not datetime data, just store as is
cdfkey[:] = data.values.astype(coltype)
# back to main check on type of data to write
else:
# it is a Series of objects, need to figure out
# what the actual objects are, then act as needed
# use info in coltype to get real datatype of object
# isinstance isn't reliable here; coltype is a type object, so compare types directly
if (coltype == type(' ')) or (coltype == type(u' ')):
# dealing with a string
cdfkey = out_data.createVariable(key, coltype, \
dimensions=(epoch_name), zlib=zlib, \
complevel=complevel, shuffle=shuffle)
# attach any meta data
try:
# attach dimension metadata
new_dict = export_meta[key]
new_dict['Depend_0'] = epoch_name
new_dict['Display_Type'] = 'Time Series'
new_dict['Format'] = self._get_var_type_code(coltype)
new_dict['Var_Type'] = 'data'
# no FillValue or FillVal allowed for strings
new_dict = self._filter_netcdf4_metadata(new_dict, \
coltype, remove=True)
# really attach metadata now
cdfkey.setncatts(new_dict)
except KeyError:
print(', '.join(('Unable to find MetaData for',
key)))
# time to actually write the data now
cdfkey[:] = data.values
# still dealing with an object, not just a series
# of strings
# maps to if check on coltypes being stringbased
else:
# presuming a series with a dataframe or series in each location
# start by collecting some basic info on dimensions
# sizes, names, then create corresponding netCDF4 dimensions
# total dimensions stored for object are epoch plus ones
# created below
dims = np.shape(self[key].iloc[0])
obj_dim_names = []
if len(dims) == 1:
# generally working with higher dimensional data
# pad dimensions so that the rest of the code works
# for either a Series or a Frame
dims = (dims[0], 0)
for i, dim in enumerate(dims[:-1]):
# don't need to go over last dimension value,
# it covers number of columns (if a frame)
obj_dim_names.append(key)
out_data.createDimension(obj_dim_names[-1], dim)
# create simple tuple with information needed to create
# the right dimensions for variables that will
# be written to file
var_dim = tuple([epoch_name] + obj_dim_names)
# We need to do different things if a series or dataframe
# stored
try:
# start by assuming it is a dataframe
# get list of subvariables
iterable = self[key].iloc[0].columns
# store our newfound knowledge, we are dealing with
# a series of DataFrames
is_frame = True
except AttributeError:
# turns out data is Series of Series
# which doesn't have columns
iterable = [self[key].iloc[0].name]
is_frame = False
# find location within main variable
# that actually has subvariable data (not just empty frame/series)
# so we can determine what the real underlying data types are
good_data_loc = 0
for jjj in np.arange(len(self.data)):
if len(self.data[key].iloc[jjj]) > 0:
good_data_loc = jjj
break
# found a place with data, if there is one
# now iterate over the subvariables, get data info
# create netCDF4 variables and store the data
# stored name is variable_subvariable
for col in iterable:
if is_frame:
# we are working with a dataframe
# so multiple subvariables stored under a single
# main variable heading
data, coltype, _ = self._get_data_info(self[key].iloc[good_data_loc][col], file_format)
cdfkey = out_data.createVariable(key + '_' + col,
coltype,
dimensions=var_dim,
zlib=zlib,
complevel=complevel,
shuffle=shuffle)
# attach any meta data
try:
new_dict = export_meta[key+'_'+col]
new_dict['Depend_0'] = epoch_name
new_dict['Depend_1'] = obj_dim_names[-1]
new_dict['Display_Type'] = 'Spectrogram'
new_dict['Format'] = self._get_var_type_code(coltype)
new_dict['Var_Type'] = 'data'
# print('Frame Writing ', key, col, export_meta[key].children[col])
new_dict = self._filter_netcdf4_metadata(new_dict, coltype)
# print ('mid2 ', new_dict)
cdfkey.setncatts(new_dict)
except KeyError:
print(', '.join(('Unable to find MetaData for', key, col)) )
# attach data
# it may be slow to repeatedly call the store and astype
# methods, so collect data into a numpy array, then write
# the full array in one go
# print(coltype, dims)
temp_cdf_data = np.zeros((num, dims[0])).astype(coltype)
for i in range(num):
temp_cdf_data[i, :] = self[key].iloc[i][col].values
# write data
cdfkey[:, :] = temp_cdf_data.astype(coltype)
else:
# we are dealing with a Series
# get information about information within series
data, coltype, _ = self._get_data_info(self[key].iloc[good_data_loc], file_format)
cdfkey = out_data.createVariable(key + '_data',
coltype,
dimensions=var_dim,
zlib=zlib,
complevel=complevel,
shuffle=shuffle) #, chunksizes=1)
# attach any meta data
try:
new_dict = export_meta[key]
new_dict['Depend_0'] = epoch_name
new_dict['Depend_1'] = obj_dim_names[-1]
new_dict['Display_Type'] = 'Spectrogram'
new_dict['Format'] = self._get_var_type_code(coltype)
new_dict['Var_Type'] = 'data'
new_dict = self._filter_netcdf4_metadata(new_dict, coltype)
# really attach metadata now
# print ('mid3 ', new_dict)
cdfkey.setncatts(new_dict)
except KeyError:
print(', '.join(('Unable to find MetaData for', key)))
# attach data
temp_cdf_data = np.zeros((num, dims[0])).astype(coltype)
for i in range(num):
temp_cdf_data[i, :] = self[i, key].values
# write data
cdfkey[:, :] = temp_cdf_data.astype(coltype)
# we are done storing the actual data for the given higher
# order variable, now we need to store the index for all
# of that fancy data
# get index information
data, coltype, datetime_flag = self._get_data_info(self[key].iloc[good_data_loc].index, file_format)
# create dimension variable to store the index in netCDF4
cdfkey = out_data.createVariable(key,
coltype, dimensions=var_dim,
zlib=zlib,
complevel=complevel,
shuffle=shuffle)
# work with metadata
new_dict = export_meta[key]
new_dict['Depend_0'] = epoch_name
new_dict['Depend_1'] = obj_dim_names[-1]
new_dict['Display_Type'] = 'Time Series'
new_dict['Format'] = self._get_var_type_code(coltype)
new_dict['Var_Type'] = 'data'
if datetime_flag:
#print('datetime flag')
for export_name_label in export_name_labels:
new_dict[export_name_label] = epoch_name
for export_units_label in export_units_labels:
new_dict[export_units_label] = 'Milliseconds since 1970-1-1 00:00:00'
new_dict = self._filter_netcdf4_metadata(new_dict, coltype)
# set metadata dict
cdfkey.setncatts(new_dict)
# set data
temp_cdf_data = np.zeros((num,
dims[0])).astype(coltype)
for i in range(num):
temp_cdf_data[i, :] = self[i, key].index.values
cdfkey[:, :] = (temp_cdf_data.astype(coltype) *
1.E-6).astype(coltype)
else:
if self[key].iloc[good_data_loc].index.name is not None:
for export_name_label in export_name_labels:
new_dict[export_name_label] = self[key].iloc[good_data_loc].index.name
else:
for export_name_label in export_name_labels:
new_dict[export_name_label] = key
new_dict = self._filter_netcdf4_metadata(new_dict, coltype)
# assign metadata dict
cdfkey.setncatts(new_dict)
# set data
temp_cdf_data = np.zeros((num, dims[0])).astype(coltype)
for i in range(num):
temp_cdf_data[i, :] = self[key].iloc[i].index.to_native_types()
cdfkey[:, :] = temp_cdf_data.astype(coltype)
# store any non standard attributes
# compare this Instrument's attributes to base object
base_attrb = dir(base_instrument)
this_attrb = dir(self)
# filter out any 'private' attributes
# those that start with a _
adict = {}
for key in this_attrb:
if key not in base_attrb:
if key[0] != '_':
adict[key] = self.__getattribute__(key)
# store any non-standard attributes attached to meta
base_attrb = dir(base_instrument.meta)
this_attrb = dir(self.meta)
for key in this_attrb:
if key not in base_attrb:
if key[0] != '_':
adict[key] = self.meta.__getattribute__(key)
adict['pysat_version'] = pysat.__version__
if 'Conventions' not in adict:
adict['Conventions'] = 'SPDF ISTP/IACG Modified for NetCDF'
if 'Text_Supplement' not in adict:
adict['Text_Supplement'] = ''
adict['Date_Start'] = pysat.datetime.strftime(self.data.index[0], '%a, %d %b %Y, %Y-%m-%dT%H:%M:%S.%f UTC')
adict['Date_End'] = pysat.datetime.strftime(self.data.index[-1], '%a, %d %b %Y, %Y-%m-%dT%H:%M:%S.%f UTC')
adict['File'] = os.path.split(fname)
adict['Generation_Date'] = pysat.datetime.utcnow().strftime('%Y%m%d')
adict['Logical_File_ID'] = os.path.split(fname)[-1].split('.')[:-1]
# check for binary types
for key in adict.keys():
if isinstance(adict[key], bool):
adict[key] = int(adict[key])
# print('adict', adict)
out_data.setncatts(adict)
return | Stores loaded data into a netCDF4 file.
Parameters
----------
fname : string
full path to save instrument object to
base_instrument : pysat.Instrument
used as a comparison, only attributes that are present with
self and not on base_instrument are written to netCDF
epoch_name : str
Label in file for datetime index of Instrument object
zlib : boolean
Flag for engaging zlib compression (True - compression on)
complevel : int
an integer between 1 and 9 describing the level of compression
desired (default 4). Ignored if zlib=False
shuffle : boolean
the HDF5 shuffle filter will be applied before compressing the data (default True).
This significantly improves compression. Ignored if zlib=False.
Note
----
Stores 1-D data along dimension 'epoch' - the date time index.
Stores higher order data (e.g. dataframes within series) separately
- The name of the main variable column is used to prepend subvariable
names within netCDF, var_subvar_sub
- A netCDF4 dimension is created for each main variable column
with higher order data; first dimension Epoch
- The index organizing the data stored as a dimension variable
- from_netcdf4 uses the variable dimensions to reconstruct data structure
All attributes attached to instrument meta are written to netCDF attrs. | entailment |
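A hypothetical call to to_netcdf4, assuming a pysat.Instrument with data already loaded; the instrument selection and output file name are placeholders.
```python
import pysat

# placeholder instrument; any loaded pysat.Instrument would do
inst = pysat.Instrument(platform='cnofs', name='vefi', tag='dc_b')
inst.load(2010, 1)  # year, day of year
inst.to_netcdf4('vefi_20100101.nc', zlib=True, complevel=4)
```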
def to_alu_hlu_map(input_str):
"""Converter for alu hlu map
Convert the following input into an alu -> hlu map:
Sample input:
```
HLU Number ALU Number
---------- ----------
0 12
1 23
```
ALU stands for array LUN number
HLU stands for host LUN number
:param input_str: raw input from naviseccli
:return: alu -> hlu map
"""
ret = {}
if input_str is not None:
pattern = re.compile(r'(\d+)\s*(\d+)')
for line in input_str.split('\n'):
line = line.strip()
if len(line) == 0:
continue
matched = re.search(pattern, line)
if matched is None or len(matched.groups()) < 2:
continue
else:
hlu = matched.group(1)
alu = matched.group(2)
ret[int(alu)] = int(hlu)
return ret | Converter for alu hlu map
Convert the following input into an alu -> hlu map:
Sample input:
```
HLU Number ALU Number
---------- ----------
0 12
1 23
```
ALU stands for array LUN number
HLU stands for host LUN number
:param input_str: raw input from naviseccli
:return: alu -> hlu map | entailment |
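Standalone sketch of the HLU/ALU parsing above, run on the sample input from the docstring:
```python
import re

raw = """HLU Number     ALU Number
----------     ----------
  0               12
  1               23"""

pattern = re.compile(r'(\d+)\s*(\d+)')
alu_to_hlu = {}
for line in raw.splitlines():
    matched = pattern.search(line.strip())
    if matched:
        hlu, alu = matched.groups()
        alu_to_hlu[int(alu)] = int(hlu)

print(alu_to_hlu)  # {12: 0, 23: 1}
```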
def to_disk_indices(value):
"""Convert following input to disk indices
Sample input:
```
Disks:
Bus 0 Enclosure 0 Disk 9
Bus 1 Enclosure 0 Disk 12
Bus 1 Enclosure 0 Disk 9
Bus 0 Enclosure 0 Disk 4
Bus 0 Enclosure 0 Disk 7
```
:param value: disk list
:return: disk indices in list
"""
ret = []
p = re.compile(r'Bus\s+(\w+)\s+Enclosure\s+(\w+)\s+Disk\s+(\w+)')
if value is not None:
for line in value.split('\n'):
line = line.strip()
if len(line) == 0:
continue
matched = re.search(p, line)
if matched is None or len(matched.groups()) < 3:
continue
else:
ret.append('{}_{}_{}'.format(*matched.groups()))
return ret | Convert the following input to disk indices
Sample input:
```
Disks:
Bus 0 Enclosure 0 Disk 9
Bus 1 Enclosure 0 Disk 12
Bus 1 Enclosure 0 Disk 9
Bus 0 Enclosure 0 Disk 4
Bus 0 Enclosure 0 Disk 7
```
:param value: disk list
:return: disk indices in list | entailment |
def url_to_host(url):
"""convert a url to a host (ip or domain)
:param url: url string
:returns: host: domain name or ipv4/v6 address
:rtype: str
:raises: ValueError: given an illegal url without an ip or domain name
"""
regex_url = r"([a-z][a-z0-9+\-.]*://)?" + \
r"([a-z0-9\-._~%!$&'()*+,;=]+@)?" + \
r"([a-z0-9\-._~%]+" + \
r"|\[[a-z0-9\-._~%!$&'()*+,;=:]+\])?" + \
r"(:(?P<port>[0-9]+))?"
m = re.match(regex_url, url, re.IGNORECASE)
if m and m.group(3):
return url[m.start(3): m.end(3)]
else:
raise ValueError("URL without a valid host or ip") | convert a url to a host (ip or domain)
:param url: url string
:returns: host: domain name or ipv4/v6 address
:rtype: str
:raises: ValueError: given an illegal url without an ip or domain name | entailment |
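A quick demonstration of the host-extraction regex used above, copied into a standalone snippet; the URLs are made-up examples.
```python
import re

regex_url = (r"([a-z][a-z0-9+\-.]*://)?"
             r"([a-z0-9\-._~%!$&'()*+,;=]+@)?"
             r"([a-z0-9\-._~%]+"
             r"|\[[a-z0-9\-._~%!$&'()*+,;=:]+\])?"
             r"(:(?P<port>[0-9]+))?")

for url in ("https://user@example.com:8080/path", "[fe80::1]:443"):
    m = re.match(regex_url, url, re.IGNORECASE)
    print(url, '->', m.group(3))
# https://user@example.com:8080/path -> example.com
# [fe80::1]:443 -> [fe80::1]
```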
def parse_host_address(addr):
"""
parse host address to get domain name or ipv4/v6 address,
cidr prefix and net mask code string if given a subnet address
:param addr:
:type addr: str
:return: parsed domain name/ipv4 address/ipv6 address,
cidr prefix if there is,
net mask code string if there is
:rtype: (string, int, string)
"""
if addr.startswith('[') and addr.endswith(']'):
addr = addr[1:-1]
parts = addr.split('/')
if len(parts) == 1:
return parts[0], None, None
if len(parts) > 2:
raise ValueError("Illegal host address")
else:
domain_or_ip, prefix = parts
prefix = int(prefix)
if re.match(r"^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$", domain_or_ip):
return domain_or_ip, prefix, ipv4_prefix_to_mask(prefix)
elif ':' in domain_or_ip:
return domain_or_ip, prefix, ipv6_prefix_to_mask(prefix)
else:
return domain_or_ip, None, None | parse host address to get domain name or ipv4/v6 address,
cidr prefix and net mask code string if given a subnet address
:param addr:
:type addr: str
:return: parsed domain name/ipv4 address/ipv6 address,
cidr prefix if there is,
net mask code string if there is
:rtype: (string, int, string) | entailment |
def ipv4_prefix_to_mask(prefix):
"""
ipv4 cidr prefix to net mask
:param prefix: cidr prefix, range in [0, 32]
:type prefix: int
:return: dot separated ipv4 net mask code, eg: 255.255.255.0
:rtype: str
"""
if prefix > 32 or prefix < 0:
raise ValueError("invalid cidr prefix for ipv4")
else:
mask = ((1 << 32) - 1) ^ ((1 << (32 - prefix)) - 1)
eight_ones = 255 # 0b11111111
mask_str = ''
for i in range(0, 4):
mask_str = str(mask & eight_ones) + mask_str
mask = mask >> 8
if i != 3:
mask_str = '.' + mask_str
return mask_str | ipv4 cidr prefix to net mask
:param prefix: cidr prefix, range in [0, 32]
:type prefix: int
:return: dot separated ipv4 net mask code, eg: 255.255.255.0
:rtype: str | entailment |
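A minimal check of the prefix-to-mask arithmetic above, shown for a /24 prefix:
```python
prefix = 24
mask = ((1 << 32) - 1) ^ ((1 << (32 - prefix)) - 1)
octets = [(mask >> shift) & 0xFF for shift in (24, 16, 8, 0)]
print('.'.join(str(o) for o in octets))  # 255.255.255.0
```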
def ipv6_prefix_to_mask(prefix):
"""
ipv6 cidr prefix to net mask
:param prefix: cidr prefix, range in [0, 128]
:type prefix: int
:return: colon separated ipv6 net mask code,
eg: ffff:ffff:ffff:ffff:0000:0000:0000:0000
:rtype: str
"""
if prefix > 128 or prefix < 0:
raise ValueError("invalid cidr prefix for ipv6")
else:
mask = ((1 << 128) - 1) ^ ((1 << (128 - prefix)) - 1)
f = 15 # 0xf or 0b1111
hex_mask_str = ''
for i in range(0, 32):
hex_mask_str = format((mask & f), 'x') + hex_mask_str
mask = mask >> 4
if i != 31 and i & 3 == 3:
hex_mask_str = ':' + hex_mask_str
return hex_mask_str | ipv6 cidr prefix to net mask
:param prefix: cidr prefix, range in [0, 128]
:type prefix: int
:return: colon separated ipv6 net mask code,
eg: ffff:ffff:ffff:ffff:0000:0000:0000:0000
:rtype: str | entailment |
def expand(self, new_size):
""" expand the LUN to a new size
:param new_size: new size in bytes.
:return: the old size
"""
ret = self.size_total
resp = self.modify(size=new_size)
resp.raise_if_err()
return ret | expand the LUN to a new size
:param new_size: new size in bytes.
:return: the old size | entailment |
def update_hosts(self, host_names):
"""Primarily for puppet-unity use.
Update the hosts for the lun if needed.
:param host_names: specify the new hosts which access the LUN.
"""
if self.host_access:
curr_hosts = [access.host.name for access in self.host_access]
else:
curr_hosts = []
if set(curr_hosts) == set(host_names):
log.info('Hosts for updating is equal to current hosts, '
'skip modification.')
return None
new_hosts = [UnityHostList.get(cli=self._cli, name=host_name)[0]
for host_name in host_names]
new_access = [{'host': item,
'accessMask': HostLUNAccessEnum.PRODUCTION}
for item in new_hosts]
resp = self.modify(host_access=new_access)
resp.raise_if_err()
return resp | Primarily for puppet-unity use.
Update the hosts for the lun if needed.
:param host_names: specify the new hosts which access the LUN. | entailment |
def replicate(self, dst_lun_id, max_time_out_of_sync,
replication_name=None, replicate_existing_snaps=None,
remote_system=None):
"""
Creates a replication session with an existing lun as destination.
:param dst_lun_id: destination lun id.
:param max_time_out_of_sync: maximum time to wait before syncing the
source and destination. Value `-1` means the automatic sync is not
performed. `0` means it is a sync replication.
:param replication_name: replication name.
:param replicate_existing_snaps: whether to replicate existing snaps.
:param remote_system: `UnityRemoteSystem` object. The remote system to
which the replication is being configured. When not specified, it
defaults to local system.
:return: created replication session.
"""
return UnityReplicationSession.create(
self._cli, self.get_id(), dst_lun_id, max_time_out_of_sync,
name=replication_name,
replicate_existing_snaps=replicate_existing_snaps,
remote_system=remote_system) | Creates a replication session with an existing lun as destination.
:param dst_lun_id: destination lun id.
:param max_time_out_of_sync: maximum time to wait before syncing the
source and destination. Value `-1` means the automatic sync is not
performed. `0` means it is a sync replication.
:param replication_name: replication name.
:param replicate_existing_snaps: whether to replicate existing snaps.
:param remote_system: `UnityRemoteSystem` object. The remote system to
which the replication is being configured. When not specified, it
defaults to local system.
:return: created replication session. | entailment |
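Hypothetical usage of replicate; the management IP, credentials, LUN name, and destination LUN id below are placeholders, and this assumes the storops UnitySystem client used elsewhere in this library.
```python
from storops import UnitySystem

# placeholder connection details
unity = UnitySystem('10.0.0.1', 'admin', 'password')
lun = unity.get_lun(name='src_lun')
# asynchronous replication session (see max_time_out_of_sync semantics above)
session = lun.replicate(dst_lun_id='sv_2', max_time_out_of_sync=60,
                        replication_name='lun_rep_demo')
```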
def replicate_with_dst_resource_provisioning(self, max_time_out_of_sync,
dst_pool_id,
dst_lun_name=None,
remote_system=None,
replication_name=None,
dst_size=None, dst_sp=None,
is_dst_thin=None,
dst_tiering_policy=None,
is_dst_compression=None):
"""
Creates a replication session with destination lun provisioning.
:param max_time_out_of_sync: maximum time to wait before syncing the
source and destination. Value `-1` means the automatic sync is not
performed. `0` means it is a sync replication.
:param dst_pool_id: id of pool to allocate destination lun.
:param dst_lun_name: destination lun name.
:param remote_system: `UnityRemoteSystem` object. The remote system to
which the replication is being configured. When not specified, it
defaults to local system.
:param replication_name: replication name.
:param dst_size: destination lun size.
:param dst_sp: `NodeEnum` value. Default storage processor of
destination lun.
:param is_dst_thin: indicates whether destination lun is thin or not.
:param dst_tiering_policy: `TieringPolicyEnum` value. Tiering policy of
destination lun.
:param is_dst_compression: indicates whether destination lun is
compression enabled or not.
:return: created replication session.
"""
dst_size = self.size_total if dst_size is None else dst_size
dst_resource = UnityResourceConfig.to_embedded(
name=dst_lun_name, pool_id=dst_pool_id,
size=dst_size, default_sp=dst_sp,
tiering_policy=dst_tiering_policy, is_thin_enabled=is_dst_thin,
is_compression_enabled=is_dst_compression)
return UnityReplicationSession.create_with_dst_resource_provisioning(
self._cli, self.get_id(), dst_resource, max_time_out_of_sync,
remote_system=remote_system, name=replication_name) | Creates a replication session with destination lun provisioning.
:param max_time_out_of_sync: maximum time to wait before syncing the
source and destination. Value `-1` means the automatic sync is not
performed. `0` means it is a sync replication.
:param dst_pool_id: id of pool to allocate destination lun.
:param dst_lun_name: destination lun name.
:param remote_system: `UnityRemoteSystem` object. The remote system to
which the replication is being configured. When not specified, it
defaults to local system.
:param replication_name: replication name.
:param dst_size: destination lun size.
:param dst_sp: `NodeEnum` value. Default storage processor of
destination lun.
:param is_dst_thin: indicates whether destination lun is thin or not.
:param dst_tiering_policy: `TieringPolicyEnum` value. Tiering policy of
destination lun.
:param is_dst_compression: indicates whether destination lun is
compression enabled or not.
:return: created replication session. | entailment |
def get_physical_port(self):
"""Returns the link aggregation object or the ethernet port object."""
obj = None
if self.is_link_aggregation():
obj = UnityLinkAggregation.get(self._cli, self.get_id())
else:
obj = UnityEthernetPort.get(self._cli, self.get_id())
return obj | Returns the link aggregation object or the ethernet port object. | entailment |
def to_embedded(pool_id=None, is_thin_enabled=None,
is_deduplication_enabled=None, is_compression_enabled=None,
is_backup_only=None, size=None, tiering_policy=None,
request_id=None, src_id=None, name=None, default_sp=None,
replication_resource_type=None):
"""
Constructs an embedded object of `UnityResourceConfig`.
:param pool_id: storage pool of the resource.
:param is_thin_enabled: is thin type or not.
:param is_deduplication_enabled: is deduplication enabled or not.
:param is_compression_enabled: is in-line compression (ILC) enabled or
not.
:param is_backup_only: is backup only or not.
:param size: size of the resource.
:param tiering_policy: `TieringPolicyEnum` value. Tiering policy
for the resource.
:param request_id: unique request ID for the configuration.
:param src_id: storage resource if it already exists.
:param name: name of the storage resource.
:param default_sp: `NodeEnum` value. Default storage processor for
the resource.
:param replication_resource_type: `ReplicationEndpointResourceTypeEnum`
value. Replication resource type.
:return:
"""
return {'poolId': pool_id, 'isThinEnabled': is_thin_enabled,
'isDeduplicationEnabled': is_deduplication_enabled,
'isCompressionEnabled': is_compression_enabled,
'isBackupOnly': is_backup_only, 'size': size,
'tieringPolicy': tiering_policy, 'requestId': request_id,
'srcId': src_id, 'name': name, 'defaultSP': default_sp,
'replicationResourceType': replication_resource_type} | Constructs an embedded object of `UnityResourceConfig`.
:param pool_id: storage pool of the resource.
:param is_thin_enabled: is thin type or not.
:param is_deduplication_enabled: is deduplication enabled or not.
:param is_compression_enabled: is in-line compression (ILC) enabled or
not.
:param is_backup_only: is backup only or not.
:param size: size of the resource.
:param tiering_policy: `TieringPolicyEnum` value. Tiering policy
for the resource.
:param request_id: unique request ID for the configuration.
:param src_id: storage resource if it already exists.
:param name: name of the storage resource.
:param default_sp: `NodeEnum` value. Default storage processor for
the resource.
:param replication_resource_type: `ReplicationEndpointResourceTypeEnum`
value. Replication resource type.
:return: | entailment |
def create(cls, cli, src_resource_id, dst_resource_id,
max_time_out_of_sync, name=None, members=None,
auto_initiate=None, hourly_snap_replication_policy=None,
daily_snap_replication_policy=None,
replicate_existing_snaps=None, remote_system=None,
src_spa_interface=None, src_spb_interface=None,
dst_spa_interface=None, dst_spb_interface=None):
"""
Creates a replication session.
:param cli: the rest cli.
:param src_resource_id: id of the replication source, could be
lun/fs/cg.
:param dst_resource_id: id of the replication destination.
:param max_time_out_of_sync: maximum time to wait before syncing the
source and destination. Value `-1` means the automatic sync is not
performed. `0` means it is a sync replication.
:param name: name of the replication.
:param members: list of `UnityLunMemberReplication` objects. If
`src_resource` is a cg, a `lunMemberReplication` list needs to be
passed in to this parameter as the member lun pairing between the
source and destination cg.
:param auto_initiate: indicates whether to perform the first
replication sync automatically.
True - perform the first replication sync automatically.
False - perform the first replication sync manually.
:param hourly_snap_replication_policy: `UnitySnapReplicationPolicy`
object. The policy for replicating hourly scheduled snaps of the
source resource.
:param daily_snap_replication_policy: `UnitySnapReplicationPolicy`
object. The policy for replicating daily scheduled snaps of the
source resource.
:param replicate_existing_snaps: indicates whether or not to replicate
snapshots already existing on the resource.
:param remote_system: `UnityRemoteSystem` object. The remote system of
remote replication.
:param src_spa_interface: `UnityRemoteInterface` object. The
replication interface for source SPA.
:param src_spb_interface: `UnityRemoteInterface` object. The
replication interface for source SPB.
:param dst_spa_interface: `UnityRemoteInterface` object. The
replication interface for destination SPA.
:param dst_spb_interface: `UnityRemoteInterface` object. The
replication interface for destination SPB.
:return: the newly created replication session.
"""
req_body = cli.make_body(
srcResourceId=src_resource_id, dstResourceId=dst_resource_id,
maxTimeOutOfSync=max_time_out_of_sync, members=members,
autoInitiate=auto_initiate, name=name,
hourlySnapReplicationPolicy=hourly_snap_replication_policy,
dailySnapReplicationPolicy=daily_snap_replication_policy,
replicateExistingSnaps=replicate_existing_snaps,
remoteSystem=remote_system,
srcSPAInterface=src_spa_interface,
srcSPBInterface=src_spb_interface,
dstSPAInterface=dst_spa_interface,
dstSPBInterface=dst_spb_interface)
resp = cli.post(cls().resource_class, **req_body)
resp.raise_if_err()
return cls.get(cli, resp.resource_id) | Creates a replication session.
:param cli: the rest cli.
:param src_resource_id: id of the replication source, could be
lun/fs/cg.
:param dst_resource_id: id of the replication destination.
:param max_time_out_of_sync: maximum time to wait before syncing the
source and destination. Value `-1` means the automatic sync is not
performed. `0` means it is a sync replication.
:param name: name of the replication.
:param members: list of `UnityLunMemberReplication` objects. If
`src_resource` is a cg, a `lunMemberReplication` list needs to be
passed in to this parameter as the member lun pairing between the
source and destination cg.
:param auto_initiate: indicates whether to perform the first
replication sync automatically.
True - perform the first replication sync automatically.
False - perform the first replication sync manually.
:param hourly_snap_replication_policy: `UnitySnapReplicationPolicy`
object. The policy for replicating hourly scheduled snaps of the
source resource.
:param daily_snap_replication_policy: `UnitySnapReplicationPolicy`
object. The policy for replicating daily scheduled snaps of the
source resource.
:param replicate_existing_snaps: indicates whether or not to replicate
snapshots already existing on the resource.
:param remote_system: `UnityRemoteSystem` object. The remote system of
remote replication.
:param src_spa_interface: `UnityRemoteInterface` object. The
replication interface for source SPA.
:param src_spb_interface: `UnityRemoteInterface` object. The
replication interface for source SPB.
:param dst_spa_interface: `UnityRemoteInterface` object. The
replication interface for destination SPA.
:param dst_spb_interface: `UnityRemoteInterface` object. The
replication interface for destination SPB.
:return: the newly created replication session. | entailment |
def create_with_dst_resource_provisioning(
cls, cli, src_resource_id, dst_resource_config,
max_time_out_of_sync, name=None, remote_system=None,
src_spa_interface=None, src_spb_interface=None,
dst_spa_interface=None, dst_spb_interface=None,
dst_resource_element_configs=None, auto_initiate=None,
hourly_snap_replication_policy=None,
daily_snap_replication_policy=None, replicate_existing_snaps=None):
"""
Create a replication session along with destination resource
provisioning.
:param cli: the rest cli.
:param src_resource_id: id of the replication source, could be
lun/fs/cg.
:param dst_resource_config: `UnityResourceConfig` object. The user
chosen config for destination resource provisioning. `pool_id` and
`size` are required for creation.
:param max_time_out_of_sync: maximum time to wait before syncing the
source and destination. Value `-1` means the automatic sync is not
performed. `0` means it is a sync replication.
:param name: name of the replication.
:param remote_system: `UnityRemoteSystem` object. The remote system to
which the replication is being configured. When not specified, it
defaults to local system.
:param src_spa_interface: `UnityRemoteInterface` object. The
replication interface for source SPA.
:param src_spb_interface: `UnityRemoteInterface` object. The
replication interface for source SPB.
:param dst_spa_interface: `UnityRemoteInterface` object. The
replication interface for destination SPA.
:param dst_spb_interface: `UnityRemoteInterface` object. The
replication interface for destination SPB.
:param dst_resource_element_configs: List of `UnityResourceConfig`
objects. The user chosen config for each member element of
the destination resource.
:param auto_initiate: indicates whether to perform the first
replication sync automatically.
True - perform the first replication sync automatically.
False - perform the first replication sync manually.
:param hourly_snap_replication_policy: `UnitySnapReplicationPolicy`
object. The policy for replicating hourly scheduled snaps of the
source resource.
:param daily_snap_replication_policy: `UnitySnapReplicationPolicy`
object. The policy for replicating daily scheduled snaps of the
source resource.
:param replicate_existing_snaps: indicates whether or not to replicate
snapshots already existing on the resource.
:return: the newly created replication session.
"""
req_body = cli.make_body(
srcResourceId=src_resource_id,
dstResourceConfig=dst_resource_config,
maxTimeOutOfSync=max_time_out_of_sync,
name=name, remoteSystem=remote_system,
srcSPAInterface=src_spa_interface,
srcSPBInterface=src_spb_interface,
dstSPAInterface=dst_spa_interface,
dstSPBInterface=dst_spb_interface,
dstResourceElementConfigs=dst_resource_element_configs,
autoInitiate=auto_initiate,
hourlySnapReplicationPolicy=hourly_snap_replication_policy,
dailySnapReplicationPolicy=daily_snap_replication_policy,
replicateExistingSnaps=replicate_existing_snaps)
resp = cli.type_action(
cls().resource_class,
'createReplicationSessionWDestResProvisioning',
**req_body)
resp.raise_if_err()
# response is like:
# "content": {
# "id": {
# "id": "42949676351_FNM00150600267_xxxx"
# }
session_resp = resp.first_content['id']
return cls.get(cli, _id=session_resp['id']) | Create a replication session along with destination resource
provisioning.
:param cli: the rest cli.
:param src_resource_id: id of the replication source, could be
lun/fs/cg.
:param dst_resource_config: `UnityResourceConfig` object. The user
chosen config for destination resource provisioning. `pool_id` and
`size` are required for creation.
:param max_time_out_of_sync: maximum time to wait before syncing the
source and destination. Value `-1` means the automatic sync is not
performed. `0` means it is a sync replication.
:param name: name of the replication.
:param remote_system: `UnityRemoteSystem` object. The remote system to
which the replication is being configured. When not specified, it
defaults to local system.
:param src_spa_interface: `UnityRemoteInterface` object. The
replication interface for source SPA.
:param src_spb_interface: `UnityRemoteInterface` object. The
replication interface for source SPB.
:param dst_spa_interface: `UnityRemoteInterface` object. The
replication interface for destination SPA.
:param dst_spb_interface: `UnityRemoteInterface` object. The
replication interface for destination SPB.
:param dst_resource_element_configs: List of `UnityResourceConfig`
objects. The user chosen config for each member element of
the destination resource.
:param auto_initiate: indicates whether to perform the first
replication sync automatically.
True - perform the first replication sync automatically.
False - perform the first replication sync manually.
:param hourly_snap_replication_policy: `UnitySnapReplicationPolicy`
object. The policy for replicating hourly scheduled snaps of the
source resource.
:param daily_snap_replication_policy: `UnitySnapReplicationPolicy`
object. The policy for replicating daily scheduled snaps of the
source resource.
:param replicate_existing_snaps: indicates whether or not to replicate
snapshots already existing on the resource.
:return: the newly created replication session. | entailment |
def modify(self, max_time_out_of_sync=None, name=None,
hourly_snap_replication_policy=None,
daily_snap_replication_policy=None,
src_spa_interface=None, src_spb_interface=None,
dst_spa_interface=None, dst_spb_interface=None):
"""
Modifies properties of a replication session.
:param max_time_out_of_sync: same as the one in `create` method.
:param name: same as the one in `create` method.
:param hourly_snap_replication_policy: same as the one in `create`
method.
:param daily_snap_replication_policy: same as the one in `create`
method.
:param src_spa_interface: same as the one in `create` method.
:param src_spb_interface: same as the one in `create` method.
:param dst_spa_interface: same as the one in `create` method.
:param dst_spb_interface: same as the one in `create` method.
"""
req_body = self._cli.make_body(
maxTimeOutOfSync=max_time_out_of_sync, name=name,
hourlySnapReplicationPolicy=hourly_snap_replication_policy,
dailySnapReplicationPolicy=daily_snap_replication_policy,
srcSPAInterface=src_spa_interface,
srcSPBInterface=src_spb_interface,
dstSPAInterface=dst_spa_interface,
dstSPBInterface=dst_spb_interface)
resp = self.action('modify', **req_body)
resp.raise_if_err()
return resp | Modifies properties of a replication session.
:param max_time_out_of_sync: same as the one in `create` method.
:param name: same as the one in `create` method.
:param hourly_snap_replication_policy: same as the one in `create`
method.
:param daily_snap_replication_policy: same as the one in `create`
method.
:param src_spa_interface: same as the one in `create` method.
:param src_spb_interface: same as the one in `create` method.
:param dst_spa_interface: same as the one in `create` method.
:param dst_spb_interface: same as the one in `create` method. | entailment |
def resume(self, force_full_copy=None,
src_spa_interface=None, src_spb_interface=None,
dst_spa_interface=None, dst_spb_interface=None):
"""
Resumes a replication session.
This can be applied to a replication session when its operational
status is reported as Failed Over or Paused.
:param force_full_copy: needed when replication session goes out of
sync due to a fault.
True - replicate all data.
False - replicate changed data only.
:param src_spa_interface: same as the one in `create` method.
:param src_spb_interface: same as the one in `create` method.
:param dst_spa_interface: same as the one in `create` method.
:param dst_spb_interface: same as the one in `create` method.
"""
req_body = self._cli.make_body(forceFullCopy=force_full_copy,
srcSPAInterface=src_spa_interface,
srcSPBInterface=src_spb_interface,
dstSPAInterface=dst_spa_interface,
dstSPBInterface=dst_spb_interface)
resp = self.action('resume', **req_body)
resp.raise_if_err()
return resp | Resumes a replication session.
This can be applied to a replication session when its operational
status is reported as Failed Over or Paused.
:param force_full_copy: needed when replication session goes out of
sync due to a fault.
True - replicate all data.
False - replicate changed data only.
:param src_spa_interface: same as the one in `create` method.
:param src_spb_interface: same as the one in `create` method.
:param dst_spa_interface: same as the one in `create` method.
:param dst_spb_interface: same as the one in `create` method. | entailment |
def failover(self, sync=None, force=None):
"""
Fails over a replication session.
:param sync: True - sync the source and destination resources before
failing over the asynchronous replication session or keep them in
sync after failing over the synchronous replication session.
False - don't sync.
:param force: True - skip pre-checks on file system(s) replication
sessions of a NAS server when a replication failover is issued from
the source NAS server.
False - don't skip pre-checks.
"""
req_body = self._cli.make_body(sync=sync, force=force)
resp = self.action('failover', **req_body)
resp.raise_if_err()
return resp | Fails over a replication session.
:param sync: True - sync the source and destination resources before
failing over the asynchronous replication session or keep them in
sync after failing over the synchronous replication session.
False - don't sync.
:param force: True - skip pre-checks on file system(s) replication
sessions of a NAS server when a replication failover is issued from
the source NAS server.
False - don't skip pre-checks. | entailment |
def failback(self, force_full_copy=None):
"""
Fails back a replication session.
This can be applied on a replication session that is failed over. Fail
back will synchronize the changes done to original destination back to
original source site and will restore the original direction of
session.
:param force_full_copy: indicates whether to sync back all data from
the destination SP to the source SP during the failback session.
True - Sync back all data.
False - Sync back changed data only.
"""
req_body = self._cli.make_body(forceFullCopy=force_full_copy)
resp = self.action('failback', **req_body)
resp.raise_if_err()
return resp | Fails back a replication session.
This can be applied on a replication session that is failed over. Fail
back will synchronize the changes done to original destination back to
original source site and will restore the original direction of
session.
:param force_full_copy: indicates whether to sync back all data from
the destination SP to the source SP during the failback session.
True - Sync back all data.
False - Sync back changed data only. | entailment |
def _calcOrbits(self):
"""Prepares data structure for breaking data into orbits. Not intended
for end user."""
# if the breaks between orbit have not been defined, define them
# also, store the data so that grabbing different orbits does not
# require reloads of whole dataset
if len(self._orbit_breaks) == 0:
# determine orbit breaks
self._detBreaks()
# store a copy of data
self._fullDayData = self.sat.data.copy()
# set current orbit counter to zero (default)
self._current = 0 | Prepares data structure for breaking data into orbits. Not intended
for end user. | entailment |
def _equaBreaks(self, orbit_index_period=24.):
"""Determine where breaks in an equatorial satellite orbit occur.
Looks for negative gradients in local time (or longitude) as well as
breaks in UT.
Parameters
----------
orbit_index_period : float
The change in value of supplied index parameter for a single orbit
"""
if self.orbit_index is None:
raise ValueError('Orbit properties must be defined at ' +
'pysat.Instrument object instantiation. ' +
'See Instrument docs.')
else:
try:
self.sat[self.orbit_index]
except ValueError:
raise ValueError('Provided orbit index does not exist in ' +
'loaded data')
# get difference in orbit index around the orbit
lt_diff = self.sat[self.orbit_index].diff()
# universal time values, from datetime index
ut_vals = Series(self.sat.data.index)
# UT difference
ut_diff = ut_vals.diff()
# get locations where orbit index derivative is less than 0
# then do some basic checks on these locations
ind, = np.where((lt_diff < -0.1))
if len(ind) > 0:
ind = np.hstack((ind, np.array([len(self.sat[self.orbit_index])])))
# look at distance between breaks
dist = ind[1:] - ind[0:-1]
# only keep orbit breaks with a distance greater than 1
# done for robustness
if len(ind) > 1:
if min(dist) == 1:
print('There are orbit breaks right next to each other')
ind = ind[:-1][dist > 1]
# check for large positive gradients around the break that would
# suggest not a true orbit break, but rather bad orbit_index values
new_ind = []
for idx in ind:
tidx, = np.where(lt_diff[idx - 5:idx + 6] > 0.1)
if len(tidx) != 0:
# there are large changes, suggests a false alarm
# iterate over samples and check
for tidx in tidx:
# look at time change vs local time change
if(ut_diff[idx - 5:idx + 6].iloc[tidx] <
lt_diff[idx - 5:idx + 6].iloc[tidx] /
orbit_index_period * self.orbit_period):
# change in ut is small compared to the change in
# the orbit index this is flagged as a false alarm,
# or dropped from consideration
pass
else:
# change in UT is significant, keep orbit break
new_ind.append(idx)
break
else:
# no large positive gradients, current orbit break passes
# the first test
new_ind.append(idx)
# replace all breaks with those that are 'good'
ind = np.array(new_ind)
# now, assemble some orbit breaks that are not triggered by changes in
# the orbit index
# check if there is a UT break that is larger than orbital period, aka
# a time gap
ut_change_vs_period = ( ut_diff > self.orbit_period )
# characterize ut change using orbital period
norm_ut = ut_diff / self.orbit_period
# now, look for breaks because the length of time between samples is
# too large, thus there is no break in slt/mlt/etc, lt_diff is small
# but UT change is big
norm_ut_vs_norm_lt = norm_ut.gt(np.abs(lt_diff.values /
orbit_index_period))
# indices when one or other flag is true
ut_ind, = np.where(ut_change_vs_period | (norm_ut_vs_norm_lt &
(norm_ut > 0.95)))
# added the or and check after or on 10/20/2014
# & lt_diff.notnull() ))# & (lt_diff != 0) ) )
# combine these UT determined orbit breaks with the orbit index orbit
# breaks
if len(ut_ind) > 0:
ind = np.hstack((ind, ut_ind))
ind = np.sort(ind)
ind = np.unique(ind)
print('Time Gap')
# now that most problems in orbits should have been caught, look at
# the time difference between orbits (not individual orbits)
orbit_ut_diff = ut_vals[ind].diff()
orbit_lt_diff = self.sat[self.orbit_index][ind].diff()
# look for time gaps between partial orbits. The full orbital time
# period is not required between end of one orbit and beginning of next
# if first orbit is partial. Also provides another general test of the
# orbital breaks determined.
idx, = np.where((orbit_ut_diff / self.orbit_period -
orbit_lt_diff.values / orbit_index_period) > 0.97)
# pull out breaks that pass the test, need to make sure the first one
# is always included it gets dropped via the nature of diff
if len(idx) > 0:
if idx[0] != 0:
idx = np.hstack((0, idx))
else:
idx = np.array([0])
# only keep the good indices
if len(ind) > 0:
ind = ind[idx]
# create orbitbreak index, ensure first element is always 0
if ind[0] != 0:
ind = np.hstack((np.array([0]), ind))
else:
ind = np.array([0])
# number of orbits
num_orbits = len(ind)
# set index of orbit breaks
self._orbit_breaks = ind
# set number of orbits for the day
self.num = num_orbits | Determine where breaks in an equatorial satellite orbit occur.
Looks for negative gradients in local time (or longitude) as well as
breaks in UT.
Parameters
----------
orbit_index_period : float
The change in value of supplied index parameter for a single orbit | entailment |
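A toy illustration of the negative-gradient test used for equatorial orbit breaks, with synthetic local-time values:
```python
import numpy as np
import pandas as pd

# local time wrapping from ~24 back to 0 marks an orbit boundary
local_time = pd.Series([22.0, 23.0, 23.9, 0.1, 1.0, 2.0])
lt_diff = local_time.diff()
breaks, = np.where(lt_diff < -0.1)
print(breaks)  # [3] -> a new orbit starts at sample 3
```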
def _polarBreaks(self):
"""Determine where breaks in a polar orbiting satellite orbit occur.
Looks for sign changes in latitude (magnetic or geographic) as well as
breaks in UT.
"""
if self.orbit_index is None:
raise ValueError('Orbit properties must be defined at ' +
'pysat.Instrument object instantiation. ' +
'See Instrument docs.')
else:
try:
self.sat[self.orbit_index]
except ValueError:
raise ValueError('Provided orbit index does not appear to ' +
'exist in loaded data')
# determine where orbit index goes from positive to negative
pos = (self.sat[self.orbit_index] >= 0)
npos = ~pos
change = (pos.values[:-1] & npos.values[1:]) | (npos.values[:-1] &
pos.values[1:])
ind, = np.where(change)
ind += 1
ut_diff = Series(self.sat.data.index).diff()
ut_ind, = np.where(ut_diff / self.orbit_period > 0.95)
if len(ut_ind) > 0:
ind = np.hstack((ind, ut_ind))
ind = np.sort(ind)
ind = np.unique(ind)
# print 'Time Gap'
# create orbitbreak index, ensure first element is always 0
if ind[0] != 0:
ind = np.hstack((np.array([0]), ind))
# number of orbits
num_orbits = len(ind)
# set index of orbit breaks
self._orbit_breaks = ind
# set number of orbits for the day
self.num = num_orbits | Determine where breaks in a polar orbiting satellite orbit occur.
Looks for sign changes in latitude (magnetic or geographic) as well as
breaks in UT. | entailment |
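A toy sketch of the latitude sign-change test used for polar orbit breaks, with synthetic latitudes:
```python
import numpy as np

lat = np.array([-10.0, -2.0, 3.0, 40.0, 5.0, -4.0])
pos = lat >= 0
change = (pos[:-1] & ~pos[1:]) | (~pos[:-1] & pos[1:])
ind = np.where(change)[0] + 1
print(ind)  # [2 5] -> orbit breaks at samples 2 and 5
```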
def _orbitNumberBreaks(self):
"""Determine where orbital breaks in a dataset with orbit numbers occur.
Looks for changes in unique values.
"""
if self.orbit_index is None:
raise ValueError('Orbit properties must be defined at ' +
'pysat.Instrument object instantiation. ' +
'See Instrument docs.')
else:
try:
self.sat[self.orbit_index]
except ValueError:
raise ValueError('Provided orbit index does not appear to ' +
'exist in loaded data')
# determine where the orbit index changes from one value to the next
uniq_vals = self.sat[self.orbit_index].unique()
orbit_index = []
for val in uniq_vals:
idx, = np.where(val == self.sat[self.orbit_index].values)
orbit_index.append(idx[0])
# create orbitbreak index, ensure first element is always 0
if orbit_index[0] != 0:
ind = np.hstack((np.array([0]), orbit_index))
else:
ind = orbit_index
# number of orbits
num_orbits = len(ind)
# set index of orbit breaks
self._orbit_breaks = ind
# set number of orbits for the day
self.num = num_orbits | Determine where orbital breaks in a dataset with orbit numbers occur.
Looks for changes in unique values. | entailment |
def _getBasicOrbit(self, orbit=None):
"""Load a particular orbit into .data for loaded day.
Parameters
----------
orbit : int
orbit number, 1 indexed, negative indexes allowed, -1 last orbit
Note
----
A day of data must be loaded before this routine functions properly.
If the last orbit of the day is requested, it will NOT automatically be
padded with data from the next day.
"""
# ensure data exists
if not self.sat.empty:
# ensure proper orbit metadata present
self._calcOrbits()
# ensure user is requesting a particular orbit
if orbit is not None:
# pull out requested orbit
if orbit == -1:
# load orbit data into data
self.sat.data = self._fullDayData[self._orbit_breaks[self.num + orbit]:]
self._current = self.num + orbit + 1
elif ((orbit < 0) & (orbit >= -self.num)):
# load orbit data into data
self.sat.data = self._fullDayData[
self._orbit_breaks[self.num + orbit]:self._orbit_breaks[self.num + orbit + 1]]
self._current = self.num + orbit + 1
elif (orbit < self.num) & (orbit != 0):
# load orbit data into data
self.sat.data = self._fullDayData[self._orbit_breaks[orbit - 1]:self._orbit_breaks[orbit]]
self._current = orbit
elif orbit == self.num:
self.sat.data = self._fullDayData[self._orbit_breaks[orbit - 1]:]
# recent addition, wondering why it wasn't there before,
# could just be a bug that is now fixed.
self._current = orbit
elif orbit == 0:
raise ValueError('Orbits internally indexed by 1, 0 not ' +
'allowed')
else:
# gone too far
self.sat.data = []
raise ValueError('Requested an orbit past total orbits ' +
'for day')
else:
raise ValueError('Must set an orbit') | Load a particular orbit into .data for loaded day.
Parameters
----------
orbit : int
orbit number, 1 indexed, negative indexes allowed, -1 last orbit
Note
----
A day of data must be loaded before this routine functions properly.
If the last orbit of the day is requested, it will NOT automatically be
padded with data from the next day. | entailment |
def load(self, orbit=None):
"""Load a particular orbit into .data for loaded day.
Parameters
----------
orbit : int
orbit number, 1 indexed
Note
----
A day of data must be loaded before this routine functions properly.
If the last orbit of the day is requested, it will automatically be
padded with data from the next day. The orbit counter will be
reset to 1.
"""
if not self.sat.empty: # ensure data exists
# set up orbit metadata
self._calcOrbits()
# ensure user supplied an orbit
if orbit is not None:
# pull out requested orbit
if orbit < 0:
# negative indexing consistent with numpy, -1 last,
# -2 second to last, etc.
orbit = self.num + 1 + orbit
if orbit == 1:
# change from orig copied from _core, didn't look correct.
# self._getBasicOrbit(orbit=2)
try:
true_date = self.sat.date # .copy()
self.sat.prev()
                        # if and else added because of CINDI turn off
# 6/5/2013, turn on 10/22/2014
# crashed when starting on 10/22/2014
# prev returned empty data
if not self.sat.empty:
self.load(orbit=-1)
else:
self.sat.next()
self._getBasicOrbit(orbit=1)
# check that this orbit should end on the current day
delta = true_date - self.sat.data.index[0]
# print 'checking if first orbit should land on requested day'
# print self.sat.date, self.sat.data.index[0], delta, delta >= self.orbit_period
if delta >= self.orbit_period:
# the orbit loaded isn't close enough to date
# to be the first orbit of the day, move forward
self.next()
except StopIteration:
# print 'going for basic orbit'
self._getBasicOrbit(orbit=1)
# includes hack to appear to be zero indexed
print('Loaded Orbit:%i' % (self._current - 1))
# check if the first orbit is also the last orbit
elif orbit == self.num:
# we get here if user asks for last orbit
# make sure that orbit data goes across daybreak as needed
# load previous orbit
if self.num != 1:
self._getBasicOrbit(self.num - 1)
self.next()
else:
self._getBasicOrbit(orbit=-1)
elif orbit < self.num:
# load orbit data into data
self._getBasicOrbit(orbit)
# includes hack to appear to be zero indexed
print('Loaded Orbit:%i' % (self._current - 1))
else:
# gone too far
self.sat.data = DataFrame()
raise Exception('Requested an orbit past total orbits for day')
else:
raise Exception('Must set an orbit')
else:
print('No data loaded in instrument object to determine orbits.') | Load a particular orbit into .data for loaded day.
Parameters
----------
orbit : int
orbit number, 1 indexed
Note
----
A day of data must be loaded before this routine functions properly.
If the last orbit of the day is requested, it will automatically be
padded with data from the next day. The orbit counter will be
reset to 1. | entailment |
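A hedged usage sketch of orbit selection; the test instrument, orbit period, and date below are illustrative assumptions rather than values taken from this module:

import pandas as pds
import pysat

info = {'index': 'mlt', 'kind': 'local time',
        'period': pds.DateOffset(minutes=97)}
ivm = pysat.Instrument('pysat', 'testing', clean_level='none',
                       orbit_info=info)
ivm.load(2009, 1)      # a day of data must be loaded first
ivm.orbits.load(5)     # fifth orbit of the loaded day (1-indexed)
ivm.orbits.load(-1)    # last orbit, padded from the next day if needed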
def next(self, *arg, **kwarg):
"""Load the next orbit into .data.
Note
----
Forms complete orbits across day boundaries. If no data loaded
then the first orbit from the first date of data is returned.
"""
# first, check if data exists
if not self.sat.empty:
# set up orbit metadata
self._calcOrbits()
# if current orbit near the last, must be careful
if self._current == (self.num - 1):
# first, load last orbit data
self._getBasicOrbit(orbit=-1)
# End of orbit may occur on the next day
load_next = True
if self.sat._iter_type == 'date':
delta = self.sat.date - self.sat.data.index[-1] \
+ pds.Timedelta('1 day')
if delta >= self.orbit_period:
# don't need to load the next day because this orbit
# ends more than a orbital period from the next date
load_next = False
if load_next:
# the end of the user's desired orbit occurs tomorrow, need
# to form a complete orbit save this current orbit, load
# the next day, combine data, select the correct orbit
temp_orbit_data = self.sat.data.copy()
try:
# loading next day/file clears orbit breaks info
self.sat.next()
if not self.sat.empty:
# combine this next day's data with previous last
# orbit, grab the first one
self.sat.data = pds.concat(
[temp_orbit_data[:self.sat.data.index[0] -
pds.DateOffset(microseconds=1)],
self.sat.data])
self._getBasicOrbit(orbit=1)
else:
# no data, go back a day and grab the last orbit.
# As complete as orbit can be
self.sat.prev()
self._getBasicOrbit(orbit=-1)
except StopIteration:
pass
del temp_orbit_data
# includes hack to appear to be zero indexed
print('Loaded Orbit:%i' % (self._current - 1))
elif self._current == (self.num):
# at the last orbit, need to be careful about getting the next
# orbit save this current orbit and load the next day
temp_orbit_data = self.sat.data.copy()
# load next day, which clears orbit breaks info
self.sat.next()
# combine this next day orbit with previous last orbit to
# ensure things are correct
if not self.sat.empty:
pad_next = True
# check if data padding is really needed, only works when
# loading by date
if self.sat._iter_type == 'date':
delta = self.sat.date - temp_orbit_data.index[-1]
if delta >= self.orbit_period:
# the end of the previous orbit is more than an
# orbit away from today we don't have to worry
# about it
pad_next = False
if pad_next:
# orbit went across day break, stick old orbit onto new
# data and grab second orbit (first is old)
self.sat.data = pds.concat(
[temp_orbit_data[:self.sat.data.index[0] -
pds.DateOffset(microseconds=1)],
self.sat.data])
# select second orbit of combined data
self._getBasicOrbit(orbit=2)
else:
# padding from the previous orbit wasn't needed, can
# just grab the first orbit of loaded data
self._getBasicOrbit(orbit=1)
if self.sat._iter_type == 'date':
delta = self.sat.date + pds.DateOffset(days=1) \
- self.sat.data.index[0]
if delta < self.orbit_period:
# this orbits end occurs on the next day, though
# we grabbed the first orbit, missing data
# means the first available orbit in the data
# is actually the last for the day. Resetting to
# the second to last orbit and then calling
# next() will get the last orbit, accounting
# for tomorrow's data as well.
self._current = self.num - 1
self.next()
else:
# no data for the next day
# continue loading data until there is some
# nextData raises StopIteration when it reaches the end,
# leaving this function
while self.sat.empty:
self.sat.next()
self._getBasicOrbit(orbit=1)
del temp_orbit_data
# includes hack to appear to be zero indexed
print('Loaded Orbit:%i' % (self._current - 1))
elif self._current == 0:
# no current orbit set, grab the first one
# using load command to specify the first orbit, which
# automatically loads prev day if needed to form complete orbit
self.load(orbit=1)
elif self._current < (self.num - 1):
# since we aren't close to the last orbit, just pull the next
# orbit
self._getBasicOrbit(orbit=self._current + 1)
# includes hack to appear to be zero indexed
print('Loaded Orbit:%i' % (self._current - 1))
else:
raise Exception('You ended up where nobody should ever be. ' +
'Talk to someone about this fundamental ' +
'failure.')
else: # no data
while self.sat.empty:
# keep going until data is found
# next raises stopIteration at end of data set, no more data
# possible
self.sat.next()
# we've found data, grab the next orbit
self.next() | Load the next orbit into .data.
Note
----
Forms complete orbits across day boundaries. If no data loaded
then the first orbit from the first date of data is returned. | entailment |
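A hedged sketch of stepping through consecutive orbits with next(), reusing the same kind of illustrative setup as the sketch after the load method above:

import pandas as pds
import pysat

info = {'index': 'mlt', 'kind': 'local time',
        'period': pds.DateOffset(minutes=97)}
ivm = pysat.Instrument('pysat', 'testing', clean_level='none',
                       orbit_info=info)
ivm.load(2009, 2)
for _ in range(3):
    ivm.orbits.next()  # forms complete orbits across day boundaries
    print(ivm.data.index[0], '->', ivm.data.index[-1])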
def load(fnames, tag=None, sat_id=None):
"""Load Kp index files
Parameters
------------
fnames : (pandas.Series)
Series of filenames
tag : (str or NoneType)
tag or None (default=None)
sat_id : (str or NoneType)
satellite id or None (default=None)
Returns
---------
data : (pandas.DataFrame)
Object containing satellite data
meta : (pysat.Meta)
Object containing metadata such as column names and units
Note
----
Called by pysat. Not intended for direct use by user.
"""
data = pds.DataFrame()
for filename in fnames:
# need to remove date appended to dst filename
fname = filename[0:-11]
#f = open(fname)
with open(fname) as f:
lines = f.readlines()
idx = 0
# check if all lines are good
max_lines=0
for line in lines:
if len(line) > 1:
max_lines+=1
yr = np.zeros(max_lines*24, dtype=int)
mo = np.zeros(max_lines*24, dtype=int)
day = np.zeros(max_lines*24, dtype=int)
ut = np.zeros(max_lines*24, dtype=int)
dst = np.zeros(max_lines*24, dtype=int)
for line in lines:
if len(line) > 1:
temp_year = int(line[14:16] + line[3:5])
if temp_year > 57:
temp_year += 1900
else:
temp_year += 2000
yr[idx:idx+24] = temp_year
mo[idx:idx+24] = int(line[5:7])
day[idx:idx+24] = int(line[8:10])
ut[idx:idx+24] = np.arange(24)
temp = line.strip()[20:-4]
temp2 = [temp[4*i:4*(i+1)] for i in np.arange(24)]
dst[idx:idx+24] = temp2
idx += 24
#f.close()
start = pds.datetime(yr[0], mo[0], day[0], ut[0])
stop = pds.datetime(yr[-1], mo[-1], day[-1], ut[-1])
dates = pds.date_range(start, stop, freq='H')
new_data = pds.DataFrame(dst, index=dates, columns=['dst'])
# pull out specific day
new_date = pysat.datetime.strptime(filename[-10:], '%Y-%m-%d')
idx, = np.where((new_data.index >= new_date) & (new_data.index < new_date+pds.DateOffset(days=1)))
new_data = new_data.iloc[idx,:]
# add specific day to all data loaded for filenames
data = pds.concat([data, new_data], axis=0)
    return data, pysat.Meta() | Load Dst index files
Parameters
------------
fnames : (pandas.Series)
Series of filenames
tag : (str or NoneType)
tag or None (default=None)
sat_id : (str or NoneType)
satellite id or None (default=None)
Returns
---------
data : (pandas.DataFrame)
Object containing satellite data
meta : (pysat.Meta)
Object containing metadata such as column names and units
Note
----
Called by pysat. Not intended for direct use by user. | entailment |
def _parse(yr, mo, day):
"""
Basic parser to deal with date format of the Kp file.
"""
yr = '20'+yr
yr = int(yr)
mo = int(mo)
day = int(day)
return pds.datetime(yr, mo, day) | Basic parser to deal with date format of the Kp file. | entailment |
def load(fnames, tag=None, sat_id=None):
"""Load Kp index files
Parameters
------------
fnames : (pandas.Series)
Series of filenames
tag : (str or NoneType)
tag or None (default=None)
sat_id : (str or NoneType)
satellite id or None (default=None)
Returns
---------
data : (pandas.DataFrame)
Object containing satellite data
meta : (pysat.Meta)
Object containing metadata such as column names and units
Notes
-----
Called by pysat. Not intended for direct use by user.
"""
# Kp data stored monthly, need to return data daily
# the daily date is attached to filename
# parse off the last date, load month of data, downselect to desired day
data = pds.DataFrame()
    # set up fixed width format for these files
    colspec = [(0, 2), (2, 4), (4, 6), (7, 10), (10, 13), (13, 16), (16, 19),
               (19, 23), (23, 26), (26, 29), (29, 32), (32, 50)]
for filename in fnames:
# the daily date is attached to filename
# parse off the last date, load month of data, downselect to desired day
fname = filename[0:-11]
date = pysat.datetime.strptime(filename[-10:], '%Y-%m-%d')
temp = pds.read_fwf(fname, colspecs=colspec, skipfooter=4,header=None,
parse_dates=[[0,1,2]], date_parser=_parse,
index_col='0_1_2')
idx, = np.where((temp.index >= date) & (temp.index < date+pds.DateOffset(days=1)))
temp = temp.iloc[idx,:]
data = pds.concat([data,temp], axis=0)
# drop last column as it has data I don't care about
data = data.iloc[:,0:-1]
# each column increments UT by three hours
# produce a single data series that has Kp value monotonically increasing in time
# with appropriate datetime indices
    # Series.append was removed from modern pandas; build one Series per
    # 3-hour column and concatenate them instead
    s = pds.concat([pds.Series(data.iloc[:, i].values,
                               index=data.index + pds.DateOffset(hours=int(3 * i)))
                    for i in np.arange(8)])
    s = s.sort_index()
s.index.name = 'time'
# now, Kp comes in non-user friendly values
# 2-, 2o, and 2+ relate to 1.6, 2.0, 2.3
# will convert for user friendliness
first = np.array([float(x[0]) for x in s])
flag = np.array([x[1] for x in s])
ind, = np.where(flag == '+')
first[ind] += 1./3.
ind, = np.where(flag == '-')
first[ind] -= 1./3.
result = pds.DataFrame(first, columns=['kp'], index=s.index)
return result, pysat.Meta() | Load Kp index files
Parameters
------------
fnames : (pandas.Series)
Series of filenames
tag : (str or NoneType)
tag or None (default=None)
sat_id : (str or NoneType)
satellite id or None (default=None)
Returns
---------
data : (pandas.DataFrame)
Object containing satellite data
meta : (pysat.Meta)
Object containing metadata such as column names and units
Notes
-----
Called by pysat. Not intended for direct use by user. | entailment |
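A standalone sketch of the '2-', '2o', '2+' conversion performed above; the +/- one-third arithmetic mirrors the loop in the loader, and the sample codes are made up:

def kp_to_float(kp_str):
    """Convert a two-character Kp code such as '2-' into a float."""
    value = float(kp_str[0])
    if kp_str[1] == '+':
        value += 1. / 3.
    elif kp_str[1] == '-':
        value -= 1. / 3.
    return value

print([round(kp_to_float(k), 3) for k in ['2-', '2o', '2+', '5-']])
# [1.667, 2.0, 2.333, 4.667]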
def download(date_array, tag, sat_id, data_path, user=None, password=None):
"""Routine to download Kp index data
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are '1min' and '5min'.
(default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
Returns
--------
Void : (NoneType)
data downloaded to disk, if available.
Notes
-----
Called by pysat. Not intended for direct use by user.
"""
import ftplib
from ftplib import FTP
import sys
ftp = FTP('ftp.gfz-potsdam.de') # connect to host, default port
ftp.login() # user anonymous, passwd anonymous@
ftp.cwd('/pub/home/obs/kp-ap/tab')
for date in date_array:
fname = 'kp{year:02d}{month:02d}.tab'
fname = fname.format(year=(date.year - date.year//100*100), month=date.month)
local_fname = fname
saved_fname = os.path.join(data_path,local_fname)
try:
print('Downloading file for '+date.strftime('%D'))
sys.stdout.flush()
ftp.retrbinary('RETR '+fname, open(saved_fname,'wb').write)
except ftplib.error_perm as exception:
# if exception[0][0:3] != '550':
if str(exception.args[0]).split(" ", 1)[0] != '550':
raise
else:
os.remove(saved_fname)
print('File not available for '+date.strftime('%D'))
ftp.close()
return | Routine to download Kp index data
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are '1min' and '5min'.
(default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
Returns
--------
Void : (NoneType)
data downloaded to disk, if available.
Notes
-----
Called by pysat. Not intended for direct use by user. | entailment |
def filter_geoquiet(sat, maxKp=None, filterTime=None, kpData=None, kp_inst=None):
"""Filters pysat.Instrument data for given time after Kp drops below gate.
Loads Kp data for the same timeframe covered by sat and sets sat.data to
NaN for times when Kp > maxKp and for filterTime after Kp drops below maxKp.
Parameters
----------
sat : pysat.Instrument
Instrument to be filtered
maxKp : float
Maximum Kp value allowed. Kp values above this trigger
sat.data filtering.
filterTime : int
Number of hours to filter data after Kp drops below maxKp
kpData : pysat.Instrument (optional)
Kp pysat.Instrument object with data already loaded
kp_inst : pysat.Instrument (optional)
        Kp pysat.Instrument object ready to load Kp data. Overrides kpData.
Returns
-------
None : NoneType
sat Instrument object modified in place
"""
if kp_inst is not None:
kp_inst.load(date=sat.date, verifyPad=True)
kpData = kp_inst
elif kpData is None:
kp = pysat.Instrument('sw', 'kp', pad=pds.DateOffset(days=1))
kp.load(date=sat.date, verifyPad=True)
kpData = kp
if maxKp is None:
        maxKp = 3 + 1./3.
if filterTime is None:
filterTime = 24
# now the defaults are ensured, let's do some filtering
# date of satellite data
date = sat.date
selData = kpData[date-pds.DateOffset(days=1):date+pds.DateOffset(days=1)]
ind, = np.where(selData['kp'] >= maxKp)
for lind in ind:
sat.data[selData.index[lind]:(selData.index[lind]+pds.DateOffset(hours=filterTime) )] = np.NaN
sat.data = sat.data.dropna(axis=0, how='all')
return | Filters pysat.Instrument data for given time after Kp drops below gate.
Loads Kp data for the same timeframe covered by sat and sets sat.data to
NaN for times when Kp > maxKp and for filterTime after Kp drops below maxKp.
Parameters
----------
sat : pysat.Instrument
Instrument to be filtered
maxKp : float
Maximum Kp value allowed. Kp values above this trigger
sat.data filtering.
filterTime : int
Number of hours to filter data after Kp drops below maxKp
kpData : pysat.Instrument (optional)
Kp pysat.Instrument object with data already loaded
kp_inst : pysat.Instrument (optional)
Kp pysat.Instrument object ready to load Kp data. Overrides kpData.
Returns
-------
None : NoneType
sat Instrument object modified in place | entailment |
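A hedged usage sketch; the C/NOFS IVM instrument and date are placeholders, and both the instrument data and the Kp index data would need to be downloaded for this to run:

import pysat

ivm = pysat.Instrument('cnofs', 'ivm', clean_level='none')
ivm.load(2009, 1)
# NaN out data while Kp >= 3+ and for 24 hours afterwards
filter_geoquiet(ivm, maxKp=3 + 1. / 3., filterTime=24)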
def _get_converter(self, converter_str):
"""find converter function reference by name
find converter by name, converter name follows this convention:
Class.method
or:
method
The first type of converter class/function must be available in
current module.
The second type of converter must be available in `__builtin__`
(or `builtins` in python3) module.
:param converter_str: string representation of the converter func
:return: function reference
"""
ret = None
if converter_str is not None:
converter_desc_list = converter_str.split('.')
if len(converter_desc_list) == 1:
converter = converter_desc_list[0]
# default to `converter`
ret = getattr(cvt, converter, None)
if ret is None:
# try module converter
ret = self.get_converter(converter)
if ret is None:
ret = self.get_resource_clz_by_name(converter)
if ret is None:
ret = self.get_enum_by_name(converter)
if ret is None:
# try parser config
ret = self.get(converter)
if ret is None and converter_str is not None:
raise ValueError(
'Specified converter not supported: {}'.format(
converter_str))
return ret | find converter function reference by name
find converter by name, converter name follows this convention:
Class.method
or:
method
The first type of converter class/function must be available in
current module.
The second type of converter must be available in `__builtin__`
(or `builtins` in python3) module.
:param converter_str: string representation of the converter func
:return: function reference | entailment |
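A minimal standalone sketch of the same lookup-chain idea, using hypothetical registries in place of the real storops converter module, resource classes, and enums:

import builtins  # Python 3 name for the builtin module mentioned above

def find_converter(name, *registries):
    """Return the first hit from the registries, else fall back to builtins."""
    for registry in registries:
        if name in registry:
            return registry[name]
    return getattr(builtins, name, None)

module_funcs, resource_classes, enums = {}, {}, {}
assert find_converter('int', module_funcs, resource_classes, enums) is int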
def copy_file_to_remote(self, local_path, remote_path):
"""scp the local file to remote folder.
:param local_path: local path
:param remote_path: remote path
"""
sftp_client = self.transport.open_sftp_client()
LOG.debug('Copy the local file to remote. '
'Source=%(src)s. Target=%(target)s.' %
{'src': local_path, 'target': remote_path})
try:
sftp_client.put(local_path, remote_path)
except Exception as ex:
LOG.error('Failed to copy the local file to remote. '
'Reason: %s.' % six.text_type(ex))
raise SFtpExecutionError(err=ex) | scp the local file to remote folder.
:param local_path: local path
:param remote_path: remote path | entailment |
def get_remote_file(self, remote_path, local_path):
"""Fetch remote File.
:param remote_path: remote path
:param local_path: local path
"""
sftp_client = self.transport.open_sftp_client()
LOG.debug('Get the remote file. '
'Source=%(src)s. Target=%(target)s.' %
{'src': remote_path, 'target': local_path})
try:
sftp_client.get(remote_path, local_path)
except Exception as ex:
LOG.error('Failed to secure copy. Reason: %s.' %
six.text_type(ex))
raise SFtpExecutionError(err=ex) | Fetch remote File.
:param remote_path: remote path
:param local_path: local path | entailment |
def close(self):
"""Closes the ssh connection."""
if 'isLive' in self.__dict__ and self.isLive:
self.transport.close()
self.isLive = False | Closes the ssh connection. | entailment |
def xml_request(check_object=False, check_invalid_data_mover=False):
""" indicate the return value is a xml api request
:param check_invalid_data_mover:
:param check_object:
:return: the response of this request
"""
def decorator(f):
@functools.wraps(f)
def func_wrapper(self, *argv, **kwargs):
request = f(self, *argv, **kwargs)
return self.request(
request, check_object=check_object,
check_invalid_data_mover=check_invalid_data_mover)
return func_wrapper
return decorator | indicate the return value is a xml api request
:param check_invalid_data_mover:
:param check_object:
:return: the response of this request | entailment |
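An illustration of how a decorator like this is used: the wrapped method only builds the request body, and the wrapper hands it to the instance's request() method. FakeClient is a stand-in rather than a real storops class, and the example reuses the xml_request decorator defined above:

class FakeClient(object):
    def request(self, req, check_object=False,
                check_invalid_data_mover=False):
        return 'sent: {}'.format(req)

    @xml_request(check_object=True)
    def get_filesystem(self, name):
        return '<Query name="{}"/>'.format(name)

print(FakeClient().get_filesystem('fs_1'))  # sent: <Query name="fs_1"/>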
def nas_command(f):
""" indicate it's a command of nas command run with ssh
:param f: function that returns the command in list
:return: command execution result
"""
@functools.wraps(f)
def func_wrapper(self, *argv, **kwargs):
commands = f(self, *argv, **kwargs)
return self.ssh_execute(['env', 'NAS_DB=/nas'] + commands)
return func_wrapper | indicate it's a command of nas command run with ssh
:param f: function that returns the command in list
:return: command execution result | entailment |
def restore(self, backup=None, delete_backup=False):
"""Restore the snapshot to the associated storage resource.
:param backup: name of the backup snapshot
:param delete_backup: Whether to delete the backup snap after a
successful restore.
"""
resp = self._cli.action(self.resource_class, self.get_id(),
'restore', copyName=backup)
resp.raise_if_err()
backup = resp.first_content['backup']
backup_snap = UnitySnap(_id=backup['id'], cli=self._cli)
if delete_backup:
log.info("Deleting the backup snap {} as the restoration "
"succeeded.".format(backup['id']))
backup_snap.delete()
return backup_snap | Restore the snapshot to the associated storage resource.
:param backup: name of the backup snapshot
:param delete_backup: Whether to delete the backup snap after a
successful restore. | entailment |
def thin_clone(self, name, io_limit_policy=None, description=None):
"""Creates a new thin clone from this snapshot.
.. note:: this snapshot should not enable Auto-Delete.
"""
if self.is_member_snap():
raise UnityCGMemberActionNotSupportError()
if self.lun and not self.lun.is_thin_enabled:
raise UnityThinCloneNotAllowedError()
return TCHelper.thin_clone(self._cli, self, name, io_limit_policy,
description) | Creates a new thin clone from this snapshot.
.. note:: this snapshot should not enable Auto-Delete. | entailment |
def delete(self, async_mode=False, even_attached=False):
"""Deletes the snapshot.
:param async_mode: whether to delete the snapshot in async mode.
:param even_attached: whether to delete the snapshot even it is
attached to hosts.
"""
try:
return super(UnitySnap, self).delete(async_mode=async_mode)
except UnityDeleteAttachedSnapError:
if even_attached:
log.debug("Force delete the snapshot even if it is attached. "
"First detach the snapshot from hosts, then delete "
"again.")
# Currently `detach_from` doesn't process `host` parameter.
# It always detaches the snapshot from all hosts. So pass in
# `None` here.
self.detach_from(None)
return super(UnitySnap, self).delete(async_mode=async_mode)
else:
raise | Deletes the snapshot.
:param async_mode: whether to delete the snapshot in async mode.
:param even_attached: whether to delete the snapshot even it is
attached to hosts. | entailment |
def get_all(self, type_name, base_fields=None, the_filter=None,
nested_fields=None):
"""Get the resource by resource id.
:param nested_fields: nested resource fields
:param base_fields: fields of this resource
:param the_filter: dictionary of filter like `{'name': 'abc'}`
:param type_name: Resource type. For example, pool, lun, nasServer.
:return: List of resource class objects
"""
fields = self.get_fields(type_name, base_fields, nested_fields)
the_filter = self.dict_to_filter_string(the_filter)
url = '/api/types/{}/instances'.format(type_name)
resp = self.rest_get(url, fields=fields, filter=the_filter)
ret = resp
while resp.has_next_page:
resp = self.rest_get(url, fields=fields, filter=the_filter,
page=resp.next_page)
ret.entries.extend(resp.entries)
return ret | Get the resource by resource id.
:param nested_fields: nested resource fields
:param base_fields: fields of this resource
:param the_filter: dictionary of filter like `{'name': 'abc'}`
:param type_name: Resource type. For example, pool, lun, nasServer.
:return: List of resource class objects | entailment |
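A standalone sketch of the "follow next_page until exhausted" accumulation pattern used above, with a fake paged response standing in for the REST client:

class FakeResp(object):
    def __init__(self, entries, next_page=None):
        self.entries = entries
        self.next_page = next_page
        self.has_next_page = next_page is not None

pages = {1: FakeResp(['lun_1', 'lun_2'], next_page=2),
         2: FakeResp(['lun_3'])}

resp = pages[1]
ret = resp
while resp.has_next_page:
    resp = pages[resp.next_page]
    ret.entries.extend(resp.entries)
print(ret.entries)  # ['lun_1', 'lun_2', 'lun_3']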
def get(self, type_name, obj_id, base_fields=None, nested_fields=None):
"""Get the resource by resource id.
:param nested_fields: nested resource fields.
:param type_name: Resource type. For example, pool, lun, nasServer.
:param obj_id: Resource id
:param base_fields: Resource fields to return
:return: List of tuple [(name, res_inst)]
"""
base_fields = self.get_fields(type_name, base_fields, nested_fields)
url = '/api/instances/{}/{}'.format(type_name, obj_id)
return self.rest_get(url, fields=base_fields) | Get the resource by resource id.
:param nested_fields: nested resource fields.
:param type_name: Resource type. For example, pool, lun, nasServer.
:param obj_id: Resource id
:param base_fields: Resource fields to return
:return: List of tuple [(name, res_inst)] | entailment |
def _flat_vports(self, connection_port):
"""Flat the virtual ports."""
vports = []
for vport in connection_port.virtual_ports:
self._set_child_props(connection_port, vport)
vports.append(vport)
        return vports | Flatten the virtual ports. | entailment |
def has_snap(self):
""" This method won't count the snaps in "destroying" state!
:return: false if no snaps or all snaps are destroying.
"""
return len(list(filter(lambda s: s.state != SnapStateEnum.DESTROYING,
self.snapshots))) > 0 | This method won't count the snaps in "destroying" state!
:return: false if no snaps or all snaps are destroying. | entailment |
def median2D(const, bin1, label1, bin2, label2, data_label,
returnData=False):
"""Return a 2D average of data_label over a season and label1, label2.
Parameters
----------
const: Constellation or Instrument
bin#: [min, max, number of bins]
label#: string
identifies data product for bin#
data_label: list-like
contains strings identifying data product(s) to be averaged
Returns
-------
median : dictionary
2D median accessed by data_label as a function of label1 and label2
over the season delineated by bounds of passed instrument objects.
Also includes 'count' and 'avg_abs_dev' as well as the values of
the bin edges in 'bin_x' and 'bin_y'.
"""
# const is either an Instrument or a Constellation, and we want to
# iterate over it.
# If it's a Constellation, then we can do that as is, but if it's
# an Instrument, we just have to put that Instrument into something
    # that will yield that Instrument, like a list.
if isinstance(const, pysat.Instrument):
const = [const]
elif not isinstance(const, pysat.Constellation):
raise ValueError("Parameter must be an Instrument or a Constellation.")
# create bins
#// seems to create the boundaries used for sorting into bins
binx = np.linspace(bin1[0], bin1[1], bin1[2]+1)
biny = np.linspace(bin2[0], bin2[1], bin2[2]+1)
#// how many bins are used
numx = len(binx)-1
numy = len(biny)-1
#// how many different data products
numz = len(data_label)
# create array to store all values before taking median
#// the indices of the bins/data products? used for looping.
yarr = np.arange(numy)
xarr = np.arange(numx)
zarr = np.arange(numz)
#// 3d array: stores the data that is sorted into each bin? - in a deque
ans = [ [ [collections.deque() for i in xarr] for j in yarr] for k in zarr]
for inst in const:
# do loop to iterate over instrument season
#// probably iterates by date but that all depends on the
#// configuration of that particular instrument.
#// either way, it iterates over the instrument, loading successive
#// data between start and end bounds
for inst in inst:
# collect data in bins for averaging
if len(inst.data) != 0:
#// sort the data into bins (x) based on label 1
#// (stores bin indexes in xind)
xind = np.digitize(inst.data[label1], binx)-1
#// for each possible x index
for xi in xarr:
                    #// get the indices of those pieces of data in that bin
xindex, = np.where(xind==xi)
if len(xindex) > 0:
                        #// look up the data along y (label2) at that set of indices (a given x)
yData = inst.data.iloc[xindex]
#// digitize that, to sort data into bins along y (label2) (get bin indexes)
yind = np.digitize(yData[label2], biny)-1
#// for each possible y index
for yj in yarr:
#// select data with this y index (and we already filtered for this x index)
yindex, = np.where(yind==yj)
if len(yindex) > 0:
#// for each data product label zk
for zk in zarr:
#// take the data (already filtered by x); filter it by y and
#// select the data product, put it in a list, and extend the deque
                                    # .ix was removed from pandas; use positional selection instead
                                    ans[zk][yj][xi].extend(yData[data_label[zk]].iloc[yindex].tolist())
    return _calc_2d_median(ans, data_label, binx, biny, xarr, yarr, zarr, numx, numy, numz, returnData) | Return a 2D median of data_label over a season and label1, label2.
Parameters
----------
const: Constellation or Instrument
bin#: [min, max, number of bins]
label#: string
identifies data product for bin#
data_label: list-like
contains strings identifying data product(s) to be averaged
Returns
-------
median : dictionary
2D median accessed by data_label as a function of label1 and label2
over the season delineated by bounds of passed instrument objects.
Also includes 'count' and 'avg_abs_dev' as well as the values of
the bin edges in 'bin_x' and 'bin_y'. | entailment |
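A hedged usage sketch of the seasonal 2D median; the instrument, variable names, bin ranges, and date bounds are placeholders that would need to match real, downloaded data:

import pysat

ivm = pysat.Instrument('cnofs', 'ivm', clean_level='none')
ivm.bounds = (pysat.datetime(2009, 1, 1), pysat.datetime(2009, 1, 31))
result = median2D(ivm, [0, 24, 24], 'mlt', [-30, 30, 12], 'mlat',
                  ['ionDensity'])
print(result['ionDensity']['median'])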
def get_rsc_list_2(self, rsc_clz_list=None):
"""get the list of resource list to collect based on clz list
:param rsc_clz_list: the list of classes to collect
:return: filtered list of resource list,
like [VNXLunList(), VNXDiskList()]
"""
rsc_list_2 = self._default_rsc_list_with_perf_stats()
if rsc_clz_list is None:
rsc_clz_list = ResourceList.get_rsc_clz_list(rsc_list_2)
return [rsc_list
for rsc_list in rsc_list_2
if rsc_list.get_resource_class() in rsc_clz_list] | get the list of resource list to collect based on clz list
:param rsc_clz_list: the list of classes to collect
:return: filtered list of resource list,
like [VNXLunList(), VNXDiskList()] | entailment |
def load(cosmicFiles, tag=None, sat_id=None):
"""
cosmic data load routine, called by pysat
"""
import netCDF4
num = len(cosmicFiles)
# make sure there are files to read
if num != 0:
        # call separate load_files routine, segmented for possible
# multiprocessor load, not included and only benefits about 20%
output = pysat.DataFrame(load_files(cosmicFiles, tag=tag, sat_id=sat_id))
output.index = pysat.utils.create_datetime_index(year=output.year,
month=output.month, day=output.day,
uts=output.hour*3600.+output.minute*60.+output.second)
# make sure UTS strictly increasing
output.sort_index(inplace=True)
# use the first available file to pick out meta information
meta = pysat.Meta()
ind = 0
repeat = True
while repeat:
try:
data = netCDF4.Dataset(cosmicFiles[ind])
ncattrsList = data.ncattrs()
for d in ncattrsList:
meta[d] = {'units':'', 'long_name':d}
keys = data.variables.keys()
for key in keys:
meta[key] = {'units':data.variables[key].units,
'long_name':data.variables[key].long_name}
repeat = False
except RuntimeError:
# file was empty, try the next one by incrementing ind
ind+=1
return output, meta
else:
# no data
return pysat.DataFrame(None), pysat.Meta() | cosmic data load routine, called by pysat | entailment |
def load_files(files, tag=None, sat_id=None):
'''Loads a list of COSMIC data files, supplied by user.
Returns a list of dicts, a dict for each file.
'''
output = [None]*len(files)
drop_idx = []
for (i,file) in enumerate(files):
try:
data = netCDF4.Dataset(file)
# build up dictionary will all ncattrs
new = {}
# get list of file attributes
ncattrsList = data.ncattrs()
for d in ncattrsList:
new[d] = data.getncattr(d)
# load all of the variables in the netCDF
loadedVars={}
keys = data.variables.keys()
for key in keys:
loadedVars[key] = data.variables[key][:]
new['profiles'] = pysat.DataFrame(loadedVars)
if tag == 'ionprf':
new['profiles'].index = new['profiles']['MSL_alt']
output[i] = new
data.close()
except RuntimeError:
# some of the S4 files have zero bytes, which causes a read error
# this stores the index of these zero byte files so I can drop
# the Nones the gappy file leaves behind
drop_idx.append(i)
# drop anything that came from the zero byte files
drop_idx.reverse()
for i in drop_idx:
del output[i]
return output | Loads a list of COSMIC data files, supplied by user.
Returns a list of dicts, a dict for each file. | entailment |
def download(date_array, tag, sat_id, data_path=None, user=None, password=None):
"""Downloads data from Madrigal.
The user's names should be provided in field user. John Malkovich should be
entered as John+Malkovich
The password field should be the user's email address. These parameters
are passed to Madrigal when downloading.
The affiliation field is set to pysat to enable tracking of pysat downloads.
Parameters
----------
"""
import subprocess
# currently passes things along if no user and password supplied
# need to do this for testing
# TODO, implement user and password values in test code
# specific to DMSP
if user is None:
print ('No user information supplied for download.')
user = 'pysat_testing'
if password is None:
print ('Please provide email address in password field.')
password = 'pysat_testing@not_real_email.org'
a = subprocess.check_output(["globalDownload.py", "--verbose",
"--url=http://cedar.openmadrigal.org",
'--outputDir='+data_path,
'--user_fullname='+user,
'--user_email='+password,
'--user_affiliation=pysat',
'--format=hdf5',
'--startDate='+date_array[0].strftime('%m/%d/%Y'),
'--endDate='+date_array[-1].strftime('%m/%d/%Y'),
'--inst=8100',
'--kindat='+str(madrigal_tag[sat_id])])
print ('Feedback from openMadrigal ', a) | Downloads data from Madrigal.
The user's name should be provided in the user field. John Malkovich should be
entered as John+Malkovich
The password field should be the user's email address. These parameters
are passed to Madrigal when downloading.
The affiliation field is set to pysat to enable tracking of pysat downloads.
Parameters
---------- | entailment |
def clean(self):
"""Routine to return DMSP IVM data cleaned to the specified level
'Clean' enforces that both RPA and DM flags are <= 1
'Dusty' <= 2
'Dirty' <= 3
'None' None
Routine is called by pysat, and not by the end user directly.
Parameters
-----------
inst : (pysat.Instrument)
Instrument class object, whose attribute clean_level is used to return
the desired level of data selectivity.
Returns
--------
Void : (NoneType)
data in inst is modified in-place.
Notes
--------
Supports 'clean', 'dusty', 'dirty'
"""
if self.clean_level == 'clean':
idx, = np.where((self['rpa_flag_ut'] <= 1) & (self['idm_flag_ut'] <= 1))
elif self.clean_level == 'dusty':
idx, = np.where((self['rpa_flag_ut'] <= 2) & (self['idm_flag_ut'] <= 2))
elif self.clean_level == 'dirty':
idx, = np.where((self['rpa_flag_ut'] <= 3) & (self['idm_flag_ut'] <= 3))
else:
idx = []
# downselect data based upon cleaning conditions above
self.data = self[idx]
return | Routine to return DMSP IVM data cleaned to the specified level
'Clean' enforces that both RPA and DM flags are <= 1
'Dusty' <= 2
'Dirty' <= 3
'None' None
Routine is called by pysat, and not by the end user directly.
Parameters
-----------
inst : (pysat.Instrument)
Instrument class object, whose attribute clean_level is used to return
the desired level of data selectivity.
Returns
--------
Void : (NoneType)
data in inst is modified in-place.
Notes
--------
Supports 'clean', 'dusty', 'dirty' | entailment |
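A hedged sketch of how the clean level takes effect; the platform, tag, sat_id, and date below are assumptions about a typical DMSP IVM setup:

import pysat

dmsp = pysat.Instrument('dmsp', 'ivm', tag='utd', sat_id='f15',
                        clean_level='dusty')
dmsp.load(2015, 10)  # rows with RPA or IDM flags > 2 are dropped after load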
def create(cls, cli, management_address,
local_username=None, local_password=None,
remote_username=None, remote_password=None,
connection_type=None):
"""
Configures a remote system for remote replication.
:param cls: this class.
:param cli: the rest client.
:param management_address: the management IP address of the remote
system.
:param local_username: administrative username of local system.
:param local_password: administrative password of local system.
:param remote_username: administrative username of remote system.
:param remote_password: administrative password of remote system.
:param connection_type: `ReplicationCapabilityEnum`. Replication
connection type to the remote system.
:return: the newly created remote system.
"""
req_body = cli.make_body(
managementAddress=management_address, localUsername=local_username,
localPassword=local_password, remoteUsername=remote_username,
remotePassword=remote_password, connectionType=connection_type)
resp = cli.post(cls().resource_class, **req_body)
resp.raise_if_err()
return cls.get(cli, resp.resource_id) | Configures a remote system for remote replication.
:param cls: this class.
:param cli: the rest client.
:param management_address: the management IP address of the remote
system.
:param local_username: administrative username of local system.
:param local_password: administrative password of local system.
:param remote_username: administrative username of remote system.
:param remote_password: administrative password of remote system.
:param connection_type: `ReplicationCapabilityEnum`. Replication
connection type to the remote system.
:return: the newly created remote system. | entailment |
def modify(self, management_address=None, username=None, password=None,
connection_type=None):
"""
Modifies a remote system for remote replication.
:param management_address: same as the one in `create` method.
:param username: username for accessing the remote system.
:param password: password for accessing the remote system.
:param connection_type: same as the one in `create` method.
"""
req_body = self._cli.make_body(
managementAddress=management_address, username=username,
password=password, connectionType=connection_type)
resp = self.action('modify', **req_body)
resp.raise_if_err()
return resp | Modifies a remote system for remote replication.
:param management_address: same as the one in `create` method.
:param username: username for accessing the remote system.
:param password: password for accessing the remote system.
:param connection_type: same as the one in `create` method. | entailment |
def verify(self, connection_type=None):
"""
Verifies and update the remote system settings.
:param connection_type: same as the one in `create` method.
"""
req_body = self._cli.make_body(connectionType=connection_type)
resp = self.action('verify', **req_body)
resp.raise_if_err()
return resp | Verifies and update the remote system settings.
:param connection_type: same as the one in `create` method. | entailment |
def create(cls, cli, sp, ip_port, ip_address, netmask=None,
v6_prefix_length=None, gateway=None, vlan_id=None):
"""
Creates a replication interface.
:param cls: this class.
:param cli: the rest cli.
:param sp: `UnityStorageProcessor` object. Storage processor on which
the replication interface is running.
:param ip_port: `UnityIpPort` object. Physical port or link aggregation
on the storage processor on which the interface is running.
:param ip_address: IP address of the replication interface.
:param netmask: IPv4 netmask for the replication interface, if it uses
an IPv4 address.
:param v6_prefix_length: IPv6 prefix length for the interface, if it
uses an IPv6 address.
:param gateway: IPv4 or IPv6 gateway address for the replication
interface.
:param vlan_id: VLAN identifier for the interface.
:return: the newly create replication interface.
"""
req_body = cli.make_body(sp=sp, ipPort=ip_port,
ipAddress=ip_address, netmask=netmask,
v6PrefixLength=v6_prefix_length,
gateway=gateway, vlanId=vlan_id)
resp = cli.post(cls().resource_class, **req_body)
resp.raise_if_err()
return cls.get(cli, resp.resource_id) | Creates a replication interface.
:param cls: this class.
:param cli: the rest cli.
:param sp: `UnityStorageProcessor` object. Storage processor on which
the replication interface is running.
:param ip_port: `UnityIpPort` object. Physical port or link aggregation
on the storage processor on which the interface is running.
:param ip_address: IP address of the replication interface.
:param netmask: IPv4 netmask for the replication interface, if it uses
an IPv4 address.
:param v6_prefix_length: IPv6 prefix length for the interface, if it
uses an IPv6 address.
:param gateway: IPv4 or IPv6 gateway address for the replication
interface.
:param vlan_id: VLAN identifier for the interface.
:return: the newly create replication interface. | entailment |
def modify(self, sp=None, ip_port=None, ip_address=None, netmask=None,
v6_prefix_length=None, gateway=None, vlan_id=None):
"""
Modifies a replication interface.
:param sp: same as the one in `create` method.
:param ip_port: same as the one in `create` method.
:param ip_address: same as the one in `create` method.
:param netmask: same as the one in `create` method.
:param v6_prefix_length: same as the one in `create` method.
:param gateway: same as the one in `create` method.
:param vlan_id: same as the one in `create` method.
"""
req_body = self._cli.make_body(sp=sp, ipPort=ip_port,
ipAddress=ip_address, netmask=netmask,
v6PrefixLength=v6_prefix_length,
gateway=gateway, vlanId=vlan_id)
resp = self.action('modify', **req_body)
resp.raise_if_err()
return resp | Modifies a replication interface.
:param sp: same as the one in `create` method.
:param ip_port: same as the one in `create` method.
:param ip_address: same as the one in `create` method.
:param netmask: same as the one in `create` method.
:param v6_prefix_length: same as the one in `create` method.
:param gateway: same as the one in `create` method.
:param vlan_id: same as the one in `create` method. | entailment |
def sp_sum_values(self):
"""
return sp level values
input:
"values": {
"spa": {
"19": "385",
"18": "0",
"20": "0",
"17": "0",
"16": "0"
},
"spb": {
"19": "101",
"18": "101",
"20": "101",
"17": "101",
"16": "101"
}
},
return:
"values": {
"spa": 385,
"spb": 505
},
"""
if self.values is None:
ret = IdValues()
else:
ret = IdValues({k: sum(int(x) for x in v.values()) for k, v in
self.values.items()})
return ret | return sp level values
input:
"values": {
"spa": {
"19": "385",
"18": "0",
"20": "0",
"17": "0",
"16": "0"
},
"spb": {
"19": "101",
"18": "101",
"20": "101",
"17": "101",
"16": "101"
}
},
return:
"values": {
"spa": 385,
"spb": 505
}, | entailment |
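A standalone sketch of the per-SP and system-level summations, using plain dicts in place of IdValues; the counter values are made up:

values = {'spa': {'19': '385', '18': '0'},
          'spb': {'19': '101', '18': '101'}}
sp_sums = {sp: sum(int(x) for x in counters.values())
           for sp, counters in values.items()}
print(sp_sums)                # {'spa': 385, 'spb': 202}
print(sum(sp_sums.values()))  # 587, the system-level total computed below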
def sum_sp_values(self):
"""
return system level values (spa + spb)
input:
"values": {
"spa": 385,
"spb": 505
},
return:
"values": {
"0": 890
},
"""
if self.values is None:
ret = IdValues()
else:
ret = IdValues({'0': sum(int(x) for x in self.values.values())})
return ret | return system level values (spa + spb)
input:
"values": {
"spa": 385,
"spb": 505
},
return:
"values": {
"0": 890
}, | entailment |
def combine_numeric_values(self, other):
"""
numeric_values * sp_values
"""
if self.values is None:
ret = IdValues()
else:
ret = sum([IdValues(
{k: int(v) * int(other.values[key]) for k, v in value.items()})
for key, value in self.values.items()])
return ret | numeric_values * sp_values | entailment |
def combine_sp_values(self, other):
"""
sp_values * sp_values
"""
if self.values is None:
ret = IdValues()
else:
ret = IdValues({k: int(v) * int(other.values[k]) for k, v in
self.values.items()})
return ret | sp_values * sp_values | entailment |
def sum_combined_sp_values(self, other):
"""
sum(sp_values * sp_values)
"""
if self.values is None:
ret = IdValues()
else:
ret = IdValues({'0': sum(int(x) for x in
{k: int(v) * int(other.values[k]) for k, v
in self.values.items()}.values())})
return ret | sum(sp_values * sp_values) | entailment |
def add(self, function, kind='add', at_pos='end',*args, **kwargs):
"""Add a function to custom processing queue.
Custom functions are applied automatically to associated
pysat instrument whenever instrument.load command called.
Parameters
----------
function : string or function object
name of function or function object to be added to queue
        kind : {'add', 'modify', 'pass'}
add
Adds data returned from function to instrument object.
A copy of pysat instrument object supplied to routine.
modify
pysat instrument object supplied to routine. Any and all
changes to object are retained.
pass
A copy of pysat object is passed to function. No
data is accepted from return.
at_pos : string or int
insert at position. (default, insert at end).
args : extra arguments
extra arguments are passed to the custom function (once)
kwargs : extra keyword arguments
extra keyword args are passed to the custom function (once)
Note
----
Allowed `add` function returns:
- {'data' : pandas Series/DataFrame/array_like,
'units' : string/array_like of strings,
'long_name' : string/array_like of strings,
'name' : string/array_like of strings (iff data array_like)}
- pandas DataFrame, names of columns are used
- pandas Series, .name required
- (string/list of strings, numpy array/list of arrays)
"""
if isinstance(function, str):
# convert string to function object
function=eval(function)
if (at_pos == 'end') | (at_pos == len(self._functions)):
# store function object
self._functions.append(function)
self._args.append(args)
self._kwargs.append(kwargs)
self._kind.append(kind.lower())
elif at_pos < len(self._functions):
# user picked a specific location to insert
self._functions.insert(at_pos, function)
self._args.insert(at_pos, args)
self._kwargs.insert(at_pos, kwargs)
self._kind.insert(at_pos, kind)
else:
raise TypeError('Must enter an index between 0 and %i' %
len(self._functions)) | Add a function to custom processing queue.
Custom functions are applied automatically to associated
pysat instrument whenever instrument.load command called.
Parameters
----------
function : string or function object
name of function or function object to be added to queue
kind : {'add', 'modify', 'pass}
add
Adds data returned from function to instrument object.
A copy of pysat instrument object supplied to routine.
modify
pysat instrument object supplied to routine. Any and all
changes to object are retained.
pass
A copy of pysat object is passed to function. No
data is accepted from return.
at_pos : string or int
insert at position. (default, insert at end).
args : extra arguments
extra arguments are passed to the custom function (once)
kwargs : extra keyword arguments
extra keyword args are passed to the custom function (once)
Note
----
Allowed `add` function returns:
- {'data' : pandas Series/DataFrame/array_like,
'units' : string/array_like of strings,
'long_name' : string/array_like of strings,
'name' : string/array_like of strings (iff data array_like)}
- pandas DataFrame, names of columns are used
- pandas Series, .name required
- (string/list of strings, numpy array/list of arrays) | entailment |
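A hedged usage sketch of registering an 'add'-style custom function; the test instrument and its 'mlt' variable are assumptions about the available data:

import pysat

def double_mlt(inst):
    out = 2.0 * inst['mlt']
    out.name = 'double_mlt'
    return out

testing = pysat.Instrument('pysat', 'testing', clean_level='none')
testing.custom.add(double_mlt, 'add')
testing.load(2009, 1)            # custom queue runs automatically on load
print(testing['double_mlt'].head())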
def _apply_all(self, sat):
"""
Apply all of the custom functions to the satellite data object.
"""
if len(self._functions) > 0:
for func, arg, kwarg, kind in zip(self._functions, self._args,
self._kwargs, self._kind):
if len(sat.data) > 0:
if kind == 'add':
# apply custom functions that add data to the
# instrument object
tempd = sat.copy()
newData = func(tempd, *arg, **kwarg)
del tempd
# process different types of data returned by the
# function if a dict is returned, data in 'data'
if isinstance(newData,dict):
# if DataFrame returned, add Frame to existing frame
if isinstance(newData['data'], pds.DataFrame):
sat[newData['data'].columns] = newData
# if a series is returned, add it as a column
elif isinstance(newData['data'], pds.Series):
# look for name attached to series first
if newData['data'].name is not None:
sat[newData['data'].name] = newData
# look if name is provided as part of dict
# returned from function
elif 'name' in newData.keys():
name = newData.pop('name')
sat[name] = newData
# couldn't find name information
else:
raise ValueError('Must assign a name to ' +
'Series or return a ' +
'"name" in dictionary.')
# some kind of iterable was returned
elif hasattr(newData['data'], '__iter__'):
# look for name in returned dict
if 'name' in newData.keys():
name = newData.pop('name')
sat[name] = newData
else:
raise ValueError('Must include "name" in ' +
'returned dictionary.')
# bare DataFrame is returned
elif isinstance(newData, pds.DataFrame):
sat[newData.columns] = newData
# bare Series is returned, name must be attached to
# Series
elif isinstance(newData, pds.Series):
sat[newData.name] = newData
# some kind of iterable returned,
# presuming (name, data)
# or ([name1,...], [data1,...])
elif hasattr(newData, '__iter__'):
# falling back to older behavior
# unpack tuple/list that was returned
newName = newData[0]
newData = newData[1]
if len(newData)>0:
                                # doesn't fully ensure data is present; there
                                # could be multiple empty arrays returned, [[],[]]
if isinstance(newName, str):
# one item to add
sat[newName] = newData
else:
# multiple items
for name, data in zip(newName, newData):
if len(data)>0:
# fixes up the incomplete check
# from before
sat[name] = data
else:
raise ValueError("kernel doesn't know what to do " +
"with returned data.")
# modifying loaded data
if kind == 'modify':
t = func(sat,*arg,**kwarg)
if t is not None:
raise ValueError('Modify functions should not ' +
'return any information via ' +
'return. Information may only be' +
' propagated back by modifying ' +
'supplied pysat object.')
# pass function (function runs, no data allowed back)
if kind == 'pass':
tempd = sat.copy()
t = func(tempd,*arg,**kwarg)
del tempd
if t is not None:
raise ValueError('Pass functions should not ' +
'return any information via ' +
'return.') | Apply all of the custom functions to the satellite data object. | entailment |
def clear(self):
"""Clear custom function list."""
self._functions=[]
self._args=[]
self._kwargs=[]
self._kind=[] | Clear custom function list. | entailment |
def list_files(tag='north', sat_id=None, data_path=None, format_str=None):
"""Return a Pandas Series of every file for chosen satellite data
Parameters
-----------
tag : (string)
Denotes type of file to load. Accepted types are 'north' and 'south'.
(default='north')
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files
"""
if format_str is None and tag is not None:
if tag == 'north' or tag == 'south':
hemi_fmt = ''.join(('{year:4d}{month:02d}{day:02d}.', tag, '.grdex'))
return pysat.Files.from_os(data_path=data_path, format_str=hemi_fmt)
else:
estr = 'Unrecognized tag name for SuperDARN, north or south.'
raise ValueError(estr)
elif format_str is None:
estr = 'A tag name must be passed to SuperDARN.'
raise ValueError (estr)
else:
return pysat.Files.from_os(data_path=data_path, format_str=format_str) | Return a Pandas Series of every file for chosen satellite data
Parameters
-----------
tag : (string)
Denotes type of file to load. Accepted types are 'north' and 'south'.
(default='north')
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files | entailment |
def download(date_array, tag, sat_id, data_path, user=None, password=None):
"""
Download SuperDARN data from Virginia Tech organized for loading by pysat.
"""
import sys
import os
import pysftp
import davitpy
if user is None:
user = os.environ['DBREADUSER']
if password is None:
password = os.environ['DBREADPASS']
with pysftp.Connection(
os.environ['VTDB'],
username=user,
password=password) as sftp:
for date in date_array:
myDir = '/data/'+date.strftime("%Y")+'/grdex/'+tag+'/'
fname = date.strftime("%Y%m%d")+'.' + tag + '.grdex'
local_fname = fname+'.bz2'
saved_fname = os.path.join(data_path,local_fname)
full_fname = os.path.join(data_path,fname)
try:
print('Downloading file for '+date.strftime('%D'))
sys.stdout.flush()
sftp.get(myDir+local_fname, saved_fname)
os.system('bunzip2 -c '+saved_fname+' > '+ full_fname)
os.system('rm ' + saved_fname)
except IOError:
print('File not available for '+date.strftime('%D'))
return | Download SuperDARN data from Virginia Tech organized for loading by pysat. | entailment |
def same_disks(self, count=2):
""" filter self to the required number of disks with same size and type
Select the disks with the same type and same size. If not
enough disks available, set self to empty.
:param count: number of disks to retrieve
:return: disk list
"""
ret = self
if len(self) > 0:
type_counter = Counter(self.drive_type)
drive_type, counts = type_counter.most_common()[0]
self.set_drive_type(drive_type)
if len(self) > 0:
size_counter = Counter(self.capacity)
size, counts = size_counter.most_common()[0]
self.set_capacity(size)
if len(self) >= count:
indices = self.index[:count]
self.set_indices(indices)
else:
self.set_indices('N/A')
return ret | filter self to the required number of disks with same size and type
Select the disks with the same type and same size. If not
enough disks available, set self to empty.
:param count: number of disks to retrieve
:return: disk list | entailment |
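A standalone sketch of the "most common drive type, then most common size" selection above; the sample disk values are made up:

from collections import Counter

drive_types = ['SAS', 'SAS', 'NL_SAS', 'SAS', 'SAS']
capacities = [550, 550, 550, 900, 550]

best_type, _ = Counter(drive_types).most_common()[0]
best_size, _ = Counter(c for t, c in zip(drive_types, capacities)
                       if t == best_type).most_common()[0]
print(best_type, best_size)  # SAS 550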
def set_bounds(self, start, stop):
"""
Sets boundaries for all instruments in constellation
"""
for instrument in self.instruments:
instrument.bounds = (start, stop) | Sets boundaries for all instruments in constellation | entailment |
def data_mod(self, *args, **kwargs):
"""
Register a function to modify data of member Instruments.
The function is not partially applied to modify member data.
When the Constellation receives a function call to register a function for data modification,
it passes the call to each instrument and registers it in the instrument's pysat.Custom queue.
(Wraps pysat.Custom.add; documentation of that function is
reproduced here.)
Parameters
----------
function : string or function object
name of function or function object to be added to queue
        kind : {'add', 'modify', 'pass'}
add
            Adds data returned from function to instrument object.
modify
pysat instrument object supplied to routine. Any and all
changes to object are retained.
pass
A copy of pysat object is passed to function. No
data is accepted from return.
at_pos : string or int
insert at position. (default, insert at end).
args : extra arguments
Note
----
Allowed `add` function returns:
- {'data' : pandas Series/DataFrame/array_like,
'units' : string/array_like of strings,
'long_name' : string/array_like of strings,
'name' : string/array_like of strings (iff data array_like)}
- pandas DataFrame, names of columns are used
- pandas Series, .name required
- (string/list of strings, numpy array/list of arrays)
"""
for instrument in self.instruments:
instrument.custom.add(*args, **kwargs) | Register a function to modify data of member Instruments.
The function is not partially applied to modify member data.
When the Constellation receives a function call to register a function for data modification,
it passes the call to each instrument and registers it in the instrument's pysat.Custom queue.
(Wraps pysat.Custom.add; documentation of that function is
reproduced here.)
Parameters
----------
function : string or function object
name of function or function object to be added to queue
    kind : {'add', 'modify', 'pass'}
        add
            Adds data returned from function to instrument object.
modify
pysat instrument object supplied to routine. Any and all
changes to object are retained.
pass
A copy of pysat object is passed to function. No
data is accepted from return.
at_pos : string or int
insert at position. (default, insert at end).
args : extra arguments
Note
----
Allowed `add` function returns:
- {'data' : pandas Series/DataFrame/array_like,
'units' : string/array_like of strings,
'long_name' : string/array_like of strings,
'name' : string/array_like of strings (iff data array_like)}
- pandas DataFrame, names of columns are used
- pandas Series, .name required
- (string/list of strings, numpy array/list of arrays) | entailment |
def load(self, *args, **kwargs):
"""
Load instrument data into instrument object.data
(Wraps pysat.Instrument.load; documentation of that function is
reproduced here.)
Parameters
---------
yr : integer
Year for desired data
doy : integer
day of year
    date : datetime object
date to load
fname : 'string'
filename to be loaded
verifyPad : boolean
if true, padding data not removed (debug purposes)
"""
for instrument in self.instruments:
instrument.load(*args, **kwargs) | Load instrument data into instrument object.data
(Wraps pysat.Instrument.load; documentation of that function is
reproduced here.)
Parameters
---------
yr : integer
Year for desired data
doy : integer
day of year
    date : datetime object
date to load
fname : 'string'
filename to be loaded
verifyPad : boolean
if true, padding data not removed (debug purposes) | entailment |
def add(self, bounds1, label1, bounds2, label2, bin3, label3,
data_label):
"""
Combines signals from multiple instruments within
given bounds.
Parameters
----------
bounds1 : (min, max)
Bounds for selecting data on the axis of label1
Data points with label1 in [min, max) will be considered.
label1 : string
Data label for bounds1 to act on.
bounds2 : (min, max)
Bounds for selecting data on the axis of label2
Data points with label1 in [min, max) will be considered.
label2 : string
Data label for bounds2 to act on.
bin3 : (min, max, #bins)
Min and max bounds and number of bins for third axis.
label3 : string
Data label for third axis.
data_label : array of strings
Data label(s) for data product(s) to be averaged.
Returns
-------
median : dictionary
Dictionary indexed by data label, each value of which is a
dictionary with keys 'median', 'count', 'avg_abs_dev', and
'bin' (the values of the bin edges.)
"""
        # TODO Update for 2.7 compatibility.
if isinstance(data_label, str):
data_label = [data_label, ]
elif not isinstance(data_label, collections.Sequence):
raise ValueError("Please pass data_label as a string or "
"collection of strings.")
# Modeled after pysat.ssnl.median2D
# Make bin boundaries.
# y: values at label3
# z: *data_labels
biny = np.linspace(bin3[0], bin3[1], bin3[2]+1)
numy = len(biny)-1
numz = len(data_label)
# Ranges
yarr, zarr = map(np.arange, (numy, numz))
# Store data here.
ans = [[[collections.deque()] for j in yarr] for k in zarr]
# Filter data by bounds and bin it.
# Idiom for loading all of the data in an instrument's bounds.
for inst in self:
for inst in inst:
if len(inst.data) != 0:
                # Select indices for each piece of data we're interested in.
# Not all of this data is in bounds on label3 but we'll
# sort this later.
min1, max1 = bounds1
min2, max2 = bounds2
data1 = inst.data[label1]
data2 = inst.data[label2]
in_bounds, = np.where((min1 <= data1) & (data1 < max1) &
(min2 <= data2) & (data2 < max2))
# Grab the data in bounds on data1, data2.
data_considered = inst.data.iloc[in_bounds]
y_indexes = np.digitize(data_considered[label3], biny) - 1
# Iterate over the bins along y
for yj in yarr:
                    # Indices of data in this bin
yindex, = np.where(y_indexes == yj)
# If there's data in this bin
if len(yindex) > 0:
# For each data label, add the points.
for zk in zarr:
ans[zk][yj][0].extend(
                                data_considered[data_label[zk]].iloc[yindex].tolist())
# Now for the averaging.
        # Pack the answers for the 2D median routine.
numx = 1
xarr = np.arange(numx)
binx = None
# TODO modify output
out_2d = _calc_2d_median(ans, data_label, binx, biny, xarr, yarr, zarr, numx, numy, numz)
# Transform output
output = {}
for i, label in enumerate(data_label):
median = [r[0] for r in out_2d[label]['median']]
count = [r[0] for r in out_2d[label]['count']]
dev = [r[0] for r in out_2d[label]['avg_abs_dev']]
output[label] = {'median': median,
'count': count,
'avg_abs_dev': dev,
'bin': out_2d[label]['bin_y']}
return output | Combines signals from multiple instruments within
given bounds.
Parameters
----------
bounds1 : (min, max)
Bounds for selecting data on the axis of label1
Data points with label1 in [min, max) will be considered.
label1 : string
Data label for bounds1 to act on.
bounds2 : (min, max)
Bounds for selecting data on the axis of label2
Data points with label1 in [min, max) will be considered.
label2 : string
Data label for bounds2 to act on.
bin3 : (min, max, #bins)
Min and max bounds and number of bins for third axis.
label3 : string
Data label for third axis.
data_label : array of strings
Data label(s) for data product(s) to be averaged.
Returns
-------
median : dictionary
Dictionary indexed by data label, each value of which is a
dictionary with keys 'median', 'count', 'avg_abs_dev', and
'bin' (the values of the bin edges.) | entailment |
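A minimal sketch of the binning-and-reduce idea used above, with only numpy and synthetic stand-in values for the instrument data; the exact reductions performed by pysat's _calc_2d_median may differ slightly, so treat this as an illustration rather than the library routine.
import collections
import numpy as np

rng = np.random.default_rng(0)
label3_vals = rng.uniform(0.0, 360.0, 1000)   # stand-in for inst[label3]
product_vals = rng.normal(size=1000)          # stand-in for one data_label

biny = np.linspace(0.0, 360.0, 25)            # bin3 = (0, 360, 24)
y_idx = np.digitize(label3_vals, biny) - 1

# Collect points per bin, then reduce to median / count / average absolute deviation.
bins = [collections.deque() for _ in range(len(biny) - 1)]
for yi, val in zip(y_idx, product_vals):
    if 0 <= yi < len(bins):
        bins[yi].append(val)

median = [np.median(b) if len(b) else np.nan for b in bins]
count = [len(b) for b in bins]
avg_abs_dev = [np.mean(np.abs(np.asarray(b) - np.median(b))) if len(b) else np.nan
               for b in bins]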
def difference(self, instrument1, instrument2, bounds, data_labels,
cost_function):
"""
Calculates the difference in signals from multiple
instruments within the given bounds.
Parameters
----------
instrument1 : Instrument
Information must already be loaded into the
instrument.
instrument2 : Instrument
Information must already be loaded into the
instrument.
bounds : list of tuples in the form (inst1_label, inst2_label,
min, max, max_difference)
        inst1_label and inst2_label are labels for the data in
instrument1 and instrument2
min and max are bounds on the data considered
max_difference is the maximum difference between two points
for the difference to be calculated
data_labels : list of tuples of data labels
The first key is used to access data in s1
and the second data in s2.
cost_function : function
function that operates on two rows of the instrument data.
used to determine the distance between two points for finding
closest points
Returns
-------
data_df: pandas DataFrame
Each row has a point from instrument1, with the keys
preceded by '1_', and a point within bounds on that point
from instrument2 with the keys preceded by '2_', and the
difference between the instruments' data for all the labels
in data_labels
Created as part of a Spring 2018 UTDesign project.
"""
"""
Draft Pseudocode
----------------
Check integrity of inputs.
Let STD_LABELS be the constant tuple:
("time", "lat", "long", "alt")
Note: modify so that user can override labels for time,
        lat, long, data for each satellite.
// We only care about the data currently loaded
into each object.
Let start be the later of the datetime of the
first piece of data loaded into s1, the first
piece of data loaded into s2, and the user
supplied start bound.
        Let end be the earlier of the datetime of the last
        piece of data loaded into s1, the last piece
        of data loaded into s2, and the user supplied
        end bound.
If start is after end, raise an error.
// Let data be the 2D array of deques holding each piece
// of data, sorted into bins by lat/long/alt.
Let s1_data (resp s2_data) be data from s1.data, s2.data
filtered by user-provided lat/long/alt bounds, time bounds
calculated.
Let data be a dictionary of lists with the keys
[ dl1 for dl1, dl2 in data_labels ] +
STD_LABELS +
[ lb+"2" for lb in STD_LABELS ]
For each piece of data s1_point in s1_data:
# Hopefully np.where is very good, because this
# runs O(n) times.
# We could try reusing selections, maybe, if needed.
# This would probably involve binning.
Let s2_near be the data from s2.data within certain
bounds on lat/long/alt/time using 8 statements to
numpy.where. We can probably get those defaults from
the user or handy constants / config?
# We could try a different algorithm for closest pairs
# of points.
Let distance be the numpy array representing the
distance between s1_point and each point in s2_near.
# S: Difference for others: change this line.
For each of those, calculate the spatial difference
from the s1 using lat/long/alt. If s2_near is
empty; break loop.
Let s2_nearest be the point in s2_near corresponding
to the lowest distance.
Append to data: a point, indexed by the time from
s1_point, containing the following data:
# note
Let n be the length of data["time"].
For each key in data:
Assert len(data[key]) == n
End for.
# Create data row to pass to pandas.
Let row be an empty dict.
For dl1, dl2 in data_labels:
Append s1_point[dl1] - s2_nearest[dl2] to data[dl1].
For key in STD_LABELS:
Append s1_point[translate[key]] to data[key]
key = key+"2"
Append s2_nearest[translate[key]] to data[key]
Let data_df be a pandas dataframe created from the data
in data.
return { 'data': data_df, 'start':start, 'end':end }
"""
labels = [dl1 for dl1, dl2 in data_labels] + ['1_'+b[0] for b in bounds] + ['2_'+b[1] for b in bounds] + ['dist']
data = {label: [] for label in labels}
# Apply bounds
inst1 = instrument1.data
inst2 = instrument2.data
for b in bounds:
label1 = b[0]
label2 = b[1]
low = b[2]
high = b[3]
data1 = inst1[label1]
ind1 = np.where((data1 >= low) & (data1 < high))
inst1 = inst1.iloc[ind1]
data2 = inst2[label2]
ind2 = np.where((data2 >= low) & (data2 < high))
inst2 = inst2.iloc[ind2]
for i, s1_point in inst1.iterrows():
# Gets points in instrument2 within the given bounds
s2_near = instrument2.data
for b in bounds:
label1 = b[0]
label2 = b[1]
s1_val = s1_point[label1]
max_dist = b[4]
minbound = s1_val - max_dist
maxbound = s1_val + max_dist
data2 = s2_near[label2]
indices = np.where((data2 >= minbound) & (data2 < maxbound))
s2_near = s2_near.iloc[indices]
# Finds nearest point to s1_point in s2_near
s2_nearest = None
min_dist = float('NaN')
for j, s2_point in s2_near.iterrows():
dist = cost_function(s1_point, s2_point)
if dist < min_dist or min_dist != min_dist:
min_dist = dist
s2_nearest = s2_point
data['dist'].append(min_dist)
# Append difference to data dict
for dl1, dl2 in data_labels:
if s2_nearest is not None:
data[dl1].append(s1_point[dl1] - s2_nearest[dl2])
else:
data[dl1].append(float('NaN'))
# Append the rest of the row
for b in bounds:
label1 = b[0]
label2 = b[1]
data['1_'+label1].append(s1_point[label1])
if s2_nearest is not None:
data['2_'+label2].append(s2_nearest[label2])
else:
data['2_'+label2].append(float('NaN'))
data_df = pds.DataFrame(data=data)
return data_df | Calculates the difference in signals from multiple
instruments within the given bounds.
Parameters
----------
instrument1 : Instrument
Information must already be loaded into the
instrument.
instrument2 : Instrument
Information must already be loaded into the
instrument.
bounds : list of tuples in the form (inst1_label, inst2_label,
min, max, max_difference)
    inst1_label and inst2_label are labels for the data in
instrument1 and instrument2
min and max are bounds on the data considered
max_difference is the maximum difference between two points
for the difference to be calculated
data_labels : list of tuples of data labels
The first key is used to access data in s1
and the second data in s2.
cost_function : function
function that operates on two rows of the instrument data.
used to determine the distance between two points for finding
closest points
Returns
-------
data_df: pandas DataFrame
Each row has a point from instrument1, with the keys
preceded by '1_', and a point within bounds on that point
from instrument2 with the keys preceded by '2_', and the
difference between the instruments' data for all the labels
in data_labels
Created as part of a Spring 2018 UTDesign project. | entailment |
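A toy illustration of the bound-then-nearest-point matching performed above, using pandas only; the column names, bounds, and cost function are invented for the example.
import pandas as pd

# Hypothetical cost function; the routine above leaves the metric to the caller.
def cost_function(p1, p2):
    return (p1['lat'] - p2['lat']) ** 2 + (p1['long'] - p2['long']) ** 2

inst1 = pd.DataFrame({'lat': [10.0, 20.0], 'long': [30.0, 40.0], 'te': [1000.0, 1100.0]})
inst2 = pd.DataFrame({'lat': [10.5, 19.0], 'long': [29.0, 41.0], 'te': [990.0, 1150.0]})

rows = []
for _, p1 in inst1.iterrows():
    # Pre-filter instrument2 to points within +/- 2 degrees of p1 on each bound.
    near = inst2[inst2['lat'].between(p1['lat'] - 2, p1['lat'] + 2) &
                 inst2['long'].between(p1['long'] - 2, p1['long'] + 2)]
    if near.empty:
        continue
    # Pick the in-bounds point with the lowest cost and difference the data labels.
    dists = near.apply(lambda p2: cost_function(p1, p2), axis=1)
    p2 = near.loc[dists.idxmin()]
    rows.append({'1_lat': p1['lat'], '2_lat': p2['lat'],
                 'te': p1['te'] - p2['te'], 'dist': dists.min()})
print(pd.DataFrame(rows))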
def daily2D(inst, bin1, label1, bin2, label2, data_label, gate, returnBins=False):
"""2D Daily Occurrence Probability of data_label > gate over a season.
If data_label is greater than gate at least once per day,
    then a 100% occurrence probability results. Season delineated by the bounds
attached to Instrument object.
Prob = (# of times with at least one hit)/(# of times in bin)
Parameters
----------
inst: pysat.Instrument()
Instrument to use for calculating occurrence probability
binx: list
[min, max, number of bins]
labelx: string
name for data product for binx
data_label: list of strings
identifies data product(s) to calculate occurrence probability
e.g. inst[data_label]
gate: list of values
values that data_label must achieve to be counted as an occurrence
returnBins: Boolean
if True, return arrays with values of bin edges, useful for pcolor
Returns
-------
occur_prob : dictionary
A dict of dicts indexed by data_label. Each entry is dict with entries
'prob' for the probability and 'count' for the number of days with any
data; 'bin_x' and 'bin_y' are also returned if requested. Note that arrays
are organized for direct plotting, y values along rows, x along columns.
Note
----
Season delineated by the bounds attached to Instrument object.
"""
return _occurrence2D(inst, bin1, label1, bin2, label2, data_label, gate,
by_orbit=False, returnBins=returnBins) | 2D Daily Occurrence Probability of data_label > gate over a season.
If data_label is greater than gate at least once per day,
    then a 100% occurrence probability results. Season delineated by the bounds
attached to Instrument object.
Prob = (# of times with at least one hit)/(# of times in bin)
Parameters
----------
inst: pysat.Instrument()
Instrument to use for calculating occurrence probability
binx: list
[min, max, number of bins]
labelx: string
name for data product for binx
data_label: list of strings
identifies data product(s) to calculate occurrence probability
e.g. inst[data_label]
gate: list of values
values that data_label must achieve to be counted as an occurrence
returnBins: Boolean
if True, return arrays with values of bin edges, useful for pcolor
Returns
-------
occur_prob : dictionary
A dict of dicts indexed by data_label. Each entry is dict with entries
'prob' for the probability and 'count' for the number of days with any
data; 'bin_x' and 'bin_y' are also returned if requested. Note that arrays
are organized for direct plotting, y values along rows, x along columns.
Note
----
Season delineated by the bounds attached to Instrument object. | entailment |
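The probability definition above reduces to a count per bin; a self-contained numeric check with made-up daily samples for a single bin and a gate of 100.
import numpy as np

# Prob = (# of days with at least one value above the gate) / (# of days with data in the bin)
daily_samples = [np.array([80.0, 120.0, 90.0]),   # day 1: one value exceeds the gate
                 np.array([70.0, 60.0]),          # day 2: no values exceed the gate
                 np.array([])]                    # day 3: no data in this bin
gate = 100.0

hits = sum(1 for day in daily_samples if day.size and (day > gate).any())
total = sum(1 for day in daily_samples if day.size)
prob = hits / total if total else np.nan
print(prob)   # 0.5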
def by_orbit2D(inst, bin1, label1, bin2, label2, data_label, gate, returnBins=False):
"""2D Occurrence Probability of data_label orbit-by-orbit over a season.
    If data_label is greater than gate at least once per orbit, then a
100% occurrence probability results. Season delineated by the bounds
attached to Instrument object.
Prob = (# of times with at least one hit)/(# of times in bin)
Parameters
----------
inst: pysat.Instrument()
Instrument to use for calculating occurrence probability
binx: list
[min value, max value, number of bins]
labelx: string
identifies data product for binx
data_label: list of strings
identifies data product(s) to calculate occurrence probability
gate: list of values
values that data_label must achieve to be counted as an occurrence
returnBins: Boolean
if True, return arrays with values of bin edges, useful for pcolor
Returns
-------
occur_prob : dictionary
A dict of dicts indexed by data_label. Each entry is dict with entries
'prob' for the probability and 'count' for the number of orbits with any
data; 'bin_x' and 'bin_y' are also returned if requested. Note that arrays
are organized for direct plotting, y values along rows, x along columns.
Note
----
Season delineated by the bounds attached to Instrument object.
"""
return _occurrence2D(inst, bin1, label1, bin2, label2, data_label, gate,
by_orbit=True, returnBins=returnBins) | 2D Occurrence Probability of data_label orbit-by-orbit over a season.
    If data_label is greater than gate at least once per orbit, then a
100% occurrence probability results. Season delineated by the bounds
attached to Instrument object.
Prob = (# of times with at least one hit)/(# of times in bin)
Parameters
----------
inst: pysat.Instrument()
Instrument to use for calculating occurrence probability
binx: list
[min value, max value, number of bins]
labelx: string
identifies data product for binx
data_label: list of strings
identifies data product(s) to calculate occurrence probability
gate: list of values
values that data_label must achieve to be counted as an occurrence
returnBins: Boolean
if True, return arrays with values of bin edges, useful for pcolor
Returns
-------
occur_prob : dictionary
A dict of dicts indexed by data_label. Each entry is dict with entries
'prob' for the probability and 'count' for the number of orbits with any
data; 'bin_x' and 'bin_y' are also returned if requested. Note that arrays
are organized for direct plotting, y values along rows, x along columns.
Note
----
Season delineated by the bounds attached to Instrument object. | entailment |
def daily3D(inst, bin1, label1, bin2, label2, bin3, label3,
data_label, gate, returnBins=False):
"""3D Daily Occurrence Probability of data_label > gate over a season.
    If data_label is greater than gate at least once per day,
then a 100% occurrence probability results. Season delineated by
the bounds attached to Instrument object.
Prob = (# of times with at least one hit)/(# of times in bin)
Parameters
----------
inst: pysat.Instrument()
Instrument to use for calculating occurrence probability
binx: list
[min, max, number of bins]
labelx: string
name for data product for binx
data_label: list of strings
identifies data product(s) to calculate occurrence probability
gate: list of values
values that data_label must achieve to be counted as an occurrence
returnBins: Boolean
if True, return arrays with values of bin edges, useful for pcolor
Returns
-------
occur_prob : dictionary
A dict of dicts indexed by data_label. Each entry is dict with entries
'prob' for the probability and 'count' for the number of days with any
data; 'bin_x', 'bin_y', and 'bin_z' are also returned if requested. Note
that arrays are organized for direct plotting, z,y,x.
Note
----
Season delineated by the bounds attached to Instrument object.
"""
return _occurrence3D(inst, bin1, label1, bin2, label2, bin3, label3,
data_label, gate, returnBins=returnBins, by_orbit=False) | 3D Daily Occurrence Probability of data_label > gate over a season.
    If data_label is greater than gate at least once per day,
then a 100% occurrence probability results. Season delineated by
the bounds attached to Instrument object.
Prob = (# of times with at least one hit)/(# of times in bin)
Parameters
----------
inst: pysat.Instrument()
Instrument to use for calculating occurrence probability
binx: list
[min, max, number of bins]
labelx: string
name for data product for binx
data_label: list of strings
identifies data product(s) to calculate occurrence probability
gate: list of values
values that data_label must achieve to be counted as an occurrence
returnBins: Boolean
if True, return arrays with values of bin edges, useful for pcolor
Returns
-------
occur_prob : dictionary
A dict of dicts indexed by data_label. Each entry is dict with entries
'prob' for the probability and 'count' for the number of days with any
data; 'bin_x', 'bin_y', and 'bin_z' are also returned if requested. Note
that arrays are organized for direct plotting, z,y,x.
Note
----
Season delineated by the bounds attached to Instrument object. | entailment |
def computational_form(data):
"""
Input Series of numbers, Series, or DataFrames repackaged
for calculation.
Parameters
----------
data : pandas.Series
Series of numbers, Series, DataFrames
Returns
-------
pandas.Series, DataFrame, or Panel
repacked data, aligned by indices, ready for calculation
"""
if isinstance(data.iloc[0], DataFrame):
dslice = Panel.from_dict(dict([(i,data.iloc[i])
for i in xrange(len(data))]))
elif isinstance(data.iloc[0], Series):
dslice = DataFrame(data.tolist())
dslice.index = data.index
else:
dslice = data
return dslice | Input Series of numbers, Series, or DataFrames repackaged
for calculation.
Parameters
----------
data : pandas.Series
Series of numbers, Series, DataFrames
Returns
-------
pandas.Series, DataFrame, or Panel
repacked data, aligned by indices, ready for calculation | entailment |
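Note that pandas.Panel has been removed from modern pandas, so the Series-of-DataFrames branch above no longer runs as written. A concat with keys gives an equivalent aligned structure; a small sketch with invented sample frames, not a drop-in replacement for the function.
import pandas as pd

# Series of DataFrames, as computational_form expects for the 3D case.
frames = pd.Series([pd.DataFrame({'a': [1, 2]}), pd.DataFrame({'a': [3, 4]})],
                   index=pd.to_datetime(['2019-01-01', '2019-01-02']))

# pd.concat with keys yields a MultiIndex frame aligned on the outer time index,
# which supports the same "calculate across samples" use case as the old Panel.
stacked = pd.concat(frames.tolist(), keys=frames.index)
print(stacked.groupby(level=1).mean())   # element-wise mean across the samples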
def set_data_dir(path=None, store=None):
"""
Set the top level directory pysat uses to look for data and reload.
Parameters
----------
path : string
valid path to directory pysat uses to look for data
store : bool
if True, store data directory for future runs
"""
import sys
import os
import pysat
if sys.version_info[0] >= 3:
if sys.version_info[1] < 4:
import imp
re_load = imp.reload
else:
import importlib
re_load = importlib.reload
else:
re_load = reload
if store is None:
store = True
if os.path.isdir(path):
if store:
with open(os.path.join(os.path.expanduser('~'), '.pysat',
'data_path.txt'), 'w') as f:
f.write(path)
pysat.data_dir = path
pysat._files = re_load(pysat._files)
pysat._instrument = re_load(pysat._instrument)
else:
raise ValueError('Path %s does not lead to a valid directory.' % path) | Set the top level directory pysat uses to look for data and reload.
Parameters
----------
path : string
valid path to directory pysat uses to look for data
store : bool
if True, store data directory for future runs | entailment |
def load_netcdf4(fnames=None, strict_meta=False, file_format=None, epoch_name='Epoch',
units_label='units', name_label='long_name',
notes_label='notes', desc_label='desc',
plot_label='label', axis_label='axis',
scale_label='scale',
min_label='value_min', max_label='value_max',
fill_label='fill'):
# unix_time=False, **kwargs):
"""Load netCDF-3/4 file produced by pysat.
Parameters
----------
fnames : string or array_like of strings
filenames to load
strict_meta : boolean
check if metadata across fnames is the same
file_format : string
file_format keyword passed to netCDF4 routine
NETCDF3_CLASSIC, NETCDF3_64BIT, NETCDF4_CLASSIC, and NETCDF4
Returns
--------
out : pandas.core.frame.DataFrame
DataFrame output
mdata : pysat._meta.Meta
Meta data
"""
import netCDF4
import string
import pysat
if fnames is None:
raise ValueError("Must supply a filename/list of filenames")
if isinstance(fnames, basestring):
fnames = [fnames]
if file_format is None:
file_format = 'NETCDF4'
else:
file_format = file_format.upper()
saved_mdata = None
running_idx = 0
running_store=[]
two_d_keys = []; two_d_dims = []; three_d_keys = []; three_d_dims = [];
for fname in fnames:
with netCDF4.Dataset(fname, mode='r', format=file_format) as data:
# build up dictionary with all global ncattrs
# and add those attributes to a pysat meta object
ncattrsList = data.ncattrs()
mdata = pysat.Meta(units_label=units_label, name_label=name_label,
notes_label=notes_label, desc_label=desc_label,
plot_label=plot_label, axis_label=axis_label,
scale_label=scale_label,
min_label=min_label, max_label=max_label,
fill_label=fill_label)
for d in ncattrsList:
if hasattr(mdata, d):
mdata.__setattr__(d+'_', data.getncattr(d))
else:
mdata.__setattr__(d, data.getncattr(d))
# loadup all of the variables in the netCDF
loadedVars = {}
for key in data.variables.keys():
# load up metadata. From here group unique
# dimensions and act accordingly, 1D, 2D, 3D
if len(data.variables[key].dimensions) == 1:
# load 1D data variable
# assuming basic time dimension
loadedVars[key] = data.variables[key][:]
# if key != epoch_name:
# load up metadata
meta_dict = {}
for nc_key in data.variables[key].ncattrs():
meta_dict[nc_key] = data.variables[key].getncattr(nc_key)
mdata[key] = meta_dict
if len(data.variables[key].dimensions) == 2:
# part of dataframe within dataframe
two_d_keys.append(key)
two_d_dims.append(data.variables[key].dimensions)
if len(data.variables[key].dimensions) == 3:
# part of full/dedicated dataframe within dataframe
three_d_keys.append(key)
three_d_dims.append(data.variables[key].dimensions)
# we now have a list of keys that need to go into a dataframe,
# could be more than one, collect unique dimensions for 2D keys
for dim in set(two_d_dims):
# first dimension should be epoch
# second dimension name used as variable name
obj_key_name = dim[1]
# collect variable names associated with dimension
idx_bool = [dim == i for i in two_d_dims]
idx, = np.where(np.array(idx_bool))
obj_var_keys = []
clean_var_keys = []
for i in idx:
obj_var_keys.append(two_d_keys[i])
clean_var_keys.append(two_d_keys[i].split(obj_key_name+'_')[-1])
# figure out how to index this data, it could provide its own
# index - or we may have to create simple integer based DataFrame access
# if the dimension is stored as its own variable then use that info for index
if obj_key_name in obj_var_keys:
                    # string used to identify dimension also in data.variables
# will be used as an index
index_key_name = obj_key_name
# if the object index uses UNIX time, process into datetime index
if data.variables[obj_key_name].getncattr(name_label) == epoch_name:
# name to be used in DataFrame index
index_name = epoch_name
time_index_flag = True
else:
time_index_flag = False
# label to be used in DataFrame index
index_name = data.variables[obj_key_name].getncattr(name_label)
else:
# dimension is not itself a variable
index_key_name = None
# iterate over the variables and grab metadata
dim_meta_data = pysat.Meta(units_label=units_label, name_label=name_label,
notes_label=notes_label, desc_label=desc_label,
plot_label=plot_label, axis_label=axis_label,
scale_label=scale_label,
min_label=min_label, max_label=max_label,
fill_label=fill_label)
for key, clean_key in zip(obj_var_keys, clean_var_keys):
                    # store attributes in metadata, except for dim name
meta_dict = {}
for nc_key in data.variables[key].ncattrs():
meta_dict[nc_key] = data.variables[key].getncattr(nc_key)
dim_meta_data[clean_key] = meta_dict
# print (dim_meta_data)
dim_meta_dict = {'meta':dim_meta_data}
if index_key_name is not None:
# add top level meta
for nc_key in data.variables[obj_key_name].ncattrs():
dim_meta_dict[nc_key] = data.variables[obj_key_name].getncattr(nc_key)
mdata[obj_key_name] = dim_meta_dict
# iterate over all variables with this dimension and store data
# data storage, whole shebang
loop_dict = {}
# list holds a series of slices, parsed from dict above
loop_list = []
for key, clean_key in zip(obj_var_keys, clean_var_keys):
# data
loop_dict[clean_key] = data.variables[key][:,:].flatten(order='C')
# number of values in time
loop_lim = data.variables[obj_var_keys[0]].shape[0]
# number of values per time
step_size = len(data.variables[obj_var_keys[0]][0,:])
# check if there is an index we should use
if not (index_key_name is None):
# an index was found
time_var = loop_dict.pop(index_key_name)
if time_index_flag:
# create datetime index from data
if file_format == 'NETCDF4':
time_var = pds.to_datetime(1E6*time_var)
else:
time_var = pds.to_datetime(1E6*time_var)
new_index = time_var
new_index_name = index_name
else:
# using integer indexing
new_index = np.arange(loop_lim*step_size, dtype=int) % step_size
new_index_name = 'index'
# load all data into frame
if len(loop_dict.keys()) > 1:
loop_frame = pds.DataFrame(loop_dict, columns=clean_var_keys)
if obj_key_name in loop_frame:
del loop_frame[obj_key_name]
# break massive frame into bunch of smaller frames
for i in np.arange(loop_lim, dtype=int):
loop_list.append(loop_frame.iloc[step_size*i:step_size*(i+1),:])
loop_list[-1].index = new_index[step_size*i:step_size*(i+1)]
loop_list[-1].index.name = new_index_name
else:
loop_frame = pds.Series(loop_dict[clean_var_keys[0]], name=obj_var_keys[0])
# break massive series into bunch of smaller series
for i in np.arange(loop_lim, dtype=int):
loop_list.append(loop_frame.iloc[step_size*i:step_size*(i+1)])
loop_list[-1].index = new_index[step_size*i:step_size*(i+1)]
loop_list[-1].index.name = new_index_name
# print (loop_frame.columns)
# add 2D object data, all based on a unique dimension within
# netCDF, to loaded data dictionary
loadedVars[obj_key_name] = loop_list
del loop_list
# we now have a list of keys that need to go into a dataframe,
# could be more than one, collect unique dimensions for 2D keys
for dim in set(three_d_dims):
# collect variable names associated with dimension
idx_bool = [dim == i for i in three_d_dims]
idx, = np.where(np.array(idx_bool))
obj_var_keys = []
for i in idx:
obj_var_keys.append(three_d_keys[i])
for obj_key_name in obj_var_keys:
# store attributes in metadata
meta_dict = {}
for nc_key in data.variables[obj_key_name].ncattrs():
meta_dict[nc_key] = data.variables[obj_key_name].getncattr(nc_key)
mdata[obj_key_name] = meta_dict
# iterate over all variables with this dimension and store data
# data storage, whole shebang
loop_dict = {}
# list holds a series of slices, parsed from dict above
loop_list = []
loop_dict[obj_key_name] = data.variables[obj_key_name][:,:,:]
# number of values in time
loop_lim = data.variables[obj_key_name].shape[0]
# number of values per time
step_size_x = len(data.variables[obj_key_name][0, :, 0])
step_size_y = len(data.variables[obj_key_name][0, 0, :])
step_size = step_size_x
loop_dict[obj_key_name] = loop_dict[obj_key_name].reshape((loop_lim*step_size_x, step_size_y))
# check if there is an index we should use
if not (index_key_name is None):
# an index was found
time_var = loop_dict.pop(index_key_name)
if time_index_flag:
# create datetime index from data
if file_format == 'NETCDF4':
time_var = pds.to_datetime(1E6*time_var)
else:
time_var = pds.to_datetime(1E6*time_var)
new_index = time_var
new_index_name = index_name
else:
# using integer indexing
new_index = np.arange(loop_lim*step_size, dtype=int) % step_size
new_index_name = 'index'
# load all data into frame
loop_frame = pds.DataFrame(loop_dict[obj_key_name])
# del loop_frame['dimension_1']
# break massive frame into bunch of smaller frames
for i in np.arange(loop_lim, dtype=int):
loop_list.append(loop_frame.iloc[step_size*i:step_size*(i+1),:])
loop_list[-1].index = new_index[step_size*i:step_size*(i+1)]
loop_list[-1].index.name = new_index_name
# add 2D object data, all based on a unique dimension within netCDF,
# to loaded data dictionary
loadedVars[obj_key_name] = loop_list
del loop_list
# prepare dataframe index for this netcdf file
time_var = loadedVars.pop(epoch_name)
# convert from GPS seconds to seconds used in pandas (unix time,
# no leap)
#time_var = convert_gps_to_unix_seconds(time_var)
if file_format == 'NETCDF4':
loadedVars[epoch_name] = pds.to_datetime((1E6 *
time_var).astype(int))
else:
loadedVars[epoch_name] = pds.to_datetime((time_var *
1E6).astype(int))
#loadedVars[epoch_name] = pds.to_datetime((time_var*1E6).astype(int))
running_store.append(loadedVars)
running_idx += len(loadedVars[epoch_name])
if strict_meta:
if saved_mdata is None:
saved_mdata = copy.deepcopy(mdata)
elif (mdata != saved_mdata):
raise ValueError('Metadata across filenames is not the ' +
'same.')
# combine all of the data loaded across files together
out = []
for item in running_store:
out.append(pds.DataFrame.from_records(item, index=epoch_name))
out = pds.concat(out, axis=0)
return out, mdata | Load netCDF-3/4 file produced by pysat.
Parameters
----------
fnames : string or array_like of strings
filenames to load
strict_meta : boolean
check if metadata across fnames is the same
file_format : string
file_format keyword passed to netCDF4 routine
NETCDF3_CLASSIC, NETCDF3_64BIT, NETCDF4_CLASSIC, and NETCDF4
Returns
--------
out : pandas.core.frame.DataFrame
DataFrame output
mdata : pysat._meta.Meta
Meta data | entailment |
def getyrdoy(date):
"""Return a tuple of year, day of year for a supplied datetime object."""
try:
doy = date.toordinal()-datetime(date.year,1,1).toordinal()+1
except AttributeError:
raise AttributeError("Must supply a pandas datetime object or " +
"equivalent")
else:
return date.year, doy | Return a tuple of year, day of year for a supplied datetime object. | entailment |
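A quick check of the ordinal arithmetic above, using only the standard library.
from datetime import datetime

date = datetime(2009, 2, 1)
# Day of year is one plus the number of whole days since 1 January.
doy = date.toordinal() - datetime(date.year, 1, 1).toordinal() + 1
print(date.year, doy)   # 2009 32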
def season_date_range(start, stop, freq='D'):
"""
Return array of datetime objects using input frequency from start to stop
Supports single datetime object or list, tuple, ndarray of start and
stop dates.
freq codes correspond to pandas date_range codes, D daily, M monthly,
S secondly
"""
if hasattr(start, '__iter__'):
# missing check for datetime
season = pds.date_range(start[0], stop[0], freq=freq)
for (sta,stp) in zip(start[1:], stop[1:]):
season = season.append(pds.date_range(sta, stp, freq=freq))
else:
season = pds.date_range(start, stop, freq=freq)
return season | Return array of datetime objects using input frequency from start to stop
Supports single datetime object or list, tuple, ndarray of start and
stop dates.
freq codes correspond to pandas date_range codes, D daily, M monthly,
S secondly | entailment |
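The iterable branch above stitches several date ranges together; a self-contained example with two invented intervals.
import pandas as pd

starts = [pd.Timestamp('2009-01-01'), pd.Timestamp('2009-03-01')]
stops = [pd.Timestamp('2009-01-03'), pd.Timestamp('2009-03-02')]

# Build the first interval, then append the remaining ones into a single index.
season = pd.date_range(starts[0], stops[0], freq='D')
for sta, stp in zip(starts[1:], stops[1:]):
    season = season.append(pd.date_range(sta, stp, freq='D'))
print(season)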
def create_datetime_index(year=None, month=None, day=None, uts=None):
"""Create a timeseries index using supplied year, month, day, and ut in
seconds.
Parameters
----------
year : array_like of ints
month : array_like of ints or None
day : array_like of ints
for day (default) or day of year (use month=None)
uts : array_like of floats
Returns
-------
Pandas timeseries index.
Note
----
Leap seconds have no meaning here.
"""
# need a timeseries index for storing satellite data in pandas but
# creating a datetime object for everything is too slow
# so I calculate the number of nanoseconds elapsed since first sample,
# and create timeseries index from that.
# Factor of 20 improvement compared to previous method,
# which itself was an order of magnitude faster than datetime.
#get list of unique year, and month
if not hasattr(year, '__iter__'):
raise ValueError('Must provide an iterable for all inputs.')
if len(year) == 0:
raise ValueError('Length of array must be larger than 0.')
year = year.astype(int)
if month is None:
month = np.ones(len(year), dtype=int)
else:
month = month.astype(int)
if uts is None:
uts = np.zeros(len(year))
if day is None:
day = np.ones(len(year))
day = day.astype(int)
# track changes in seconds
uts_del = uts.copy().astype(float)
# determine where there are changes in year and month that need to be
# accounted for
_,idx = np.unique(year*100.+month, return_index=True)
# create another index array for faster algorithm below
idx2 = np.hstack((idx,len(year)+1))
# computes UTC seconds offset for each unique set of year and month
for _idx,_idx2 in zip(idx[1:],idx2[2:]):
temp = (datetime(year[_idx],month[_idx],1)
- datetime(year[0],month[0],1))
uts_del[_idx:_idx2] += temp.total_seconds()
# add in UTC seconds for days, ignores existence of leap seconds
uts_del += (day-1)*86400
# add in seconds since unix epoch to first day
uts_del += (datetime(year[0],month[0],1)-datetime(1970,1,1)).total_seconds()
    # going to use routine that defaults to nanoseconds for epoch
uts_del *= 1E9
return pds.to_datetime(uts_del) | Create a timeseries index using supplied year, month, day, and ut in
seconds.
Parameters
----------
year : array_like of ints
month : array_like of ints or None
day : array_like of ints
for day (default) or day of year (use month=None)
uts : array_like of floats
Returns
-------
Pandas timeseries index.
Note
----
Leap seconds have no meaning here. | entailment |
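The same seconds-since-epoch trick on a small scale, with invented sample times; the whole calculation is vectorized and handed to pandas once, which is where the speed-up described above comes from.
import numpy as np
import pandas as pd
from datetime import datetime

year, month, day = 2009, 1, np.array([1, 1, 2])
uts = np.array([0.0, 1.5, 86399.0])

# Seconds from the Unix epoch to the first day of the month, then add day and UT offsets.
base = (datetime(year, month, 1) - datetime(1970, 1, 1)).total_seconds()
total_seconds = base + (day - 1) * 86400 + uts
# Scale to nanoseconds; pandas interprets bare numbers as ns since the epoch.
index = pd.to_datetime(total_seconds * 1e9)
print(index)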
def nan_circmean(samples, high=2.0*np.pi, low=0.0, axis=None):
"""NaN insensitive version of scipy's circular mean routine
Parameters
-----------
samples : array_like
Input array
    low : float or int
        Lower boundary for circular mean range (default=0)
    high: float or int
        Upper boundary for circular mean range (default=2 pi)
    axis : int or NoneType
        Axis along which means are computed. The default is to
        compute the mean of the flattened array
Returns
--------
circmean : float
Circular mean
"""
samples = np.asarray(samples)
samples = samples[~np.isnan(samples)]
if samples.size == 0:
return np.nan
# Ensure the samples are in radians
ang = (samples - low) * 2.0 * np.pi / (high - low)
# Calculate the means of the sine and cosine, as well as the length
# of their unit vector
ssum = np.sin(ang).sum(axis=axis)
csum = np.cos(ang).sum(axis=axis)
res = np.arctan2(ssum, csum)
# Bring the range of the result between 0 and 2 pi
mask = res < 0.0
if mask.ndim > 0:
res[mask] += 2.0 * np.pi
elif mask:
res += 2.0 * np.pi
# Calculate the circular standard deviation
circmean = res * (high - low) / (2.0 * np.pi) + low
return circmean | NaN insensitive version of scipy's circular mean routine
Parameters
-----------
samples : array_like
Input array
    low : float or int
        Lower boundary for circular mean range (default=0)
    high: float or int
        Upper boundary for circular mean range (default=2 pi)
    axis : int or NoneType
        Axis along which means are computed. The default is to
        compute the mean of the flattened array
Returns
--------
circmean : float
Circular mean | entailment |
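A short numeric check of the wrap-around behaviour, dropping the NaN just as the routine above does; the angles (in degrees) are invented for the example.
import numpy as np

samples = np.array([350.0, 10.0, np.nan])   # degrees, straddling the wrap point
low, high = 0.0, 360.0

vals = samples[~np.isnan(samples)]
ang = (vals - low) * 2.0 * np.pi / (high - low)
res = np.arctan2(np.sin(ang).sum(), np.cos(ang).sum())
if res < 0.0:
    res += 2.0 * np.pi
# Close to 0 (or 360, the wrap point), not the naive arithmetic mean of 180.
print(res * (high - low) / (2.0 * np.pi) + low)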
def nan_circstd(samples, high=2.0*np.pi, low=0.0, axis=None):
"""NaN insensitive version of scipy's circular standard deviation routine
Parameters
-----------
samples : array_like
Input array
low : float or int
Lower boundary for circular standard deviation range (default=0)
high: float or int
Upper boundary for circular standard deviation range (default=2 pi)
axis : int or NoneType
Axis along which standard deviations are computed. The default is to
compute the standard deviation of the flattened array
Returns
--------
circstd : float
Circular standard deviation
"""
samples = np.asarray(samples)
samples = samples[~np.isnan(samples)]
if samples.size == 0:
return np.nan
# Ensure the samples are in radians
ang = (samples - low) * 2.0 * np.pi / (high - low)
# Calculate the means of the sine and cosine, as well as the length
# of their unit vector
smean = np.sin(ang).mean(axis=axis)
cmean = np.cos(ang).mean(axis=axis)
rmean = np.sqrt(smean**2 + cmean**2)
# Calculate the circular standard deviation
circstd = (high - low) * np.sqrt(-2.0 * np.log(rmean)) / (2.0 * np.pi)
return circstd | NaN insensitive version of scipy's circular standard deviation routine
Parameters
-----------
samples : array_like
Input array
low : float or int
Lower boundary for circular standard deviation range (default=0)
high: float or int
Upper boundary for circular standard deviation range (default=2 pi)
axis : int or NoneType
Axis along which standard deviations are computed. The default is to
compute the standard deviation of the flattened array
Returns
--------
circstd : float
Circular standard deviation | entailment |
def default(inst):
"""Default routine to be applied when loading data. Removes redundant naming
"""
import pysat.instruments.icon_ivm as icivm
inst.tag = 'level_2'
icivm.remove_icon_names(inst, target='ICON_L2_EUV_Daytime_OP_') | Default routine to be applied when loading data. Removes redundant naming | entailment |
def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
"""Produce a list of ICON EUV files.
Notes
-----
Currently fixed to level-2
"""
desc = None
level = tag
if level == 'level_1':
code = 'L1'
desc = None
elif level == 'level_2':
code = 'L2'
desc = None
else:
raise ValueError('Unsupported level supplied: ' + level)
if format_str is None:
format_str = 'ICON_'+code+'_EUV_Daytime'
if desc is not None:
format_str += '_' + desc +'_'
format_str += '_{year:4d}-{month:02d}-{day:02d}_v{version:02d}r{revision:03d}.NC'
return pysat.Files.from_os(data_path=data_path,
format_str=format_str) | Produce a list of ICON EUV files.
Notes
-----
Currently fixed to level-2 | entailment |
def shadow_copy(self):
""" Return a copy of the resource with same raw data
:return: copy of the resource
"""
ret = self.__class__()
if not self._is_updated():
# before copy, make sure source is updated.
self.update()
ret._parsed_resource = self._parsed_resource
return ret | Return a copy of the resource with same raw data
:return: copy of the resource | entailment |
def load(fnames, tag=None, sat_id=None, **kwargs):
"""Loads data using pysat.utils.load_netcdf4 .
This routine is called as needed by pysat. It is not intended
for direct user interaction.
Parameters
----------
fnames : array-like
iterable of filename strings, full path, to data files to be loaded.
This input is nominally provided by pysat itself.
tag : string
tag name used to identify particular data set to be loaded.
This input is nominally provided by pysat itself.
sat_id : string
Satellite ID used to identify particular data set to be loaded.
This input is nominally provided by pysat itself.
**kwargs : extra keywords
Passthrough for additional keyword arguments specified when
instantiating an Instrument object. These additional keywords
are passed through to this routine by pysat.
Returns
-------
data, metadata
Data and Metadata are formatted for pysat. Data is a pandas
DataFrame while metadata is a pysat.Meta instance.
Note
----
Any additional keyword arguments passed to pysat.Instrument
upon instantiation are passed along to this routine and through
to the load_netcdf4 call.
Examples
--------
::
inst = pysat.Instrument('sport', 'ivm')
inst.load(2019,1)
# create quick Instrument object for a new, random netCDF4 file
# define filename template string to identify files
# this is normally done by instrument code, but in this case
# there is no built in pysat instrument support
# presumes files are named default_2019-01-01.NC
format_str = 'default_{year:04d}-{month:02d}-{day:02d}.NC'
inst = pysat.Instrument('netcdf', 'pandas',
                                custom_kwarg='test',
data_path='./',
format_str=format_str)
inst.load(2019,1)
"""
return pysat.utils.load_netcdf4(fnames, **kwargs) | Loads data using pysat.utils.load_netcdf4 .
This routine is called as needed by pysat. It is not intended
for direct user interaction.
Parameters
----------
fnames : array-like
iterable of filename strings, full path, to data files to be loaded.
This input is nominally provided by pysat itself.
tag : string
tag name used to identify particular data set to be loaded.
This input is nominally provided by pysat itself.
sat_id : string
Satellite ID used to identify particular data set to be loaded.
This input is nominally provided by pysat itself.
**kwargs : extra keywords
Passthrough for additional keyword arguments specified when
instantiating an Instrument object. These additional keywords
are passed through to this routine by pysat.
Returns
-------
data, metadata
Data and Metadata are formatted for pysat. Data is a pandas
DataFrame while metadata is a pysat.Meta instance.
Note
----
Any additional keyword arguments passed to pysat.Instrument
upon instantiation are passed along to this routine and through
to the load_netcdf4 call.
Examples
--------
::
inst = pysat.Instrument('sport', 'ivm')
inst.load(2019,1)
# create quick Instrument object for a new, random netCDF4 file
# define filename template string to identify files
# this is normally done by instrument code, but in this case
# there is no built in pysat instrument support
# presumes files are named default_2019-01-01.NC
format_str = 'default_{year:04d}-{month:02d}-{day:02d}.NC'
inst = pysat.Instrument('netcdf', 'pandas',
                                custom_kwarg='test',
data_path='./',
format_str=format_str)
inst.load(2019,1) | entailment |
def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
"""Produce a list of files corresponding to format_str located at data_path.
This routine is invoked by pysat and is not intended for direct use by the end user.
Multiple data levels may be supported via the 'tag' and 'sat_id' input strings.
Parameters
----------
tag : string ('')
tag name used to identify particular data set to be loaded.
This input is nominally provided by pysat itself.
sat_id : string ('')
Satellite ID used to identify particular data set to be loaded.
This input is nominally provided by pysat itself.
data_path : string
Full path to directory containing files to be loaded. This
is provided by pysat. The user may specify their own data path
at Instrument instantiation and it will appear here.
format_str : string (None)
String template used to parse the datasets filenames. If a user
supplies a template string at Instrument instantiation
then it will appear here, otherwise defaults to None.
Returns
-------
pandas.Series
Series of filename strings, including the path, indexed by datetime.
Examples
--------
::
If a filename is SPORT_L2_IVM_2019-01-01_v01r0000.NC then the template
is 'SPORT_L2_IVM_{year:04d}-{month:02d}-{day:02d}_v{version:02d}r{revision:04d}.NC'
Note
----
The returned Series should not have any duplicate datetimes. If there are
multiple versions of a file the most recent version should be kept and the rest
discarded. This routine uses the pysat.Files.from_os constructor, thus
the returned files are up to pysat specifications.
Normally the format_str for each supported tag and sat_id is defined within this routine.
However, as this is a generic routine, those definitions can't be made here. This method
could be used in an instrument specific module where the list_files routine in the
new package defines the format_str based upon inputs, then calls this routine passing
both data_path and format_str.
Alternately, the list_files routine in nasa_cdaweb_methods may also be used and has
    more built in functionality. Supported tags and format strings may be defined
within the new instrument module and passed as arguments to nasa_cdaweb_methods.list_files .
For an example on using this routine, see pysat/instrument/cnofs_ivm.py or cnofs_vefi, cnofs_plp,
omni_hro, timed_see, etc.
"""
return pysat.Files.from_os(data_path=data_path, format_str=format_str) | Produce a list of files corresponding to format_str located at data_path.
This routine is invoked by pysat and is not intended for direct use by the end user.
Multiple data levels may be supported via the 'tag' and 'sat_id' input strings.
Parameters
----------
tag : string ('')
tag name used to identify particular data set to be loaded.
This input is nominally provided by pysat itself.
sat_id : string ('')
Satellite ID used to identify particular data set to be loaded.
This input is nominally provided by pysat itself.
data_path : string
Full path to directory containing files to be loaded. This
is provided by pysat. The user may specify their own data path
at Instrument instantiation and it will appear here.
format_str : string (None)
String template used to parse the datasets filenames. If a user
supplies a template string at Instrument instantiation
then it will appear here, otherwise defaults to None.
Returns
-------
pandas.Series
Series of filename strings, including the path, indexed by datetime.
Examples
--------
::
If a filename is SPORT_L2_IVM_2019-01-01_v01r0000.NC then the template
is 'SPORT_L2_IVM_{year:04d}-{month:02d}-{day:02d}_v{version:02d}r{revision:04d}.NC'
Note
----
The returned Series should not have any duplicate datetimes. If there are
multiple versions of a file the most recent version should be kept and the rest
discarded. This routine uses the pysat.Files.from_os constructor, thus
the returned files are up to pysat specifications.
Normally the format_str for each supported tag and sat_id is defined within this routine.
However, as this is a generic routine, those definitions can't be made here. This method
could be used in an instrument specific module where the list_files routine in the
new package defines the format_str based upon inputs, then calls this routine passing
both data_path and format_str.
Alternately, the list_files routine in nasa_cdaweb_methods may also be used and has
    more built in functionality. Supported tags and format strings may be defined
within the new instrument module and passed as arguments to nasa_cdaweb_methods.list_files .
For an example on using this routine, see pysat/instrument/cnofs_ivm.py or cnofs_vefi, cnofs_plp,
omni_hro, timed_see, etc. | entailment |
def command(f):
""" indicate it's a command of naviseccli
:param f: function that returns the command in list
:return: command execution result
"""
@functools.wraps(f)
def func_wrapper(self, *argv, **kwargs):
if 'ip' in kwargs:
ip = kwargs['ip']
del kwargs['ip']
else:
ip = None
commands = _get_commands(f, self, *argv, **kwargs)
return self.execute(commands, ip=ip)
return func_wrapper | indicate it's a command of naviseccli
:param f: function that returns the command in list
:return: command execution result | entailment |
def duel_command(f):
    """ indicate it's a command that needs to be called on both SPs
:param f: function that returns the command in list
:return: command execution result on both sps (tuple of 2)
"""
@functools.wraps(f)
def func_wrapper(self, *argv, **kwargs):
commands = _get_commands(f, self, *argv, **kwargs)
return self.execute_dual(commands)
    return func_wrapper | indicate it's a command that needs to be called on both SPs
:param f: function that returns the command in list
:return: command execution result on both sps (tuple of 2) | entailment |
def supplement_filesystem(old_size, user_cap=False):
"""Return new size accounting for the metadata."""
new_size = old_size
if user_cap:
if old_size <= _GiB_to_Byte(1.5):
new_size = _GiB_to_Byte(3)
else:
new_size += _GiB_to_Byte(1.5)
return int(new_size) | Return new size accounting for the metadata. | entailment |
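Assuming _GiB_to_Byte is the usual 1024**3 conversion, the rule above works out as follows; this is a standalone restatement for illustration, not the library function itself.
GiB = 1024 ** 3

def supplement(old_size, user_cap=False):
    # Sizes at or below 1.5 GiB are bumped to 3 GiB; larger sizes get 1.5 GiB
    # added to cover the filesystem metadata.
    if not user_cap:
        return int(old_size)
    return int(3 * GiB) if old_size <= 1.5 * GiB else int(old_size + 1.5 * GiB)

print(supplement(1 * GiB, user_cap=True) / GiB)    # 3.0
print(supplement(10 * GiB, user_cap=True) / GiB)   # 11.5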
def synchronized(cls, obj=None):
""" synchronize on obj if obj is supplied.
:param obj: the obj to lock on. if none, lock to the function
:return: return of the func.
"""
def get_key(f, o):
if o is None:
key = hash(f)
else:
key = hash(o)
return key
def get_lock(f, o):
key = get_key(f, o)
if key not in cls.lock_map:
with cls.lock_map_lock:
if key not in cls.lock_map:
cls.lock_map[key] = _init_lock()
return cls.lock_map[key]
def wrap(f):
@functools.wraps(f)
def new_func(*args, **kw):
with get_lock(f, obj):
return f(*args, **kw)
return new_func
return wrap | synchronize on obj if obj is supplied.
:param obj: the obj to lock on. if none, lock to the function
:return: return of the func. | entailment |
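A self-contained sketch of the same lock-map pattern; module-level dictionaries stand in for the class attributes lock_map and lock_map_lock assumed above, and the double-checked locking is simplified to a setdefault under the map lock.
import functools
import threading

_lock_map = {}
_lock_map_lock = threading.Lock()

def synchronized(obj=None):
    # One lock per obj, or one per function when obj is None.
    def wrap(f):
        @functools.wraps(f)
        def new_func(*args, **kw):
            key = hash(f) if obj is None else hash(obj)
            with _lock_map_lock:
                lock = _lock_map.setdefault(key, threading.Lock())
            with lock:
                return f(*args, **kw)
        return new_func
    return wrap

@synchronized()
def bump(counter):
    counter['n'] += 1

counter = {'n': 0}
bump(counter)
print(counter)   # {'n': 1}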
def re_enqueue(self, item):
"""Re-enqueue till reach max retries."""
if 'retries' in item:
retries = item['retries']
if retries >= self.MAX_RETRIES:
log.warn("Failed to execute {} after {} retries, give it "
" up.".format(item['method'], retries))
else:
retries += 1
item['retries'] = retries
self._q.put_nowait(item)
else:
item['retries'] = 1
self._q.put_nowait(item) | Re-enqueue till reach max retries. | entailment |
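The retry-capping logic above, restated as a standalone function against a plain queue.Queue; the method name and retry cap are illustrative only.
import queue

MAX_RETRIES = 5
q = queue.Queue()

def re_enqueue(item):
    # Bump the per-item retry counter; drop the item once it exceeds the cap.
    retries = item.get('retries', 0)
    if retries >= MAX_RETRIES:
        print("giving up on", item['method'])
        return
    item['retries'] = retries + 1
    q.put_nowait(item)

re_enqueue({'method': 'create_lun'})
print(q.get_nowait())   # {'method': 'create_lun', 'retries': 1}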
def _support_op(*args):
"""Internal decorator to define an criteria compare operations."""
def inner(func):
for one_arg in args:
_op_mapping_[one_arg] = func
return func
    return inner | Internal decorator to define criteria comparison operations. | entailment |
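The decorator above just fills a registry; the same idiom in isolation, with a made-up handler registered under two operator symbols.
import operator

_op_mapping_ = {}

def _support_op(*args):
    # Map each operator symbol to the decorated handler function.
    def inner(func):
        for one_arg in args:
            _op_mapping_[one_arg] = func
        return func
    return inner

@_support_op('=', '==')
def _compare_equal(left, right):
    return operator.eq(left, right)

print(_op_mapping_['==']('a', 'a'))   # True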
def clean(inst):
"""Routine to return VEFI data cleaned to the specified level
Parameters
-----------
inst : (pysat.Instrument)
Instrument class object, whose attribute clean_level is used to return
the desired level of data selectivity.
Returns
--------
Void : (NoneType)
data in inst is modified in-place.
Notes
--------
'dusty' or 'clean' removes data when interpolation flag is set to 1
"""
if (inst.clean_level == 'dusty') | (inst.clean_level == 'clean'):
idx, = np.where(inst['B_flag'] == 0)
inst.data = inst[idx, :]
return None | Routine to return VEFI data cleaned to the specified level
Parameters
-----------
inst : (pysat.Instrument)
Instrument class object, whose attribute clean_level is used to return
the desired level of data selectivity.
Returns
--------
Void : (NoneType)
data in inst is modified in-place.
Notes
--------
'dusty' or 'clean' removes data when interpolation flag is set to 1 | entailment |
def remove_icon_names(inst, target=None):
"""Removes leading text on ICON project variable names
Parameters
----------
inst : pysat.Instrument
ICON associated pysat.Instrument object
target : str
Leading string to remove. If none supplied,
ICON project standards are used to identify and remove
leading text
Returns
-------
None
Modifies Instrument object in place
"""
if target is None:
lev = inst.tag
if lev == 'level_2':
lev = 'L2'
elif lev == 'level_0':
lev = 'L0'
elif lev == 'level_0p':
lev = 'L0P'
elif lev == 'level_1.5':
lev = 'L1-5'
elif lev == 'level_1':
lev = 'L1'
else:
            raise ValueError('Unknown ICON data level')
# get instrument code
sid = inst.sat_id.lower()
if sid == 'a':
sid = 'IVM_A'
elif sid == 'b':
sid = 'IVM_B'
else:
raise ValueError('Unknown ICON satellite ID')
prepend_str = '_'.join(('ICON', lev, sid)) + '_'
else:
prepend_str = target
inst.data.rename(columns=lambda x: x.split(prepend_str)[-1], inplace=True)
inst.meta.data.rename(index=lambda x: x.split(prepend_str)[-1], inplace=True)
orig_keys = inst.meta.keys_nD()
for key in orig_keys:
new_key = key.split(prepend_str)[-1]
new_meta = inst.meta.pop(key)
new_meta.data.rename(index=lambda x: x.split(prepend_str)[-1], inplace=True)
inst.meta[new_key] = new_meta
return | Removes leading text on ICON project variable names
Parameters
----------
inst : pysat.Instrument
ICON associated pysat.Instrument object
target : str
Leading string to remove. If none supplied,
ICON project standards are used to identify and remove
leading text
Returns
-------
None
Modifies Instrument object in place | entailment |
def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
"""Return a Pandas Series of every file for chosen satellite data
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are '1min' and '5min'.
(default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files
"""
if format_str is None and data_path is not None:
if (tag == '1min') | (tag == '5min'):
min_fmt = ''.join(['omni_hro_', tag,
'{year:4d}{month:02d}{day:02d}_v01.cdf'])
files = pysat.Files.from_os(data_path=data_path, format_str=min_fmt)
# files are by month, just add date to monthly filename for
# each day of the month. load routine will use date to select out
# appropriate data
if not files.empty:
files.ix[files.index[-1] + pds.DateOffset(months=1) -
pds.DateOffset(days=1)] = files.iloc[-1]
files = files.asfreq('D', 'pad')
# add the date to the filename
files = files + '_' + files.index.strftime('%Y-%m-%d')
return files
else:
raise ValueError('Unknown tag')
elif format_str is None:
estr = 'A directory must be passed to the loading routine for OMNI HRO'
raise ValueError (estr)
else:
return pysat.Files.from_os(data_path=data_path, format_str=format_str) | Return a Pandas Series of every file for chosen satellite data
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are '1min' and '5min'.
(default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files | entailment |
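The monthly-to-daily padding step above, shown on two invented monthly filenames with pandas only; the routine itself uses the older .ix accessor, replaced here with .loc.
import pandas as pd

files = pd.Series(['omni_hro_1min_20090101_v01.cdf',
                   'omni_hro_1min_20090201_v01.cdf'],
                  index=pd.to_datetime(['2009-01-01', '2009-02-01']))
# Repeat the last monthly file through the end of its month, then pad daily
# and tag each entry with its own date, as the routine above does.
end = files.index[-1] + pd.DateOffset(months=1) - pd.DateOffset(days=1)
files.loc[end] = files.iloc[-1]
files = files.asfreq('D', 'pad')
files = files + '_' + files.index.strftime('%Y-%m-%d')
print(files.head(3))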
def time_shift_to_magnetic_poles(inst):
""" OMNI data is time-shifted to bow shock. Time shifted again
to intersections with magnetic pole.
Parameters
-----------
inst : Instrument class object
Instrument with OMNI HRO data
Notes
---------
Time shift calculated using distance to bow shock nose (BSN)
and velocity of solar wind along x-direction.
Warnings
--------
Use at own risk.
"""
# need to fill in Vx to get an estimate of what is going on
inst['Vx'] = inst['Vx'].interpolate('nearest')
inst['Vx'] = inst['Vx'].fillna(method='backfill')
inst['Vx'] = inst['Vx'].fillna(method='pad')
inst['BSN_x'] = inst['BSN_x'].interpolate('nearest')
inst['BSN_x'] = inst['BSN_x'].fillna(method='backfill')
inst['BSN_x'] = inst['BSN_x'].fillna(method='pad')
# make sure there are no gaps larger than a minute
inst.data = inst.data.resample('1T').interpolate('time')
time_x = inst['BSN_x']*6371.2/-inst['Vx']
idx, = np.where(np.isnan(time_x))
if len(idx) > 0:
print (time_x[idx])
print (time_x)
time_x_offset = [pds.DateOffset(seconds = time)
for time in time_x.astype(int)]
new_index=[]
for i, time in enumerate(time_x_offset):
new_index.append(inst.data.index[i] + time)
inst.data.index = new_index
inst.data = inst.data.sort_index()
return | OMNI data is time-shifted to bow shock. Time shifted again
to intersections with magnetic pole.
Parameters
-----------
inst : Instrument class object
Instrument with OMNI HRO data
Notes
---------
Time shift calculated using distance to bow shock nose (BSN)
and velocity of solar wind along x-direction.
Warnings
--------
Use at own risk. | entailment |
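The per-sample time shift above amounts to adding a seconds offset to each index entry; a compact pandas equivalent on invented values (the routine itself builds a list of pds.DateOffset objects instead).
import pandas as pd

data = pd.DataFrame({'Vx': [-400.0, -450.0], 'BSN_x': [13.0, 12.0]},
                    index=pd.to_datetime(['2009-01-01 00:00', '2009-01-01 00:01']))
# Propagation delay in seconds from bow shock distance (Earth radii) and Vx.
delay_s = data['BSN_x'] * 6371.2 / -data['Vx']
data.index = data.index + pd.to_timedelta(delay_s.to_numpy(), unit='s')
print(data)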