sentence1 | sentence2 | label
---|---|---|
def _fix_bias_shape(self, op_name, inputs, attrs):
"""A workaround to reshape bias term to (1, num_channel)."""
if op_name in ('Add', 'Mul') and len(self._params) > 0 and \
('broadcast' in attrs and attrs['broadcast'] == 1):
assert len(list(inputs)) == 2
bias_name = self._renames.get(inputs[1], inputs[1])
bias = self._params[bias_name]
assert len(bias.shape) == 1
# reshape to (1, C, 1, 1) so the bias can broadcast over NCHW data
bias = mx.nd.array(bias.asnumpy().reshape((1, -1, 1, 1)))
# broadcast_add expects shape with sym.variable
self._nodes[bias_name] = mx.sym.Variable(name=bias_name, shape=bias.shape)
self._params[bias_name] = bias | A workaround to reshape bias term to (1, num_channel). | entailment |
def _fix_channels(self, op, attrs, inputs):
"""A workaround for getting 'channels' or 'units' since onnx don't provide
these attributes. We check the shape of weights provided to get the number.
"""
if op not in [mx.sym.Convolution, mx.sym.Deconvolution, mx.sym.FullyConnected]:
return attrs
weight_name = self._renames[inputs[1]]
if weight_name not in self._params:
raise ValueError("Unable to get channels/units attr from onnx graph.")
else:
wshape = self._params[weight_name].shape
assert len(wshape) >= 2, "Weights shape is invalid: {}".format(wshape)
if op in [mx.sym.FullyConnected]:
attrs['num_hidden'] = wshape[0]
else:
if op == mx.sym.Convolution:
# Weight shape for Conv: (M x C x kH x kW), where M is the number of
# feature maps/filters and C is the number of channels
attrs['num_filter'] = wshape[0]
elif op == mx.sym.Deconvolution:
# Weight shape for DeConv : (C x M x kH x kW) : M is number of
# feature maps/filters and C is number of channels
attrs['num_filter'] = wshape[1]
return attrs | A workaround for getting 'channels' or 'units' since ONNX doesn't provide
these attributes. We check the shape of the weights provided to get the number. | entailment |
def run(self, inputs, **kwargs):
"""Run model inference and return the result
Parameters
----------
inputs : numpy array
input to run a layer on
Returns
-------
params : numpy array
result obtained after running the inference on mxnet
"""
input_data = np.asarray(inputs[0], dtype='f')
# create module, passing cpu context
if self.device == 'CPU':
ctx = mx.cpu()
else:
raise NotImplementedError("Only CPU context is supported for now")
mod = mx.mod.Module(symbol=self.symbol, data_names=['input_0'], context=ctx,
label_names=None)
mod.bind(for_training=False, data_shapes=[('input_0', input_data.shape)],
label_shapes=None)
mod.set_params(arg_params=self.params, aux_params=None)
# run inference
batch = namedtuple('Batch', ['data'])
mod.forward(batch([mx.nd.array(input_data)]))
result = mod.get_outputs()[0].asnumpy()
return [result] | Run model inference and return the result
Parameters
----------
inputs : numpy array
input to run a layer on
Returns
-------
params : numpy array
result obtained after running the inference on mxnet | entailment |
def _parse_default(self, target):
"""Helper function to parse default values."""
if not isinstance(target, (list, tuple)):
k, v, t = target, None, lambda x: x
elif len(target) == 1:
k, v, t = target[0], None, lambda x: x
elif len(target) == 2:
k, v, t = target[0], target[1], lambda x: x
elif len(target) > 2:
k, v, t = target[0], target[1], target[2]
else:
# empty sequence: leave k as None so the ValueError below is raised
k, v, t = None, None, None
if not isinstance(k, string_types):
msg = "{} is not a valid target, (name, default) expected.".format(target)
raise ValueError(msg)
return k, v, t | Helper function to parse default values. | entailment |
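A standalone sketch (hypothetical, not part of the original class) showing how the (name, default, transform) rules above behave for the different target shapes:

```python
# Free-function copy of the parsing rules above, for illustration only
# (empty sequences, which the original rejects with a ValueError, are not handled here).
def parse_default(target):
    if not isinstance(target, (list, tuple)):
        return target, None, lambda x: x
    if len(target) == 1:
        return target[0], None, lambda x: x
    if len(target) == 2:
        return target[0], target[1], lambda x: x
    return target[0], target[1], target[2]

print(parse_default('stride'))            # ('stride', None, <lambda>)
print(parse_default(('pad', 0)))          # ('pad', 0, <lambda>)
print(parse_default(('dilate', 1, int)))  # ('dilate', 1, <class 'int'>)
```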
def _parse_bool(self, value):
"""Helper function to parse default boolean values."""
if isinstance(value, string_types):
return value.strip().lower() in ['true', '1', 't', 'y', 'yes']
return bool(value) | Helper function to parse default boolean values. | entailment |
def _required_attr(self, attr, key):
"""Wrapper for getting required attributes."""
assert isinstance(attr, dict)
if key not in attr:
raise AttributeError("Required attribute {} not found.".format(key))
return attr[key] | Wrapper for getting required attributes. | entailment |
def make_graph(node, inputs):
""" Created ONNX GraphProto from node"""
initializer = []
tensor_input_info = []
tensor_output_info = []
# Adding input tensor info.
for index in range(len(node.input)):
tensor_input_info.append(
helper.make_tensor_value_info(str(node.input[index]), TensorProto.FLOAT, [1]))
# Creating an initializer for Weight params.
# Assumes that the weight param is named 'W'.
# TODO: Handle multiple weight params.
# TODO: Add for "bias" if needed
if node.input[index] == 'W':
dim = inputs[index].shape
param_tensor = helper.make_tensor(
name=node.input[index],
data_type=TensorProto.FLOAT,
dims=dim,
vals=inputs[index].flatten())
initializer.append(param_tensor)
# Adding output tensor info.
for index in range(len(node.output)):
tensor_output_info.append(
helper.make_tensor_value_info(str(node.output[index]), TensorProto.FLOAT, [1]))
# creating graph proto object.
graph_proto = helper.make_graph(
[node],
"test",
tensor_input_info,
tensor_output_info,
initializer=initializer)
return graph_proto | Creates an ONNX GraphProto from a node. | entailment |
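A hedged usage sketch for the helper above; the Relu node and the random input array are made-up examples, and the helper is assumed to be reachable as `MXNetBackend.make_graph` as in the `run_node` call below:

```python
import numpy as np
from onnx import helper

# Wrap a single Relu node into a GraphProto for testing.
node = helper.make_node('Relu', inputs=['X'], outputs=['Y'])
inputs = [np.random.rand(1, 3).astype('float32')]   # one array per node input
graph_proto = MXNetBackend.make_graph(node, inputs)
print(graph_proto.name)                              # "test"
```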
def run_node(cls, node, inputs, device='CPU'): # pylint: disable=arguments-differ
"""Running individual node inference on mxnet engine and
return the result to onnx test infrastructure.
Parameters
----------
node : onnx node object
loaded onnx node (individual layer)
inputs : numpy array
input to run a node on
device : 'CPU'
device to run a node on
Returns
-------
params : numpy array
result obtained after running the operator
"""
graph = GraphProto()
sym, _ = graph.from_onnx(MXNetBackend.make_graph(node, inputs))
data_names = [i for i in sym.get_internals().list_inputs()]
data_shapes = []
reduce_op_types = {'ReduceMin', 'ReduceMax', 'ReduceMean',
'ReduceProd', 'ReduceSum', 'Slice', 'Pad',
'Squeeze', 'Upsample', 'Reshape', 'Conv', 'ConvTranspose'}
# Adding extra dimension of batch_size 1 if the batch_size is different for multiple inputs.
for idx, input_name in enumerate(data_names):
batch_size = 1
if len(inputs[idx].shape) < 4 and len(inputs) > 1 and \
len(set(x.shape[0] for x in inputs)) != 1:
tuples = ((batch_size,), inputs[idx].shape)
new_shape = sum(tuples, ())
data_shapes.append((input_name, new_shape))
else:
data_shapes.append((input_name, inputs[idx].shape))
# create module, passing cpu context
if device == 'CPU':
ctx = mx.cpu()
else:
raise NotImplementedError("Only CPU context is supported for now")
# create a module
mod = mx.mod.Module(symbol=sym, data_names=data_names, context=ctx, label_names=None)
mod.bind(for_training=False, data_shapes=data_shapes, label_shapes=None)
# initializing parameters for calculating result of each individual node
mod.init_params()
data_forward = []
for idx, input_name in enumerate(data_names):
# slice and pad operator tests needs 1 less dimension in forward pass
# otherwise it will throw an error.
# for squeeze operator, need to retain shape of input as provided
val = inputs[idx]
if node.op_type in reduce_op_types:
data_forward.append(mx.nd.array(val))
else:
data_forward.append(mx.nd.array([val]))
mod.forward(mx.io.DataBatch(data_forward))
result = mod.get_outputs()[0].asnumpy()
if node.op_type in reduce_op_types:
return [result]
return result | Run individual node inference on the MXNet engine and
return the result to the ONNX test infrastructure.
Parameters
----------
node : onnx node object
loaded onnx node (individual layer)
inputs : numpy array
input to run a node on
device : 'CPU'
device to run a node on
Returns
-------
params : numpy array
result obtained after running the operator | entailment |
def prepare(cls, model, device='CPU', **kwargs):
"""For running end to end model(used for onnx test backend)
Parameters
----------
model : onnx ModelProto object
loaded onnx graph
device : 'CPU'
specifying device to run test on
kwargs :
other arguments
Returns
-------
MXNetBackendRep : object
Returns object of MXNetBackendRep class which will be in turn
used to run inference on the input model and return the result for comparison.
"""
graph = GraphProto()
sym, params = graph.from_onnx(model.graph)
return MXNetBackendRep(sym, params, device) | For running an end-to-end model (used for the ONNX test backend)
Parameters
----------
model : onnx ModelProto object
loaded onnx graph
device : 'CPU'
specifying device to run test on
kwargs :
other arguments
Returns
-------
MXNetBackendRep : object
Returns object of MXNetBackendRep class which will be in turn
used to run inference on the input model and return the result for comparison. | entailment |
def _revert_caffe2_pad(attr):
"""Removing extra padding from Caffe2."""
if len(attr) == 4:
attr = attr[:2]
elif len(attr) == 2:
pass
else:
raise ValueError("Invalid caffe2 type padding: {}".format(attr))
return attr | Removing extra padding from Caffe2. | entailment |
def _pad_sequence_fix(attr, kernelDim=None):
"""Changing onnx's pads sequence to match with mxnet's pad_width
mxnet: (x1_begin, x1_end, ... , xn_begin, xn_end)
onnx: (x1_begin, x2_begin, ... , x1_end, x2_end, ... , xn_end)"""
new_attr = ()
if len(attr) % 2 == 0:
for index in range(int(len(attr) / 2)):
new_attr = new_attr + attr[index::int(len(attr) / 2)]
# Making sure pad values are in the attr for all axes.
if kernelDim is not None:
while len(new_attr) < kernelDim*2:
new_attr = new_attr + (0, 0)
return new_attr | Changing onnx's pads sequence to match with mxnet's pad_width
mxnet: (x1_begin, x1_end, ... , xn_begin, xn_end)
onnx: (x1_begin, x2_begin, ... , x1_end, x2_end, ... , xn_end) | entailment |
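A small worked example of the pad reordering above, using a standalone copy of the function (parameter renamed to `kernel_dim` for the sketch only):

```python
# Standalone copy of the pad reordering logic above, for a quick worked example.
def pad_sequence_fix(attr, kernel_dim=None):
    new_attr = ()
    if len(attr) % 2 == 0:
        half = len(attr) // 2
        for index in range(half):
            new_attr = new_attr + attr[index::half]
    if kernel_dim is not None:
        while len(new_attr) < kernel_dim * 2:
            new_attr = new_attr + (0, 0)
    return new_attr

# ONNX: (x1_begin, x2_begin, x1_end, x2_end) -> MXNet: (x1_begin, x1_end, x2_begin, x2_end)
print(pad_sequence_fix((1, 2, 3, 4)))                # (1, 3, 2, 4)
print(pad_sequence_fix((1, 2, 3, 4), kernel_dim=4))  # (1, 3, 2, 4, 0, 0, 0, 0)
```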
def import_model(model_file):
"""Imports the supplied ONNX model file into MXNet symbol and parameters.
Parameters
----------
model_file : ONNX model file name
Returns
-------
sym : mx.symbol
Compatible mxnet symbol
params : dict of str to mx.ndarray
Dict of converted parameters stored in mx.ndarray format
"""
graph = GraphProto()
# loads model file and returns ONNX protobuf object
model_proto = onnx.load(model_file)
sym, params = graph.from_onnx(model_proto.graph)
return sym, params | Imports the supplied ONNX model file into MXNet symbol and parameters.
Parameters
----------
model_file : ONNX model file name
Returns
-------
sym : mx.symbol
Compatible mxnet symbol
params : dict of str to mx.ndarray
Dict of converted parameters stored in mx.ndarray format | entailment |
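A usage sketch for `import_model`; the file name, input name and input shape below are assumptions for illustration, not values taken from the original:

```python
import mxnet as mx

# Hypothetical ONNX file and input shape; 'input_0' mirrors the name used in run() above.
sym, params = import_model('resnet.onnx')
mod = mx.mod.Module(symbol=sym, data_names=['input_0'], label_names=None)
mod.bind(for_training=False, data_shapes=[('input_0', (1, 3, 224, 224))])
mod.set_params(arg_params=params, aux_params=None)
```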
def generate_hash(filepath):
"""Public function that reads a local file and generates a SHA256 hash digest for it"""
fr = FileReader(filepath)
data = fr.read_bin()
return _calculate_sha256(data) | Public function that reads a local file and generates a SHA256 hash digest for it | entailment |
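`FileReader` and `_calculate_sha256` are not shown above; a minimal sketch of what the hashing helper is assumed to do with the raw bytes returned by `read_bin()`:

```python
import hashlib

def _calculate_sha256(binary_data):
    # Hash the raw bytes and return the 64-character hex digest.
    return hashlib.sha256(binary_data).hexdigest()
```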
def generate_tar_files(directory_list):
"""Public function that reads a list of local directories and generates tar archives from them"""
tar_file_list = []
for directory in directory_list:
if dir_exists(directory):
_generate_tar(directory) # create the tar archive
tar_file_list.append(directory + '.tar') # append the tar archive filename to the returned tar_file_list list
else:
stderr("The directory '" + directory + "' does not exist and a tar archive could not be created from it.", exit=1)
return tar_file_list | Public function that reads a list of local directories and generates tar archives from them | entailment |
def remove_tar_files(file_list):
"""Public function that removes temporary tar archive files in a local directory"""
for f in file_list:
if file_exists(f) and f.endswith('.tar'):
os.remove(f) | Public function that removes temporary tar archive files in a local directory | entailment |
def _generate_tar(dir_path):
"""Private function that reads a local directory and generates a tar archive from it"""
try:
with tarfile.open(dir_path + '.tar', 'w') as tar:
tar.add(dir_path)
except tarfile.TarError as e:
stderr("Error: tar archive creation failed [" + str(e) + "]", exit=1) | Private function that reads a local directory and generates a tar archive from it | entailment |
def encrypt_file(self, inpath, force_nocompress=False, force_compress=False, armored=False, checksum=False):
"""public method for single file encryption with optional compression, ASCII armored formatting, and file hash digest generation"""
if armored:
if force_compress:
command_stub = self.command_maxcompress_armored
elif force_nocompress:
command_stub = self.command_nocompress_armored
else:
if self._is_compress_filetype(inpath):
command_stub = self.command_default_armored
else:
command_stub = self.command_nocompress_armored
else:
if force_compress:
command_stub = self.command_maxcompress
elif force_nocompress:
command_stub = self.command_nocompress
else:
if self._is_compress_filetype(inpath):
command_stub = self.command_default
else:
command_stub = self.command_nocompress
encrypted_outpath = self._create_outfilepath(inpath)
system_command = command_stub + encrypted_outpath + " --passphrase " + quote(self.passphrase) + " --symmetric " + quote(inpath)
try:
response = muterun(system_command)
# check returned status code
if response.exitcode == 0:
stdout(encrypted_outpath + " was generated from " + inpath)
if checksum: # add a SHA256 hash digest of the encrypted file - requested by user --hash flag in command
from crypto.library import hash
encrypted_file_hash = hash.generate_hash(encrypted_outpath)
if len(encrypted_file_hash) == 64:
stdout("SHA256 hash digest for " + encrypted_outpath + " :")
stdout(encrypted_file_hash)
else:
stdout("Unable to generate a SHA256 hash digest for the file " + encrypted_outpath)
else:
stderr(response.stderr, 0)
stderr("Encryption failed")
sys.exit(1)
except Exception as e:
stderr("There was a problem with the execution of gpg. Encryption failed. Error: [" + str(e) + "]")
sys.exit(1) | public method for single file encryption with optional compression, ASCII armored formatting, and file hash digest generation | entailment |
def encrypt_files(self, file_list, force_nocompress=False, force_compress=False, armored=False, checksum=False):
"""public method for multiple file encryption with optional compression, ASCII armored formatting, and file hash digest generation"""
for the_file in file_list:
self.encrypt_file(the_file, force_nocompress, force_compress, armored, checksum) | public method for multiple file encryption with optional compression, ASCII armored formatting, and file hash digest generation | entailment |
def _is_compress_filetype(self, inpath):
"""private method that performs magic number and size check on file to determine whether to compress the file"""
# check for common file type suffixes in order to avoid the need for file reads to check magic number for binary vs. text file
if self._is_common_binary(inpath):
return False
elif self._is_common_text(inpath):
return True
else:
# files > 10kB get checked for compression (arbitrary decision to skip compression on small files)
the_file_size = file_size(inpath)
if the_file_size > 10240:
if the_file_size > 512000: # ~500kB seems to be the break point where the compression benefit is offset by the additional file read, so limit these tests to files > 500kB
try:
system_command = "file --mime-type -b " + quote(inpath)
response = muterun(system_command)
if response.stdout[0:5] == "text/": # check for a text file mime type
return True # appropriate size, appropriate file mime type
else:
return False # appropriate size, inappropriate file mime type
except Exception:
return False
else:
return True # if file size is < 500kB, skip the additional file read and just go with compression
else:
return False | private method that performs magic number and size check on file to determine whether to compress the file | entailment |
def _is_common_binary(self, inpath):
"""private method to compare file path mime type to common binary file types"""
# make local variables for the available char numbers in the suffix types to be tested
two_suffix = inpath[-3:]
three_suffix = inpath[-4:]
four_suffix = inpath[-5:]
# test for inclusion in the instance variable common_binaries (defined in __init__)
if two_suffix in self.common_binaries:
return True
elif three_suffix in self.common_binaries:
return True
elif four_suffix in self.common_binaries:
return True
else:
return False | private method to compare file path mime type to common binary file types | entailment |
def _is_common_text(self, inpath):
"""private method to compare file path mime type to common text file types"""
# make local variables for the available char numbers in the suffix types to be tested
one_suffix = inpath[-2:]
two_suffix = inpath[-3:]
three_suffix = inpath[-4:]
four_suffix = inpath[-5:]
# test for inclusion in the instance variable common_text (defined in __init__)
if one_suffix in self.common_text:
return True
elif two_suffix in self.common_text:
return True
elif three_suffix in self.common_text:
return True
elif four_suffix in self.common_text:
return True
else:
return False | private method to compare file path mime type to common text file types | entailment |
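The suffix collections used above (`self.common_binaries`, `self.common_text`) are defined in `__init__` and not shown here; the values below are assumptions, included only to illustrate the slicing checks:

```python
# Assumed example values; the real sets live in the class __init__.
common_binaries = {'.gz', '.zip', '.png', '.jpg', '.gif', '.pdf', '.mp3', '.mp4'}
common_text = {'.c', '.h', '.py', '.rb', '.js', '.md', '.txt', '.csv', '.yml'}

inpath = 'notes.txt'
print(inpath[-4:] in common_text)      # True  -> treat as text, compress before encryption
print(inpath[-4:] in common_binaries)  # False -> not a known binary suffix
```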
def knn_impute_few_observed(
X, missing_mask, k, verbose=False, print_interval=100):
"""
Seems to be the fastest kNN implementation. Pre-sorts each row's neighbors
and then filters these sorted indices using each column's mask of
observed values.
Important detail: if k observed values are not available, it uses fewer
than k neighboring rows.
Parameters
----------
X : np.ndarray
Matrix to fill of shape (n_samples, n_features)
missing_mask : np.ndarray
Boolean array of same shape as X
k : int
verbose : bool
"""
start_t = time.time()
n_rows, n_cols = X.shape
# put the missing mask in column major order since it's accessed
# one column at a time
missing_mask_column_major = np.asarray(missing_mask, order="F")
observed_mask_column_major = ~missing_mask_column_major
X_column_major = X.copy(order="F")
X_row_major, D, effective_infinity = \
knn_initialize(X, missing_mask, verbose=verbose)
# sort each row's neighbor indices by increasing distance
D_sorted = np.argsort(D, axis=1)
inv_D = 1.0 / D
D_valid_mask = D < effective_infinity
valid_distances_per_row = D_valid_mask.sum(axis=1)
# trim the number of other rows we consider to exclude those
# with infinite distances
D_sorted = [
D_sorted[i, :count]
for i, count in enumerate(valid_distances_per_row)
]
dot = np.dot
for i in range(n_rows):
missing_row = missing_mask[i, :]
missing_indices = np.where(missing_row)[0]
row_weights = inv_D[i, :]
if verbose and i % print_interval == 0:
print(
"Imputing row %d/%d with %d missing, elapsed time: %0.3f" % (
i + 1,
n_rows,
len(missing_indices),
time.time() - start_t))
candidate_neighbor_indices = D_sorted[i]
for j in missing_indices:
observed = observed_mask_column_major[:, j]
sorted_observed = observed[candidate_neighbor_indices]
observed_neighbor_indices = candidate_neighbor_indices[sorted_observed]
k_nearest_indices = observed_neighbor_indices[:k]
weights = row_weights[k_nearest_indices]
weight_sum = weights.sum()
if weight_sum > 0:
column = X_column_major[:, j]
values = column[k_nearest_indices]
X_row_major[i, j] = dot(values, weights) / weight_sum
return X_row_major | Seems to be the fastest kNN implementation. Pre-sorts each row's neighbors
and then filters these sorted indices using each column's mask of
observed values.
Important detail: if k observed values are not available, it uses fewer
than k neighboring rows.
Parameters
----------
X : np.ndarray
Matrix to fill of shape (n_samples, n_features)
missing_mask : np.ndarray
Boolean array of same shape as X
k : int
verbose : bool | entailment |
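A tiny end-to-end usage sketch, assuming `knn_impute_few_observed` and its helpers (`knn_initialize`, `all_pairs_normalized_distances`) are importable from the same module:

```python
import numpy as np

X = np.array([[1.0, 2.0, np.nan],
              [1.1, 2.1, 3.1],
              [0.9, np.nan, 2.9]])
missing_mask = np.isnan(X)
X_filled = knn_impute_few_observed(X.copy(), missing_mask, k=2)
print(X_filled)  # NaNs replaced by inverse-distance-weighted neighbor averages
```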
def knn_initialize(
X,
missing_mask,
verbose=False,
min_dist=1e-6,
max_dist_multiplier=1e6):
"""
Fill X with NaN values if necessary, construct the n_samples x n_samples
distance matrix and set the self-distance of each row to infinity.
Returns contents of X laid out in row-major, the distance matrix,
and an "effective infinity" which is larger than any entry of the
distance matrix.
"""
X_row_major = X.copy("C")
if missing_mask.sum() != np.isnan(X_row_major).sum():
# if the missing values have already been zero-filled need
# to put NaN's back in the data matrix for the distances function
X_row_major[missing_mask] = np.nan
D = all_pairs_normalized_distances(X_row_major)
D_finite_flat = D[np.isfinite(D)]
if len(D_finite_flat) > 0:
max_dist = max_dist_multiplier * max(1, D_finite_flat.max())
else:
max_dist = max_dist_multiplier
# set diagonal of distance matrix to a large value since we don't want
# points considering themselves as neighbors
np.fill_diagonal(D, max_dist)
D[D < min_dist] = min_dist # prevents 0s
D[D > max_dist] = max_dist # prevents infinities
return X_row_major, D, max_dist | Fill X with NaN values if necessary, construct the n_samples x n_samples
distance matrix and set the self-distance of each row to infinity.
Returns contents of X laid out in row-major, the distance matrix,
and an "effective infinity" which is larger than any entry of the
distance matrix. | entailment |
def knn_impute_optimistic(
X,
missing_mask,
k,
verbose=False,
print_interval=100):
"""
Fill in the given incomplete matrix using k-nearest neighbor imputation.
This version assumes that most of the time the same neighbors will be
used so first performs the weighted average of a row's k-nearest neighbors
and checks afterward whether it was valid (due to possible missing values).
Has been observed to be a lot faster for an images matrix with 1/4 of its
entries missing, 1000 rows and ~9000 columns.
Parameters
----------
X : np.ndarray
Matrix to fill of shape (n_samples, n_features)
missing_mask : np.ndarray
Boolean array of same shape as X
k : int
verbose : bool
Modifies X by replacing its missing values with weighted averages of
similar rows. Returns the modified X.
"""
start_t = time.time()
n_rows, n_cols = X.shape
X_row_major, D, _ = knn_initialize(X, missing_mask, verbose=verbose)
D_sorted_indices = np.argsort(D, axis=1)
X_column_major = X_row_major.copy(order="F")
dot = np.dot
# preallocate array to prevent repeated creation in the following loops
neighbor_weights = np.ones(k, dtype=X.dtype)
missing_mask_column_major = np.asarray(missing_mask, order="F")
observed_mask_column_major = ~missing_mask_column_major
for i in range(n_rows):
missing_columns = np.where(missing_mask[i])[0]
if verbose and i % print_interval == 0:
print(
"Imputing row %d/%d with %d missing, elapsed time: %0.3f" % (
i + 1,
n_rows,
len(missing_columns),
time.time() - start_t))
n_missing_columns = len(missing_columns)
if n_missing_columns == 0:
continue
row_distances = D[i, :]
neighbor_indices = D_sorted_indices[i, :]
X_missing_columns = X_column_major[:, missing_columns]
# precompute these for the fast path where the k nearest neighbors
# are not missing the feature value we're currently trying to impute
k_nearest_indices = neighbor_indices[:k]
np.divide(1.0, row_distances[k_nearest_indices], out=neighbor_weights)
# optimistically impute all the columns from the k nearest neighbors
# we'll have to back-track for some of the columns for which
# one of the neighbors did not have a value
X_knn = X_missing_columns[k_nearest_indices, :]
weighted_average_of_neighboring_rows = dot(
X_knn.T,
neighbor_weights)
sum_weights = neighbor_weights.sum()
weighted_average_of_neighboring_rows /= sum_weights
imputed_values = weighted_average_of_neighboring_rows
observed_mask_missing_columns = observed_mask_column_major[:, missing_columns]
observed_mask_missing_columns_sorted = observed_mask_missing_columns[
neighbor_indices, :]
# We can determine the maximum number of other rows that must be
# inspected across all features missing for this row by
# looking at the column-wise running sums of the observed feature
# matrix.
observed_cumulative_sum = observed_mask_missing_columns_sorted.cumsum(axis=0)
sufficient_rows = (observed_cumulative_sum == k)
n_rows_needed = sufficient_rows.argmax(axis=0) + 1
max_rows_needed = n_rows_needed.max()
if max_rows_needed == k:
# if we never needed more than k rows then we're done after the
# optimistic averaging above, so go on to the next sample
X[i, missing_columns] = imputed_values
continue
# truncate all the sorted arrays to only include the necessary
# number of rows (should significantly speed up the "slow" path)
necessary_indices = neighbor_indices[:max_rows_needed]
d_sorted = row_distances[necessary_indices]
X_missing_columns_sorted = X_missing_columns[necessary_indices, :]
observed_mask_missing_columns_sorted = observed_mask_missing_columns_sorted[
:max_rows_needed, :]
for missing_column_idx in range(n_missing_columns):
# since all the arrays we're looking into have already been
# sliced out at the missing features, we need to address these
# features from 0..n_missing using missing_idx rather than j
if n_rows_needed[missing_column_idx] == k:
assert np.isfinite(imputed_values[missing_column_idx]), \
"Expected finite imputed value #%d (column #%d for row %d)" % (
missing_column_idx,
missing_columns[missing_column_idx],
i)
continue
row_mask = observed_mask_missing_columns_sorted[:, missing_column_idx]
sorted_column_values = X_missing_columns_sorted[:, missing_column_idx]
neighbor_distances = d_sorted[row_mask][:k]
# may not have enough values in a column for all k neighbors
k_or_less = len(neighbor_distances)
usable_weights = neighbor_weights[:k_or_less]
np.divide(
1.0,
neighbor_distances, out=usable_weights)
neighbor_values = sorted_column_values[row_mask][:k_or_less]
imputed_values[missing_column_idx] = (
dot(neighbor_values, usable_weights) / usable_weights.sum())
X[i, missing_columns] = imputed_values
return X | Fill in the given incomplete matrix using k-nearest neighbor imputation.
This version assumes that most of the time the same neighbors will be
used so first performs the weighted average of a row's k-nearest neighbors
and checks afterward whether it was valid (due to possible missing values).
Has been observed to be a lot faster for an images matrix with 1/4 of its
entries missing, 1000 rows and ~9000 columns.
Parameters
----------
X : np.ndarray
Matrix to fill of shape (n_samples, n_features)
missing_mask : np.ndarray
Boolean array of same shape as X
k : int
verbose : bool
Modifies X by replacing its missing values with weighted averages of
similar rows. Returns the modified X. | entailment |
def all_pairs_normalized_distances(X):
"""
We can't really compute distances over incomplete data since
rows are missing different numbers of entries.
The next best thing is the mean squared difference between two vectors
(a normalized distance), which gets computed only over the columns that
two vectors have in common. If two vectors have no features in common
then their distance is infinity.
Parameters
----------
X : np.ndarray
Data matrix of shape (n_samples, n_features) with missing entries
marked using np.nan
Returns a (n_samples, n_samples) matrix of pairwise normalized distances.
"""
n_rows, n_cols = X.shape
# matrix of mean squared difference between samples
D = np.ones((n_rows, n_rows), dtype="float32", order="C") * np.inf
# we can cheaply determine the number of columns that two rows share
# by taking the dot product between their finite masks
observed_elements = np.isfinite(X).astype(int)
n_shared_features_for_pairs_of_rows = np.dot(
observed_elements,
observed_elements.T)
no_overlapping_features_rows = n_shared_features_for_pairs_of_rows == 0
number_incomparable_rows = no_overlapping_features_rows.sum(axis=1)
row_overlaps_every_other_row = (number_incomparable_rows == 0)
row_overlaps_no_other_rows = number_incomparable_rows == n_rows
valid_rows_mask = ~row_overlaps_no_other_rows
valid_row_indices = np.where(valid_rows_mask)[0]
# preallocate all the arrays that we would otherwise create in the
# following loop and pass them as "out" parameters to NumPy ufuncs
diffs = np.zeros_like(X)
missing_differences = np.zeros_like(diffs, dtype=bool)
valid_rows = np.zeros(n_rows, dtype=bool)
ssd = np.zeros(n_rows, dtype=X.dtype)
for i in valid_row_indices:
x = X[i, :]
np.subtract(X, x.reshape((1, n_cols)), out=diffs)
np.isnan(diffs, out=missing_differences)
# zero out all NaN's
diffs[missing_differences] = 0
# square each difference
diffs **= 2
observed_counts_per_row = n_shared_features_for_pairs_of_rows[i]
if row_overlaps_every_other_row[i]:
# add up all the non-missing squared differences
diffs.sum(axis=1, out=D[i, :])
D[i, :] /= observed_counts_per_row
else:
np.logical_not(no_overlapping_features_rows[i], out=valid_rows)
# add up all the non-missing squared differences
diffs.sum(axis=1, out=ssd)
ssd[valid_rows] /= observed_counts_per_row[valid_rows]
D[i, valid_rows] = ssd[valid_rows]
return D | We can't really compute distances over incomplete data since
rows are missing different numbers of entries.
The next best thing is the mean squared difference between two vectors
(a normalized distance), which gets computed only over the columns that
two vectors have in common. If two vectors have no features in common
then their distance is infinity.
Parameters
----------
X : np.ndarray
Data matrix of shape (n_samples, n_features) with missing entries
marked using np.nan
Returns a (n_samples, n_samples) matrix of pairwise normalized distances. | entailment |
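In formula form, the pairwise quantity computed above is the following, where $S_{ij}$ is the set of columns observed in both rows $i$ and $j$:

```latex
D_{ij} \;=\; \frac{1}{\lvert S_{ij}\rvert} \sum_{c \in S_{ij}} \left(X_{ic} - X_{jc}\right)^2,
\qquad
D_{ij} \;=\; \infty \ \text{ if } S_{ij} = \emptyset .
```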
def all_pairs_normalized_distances_reference(X):
"""
Reference implementation of normalized all-pairs distance, used
for testing the more efficient implementation above for equivalence.
"""
n_samples, n_cols = X.shape
# matrix of mean squared difference between samples
D = np.ones((n_samples, n_samples), dtype="float32") * np.inf
for i in range(n_samples):
diffs = X - X[i, :].reshape((1, n_cols))
missing_diffs = np.isnan(diffs)
missing_counts_per_row = missing_diffs.sum(axis=1)
valid_rows = missing_counts_per_row < n_cols
D[i, valid_rows] = np.nanmean(
diffs[valid_rows, :] ** 2,
axis=1)
return D | Reference implementation of normalized all-pairs distance, used
for testing the more efficient implementation above for equivalence. | entailment |
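A quick sanity check, under the assumption that both distance implementations are importable from the same module, that the optimized and reference versions agree on their finite entries:

```python
import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(20, 5).astype('float32')
X[rng.rand(20, 5) < 0.3] = np.nan        # knock out roughly 30% of the entries

D_fast = all_pairs_normalized_distances(X)
D_ref = all_pairs_normalized_distances_reference(X)
finite = np.isfinite(D_fast) & np.isfinite(D_ref)
assert np.allclose(D_fast[finite], D_ref[finite], atol=1e-4)
```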
def knn_impute_with_argpartition(
X,
missing_mask,
k,
verbose=False,
print_interval=100):
"""
Fill in the given incomplete matrix using k-nearest neighbor imputation.
This version is a simpler algorithm meant primarily for testing but
surprisingly it's faster for many (but not all) dataset sizes, particularly
when most of the columns are missing in any given row. The crucial
bottleneck is the call to numpy.argpartition for every missing element
in the array.
Parameters
----------
X : np.ndarray
Matrix to fill of shape (n_samples, n_features)
missing_mask : np.ndarray
Boolean array of same shape as X
k : int
verbose : bool
Returns a row-major copy of X with imputed values.
"""
start_t = time.time()
n_rows, n_cols = X.shape
# put the missing mask in column major order since it's accessed
# one column at a time
missing_mask_column_major = np.asarray(missing_mask, order="F")
X_row_major, D, effective_infinity = \
knn_initialize(X, missing_mask, verbose=verbose)
D_reciprocal = 1.0 / D
dot = np.dot
array = np.array
argpartition = np.argpartition
for i in range(n_rows):
missing_indices = np.where(missing_mask[i])[0]
if verbose and i % print_interval == 0:
print(
"Imputing row %d/%d with %d missing, elapsed time: %0.3f" % (
i + 1,
n_rows,
len(missing_indices),
time.time() - start_t))
d = D[i, :]
inv_d = D_reciprocal[i, :]
for j in missing_indices:
# move rows which lack this feature to be infinitely far away
d_copy = d.copy()
d_copy[missing_mask_column_major[:, j]] = effective_infinity
neighbor_indices = argpartition(d_copy, k)[:k]
if d_copy[neighbor_indices].max() >= effective_infinity:
# if there aren't k rows with the feature of interest then
# we need to filter out indices of points at infinite distance
neighbor_indices = array([
neighbor_index
for neighbor_index in neighbor_indices
if d_copy[neighbor_index] < effective_infinity
])
n_current_neighbors = len(neighbor_indices)
if n_current_neighbors > 0:
neighbor_weights = inv_d[neighbor_indices]
X_row_major[i, j] = (
dot(X[:, j][neighbor_indices], neighbor_weights) /
neighbor_weights.sum()
)
return X_row_major | Fill in the given incomplete matrix using k-nearest neighbor imputation.
This version is a simpler algorithm meant primarily for testing but
surprisingly it's faster for many (but not all) dataset sizes, particularly
when most of the columns are missing in any given row. The crucial
bottleneck is the call to numpy.argpartition for every missing element
in the array.
Parameters
----------
X : np.ndarray
Matrix to fill of shape (n_samples, n_features)
missing_mask : np.ndarray
Boolean array of same shape as X
k : int
verbose : bool
Returns a row-major copy of X with imputed values. | entailment |
def knn_impute_reference(
X,
missing_mask,
k,
verbose=False,
print_interval=100):
"""
Reference implementation of kNN imputation logic.
"""
n_rows, n_cols = X.shape
X_result, D, effective_infinity = \
knn_initialize(X, missing_mask, verbose=verbose)
for i in range(n_rows):
for j in np.where(missing_mask[i, :])[0]:
distances = D[i, :].copy()
# any rows that don't have the value we're currently trying
# to impute are set to infinite distances
distances[missing_mask[:, j]] = effective_infinity
neighbor_indices = np.argsort(distances)
neighbor_distances = distances[neighbor_indices]
# get rid of any infinite distance neighbors in the top k
valid_distances = neighbor_distances < effective_infinity
neighbor_distances = neighbor_distances[valid_distances][:k]
neighbor_indices = neighbor_indices[valid_distances][:k]
weights = 1.0 / neighbor_distances
weight_sum = weights.sum()
if weight_sum > 0:
column = X[:, j]
values = column[neighbor_indices]
X_result[i, j] = np.dot(values, weights) / weight_sum
return X_result | Reference implementation of kNN imputation logic. | entailment |
def active(self, registered_only=True):
"Returns all active users, e.g. not logged and non-expired session."
visitors = self.filter(
expiry_time__gt=timezone.now(),
end_time=None
)
if registered_only:
visitors = visitors.filter(user__isnull=False)
return visitors | Returns all active users, i.e. sessions that have not ended (logged out) and have not expired. | entailment |
def stats(self, start_date, end_date, registered_only=False):
"""Returns a dictionary of visits including:
* total visits
* unique visits
* return ratio
* pages per visit (if pageviews are enabled)
* time on site
for all users, registered users and guests.
"""
visitors = self.filter(
start_time__gte=start_date,
start_time__lt=end_date
)
stats = {
'total': 0,
'unique': 0,
'return_ratio': 0,
}
# All visitors
stats['total'] = total_count = visitors.count()
unique_count = 0
# No visitors! Nothing more to do.
if not total_count:
return stats
# Avg time on site
total_time_on_site = visitors.aggregate(
avg_tos=Avg('time_on_site'))['avg_tos']
stats['time_on_site'] = timedelta(seconds=int(total_time_on_site))
# Registered user sessions
registered_visitors = visitors.filter(user__isnull=False)
registered_total_count = registered_visitors.count()
if registered_total_count:
registered_unique_count = registered_visitors.values(
'user'
).distinct().count()
# Avg time on site
time_on_site = registered_visitors.aggregate(
avg_tos=Avg('time_on_site'))['avg_tos']
# Update the total unique count..
unique_count += registered_unique_count
# Set the registered stats..
returns = (registered_total_count - registered_unique_count)
stats['registered'] = {
'total': registered_total_count,
'unique': registered_unique_count,
'return_ratio': (returns / registered_total_count) * 100,
'time_on_site': timedelta(seconds=int(time_on_site)),
}
# Get stats for our guests..
if TRACK_ANONYMOUS_USERS and not registered_only:
guests = visitors.filter(user__isnull=True)
guest_total_count = guests.count()
if guest_total_count:
guest_unique_count = guests.values(
'ip_address'
).distinct().count()
# Avg time on site
guest_time_on_site = guests.aggregate(
avg_tos=Avg('time_on_site'))['avg_tos']
# return rate
returns = (guest_total_count - guest_unique_count)
return_ratio = (returns / guest_total_count) * 100
time_on_site = timedelta(seconds=int(guest_time_on_site))
else:
guest_total_count = 0
guest_unique_count = 0
return_ratio = 0.0
time_on_site = timedelta(0)
# Update the total unique count
unique_count += guest_unique_count
stats['guests'] = {
'total': guest_total_count,
'unique': guest_unique_count,
'return_ratio': return_ratio,
'time_on_site': time_on_site,
}
# Finish setting the total visitor counts
returns = (total_count - unique_count)
stats['unique'] = unique_count
stats['return_ratio'] = (returns / total_count) * 100
# If pageviews are being tracked, add the aggregate pages-per-visit
if TRACK_PAGEVIEWS:
if 'registered' in stats:
pages_per_visit = registered_visitors.annotate(
page_count=Count('pageviews')
).filter(page_count__gt=0).aggregate(
pages_per_visit=Avg('page_count'))['pages_per_visit']
stats['registered']['pages_per_visit'] = pages_per_visit
if TRACK_ANONYMOUS_USERS and not registered_only:
stats['guests']['pages_per_visit'] = guests.annotate(
page_count=Count('pageviews')
).filter(page_count__gt=0).aggregate(
pages_per_visit=Avg('page_count'))['pages_per_visit']
total_per_visit = visitors.annotate(
page_count=Count('pageviews')
).filter(page_count__gt=0).aggregate(
pages_per_visit=Avg('page_count'))['pages_per_visit']
else:
if 'registered' in stats:
total_per_visit = stats['registered']['pages_per_visit']
else:
total_per_visit = 0
stats['pages_per_visit'] = total_per_visit
return stats | Returns a dictionary of visits including:
* total visits
* unique visits
* return ratio
* pages per visit (if pageviews are enabled)
* time on site
for all users, registered users and guests. | entailment |
def stats(self, start_date=None, end_date=None, registered_only=False):
"""Returns a dictionary of pageviews including:
* total pageviews
for all users, registered users and guests.
"""
pageviews = self.filter(
visitor__start_time__lt=end_date,
visitor__start_time__gte=start_date,
).select_related('visitor')
stats = {
'total': 0,
'unique': 0,
}
stats['total'] = total_views = pageviews.count()
unique_count = 0
if not total_views:
return stats
# Registered user sessions
registered_pageviews = pageviews.filter(visitor__user__isnull=False)
registered_count = registered_pageviews.count()
if registered_count:
registered_unique_count = registered_pageviews.values(
'visitor', 'url').distinct().count()
# Update the total unique count...
unique_count += registered_unique_count
stats['registered'] = {
'total': registered_count,
'unique': registered_unique_count,
}
if TRACK_ANONYMOUS_USERS and not registered_only:
guest_pageviews = pageviews.filter(visitor__user__isnull=True)
guest_count = guest_pageviews.count()
if guest_count:
guest_unique_count = guest_pageviews.values(
'visitor', 'url').distinct().count()
# Update the total unique count...
unique_count += guest_unique_count
stats['guests'] = {
'total': guest_count,
'unique': guest_unique_count,
}
# Finish setting the total visitor counts
stats['unique'] = unique_count
return stats | Returns a dictionary of pageviews including:
* total pageviews
for all users, registered users and guests. | entailment |
def dashboard(request):
"Counts, aggregations and more!"
end_time = now()
start_time = end_time - timedelta(days=7)
defaults = {'start': start_time, 'end': end_time}
form = DashboardForm(data=request.GET or defaults)
if form.is_valid():
start_time = form.cleaned_data['start']
end_time = form.cleaned_data['end']
# determine when tracking began
try:
obj = Visitor.objects.order_by('start_time')[0]
track_start_time = obj.start_time
except (IndexError, Visitor.DoesNotExist):
track_start_time = now()
# If the start_date is before tracking began, warn about incomplete data
warn_incomplete = (start_time < track_start_time)
# queries take `date` objects (for now)
user_stats = Visitor.objects.user_stats(start_time, end_time)
visitor_stats = Visitor.objects.stats(start_time, end_time)
if TRACK_PAGEVIEWS:
pageview_stats = Pageview.objects.stats(start_time, end_time)
else:
pageview_stats = None
context = {
'form': form,
'track_start_time': track_start_time,
'warn_incomplete': warn_incomplete,
'user_stats': user_stats,
'visitor_stats': visitor_stats,
'pageview_stats': pageview_stats,
}
return render(request, 'tracking/dashboard.html', context) | Counts, aggregations and more! | entailment |
def geoip_data(self):
"""Attempt to retrieve MaxMind GeoIP data based on visitor's IP."""
if not HAS_GEOIP or not TRACK_USING_GEOIP:
return
if not hasattr(self, '_geoip_data'):
self._geoip_data = None
try:
gip = GeoIP(cache=GEOIP_CACHE_TYPE)
self._geoip_data = gip.city(self.ip_address)
except GeoIPException:
msg = 'Error getting GeoIP data for IP "{0}"'.format(
self.ip_address)
log.exception(msg)
return self._geoip_data | Attempt to retrieve MaxMind GeoIP data based on visitor's IP. | entailment |
def escape(s):
"""Convert the characters &, <, >, ' and " in string s to HTML-safe
sequences. Use this if you need to display text that might contain
such characters in HTML. Marks return value as markup string.
"""
if hasattr(s, '__html__'):
return s.__html__()
if isinstance(s, six.binary_type):
s = six.text_type(s, 'utf8')  # decode the bytes directly; wrapping in str() first breaks under Python 3
elif isinstance(s, six.text_type):
s = s
else:
s = str(s)
return (s
.replace('&', '&')
.replace('>', '>')
.replace('<', '<')
.replace("'", ''')
.replace('"', '"')
) | Convert the characters &, <, >, ' and " in string s to HTML-safe
sequences. Use this if you need to display text that might contain
such characters in HTML. Marks return value as markup string. | entailment |
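A quick illustration of the replacement order above (ampersands are escaped first so the later entities are not double-escaped):

```python
print(escape('<a href="x">Tom & Jerry\'s</a>'))
# &lt;a href=&#34;x&#34;&gt;Tom &amp; Jerry&#39;s&lt;/a&gt;
```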
def iteration(obj, num_keys):
"""
Jade iteration supports "for 'value' [, key]?" iteration only.
PyJade has implicitly supported value unpacking instead, without
the list indexes. Trying to not break existing code, the following
rules are applied:
1. If the object is a mapping type, return it as-is, and assume
the caller has the correct set of keys defined.
2. If the object's values are iterable (and not string-like):
a. If the number of keys matches the cardinality of the object's
values, return the object as-is.
b. If the number of keys is one more than the cardinality of
values, return a list of [v(0), v(1), ... v(n), index]
3. Else the object's values are not iterable, or are string like:
a. if there's only one key, return the list
b. otherwise return a list of (value,index) tuples
"""
# If the object is a mapping type, return it as-is
if is_mapping(obj):
return obj
_marker = []
iter_obj = iter(obj)
head = next(iter_obj, _marker)
iter_obj = chain([head], iter_obj)
if head is _marker:
# Empty list
return []
if is_iterable(head):
if num_keys == get_cardinality(head) + 1:
return (tuple(item) + (ix,) for ix, item in enumerate(iter_obj))
else:
return iter_obj
elif num_keys == 2:
return ((item, ix) for ix, item in enumerate(iter_obj))
else:
return iter_obj | Jade iteration supports "for 'value' [, key]?" iteration only.
PyJade has implicitly supported value unpacking instead, without
the list indexes. Trying to not break existing code, the following
rules are applied:
1. If the object is a mapping type, return it as-is, and assume
the caller has the correct set of keys defined.
2. If the object's values are iterable (and not string-like):
a. If the number of keys matches the cardinality of the object's
values, return the object as-is.
b. If the number of keys is one more than the cardinality of
values, return a list of [v(0), v(1), ... v(n), index]
3. Else the object's values are not iterable, or are string like:
a. if there's only one key, return the list
b. otherwise return a list of (value,index) tuples | entailment |
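A hypothetical illustration of the three rules above; the helpers `is_mapping`, `is_iterable` and `get_cardinality` are assumed to come from the same runtime module:

```python
pairs = [('a', 1), ('b', 2)]

print(list(iteration(pairs, 2)))    # rule 2a: [('a', 1), ('b', 2)]
print(list(iteration(pairs, 3)))    # rule 2b: [('a', 1, 0), ('b', 2, 1)]
print(list(iteration([5, 6], 1)))   # rule 3a: [5, 6]
print(list(iteration([5, 6], 2)))   # rule 3b: [(5, 0), (6, 1)]
```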
def do_evaluate(parser, token):
'''Calls an arbitrary method on an object.'''
code = token.contents
firstspace = code.find(' ')
if firstspace >= 0:
code = code[firstspace+1:]
return Evaluator(code) | Calls an arbitrary method on an object. | entailment |
def do_set(parser, token):
'''Calls an arbitrary method on an object.'''
code = token.contents
firstspace = code.find(' ')
if firstspace >= 0:
code = code[firstspace+1:]
return Setter(code) | Calls an arbitrary method on an object. | entailment |
def render(self, context):
'''Evaluates the code in the page and returns the result'''
modules = {
'pyjade': __import__('pyjade')
}
context['false'] = False
context['true'] = True
try:
return six.text_type(eval('pyjade.runtime.attrs(%s)'%self.code,modules,context))
except NameError:
return '' | Evaluates the code in the page and returns the result | entailment |
def render(self, context):
'''Evaluates the code in the page and returns the result'''
modules = {
}
context['false'] = False
context['true'] = True
new_ctx = eval('dict(%s)'%self.code,modules,context)
context.update(new_ctx)
return '' | Evaluates the code in the page and returns the result | entailment |
def sprite_filepath_build(sprite_type, sprite_id, **kwargs):
"""returns the filepath of the sprite *relative to SPRITE_CACHE*"""
options = parse_sprite_options(sprite_type, **kwargs)
filename = '.'.join([str(sprite_id), SPRITE_EXT])
filepath = os.path.join(sprite_type, *options, filename)
return filepath | returns the filepath of the sprite *relative to SPRITE_CACHE* | entailment |
def _make_obj(obj):
"""Takes an object and returns a corresponding API class.
The names and values of the data will match exactly with those found
in the online docs at https://pokeapi.co/docsv2/ . In some cases, the data
may be of a standard type, such as an integer or string. For those cases,
the input value is simply returned, unchanged.
:param obj: the object to be converted
:return either the same value, if it does not need to be converted, or a
APIResource or APIMetadata instance, depending on the data inputted.
"""
if isinstance(obj, dict):
if 'url' in obj.keys():
url = obj['url']
id_ = int(url.split('/')[-2]) # ID of the data.
endpoint = url.split('/')[-3] # Where the data is located.
return APIResource(endpoint, id_, lazy_load=True)
return APIMetadata(obj)
return obj | Takes an object and returns a corresponding API class.
The names and values of the data will match exactly with those found
in the online docs at https://pokeapi.co/docsv2/ . In some cases, the data
may be of a standard type, such as an integer or string. For those cases,
the input value is simply returned, unchanged.
:param obj: the object to be converted
:return either the same value, if it does not need to be converted, or a
APIResource or APIMetadata instance, depending on the data inputted. | entailment |
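A hedged illustration of the conversion rules above; the dicts and URL below are made-up examples showing how the endpoint and id are parsed out of a resource URL:

```python
berry_ref = {'name': 'cheri', 'url': 'https://pokeapi.co/api/v2/berry/1/'}
res = _make_obj(berry_ref)              # -> APIResource('berry', 1, lazy_load=True)
meta = _make_obj({'firmness': 'soft'})  # -> APIMetadata wrapping the plain dict
plain = _make_obj(42)                   # -> 42, returned unchanged
```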
def _load(self):
"""Function to collect reference data and connect it to the instance as
attributes.
Internal function, does not usually need to be called by the user, as
it is called automatically when an attribute is requested.
:return None
"""
data = get_data(self.endpoint, self.id_, force_lookup=self.__force_lookup)
# Make our custom objects from the data.
for key, val in data.items():
if key == 'location_area_encounters' \
and self.endpoint == 'pokemon':
params = val.split('/')[-3:]
ep, id_, subr = params
encounters = get_data(ep, int(id_), subr)
data[key] = [_make_obj(enc) for enc in encounters]
continue
if isinstance(val, dict):
data[key] = _make_obj(val)
elif isinstance(val, list):
data[key] = [_make_obj(i) for i in val]
self.__dict__.update(data)
return None | Function to collect reference data and connect it to the instance as
attributes.
Internal function, does not usually need to be called by the user, as
it is called automatically when an attribute is requested.
:return None | entailment |
def safe_make_dirs(path, mode=0o777):
"""Create a leaf directory and all intermediate ones in a safe way.
A wrapper to os.makedirs() that handles existing leaf directories while
avoiding os.path.exists() race conditions.
:param path: relative or absolute directory tree to create
:param mode: directory permissions in octal
:return: The newly-created path
"""
try:
os.makedirs(path, mode)
except OSError as error:
if error.errno != 17: # File exists
raise
return path | Create a leaf directory and all intermediate ones in a safe way.
A wrapper to os.makedirs() that handles existing leaf directories while
avoiding os.path.exists() race conditions.
:param path: relative or absolute directory tree to create
:param mode: directory permissions in octal
:return: The newly-created path | entailment |
def get_default_cache():
"""Get the default cache location.
Adheres to the XDG Base Directory specification, as described in
https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
:return: the default cache directory absolute path
"""
xdg_cache_home = os.environ.get('XDG_CACHE_HOME') or \
os.path.join(os.path.expanduser('~'), '.cache')
return os.path.join(xdg_cache_home, 'pokebase') | Get the default cache location.
Adheres to the XDG Base Directory specification, as described in
https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
:return: the default cache directory absolute path | entailment |
def set_cache(new_path=None):
"""Simple function to change the cache location.
`new_path` can be an absolute or relative path. If the directory does not
exist yet, this function will create it. If None it will set the cache to
the default cache directory.
If you are going to change the cache directory, this function should be
called at the top of your script, before you make any calls to the API.
This is to avoid duplicate files and excess API calls.
:param new_path: relative or absolute path to the desired new cache
directory
:return: str, str
"""
global CACHE_DIR, API_CACHE, SPRITE_CACHE
if new_path is None:
new_path = get_default_cache()
CACHE_DIR = safe_make_dirs(os.path.abspath(new_path))
API_CACHE = os.path.join(CACHE_DIR, 'api.cache')
SPRITE_CACHE = safe_make_dirs(os.path.join(CACHE_DIR, 'sprite'))
return CACHE_DIR, API_CACHE, SPRITE_CACHE | Simple function to change the cache location.
`new_path` can be an absolute or relative path. If the directory does not
exist yet, this function will create it. If None it will set the cache to
the default cache directory.
If you are going to change the cache directory, this function should be
called at the top of your script, before you make any calls to the API.
This is to avoid duplicate files and excess API calls.
:param new_path: relative or absolute path to the desired new cache
directory
:return: str, str | entailment |
def attach(self, lun_or_snap, skip_hlu_0=False):
""" Attaches lun, snap or member snap of cg snap to host.
Don't pass cg snapshot in as `lun_or_snap`.
:param lun_or_snap: the lun, snap, or a member snap of cg snap
:param skip_hlu_0: whether to skip hlu 0
:return: the hlu number
"""
# `UnityResourceAlreadyAttachedError` check was removed due to there
# is a host cache existing in Cinder driver. If the lun was attached to
# the host and the info was stored in the cache, wrong hlu would be
# returned.
# And attaching a lun to a host twice would succeed; if a Cinder retry
# triggers another attachment of the same lun to the host, the cost would
# be one more REST request of `modifyLun` and one for the host instance
# query.
try:
return self._attach_with_retry(lun_or_snap, skip_hlu_0)
except ex.SystemAPINotSupported:
# Attaching snap to host not support before 4.1.
raise
except ex.UnityAttachExceedLimitError:
# The number of luns exceeds system limit
raise
except: # noqa
# other attach error, remove this lun if already attached
self.detach(lun_or_snap)
raise | Attaches lun, snap or member snap of cg snap to host.
Don't pass cg snapshot in as `lun_or_snap`.
:param lun_or_snap: the lun, snap, or a member snap of cg snap
:param skip_hlu_0: whether to skip hlu 0
:return: the hlu number | entailment |
def has_hlu(self, lun_or_snap, cg_member=None):
"""Returns True if `lun_or_snap` is attached to the host.
:param lun_or_snap: can be lun, lun snap, cg snap or a member snap of
cg snap.
:param cg_member: the member lun of cg if `lun_or_snap` is cg snap.
:return: True - if `lun_or_snap` is attached, otherwise False.
"""
hlu = self.get_hlu(lun_or_snap, cg_member=cg_member)
return hlu is not None | Returns True if `lun_or_snap` is attached to the host.
:param lun_or_snap: can be lun, lun snap, cg snap or a member snap of
cg snap.
:param cg_member: the member lun of cg if `lun_or_snap` is cg snap.
:return: True - if `lun_or_snap` is attached, otherwise False. | entailment |
def get_host_lun(self, lun_or_snap, cg_member=None):
"""Gets the host lun of a lun, lun snap, cg snap or a member snap of cg
snap.
:param lun_or_snap: can be lun, lun snap, cg snap or a member snap of
cg snap.
:param cg_member: the member lun of cg if `lun_or_snap` is cg snap.
:return: the host lun object.
"""
import storops.unity.resource.lun as lun_module
import storops.unity.resource.snap as snap_module
which = None
if isinstance(lun_or_snap, lun_module.UnityLun):
which = self._get_host_luns(lun=lun_or_snap)
elif isinstance(lun_or_snap, snap_module.UnitySnap):
if lun_or_snap.is_cg_snap():
if cg_member is None:
log.debug('None host lun for CG snap {}. '
'Use its member snap instead or pass in '
'cg_member.'.format(lun_or_snap.id))
return None
lun_or_snap = lun_or_snap.get_member_snap(cg_member)
which = self._get_host_luns(lun=cg_member, snap=lun_or_snap)
else:
which = self._get_host_luns(snap=lun_or_snap)
if not which:
log.debug('Resource(LUN or Snap) {} is not attached to host {}'
.format(lun_or_snap.name, self.name))
return None
return which[0] | Gets the host lun of a lun, lun snap, cg snap or a member snap of cg
snap.
:param lun_or_snap: can be lun, lun snap, cg snap or a member snap of
cg snap.
:param cg_member: the member lun of cg if `lun_or_snap` is cg snap.
:return: the host lun object. | entailment |
def get_hlu(self, resource, cg_member=None):
"""Gets the hlu number of a lun, lun snap, cg snap or a member snap of
cg snap.
:param resource: can be lun, lun snap, cg snap or a member snap of cg
snap.
:param cg_member: the member lun of cg if `lun_or_snap` is cg snap.
:return: the hlu number.
"""
host_lun = self.get_host_lun(resource, cg_member=cg_member)
return host_lun if host_lun is None else host_lun.hlu | Gets the hlu number of a lun, lun snap, cg snap or a member snap of
cg snap.
:param resource: can be lun, lun snap, cg snap or a member snap of cg
snap.
:param cg_member: the member lun of cg if `lun_or_snap` is cg snap.
:return: the hlu number. | entailment |
def update_initiators(self, iqns=None, wwns=None):
"""Primarily for puppet-unity use.
Update the iSCSI and FC initiators if needed.
"""
# First get current iqns
iqns = set(iqns) if iqns else set()
current_iqns = set()
if self.iscsi_host_initiators:
current_iqns = {initiator.initiator_id
for initiator in self.iscsi_host_initiators}
# Then get current wwns
wwns = set(wwns) if wwns else set()
current_wwns = set()
if self.fc_host_initiators:
current_wwns = {initiator.initiator_id
for initiator in self.fc_host_initiators}
updater = UnityHostInitiatorUpdater(
self, current_iqns | current_wwns, iqns | wwns)
return updater.update() | Primarily for puppet-unity use.
Update the iSCSI and FC initiators if needed. | entailment |
def list_files(tag=None, sat_id=None, data_path=None, format_str=None,
supported_tags=None, fake_daily_files_from_monthly=False,
two_digit_year_break=None):
"""Return a Pandas Series of every file for chosen satellite data.
This routine is intended to be used by pysat instrument modules supporting
a particular NASA CDAWeb dataset.
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are <tag strings>. (default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
supported_tags : (dict or NoneType)
keys are tags supported by list_files routine. Values are the
default format_str values for key. (default=None)
fake_daily_files_from_monthly : bool
Some CDAWeb instrument data files are stored by month, interfering
with pysat's functionality of loading by day. This flag, when true,
appends daily dates to monthly files internally. These dates are
used by load routine in this module to provide data by day.
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files
Examples
--------
::
fname = 'cnofs_vefi_bfield_1sec_{year:04d}{month:02d}{day:02d}_v05.cdf'
supported_tags = {'dc_b':fname}
list_files = functools.partial(nasa_cdaweb_methods.list_files,
supported_tags=supported_tags)
ivm_fname = 'cnofs_cindi_ivm_500ms_{year:4d}{month:02d}{day:02d}_v01.cdf'
supported_tags = {'':ivm_fname}
list_files = functools.partial(cdw.list_files,
supported_tags=supported_tags)
"""
if data_path is not None:
if format_str is None:
try:
format_str = supported_tags[sat_id][tag]
except KeyError:
raise ValueError('Unknown tag')
out = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
if (not out.empty) and fake_daily_files_from_monthly:
out.ix[out.index[-1] + pds.DateOffset(months=1) -
pds.DateOffset(days=1)] = out.iloc[-1]
out = out.asfreq('D', 'pad')
out = out + '_' + out.index.strftime('%Y-%m-%d')
return out
return out
else:
estr = 'A directory must be passed to the loading routine for <Instrument Code>'
        raise ValueError(estr)
This routine is intended to be used by pysat instrument modules supporting
a particular NASA CDAWeb dataset.
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are <tag strings>. (default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
supported_tags : (dict or NoneType)
keys are tags supported by list_files routine. Values are the
default format_str values for key. (default=None)
fake_daily_files_from_monthly : bool
Some CDAWeb instrument data files are stored by month, interfering
with pysat's functionality of loading by day. This flag, when true,
appends daily dates to monthly files internally. These dates are
used by load routine in this module to provide data by day.
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files
Examples
--------
::
fname = 'cnofs_vefi_bfield_1sec_{year:04d}{month:02d}{day:02d}_v05.cdf'
supported_tags = {'dc_b':fname}
list_files = functools.partial(nasa_cdaweb_methods.list_files,
supported_tags=supported_tags)
ivm_fname = 'cnofs_cindi_ivm_500ms_{year:4d}{month:02d}{day:02d}_v01.cdf'
supported_tags = {'':ivm_fname}
list_files = functools.partial(cdw.list_files,
supported_tags=supported_tags) | entailment |
def load(fnames, tag=None, sat_id=None,
fake_daily_files_from_monthly=False,
flatten_twod=True):
"""Load NASA CDAWeb CDF files.
This routine is intended to be used by pysat instrument modules supporting
a particular NASA CDAWeb dataset.
Parameters
------------
fnames : (pandas.Series)
Series of filenames
tag : (str or NoneType)
tag or None (default=None)
sat_id : (str or NoneType)
satellite id or None (default=None)
fake_daily_files_from_monthly : bool
Some CDAWeb instrument data files are stored by month, interfering
with pysat's functionality of loading by day. This flag, when true,
parses of daily dates to monthly files that were added internally
by the list_files routine, when flagged. These dates are
used here to provide data by day.
Returns
---------
data : (pandas.DataFrame)
Object containing satellite data
meta : (pysat.Meta)
Object containing metadata such as column names and units
Examples
--------
::
# within the new instrument module, at the top level define
# a new variable named load, and set it equal to this load method
# code below taken from cnofs_ivm.py.
# support load routine
# use the default CDAWeb method
load = cdw.load
"""
import pysatCDF
    if len(fnames) <= 0:
return pysat.DataFrame(None), None
else:
# going to use pysatCDF to load the CDF and format
# data and metadata for pysat using some assumptions.
# Depending upon your needs the resulting pandas DataFrame may
# need modification
# currently only loads one file, which handles more situations via pysat
# than you may initially think
if fake_daily_files_from_monthly:
# parse out date from filename
fname = fnames[0][0:-11]
date = pysat.datetime.strptime(fnames[0][-10:], '%Y-%m-%d')
with pysatCDF.CDF(fname) as cdf:
# convert data to pysat format
data, meta = cdf.to_pysat(flatten_twod=flatten_twod)
# select data from monthly
data = data.ix[date:date+pds.DateOffset(days=1) - pds.DateOffset(microseconds=1),:]
return data, meta
else:
# basic data return
with pysatCDF.CDF(fnames[0]) as cdf:
return cdf.to_pysat(flatten_twod=flatten_twod) | Load NASA CDAWeb CDF files.
This routine is intended to be used by pysat instrument modules supporting
a particular NASA CDAWeb dataset.
Parameters
------------
fnames : (pandas.Series)
Series of filenames
tag : (str or NoneType)
tag or None (default=None)
sat_id : (str or NoneType)
satellite id or None (default=None)
fake_daily_files_from_monthly : bool
Some CDAWeb instrument data files are stored by month, interfering
with pysat's functionality of loading by day. This flag, when true,
parses of daily dates to monthly files that were added internally
by the list_files routine, when flagged. These dates are
used here to provide data by day.
Returns
---------
data : (pandas.DataFrame)
Object containing satellite data
meta : (pysat.Meta)
Object containing metadata such as column names and units
Examples
--------
::
# within the new instrument module, at the top level define
# a new variable named load, and set it equal to this load method
# code below taken from cnofs_ivm.py.
# support load routine
# use the default CDAWeb method
load = cdw.load | entailment |
def download(supported_tags, date_array, tag, sat_id,
ftp_site='cdaweb.gsfc.nasa.gov',
data_path=None, user=None, password=None,
fake_daily_files_from_monthly=False):
"""Routine to download NASA CDAWeb CDF data.
This routine is intended to be used by pysat instrument modules supporting
a particular NASA CDAWeb dataset.
Parameters
-----------
supported_tags : dict
dict of dicts. Keys are supported tag names for download. Value is
        a dict with 'dir', 'remote_fname', 'local_fname'. Intended to be
pre-set with functools.partial then assigned to new instrument code.
date_array : array_like
Array of datetimes to download data for. Provided by pysat.
tag : (str or NoneType)
tag or None (default=None)
sat_id : (str or NoneType)
satellite id or None (default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
user : (string or NoneType)
Username to be passed along to resource with relevant data.
(default=None)
password : (string or NoneType)
User password to be passed along to resource with relevant data.
(default=None)
fake_daily_files_from_monthly : bool
        Some CDAWeb instrument data files are stored by month. This flag,
        when true, accommodates this reality with user feedback on a monthly
time frame.
Returns
--------
Void : (NoneType)
Downloads data to disk.
Examples
--------
::
# download support added to cnofs_vefi.py using code below
rn = '{year:4d}/cnofs_vefi_bfield_1sec_{year:4d}{month:02d}{day:02d}_v05.cdf'
ln = 'cnofs_vefi_bfield_1sec_{year:4d}{month:02d}{day:02d}_v05.cdf'
dc_b_tag = {'dir':'/pub/data/cnofs/vefi/bfield_1sec',
'remote_fname':rn,
'local_fname':ln}
supported_tags = {'dc_b':dc_b_tag}
download = functools.partial(nasa_cdaweb_methods.download,
supported_tags=supported_tags)
"""
import os
import ftplib
# connect to CDAWeb default port
ftp = ftplib.FTP(ftp_site)
# user anonymous, passwd anonymous@
ftp.login()
try:
ftp_dict = supported_tags[tag]
except KeyError:
raise ValueError('Tag name unknown.')
# path to relevant file on CDAWeb
ftp.cwd(ftp_dict['dir'])
# naming scheme for files on the CDAWeb server
remote_fname = ftp_dict['remote_fname']
# naming scheme for local files, should be closely related
# to CDAWeb scheme, though directory structures may be reduced
# if desired
local_fname = ftp_dict['local_fname']
for date in date_array:
# format files for specific dates and download location
formatted_remote_fname = remote_fname.format(year=date.year,
month=date.month, day=date.day)
formatted_local_fname = local_fname.format(year=date.year,
month=date.month, day=date.day)
saved_local_fname = os.path.join(data_path,formatted_local_fname)
# perform download
try:
print('Attempting to download file for '+date.strftime('%x'))
sys.stdout.flush()
ftp.retrbinary('RETR '+formatted_remote_fname, open(saved_local_fname,'wb').write)
print('Finished.')
except ftplib.error_perm as exception:
# if exception[0][0:3] != '550':
if str(exception.args[0]).split(" ", 1)[0] != '550':
raise
else:
os.remove(saved_local_fname)
print('File not available for '+ date.strftime('%x'))
ftp.close() | Routine to download NASA CDAWeb CDF data.
This routine is intended to be used by pysat instrument modules supporting
a particular NASA CDAWeb dataset.
Parameters
-----------
supported_tags : dict
dict of dicts. Keys are supported tag names for download. Value is
        a dict with 'dir', 'remote_fname', 'local_fname'. Intended to be
pre-set with functools.partial then assigned to new instrument code.
date_array : array_like
Array of datetimes to download data for. Provided by pysat.
tag : (str or NoneType)
tag or None (default=None)
sat_id : (str or NoneType)
satellite id or None (default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
user : (string or NoneType)
Username to be passed along to resource with relevant data.
(default=None)
password : (string or NoneType)
User password to be passed along to resource with relevant data.
(default=None)
fake_daily_files_from_monthly : bool
        Some CDAWeb instrument data files are stored by month. This flag,
        when true, accommodates this reality with user feedback on a monthly
time frame.
Returns
--------
Void : (NoneType)
Downloads data to disk.
Examples
--------
::
# download support added to cnofs_vefi.py using code below
rn = '{year:4d}/cnofs_vefi_bfield_1sec_{year:4d}{month:02d}{day:02d}_v05.cdf'
ln = 'cnofs_vefi_bfield_1sec_{year:4d}{month:02d}{day:02d}_v05.cdf'
dc_b_tag = {'dir':'/pub/data/cnofs/vefi/bfield_1sec',
'remote_fname':rn,
'local_fname':ln}
supported_tags = {'dc_b':dc_b_tag}
download = functools.partial(nasa_cdaweb_methods.download,
supported_tags=supported_tags) | entailment |
def from_response(response, method, url):
"""Returns an instance of :class:`HttpError` or subclass based on response.
:param response: instance of `requests.Response` class
:param method: HTTP method used for request
:param url: URL used for request
"""
req_id = response.headers.get("x-openstack-request-id")
# NOTE(hdd) true for older versions of nova and cinder
if not req_id:
req_id = response.headers.get("x-compute-request-id")
kwargs = {
"http_status": response.status_code,
"response": response,
"method": method,
"url": url,
"request_id": req_id,
}
if "retry-after" in response.headers:
kwargs["retry_after"] = response.headers["retry-after"]
content_type = response.headers.get("Content-Type", "")
if content_type.startswith("application/json"):
try:
body = response.json()
except ValueError:
pass
else:
if isinstance(body, dict):
error = body.get(list(body)[0])
if isinstance(error, dict):
kwargs["message"] = (error.get("message") or
error.get("faultstring"))
kwargs["details"] = (error.get("details") or
six.text_type(body))
elif content_type.startswith("text/"):
kwargs["details"] = response.text
try:
cls = _code_map[response.status_code]
except KeyError:
if 500 <= response.status_code < 600:
cls = HttpServerError
elif 400 <= response.status_code < 500:
cls = HTTPClientError
else:
cls = HttpError
return cls(**kwargs) | Returns an instance of :class:`HttpError` or subclass based on response.
:param response: instance of `requests.Response` class
:param method: HTTP method used for request
:param url: URL used for request | entailment |
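A short sketch of how from_response is typically wired into an HTTP call; the endpoint below is a hypothetical placeholder:

import requests

url = 'https://api.example.com/v2/volumes'  # hypothetical endpoint
resp = requests.get(url)
if not resp.ok:
    # map the failed response to HttpError or the matching subclass and raise it
    raise from_response(resp, 'GET', url)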
def create_pool(self, name, raid_groups, description=None, **kwargs):
"""Create pool based on RaidGroupParameter.
:param name: pool name
:param raid_groups: a list of *RaidGroupParameter*
:param description: pool description
:param alert_threshold: Threshold at which the system will generate
alerts about the free space in the pool, specified as
a percentage.
:param is_harvest_enabled:
True - Enable pool harvesting for the pool.
False - Disable pool harvesting for the pool.
:param is_snap_harvest_enabled:
True - Enable snapshot harvesting for the pool.
False - Disable snapshot harvesting for the pool.
:param pool_harvest_high_threshold: Pool used space high threshold at
which the system will automatically starts to delete snapshots
in the pool
:param pool_harvest_low_threshold: Pool used space low threshold under
which the system will automatically stop deletion of snapshots
in the pool
:param snap_harvest_high_threshold: Snapshot used space high threshold
at which the system automatically starts to delete snapshots
in the pool
:param snap_harvest_low_threshold: Snapshot used space low threshold
below which the system will stop automatically deleting
snapshots in the pool
:param is_fast_cache_enabled:
True - FAST Cache will be enabled for this pool.
False - FAST Cache will be disabled for this pool.
:param is_fastvp_enabled:
True - Enable scheduled data relocations for the pool.
False - Disable scheduled data relocations for the pool.
:param pool_type:
StoragePoolTypeEnum.TRADITIONAL - Create traditional pool.
StoragePoolTypeEnum.DYNAMIC - Create dynamic pool. (default)
"""
return UnityPool.create(self._cli, name=name, description=description,
raid_groups=raid_groups, **kwargs) | Create pool based on RaidGroupParameter.
:param name: pool name
:param raid_groups: a list of *RaidGroupParameter*
:param description: pool description
:param alert_threshold: Threshold at which the system will generate
alerts about the free space in the pool, specified as
a percentage.
:param is_harvest_enabled:
True - Enable pool harvesting for the pool.
False - Disable pool harvesting for the pool.
:param is_snap_harvest_enabled:
True - Enable snapshot harvesting for the pool.
False - Disable snapshot harvesting for the pool.
:param pool_harvest_high_threshold: Pool used space high threshold at
which the system will automatically starts to delete snapshots
in the pool
:param pool_harvest_low_threshold: Pool used space low threshold under
which the system will automatically stop deletion of snapshots
in the pool
:param snap_harvest_high_threshold: Snapshot used space high threshold
at which the system automatically starts to delete snapshots
in the pool
:param snap_harvest_low_threshold: Snapshot used space low threshold
below which the system will stop automatically deleting
snapshots in the pool
:param is_fast_cache_enabled:
True - FAST Cache will be enabled for this pool.
False - FAST Cache will be disabled for this pool.
:param is_fastvp_enabled:
True - Enable scheduled data relocations for the pool.
False - Disable scheduled data relocations for the pool.
:param pool_type:
StoragePoolTypeEnum.TRADITIONAL - Create traditional pool.
StoragePoolTypeEnum.DYNAMIC - Create dynamic pool. (default) | entailment |
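A hedged sketch of calling create_pool, assuming `unity` is the system object exposing the method and `rg` is a RaidGroupParameter built elsewhere (its constructor is not shown here, so no arguments are invented for it):

pool = unity.create_pool('pool_01', raid_groups=[rg],
                         description='example pool',
                         is_fast_cache_enabled=True)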
def get_file_port(self):
"""Returns ports list can be used by File
File ports includes ethernet ports and link aggregation ports.
"""
eths = self.get_ethernet_port(bond=False)
las = self.get_link_aggregation()
        return eths + las | Returns the list of ports that can be used by File.
        File ports include ethernet ports and link aggregation ports. | entailment
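A one-line usage sketch, with `unity` standing in for the system object:

# ethernet ports plus link aggregations usable for file (NAS) interfaces
file_ports = unity.get_file_port()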
def create_remote_system(self, management_address,
local_username=None, local_password=None,
remote_username=None, remote_password=None,
connection_type=None):
"""
Configures a remote system for remote replication.
:param management_address: the management IP address of the remote
system.
:param local_username: administrative username of local system.
:param local_password: administrative password of local system.
:param remote_username: administrative username of remote system.
:param remote_password: administrative password of remote system.
:param connection_type: `ReplicationCapabilityEnum`. Replication
connection type to the remote system.
:return: the newly created remote system.
"""
return UnityRemoteSystem.create(self._cli, management_address,
local_username=local_username,
local_password=local_password,
remote_username=remote_username,
remote_password=remote_password,
connection_type=connection_type) | Configures a remote system for remote replication.
:param management_address: the management IP address of the remote
system.
:param local_username: administrative username of local system.
:param local_password: administrative password of local system.
:param remote_username: administrative username of remote system.
:param remote_password: administrative password of remote system.
:param connection_type: `ReplicationCapabilityEnum`. Replication
connection type to the remote system.
:return: the newly created remote system. | entailment |
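A hedged call example with placeholder addresses and credentials:

remote = unity.create_remote_system(
    '10.0.0.2',                       # management IP of the peer system
    local_username='admin', local_password='local-secret',
    remote_username='admin', remote_password='remote-secret')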
def create_replication_interface(self, sp, ip_port, ip_address,
netmask=None, v6_prefix_length=None,
gateway=None, vlan_id=None):
"""
Creates a replication interface.
:param sp: `UnityStorageProcessor` object. Storage processor on which
the replication interface is running.
:param ip_port: `UnityIpPort` object. Physical port or link aggregation
on the storage processor on which the interface is running.
:param ip_address: IP address of the replication interface.
:param netmask: IPv4 netmask for the replication interface, if it uses
an IPv4 address.
:param v6_prefix_length: IPv6 prefix length for the interface, if it
uses an IPv6 address.
:param gateway: IPv4 or IPv6 gateway address for the replication
interface.
:param vlan_id: VLAN identifier for the interface.
        :return: the newly created replication interface.
"""
return UnityReplicationInterface.create(
self._cli, sp, ip_port, ip_address, netmask=netmask,
v6_prefix_length=v6_prefix_length, gateway=gateway,
vlan_id=vlan_id) | Creates a replication interface.
:param sp: `UnityStorageProcessor` object. Storage processor on which
the replication interface is running.
:param ip_port: `UnityIpPort` object. Physical port or link aggregation
on the storage processor on which the interface is running.
:param ip_address: IP address of the replication interface.
:param netmask: IPv4 netmask for the replication interface, if it uses
an IPv4 address.
:param v6_prefix_length: IPv6 prefix length for the interface, if it
uses an IPv6 address.
:param gateway: IPv4 or IPv6 gateway address for the replication
interface.
:param vlan_id: VLAN identifier for the interface.
        :return: the newly created replication interface. | entailment
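A sketch of creating an IPv4 replication interface, assuming `sp` and `port` were looked up on the same system object beforehand:

ri = unity.create_replication_interface(
    sp, port, '192.168.1.50',
    netmask='255.255.255.0', gateway='192.168.1.1')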
def geo2mag(incoord):
"""geographic coordinate to magnetic coordinate (coarse):
Parameters
----------
incoord : numpy.array of shape (2,*)
        array([[glat0,glat1,glat2,...],[glon0,glon1,glon2,...]]),
where glat, glon are geographic latitude and longitude
(or if you have only one point it is [[glat,glon]]).
Warnings
--------
Calculation of geomagnetic coordinates is approximate.
Coordinates are for a geomagnetic dipole, not the full field.
Location of geomagnetic dipole set for 2010.
Returns
-------
    array([[mlat0,mlat1,...],[mlon0,mlon1,...]])
Note
----
Routine is generally verified as coarsely correct using 2010 values from
http://wdc.kugi.kyoto-u.ac.jp/igrf/gggm/index.html.
Copied from https://stackoverflow.com/a/7949249
Examples
--------
mag = geo2mag(np.array([[80.08],[287.789]]))
print mag
print 'We should get [90,0]'
mag = geo2mag(np.array([[90],[0]]))
print mag
print 'We should get something close to [80.02, 180]'
# kyoto, japan
mag = geo2mag(np.array([[35.],[135.45]]))
print mag
print 'We should get something close to [25.87, -154.94]'
"""
from numpy import pi, cos, sin, arctan2, sqrt, dot
# SOME 'constants' for location of northern mag pole
lat = 80.08#79.3
lon = -72.211+360.#287.789 # or 71.41W
r = 1.0
# convert first to radians
    lon, lat = [x*pi/180 for x in (lon, lat)]
glat = incoord[0] * pi / 180.0
glon = incoord[1] * pi / 180.0
galt = glat * 0. + r
coord = np.vstack([glat,glon,galt])
# convert to rectangular coordinates
x = coord[2]*cos(coord[0])*cos(coord[1])
y = coord[2]*cos(coord[0])*sin(coord[1])
z = coord[2]*sin(coord[0])
xyz = np.vstack((x,y,z))
    # compute 1st rotation matrix:
geo2maglon = np.zeros((3,3), dtype='float64')
geo2maglon[0,0] = cos(lon)
geo2maglon[0,1] = sin(lon)
geo2maglon[1,0] = -sin(lon)
geo2maglon[1,1] = cos(lon)
geo2maglon[2,2] = 1.
out = dot(geo2maglon , xyz)
tomaglat = np.zeros((3,3), dtype='float64')
tomaglat[0,0] = cos(.5*pi-lat)
tomaglat[0,2] = -sin(.5*pi-lat)
tomaglat[2,0] = sin(.5*pi-lat)
tomaglat[2,2] = cos(.5*pi-lat)
tomaglat[1,1] = 1.
out = dot(tomaglat , out)
mlat = arctan2(out[2],
sqrt(out[0]*out[0] + out[1]*out[1]))
mlat = mlat * 180 / pi
mlon = arctan2(out[1], out[0])
mlon = mlon * 180 / pi
outcoord = np.vstack((mlat, mlon))
return outcoord | geographic coordinate to magnetic coordinate (coarse):
Parameters
----------
incoord : numpy.array of shape (2,*)
        array([[glat0,glat1,glat2,...],[glon0,glon1,glon2,...]]),
where glat, glon are geographic latitude and longitude
(or if you have only one point it is [[glat,glon]]).
Warnings
--------
Calculation of geomagnetic coordinates is approximate.
Coordinates are for a geomagnetic dipole, not the full field.
Location of geomagnetic dipole set for 2010.
Returns
-------
    array([[mlat0,mlat1,...],[mlon0,mlon1,...]])
Note
----
Routine is generally verified as coarsely correct using 2010 values from
http://wdc.kugi.kyoto-u.ac.jp/igrf/gggm/index.html.
Copied from https://stackoverflow.com/a/7949249
Examples
--------
mag = geo2mag(np.array([[80.08],[287.789]]))
print mag
print 'We should get [90,0]'
mag = geo2mag(np.array([[90],[0]]))
print mag
print 'We should get something close to [80.02, 180]'
# kyoto, japan
mag = geo2mag(np.array([[35.],[135.45]]))
print mag
print 'We should get something close to [25.87, -154.94]' | entailment |
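The docstring examples above use Python 2 print statements; the same checks in Python 3 syntax look like this (expected values are the approximate ones quoted in the docstring):

import numpy as np

mag = geo2mag(np.array([[80.08], [287.789]]))
print(mag)   # should be close to [[90], [0]]
mag = geo2mag(np.array([[90.], [0.]]))
print(mag)   # should be close to [[80.02], [180]]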
def restore(self, res_id, backup_snap=None):
"""
Restores a snapshot.
:param res_id: the LUN number of primary LUN or snapshot mount point to
be restored.
:param backup_snap: the name of a backup snapshot to be created before
restoring.
"""
name = self._get_name()
out = self._cli.restore_snap(name, res_id, backup_snap)
ex.raise_if_err(out, 'failed to restore snap {}.'.format(name),
default=ex.VNXSnapError) | Restores a snapshot.
:param res_id: the LUN number of primary LUN or snapshot mount point to
be restored.
:param backup_snap: the name of a backup snapshot to be created before
restoring. | entailment |
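A minimal hedged call, where `snap` is the snapshot object carrying the method and 12 is a placeholder primary LUN number:

# roll LUN 12 back to this snapshot, taking a safety snapshot first
snap.restore(12, backup_snap='before_restore')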
def list_files(tag='', sat_id=None, data_path=None, format_str=None):
"""Return a Pandas Series of every file for chosen SuperMAG data
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are 'indices', 'all',
'stations', and '' (for just magnetometer measurements). (default='')
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files
"""
if format_str is None and data_path is not None:
file_base = 'supermag_magnetometer'
if tag == "indices" or tag == "all":
file_base += '_all' # Can't just download indices
if tag == "indices":
psplit = path.split(data_path[:-1])
data_path = path.join(psplit[0], "all", "")
if tag == "stations":
min_fmt = '_'.join([file_base, '{year:4d}.???'])
doff = pds.DateOffset(years=1)
else:
min_fmt = '_'.join([file_base, '{year:4d}{month:02d}{day:02d}.???'])
doff = pds.DateOffset(days=1)
files = pysat.Files.from_os(data_path=data_path, format_str=min_fmt)
# station files are once per year but we need to
# create the illusion there is a file per year
if not files.empty:
files = files.sort_index()
if tag == "stations":
orig_files = files.copy()
new_files = []
# Assigns the validity of each station file to be 1 year
for orig in orig_files.iteritems():
files.ix[orig[0] + doff - pds.DateOffset(days=1)] = orig[1]
files = files.sort_index()
new_files.append(files.ix[orig[0]: orig[0] + doff - \
pds.DateOffset(days=1)].asfreq('D', method='pad'))
files = pds.concat(new_files)
files = files.dropna()
files = files.sort_index()
# add the date to the filename
files = files + '_' + files.index.strftime('%Y-%m-%d')
return files
elif format_str is None:
estr = 'A directory must be passed to the loading routine for SuperMAG'
        raise ValueError(estr)
else:
return pysat.Files.from_os(data_path=data_path, format_str=format_str) | Return a Pandas Series of every file for chosen SuperMAG data
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are 'indices', 'all',
'stations', and '' (for just magnetometer measurements). (default='')
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files | entailment |
def load(fnames, tag='', sat_id=None):
""" Load the SuperMAG files
Parameters
-----------
fnames : (list)
List of filenames
tag : (str or NoneType)
Denotes type of file to load. Accepted types are 'indices', 'all',
'stations', and '' (for just magnetometer measurements). (default='')
sat_id : (str or NoneType)
Satellite ID for constellations, not used. (default=None)
Returns
--------
data : (pandas.DataFrame)
Object containing satellite data
meta : (pysat.Meta)
Object containing metadata such as column names and units
"""
# Ensure that there are files to load
    if len(fnames) <= 0:
return pysat.DataFrame(None), pysat.Meta(None)
# Ensure that the files are in a list
if isinstance(fnames, str):
fnames = [fnames]
# Initialise the output data
data = pds.DataFrame()
baseline = list()
# Cycle through the files
for fname in fnames:
fname = fname[:-11] # Remove date index from end of filename
file_type = path.splitext(fname)[1].lower()
# Open and load the files for each file type
if file_type == ".csv":
if tag != "indices":
temp = load_csv_data(fname, tag)
else:
temp, bline = load_ascii_data(fname, tag)
if bline is not None:
baseline.append(bline)
# Save the loaded data in the output data structure
if len(temp.columns) > 0:
data = pds.concat([data, temp], axis=0)
del temp
# If data was loaded, update the meta data
if len(data.columns) > 0:
meta = pysat.Meta()
for cc in data.columns:
meta[cc] = update_smag_metadata(cc)
meta.info = {'baseline':format_baseline_list(baseline)}
else:
meta = pysat.Meta(None)
return data, meta | Load the SuperMAG files
Parameters
-----------
fnames : (list)
List of filenames
tag : (str or NoneType)
Denotes type of file to load. Accepted types are 'indices', 'all',
'stations', and '' (for just magnetometer measurements). (default='')
sat_id : (str or NoneType)
Satellite ID for constellations, not used. (default=None)
Returns
--------
data : (pandas.DataFrame)
Object containing satellite data
meta : (pysat.Meta)
Object containing metadata such as column names and units | entailment |
def load_csv_data(fname, tag):
"""Load data from a comma separated SuperMAG file
Parameters
------------
fname : (str)
CSV SuperMAG file name
tag : (str)
Denotes type of file to load. Accepted types are 'indices', 'all',
'stations', and '' (for just magnetometer measurements).
Returns
--------
data : (pandas.DataFrame)
Pandas DataFrame
"""
import re
if tag == "stations":
# Because there may be multiple operators, the default pandas reader
# cannot be used.
ddict = dict()
dkeys = list()
date_list = list()
# Open and read the file
with open(fname, "r") as fopen:
dtime = pds.datetime.strptime(fname.split("_")[-1].split(".")[0],
"%Y")
for fline in fopen.readlines():
sline = [ll for ll in re.split(r'[,\n]+', fline) if len(ll) > 0]
if len(ddict.items()) == 0:
for kk in sline:
kk = re.sub("-", "_", kk)
ddict[kk] = list()
dkeys.append(kk)
else:
date_list.append(dtime)
for i,ll in enumerate(sline):
if i >= 1 and i <= 4:
ddict[dkeys[i]].append(float(ll))
elif i == 6:
ddict[dkeys[i]].append(int(ll))
elif i < len(dkeys):
ddict[dkeys[i]].append(ll)
else:
ddict[dkeys[-1]][-1] += " {:s}".format(ll)
# Create a data frame for this file
data = pds.DataFrame(ddict, index=date_list, columns=ddict.keys())
else:
# Define the date parser
def parse_smag_date(dd):
return pysat.datetime.strptime(dd, "%Y-%m-%d %H:%M:%S")
# Load the file into a data frame
data = pds.read_csv(fname, parse_dates={'datetime':[0]},
date_parser=parse_smag_date, index_col='datetime')
return data | Load data from a comma separated SuperMAG file
Parameters
------------
fname : (str)
CSV SuperMAG file name
tag : (str)
Denotes type of file to load. Accepted types are 'indices', 'all',
'stations', and '' (for just magnetometer measurements).
Returns
--------
data : (pandas.DataFrame)
Pandas DataFrame | entailment |
def load_ascii_data(fname, tag):
"""Load data from a self-documenting ASCII SuperMAG file
Parameters
------------
fname : (str)
ASCII SuperMAG filename
tag : (str)
Denotes type of file to load. Accepted types are 'indices', 'all',
'stations', and '' (for just magnetometer measurements).
Returns
--------
data : (pandas.DataFrame)
Pandas DataFrame
baseline : (list)
List of strings denoting the presence of a standard and file-specific
        baselines for each file. None if not present or not applicable.
"""
import re
ndata = {"indices":2, "":4, "all":4, "stations":8}
dkeys = {'stations':list(), '':['IAGA', 'N', 'E', 'Z']}
data = pds.DataFrame(None)
baseline = None
# Ensure that the tag indicates a type of data we know how to load
if not tag in ndata.keys():
return data, baseline
# Read in the text data, processing the header, indices, and
# magnetometer data (as desired)
with open(fname, "r") as fopen:
# Set the processing flags
hflag = True # header lines
pflag = False # parameter line
dflag = False if tag == "stations" else True # date line
snum = 0 # number of magnetometer stations
ddict = dict()
date_list = list()
if tag == "stations":
dtime = pds.datetime.strptime(fname.split("_")[-1].split(".")[0],
"%Y")
for fline in fopen.readlines():
# Cycle past the header
line_len = len(fline)
if hflag:
if pflag:
pflag = False # Unset the flag
if fline.find("-mlt") > 0:
ndata[''] += 2
dkeys[''].extend(['MLT', 'MLAT'])
if fline.find("-sza") > 0:
ndata[''] += 1
dkeys[''].append('SZA')
if fline.find("-decl") > 0:
ndata[''] += 1
dkeys[''].append('IGRF_DECL')
if tag == "indices" and fline.find("-envelope") < 0:
# Indices not included in this file
break
# Save the baseline information
lsplit = fline.split()
idelta = lsplit.index('-delta') + 1
ibase = lsplit.index('-baseline') + 1
isd = lsplit.index('-sd') + 1
ist = lsplit.index('-st') + 1
iex = lsplit.index('-ex') + 1
baseline = " ".join([lsplit[ibase], lsplit[idelta],
lsplit[isd], lsplit[ist], lsplit[iex]])
if fline.find("Selected parameters:") >= 0:
pflag = True
if fline.count("=") == line_len - 1 and line_len > 2:
hflag = False
else:
# Load the desired data
lsplit = [ll for ll in re.split(r'[\t\n]+', fline)
if len(ll) > 0]
if dflag:
dflag = False # Unset the date flag
dstring = " ".join(lsplit[:6])
dtime = pysat.datetime.strptime(dstring,
"%Y %m %d %H %M %S")
snum = int(lsplit[6]) # Set the number of stations
# Load the times
if tag == "indices":
date_list.append(dtime)
else:
date_list.extend([dtime for i in range(snum)])
elif len(lsplit) == ndata['indices']:
                    if tag != '':
if lsplit[0] not in ddict.keys():
ddict[lsplit[0]] = list()
if tag == "indices":
ddict[lsplit[0]].append(int(lsplit[1]))
else:
# This works because indices occur before
# magnetometer measurements
ddict[lsplit[0]].extend([int(lsplit[1])
for i in range(snum)])
else:
if tag == "stations" and len(lsplit) >= ndata[tag]:
if len(dkeys[tag]) == 0:
# Station files include column names and data files
# do not. Read in the column names here
for ll in lsplit:
ll = re.sub("-", "_", ll)
dkeys[tag].append(ll)
ddict[ll] = list()
else:
# Because stations can have multiple operators,
# ndata supplies the minimum number of columns
date_list.append(dtime)
for i,ll in enumerate(lsplit):
if i >= 1 and i <= 4:
ddict[dkeys[tag][i]].append(float(ll))
elif i == 6:
ddict[dkeys[tag][i]].append(int(ll))
elif i < len(dkeys[tag]):
ddict[dkeys[tag][i]].append(ll)
else:
ddict[dkeys[tag][-1]][-1] += \
" {:s}".format(ll)
elif len(lsplit) == ndata['']:
snum -= 1 # Mark the ingestion of a station
if tag != "indices":
if len(ddict.keys()) < ndata['']:
for kk in dkeys['']:
ddict[kk] = list()
for i,kk in enumerate(dkeys['']):
if i == 0:
ddict[kk].append(lsplit[i])
else:
ddict[kk].append(float(lsplit[i]))
if tag != "stations" and snum == 0 and len(ddict.items()) >= 2:
# The previous value was the last value, prepare for
# next block
dflag = True
# Create a data frame for this file
data = pds.DataFrame(ddict, index=date_list, columns=ddict.keys())
fopen.close()
return data, baseline | Load data from a self-documenting ASCII SuperMAG file
Parameters
------------
fname : (str)
ASCII SuperMAG filename
tag : (str)
Denotes type of file to load. Accepted types are 'indices', 'all',
'stations', and '' (for just magnetometer measurements).
Returns
--------
data : (pandas.DataFrame)
Pandas DataFrame
baseline : (list)
List of strings denoting the presence of a standard and file-specific
        baselines for each file. None if not present or not applicable. | entailment
def update_smag_metadata(col_name):
"""Update SuperMAG metadata
Parameters
-----------
col_name : (str)
Data column name
Returns
--------
col_dict : (dict)
Dictionary of strings detailing the units and long-form name of the data
"""
smag_units = {'IAGA':'none', 'N':'nT', 'E':'nT', 'Z':'nT', 'MLT':'hours',
'MLAT':'degrees', 'SZA':'degrees', 'IGRF_DECL':'degrees',
'SMU':'none', 'SML':'none', 'datetime':'YYYY-MM-DD HH:MM:SS',
'GEOLON':'degrees', 'GEOLAT':'degrees', 'AACGMLON':'degrees',
'AACGMLAT':'degrees', 'STATION_NAME':'none',
'OPERATOR_NUM':'none', 'OPERATORS':'none'}
smag_name = {'IAGA':'Station Code', 'N':'B along local magnetic North',
'E':'B along local magnetic East', 'Z':'B vertically downward',
'MLT':'Magnetic Local Time', 'MLAT':'Magnetic Latitude',
'SZA':'Solar Zenith Angle',
'IGRF_DECL':'IGRF magnetic declination',
'SMU': 'Maximum eastward auroral electrojets strength.\n'
'Upper envelope of N-component for stations between 40 and '
'80 degrees magnetic north.',
'SML':'Maximum westward auroral electrojets strength.\n'
'Lower envelope of N-component for stations between 40 and 80'
' degrees magnetic north.', 'datetime':'UT date and time',
'GEOLON':'geographic longitude',
'GEOLAT':'geographic latitude',
'AACGMLON':'Altitude-Adjusted Corrected Geomagnetic longitude',
'AACGMLAT':'Altitude-Adjusted Corrected Geomagnetic latitude',
'STATION_NAME':'Long form station name',
'OPERATOR_NUM':'Number of station operators',
'OPERATORS':'Station operator name(s)',}
ackn = "When using this data please include the following reference:\n"
ackn += "Gjerloev, J. W., The SuperMAG data processing technique, "
ackn += "Geophys. Res., 117, A09213, doi:10.1029/2012JA017683, 2012\n\n"
ackn += "For publications and presentations, please include the following"
ackn += "acknowledgement:\nFor the ground magnetometer data we gratefully "
ackn += "acknowledge: Intermagnet; USGS, Jeffrey J. Love; CARISMA, PI Ian "
ackn += "Mann; CANMOS; The S-RAMP Database, PI K. Yumoto and Dr. K. "
ackn += "Shiokawa; The SPIDR database; AARI, PI Oleg Troshichev; The "
ackn += "MACCS program, PI M. Engebretson, Geomagnetism Unit of the "
ackn += "Geological Survey of Canada; GIMA; MEASURE, UCLA IGPP and Florida"
ackn += " Institute of Technology; SAMBA, PI Eftyhia Zesta; 210 Chain, PI "
ackn += "K. Yumoto; SAMNET, PI Farideh Honary; The institutes who maintain"
ackn += " the IMAGE magnetometer array, PI Eija Tanskanen; PENGUIN; AUTUMN,"
ackn += " PI Martin Connors; DTU Space, PI Dr. Rico Behlke; South Pole and "
ackn += " McMurdo Magnetometer, PI's Louis J. Lanzarotti and Alan T. "
ackn += "Weatherwax; ICESTAR; RAPIDMAG; PENGUIn; British Artarctic Survey; "
ackn += "McMac, PI Dr. Peter Chi; BGS, PI Dr. Susan Macmillan; Pushkov "
ackn += "Institute of Terrestrial Magnetism, Ionosphere and Radio Wave "
ackn += "Propagation (IZMIRAN); GFZ, PI Dr. Juergen Matzka; MFGI, PI B. "
ackn += "Heilig; IGFPAS, PI J. Reda; University of L’Aquila, PI M. "
ackn += "Vellante; BCMT, V. Lesur and A. Chambodut; Data obtained in "
ackn += "cooperation with Geoscience Australia, PI Marina Costelloe; "
ackn += "SuperMAG, PI Jesper W. Gjerloev."
col_dict = {'units':smag_units[col_name], 'long_name':smag_name[col_name],
'acknowledgements':ackn}
return col_dict | Update SuperMAG metadata
Parameters
-----------
col_name : (str)
Data column name
Returns
--------
col_dict : (dict)
Dictionary of strings detailing the units and long-form name of the data | entailment |
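A quick usage sketch; the column name must be one of the keys defined in the dictionaries above:

n_meta = update_smag_metadata('N')
print(n_meta['units'])       # nT
print(n_meta['long_name'])   # B along local magnetic North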
def format_baseline_list(baseline_list):
"""Format the list of baseline information from the loaded files into a
cohesive, informative string
Parameters
------------
baseline_list : (list)
List of strings specifying the baseline information for each
SuperMAG file
Returns
---------
base_string : (str)
Single string containing the relevent data
"""
uniq_base = dict()
uniq_delta = dict()
for bline in baseline_list:
bsplit = bline.split()
bdate = " ".join(bsplit[2:])
if bsplit[0] not in uniq_base.keys():
uniq_base[bsplit[0]] = ""
if bsplit[1] not in uniq_delta.keys():
uniq_delta[bsplit[1]] = ""
uniq_base[bsplit[0]] += "{:s}, ".format(bdate)
uniq_delta[bsplit[1]] += "{:s}, ".format(bdate)
if len(uniq_base.items()) == 1:
base_string = "Baseline {:s}".format(list(uniq_base.keys())[0])
else:
base_string = "Baseline "
for i,kk in enumerate(uniq_base.keys()):
if i == 1:
base_string += "{:s}: {:s}".format(kk, uniq_base[kk][:-2])
else:
base_string += " {:s}: {:s}".format(kk,
uniq_base[kk][:-2])
else:
base_string += "unknown"
if len(uniq_delta.items()) == 1:
base_string += "\nDelta {:s}".format(list(uniq_delta.keys())[0])
else:
base_string += "\nDelta "
for i,kk in enumerate(uniq_delta.keys()):
if i == 1:
base_string += "{:s}: {:s}".format(kk, uniq_delta[kk][:-2])
else:
base_string += " {:s}: {:s}".format(kk,
uniq_delta[kk][:-2])
else:
base_string += "unknown"
return base_string | Format the list of baseline information from the loaded files into a
cohesive, informative string
Parameters
------------
baseline_list : (list)
List of strings specifying the baseline information for each
SuperMAG file
Returns
---------
base_string : (str)
        Single string containing the relevant data | entailment
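An illustrative call with hand-written baseline strings in the 'baseline delta date...' layout produced by load_ascii_data (the dates are made up):

blines = ['all none 2014 01 01', 'all none 2014 01 02']
print(format_baseline_list(blines))
# prints 'Baseline all' and 'Delta none' on two lines, since both the
# baseline and delta values are unique across the inputs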
def download(date_array, tag, sat_id='', data_path=None, user=None,
password=None, baseline='all', delta='none', options='all',
file_fmt='ascii'):
"""Routine to download SuperMAG data
Parameters
-----------
date_array : np.array
Array of datetime objects
tag : string
String denoting the type of file to load, accepted values are 'indices',
'all', 'stations', and '' (for only magnetometer data)
sat_id : string
Not used (default='')
data_path : string or NoneType
Data path to save downloaded files to (default=None)
user : string or NoneType
SuperMAG requires user registration (default=None)
password : string or NoneType
Not used; SuperMAG does not require a password (default=None)
file_fmt : string
File format options: 'ascii' and 'csv'. (default='ascii')
baseline : string
Baseline to remove from magnetometer data. Options are 'all', 'yearly',
and 'none'. (default='all')
delta : string
Remove a value from the magnetometer data. Options are 'none', 'start',
and 'median'. (default='none')
options : string or NoneType
Additional parameter options for magnetometer data. Includes 'mlt'
(MLat and MLT), 'decl' (IGRF declination), 'sza' (Solar Zenith Angle),
'all', and None. (default='all')
Returns
-------
"""
import sys
import requests
global platform, name
max_stations = 470
if user is None:
raise ValueError('SuperMAG requires user registration')
remoteaccess = {'method':'http', 'host':'supermag.jhuapl.edu',
'path':'mag/lib/services', 'user':'user={:s}'.format(user),
'service':'service=', 'options':'options='}
remotefmt = "{method}://{host}/{path}/??{user}&{service}&{filefmt}&{start}"
# Set the tag information
if tag == "indices":
tag = "all"
if tag != "stations":
remotefmt += "&{interval}&{stations}&{delta}&{baseline}&{options}"
# Determine whether station or magnetometer data is requested
remoteaccess['service'] += tag if tag == "stations" else "mag"
# Add request for file type
file_fmt = file_fmt.lower()
if not file_fmt in ['ascii', 'csv']:
estr = "unknown file format [{:s}], using 'ascii'".format(file_fmt)
print("WARNING: {:s}".format(estr))
file_fmt = 'ascii'
remoteaccess['filefmt'] = 'fmt={:s}'.format(file_fmt)
# If indices are requested, add them now.
if not tag in [None, 'stations']:
remoteaccess['options'] += "+envelope"
# Add other download options (for non-station files)
if tag != "stations":
if options is not None:
options = options.lower()
            if options == 'all':
remoteaccess['options'] += "+mlt+sza+decl"
else:
remoteaccess['options'] += "+{:s}".format(options)
        # Add requests for baseline subtraction
baseline = baseline.lower()
if not baseline in ['all', 'yearly', 'none']:
estr = "unknown baseline [{:s}], using 'all'".format(baseline)
print("WARNING: {:s}".format(estr))
baseline = 'all'
remoteaccess['baseline'] = "baseline={:s}".format(baseline)
delta = delta.lower()
if not delta in ['none', 'median', 'start']:
estr = "unknown delta [{:s}], using 'none'".format(delta)
print("WARNING: {:s}".format(estr))
delta = 'none'
remoteaccess['delta'] = 'delta={:s}'.format(delta)
# Set the time information and format
remoteaccess['interval'] = "interval=23:59"
sfmt = "%Y-%m-%dT00:00:00.000"
tag_str = "_" if tag is None else "_all_"
ffmt = "{:s}_{:s}{:s}%Y%m%d.{:s}".format(platform, name, tag_str,
"txt" if file_fmt == "ascii"
else file_fmt)
start_str = "start="
else:
# Set the time format
sfmt = "%Y"
ffmt = "{:s}_{:s}_{:s}_%Y.{:s}".format(platform, name, tag,
"txt" if file_fmt == "ascii"
else file_fmt)
start_str = "year="
# Cycle through all of the dates, formatting them to achieve a unique set
# of times to download data
date_fmts = list(set([dd.strftime(sfmt) for dd in date_array]))
# Now that the unique dates are known, construct the file names
name_fmts = [None for dd in date_fmts]
for dd in date_array:
i = date_fmts.index(dd.strftime(sfmt))
name_fmts[i] = dd.strftime(ffmt)
if None in name_fmts:
raise ValueError("unable to construct all unique file names")
# Cycle through all of the unique dates. Stations lists are yearly and
# magnetometer data is daily
station_year = None
istr = 'SuperMAG {:s}'.format(tag if tag == "stations" else "data")
for i,date in enumerate(date_fmts):
print("Downloading {:s} for {:s}".format(istr, date.split("T")[0]))
sys.stdout.flush()
nreq = 1
# Add the start time and download period to query
remoteaccess['start'] = "{:s}{:s}".format(start_str, date)
if tag != "stations":
# Station lists are for each year, see if this year is loaded
current_date = pds.datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.000")
if current_date.year != station_year:
# Get all of the stations for this time
smag_stat = pysat.Instrument(platform=platform, name=name,
tag='stations')
# try to load data
smag_stat.load(date=current_date)
if smag_stat.empty:
# no data
etime = current_date + pds.DateOffset(days=1)
smag_stat.download(start=current_date, stop=etime,
user=user, password=password,
file_fmt=file_fmt)
smag_stat.load(date=current_date)
if smag_stat.empty:
# no data
estr = "unable to format station query for "
estr += "[{:d}]".format(current_date.year)
raise ValueError(estr)
# Format a string of the station names
if smag_stat.data.IAGA.shape[0] > max_stations:
station_year = current_date.year
nreq = int(np.ceil(smag_stat.data.IAGA.shape[0] /
float(max_stations)))
out = list()
for ireq in range(nreq):
if tag != "stations":
if station_year is None:
raise RuntimeError("unable to load station data")
stat_str = ",".join(smag_stat.data.IAGA[ireq*max_stations:
(ireq+1)*max_stations])
remoteaccess['stations'] = "stations={:s}".format(stat_str)
# Format the query
url = remotefmt.format(**remoteaccess)
# Set up a request
try:
# print (url)
result = requests.post(url)
result.encoding = 'ISO-8859-1'
# handle strings differently for python 2/3
if sys.version_info.major == 2:
out.append(str(result.text.encode('ascii', 'replace')))
else:
out.append(result.text)
except:
raise RuntimeError("unable to connect to [{:s}]".format(url))
# Test the result
if "requested URL was rejected" in out[-1]:
estr = "Requested url was rejected:\n{:s}".format(url)
raise RuntimeError(estr)
# Build the output file name
        fname = path.join(data_path, name_fmts[i])
# If more than one data pass was needed, append the files
if len(out) > 1:
out_data = append_data(out, file_fmt, tag)
else:
out_data = out[0]
# Save the file data
with open(fname, "w") as local_file:
local_file.write(out_data)
local_file.close()
del out_data
return | Routine to download SuperMAG data
Parameters
-----------
date_array : np.array
Array of datetime objects
tag : string
String denoting the type of file to load, accepted values are 'indices',
'all', 'stations', and '' (for only magnetometer data)
sat_id : string
Not used (default='')
data_path : string or NoneType
Data path to save downloaded files to (default=None)
user : string or NoneType
SuperMAG requires user registration (default=None)
password : string or NoneType
Not used; SuperMAG does not require a password (default=None)
file_fmt : string
File format options: 'ascii' and 'csv'. (default='ascii')
baseline : string
Baseline to remove from magnetometer data. Options are 'all', 'yearly',
and 'none'. (default='all')
delta : string
Remove a value from the magnetometer data. Options are 'none', 'start',
and 'median'. (default='none')
options : string or NoneType
Additional parameter options for magnetometer data. Includes 'mlt'
(MLat and MLT), 'decl' (IGRF declination), 'sza' (Solar Zenith Angle),
'all', and None. (default='all')
Returns
------- | entailment |
def append_data(file_strings, file_fmt, tag):
""" Load the SuperMAG files
Parameters
-----------
file_strings : array-like
Lists or arrays of strings, where each string contains one file of data
file_fmt : str
String denoting file type (ascii or csv)
tag : string
String denoting the type of file to load, accepted values are 'indices',
'all', 'stations', and '' (for only magnetometer data)
Returns
-------
out_string : string
String with all data, ready for output to a file
"""
# Determine the right appending routine for the file type
if file_fmt.lower() == "csv":
return append_csv_data(file_strings)
else:
return append_ascii_data(file_strings, tag) | Load the SuperMAG files
Parameters
-----------
file_strings : array-like
Lists or arrays of strings, where each string contains one file of data
file_fmt : str
String denoting file type (ascii or csv)
tag : string
String denoting the type of file to load, accepted values are 'indices',
'all', 'stations', and '' (for only magnetometer data)
Returns
-------
out_string : string
String with all data, ready for output to a file | entailment |
def append_ascii_data(file_strings, tag):
""" Append data from multiple files for the same time period
Parameters
-----------
file_strings : array-like
Lists or arrays of strings, where each string contains one file of data
tag : string
String denoting the type of file to load, accepted values are 'indices',
'all', 'stations', and None (for only magnetometer data)
Returns
-------
out_string : string
String with all data, ready for output to a file
"""
import re
# Start with data from the first list element
out_lines = file_strings[0].split('\n')
iparam = -1 # Index for the parameter line
ihead = -1 # Index for the last header line
idates = list() # Indices for the date lines
date_list = list() # List of dates
num_stations = list() # Number of stations for each date line
ind_num = 2 if tag in ['all', 'indices', ''] else 0
# ind_num = 2 if tag == '' else ind_num
# Find the index information for the data
for i,line in enumerate(out_lines):
if line == "Selected parameters:":
iparam = i + 1
elif line.count("=") == len(line) and len(line) > 2:
ihead = i
break
# Find the time indices and number of stations for each date line
i = ihead + 1
while i < len(out_lines) - 1:
idates.append(i)
lsplit = re.split('\t+', out_lines[i])
dtime = pds.datetime.strptime(" ".join(lsplit[0:-1]),
"%Y %m %d %H %M %S")
date_list.append(dtime)
num_stations.append(int(lsplit[-1]))
i += num_stations[-1] + 1 + ind_num
idates = np.array(idates)
# Initialize a list of station names
station_names = list()
# Cycle through each additional set of file strings
for ff in range(len(file_strings)-1):
file_lines = file_strings[ff+1].split('\n')
# Find the index information for the data
head = True
snum = 0
for i,line in enumerate(file_lines):
if head:
if line.count("=") == len(line) and len(line) > 2:
head = False
elif len(line) > 0:
lsplit = re.split('\t+', line)
if snum == 0:
dtime = pds.datetime.strptime(" ".join(lsplit[0:-1]),
"%Y %m %d %H %M %S")
try:
idate = date_list.index(dtime)
                    except ValueError:
# SuperMAG outputs date lines regardless of the
# number of stations. These files shouldn't be
# appended together.
raise ValueError("Unexpected date ", dtime)
snum = int(lsplit[-1])
onum = num_stations[idate]
inum = ind_num
# Adjust reference data for new number of station lines
idates[idate+1:] += snum
num_stations[idate] += snum
# Adjust date line for new number of station lines
oline = "{:s}\t{:d}".format( \
dtime.strftime("%Y\t%m\t%d\t%H\t%M\t%S"),
num_stations[idate])
out_lines[idates[idate]] = oline
else:
if inum > 0:
inum -= 1
else:
# Insert the station line to the end of the date section
onum += 1
snum -= 1
out_lines.insert(idates[idate]+onum, line)
# Save the station name to update the parameter line
if not lsplit[0] in station_names:
station_names.append(lsplit[0])
# Update the parameter line
out_lines[iparam] += "," + ",".join(station_names)
# Join the output lines into a single string
out_string = "\n".join(out_lines)
return out_string | Append data from multiple files for the same time period
Parameters
-----------
file_strings : array-like
Lists or arrays of strings, where each string contains one file of data
tag : string
String denoting the type of file to load, accepted values are 'indices',
'all', 'stations', and None (for only magnetometer data)
Returns
-------
out_string : string
String with all data, ready for output to a file | entailment |
def append_csv_data(file_strings):
""" Append data from multiple csv files for the same time period
Parameters
-----------
file_strings : array-like
Lists or arrays of strings, where each string contains one file of data
Returns
-------
out_string : string
String with all data, ready for output to a file
"""
# Start with data from the first list element
out_lines = list()
head_line = None
# Cycle through the lists of file strings, creating a list of line strings
for fstrings in file_strings:
file_lines = fstrings.split('\n')
# Remove and save the header line
head_line = file_lines.pop(0)
# Save the data lines
out_lines.extend(file_lines)
# Sort the output lines by date and station (first two columns) in place
out_lines.sort()
# Remove all zero-length lines from front, add one to back, and add header
i = 0
while len(out_lines[i]) == 0:
out_lines.pop(i)
out_lines.insert(0, head_line)
out_lines.append('')
# Join the output lines into a single string
out_string = "\n".join(out_lines)
return out_string | Append data from multiple csv files for the same time period
Parameters
-----------
file_strings : array-like
Lists or arrays of strings, where each string contains one file of data
Returns
-------
out_string : string
String with all data, ready for output to a file | entailment |
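A small self-contained example of joining two CSV chunks that share a header row (the data lines are dummies):

csv_a = 'Date_UTC,IAGA,N,E,Z\n2014-01-01 00:00:00,AAA,1.0,2.0,3.0\n'
csv_b = 'Date_UTC,IAGA,N,E,Z\n2014-01-01 00:00:00,BBB,4.0,5.0,6.0\n'
merged = append_csv_data([csv_a, csv_b])
print(merged.splitlines()[0])   # the shared header appears once, at the top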
def init(self):
"""
Adds custom calculations to orbit simulation.
This routine is run once, and only once, upon instantiation.
    Adds quasi-dipole coordinates, velocity calculation in ECEF coords,
    adds the attitude vectors of spacecraft assuming x is ram pointing and
    z is generally nadir, adds ionospheric parameters from the International
    Reference Ionosphere (IRI), as well as simulated winds from the
    Horizontal Wind Model (HWM).
"""
self.custom.add(add_quasi_dipole_coordinates, 'modify')
self.custom.add(add_aacgm_coordinates, 'modify')
self.custom.add(calculate_ecef_velocity, 'modify')
self.custom.add(add_sc_attitude_vectors, 'modify')
self.custom.add(add_iri_thermal_plasma, 'modify')
self.custom.add(add_hwm_winds_and_ecef_vectors, 'modify')
self.custom.add(add_igrf, 'modify')
# project simulated vectors onto s/c basis
# IGRF
# create metadata to be added along with vector projection
in_meta = {'desc':'IGRF geomagnetic field expressed in the s/c basis.',
'units':'nT'}
# project IGRF
self.custom.add(project_ecef_vector_onto_sc, 'modify', 'end', 'B_ecef_x', 'B_ecef_y', 'B_ecef_z',
'B_sc_x', 'B_sc_y', 'B_sc_z',
meta=[in_meta.copy(), in_meta.copy(), in_meta.copy()])
# project total wind vector
self.custom.add(project_hwm_onto_sc, 'modify')
# neutral parameters
self.custom.add(add_msis, 'modify') | Adds custom calculations to orbit simulation.
This routine is run once, and only once, upon instantiation.
    Adds quasi-dipole coordinates, velocity calculation in ECEF coords,
    adds the attitude vectors of spacecraft assuming x is ram pointing and
    z is generally nadir, adds ionospheric parameters from the International
    Reference Ionosphere (IRI), as well as simulated winds from the
    Horizontal Wind Model (HWM). | entailment
def load(fnames, tag=None, sat_id=None, obs_long=0., obs_lat=0., obs_alt=0.,
TLE1=None, TLE2=None):
"""
Returns data and metadata in the format required by pysat. Finds position
of satellite in both ECI and ECEF co-ordinates.
Routine is directly called by pysat and not the user.
Parameters
----------
fnames : list-like collection
File name that contains date in its name.
tag : string
Identifies a particular subset of satellite data
sat_id : string
Satellite ID
obs_long: float
Longitude of the observer on the Earth's surface
obs_lat: float
Latitude of the observer on the Earth's surface
obs_alt: float
Altitude of the observer on the Earth's surface
TLE1 : string
First string for Two Line Element. Must be in TLE format
TLE2 : string
Second string for Two Line Element. Must be in TLE format
Example
-------
inst = pysat.Instrument('pysat', 'sgp4',
TLE1='1 25544U 98067A 18135.61844383 .00002728 00000-0 48567-4 0 9998',
TLE2='2 25544 51.6402 181.0633 0004018 88.8954 22.2246 15.54059185113452')
inst.load(2018, 1)
"""
import sgp4
# wgs72 is the most commonly used gravity model in satellite tracking community
from sgp4.earth_gravity import wgs72
from sgp4.io import twoline2rv
import ephem
import pysatMagVect
# TLEs (Two Line Elements for ISS)
# format of TLEs is fixed and available from wikipedia...
# lines encode list of orbital elements of an Earth-orbiting object
# for a given point in time
line1 = ('1 25544U 98067A 18135.61844383 .00002728 00000-0 48567-4 0 9998')
line2 = ('2 25544 51.6402 181.0633 0004018 88.8954 22.2246 15.54059185113452')
# use ISS defaults if not provided by user
if TLE1 is not None:
line1 = TLE1
if TLE2 is not None:
line2 = TLE2
# create satellite from TLEs and assuming a gravity model
# according to module webpage, wgs72 is common
satellite = twoline2rv(line1, line2, wgs72)
# grab date from filename
parts = os.path.split(fnames[0])[-1].split('-')
yr = int(parts[0])
month = int(parts[1])
day = int(parts[2][0:2])
date = pysat.datetime(yr, month, day)
# create timing at 1 Hz (for 1 day)
times = pds.date_range(start=date, end=date+pds.DateOffset(seconds=86399), freq='1S')
# reduce requirements if on testing server
# TODO Remove this when testing resources are higher
on_travis = os.environ.get('ONTRAVIS') == 'True'
if on_travis:
times = times[0:100]
# create list to hold satellite position, velocity
position = []
velocity = []
for time in times:
# orbit propagator - computes x,y,z position and velocity
pos, vel = satellite.propagate(time.year, time.month, time.day,
time.hour, time.minute, time.second)
# print (pos)
position.extend(pos)
velocity.extend(vel)
# put data into DataFrame
data = pysat.DataFrame({'position_eci_x': position[::3],
'position_eci_y': position[1::3],
'position_eci_z': position[2::3],
'velocity_eci_x': velocity[::3],
'velocity_eci_y': velocity[1::3],
'velocity_eci_z': velocity[2::3]},
index=times)
data.index.name = 'Epoch'
# add position and velocity in ECEF
# add call for GEI/ECEF translation here
# instead, since available, I'll use an orbit predictor from another
# package that outputs in ECEF
# it also supports ground station calculations
# the observer's (ground station) position on the Earth surface
site = ephem.Observer()
site.lon = str(obs_long)
site.lat = str(obs_lat)
site.elevation = obs_alt
# The first parameter in readtle() is the satellite name
    sat = ephem.readtle('pysat', line1, line2)
output_params = []
for time in times:
lp = {}
site.date = time
sat.compute(site)
# parameters relative to the ground station
lp['obs_sat_az_angle'] = ephem.degrees(sat.az)
lp['obs_sat_el_angle'] = ephem.degrees(sat.alt)
# total distance away
lp['obs_sat_slant_range'] = sat.range
# satellite location
# sub latitude point
lp['glat'] = np.degrees(sat.sublat)
# sublongitude point
lp['glong'] = np.degrees(sat.sublong)
# elevation of sat in m, stored as km
lp['alt'] = sat.elevation/1000.
# get ECEF position of satellite
lp['x'], lp['y'], lp['z'] = pysatMagVect.geodetic_to_ecef(lp['glat'], lp['glong'], lp['alt'])
output_params.append(lp)
output = pds.DataFrame(output_params, index=times)
# modify input object to include calculated parameters
data[['glong', 'glat', 'alt']] = output[['glong', 'glat', 'alt']]
data[['position_ecef_x', 'position_ecef_y', 'position_ecef_z']] = output[['x', 'y', 'z']]
data['obs_sat_az_angle'] = output['obs_sat_az_angle']
data['obs_sat_el_angle'] = output['obs_sat_el_angle']
data['obs_sat_slant_range'] = output['obs_sat_slant_range']
return data, meta.copy() | Returns data and metadata in the format required by pysat. Finds position
of satellite in both ECI and ECEF co-ordinates.
Routine is directly called by pysat and not the user.
Parameters
----------
fnames : list-like collection
File name that contains date in its name.
tag : string
Identifies a particular subset of satellite data
sat_id : string
Satellite ID
obs_long: float
Longitude of the observer on the Earth's surface
obs_lat: float
Latitude of the observer on the Earth's surface
obs_alt: float
Altitude of the observer on the Earth's surface
TLE1 : string
First string for Two Line Element. Must be in TLE format
TLE2 : string
Second string for Two Line Element. Must be in TLE format
Example
-------
inst = pysat.Instrument('pysat', 'sgp4',
TLE1='1 25544U 98067A 18135.61844383 .00002728 00000-0 48567-4 0 9998',
TLE2='2 25544 51.6402 181.0633 0004018 88.8954 22.2246 15.54059185113452')
inst.load(2018, 1) | entailment |
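A small illustrative sketch (made-up numbers, not part of the instrument module) of how the flat position and velocity lists built with extend() above are unpacked into x, y, z columns using stride slicing:
# propagate() returns (x, y, z) tuples; extend() flattens them into one list
position = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]   # [x0, y0, z0, x1, y1, z1]
xs = position[::3]    # [1.0, 4.0]
ys = position[1::3]   # [2.0, 5.0]
zs = position[2::3]   # [3.0, 6.0]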
def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
"""Produce a fake list of files spanning a year"""
index = pds.date_range(pysat.datetime(2017,12,1), pysat.datetime(2018,12,1))
    # file list is effectively just the date in string format - '%D' works only on Mac; '%x' works on both Windows and Mac
names = [ data_path+date.strftime('%Y-%m-%d')+'.nofile' for date in index]
return pysat.Series(names, index=index) | Produce a fake list of files spanning a year | entailment |
def add_sc_attitude_vectors(inst):
"""
Add attitude vectors for spacecraft assuming ram pointing.
Presumes spacecraft is pointed along the velocity vector (x), z is
generally nadir pointing (positive towards Earth), and y completes the
right handed system (generally southward).
Notes
-----
Expects velocity and position of spacecraft in Earth Centered
Earth Fixed (ECEF) coordinates to be in the instrument object
and named velocity_ecef_* (*=x,y,z) and position_ecef_* (*=x,y,z)
Adds attitude vectors for spacecraft in the ECEF basis by calculating the scalar
product of each attitude vector with each component of ECEF.
Parameters
----------
inst : pysat.Instrument
Instrument object
Returns
-------
None
Modifies pysat.Instrument object in place to include S/C attitude unit
vectors, expressed in ECEF basis. Vectors are named sc_(x,y,z)hat_ecef_(x,y,z).
sc_xhat_ecef_x is the spacecraft unit vector along x (positive along velocity vector)
reported in ECEF, ECEF x-component.
"""
import pysatMagVect
# ram pointing is along velocity vector
inst['sc_xhat_ecef_x'], inst['sc_xhat_ecef_y'], inst['sc_xhat_ecef_z'] = \
pysatMagVect.normalize_vector(inst['velocity_ecef_x'], inst['velocity_ecef_y'], inst['velocity_ecef_z'])
# begin with z along Nadir (towards Earth)
# if orbit isn't perfectly circular, then the s/c z vector won't
# point exactly along nadir. However, nadir pointing is close enough
# to the true z (in the orbital plane) that we can use it to get y,
# and use x and y to get the real z
inst['sc_zhat_ecef_x'], inst['sc_zhat_ecef_y'], inst['sc_zhat_ecef_z'] = \
pysatMagVect.normalize_vector(-inst['position_ecef_x'], -inst['position_ecef_y'], -inst['position_ecef_z'])
# get y vector assuming right hand rule
# Z x X = Y
inst['sc_yhat_ecef_x'], inst['sc_yhat_ecef_y'], inst['sc_yhat_ecef_z'] = \
pysatMagVect.cross_product(inst['sc_zhat_ecef_x'], inst['sc_zhat_ecef_y'], inst['sc_zhat_ecef_z'],
inst['sc_xhat_ecef_x'], inst['sc_xhat_ecef_y'], inst['sc_xhat_ecef_z'])
# normalize since Xhat and Zhat from above may not be orthogonal
inst['sc_yhat_ecef_x'], inst['sc_yhat_ecef_y'], inst['sc_yhat_ecef_z'] = \
pysatMagVect.normalize_vector(inst['sc_yhat_ecef_x'], inst['sc_yhat_ecef_y'], inst['sc_yhat_ecef_z'])
# strictly, need to recalculate Zhat so that it is consistent with RHS
# just created
# Z = X x Y
inst['sc_zhat_ecef_x'], inst['sc_zhat_ecef_y'], inst['sc_zhat_ecef_z'] = \
pysatMagVect.cross_product(inst['sc_xhat_ecef_x'], inst['sc_xhat_ecef_y'], inst['sc_xhat_ecef_z'],
inst['sc_yhat_ecef_x'], inst['sc_yhat_ecef_y'], inst['sc_yhat_ecef_z'])
# Adding metadata
inst.meta['sc_xhat_ecef_x'] = {'units':'',
'desc':'S/C attitude (x-direction, ram) unit vector, expressed in ECEF basis, x-component'}
inst.meta['sc_xhat_ecef_y'] = {'units':'',
'desc':'S/C attitude (x-direction, ram) unit vector, expressed in ECEF basis, y-component'}
inst.meta['sc_xhat_ecef_z'] = {'units':'',
'desc':'S/C attitude (x-direction, ram) unit vector, expressed in ECEF basis, z-component'}
inst.meta['sc_zhat_ecef_x'] = {'units':'',
'desc':'S/C attitude (z-direction, generally nadir) unit vector, expressed in ECEF basis, x-component'}
inst.meta['sc_zhat_ecef_y'] = {'units':'',
'desc':'S/C attitude (z-direction, generally nadir) unit vector, expressed in ECEF basis, y-component'}
inst.meta['sc_zhat_ecef_z'] = {'units':'',
'desc':'S/C attitude (z-direction, generally nadir) unit vector, expressed in ECEF basis, z-component'}
inst.meta['sc_yhat_ecef_x'] = {'units':'',
'desc':'S/C attitude (y-direction, generally south) unit vector, expressed in ECEF basis, x-component'}
inst.meta['sc_yhat_ecef_y'] = {'units':'',
'desc':'S/C attitude (y-direction, generally south) unit vector, expressed in ECEF basis, y-component'}
inst.meta['sc_yhat_ecef_z'] = {'units':'',
'desc':'S/C attitude (y-direction, generally south) unit vector, expressed in ECEF basis, z-component'}
# check what magnitudes we get
mag = np.sqrt(inst['sc_zhat_ecef_x']**2 + inst['sc_zhat_ecef_y']**2 +
inst['sc_zhat_ecef_z']**2)
idx, = np.where( (mag < .999999999) | (mag > 1.000000001))
if len(idx) > 0:
print (mag[idx])
        raise RuntimeError('Unit vector generation failure. Not sufficiently orthogonal.')
return | Add attitude vectors for spacecraft assuming ram pointing.
Presumes spacecraft is pointed along the velocity vector (x), z is
generally nadir pointing (positive towards Earth), and y completes the
right handed system (generally southward).
Notes
-----
Expects velocity and position of spacecraft in Earth Centered
Earth Fixed (ECEF) coordinates to be in the instrument object
and named velocity_ecef_* (*=x,y,z) and position_ecef_* (*=x,y,z)
Adds attitude vectors for spacecraft in the ECEF basis by calculating the scalar
product of each attitude vector with each component of ECEF.
Parameters
----------
inst : pysat.Instrument
Instrument object
Returns
-------
None
Modifies pysat.Instrument object in place to include S/C attitude unit
vectors, expressed in ECEF basis. Vectors are named sc_(x,y,z)hat_ecef_(x,y,z).
sc_xhat_ecef_x is the spacecraft unit vector along x (positive along velocity vector)
reported in ECEF, ECEF x-component. | entailment |
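A minimal single-sample sketch of the same basis construction using plain numpy in place of pysatMagVect; the position and velocity values are made up and serve only to illustrate the normalize / cross / re-orthogonalize sequence:
import numpy as np
vel = np.array([0.0, 7.5, 1.0])      # km/s, ECEF (made up)
pos = np.array([7000.0, 0.0, 0.0])   # km, ECEF (made up)
xhat = vel / np.linalg.norm(vel)     # ram direction
zhat = -pos / np.linalg.norm(pos)    # roughly nadir
yhat = np.cross(zhat, xhat)          # Z x X = Y
yhat /= np.linalg.norm(yhat)
zhat = np.cross(xhat, yhat)          # re-derive Z so the set is orthonormal
assert abs(np.linalg.norm(zhat) - 1.0) < 1e-9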
def calculate_ecef_velocity(inst):
"""
Calculates spacecraft velocity in ECEF frame.
Presumes that the spacecraft velocity in ECEF is in
the input instrument object as position_ecef_*. Uses a symmetric
difference to calculate the velocity thus endpoints will be
set to NaN. Routine should be run using pysat data padding feature
to create valid end points.
Parameters
----------
inst : pysat.Instrument
Instrument object
Returns
-------
None
Modifies pysat.Instrument object in place to include ECEF velocity
using naming scheme velocity_ecef_* (*=x,y,z)
"""
x = inst['position_ecef_x']
vel_x = (x.values[2:] - x.values[0:-2])/2.
y = inst['position_ecef_y']
vel_y = (y.values[2:] - y.values[0:-2])/2.
z = inst['position_ecef_z']
vel_z = (z.values[2:] - z.values[0:-2])/2.
inst[1:-1, 'velocity_ecef_x'] = vel_x
inst[1:-1, 'velocity_ecef_y'] = vel_y
inst[1:-1, 'velocity_ecef_z'] = vel_z
inst.meta['velocity_ecef_x'] = {'units':'km/s',
'desc':'Velocity of satellite calculated with respect to ECEF frame.'}
inst.meta['velocity_ecef_y'] = {'units':'km/s',
'desc':'Velocity of satellite calculated with respect to ECEF frame.'}
inst.meta['velocity_ecef_z'] = {'units':'km/s',
'desc':'Velocity of satellite calculated with respect to ECEF frame.'}
return | Calculates spacecraft velocity in ECEF frame.
Presumes that the spacecraft velocity in ECEF is in
the input instrument object as position_ecef_*. Uses a symmetric
difference to calculate the velocity thus endpoints will be
set to NaN. Routine should be run using pysat data padding feature
to create valid end points.
Parameters
----------
inst : pysat.Instrument
Instrument object
Returns
-------
None
Modifies pysat.Instrument object in place to include ECEF velocity
using naming scheme velocity_ecef_* (*=x,y,z) | entailment |
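A sketch of the symmetric difference in plain numpy, assuming the 1 Hz sampling used by this simulated instrument so that dividing by two sample steps yields km/s directly:
import numpy as np
x = np.array([7000.0, 7000.5, 7001.2, 7002.1])   # km, made-up ECEF positions at 1 s spacing
vel = np.full_like(x, np.nan)                    # endpoints stay NaN, as above
vel[1:-1] = (x[2:] - x[:-2]) / 2.                # (x[i+1] - x[i-1]) / (2 * 1 s)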
def add_quasi_dipole_coordinates(inst, glat_label='glat', glong_label='glong',
alt_label='alt'):
"""
Uses Apexpy package to add quasi-dipole coordinates to instrument object.
The Quasi-Dipole coordinate system includes both the tilt and offset of the
geomagnetic field to calculate the latitude, longitude, and local time
of the spacecraft with respect to the geomagnetic field.
This system is preferred over AACGM near the equator for LEO satellites.
Example
-------
    # function added below modifies the inst object upon every inst.load call
inst.custom.add(add_quasi_dipole_coordinates, 'modify', glat_label='custom_label')
Parameters
----------
inst : pysat.Instrument
Designed with pysat_sgp4 in mind
glat_label : string
label used in inst to identify WGS84 geodetic latitude (degrees)
glong_label : string
label used in inst to identify WGS84 geodetic longitude (degrees)
alt_label : string
label used in inst to identify WGS84 geodetic altitude (km, height above surface)
Returns
-------
inst
Input pysat.Instrument object modified to include quasi-dipole coordinates, 'qd_lat'
for magnetic latitude, 'qd_long' for longitude, and 'mlt' for magnetic local time.
"""
import apexpy
ap = apexpy.Apex(date=inst.date)
qd_lat = []; qd_lon = []; mlt = []
for lat, lon, alt, time in zip(inst[glat_label], inst[glong_label], inst[alt_label],
inst.data.index):
# quasi-dipole latitude and longitude from geodetic coords
tlat, tlon = ap.geo2qd(lat, lon, alt)
qd_lat.append(tlat)
qd_lon.append(tlon)
mlt.append(ap.mlon2mlt(tlon, time))
inst['qd_lat'] = qd_lat
inst['qd_long'] = qd_lon
inst['mlt'] = mlt
inst.meta['qd_lat'] = {'units':'degrees','long_name':'Quasi dipole latitude'}
inst.meta['qd_long'] = {'units':'degrees','long_name':'Quasi dipole longitude'}
inst.meta['qd_mlt'] = {'units':'hrs','long_name':'Magnetic local time'}
return | Uses Apexpy package to add quasi-dipole coordinates to instrument object.
The Quasi-Dipole coordinate system includes both the tilt and offset of the
geomagnetic field to calculate the latitude, longitude, and local time
of the spacecraft with respect to the geomagnetic field.
This system is preferred over AACGM near the equator for LEO satellites.
Example
-------
# function added below modifies the inst object upon every inst.load call
inst.custom.add(add_quasi_dipole_coordinates, 'modify', glat_label='custom_label')
Parameters
----------
inst : pysat.Instrument
Designed with pysat_sgp4 in mind
glat_label : string
label used in inst to identify WGS84 geodetic latitude (degrees)
glong_label : string
label used in inst to identify WGS84 geodetic longitude (degrees)
alt_label : string
label used in inst to identify WGS84 geodetic altitude (km, height above surface)
Returns
-------
inst
Input pysat.Instrument object modified to include quasi-dipole coordinates, 'qd_lat'
for magnetic latitude, 'qd_long' for longitude, and 'mlt' for magnetic local time. | entailment |
def add_aacgm_coordinates(inst, glat_label='glat', glong_label='glong',
alt_label='alt'):
"""
Uses AACGMV2 package to add AACGM coordinates to instrument object.
The Altitude Adjusted Corrected Geomagnetic Coordinates library is used
to calculate the latitude, longitude, and local time
of the spacecraft with respect to the geomagnetic field.
Example
-------
    # function added below modifies the inst object upon every inst.load call
inst.custom.add(add_quasi_dipole_coordinates, 'modify', glat_label='custom_label')
Parameters
----------
inst : pysat.Instrument
Designed with pysat_sgp4 in mind
glat_label : string
label used in inst to identify WGS84 geodetic latitude (degrees N)
glong_label : string
label used in inst to identify WGS84 geodetic longitude (degrees E)
alt_label : string
label used in inst to identify WGS84 geodetic altitude (km, height above surface)
Returns
-------
inst
        Input pysat.Instrument object modified to include AACGM coordinates, 'aacgm_lat'
for magnetic latitude, 'aacgm_long' for longitude, and 'aacgm_mlt' for magnetic local time.
"""
import aacgmv2
aalat = []; aalon = []; mlt = []
for lat, lon, alt, time in zip(inst[glat_label], inst[glong_label], inst[alt_label],
inst.data.index):
# aacgmv2 latitude and longitude from geodetic coords
tlat, tlon, tmlt = aacgmv2.get_aacgm_coord(lat, lon, alt, time)
aalat.append(tlat)
aalon.append(tlon)
mlt.append(tmlt)
inst['aacgm_lat'] = aalat
inst['aacgm_long'] = aalon
inst['aacgm_mlt'] = mlt
inst.meta['aacgm_lat'] = {'units':'degrees','long_name':'AACGM latitude'}
inst.meta['aacgm_long'] = {'units':'degrees','long_name':'AACGM longitude'}
inst.meta['aacgm_mlt'] = {'units':'hrs','long_name':'AACGM Magnetic local time'}
return | Uses AACGMV2 package to add AACGM coordinates to instrument object.
The Altitude Adjusted Corrected Geomagnetic Coordinates library is used
to calculate the latitude, longitude, and local time
of the spacecraft with respect to the geomagnetic field.
Example
-------
# function added below modifies the inst object upon every inst.load call
inst.custom.add(add_quasi_dipole_coordinates, 'modify', glat_label='custom_label')
Parameters
----------
inst : pysat.Instrument
Designed with pysat_sgp4 in mind
glat_label : string
label used in inst to identify WGS84 geodetic latitude (degrees N)
glong_label : string
label used in inst to identify WGS84 geodetic longitude (degrees E)
alt_label : string
label used in inst to identify WGS84 geodetic altitude (km, height above surface)
Returns
-------
inst
Input pysat.Instrument object modified to include AACGM coordinates, 'aacgm_lat'
for magnetic latitude, 'aacgm_long' for longitude, and 'aacgm_mlt' for magnetic local time. | entailment |
def add_iri_thermal_plasma(inst, glat_label='glat', glong_label='glong',
alt_label='alt'):
"""
Uses IRI (International Reference Ionosphere) model to simulate an ionosphere.
Uses pyglow module to run IRI. Configured to use actual solar parameters to run
model.
Example
-------
    # function added below modifies the inst object upon every inst.load call
inst.custom.add(add_iri_thermal_plasma, 'modify', glat_label='custom_label')
Parameters
----------
inst : pysat.Instrument
Designed with pysat_sgp4 in mind
glat_label : string
label used in inst to identify WGS84 geodetic latitude (degrees)
glong_label : string
label used in inst to identify WGS84 geodetic longitude (degrees)
alt_label : string
label used in inst to identify WGS84 geodetic altitude (km, height above surface)
Returns
-------
inst
Input pysat.Instrument object modified to include thermal plasma parameters.
'ion_temp' for ion temperature in Kelvin
'e_temp' for electron temperature in Kelvin
'ion_dens' for the total ion density (O+ and H+)
'frac_dens_o' for the fraction of total density that is O+
'frac_dens_h' for the fraction of total density that is H+
"""
import pyglow
from pyglow.pyglow import Point
iri_params = []
# print 'IRI Simulations'
for time,lat,lon,alt in zip(inst.data.index, inst[glat_label], inst[glong_label], inst[alt_label]):
# Point class is instantiated. Its parameters are a function of time and spatial location
pt = Point(time,lat,lon,alt)
pt.run_iri()
iri = {}
# After the model is run, its members like Ti, ni[O+], etc. can be accessed
iri['ion_temp'] = pt.Ti
iri['e_temp'] = pt.Te
iri['ion_dens'] = pt.ni['O+'] + pt.ni['H+'] + pt.ni['HE+']#pt.ne - pt.ni['NO+'] - pt.ni['O2+'] - pt.ni['HE+']
iri['frac_dens_o'] = pt.ni['O+']/iri['ion_dens']
iri['frac_dens_h'] = pt.ni['H+']/iri['ion_dens']
iri['frac_dens_he'] = pt.ni['HE+']/iri['ion_dens']
iri_params.append(iri)
# print 'Complete.'
iri = pds.DataFrame(iri_params)
iri.index = inst.data.index
inst[iri.keys()] = iri
inst.meta['ion_temp'] = {'units':'Kelvin','long_name':'Ion Temperature'}
inst.meta['ion_dens'] = {'units':'N/cc','long_name':'Ion Density',
'desc':'Total ion density including O+ and H+ from IRI model run.'}
inst.meta['frac_dens_o'] = {'units':'','long_name':'Fractional O+ Density'}
inst.meta['frac_dens_h'] = {'units':'','long_name':'Fractional H+ Density'} | Uses IRI (International Reference Ionosphere) model to simulate an ionosphere.
Uses pyglow module to run IRI. Configured to use actual solar parameters to run
model.
Example
-------
# function added below modifies the inst object upon every inst.load call
inst.custom.add(add_iri_thermal_plasma, 'modify', glat_label='custom_label')
Parameters
----------
inst : pysat.Instrument
Designed with pysat_sgp4 in mind
glat_label : string
label used in inst to identify WGS84 geodetic latitude (degrees)
glong_label : string
label used in inst to identify WGS84 geodetic longitude (degrees)
alt_label : string
label used in inst to identify WGS84 geodetic altitude (km, height above surface)
Returns
-------
inst
Input pysat.Instrument object modified to include thermal plasma parameters.
'ion_temp' for ion temperature in Kelvin
'e_temp' for electron temperature in Kelvin
'ion_dens' for the total ion density (O+ and H+)
'frac_dens_o' for the fraction of total density that is O+
'frac_dens_h' for the fraction of total density that is H+ | entailment |
def add_hwm_winds_and_ecef_vectors(inst, glat_label='glat', glong_label='glong',
alt_label='alt'):
"""
Uses HWM (Horizontal Wind Model) model to obtain neutral wind details.
Uses pyglow module to run HWM. Configured to use actual solar parameters to run
model.
Example
-------
    # function added below modifies the inst object upon every inst.load call
inst.custom.add(add_hwm_winds_and_ecef_vectors, 'modify', glat_label='custom_label')
Parameters
----------
inst : pysat.Instrument
Designed with pysat_sgp4 in mind
glat_label : string
label used in inst to identify WGS84 geodetic latitude (degrees)
glong_label : string
label used in inst to identify WGS84 geodetic longitude (degrees)
alt_label : string
label used in inst to identify WGS84 geodetic altitude (km, height above surface)
Returns
-------
inst
Input pysat.Instrument object modified to include HWM winds.
'zonal_wind' for the east/west winds (u in model) in m/s
        'meridional_wind' for the north/south winds (v in model) in m/s
'unit_zonal_wind_ecef_*' (*=x,y,z) is the zonal vector expressed in the ECEF basis
'unit_mer_wind_ecef_*' (*=x,y,z) is the meridional vector expressed in the ECEF basis
'sim_inst_wind_*' (*=x,y,z) is the projection of the total wind vector onto s/c basis
"""
import pyglow
import pysatMagVect
hwm_params = []
for time,lat,lon,alt in zip(inst.data.index, inst[glat_label], inst[glong_label], inst[alt_label]):
# Point class is instantiated.
# Its parameters are a function of time and spatial location
pt = pyglow.Point(time,lat,lon,alt)
pt.run_hwm()
hwm = {}
hwm['zonal_wind'] = pt.u
hwm['meridional_wind'] = pt.v
hwm_params.append(hwm)
# print 'Complete.'
hwm = pds.DataFrame(hwm_params)
hwm.index = inst.data.index
inst[['zonal_wind', 'meridional_wind']] = hwm[['zonal_wind', 'meridional_wind']]
# calculate zonal unit vector in ECEF
# zonal wind: east - west; positive east
# EW direction is tangent to XY location of S/C in ECEF coordinates
mag = np.sqrt(inst['position_ecef_x']**2 + inst['position_ecef_y']**2)
inst['unit_zonal_wind_ecef_x'] = -inst['position_ecef_y']/mag
inst['unit_zonal_wind_ecef_y'] = inst['position_ecef_x']/mag
inst['unit_zonal_wind_ecef_z'] = 0*inst['position_ecef_x']
# calculate meridional unit vector in ECEF
# meridional wind: north - south; positive north
# mer direction completes RHS of position and zonal vector
unit_pos_x, unit_pos_y, unit_pos_z = \
pysatMagVect.normalize_vector(-inst['position_ecef_x'], -inst['position_ecef_y'], -inst['position_ecef_z'])
# mer = r x zonal
inst['unit_mer_wind_ecef_x'], inst['unit_mer_wind_ecef_y'], inst['unit_mer_wind_ecef_z'] = \
pysatMagVect.cross_product(unit_pos_x, unit_pos_y, unit_pos_z,
inst['unit_zonal_wind_ecef_x'], inst['unit_zonal_wind_ecef_y'], inst['unit_zonal_wind_ecef_z'])
# Adding metadata information
inst.meta['zonal_wind'] = {'units':'m/s','long_name':'Zonal Wind',
'desc':'HWM model zonal wind'}
inst.meta['meridional_wind'] = {'units':'m/s','long_name':'Meridional Wind',
'desc':'HWM model meridional wind'}
inst.meta['unit_zonal_wind_ecef_x'] = {'units':'',
'long_name':'Zonal Wind Unit ECEF x-vector',
'desc':'x-value of zonal wind unit vector in ECEF co ordinates'}
inst.meta['unit_zonal_wind_ecef_y'] = {'units':'',
'long_name':'Zonal Wind Unit ECEF y-vector',
'desc':'y-value of zonal wind unit vector in ECEF co ordinates'}
inst.meta['unit_zonal_wind_ecef_z'] = {'units':'',
'long_name':'Zonal Wind Unit ECEF z-vector',
'desc':'z-value of zonal wind unit vector in ECEF co ordinates'}
inst.meta['unit_mer_wind_ecef_x'] = {'units':'',
'long_name':'Meridional Wind Unit ECEF x-vector',
'desc':'x-value of meridional wind unit vector in ECEF co ordinates'}
inst.meta['unit_mer_wind_ecef_y'] = {'units':'',
'long_name':'Meridional Wind Unit ECEF y-vector',
'desc':'y-value of meridional wind unit vector in ECEF co ordinates'}
inst.meta['unit_mer_wind_ecef_z'] = {'units':'',
'long_name':'Meridional Wind Unit ECEF z-vector',
'desc':'z-value of meridional wind unit vector in ECEF co ordinates'}
return | Uses HWM (Horizontal Wind Model) model to obtain neutral wind details.
Uses pyglow module to run HWM. Configured to use actual solar parameters to run
model.
Example
-------
# function added below modifies the inst object upon every inst.load call
inst.custom.add(add_hwm_winds_and_ecef_vectors, 'modify', glat_label='custom_label')
Parameters
----------
inst : pysat.Instrument
Designed with pysat_sgp4 in mind
glat_label : string
label used in inst to identify WGS84 geodetic latitude (degrees)
glong_label : string
label used in inst to identify WGS84 geodetic longitude (degrees)
alt_label : string
label used in inst to identify WGS84 geodetic altitude (km, height above surface)
Returns
-------
inst
Input pysat.Instrument object modified to include HWM winds.
'zonal_wind' for the east/west winds (u in model) in m/s
'meridional_wind' for the north/south winds (v in model) in m/s
'unit_zonal_wind_ecef_*' (*=x,y,z) is the zonal vector expressed in the ECEF basis
'unit_mer_wind_ecef_*' (*=x,y,z) is the meridional vector expressed in the ECEF basis
'sim_inst_wind_*' (*=x,y,z) is the projection of the total wind vector onto s/c basis | entailment |
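A single-sample sketch of the wind unit vectors above with plain numpy; the ECEF position is made up and the sign conventions simply follow the routine:
import numpy as np
pos = np.array([3000.0, 6000.0, 2000.0])             # km, made-up ECEF position
mag_xy = np.hypot(pos[0], pos[1])
zonal = np.array([-pos[1], pos[0], 0.0]) / mag_xy    # eastward, tangent to the XY circle
unit_pos = -pos / np.linalg.norm(pos)                # toward the Earth's center
meridional = np.cross(unit_pos, zonal)               # mer = r x zonal, as above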
def add_igrf(inst, glat_label='glat', glong_label='glong',
alt_label='alt'):
"""
Uses International Geomagnetic Reference Field (IGRF) model to obtain geomagnetic field values.
Uses pyglow module to run IGRF. Configured to use actual solar parameters to run
model.
Example
-------
    # function added below modifies the inst object upon every inst.load call
inst.custom.add(add_igrf, 'modify', glat_label='custom_label')
Parameters
----------
inst : pysat.Instrument
Designed with pysat_sgp4 in mind
glat_label : string
label used in inst to identify WGS84 geodetic latitude (degrees)
glong_label : string
label used in inst to identify WGS84 geodetic longitude (degrees)
alt_label : string
label used in inst to identify WGS84 geodetic altitude (km, height above surface)
Returns
-------
inst
        Input pysat.Instrument object modified to include IGRF geomagnetic field values.
'B' total geomagnetic field
'B_east' Geomagnetic field component along east/west directions (+ east)
'B_north' Geomagnetic field component along north/south directions (+ north)
'B_up' Geomagnetic field component along up/down directions (+ up)
'B_ecef_x' Geomagnetic field component along ECEF x
'B_ecef_y' Geomagnetic field component along ECEF y
'B_ecef_z' Geomagnetic field component along ECEF z
"""
import pyglow
from pyglow.pyglow import Point
import pysatMagVect
igrf_params = []
# print 'IRI Simulations'
for time,lat,lon,alt in zip(inst.data.index, inst[glat_label], inst[glong_label], inst[alt_label]):
pt = Point(time,lat,lon,alt)
pt.run_igrf()
igrf = {}
igrf['B'] = pt.B
igrf['B_east'] = pt.Bx
igrf['B_north'] = pt.By
igrf['B_up'] = pt.Bz
igrf_params.append(igrf)
# print 'Complete.'
igrf = pds.DataFrame(igrf_params)
igrf.index = inst.data.index
inst[igrf.keys()] = igrf
# convert magnetic field in East/north/up to ECEF basis
x, y, z = pysatMagVect.enu_to_ecef_vector(inst['B_east'], inst['B_north'], inst['B_up'],
inst[glat_label], inst[glong_label])
inst['B_ecef_x'] = x
inst['B_ecef_y'] = y
inst['B_ecef_z'] = z
# metadata
inst.meta['B'] = {'units':'nT',
'desc':'Total geomagnetic field from IGRF.'}
inst.meta['B_east'] = {'units':'nT',
'desc':'Geomagnetic field from IGRF expressed using the East/North/Up (ENU) basis.'}
inst.meta['B_north'] = {'units':'nT',
'desc':'Geomagnetic field from IGRF expressed using the East/North/Up (ENU) basis.'}
inst.meta['B_up'] = {'units':'nT',
'desc':'Geomagnetic field from IGRF expressed using the East/North/Up (ENU) basis.'}
inst.meta['B_ecef_x'] = {'units':'nT',
'desc':'Geomagnetic field from IGRF expressed using the Earth Centered Earth Fixed (ECEF) basis.'}
inst.meta['B_ecef_y'] = {'units':'nT',
'desc':'Geomagnetic field from IGRF expressed using the Earth Centered Earth Fixed (ECEF) basis.'}
inst.meta['B_ecef_z'] = {'units':'nT',
'desc':'Geomagnetic field from IGRF expressed using the Earth Centered Earth Fixed (ECEF) basis.'}
return | Uses International Geomagnetic Reference Field (IGRF) model to obtain geomagnetic field values.
Uses pyglow module to run IGRF. Configured to use actual solar parameters to run
model.
Example
-------
# function added below modifies the inst object upon every inst.load call
inst.custom.add(add_igrf, 'modify', glat_label='custom_label')
Parameters
----------
inst : pysat.Instrument
Designed with pysat_sgp4 in mind
glat_label : string
label used in inst to identify WGS84 geodetic latitude (degrees)
glong_label : string
label used in inst to identify WGS84 geodetic longitude (degrees)
alt_label : string
label used in inst to identify WGS84 geodetic altitude (km, height above surface)
Returns
-------
inst
Input pysat.Instrument object modified to include IGRF geomagnetic field values.
'B' total geomagnetic field
'B_east' Geomagnetic field component along east/west directions (+ east)
'B_north' Geomagnetic field component along north/south directions (+ north)
'B_up' Geomagnetic field component along up/down directions (+ up)
'B_ecef_x' Geomagnetic field component along ECEF x
'B_ecef_y' Geomagnetic field component along ECEF y
'B_ecef_z' Geomagnetic field component along ECEF z | entailment |
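For reference, a common East/North/Up to ECEF rotation for vectors at geodetic latitude and longitude; this is a sketch of what a helper like enu_to_ecef_vector typically does, not code taken from pysatMagVect itself:
import numpy as np
def enu_to_ecef(e, n, u, lat_deg, lon_deg):
    # rotate a local ENU vector at (lat, lon) into ECEF components
    lat, lon = np.radians(lat_deg), np.radians(lon_deg)
    x = -e*np.sin(lon) - n*np.sin(lat)*np.cos(lon) + u*np.cos(lat)*np.cos(lon)
    y = e*np.cos(lon) - n*np.sin(lat)*np.sin(lon) + u*np.cos(lat)*np.sin(lon)
    z = n*np.cos(lat) + u*np.sin(lat)
    return x, y, z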
def add_msis(inst, glat_label='glat', glong_label='glong',
alt_label='alt'):
"""
Uses MSIS model to obtain thermospheric values.
Uses pyglow module to run MSIS. Configured to use actual solar parameters to run
model.
Example
-------
    # function added below modifies the inst object upon every inst.load call
inst.custom.add(add_msis, 'modify', glat_label='custom_label')
Parameters
----------
inst : pysat.Instrument
Designed with pysat_sgp4 in mind
glat_label : string
label used in inst to identify WGS84 geodetic latitude (degrees)
glong_label : string
label used in inst to identify WGS84 geodetic longitude (degrees)
alt_label : string
label used in inst to identify WGS84 geodetic altitude (km, height above surface)
Returns
-------
inst
        Input pysat.Instrument object modified to include MSIS neutral atmosphere values.
'Nn' total neutral density particles/cm^3
'Nn_N' Nitrogen number density (particles/cm^3)
'Nn_N2' N2 number density (particles/cm^3)
'Nn_O' Oxygen number density (particles/cm^3)
'Nn_O2' O2 number density (particles/cm^3)
'Tn_msis' Temperature from MSIS (Kelvin)
"""
import pyglow
from pyglow.pyglow import Point
msis_params = []
# print 'IRI Simulations'
for time,lat,lon,alt in zip(inst.data.index, inst[glat_label], inst[glong_label], inst[alt_label]):
pt = Point(time,lat,lon,alt)
pt.run_msis()
msis = {}
total = 0
for key in pt.nn.keys():
total += pt.nn[key]
msis['Nn'] = total
msis['Nn_N'] = pt.nn['N']
msis['Nn_N2'] = pt.nn['N2']
msis['Nn_O'] = pt.nn['O']
msis['Nn_O2'] = pt.nn['O2']
msis['Tn_msis'] = pt.Tn_msis
msis_params.append(msis)
# print 'Complete.'
msis = pds.DataFrame(msis_params)
msis.index = inst.data.index
inst[msis.keys()] = msis
# metadata
inst.meta['Nn'] = {'units':'cm^-3',
'desc':'Total neutral number particle density from MSIS.'}
inst.meta['Nn_N'] = {'units':'cm^-3',
'desc':'Total nitrogen number particle density from MSIS.'}
inst.meta['Nn_N2'] = {'units':'cm^-3',
'desc':'Total N2 number particle density from MSIS.'}
inst.meta['Nn_O'] = {'units':'cm^-3',
'desc':'Total oxygen number particle density from MSIS.'}
inst.meta['Nn_O2'] = {'units':'cm^-3',
'desc':'Total O2 number particle density from MSIS.'}
inst.meta['Tn_msis'] = {'units':'K',
'desc':'Neutral temperature from MSIS.'}
return | Uses MSIS model to obtain thermospheric values.
Uses pyglow module to run MSIS. Configured to use actual solar parameters to run
model.
Example
-------
# function added below modifies the inst object upon every inst.load call
inst.custom.add(add_msis, 'modify', glat_label='custom_label')
Parameters
----------
inst : pysat.Instrument
Designed with pysat_sgp4 in mind
glat_label : string
label used in inst to identify WGS84 geodetic latitude (degrees)
glong_label : string
label used in inst to identify WGS84 geodetic longitude (degrees)
alt_label : string
label used in inst to identify WGS84 geodetic altitude (km, height above surface)
Returns
-------
inst
Input pysat.Instrument object modified to include MSIS neutral atmosphere values.
'Nn' total neutral density particles/cm^3
'Nn_N' Nitrogen number density (particles/cm^3)
'Nn_N2' N2 number density (particles/cm^3)
'Nn_O' Oxygen number density (particles/cm^3)
'Nn_O2' O2 number density (particles/cm^3)
'Tn_msis' Temperature from MSIS (Kelvin) | entailment |
def project_ecef_vector_onto_sc(inst, x_label, y_label, z_label,
new_x_label, new_y_label, new_z_label,
meta=None):
"""Express input vector using s/c attitude directions
x - ram pointing
y - generally southward
z - generally nadir
Parameters
----------
x_label : string
Label used to get ECEF-X component of vector to be projected
y_label : string
Label used to get ECEF-Y component of vector to be projected
z_label : string
Label used to get ECEF-Z component of vector to be projected
new_x_label : string
Label used to set X component of projected vector
new_y_label : string
Label used to set Y component of projected vector
new_z_label : string
Label used to set Z component of projected vector
meta : array_like of dicts (None)
Dicts contain metadata to be assigned.
"""
import pysatMagVect
x, y, z = pysatMagVect.project_ecef_vector_onto_basis(inst[x_label], inst[y_label], inst[z_label],
inst['sc_xhat_ecef_x'], inst['sc_xhat_ecef_y'], inst['sc_xhat_ecef_z'],
inst['sc_yhat_ecef_x'], inst['sc_yhat_ecef_y'], inst['sc_yhat_ecef_z'],
inst['sc_zhat_ecef_x'], inst['sc_zhat_ecef_y'], inst['sc_zhat_ecef_z'])
inst[new_x_label] = x
inst[new_y_label] = y
inst[new_z_label] = z
if meta is not None:
inst.meta[new_x_label] = meta[0]
inst.meta[new_y_label] = meta[1]
inst.meta[new_z_label] = meta[2]
return | Express input vector using s/c attitude directions
x - ram pointing
y - generally southward
z - generally nadir
Parameters
----------
x_label : string
Label used to get ECEF-X component of vector to be projected
y_label : string
Label used to get ECEF-Y component of vector to be projected
z_label : string
Label used to get ECEF-Z component of vector to be projected
new_x_label : string
Label used to set X component of projected vector
new_y_label : string
Label used to set Y component of projected vector
new_z_label : string
Label used to set Z component of projected vector
meta : array_like of dicts (None)
Dicts contain metadata to be assigned. | entailment |
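Projection onto an orthonormal basis is just three dot products; a single-sample sketch with made-up vectors of what project_ecef_vector_onto_basis presumably computes:
import numpy as np
vec = np.array([1.0, 2.0, 3.0])       # vector expressed in ECEF (made up)
xhat = np.array([0.0, 1.0, 0.0])      # s/c unit vectors in ECEF (made up, right handed)
yhat = np.array([0.0, 0.0, -1.0])
zhat = np.array([-1.0, 0.0, 0.0])
vec_sc = np.array([vec @ xhat, vec @ yhat, vec @ zhat])   # components in the s/c frame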
def scatterplot(inst, labelx, labely, data_label, datalim, xlim=None, ylim=None):
"""Return scatterplot of data_label(s) as functions of labelx,y over a season.
Parameters
----------
labelx : string
data product for x-axis
labely : string
data product for y-axis
data_label : string, array-like of strings
data product(s) to be scatter plotted
    datalim : numpy array
plot limits for data_label
Returns
-------
Returns a list of scatter plots of data_label as a function
of labelx and labely over the season delineated by start and
stop datetime objects.
"""
# interactive plotting off
plt.ioff()
# create figures for plotting
figs = []
axs = []
# check if doing multiple data quantities
if type(data_label) is str:
# max of one data type
figs.append( plt.figure() )
ax1 = figs[0].add_subplot(211, projection='3d')
ax2 = figs[0].add_subplot(212)
axs.append((ax1, ax2))
plt.suptitle(data_label)
if xlim is not None:
ax1.set_xlim(xlim)
ax2.set_xlim(xlim)
if ylim is not None:
ax1.set_ylim(ylim)
ax2.set_ylim(ylim)
data_label = [data_label]
plt.hold(True)
else:
# multiple data to be plotted
for i in np.arange(len(data_label)):
figs.append( plt.figure() )
ax1 = figs[i].add_subplot(211, projection='3d')
ax2 = figs[i].add_subplot(212)
axs.append((ax1, ax2))
#axs.append( figs[i].add_subplot(111, projection='3d') )
plt.suptitle(data_label[i])
if xlim is not None:
ax1.set_xlim(xlim)
ax2.set_xlim(xlim)
if ylim is not None:
ax1.set_ylim(ylim)
ax2.set_ylim(ylim)
plt.hold(True)
# norm method so that data may be scaled to colors appropriately
norm = colors.Normalize(vmin=datalim[0], vmax=datalim[1])
p = [i for i in np.arange(len(figs))]
for i,inst in enumerate(inst):
for j, (fig, ax) in enumerate(zip(figs, axs)):
if len(inst.data) > 0:
if (len(inst.data[labelx]) > 0) & (len(inst.data[labely]) > 0) & (len(inst.data[data_label[j]]) > 0):
p[j]=ax[0].scatter(inst.data[labelx], inst.data[labely], inst.data[data_label[j]],
zdir='z', c=inst.data[data_label[j]], cmap=cm.jet, norm=norm,
linewidth='0', edgecolors=None)
ax[1].scatter(inst.data[labelx], inst.data[labely], c=inst.data[data_label[j]],
cmap=cm.jet, norm=norm, linewidth=0.00000000001, alpha=0.5, edgecolor=None)
for j, (fig, ax) in enumerate(zip(figs, axs)):
try:
cbar = plt.colorbar(p[j],ax=ax[0], label='Amplitude (m/s)')
except:
print('Tried colorbar but failed, thus no colorbar.')
ax[0].elev=30.
# interactive plotting back on
plt.ion()
return figs | Return scatterplot of data_label(s) as functions of labelx,y over a season.
Parameters
----------
labelx : string
data product for x-axis
labely : string
data product for y-axis
data_label : string, array-like of strings
data product(s) to be scatter plotted
datalim : numpy array
plot limits for data_label
Returns
-------
Returns a list of scatter plots of data_label as a function
of labelx and labely over the season delineated by start and
stop datetime objects. | entailment |
def enable_log(level=logging.DEBUG):
"""Enable console logging.
    This is a utility method for trying out storops.
:param level: log level, default to DEBUG
"""
logger = logging.getLogger(__name__)
logger.setLevel(level)
if not logger.handlers:
logger.info('enabling logging to console.')
logger.addHandler(logging.StreamHandler(sys.stdout)) | Enable console logging.
This is a utility method for trying out storops.
:param level: log level, default to DEBUG | entailment |
def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
"""Return a Pandas Series of every file for chosen satellite data
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are '' and 'ascii'.
If '' is specified, the primary data type (ascii) is loaded.
(default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (NoneType)
User specified file format not supported. (default=None)
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files
"""
import sys
#if tag == 'ionprf':
# # from_os constructor currently doesn't work because of the variable
# # filename components at the end of each string.....
# ion_fmt = '*/ionPrf_*.{year:04d}.{day:03d}.{hour:02d}.{min:02d}*_nc'
# return pysat.Files.from_os(dir_path=os.path.join('cosmic', 'ionprf'),
# format_str=ion_fmt)
estr = 'Building a list of COSMIC files, which can possibly take time. '
estr = '{:s}~1s per 100K files'.format(estr)
print(estr)
sys.stdout.flush()
# number of files may be large
# only select file that are the cosmic data files and end with _nc
cosmicFiles = glob.glob(os.path.join(data_path, '*/*_nc'))
# need to get date and time from filename to generate index
num = len(cosmicFiles)
if num != 0:
print('Estimated time:', num*1.E-5,'seconds')
sys.stdout.flush()
# preallocate lists
year=[None]*num; days=[None]*num; hours=[None]*num;
minutes=[None]*num; microseconds=[None]*num;
for i,f in enumerate(cosmicFiles):
f2 = f.split('.')
year[i]=f2[-6]
days[i]=f2[-5]
hours[i]=f2[-4]
minutes[i]=f2[-3]
microseconds[i]=i
year=np.array(year).astype(int)
days=np.array(days).astype(int)
uts=np.array(hours).astype(int)*3600.+np.array(minutes).astype(int)*60.
# adding microseconds to ensure each time is unique, not allowed to
# pass 1.E-3 s
uts+=np.mod(np.array(microseconds).astype(int)*4, 8000)*1.E-5
index = pysat.utils.create_datetime_index(year=year, day=days, uts=uts)
file_list = pysat.Series(cosmicFiles, index=index)
return file_list
else:
print('Found no files, check your path or download them.')
return pysat.Series(None) | Return a Pandas Series of every file for chosen satellite data
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are '' and 'ascii'.
If '' is specified, the primary data type (ascii) is loaded.
(default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (NoneType)
User specified file format not supported. (default=None)
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files | entailment |
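The date fields are pulled out of each filename by position after splitting on '.'; a sketch with a hypothetical COSMIC-style name (the exact naming convention may differ):
fname = 'ionPrf_C001.2009.123.04.56.G05_0001.0001_nc'   # hypothetical example
parts = fname.split('.')
year, doy, hour, minute = parts[-6], parts[-5], parts[-4], parts[-3]
# -> '2009', '123', '04', '56'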
def load_files(files, tag=None, sat_id=None, altitude_bin=None):
'''Loads a list of COSMIC data files, supplied by user.
Returns a list of dicts, a dict for each file.
'''
output = [None]*len(files)
drop_idx = []
for (i,file) in enumerate(files):
try:
#data = netCDF4.Dataset(file)
data = netcdf_file(file, mode='r', mmap=False)
# build up dictionary will all ncattrs
new = {}
# get list of file attributes
#ncattrsList = data.ncattrs()
ncattrsList = data._attributes.keys()
for d in ncattrsList:
new[d] = data._attributes[d] #data.getncattr(d)
# load all of the variables in the netCDF
loadedVars={}
keys = data.variables.keys()
for key in keys:
if data.variables[key][:].dtype.byteorder != '=':
loadedVars[key] = data.variables[key][:].byteswap().newbyteorder()
else:
loadedVars[key] = data.variables[key][:]
new['profiles'] = pysat.DataFrame(loadedVars)
output[i] = new
data.close()
except RuntimeError:
# some of the files have zero bytes, which causes a read error
# this stores the index of these zero byte files so I can drop
# the Nones the gappy file leaves behind
drop_idx.append(i)
# drop anything that came from the zero byte files
drop_idx.reverse()
for i in drop_idx:
del output[i]
if tag == 'ionprf':
if altitude_bin is not None:
for out in output:
out['profiles'].index = (out['profiles']['MSL_alt']/altitude_bin).round().values*altitude_bin
out['profiles'] = out['profiles'].groupby(out['profiles'].index.values).mean()
else:
for out in output:
out['profiles'].index = out['profiles']['MSL_alt']
return output | Loads a list of COSMIC data files, supplied by user.
Returns a list of dicts, a dict for each file. | entailment |
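A small pandas sketch (made-up profile values and column names) of the altitude binning used above: altitudes are snapped to the nearest multiple of altitude_bin and samples sharing a bin are averaged:
import pandas as pd
profile = pd.DataFrame({'MSL_alt': [101.2, 103.9, 106.1],
                        'ELEC_dens': [1.0e5, 1.2e5, 1.4e5]})   # hypothetical columns
altitude_bin = 5.
profile.index = (profile['MSL_alt'] / altitude_bin).round().values * altitude_bin
binned = profile.groupby(profile.index.values).mean()
# 103.9 and 106.1 km both land in the 105 km bin and are averaged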
def round_60(value):
""" round the number to the multiple of 60
Say a random value is represented by: 60 * n + r
n is an integer and r is an integer between 0 and 60.
if r < 30, the result is 60 * n.
otherwise, the result is 60 * (n + 1)
    This function is needed because the counter refresh interval on
    VNX is always 1 minute, so the delta time between samples of
    counters must be a multiple of 60.
:param value: the value to be rounded.
:return: result
"""
t = 60
if value is not None:
r = value % t
if r > t / 2:
ret = value + (t - r)
else:
ret = value - r
else:
ret = NaN
return ret | round the number to the multiple of 60
Say a random value is represented by: 60 * n + r
n is an integer and r is an integer between 0 and 60.
if r < 30, the result is 60 * n.
otherwise, the result is 60 * (n + 1)
This function is needed because the counter refresh interval on
VNX is always 1 minute, so the delta time between samples of
counters must be a multiple of 60.
:param value: the value to be rounded.
:return: result | entailment |
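A worked example of the rounding rule with made-up values; note that a remainder of exactly 30 rounds down, since the code tests r > t / 2 strictly:
value = 155
r = value % 60                                          # 35
rounded = value + (60 - r) if r > 30 else value - r     # 180 (155 rounds up)
# 130 would give r = 10 and round down to 120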
def utilization(prev, curr, counters):
""" calculate the utilization
delta_busy = curr.busy - prev.busy
delta_idle = curr.idle - prev.idle
utilization = delta_busy / (delta_busy + delta_idle)
:param prev: previous resource
:param curr: current resource
:param counters: list of two, busy ticks and idle ticks
:return: value, NaN if invalid.
"""
busy_prop, idle_prop = counters
pb = getattr(prev, busy_prop)
pi = getattr(prev, idle_prop)
cb = getattr(curr, busy_prop)
ci = getattr(curr, idle_prop)
db = minus(cb, pb)
di = minus(ci, pi)
return mul(div(db, add(db, di)), 100) | calculate the utilization
delta_busy = curr.busy - prev.busy
delta_idle = curr.idle - prev.idle
utilization = delta_busy / (delta_busy + delta_idle)
:param prev: previous resource
:param curr: current resource
:param counters: list of two, busy ticks and idle ticks
:return: value, NaN if invalid. | entailment |
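A worked example of the formula with made-up busy/idle tick samples taken one polling interval apart:
prev_busy, prev_idle = 1000., 3000.
curr_busy, curr_idle = 1600., 3400.
delta_busy = curr_busy - prev_busy                      # 600.0
delta_idle = curr_idle - prev_idle                      # 400.0
util = delta_busy / (delta_busy + delta_idle) * 100     # 60.0 %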
def delta_ps(prev, curr, counters):
""" calculate the delta per second of one counter
formula: (curr - prev) / delta_time
:param prev: previous resource
:param curr: current resource
:param counters: the counter to do delta and per second, one only
:return: value, NaN if invalid.
"""
counter = get_counter(counters)
pv = getattr(prev, counter)
cv = getattr(curr, counter)
return minus(cv, pv) | calculate the delta per second of one counter
formula: (curr - prev) / delta_time
:param prev: previous resource
:param curr: current resource
:param counters: the counter to do delta and per second, one only
:return: value, NaN if invalid. | entailment |
def io_size_kb(prev, curr, counters):
""" calculate the io size based on bandwidth and throughput
formula: average_io_size = bandwidth / throughput
:param prev: prev resource, not used
:param curr: current resource
:param counters: two stats, bandwidth in MB and throughput count
:return: value, NaN if invalid
"""
bw_stats, io_stats = counters
size_mb = div(getattr(curr, bw_stats), getattr(curr, io_stats))
return mul(size_mb, 1024) | calculate the io size based on bandwidth and throughput
formula: average_io_size = bandwidth / throughput
:param prev: prev resource, not used
:param curr: current resource
:param counters: two stats, bandwidth in MB and throughput count
:return: value, NaN if invalid | entailment |
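A worked example with made-up counter values: 50 MB/s spread over 800 IOs per second gives 0.0625 MB, i.e. 64 KB per IO:
bandwidth_mb_ps = 50.      # MB/s from the current sample (made up)
throughput_iops = 800.     # IO/s from the current sample (made up)
size_kb = bandwidth_mb_ps / throughput_iops * 1024   # 64.0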
def list_files(tag='', sat_id=None, data_path=None, format_str=None):
"""Return a Pandas Series of every file for chosen satellite data
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are '' and 'ascii'.
If '' is specified, the primary data type (ascii) is loaded.
(default='')
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files
"""
if format_str is None and tag is not None:
if tag == '' or tag == 'ascii':
ascii_fmt = 'Density_3deg_{year:02d}_{doy:03d}.ascii'
return pysat.Files.from_os(data_path=data_path,
format_str=ascii_fmt)
else:
raise ValueError('Unrecognized tag name for CHAMP STAR')
elif format_str is None:
estr = 'A tag name must be passed to the loading routine for CHAMP'
raise ValueError(estr)
else:
return pysat.Files.from_os(data_path=data_path, format_str=format_str) | Return a Pandas Series of every file for chosen satellite data
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are '' and 'ascii'.
If '' is specified, the primary data type (ascii) is loaded.
(default='')
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files | entailment |
def load(fnames, tag=None, sat_id=None):
"""Load CHAMP STAR files
Parameters
------------
fnames : (pandas.Series)
Series of filenames
tag : (str or NoneType)
tag or None (default=None)
sat_id : (str or NoneType)
satellite id or None (default=None)
Returns
---------
data : (pandas.DataFrame)
Object containing satellite data
meta : (pysat.Meta)
Object containing metadata such as column names and units
"""
import re
if len(fnames) <= 0:
return pysat.DataFrame(None), pysat.Meta(None)
if isinstance(fnames, str):
fnames = [fnames]
# Define the CHAMP STAR data types by column
champ_labels = { 'Two-digit Year (years)' : "year",
'Day of the Year (days)' : "doy",
'Second of the Day (GPS time,sec)' : "sod",
'Center Latitude of 3-degree Bin (deg)' : "bin_lat",
'Satellite Geodetic Latitude (deg)' : "sat_glat",
'Satellite Longitude (deg)' : "sat_lon",
'Satellite Height (km)' : "sat_h",
'Satellite Local Time (hours)' : "sat_lt",
'Satellite Quasi-Dipole Latitude (deg)' : "sat_qdlat",
'Satellite Magnetic Longitude (deg)' : "sat_mlon",
'Satellite Magnetic Local Time (hours)' : "sat_mlt",
'Neutral Density (kg/m^3)' : "ndens",
'Neutral Density Normalized to 400km using NRLMSISe00' :
"ndens400",
'Neutral Density Normalized to 410km using NRLMSISe00' :
"ndens410",
'NRLMSISe00 Neutral Density at Satellite Height' :
"nrlmsis_ndens",
'Uncertainty in Neutral Density (kg/m^3)' : "ndens_err",
'Number of Data Points in Current Averaging Bin' :
"npnts", \
'Number of Points in Current Averaging Bin that Required Interpolation' :
"npnts_interp", \
'Average Coefficient of Drag Used in Current Averaging Bin' :
"avg_drag_coeff", }
champ_dtypes = { 'year' : np.int32, 'doy' : np.int32, 'sod' : float,
'bin_lat' : float, 'sat_glat' : float, 'sat_lon' : float,
'sat_h' : float, 'sat_lt' : float, 'sat_qdlat' : float,
'sat_mlon' : float, 'sat_mlt' : float, 'ndens' : float,
'ndens400' : float, 'ndens410' : float,
'nrlmsis_ndens' : float, 'ndens_err' : float,
'npnts' : int, 'npnts_interp' : float,
'avg_drag_coeff' : float, }
champ_units = { 'year' : "2-digit years", 'doy' : "day of year",
'sod' : "seconds of day", 'bin_lat' : "degrees",
'sat_glat' : "degrees", 'sat_lon' : "degrees",
'sat_h' : "km", 'sat_lt' : "hours", 'sat_qdlat' : "degrees",
'sat_mlon' : "degrees", 'sat_mlt' : "hours",
'ndens' : "km m^{-3}", 'ndens400' : "km m^{-3}",
'ndens410' : "km m^{-3}", 'nrlmsis_ndens' : "km m^{-3}",
'ndens_err' : "km m^{-3}", 'npnts' : "number",
'npnts_interp' : "number", 'avg_drag_coeff' : "unitless", }
# Define the routine needed to create datetime object from the
# CHAMP time (YY DDD SSSSS)
def parse_champdate(y, d, s):
'''parse CHAMP date string (YY DDD SSSSS) into a datetime object
'''
import datetime as dt
t = dt.datetime.strptime("{:02d} {:03d}".format(int(y),int(d)), "%y %j")
fsec = float(s)
isec = np.floor(fsec)
microsec = (fsec - isec) * 1.0e6
t += dt.timedelta(seconds=isec, microseconds=microsec)
return(t)
# The header is formatted differently from the rest of the file, read it in
# first to obtain the necessary meta data
f = open(fnames[0], "r")
vdata = re.split( ";|\n", f.readline())
hdata = re.split( ";|\n", f.readline())
f.close()
try:
hdata.pop(hdata.index(''))
except:
pass
# If there are files, read in the data
data = pds.read_csv(fnames[0], delim_whitespace=True, skiprows=2,
header=None, names=[champ_labels[h] for h in hdata],
keep_date_col=True, index_col='datetime',
parse_dates={'datetime': [0,1,2]},
date_parser=parse_champdate)
# Initialize the meta data
meta = pysat.Meta()
    # Because the native dtype declaration interfered with datetime indexing,
# define the data types here. Also set the meta data
for h in hdata:
col = champ_labels[h]
data[col].astype(champ_dtypes[col])
meta[col] = {"units":champ_units[col], "long_name":h}
# Return data frame and metadata object
return data, meta | Load CHAMP STAR files
Parameters
------------
fnames : (pandas.Series)
Series of filenames
tag : (str or NoneType)
tag or None (default=None)
sat_id : (str or NoneType)
satellite id or None (default=None)
Returns
---------
data : (pandas.DataFrame)
Object containing satellite data
meta : (pysat.Meta)
Object containing metadata such as column names and units | entailment |
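The nested parse_champdate above combines the two-digit year, day of year, and fractional second of day into a timestamp; a standalone sketch of the same conversion with made-up values:
import datetime as dt
t = dt.datetime.strptime("05 123", "%y %j")              # 2005-05-03 00:00:00
t += dt.timedelta(seconds=43200, microseconds=500000)    # sod 43200.5 -> 12:00:00.5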
def _assign_funcs(self, by_name=False, inst_module=None):
"""Assign all external science instrument methods to Instrument object.
"""
import importlib
# set defaults
self._list_rtn = self._pass_func
self._load_rtn = self._pass_func
self._default_rtn = self._pass_func
self._clean_rtn = self._pass_func
self._init_rtn = self._pass_func
self._download_rtn = self._pass_func
# default params
self.directory_format = None
self.file_format = None
self.multi_file_day = False
self.orbit_info = None
if by_name:
# look for code with filename name, any errors passed up
inst = importlib.import_module(''.join(('.', self.platform, '_',
self.name)),
package='pysat.instruments')
elif inst_module is not None:
# user supplied an object with relevant instrument routines
inst = inst_module
else:
# no module or name info, default pass functions assigned
return
try:
self._load_rtn = inst.load
self._list_rtn = inst.list_files
self._download_rtn = inst.download
except AttributeError:
estr = 'A load, file_list, and download routine are required for '
raise AttributeError('{:s}every instrument.'.format(estr))
try:
self._default_rtn = inst.default
except AttributeError:
pass
try:
self._init_rtn = inst.init
except AttributeError:
pass
try:
self._clean_rtn = inst.clean
except AttributeError:
pass
# look for instrument default parameters
try:
self.directory_format = inst.directory_format
except AttributeError:
pass
try:
self.multi_file_day = inst.multi_file_day
except AttributeError:
pass
try:
self.orbit_info = inst.orbit_info
except AttributeError:
pass
return | Assign all external science instrument methods to Instrument object. | entailment |
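The repeated try/except AttributeError blocks above amount to optional attribute lookups; an alternative sketch of the same pattern using getattr with defaults (not the pysat implementation):
class _Demo(object):
    @staticmethod
    def _pass_func(*args, **kwargs):
        pass
    def assign(self, inst_module):
        # fall back to the no-op routine when the module omits an optional hook
        self._init_rtn = getattr(inst_module, 'init', self._pass_func)
        self._clean_rtn = getattr(inst_module, 'clean', self._pass_func)
        self.orbit_info = getattr(inst_module, 'orbit_info', None)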
def _load_data(self, date=None, fid=None):
"""
        Load data for an instrument on given date or fid, depending upon input.
Parameters
------------
date : (dt.datetime.date object or NoneType)
file date
fid : (int or NoneType)
filename index value
Returns
--------
data : (pds.DataFrame)
pysat data
meta : (pysat.Meta)
pysat meta data
"""
if fid is not None:
# get filename based off of index value
fname = self.files[fid:fid+1]
elif date is not None:
fname = self.files[date: date+pds.DateOffset(days=1)]
else:
raise ValueError('Must supply either a date or file id number.')
if len(fname) > 0:
load_fname = [os.path.join(self.files.data_path, f) for f in fname]
data, mdata = self._load_rtn(load_fname, tag=self.tag,
sat_id=self.sat_id, **self.kwargs)
# ensure units and name are named consistently in new Meta
# object as specified by user upon Instrument instantiation
mdata.accept_default_labels(self)
else:
data = DataFrame(None)
mdata = _meta.Meta(units_label=self.units_label, name_label=self.name_label,
notes_label = self.notes_label, desc_label = self.desc_label,
plot_label = self.plot_label, axis_label = self.axis_label,
scale_label = self.scale_label, min_label = self.min_label,
max_label = self.max_label, fill_label=self.fill_label)
output_str = '{platform} {name} {tag} {sat_id}'
output_str = output_str.format(platform=self.platform,
name=self.name, tag=self.tag,
sat_id=self.sat_id)
if not data.empty:
if not isinstance(data, DataFrame):
raise TypeError(' '.join(('Data returned by instrument load',
'routine must be a pandas.DataFrame')))
if not isinstance(mdata, _meta.Meta):
raise TypeError('Metadata returned must be a pysat.Meta object')
if date is not None:
output_str = ' '.join(('Returning', output_str, 'data for',
date.strftime('%x')))
else:
if len(fname) == 1:
# this check was zero
output_str = ' '.join(('Returning', output_str, 'data from',
fname[0]))
else:
output_str = ' '.join(('Returning', output_str, 'data from',
fname[0], '::', fname[-1]))
else:
# no data signal
output_str = ' '.join(('No', output_str, 'data for',
date.strftime('%m/%d/%y')))
# remove extra spaces, if any
output_str = " ".join(output_str.split())
print (output_str)
    return data, mdata | Load data for an instrument on given date or fid, depending upon input.
Parameters
------------
date : (dt.datetime.date object or NoneType)
file date
fid : (int or NoneType)
filename index value
Returns
--------
data : (pds.DataFrame)
pysat data
meta : (pysat.Meta)
pysat meta data | entailment |
def _load_next(self):
"""Load the next days data (or file) without incrementing the date.
Repeated calls will not advance date/file and will produce the same data
Uses info stored in object to either increment the date,
or the file. Looks for self._load_by_date flag.
"""
if self._load_by_date:
next_date = self.date + pds.DateOffset(days=1)
return self._load_data(date=next_date)
else:
            return self._load_data(fid=self._fid+1) | Load the next day's data (or file) without incrementing the date.
Repeated calls will not advance date/file and will produce the same data
Uses info stored in object to either increment the date,
or the file. Looks for self._load_by_date flag. | entailment |
def _load_prev(self):
"""Load the next days data (or file) without decrementing the date.
Repeated calls will not decrement date/file and will produce the same
data
Uses info stored in object to either decrement the date,
or the file. Looks for self._load_by_date flag.
"""
if self._load_by_date:
prev_date = self.date - pds.DateOffset(days=1)
return self._load_data(date=prev_date)
else:
            return self._load_data(fid=self._fid-1) | Load the previous day's data (or file) without decrementing the date.
Repeated calls will not decrement date/file and will produce the same
data
Uses info stored in object to either decrement the date,
or the file. Looks for self._load_by_date flag. | entailment |
def load(self, yr=None, doy=None, date=None, fname=None, fid=None,
verifyPad=False):
"""Load instrument data into Instrument object .data.
Parameters
----------
yr : integer
year for desired data
doy : integer
day of year
date : datetime object
date to load
fname : 'string'
filename to be loaded
verifyPad : boolean
if True, padding data not removed (debug purposes)
Returns
--------
Void. Data is added to self.data
Note
----
Loads data for a chosen instrument into .data. Any functions chosen
by the user and added to the custom processing queue (.custom.add)
are automatically applied to the data before it is available to
user in .data.
"""
# set options used by loading routine based upon user input
if date is not None:
self._set_load_parameters(date=date, fid=None)
# increment
inc = pds.DateOffset(days=1)
curr = date
elif (yr is not None) & (doy is not None):
date = pds.datetime(yr, 1, 1) + pds.DateOffset(days=(doy-1))
self._set_load_parameters(date=date, fid=None)
# increment
inc = pds.DateOffset(days=1)
curr = self.date
elif fname is not None:
# date will have to be set later by looking at the data
self._set_load_parameters(date=None,
fid=self.files.get_index(fname))
# increment one file at a time
inc = 1
curr = self._fid.copy()
elif fid is not None:
self._set_load_parameters(date=None, fid=fid)
# increment one file at a time
inc = 1
curr = fid
else:
estr = 'Must supply a yr, doy pair, a datetime object, or a filename'
estr = '{:s} to load data from.'.format(estr)
raise TypeError(estr)
self.orbits._reset()
# if pad or multi_file_day is true, need to have a three day/file load
loop_pad = self.pad if self.pad is not None else pds.DateOffset(seconds=0)
if (self.pad is not None) | self.multi_file_day:
if self._next_data.empty & self._prev_data.empty:
# data has not already been loaded for previous and next days
# load data for all three
print('Initializing three day/file window')
# using current date or fid
self._prev_data, self._prev_meta = self._load_prev()
self._curr_data, self._curr_meta = \
self._load_data(date=self.date, fid=self._fid)
self._next_data, self._next_meta = self._load_next()
else:
# moving forward in time
if self._next_data_track == curr:
del self._prev_data
self._prev_data = self._curr_data
self._prev_meta = self._curr_meta
self._curr_data = self._next_data
self._curr_meta = self._next_meta
self._next_data, self._next_meta = self._load_next()
# moving backward in time
elif self._prev_data_track == curr:
del self._next_data
self._next_data = self._curr_data
self._next_meta = self._curr_meta
self._curr_data = self._prev_data
self._curr_meta = self._prev_meta
self._prev_data, self._prev_meta = self._load_prev()
# jumped in time/or switched from filebased to date based access
else:
del self._prev_data
del self._curr_data
del self._next_data
self._prev_data, self._prev_meta = self._load_prev()
self._curr_data, self._curr_meta = \
self._load_data(date=self.date, fid=self._fid)
self._next_data, self._next_meta = self._load_next()
# make sure datetime indices for all data is monotonic
if not self._prev_data.index.is_monotonic_increasing:
self._prev_data.sort_index(inplace=True)
if not self._curr_data.index.is_monotonic_increasing:
self._curr_data.sort_index(inplace=True)
if not self._next_data.index.is_monotonic_increasing:
self._next_data.sort_index(inplace=True)
# make tracking indexes consistent with new loads
self._next_data_track = curr + inc
self._prev_data_track = curr - inc
# attach data to object
if not self._curr_data.empty:
self.data = self._curr_data.copy()
self.meta = self._curr_meta.copy()
else:
self.data = DataFrame(None)
# line below removed as it would delete previous meta, if any
# if you end a seasonal analysis with a day with no data, then
# no meta: self.meta = _meta.Meta()
# multi file days can extend past a single day, only want data from
# specific date if loading by day
# set up times for the possible data padding coming up
if self._load_by_date:
#print ('double trouble')
first_time = self.date
first_pad = self.date - loop_pad
last_time = self.date + pds.DateOffset(days=1)
last_pad = self.date + pds.DateOffset(days=1) + loop_pad
want_last_pad = False
# loading by file, can't be a multi_file_day flag situation
elif (not self._load_by_date) and (not self.multi_file_day):
#print ('single trouble')
first_time = self._curr_data.index[0]
first_pad = first_time - loop_pad
last_time = self._curr_data.index[-1]
last_pad = last_time + loop_pad
want_last_pad = True
else:
raise ValueError("multi_file_day and loading by date are " +
"effectively equivalent. Can't have " +
"multi_file_day and load by file.")
#print (first_pad, first_time, last_time, last_pad)
# pad data based upon passed parameter
if (not self._prev_data.empty) & (not self.data.empty):
padLeft = self._prev_data.loc[first_pad : self.data.index[0]]
if len(padLeft) > 0:
if (padLeft.index[-1] == self.data.index[0]) :
padLeft = padLeft.iloc[:-1, :]
self.data = pds.concat([padLeft, self.data])
if (not self._next_data.empty) & (not self.data.empty):
padRight = self._next_data.loc[self.data.index[-1] : last_pad]
if len(padRight) > 0:
if (padRight.index[0] == self.data.index[-1]) :
padRight = padRight.iloc[1:, :]
self.data = pds.concat([self.data, padRight])
self.data = self.data.loc[first_pad : last_pad]  # .loc replaces the removed .ix indexer
# want exclusive end slicing behavior from above
if not self.empty:
if (self.data.index[-1] == last_pad) & (not want_last_pad):
self.data = self.data.iloc[:-1, :]
## drop any possible duplicate index times
##self.data.drop_duplicates(inplace=True)
#self.data = self.data[~self.data.index.duplicated()]
# if self.pad is None and multi_file_day is False, load a single day/file
else:
self.data, meta = self._load_data(date=self.date, fid=self._fid)
if not self.data.empty:
self.meta = meta
# check if load routine actually returns meta
if self.meta.data.empty:
self.meta[self.data.columns] = {self.name_label: self.data.columns,
self.units_label: [''] *
len(self.data.columns)}
# if loading by file set the yr, doy, and date
if not self._load_by_date:
if self.pad is not None:
temp = first_time
else:
temp = self.data.index[0]
self.date = pds.datetime(temp.year, temp.month, temp.day)
self.yr, self.doy = utils.getyrdoy(self.date)
if not self.data.empty:
self._default_rtn(self)
# clean
if (not self.data.empty) & (self.clean_level != 'none'):
self._clean_rtn(self)
# apply custom functions
if not self.data.empty:
self.custom._apply_all(self)
# remove the excess padding, if any applied
if (self.pad is not None) & (not self.data.empty) & (not verifyPad):
self.data = self.data[first_time: last_time]
if not self.empty:
if (self.data.index[-1] == last_time) & (not want_last_pad):
self.data = self.data.iloc[:-1, :]
# transfer any extra attributes in meta to the Instrument object
self.meta.transfer_attributes_to_instrument(self)
sys.stdout.flush()
return | Load instrument data into Instrument object .data.
Parameters
----------
yr : integer
year for desired data
doy : integer
day of year
date : datetime object
date to load
fname : string
filename to be loaded
verifyPad : boolean
if True, padding data not removed (debug purposes)
Returns
--------
Void. Data is added to self.data
Note
----
Loads data for a chosen instrument into .data. Any functions chosen
by the user and added to the custom processing queue (.custom.add)
are automatically applied to the data before it is available to
user in .data. | entailment |
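A hedged usage sketch for load(); the platform/name/tag below are placeholders for an instrument module registered with pysat on your system, and each call maps to one of the input branches above.

import datetime as dt
import pysat

# placeholder instrument; substitute a platform/name/tag available locally
inst = pysat.Instrument(platform='platform', name='name', tag='')
inst.load(2009, 1)                          # yr, doy pair
inst.load(date=dt.datetime(2009, 1, 1))     # datetime object
# inst.load(fname='file_name_from_inst.files')  # by filename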
def download(self, start, stop, freq='D', user=None, password=None,
**kwargs):
"""Download data for given Instrument object from start to stop.
Parameters
----------
start : pandas.datetime
start date to download data
stop : pandas.datetime
stop date to download data
freq : string
Step size between dates for season, 'D' for daily, 'M' for monthly
(see pandas)
user : string
username, if required by instrument data archive
password : string
password, if required by instrument data archive
**kwargs : dict
Dictionary of keywords that may be options for specific instruments
Note
----
Data will be downloaded to pysat_data_dir/platform/name/tag
If Instrument bounds are set to defaults they are updated
after files are downloaded.
"""
import errno
# make sure directories are there, otherwise create them
try:
os.makedirs(self.files.data_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
print('Downloading data to: ', self.files.data_path)
date_array = utils.season_date_range(start, stop, freq=freq)
if user is None:
self._download_rtn(date_array,
tag=self.tag,
sat_id=self.sat_id,
data_path=self.files.data_path,
**kwargs)
else:
self._download_rtn(date_array,
tag=self.tag,
sat_id=self.sat_id,
data_path=self.files.data_path,
user=user,
password=password, **kwargs)
# get current file date range
first_date = self.files.start_date
last_date = self.files.stop_date
print('Updating pysat file list')
self.files.refresh()
# if instrument object has default bounds, update them
if len(self.bounds[0]) == 1:
if(self.bounds[0][0] == first_date and
self.bounds[1][0] == last_date):
print('Updating instrument object bounds.')
self.bounds = None | Download data for given Instrument object from start to stop.
Parameters
----------
start : pandas.datetime
start date to download data
stop : pandas.datetime
stop date to download data
freq : string
Step size between dates for season, 'D' for daily, 'M' for monthly
(see pandas)
user : string
username, if required by instrument data archive
password : string
password, if required by instrument data archive
**kwargs : dict
Dictionary of keywords that may be options for specific instruments
Note
----
Data will be downloaded to pysat_data_dir/platform/name/tag
If Instrument bounds are set to defaults they are updated
after files are downloaded. | entailment |
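A hedged sketch of download() built on the signature above; the instrument is again a placeholder, the remote archive must be reachable, and user/password are only needed if the data source requires them.

import datetime as dt
import pysat

inst = pysat.Instrument(platform='platform', name='name', tag='')  # placeholder
start = dt.datetime(2009, 1, 1)
stop = dt.datetime(2009, 1, 3)
inst.download(start, stop)   # daily steps by default (freq='D')
# inst.download(start, stop, user='me', password='secret')  # if the archive requires a login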
def next(self, verifyPad=False):
"""Manually iterate through the data loaded in Instrument object.
Bounds of iteration and iteration type (day/file) are set by
`bounds` attribute.
Note
----
If there were no previous calls to load then the
first day(default)/file will be loaded.
"""
if self._iter_type == 'date':
if self.date is not None:
idx, = np.where(self._iter_list == self.date)
if (len(idx) == 0):
raise StopIteration('Loaded date is not in the iteration list; check the Instrument bounds.')
elif idx[-1]+1 >= len(self._iter_list):
raise StopIteration('Outside the set date boundaries.')
else:
idx += 1
self.load(date=self._iter_list[idx[0]], verifyPad=verifyPad)
else:
self.load(date=self._iter_list[0], verifyPad=verifyPad)
elif self._iter_type == 'file':
if self._fid is not None:
first = self.files.get_index(self._iter_list[0])
last = self.files.get_index(self._iter_list[-1])
if (self._fid < first) | (self._fid+1 > last):
raise StopIteration('Outside the set file boundaries.')
else:
self.load(fname=self._iter_list[self._fid+1-first],
verifyPad=verifyPad)
else:
self.load(fname=self._iter_list[0], verifyPad=verifyPad) | Manually iterate through the data loaded in Instrument object.
Bounds of iteration and iteration type (day/file) are set by
`bounds` attribute.
Note
----
If there were no previous calls to load then the
first day(default)/file will be loaded. | entailment |
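A sketch of manual iteration with next(): bounds sets the date (or file) limits, the first call loads the first day/file, and StopIteration is raised once the boundaries are passed (placeholder instrument as before).

import datetime as dt
import pysat

inst = pysat.Instrument(platform='platform', name='name', tag='')  # placeholder
inst.bounds = (dt.datetime(2009, 1, 1), dt.datetime(2009, 1, 3))
inst.next()   # loads the first day/file within bounds
inst.next()   # steps forward; eventually raises StopIteration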
def _get_var_type_code(self, coltype):
'''Determines the two-character type code for a given variable type
Parameters
----------
coltype : type or np.dtype
The type of the variable
Returns
-------
str
The variable type code for the given type'''
if type(coltype) is np.dtype:
var_type = coltype.kind + str(coltype.itemsize)
return var_type
else:
if coltype is np.int64:
return 'i8'
elif coltype is np.int32:
return 'i4'
elif coltype is np.int16:
return 'i2'
elif coltype is np.int8:
return 'i1'
elif coltype is np.uint64:
return 'u8'
elif coltype is np.uint32:
return 'u4'
elif coltype is np.uint16:
return 'u2'
elif coltype is np.uint8:
return 'u1'
elif coltype is np.float64:
return 'f8'
elif coltype is np.float32:
return 'f4'
elif issubclass(coltype, basestring):
return 'S1'
else:
raise TypeError('Unknown Variable Type: ' + str(coltype)) | Determines the two-character type code for a given variable type
Parameters
----------
coltype : type or np.dtype
The type of the variable
Returns
-------
str
The variable type code for the given type | entailment |
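The two-character codes above mirror numpy's kind-plus-itemsize convention; a small standalone sketch of that mapping, without going through the Instrument object:

import numpy as np

for coltype in (np.dtype('int32'), np.dtype('uint16'), np.dtype('float64')):
    print(coltype.kind + str(coltype.itemsize))   # i4, u2, f8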