sentence1 (string, lengths 52 to 3.87M) | sentence2 (string, lengths 1 to 47.2k) | label (1 class: entailment) |
---|---|---|
def calculate_clock_angle(inst):
""" Calculate IMF clock angle and magnitude of IMF in GSM Y-Z plane
Parameters
-----------
inst : pysat.Instrument
Instrument with OMNI HRO data
"""
# Calculate clock angle in degrees
clock_angle = np.degrees(np.arctan2(inst['BY_GSM'], inst['BZ_GSM']))
clock_angle[clock_angle < 0.0] += 360.0
inst['clock_angle'] = pds.Series(clock_angle, index=inst.data.index)
# Calculate magnitude of IMF in Y-Z plane
inst['BYZ_GSM'] = pds.Series(np.sqrt(inst['BY_GSM']**2 +
inst['BZ_GSM']**2),
index=inst.data.index)
return | Calculate IMF clock angle and magnitude of IMF in GSM Y-Z plane
Parameters
-----------
inst : pysat.Instrument
Instrument with OMNI HRO data | entailment |
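A minimal numpy sketch of the calculation the function above performs, using made-up IMF components (the nT values are hypothetical, not from any OMNI file): the clock angle is arctan2(BY, BZ) in degrees wrapped into [0, 360), and BYZ is the field magnitude in the GSM Y-Z plane.
import numpy as np
# hypothetical IMF components in nT
by = np.array([3.0, -3.0, 0.0])
bz = np.array([3.0, 3.0, -5.0])
# clock angle in degrees, wrapped into [0, 360)
clock_angle = np.degrees(np.arctan2(by, bz))
clock_angle[clock_angle < 0.0] += 360.0
# magnitude of the IMF in the GSM Y-Z plane
byz_gsm = np.sqrt(by**2 + bz**2)
print(clock_angle)  # [ 45. 315. 180.]
print(byz_gsm)      # [ 4.24...  4.24...  5. ]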
def calculate_imf_steadiness(inst, steady_window=15, min_window_frac=0.75,
max_clock_angle_std=90.0/np.pi, max_bmag_cv=0.5):
""" Calculate IMF steadiness using clock angle standard deviation and
the coefficient of variation of the IMF magnitude in the GSM Y-Z plane
Parameters
-----------
inst : pysat.Instrument
Instrument with OMNI HRO data
steady_window : int
Window for calculating running statistical moments in min (default=15)
min_window_frac : float
Minimum fraction of points in a window for steadiness to be calculated
(default=0.75)
max_clock_angle_std : float
Maximum standard deviation of the clock angle in degrees (default=22.5)
max_bmag_cv : float
Maximum coefficient of variation of the IMF magnitude in the GSM
Y-Z plane (default=0.5)
"""
# We are not going to interpolate through missing values
sample_rate = int(inst.tag[0])
max_wnum = np.floor(steady_window / sample_rate)
if max_wnum != steady_window / sample_rate:
steady_window = max_wnum * sample_rate
print("WARNING: sample rate is not a factor of the statistical window")
print("new statistical window is {:.1f}".format(steady_window))
min_wnum = int(np.ceil(max_wnum * min_window_frac))
# Calculate the running coefficient of variation of the BYZ magnitude
byz_mean = inst['BYZ_GSM'].rolling(min_periods=min_wnum, center=True,
window=steady_window).mean()
byz_std = inst['BYZ_GSM'].rolling(min_periods=min_wnum, center=True,
window=steady_window).std()
inst['BYZ_CV'] = pds.Series(byz_std / byz_mean, index=inst.data.index)
# Calculate the running circular standard deviation of the clock angle
circ_kwargs = {'high':360.0, 'low':0.0}
ca = inst['clock_angle'][~np.isnan(inst['clock_angle'])]
ca_std = inst['clock_angle'].rolling(min_periods=min_wnum,
window=steady_window, \
center=True).apply(pysat.utils.nan_circstd, kwargs=circ_kwargs)
inst['clock_angle_std'] = pds.Series(ca_std, index=inst.data.index)
# Determine how long the clock angle and IMF magnitude are steady
imf_steady = np.zeros(shape=inst.data.index.shape)
steady = False
for i,cv in enumerate(inst.data['BYZ_CV']):
if steady:
del_min = int((inst.data.index[i] -
inst.data.index[i-1]).total_seconds() / 60.0)
if np.isnan(cv) or np.isnan(ca_std[i]) or del_min > sample_rate:
# Reset the steadiness flag if fill values are encountered, or
# if an entry is missing
steady = False
if cv <= max_bmag_cv and ca_std[i] <= max_clock_angle_std:
# Steadiness conditions have been met
if steady:
imf_steady[i] = imf_steady[i-1]
imf_steady[i] += sample_rate
steady = True
inst['IMF_Steady'] = pds.Series(imf_steady, index=inst.data.index)
return | Calculate IMF steadiness using clock angle standard deviation and
the coefficient of variation of the IMF magnitude in the GSM Y-Z plane
Parameters
-----------
inst : pysat.Instrument
Instrument with OMNI HRO data
steady_window : int
Window for calculating running statistical moments in min (default=15)
min_window_frac : float
Minimum fraction of points in a window for steadiness to be calculated
(default=0.75)
max_clock_angle_std : float
Maximum standard deviation of the clock angle in degrees (default=22.5)
max_bmag_cv : float
Maximum coefficient of variation of the IMF magnitude in the GSM
Y-Z plane (default=0.5) | entailment |
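The steadiness check above is built from rolling statistics; the sketch below isolates the coefficient-of-variation piece with pandas on synthetic one-minute data (series, window, and threshold are illustrative, not OMNI values).
import numpy as np
import pandas as pds
# synthetic 1-minute BYZ magnitudes on a datetime index
idx = pds.date_range('2010-01-01', periods=60, freq='1min')
byz = pds.Series(5.0 + np.random.normal(0.0, 0.5, 60), index=idx)
window, min_periods = 15, 12  # 15-minute window, at least 75% coverage
byz_mean = byz.rolling(window=window, min_periods=min_periods, center=True).mean()
byz_std = byz.rolling(window=window, min_periods=min_periods, center=True).std()
byz_cv = byz_std / byz_mean   # running coefficient of variation
is_steady = byz_cv <= 0.5     # same threshold as the max_bmag_cv default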
def calculate_dayside_reconnection(inst):
""" Calculate the dayside reconnection rate (Milan et al. 2014)
Parameters
-----------
inst : pysat.Instrument
Instrument with OMNI HRO data, requires BYZ_GSM and clock_angle
Notes
--------
recon_day = 3.8 Re (Vx / 4e5 m/s)^1/3 Vx B_yz (sin(theta/2))^9/2
"""
rearth = 6371008.8
sin_htheta = np.power(np.sin(np.radians(0.5 * inst['clock_angle'])), 4.5)
byz = inst['BYZ_GSM'] * 1.0e-9
vx = inst['flow_speed'] * 1000.0
recon_day = 3.8 * rearth * vx * byz * sin_htheta * np.power((vx / 4.0e5),
1.0/3.0)
inst['recon_day'] = pds.Series(recon_day, index=inst.data.index)
return | Calculate the dayside reconnection rate (Milan et al. 2014)
Parameters
-----------
inst : pysat.Instrument
Instrument with OMNI HRO data, requires BYZ_GSM and clock_angle
Notes
--------
recon_day = 3.8 Re (Vx / 4e5 m/s)^1/3 Vx B_yz (sin(theta/2))^9/2 | entailment |
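A quick hedged sanity check of the reconnection expression above with arbitrary solar-wind values (Vx = 400 km/s, Byz = 5 nT, clock angle 90 degrees); the numbers are illustrative only.
import numpy as np
rearth = 6371008.8                 # Earth radius in m
vx = 400.0e3                       # hypothetical flow speed in m/s
byz = 5.0e-9                       # hypothetical IMF Y-Z magnitude in T
theta = np.radians(90.0)           # hypothetical clock angle
recon_day = (3.8 * rearth * vx * byz * np.sin(theta / 2.0)**4.5
             * (vx / 4.0e5)**(1.0 / 3.0))
print(recon_day)                   # roughly 1e4 V, i.e. about 10 kV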
def clear_access(self, white_list=None):
""" clear all ace entries of the share
:param white_list: list of usernames whose access entries won't be cleared
:return: sid list of ace entries removed successfully
"""
access_entries = self.get_ace_list()
sid_list = access_entries.sid_list
if white_list:
sid_white_list = [UnityAclUser.get_sid(self._cli,
user,
self.cifs_server.domain)
for user in white_list]
sid_list = list(set(sid_list) - set(sid_white_list))
resp = self.delete_ace(sid=sid_list)
resp.raise_if_err()
return sid_list | clear all ace entries of the share
:param white_list: list of usernames whose access entries won't be cleared
:return: sid list of ace entries removed successfully | entailment |
def delete_ace(self, domain=None, user=None, sid=None):
""" delete ACE for the share
delete ACE for the share. User could either supply the domain and
username or the sid of the user.
:param domain: domain of the user
:param user: username
:param sid: sid of the user or sid list of the user
:return: REST API response
"""
if sid is None:
if domain is None:
domain = self.cifs_server.domain
sid = UnityAclUser.get_sid(self._cli, user=user, domain=domain)
if isinstance(sid, six.string_types):
sid = [sid]
ace_list = [self._make_remove_ace_entry(s) for s in sid]
resp = self.action("setACEs", cifsShareACEs=ace_list)
resp.raise_if_err()
return resp | delete ACE for the share
delete ACE for the share. User could either supply the domain and
username or the sid of the user.
:param domain: domain of the user
:param user: username
:param sid: sid of the user or sid list of the user
:return: REST API response | entailment |
def luns(self):
"""Aggregator for ioclass_luns and ioclass_snapshots."""
lun_list, smp_list = [], []
if self.ioclass_luns:
lun_list = map(lambda l: VNXLun(lun_id=l.lun_id, name=l.name,
cli=self._cli), self.ioclass_luns)
if self.ioclass_snapshots:
smp_list = map(lambda smp: VNXLun(name=smp.name, cli=self._cli),
self.ioclass_snapshots)
return list(lun_list) + list(smp_list) | Aggregator for ioclass_luns and ioclass_snapshots. | entailment |
def policy(self):
"""Returns policy which contains this ioclass."""
policies = VNXIOPolicy.get(cli=self._cli)
ret = None
for policy in policies:
contained = policy.ioclasses.name
if self._get_name() in contained:
ret = VNXIOPolicy.get(name=policy.name, cli=self._cli)
break
return ret | Returns policy which contains this ioclass. | entailment |
def modify(self, new_name=None, iotype=None, lun_ids=None, smp_names=None,
ctrlmethod=None, minsize=None, maxsize=None):
"""Overwrite the current properties for a VNX ioclass.
:param new_name: new name for the ioclass
:param iotype: can be 'rw', 'r' or 'w'
:param lun_ids: list of LUN IDs
:param smp_names: list of Snapshot Mount Point names
:param ctrlmethod: the new CtrlMethod
:param minsize: minimal size in kb
:param maxsize: maximum size in kb
"""
if not any([new_name, iotype, lun_ids, smp_names, ctrlmethod]):
raise ValueError('Cannot apply modification, please specify '
'parameters to modify.')
def _do_modify():
out = self._cli.modify_ioclass(
self._get_name(), new_name, iotype, lun_ids, smp_names,
ctrlmethod, minsize, maxsize)
ex.raise_if_err(out, default=ex.VNXIOClassError)
try:
_do_modify()
except ex.VNXIOCLassRunningError:
with restart_policy(self.policy):
_do_modify()
return VNXIOClass(new_name if new_name else self._get_name(),
self._cli) | Overwrite the current properties for a VNX ioclass.
:param new_name: new name for the ioclass
:param iotype: can be 'rw', 'r' or 'w'
:param lun_ids: list of LUN IDs
:param smp_names: list of Snapshot Mount Point names
:param ctrlmethod: the new CtrlMethod
:param minsize: minimal size in kb
:param maxsize: maximum size in kb | entailment |
def add_lun(self, luns):
"""A wrapper for modify method.
.. note:: This API only appends luns to existing luns.
"""
curr_lun_ids, curr_smp_names = self._get_current_names()
luns = normalize_lun(luns, self._cli)
new_ids, new_smps = convert_lun(luns)
if new_ids:
curr_lun_ids.extend(new_ids)
if new_smps:
curr_smp_names.extend(new_smps)
return self.modify(lun_ids=curr_lun_ids, smp_names=curr_smp_names) | A wrapper for modify method.
.. note:: This API only appends luns to existing luns. | entailment |
def add_class(self, ioclass):
"""Add one VNXIOClass instance to policy.
.. note:: due to a limitation of VNX, the policy needs to be stopped first.
"""
current_ioclasses = self.ioclasses
if ioclass.name in current_ioclasses.name:
return
current_ioclasses.append(ioclass)
self.modify(new_ioclasses=current_ioclasses) | Add one VNXIOClass instance to policy.
.. note:: due to a limitation of VNX, the policy needs to be stopped first. | entailment |
def remove_class(self, ioclass):
"""Remove VNXIOClass instance from policy."""
current_ioclasses = self.ioclasses
new_ioclasses = filter(lambda x: x.name != ioclass.name,
current_ioclasses)
self.modify(new_ioclasses=new_ioclasses) | Remove VNXIOClass instance from policy. | entailment |
def replace_lun(self, *lun_list):
"""Replaces the exiting LUNs to lun_list."""
lun_add = self._prepare_luns_add(lun_list)
lun_remove = self._prepare_luns_remove(lun_list, False)
return self.modify(lun_add=lun_add, lun_remove=lun_remove) | Replaces the existing LUNs with lun_list. | entailment |
def update_lun(self, add_luns=None, remove_luns=None):
"""Updates the LUNs in CG, adding the ones in `add_luns` and removing
the ones in `remove_luns`"""
if not add_luns and not remove_luns:
log.debug("Empty add_luns and remove_luns passed in, "
"skip update_lun.")
return RESP_OK
lun_add = self._prepare_luns_add(add_luns)
lun_remove = self._prepare_luns_remove(remove_luns, True)
return self.modify(lun_add=lun_add, lun_remove=lun_remove) | Updates the LUNs in CG, adding the ones in `add_luns` and removing
the ones in `remove_luns` | entailment |
def clean(inst):
"""Routine to return FPMU data cleaned to the specified level
Parameters
-----------
inst : (pysat.Instrument)
Instrument class object, whose attribute clean_level is used to return
the desired level of data selectivity.
Returns
--------
Void : (NoneType)
data in inst is modified in-place.
Notes
--------
No cleaning currently available for FPMU
"""
inst.data.replace(-999., np.nan, inplace=True) # Te
inst.data.replace(-9.9999998e+30, np.nan, inplace=True) #Ni
return None | Routine to return FPMU data cleaned to the specified level
Parameters
-----------
inst : (pysat.Instrument)
Instrument class object, whose attribute clean_level is used to return
the desired level of data selectivity.
Returns
--------
Void : (NoneType)
data in inst is modified in-place.
Notes
--------
No cleaning currently available for FPMU | entailment |
def _attach_files(self, files_info):
"""Attaches info returned by instrument list_files routine to
Instrument object.
"""
if not files_info.empty:
if (len(files_info.index.unique()) != len(files_info)):
estr = 'WARNING! Duplicate datetimes in provided file '
estr = '{:s}information.\nKeeping one of each '.format(estr)
estr = '{:s}of the duplicates, dropping the rest.'.format(estr)
print(estr)
print(files_info.index.get_duplicates())
idx = np.unique(files_info.index, return_index=True)
files_info = files_info.ix[idx[1]]
#raise ValueError('List of files must have unique datetimes.')
self.files = files_info.sort_index()
date = files_info.index[0]
self.start_date = pds.datetime(date.year, date.month, date.day)
date = files_info.index[-1]
self.stop_date = pds.datetime(date.year, date.month, date.day)
else:
self.start_date = None
self.stop_date = None
# convert to object type
# necessary if Series is empty, enables == checks with strings
self.files = files_info.astype(np.dtype('O')) | Attaches info returned by instrument list_files routine to
Instrument object. | entailment |
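_attach_files drops duplicate datetimes before storing the file list; the standalone pandas sketch below shows that de-duplication step on a synthetic series, using the current Index.duplicated idiom rather than the deprecated .ix/get_duplicates calls above.
import pandas as pds
files_info = pds.Series(
    ['a_20190101.cdf', 'b_20190101.cdf', 'c_20190102.cdf'],
    index=pds.to_datetime(['2019-01-01', '2019-01-01', '2019-01-02']))
if files_info.index.has_duplicates:
    # keep the first file for each datetime, drop the rest
    files_info = files_info[~files_info.index.duplicated(keep='first')]
files_info = files_info.sort_index()
print(files_info)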
def _store(self):
"""Store currently loaded filelist for instrument onto filesystem"""
name = self.stored_file_name
# check if current file data is different than stored file list
# if so, move file list to previous file list, store current to file
# if not, do nothing
stored_files = self._load()
if len(stored_files) != len(self.files):
# # of items is different, things are new
new_flag = True
elif len(stored_files) == len(self.files):
# # of items equal, check specifically for equality
if stored_files.eq(self.files).all():
new_flag = False
else:
# not equal, there are new files
new_flag = True
if new_flag:
if self.write_to_disk:
stored_files.to_csv(os.path.join(self.home_path,
'previous_'+name),
date_format='%Y-%m-%d %H:%M:%S.%f')
self.files.to_csv(os.path.join(self.home_path, name),
date_format='%Y-%m-%d %H:%M:%S.%f')
else:
self._previous_file_list = stored_files
self._current_file_list = self.files.copy()
return | Store currently loaded filelist for instrument onto filesystem | entailment |
def _load(self, prev_version=False):
"""Load stored filelist and return as Pandas Series
Parameters
----------
prev_version : boolean
if True, will load previous version of file list
Returns
-------
pandas.Series
Full path file names are indexed by datetime
Series is empty if there is no file list to load
"""
fname = self.stored_file_name
if prev_version:
fname = os.path.join(self.home_path, 'previous_'+fname)
else:
fname = os.path.join(self.home_path, fname)
if os.path.isfile(fname) and (os.path.getsize(fname) > 0):
if self.write_to_disk:
return pds.read_csv(fname, index_col=0, parse_dates=True,
squeeze=True, header=None)
else:
# grab files from memory
if prev_version:
return self._previous_file_list
else:
return self._current_file_list
else:
return pds.Series([], dtype='a') | Load stored filelist and return as Pandas Series
Parameters
----------
prev_version : boolean
if True, will load previous version of file list
Returns
-------
pandas.Series
Full path file names are indexed by datetime
Series is empty if there is no file list to load | entailment |
def refresh(self):
"""Update list of files, if there are changes.
Calls underlying list_rtn for the particular science instrument.
Typically, these routines search in the pysat provided path,
pysat_data_dir/platform/name/tag/,
where pysat_data_dir is set by pysat.utils.set_data_dir(path=path).
"""
output_str = '{platform} {name} {tag} {sat_id}'
output_str = output_str.format(platform=self._sat.platform,
name=self._sat.name, tag=self._sat.tag,
sat_id=self._sat.sat_id)
output_str = " ".join(("pysat is searching for", output_str, "files."))
output_str = " ".join(output_str.split())
print (output_str)
info = self._sat._list_rtn(tag=self._sat.tag, sat_id=self._sat.sat_id,
data_path=self.data_path,
format_str=self.file_format)
if not info.empty:
print('Found {ll:d} of them.'.format(ll=len(info)))
else:
estr = "Unable to find any files that match the supplied template. "
estr += "If you have the necessary files please check pysat "
estr += "settings and file locations (e.g. pysat.pysat_dir)."
print(estr)
info = self._remove_data_dir_path(info)
self._attach_files(info)
self._store() | Update list of files, if there are changes.
Calls underlying list_rtn for the particular science instrument.
Typically, these routines search in the pysat provided path,
pysat_data_dir/platform/name/tag/,
where pysat_data_dir is set by pysat.utils.set_data_dir(path=path). | entailment |
def get_new(self):
"""List new files since last recorded file state.
pysat stores filenames in the user_home/.pysat directory. Returns
a list of all new filenames since the last known change to files.
Filenames are stored if there is a change and either update_files
is True at instrument object level or files.refresh() is called.
Returns
-------
pandas.Series
files are indexed by datetime
"""
# refresh files
self.refresh()
# current files
new_info = self._load()
# previous set of files
old_info = self._load(prev_version=True)
new_files = new_info[-new_info.isin(old_info)]
return new_files | List new files since last recorded file state.
pysat stores filenames in the user_home/.pysat directory. Returns
a list of all new filenames since the last known change to files.
Filenames are stored if there is a change and either update_files
is True at instrument object level or files.refresh() is called.
Returns
-------
pandas.Series
files are indexed by datetime | entailment |
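The new-file detection in get_new reduces to a value-based set difference between two pandas Series; a tiny sketch with hypothetical filenames:
import pandas as pds
old_info = pds.Series(['f1.cdf', 'f2.cdf'],
                      index=pds.to_datetime(['2019-01-01', '2019-01-02']))
new_info = pds.Series(['f1.cdf', 'f2.cdf', 'f3.cdf'],
                      index=pds.to_datetime(['2019-01-01', '2019-01-02',
                                             '2019-01-03']))
# keep entries of new_info whose values do not appear in old_info
new_files = new_info[~new_info.isin(old_info)]
print(new_files)   # only f3.cdf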
def get_index(self, fname):
"""Return index for a given filename.
Parameters
----------
fname : string
filename
Note
----
If fname not found in the file information already attached
to the instrument.files instance, then a files.refresh() call
is made.
"""
idx, = np.where(fname == self.files)
if len(idx) == 0:
# filename not in index, try reloading files from disk
self.refresh()
#print("DEBUG get_index:", fname, self.files)
idx, = np.where(fname == np.array(self.files))
if len(idx) == 0:
raise ValueError('Could not find "' + fname +
'" in available file list. Valid Example: ' +
self.files.iloc[0])
# return a scalar rather than array - otherwise introduces array to
# index warnings.
return idx[0] | Return index for a given filename.
Parameters
----------
fname : string
filename
Note
----
If fname not found in the file information already attached
to the instrument.files instance, then a files.refresh() call
is made. | entailment |
def get_file_array(self, start, end):
"""Return a list of filenames between and including start and end.
Parameters
----------
start: array_like or single string
filenames for start of returned filelist
end: array_like or single string
filenames inclusive end of list
Returns
-------
list of filenames between and including start and end over all
intervals.
"""
if hasattr(start, '__iter__') & hasattr(end, '__iter__'):
files = []
for (sta,stp) in zip(start, end):
id1 = self.get_index(sta)
id2 = self.get_index(stp)
files.extend(self.files.iloc[id1 : id2+1])
elif hasattr(start, '__iter__') | hasattr(end, '__iter__'):
estr = 'Either both or none of the inputs need to be iterable'
raise ValueError(estr)
else:
id1 = self.get_index(start)
id2 = self.get_index(end)
files = self.files[id1:id2+1].to_list()
return files | Return a list of filenames between and including start and end.
Parameters
----------
start: array_like or single string
filenames for start of returned filelist
end: array_like or single string
filenames inclusive end of list
Returns
-------
list of filenames between and including start and end over all
intervals. | entailment |
def _remove_data_dir_path(self, inp=None):
# import string
"""Remove the data directory path from filenames"""
# need to add a check in here to make sure data_dir path is actually in
# the filename
if inp is not None:
split_str = os.path.join(self.data_path, '')
return inp.apply(lambda x: x.split(split_str)[-1]) | Remove the data directory path from filenames | entailment |
def from_os(cls, data_path=None, format_str=None,
two_digit_year_break=None):
"""
Produces a list of files and formats it for the Files class.
Requires fixed_width filename
Parameters
----------
data_path : string
Top level directory to search files for. This directory
is provided by pysat to the instrument_module.list_files
functions as data_path.
format_str : string with python format codes
Provides the naming pattern of the instrument files and the
locations of date information so an ordered list may be produced.
Supports 'year', 'month', 'day', 'hour', 'min', 'sec', 'version',
and 'revision'
Ex: 'cnofs_cindi_ivm_500ms_{year:4d}{month:02d}{day:02d}_v01.cdf'
two_digit_year_break : int
If filenames only store two digits for the year, then
'1900' will be added for years >= two_digit_year_break
and '2000' will be added for years < two_digit_year_break.
Note
----
Does not produce a Files instance, but the proper output
from instrument_module.list_files method.
The '?' may be used to indicate a set number of spaces for a variable
part of the name that need not be extracted.
'cnofs_cindi_ivm_500ms_{year:4d}{month:02d}{day:02d}_v??.cdf'
"""
import collections
from pysat.utils import create_datetime_index
if format_str is None:
raise ValueError("Must supply a filename template (format_str).")
if data_path is None:
raise ValueError("Must supply instrument directory path (dir_path)")
# parse format string to figure out the search string to use
# to identify files in the filesystem
search_str = ''
form = string.Formatter()
# stores the keywords extracted from format_string
keys = []
#, and length of string
snips = []
length = []
stored = collections.OrderedDict()
stored['year'] = []; stored['month'] = []; stored['day'] = [];
stored['hour'] = []; stored['min'] = []; stored['sec'] = [];
stored['version'] = []; stored['revision'] = [];
for snip in form.parse(format_str):
# collect all of the format keywords
# replace them in the string with the '*' wildcard
# then try and get width from format keywords so we know
# later on where to parse information out from
search_str += snip[0]
snips.append(snip[0])
if snip[1] is not None:
keys.append(snip[1])
search_str += '*'
# try and determine formatting width
temp = re.findall(r'\d+', snip[2])
if temp:
# there are items, try and grab width
for i in temp:
if i != 0:
length.append(int(i))
break
else:
raise ValueError("Couldn't determine formatting width")
abs_search_str = os.path.join(data_path, search_str)
files = glob.glob(abs_search_str)
# we have a list of files, now we need to extract the date information
# code below works, but only if the size of file string
# remains unchanged
# determine the locations the date information in a filename is stored
# use these indices to slice out date from loaded filenames
# test_str = format_str.format(**periods)
if len(files) > 0:
idx = 0
begin_key = []
end_key = []
for i,snip in enumerate(snips):
idx += len(snip)
if i < (len(length)):
begin_key.append(idx)
idx += length[i]
end_key.append(idx)
max_len = idx
# setting up negative indexing to pick out filenames
key_str_idx = [np.array(begin_key, dtype=int) - max_len,
np.array(end_key, dtype=int) - max_len]
# need to parse out dates for datetime index
for i,temp in enumerate(files):
for j,key in enumerate(keys):
val = temp[key_str_idx[0][j]:key_str_idx[1][j]]
stored[key].append(val)
# convert to numpy arrays
for key in stored.keys():
stored[key] = np.array(stored[key]).astype(int)
if len(stored[key]) == 0:
stored[key]=None
# deal with the possibility of two digit years
# years above or equal to break are considered to be 1900+
# years below break are considered to be 2000+
if two_digit_year_break is not None:
idx, = np.where(np.array(stored['year']) >=
two_digit_year_break)
stored['year'][idx] = stored['year'][idx] + 1900
idx, = np.where(np.array(stored['year']) < two_digit_year_break)
stored['year'][idx] = stored['year'][idx] + 2000
# need to sort the information for things to work
rec_arr = [stored[key] for key in keys]
rec_arr.append(files)
# sort all arrays
val_keys = keys + ['files']
rec_arr = np.rec.fromarrays(rec_arr, names=val_keys)
rec_arr.sort(order=val_keys, axis=0)
# pull out sorted info
for key in keys:
stored[key] = rec_arr[key]
files = rec_arr['files']
# add hour and minute information to 'sec'
if stored['sec'] is None:
stored['sec'] = np.zeros(len(files))
if stored['hour'] is not None:
stored['sec'] += 3600 * stored['hour']
if stored['min'] is not None:
stored['sec'] += 60 * stored['min']
# if stored['version'] is None:
# stored['version'] = np.zeros(len(files))
if stored['revision'] is None:
stored['revision'] = np.zeros(len(files))
index = create_datetime_index(year=stored['year'],
month=stored['month'],
day=stored['day'], uts=stored['sec'])
# if version and revision are supplied
# use these parameters to weed out files that have been replaced
# with updated versions
# first, check for duplicate index times
dups = index.get_duplicates()
if (len(dups) > 0) and (stored['version'] is not None):
# we have duplicates
# keep the highest version/revision combo
version = pds.Series(stored['version'], index=index)
revision = pds.Series(stored['revision'], index=index)
revive = version*100000. + revision
frame = pds.DataFrame({'files':files, 'revive':revive,
'time':index}, index=index)
frame = frame.sort_values(by=['time', 'revive'],
ascending=[True, False])
frame = frame.drop_duplicates(subset='time', keep='first')
return frame['files']
else:
return pds.Series(files, index=index)
else:
return pds.Series(None) | Produces a list of files and formats it for the Files class.
Requires fixed_width filename
Parameters
----------
data_path : string
Top level directory to search files for. This directory
is provided by pysat to the instrument_module.list_files
functions as data_path.
format_str : string with python format codes
Provides the naming pattern of the instrument files and the
locations of date information so an ordered list may be produced.
Supports 'year', 'month', 'day', 'hour', 'min', 'sec', 'version',
and 'revision'
Ex: 'cnofs_cindi_ivm_500ms_{year:4d}{month:02d}{day:02d}_v01.cdf'
two_digit_year_break : int
If filenames only store two digits for the year, then
'1900' will be added for years >= two_digit_year_break
and '2000' will be added for years < two_digit_year_break.
Note
----
Does not produce a Files instance, but the proper output
from instrument_module.list_files method.
The '?' may be used to indicate a set number of spaces for a variable
part of the name that need not be extracted.
'cnofs_cindi_ivm_500ms_{year:4d}{month:02d}{day:02d}_v??.cdf' | entailment |
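The core trick in from_os is turning a filename template into a glob search pattern while remembering which fields were replaced; a condensed sketch with a hypothetical template:
import string
format_str = 'cnofs_cindi_ivm_500ms_{year:4d}{month:02d}{day:02d}_v01.cdf'
search_str, keys = '', []
for literal, field, spec, conv in string.Formatter().parse(format_str):
    search_str += literal      # fixed portion of the filename
    if field is not None:
        keys.append(field)     # remember which field sits here
        search_str += '*'      # wildcard in place of the field
print(search_str)  # cnofs_cindi_ivm_500ms_***_v01.cdf
print(keys)        # ['year', 'month', 'day']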
def merge(self, other):
"""Adds metadata variables to self that are in other but not in self.
Parameters
----------
other : pysat.Meta
"""
for key in other.keys():
if key not in self:
# copies over both lower and higher dimensional data
self[key] = other[key] | Adds metadata variables to self that are in other but not in self.
Parameters
----------
other : pysat.Meta | entailment |
def drop(self, names):
"""Drops variables (names) from metadata."""
# drop lower dimension data
self._data = self._data.drop(names, axis=0)
# drop higher dimension data
for name in names:
if name in self._ho_data:
_ = self._ho_data.pop(name) | Drops variables (names) from metadata. | entailment |
def keep(self, keep_names):
"""Keeps variables (keep_names) while dropping other parameters"""
current_names = self._data.columns
drop_names = []
for name in current_names:
if name not in keep_names:
drop_names.append(name)
self.drop(drop_names) | Keeps variables (keep_names) while dropping other parameters | entailment |
def apply_default_labels(self, other):
"""Applies labels for default meta labels from self onto other.
Parameters
----------
other : Meta
Meta object to have default labels applied
Returns
-------
Meta
"""
other_updated = other.copy()
other_updated.units_label = self.units_label
other_updated.name_label = self.name_label
other_updated.notes_label = self.notes_label
other_updated.desc_label = self.desc_label
other_updated.plot_label = self.plot_label
other_updated.axis_label = self.axis_label
other_updated.scale_label = self.scale_label
other_updated.min_label = self.min_label
other_updated.max_label = self.max_label
other_updated.fill_label = self.fill_label
return other_updated | Applies labels for default meta labels from self onto other.
Parameters
----------
other : Meta
Meta object to have default labels applied
Returns
-------
Meta | entailment |
def accept_default_labels(self, other):
"""Applies labels for default meta labels from other onto self.
Parameters
----------
other : Meta
Meta object to take default labels from
Returns
-------
Meta
"""
self.units_label = other.units_label
self.name_label = other.name_label
self.notes_label = other.notes_label
self.desc_label = other.desc_label
self.plot_label = other.plot_label
self.axis_label = other.axis_label
self.scale_label = other.scale_label
self.min_label = other.min_label
self.max_label = other.max_label
self.fill_label = other.fill_label
return | Applies labels for default meta labels from other onto self.
Parameters
----------
other : Meta
Meta object to take default labels from
Returns
-------
Meta | entailment |
def _label_setter(self, new_label, current_label, attr_label, default=np.NaN, use_names_default=False):
"""Generalized setter of default meta attributes
Parameters
----------
new_label : str
New label to use in the Meta object
current_label : str
The hidden attribute to be updated that actually stores metadata
default :
Default setting to use for label if there is no attribute
value
use_names_default : bool
if True, MetaData variable names are used as the default
value for the specified Meta attributes settings
Examples
--------
:
@name_label.setter
def name_label(self, new_label):
self._label_setter(new_label, self._name_label,
use_names_default=True)
Notes
-----
Not intended for end user
"""
if new_label not in self.attrs():
# new label not in metadata, including case
# update existing label, if present
if current_label in self.attrs():
# old label exists and has expected case
self.data.loc[:, new_label] = self.data.loc[:, current_label]
self.data.drop(current_label, axis=1, inplace=True)
else:
if self.has_attr(current_label):
# there is something like label, wrong case though
current_label = self.attr_case_name(current_label)
self.data.loc[:, new_label] = self.data.loc[:, current_label]
self.data.drop(current_label, axis=1, inplace=True)
else:
# there is no existing label
# setting for the first time
if use_names_default:
self.data[new_label] = self.data.index
else:
self.data[new_label] = default
# check higher order structures as well
# recursively change labels here
for key in self.keys_nD():
setattr(self.ho_data[key], attr_label, new_label)
# now update 'hidden' attribute value
# current_label = new_label
setattr(self, ''.join(('_',attr_label)), new_label) | Generalized setter of default meta attributes
Parameters
----------
new_label : str
New label to use in the Meta object
current_label : str
The hidden attribute to be updated that actually stores metadata
default :
Default setting to use for label if there is no attribute
value
use_names_default : bool
if True, MetaData variable names are used as the default
value for the specified Meta attributes settings
Examples
--------
:
@name_label.setter
def name_label(self, new_label):
self._label_setter(new_label, self._name_label,
use_names_default=True)
Notes
-----
Not intended for end user | entailment |
def var_case_name(self, name):
"""Provides stored name (case preserved) for case insensitive input
If name is not found (case-insensitive check) then name is returned,
as input. This function is intended to be used to help ensure the
case of a given variable name is the same across the Meta object.
Parameters
----------
name : str
variable name in any case
Returns
-------
str
string with case preserved as in metaobject
"""
lower_name = name.lower()
if name in self:
for i in self.keys():
if lower_name == i.lower():
return i
for i in self.keys_nD():
if lower_name == i.lower():
return i
return name | Provides stored name (case preserved) for case insensitive input
If name is not found (case-insensitive check) then name is returned,
as input. This function is intended to be used to help ensure the
case of a given variable name is the same across the Meta object.
Parameters
----------
name : str
variable name in any case
Returns
-------
str
string with case preserved as in metaobject | entailment |
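var_case_name is essentially a case-insensitive lookup that hands back the stored spelling; a generic sketch of the same idea (the helper name case_name is invented for illustration):
def case_name(name, stored_names):
    """Return the stored spelling of name, or name itself if unknown."""
    lower_name = name.lower()
    for stored in stored_names:
        if stored.lower() == lower_name:
            return stored
    return name

print(case_name('dens', ['Dens', 'Temp']))  # 'Dens'
print(case_name('vel', ['Dens', 'Temp']))   # 'vel' (not found, returned as-is)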
def has_attr(self, name):
"""Returns boolean indicating presence of given attribute name
Case-insensitive check
Notes
-----
Does not check higher order meta objects
Parameters
----------
name : str
name of variable to get stored case form
Returns
-------
bool
True if case-insensitive check for attribute name is True
"""
if name.lower() in [i.lower() for i in self.data.columns]:
return True
return False | Returns boolean indicating presence of given attribute name
Case-insensitive check
Notes
-----
Does not check higher order meta objects
Parameters
----------
name : str
name of variable to get stored case form
Returns
-------
bool
True if case-insensitive check for attribute name is True | entailment |
def attr_case_name(self, name):
"""Returns preserved case name for case insensitive value of name.
Checks first within standard attributes. If not found there, checks
attributes for higher order data structures. If not found, returns
supplied name as it is available for use. Intended to be used to help
ensure that the same case is applied to all repetitions of a given
variable name.
Parameters
----------
name : str
name of variable to get stored case form
Returns
-------
str
name in proper case
"""
lower_name = name.lower()
for i in self.attrs():
if lower_name == i.lower():
return i
# check if attribute present in higher order structures
for key in self.keys_nD():
for i in self[key].children.attrs():
if lower_name == i.lower():
return i
# nothing was found if still here
# pass name back, free to be whatever
return name | Returns preserved case name for case insensitive value of name.
Checks first within standard attributes. If not found there, checks
attributes for higher order data structures. If not found, returns
supplied name as it is available for use. Intended to be used to help
ensure that the same case is applied to all repetitions of a given
variable name.
Parameters
----------
name : str
name of variable to get stored case form
Returns
-------
str
name in proper case | entailment |
def concat(self, other, strict=False):
"""Concats two metadata objects together.
Parameters
----------
other : Meta
Meta object to be concatenated
strict : bool
if True, ensure there are no duplicate variable names
Notes
-----
Uses units and name label of self if other is different
Returns
-------
Meta
Concatenated object
"""
mdata = self.copy()
# checks
if strict:
for key in other.keys():
if key in mdata:
raise RuntimeError('Duplicated keys (variable names) ' +
'across Meta objects in keys().')
for key in other.keys_nD():
if key in mdata:
raise RuntimeError('Duplicated keys (variable names) across '
'Meta objects in keys_nD().')
# make sure labels between the two objects are the same
other_updated = self.apply_default_labels(other)
# concat 1D metadata in data frames to copy of
# current metadata
for key in other_updated.keys():
mdata.data.loc[key] = other.data.loc[key]
# add together higher order data
for key in other_updated.keys_nD():
mdata.ho_data[key] = other.ho_data[key]
return mdata | Concats two metadata objects together.
Parameters
----------
other : Meta
Meta object to be concatenated
strict : bool
if True, ensure there are no duplicate variable names
Notes
-----
Uses units and name label of self if other is different
Returns
-------
Meta
Concatenated object | entailment |
def pop(self, name):
"""Remove and return metadata about variable
Parameters
----------
name : str
variable name
Returns
-------
pandas.Series
Series of metadata for variable
"""
# check if present
if name in self:
# get case preserved name for variable
new_name = self.var_case_name(name)
# check if 1D or nD
if new_name in self.keys():
output = self[new_name]
self.data.drop(new_name, inplace=True, axis=0)
else:
output = self.ho_data.pop(new_name)
return output
else:
raise KeyError('Key not present in metadata variables') | Remove and return metadata about variable
Parameters
----------
name : str
variable name
Returns
-------
pandas.Series
Series of metadata for variable | entailment |
def transfer_attributes_to_instrument(self, inst, strict_names=False):
"""Transfer non-standard attributes in Meta to Instrument object.
Pysat's load_netCDF and similar routines are only able to attach
netCDF4 attributes to a Meta object. This routine identifies these
attributes and removes them from the Meta object. Intent is to
support simple transfers to the pysat.Instrument object.
Will not transfer names that conflict with pysat default attributes.
Parameters
----------
inst : pysat.Instrument
Instrument object to transfer attributes to
strict_names : boolean (False)
If True, produces an error if the Instrument object already
has an attribute with the same name to be copied.
Returns
-------
None
pysat.Instrument object modified in place with new attributes
"""
# base Instrument attributes
banned = inst._base_attr
# get base attribute set, and attributes attached to instance
base_attrb = self._base_attr
this_attrb = dir(self)
# collect these attributes into a dict
adict = {}
transfer_key = []
for key in this_attrb:
if key not in banned:
if key not in base_attrb:
# don't store _ leading attributes
if key[0] != '_':
adict[key] = self.__getattribute__(key)
transfer_key.append(key)
# store any non-standard attributes in Instrument
# get list of instrument objects attributes first
# to check if a duplicate
inst_attr = dir(inst)
for key in transfer_key:
if key not in banned:
if key not in inst_attr:
inst.__setattr__(key, adict[key])
else:
if not strict_names:
# new_name = 'pysat_attr_'+key
inst.__setattr__(key, adict[key])
else:
raise RuntimeError('Attribute ' + key +
' attached to Meta object can not be '
+ 'transferred as it already exists'
+ ' in the Instrument object.') | Transfer non-standard attributes in Meta to Instrument object.
Pysat's load_netCDF and similar routines are only able to attach
netCDF4 attributes to a Meta object. This routine identifies these
attributes and removes them from the Meta object. Intent is to
support simple transfers to the pysat.Instrument object.
Will not transfer names that conflict with pysat default attributes.
Parameters
----------
inst : pysat.Instrument
Instrument object to transfer attributes to
strict_names : boolean (False)
If True, produces an error if the Instrument object already
has an attribute with the same name to be copied.
Returns
-------
None
pysat.Instrument object modified in place with new attributes | entailment |
def from_csv(cls, name=None, col_names=None, sep=None, **kwargs):
"""Create instrument metadata object from csv.
Parameters
----------
name : string
absolute filename for csv file or name of file
stored in pandas instruments location
col_names : list-like collection of strings
column names in csv and resultant meta object
sep : string
column separator for supplied csv filename
Note
----
column names must include at least ['name', 'long_name', 'units'],
assumed if col_names is None.
"""
import pysat
req_names = ['name','long_name','units']
if col_names is None:
col_names = req_names
elif not all([i in col_names for i in req_names]):
raise ValueError('col_names must include name, long_name, units.')
if sep is None:
sep = ','
if name is None:
raise ValueError('Must supply an instrument name or file path.')
elif not isinstance(name, str):
raise ValueError('keyword name must be related to a string')
elif not os.path.isfile(name):
# Not a real file, assume input is a pysat instrument name
# and look in the standard pysat location.
test = os.path.join(pysat.__path__[0],'instruments',name)
if os.path.isfile(test):
name = test
else:
#trying to form an absolute path for success
test = os.path.abspath(name)
if not os.path.isfile(test):
raise ValueError("Unable to create valid file path.")
else:
#success
name = test
mdata = pds.read_csv(name, names=col_names, sep=sep, **kwargs)
if not mdata.empty:
# make sure the data name is the index
mdata.index = mdata['name']
del mdata['name']
return cls(metadata=mdata)
else:
raise ValueError('Unable to retrieve information from ' + name) | Create instrument metadata object from csv.
Parameters
----------
name : string
absolute filename for csv file or name of file
stored in pandas instruments location
col_names : list-like collection of strings
column names in csv and resultant meta object
sep : string
column separator for supplied csv filename
Note
----
column names must include at least ['name', 'long_name', 'units'],
assumed if col_names is None. | entailment |
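from_csv above is a thin wrapper around pandas.read_csv plus re-indexing by the 'name' column; the sketch below shows the same pattern on an in-memory CSV instead of an instrument file (contents are made up):
import io
import pandas as pds
csv_text = 'dens,Ion density,cm^-3\nvel,Ion velocity,m/s\n'
col_names = ['name', 'long_name', 'units']
mdata = pds.read_csv(io.StringIO(csv_text), names=col_names, sep=',')
mdata.index = mdata['name']         # variable name becomes the index
del mdata['name']
print(mdata.loc['dens', 'units'])   # cm^-3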
def clean(self):
"""Routine to return C/NOFS IVM data cleaned to the specified level
Parameters
-----------
inst : (pysat.Instrument)
Instrument class object, whose attribute clean_level is used to return
the desired level of data selectivity.
Returns
--------
Void : (NoneType)
data in inst is modified in-place.
Notes
--------
Supports 'clean', 'dusty', 'dirty'
"""
# cleans cindi data
if self.clean_level == 'clean':
# choose areas below 550km
# self.data = self.data[self.data.alt <= 550]
idx, = np.where(self.data.altitude <= 550)
self.data = self[idx,:]
# make sure all -999999 values are NaN
self.data.replace(-999999., np.nan, inplace=True)
if (self.clean_level == 'clean') | (self.clean_level == 'dusty'):
try:
idx, = np.where(np.abs(self.data.ionVelmeridional) < 10000.)
self.data = self[idx,:]
except AttributeError:
pass
if self.clean_level == 'dusty':
# take out all values where RPA data quality is > 1
idx, = np.where(self.data.RPAflag <= 1)
self.data = self[idx,:]
# IDM quality flags
self.data = self.data[ (self.data.driftMeterflag<= 3) ]
else:
# take out all values where RPA data quality is > 0
idx, = np.where(self.data.RPAflag <= 0)
self.data = self[idx,:]
# IDM quality flags
self.data = self.data[ (self.data.driftMeterflag<= 0) ]
if self.clean_level == 'dirty':
# take out all values where RPA data quality is > 4
idx, = np.where(self.data.RPAflag <= 4)
self.data = self[idx,:]
# IDM quality flags
self.data = self.data[ (self.data.driftMeterflag<= 6) ]
# basic quality check on drifts and don't let UTS go above 86400.
idx, = np.where(self.data.time <= 86400.)
self.data = self[idx,:]
# make sure MLT is between 0 and 24
idx, = np.where((self.data.mlt >= 0) & (self.data.mlt <= 24.))
self.data = self[idx,:]
return | Routine to return C/NOFS IVM data cleaned to the specified level
Parameters
-----------
inst : (pysat.Instrument)
Instrument class object, whose attribute clean_level is used to return
the desired level of data selectivity.
Returns
--------
Void : (NoneType)
data in inst is modified in-place.
Notes
--------
Supports 'clean', 'dusty', 'dirty' | entailment |
def nonraw_instance(receiver):
"""
A signal receiver decorator that fetches the complete instance from the db when
it's passed as raw
"""
@wraps(receiver)
def wrapper(sender, instance, raw, using, **kwargs):
if raw:
instance = sender._default_manager.using(using).get(pk=instance.pk)
return receiver(sender=sender, raw=raw, instance=instance, using=using,
**kwargs)
return wrapper | A signal receiver decorator that fetches the complete instance from the db when
it's passed as raw | entailment |
def base_definition_pre_delete(sender, instance, **kwargs):
"""
This is used to pass data required for deletion to the post_delete
signal that is no longer available thereafter.
"""
# see CASCADE_MARK_ORIGIN's docstring
cascade_deletion_origin = popattr(
instance._state, '_cascade_deletion_origin', None
)
if cascade_deletion_origin == 'model_def':
return
if (instance.base and issubclass(instance.base, models.Model) and
instance.base._meta.abstract):
instance._state._deletion = instance.model_def.model_class().render_state() | This is used to pass data required for deletion to the post_delete
signal that is no longer available thereafter. | entailment |
def base_definition_post_delete(sender, instance, **kwargs):
"""
Make sure to delete fields inherited from an abstract model base.
"""
if hasattr(instance._state, '_deletion'):
# Make sure to flatten abstract bases since Django
# migrations can't deal with them.
model = popattr(instance._state, '_deletion')
for field in instance.base._meta.fields:
perform_ddl('remove_field', model, field) | Make sure to delete fields inherited from an abstract model base. | entailment |
def raw_field_definition_proxy_post_save(sender, instance, raw, **kwargs):
"""
When proxy field definitions are loaded from a fixture they're not
passing through the `field_definition_post_save` signal. Make sure they
are.
"""
if raw:
model_class = instance.content_type.model_class()
opts = model_class._meta
if opts.proxy and opts.concrete_model is sender:
field_definition_post_save(
sender=model_class, instance=instance.type_cast(), raw=raw,
**kwargs
) | When proxy field definitions are loaded from a fixture they're not
passing through the `field_definition_post_save` signal. Make sure they
are. | entailment |
def field_definition_post_save(sender, instance, created, raw, **kwargs):
"""
This signal is connected by all FieldDefinition subclasses
see comment in FieldDefinitionBase for more details
"""
model_class = instance.model_def.model_class().render_state()
field = instance.construct_for_migrate()
field.model = model_class
if created:
if hasattr(instance._state, '_creation_default_value'):
field.default = instance._state._creation_default_value
delattr(instance._state, '_creation_default_value')
add_column = popattr(instance._state, '_add_column', True)
if add_column:
perform_ddl('add_field', model_class, field)
# If the field definition is raw we must re-create the model class
# since ModelDefinitionAttribute.save won't be called
if raw:
instance.model_def.model_class().mark_as_obsolete()
else:
old_field = instance._state._pre_save_field
delattr(instance._state, '_pre_save_field')
perform_ddl('alter_field', model_class, old_field, field, strict=True) | This signal is connected by all FieldDefinition subclasses
see comment in FieldDefinitionBase for more details | entailment |
def popattr(obj, attr, default=NOT_PROVIDED):
"""
Useful for retrieving an object attr and removing it if it's part of its
dict while allowing retrieval from a subclass.
i.e.
class A:
a = 'a'
class B(A):
b = 'b'
>>> popattr(B, 'a', None)
'a'
>>> A.a
'a'
"""
val = getattr(obj, attr, default)
try:
delattr(obj, attr)
except AttributeError:
if default is NOT_PROVIDED:
raise
return val | Useful for retrieving an object attr and removing it if it's part of its
dict while allowing retrieval from a subclass.
i.e.
class A:
a = 'a'
class B(A):
b = 'b'
>>> popattr(B, 'a', None)
'a'
>>> A.a
'a' | entailment |
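popattr mirrors dict.pop for attributes; a short demo of both behaviours, assuming the popattr defined above is in scope:
class State(object):
    pass

s = State()
s.flag = 'model_def'
# removes the instance attribute and returns its value
print(popattr(s, 'flag', None))   # 'model_def'
# attribute is gone now, so the supplied default comes back instead
print(popattr(s, 'flag', None))   # None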
def _app_cache_deepcopy(obj):
"""
A helper that correctly deepcopies the model cache state
"""
if isinstance(obj, defaultdict):
return deepcopy(obj)
elif isinstance(obj, dict):
return type(obj)((_app_cache_deepcopy(key), _app_cache_deepcopy(val)) for key, val in obj.items())
elif isinstance(obj, list):
return list(_app_cache_deepcopy(val) for val in obj)
elif isinstance(obj, AppConfig):
app_conf = Empty()
app_conf.__class__ = AppConfig
app_conf.__dict__ = _app_cache_deepcopy(obj.__dict__)
return app_conf
return obj | A helper that correctly deepcopies the model cache state | entailment |
def app_cache_restorer():
"""
A context manager that restores the model cache state as it was before
entering the context.
"""
state = _app_cache_deepcopy(apps.__dict__)
try:
yield state
finally:
with apps_lock():
apps.__dict__ = state
# Rebind the app registry models cache to
# individual app config ones.
for app_conf in apps.get_app_configs():
app_conf.models = apps.all_models[app_conf.label]
apps.clear_cache() | A context manager that restores the model cache state as it was before
entering the context. | entailment |
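app_cache_restorer follows a common snapshot-and-restore pattern; here is a framework-free sketch of the same try/finally contextmanager shape, with a plain dict standing in for the app registry:
from contextlib import contextmanager
from copy import deepcopy

registry = {'models': ['a', 'b']}      # stand-in for registry state

@contextmanager
def restore_state(store):
    snapshot = deepcopy(store)         # capture state on entry
    try:
        yield snapshot
    finally:
        store.clear()
        store.update(snapshot)         # put the original state back on exit

with restore_state(registry):
    registry['models'].append('temporary')
print(registry['models'])              # ['a', 'b'] -- mutation rolled back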
def CASCADE_MARK_ORIGIN(collector, field, sub_objs, using):
"""
Custom on_delete handler which sets _cascade_deletion_origin on the _state
of all related objects that will be deleted.
We use this handler on ModelDefinitionAttribute.model_def, so when we delete
a ModelDefinition we can skip field_definition_post_delete and
base_definition_post_delete and avoid an incremental columns deletion before
the entire table is dropped.
"""
CASCADE(collector, field, sub_objs, using)
if sub_objs:
for obj in sub_objs:
obj._state._cascade_deletion_origin = field.name | Custom on_delete handler which sets _cascade_deletion_origin on the _state
of all related objects that will be deleted.
We use this handler on ModelDefinitionAttribute.model_def, so when we delete
a ModelDefinition we can skip field_definition_post_delete and
base_definition_post_delete and avoid an incremental columns deletion before
the entire table is dropped. | entailment |
def mutable_model_prepared(signal, sender, definition, existing_model_class,
**kwargs):
"""
Make sure all related model classes are created and marked as dependencies
when a mutable model class is prepared
"""
referenced_models = set()
# Collect all model classes the obsolete model class was referring to
if existing_model_class:
for field in existing_model_class._meta.local_fields:
if isinstance(field, RelatedField):
remote_field_model = get_remote_field_model(field)
if not isinstance(remote_field_model, string_types):
referenced_models.add(remote_field_model)
# Add sender as a dependency of all mutable models it refers to
for field in sender._meta.local_fields:
if isinstance(field, RelatedField):
remote_field_model = get_remote_field_model(field)
if not isinstance(remote_field_model, string_types):
referenced_models.add(remote_field_model)
if (issubclass(remote_field_model, MutableModel) and
remote_field_model._definition != sender._definition):
remote_field_model._dependencies.add(sender._definition)
# Mark all model referring to this one as dependencies
related_model_defs = ModelDefinition.objects.filter(
Q(fielddefinitions__foreignkeydefinition__to=definition) |
Q(fielddefinitions__manytomanyfielddefinition__to=definition)
).distinct()
for model_def in related_model_defs:
if model_def != definition:
# Generate model class from definition and add it as a dependency
sender._dependencies.add(model_def.model_class()._definition)
# Clear the referenced models opts related cache
for model_class in referenced_models:
clear_opts_related_cache(model_class) | Make sure all related model classes are created and marked as dependencies
when a mutable model class is prepared | entailment |
def _model_class_from_pk(definition_cls, definition_pk):
"""
Helper used to unpickle MutableModel model class from their definition
pk.
"""
try:
return definition_cls.objects.get(pk=definition_pk).model_class()
except definition_cls.DoesNotExist:
pass | Helper used to unpickle MutableModel model class from their definition
pk. | entailment |
def clean(self):
"""
Make sure the lookup makes sense
"""
if self.lookup == '?': # Randomly sort
return
else:
lookups = self.lookup.split(LOOKUP_SEP)
opts = self.model_def.model_class()._meta
valid = True
while len(lookups):
lookup = lookups.pop(0)
try:
field = opts.get_field(lookup)
except FieldDoesNotExist:
valid = False
else:
if isinstance(field, models.ForeignKey):
opts = get_remote_field_model(field)._meta
elif len(lookups): # Cannot go any deeper
valid = False
finally:
if not valid:
msg = _("This field doesn't exist")
raise ValidationError({'lookup': [msg]}) | Make sure the lookup makes sense | entailment |
def lorentz_deriv((x, y, z), t0, sigma=10., beta=8./3, rho=28.0):
"""Compute the time-derivative of a Lorentz system."""
return [sigma * (y - x), x * (rho - z) - y, x * y - beta * z] | Compute the time-derivative of a Lorentz system. | entailment |
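lorentz_deriv is meant to be fed to an ODE integrator; the sketch below shows hedged usage with scipy.integrate.odeint and arbitrary initial conditions. Because the tuple-unpacking signature above is Python 2 only, the sketch restates the derivative in a Python 3 compatible form.
import numpy as np
from scipy.integrate import odeint

def lorentz_deriv(xyz, t0, sigma=10., beta=8. / 3, rho=28.0):
    """Compute the time-derivative of a Lorentz system."""
    x, y, z = xyz
    return [sigma * (y - x), x * (rho - z) - y, x * y - beta * z]

t = np.linspace(0, 4, 1000)
x0 = [1.0, 1.0, 1.0]                  # arbitrary starting point
trajectory = odeint(lorentz_deriv, x0, t)
print(trajectory.shape)               # (1000, 3)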
def buildParser():
''' Builds the parser for reading the command line arguments'''
parser = argparse.ArgumentParser(description='Bagfile reader')
parser.add_argument('-b', '--bag', help='Bag file to read',
required=True, type=str)
parser.add_argument('-s', '--series',
help='Msg data fields to graph',
required=True, nargs='*')
parser.add_argument('-y ', '--ylim',
help='Set min and max y lim',
required=False, nargs=2)
parser.add_argument('-c', '--combined',
help="Graph them all on one",
required=False, action="store_true", dest="sharey")
return parser | Builds the parser for reading the command line arguments | entailment |
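A brief usage sketch for the parser above, with hypothetical command-line values (it assumes the module-level argparse import from the original source):
parser = buildParser()
args = parser.parse_args(['-b', 'run1.bag',
                          '-s', '/odom.pose.position.x',
                          '/odom.pose.position.y',
                          '-c'])
print(args.bag)      # run1.bag
print(args.series)   # ['/odom.pose.position.x', '/odom.pose.position.y']
print(args.sharey)   # True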
def parse_series_args(topics, fields):
'''Return which topics and which field keys need to be examined
for plotting'''
keys = {}
for field in fields:
for topic in topics:
if field.startswith(topic):
keys[field] = (topic, field[len(topic) + 1:])
return keys | Return which topics and which field keys need to be examined
for plotting | entailment |
def bag_to_dataframe(bag_name, include=None, exclude=None, parse_header=False, seconds=False):
'''
Read in a rosbag file and create a pandas data frame that
is indexed by the time the message was recorded in the bag.
:bag_name: String name for the bag file
:include: None, String, or List Topics to include in the dataframe
if None all topics added, if string it is used as regular
expression, if list that list is used.
:exclude: None, String, or List Topics to be removed from those added
using the include option using set difference. If None no topics
removed. If String it is treated as a regular expression. A list
removes those in the list.
:seconds: time index is in seconds
:returns: a pandas dataframe object
'''
# get list of topics to parse
yaml_info = get_bag_info(bag_name)
bag_topics = get_topics(yaml_info)
bag_topics = prune_topics(bag_topics, include, exclude)
length = get_length(bag_topics, yaml_info)
msgs_to_read, msg_type = get_msg_info(yaml_info, bag_topics, parse_header)
bag = rosbag.Bag(bag_name)
dmap = create_data_map(msgs_to_read)
# create datastore
datastore = {}
for topic in dmap.keys():
for f, key in dmap[topic].iteritems():
t = msg_type[topic][f]
if isinstance(t, int) or isinstance(t, float):
arr = np.empty(length)
arr.fill(np.NAN)
elif isinstance(t, list):
arr = np.empty(length)
arr.fill(np.NAN)
for i in range(len(t)):
key_i = '{0}{1}'.format(key, i)
datastore[key_i] = arr.copy()
continue
else:
arr = np.empty(length, dtype=np.object)
datastore[key] = arr
# create the index
index = np.empty(length)
index.fill(np.NAN)
# all of the data is loaded
for idx, (topic, msg, mt) in enumerate(bag.read_messages(topics=bag_topics)):
try:
if seconds:
index[idx] = msg.header.stamp.to_sec()
else:
index[idx] = msg.header.stamp.to_nsec()
except:
if seconds:
index[idx] = mt.to_sec()
else:
index[idx] = mt.to_nsec()
fields = dmap[topic]
for f, key in fields.iteritems():
try:
d = get_message_data(msg, f)
if isinstance(d, tuple):
for i, val in enumerate(d):
key_i = '{0}{1}'.format(key, i)
datastore[key_i][idx] = val
else:
datastore[key][idx] = d
except:
pass
bag.close()
# convert the index
if not seconds:
index = pd.to_datetime(index, unit='ns')
# now we have read all of the messages its time to assemble the dataframe
return pd.DataFrame(data=datastore, index=index) | Read in a rosbag file and create a pandas data frame that
is indexed by the time the message was recorded in the bag.
:bag_name: String name for the bag file
:include: None, String, or List Topics to include in the dataframe
if None all topics added, if string it is used as regular
expression, if list that list is used.
:exclude: None, String, or List Topics to be removed from those added
using the include option using set difference. If None no topics
removed. If String it is treated as a regular expression. A list
removes those in the list.
:seconds: time index is in seconds
:returns: a pandas dataframe object | entailment |
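bag_to_dataframe pre-allocates NaN-filled arrays and only converts the nanosecond stamps to a DatetimeIndex at the end; the sketch below isolates that assembly step without rosbag, using made-up stamps and values:
import numpy as np
import pandas as pd

length = 3
datastore = {'odom__x': np.full(length, np.nan)}   # pre-sized, NaN-filled column
index = np.full(length, np.nan)

samples = [(1.5e18, 0.1), (1.5e18 + 1e9, 0.2), (1.5e18 + 2e9, 0.3)]
for i, (stamp_ns, value) in enumerate(samples):
    index[i] = stamp_ns
    datastore['odom__x'][i] = value

df = pd.DataFrame(data=datastore, index=pd.to_datetime(index, unit='ns'))
print(df)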
def get_length(topics, yaml_info):
'''
Find the length (# of rows) in the created dataframe
'''
total = 0
info = yaml_info['topics']
for topic in topics:
for t in info:
if t['topic'] == topic:
total = total + t['messages']
break
return total | Find the length (# of rows) in the created dataframe | entailment |
def create_data_map(msgs_to_read):
'''
Create a data map for usage when parsing the bag
'''
dmap = {}
for topic in msgs_to_read.keys():
base_name = get_key_name(topic) + '__'
fields = {}
for f in msgs_to_read[topic]:
key = (base_name + f).replace('.', '_')
fields[f] = key
dmap[topic] = fields
return dmap | Create a data map for usage when parsing the bag | entailment |
def prune_topics(bag_topics, include, exclude):
'''prune the topics. If include is None add all to the set of topics to
use if include is a string regex match that string,
if it is a list use the list
If exclude is None do nothing, if string remove the topics with regex,
if it is a list remove those topics'''
topics_to_use = set()
# add all of the topics
if include is None:
for t in bag_topics:
topics_to_use.add(t)
elif isinstance(include, basestring):
check = re.compile(include)
for t in bag_topics:
if re.match(check, t) is not None:
topics_to_use.add(t)
else:
try:
# add all of the includes if it is in the topic
for topic in include:
if topic in bag_topics:
topics_to_use.add(topic)
except:
warnings.warn('Error in topic selection. Using all topics!')
topics_to_use = set()
for t in bag_topics:
topics_to_use.add(t)
to_remove = set()
# now exclude the exclusions
if exclude is None:
pass
elif isinstance(exclude, basestring):
check = re.compile(exclude)
for t in list(topics_to_use):
if re.match(check, t) is not None:
to_remove.add(t)
else:
for remove in exclude:
if remove in topics_to_use:
to_remove.add(remove)
# final set stuff to get topics to use
topics_to_use = topics_to_use - to_remove
# return a list for the results
return list(topics_to_use) | prune the topics. If include is None add all to the set of topics to
use if include is a string regex match that string,
if it is a list use the list
If exclude is None do nothing, if string remove the topics with regex,
if it is a list remove those topics | entailment |
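A small sketch of how prune_topics combines the include and exclude options; the topic names below are invented for illustration.

bag_topics = ['/imu/data', '/gps/fix', '/camera/image_raw']
kept = prune_topics(bag_topics, include='/(imu|gps)/.*', exclude=['/gps/fix'])
# include is treated as a regex, exclude as a plain list, so kept == ['/imu/data']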
def get_msg_info(yaml_info, topics, parse_header=True):
'''
Get info from all of the messages about what they contain
and will be added to the dataframe
'''
topic_info = yaml_info['topics']
msgs = {}
classes = {}
for topic in topics:
base_key = get_key_name(topic)
msg_paths = []
msg_types = {}
for info in topic_info:
if info['topic'] == topic:
msg_class = get_message_class(info['type'])
if msg_class is None:
warnings.warn(
'Could not find types for ' + topic + ', skipping')
else:
(msg_paths, msg_types) = get_base_fields(msg_class(), "",
parse_header)
msgs[topic] = msg_paths
classes[topic] = msg_types
return (msgs, classes) | Get info from all of the messages about what they contain
and will be added to the dataframe | entailment |
def get_bag_info(bag_file):
'''Get yaml dict of the bag information
by calling the subprocess -- used to create correct sized
arrays'''
# Get the info on the bag
bag_info = yaml.load(subprocess.Popen(
['rosbag', 'info', '--yaml', bag_file],
stdout=subprocess.PIPE).communicate()[0])
return bag_info | Get yaml dict of the bag information
by calling the subprocess -- used to create correct sized
arrays | entailment |
def get_topics(yaml_info):
''' Returns the names of all of the topics in the bag
'''
# Pull out the topic info
names = []
# Store all of the topics in a dictionary
topics = yaml_info['topics']
for topic in topics:
names.append(topic['topic'])
return names | Returns the names of all of the topics in the bag | entailment
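The two helpers above are typically chained; 'flight.bag' is again a hypothetical file name.

yaml_info = get_bag_info('flight.bag')    # shells out to: rosbag info --yaml
topic_names = get_topics(yaml_info)       # e.g. ['/imu/data', '/gps/fix']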
def get_base_fields(msg, prefix='', parse_header=True):
'''function to get the full names of every message field in the message'''
slots = msg.__slots__
ret_val = []
msg_types = dict()
for i in slots:
slot_msg = getattr(msg, i)
if not parse_header and i == 'header':
continue
if hasattr(slot_msg, '__slots__'):
(subs, type_map) = get_base_fields(
slot_msg, prefix=prefix + i + '.',
parse_header=parse_header,
)
for i in subs:
ret_val.append(i)
for k, v in type_map.items():
msg_types[k] = v
else:
ret_val.append(prefix + i)
msg_types[prefix + i] = slot_msg
return (ret_val, msg_types) | function to get the full names of every message field in the message | entailment |
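Because get_base_fields relies only on __slots__ and getattr, a plain Python stand-in is enough to illustrate the flattening; the Vector3 and Twist classes below are hypothetical, not real ROS message classes.

class Vector3(object):
    __slots__ = ['x', 'y', 'z']
    def __init__(self):
        self.x, self.y, self.z = 0.0, 0.0, 0.0

class Twist(object):
    __slots__ = ['linear', 'angular']
    def __init__(self):
        self.linear, self.angular = Vector3(), Vector3()

paths, leaf_values = get_base_fields(Twist())
# paths -> ['linear.x', 'linear.y', 'linear.z', 'angular.x', 'angular.y', 'angular.z']
# leaf_values maps each path to its value; numeric leaves get NaN-filled float arrays
# in the datastore above, everything else gets object arrays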
def get_message_data(msg, key):
'''get the datapoint from the dot delimited message field key
e.g. translation.x looks up translation then x and returns the value found
in x'''
data = msg
paths = key.split('.')
for i in paths:
data = getattr(data, i)
return data | get the datapoint from the dot delimited message field key
e.g. translation.x looks up translation then x and returns the value found
in x | entailment |
def buildParser():
''' Builds the parser for reading the command line arguments'''
parser = argparse.ArgumentParser(
description='Script to parse bagfile to csv file')
parser.add_argument('bag', help='Bag file to read',
type=str)
parser.add_argument('-i', '--include',
help='list or regex for topics to include',
nargs='*')
parser.add_argument('-e', '--exclude',
help='list or regex for topics to exclude',
nargs='*')
parser.add_argument('-o', '--output',
help='name of the output file',
nargs='*')
parser.add_argument('-f', '--fill',
help='Fill the bag forward and backwards so no missing values when present',
action='store_true')
parser.add_argument('--include-header',
help='Include the header fields. By default they are excluded',
action='store_true')
return parser | Builds the parser for reading the command line arguments | entailment |
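A sketch of driving the parser from a script entry point; the argument values are made up, and the downstream bag-reading call is only implied by the surrounding code.

if __name__ == '__main__':
    parser = buildParser()
    args = parser.parse_args(['flight.bag', '-i', '/imu/.*', '--include-header'])
    # args.bag == 'flight.bag', args.include == ['/imu/.*'],
    # args.include_header is True, args.fill is False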
def jsonify_payload(self):
""" Dump the payload to JSON """
# Assume already json serialized
if isinstance(self.payload, string_types):
return self.payload
return json.dumps(self.payload, cls=StandardJSONEncoder) | Dump the payload to JSON | entailment |
def _send(self):
""" Send the webhook method """
payload = self.payload
sending_metadata = {'success': False}
post_attributes = {'timeout': self.timeout}
if self.custom_headers:
post_attributes['headers'] = self.custom_headers
if not post_attributes.get('headers', None):
post_attributes['headers'] = {}
post_attributes['headers']['Content-Type'] = self.encoding
post_attributes['data'] = self.format_payload()
if self.signing_secret:
post_attributes['headers']['x-hub-signature'] = self.create_signature(post_attributes['data'], \
self.signing_secret)
for i, wait in enumerate(range(len(self.attempts) - 1)):
self.attempt = i + 1
sending_metadata['attempt'] = self.attempt
try:
print(self.url)
self.response = requests.post(self.url, **post_attributes)
if sys.version > '3':
# Converts bytes object to str object in Python 3+
self.response_content = self.response.content.decode('utf-8')
else:
self.response_content = self.response.content
sending_metadata['status_code'] = self.response.status_code
# anything with a 2xx status code is a success
if self.response.status_code >= 200 and self.response.status_code < 300:
# Exit the sender method. Here we provide the payload as a result.
# This is useful for reporting.
self.notify("Attempt {}: Successfully sent webhook {}".format(
self.attempt, self.hash_value)
)
sending_metadata['response'] = self.response_content
sending_metadata['success'] = True
break
else:
self.error = "Status code (%d). Message: %s" % (self.response.status_code, self.response.text)
except Exception as ex:
err_formatted = str(ex).replace('"',"'")
sending_metadata['response'] = '{"status_code": 500, "status":"failure","error":"'+err_formatted+'"}'
self.error = err_formatted
self.notify("Attempt {}: Could not send webhook {}".format(
self.attempt, self.hash_value)
)
self.notify_debug("Webhook {}. Body: {}".format(
self.hash_value, self.payload)
)
# If last attempt
if self.attempt == (len(self.attempts) - 1):
self.notify_error("Failed to send webhook {}. Body: {}".format(
self.hash_value, self.payload)
)
else:
# Wait a bit before the next attempt
sleep(wait)
sending_metadata['error'] = None if sending_metadata['success'] or not self.error else self.error
sending_metadata['post_attributes'] = post_attributes
merged_dict = sending_metadata.copy()
if isinstance(payload, string_types):
payload = {'payload': payload}
# Add the hash value if there is one.
if self.hash_value is not None and len(self.hash_value) > 0:
payload['hash'] = self.hash_value
merged_dict.update(payload)
return merged_dict | Send the webhook method | entailment |
def verifySignature(self, signatureKey):
"""
:type signatureKey: ECPublicKey
"""
try:
parts = ByteUtil.split(self.serialized,
len(self.serialized) - self.__class__.SIGNATURE_LENGTH,
self.__class__.SIGNATURE_LENGTH)
if not Curve.verifySignature(signatureKey, parts[0], parts[1]):
raise InvalidMessageException("Invalid signature!")
except InvalidKeyException as e:
raise InvalidMessageException(e) | :type signatureKey: ECPublicKey | entailment |
def getSignature(self, signatureKey, serialized):
"""
:type signatureKey: ECPrivateKey
:type serialized: bytearray
"""
try:
return Curve.calculateSignature(signatureKey, serialized)
except InvalidKeyException as e:
raise AssertionError(e) | :type signatureKey: ECPrivateKey
:type serialized: bytearray | entailment |
def process(self, sessionRecord, message):
"""
:param sessionRecord:
:param message:
:type message: PreKeyWhisperMessage
"""
messageVersion = message.getMessageVersion()
theirIdentityKey = message.getIdentityKey()
unsignedPreKeyId = None
if not self.identityKeyStore.isTrustedIdentity(self.recipientId, theirIdentityKey):
raise UntrustedIdentityException(self.recipientId, theirIdentityKey)
if messageVersion == 2:
unsignedPreKeyId = self.processV2(sessionRecord, message)
elif messageVersion == 3:
unsignedPreKeyId = self.processV3(sessionRecord, message)
else:
raise AssertionError("Unkown version %s" % messageVersion)
self.identityKeyStore.saveIdentity(self.recipientId, theirIdentityKey)
return unsignedPreKeyId | :param sessionRecord:
:param message:
:type message: PreKeyWhisperMessage | entailment |
def processV2(self, sessionRecord, message):
"""
:type sessionRecord: SessionRecord
:type message: PreKeyWhisperMessage
"""
if message.getPreKeyId() is None:
raise InvalidKeyIdException("V2 message requires one time prekey id!")
if not self.preKeyStore.containsPreKey(message.getPreKeyId()) and \
self.sessionStore.containsSession(self.recipientId, self.deviceId):
logging.warn("We've already processed the prekey part of this V2 session, "
"letting bundled message fall through...")
return None
ourPreKey = self.preKeyStore.loadPreKey(message.getPreKeyId()).getKeyPair()
parameters = BobAxolotlParameters.newBuilder()
parameters.setOurIdentityKey(self.identityKeyStore.getIdentityKeyPair())\
.setOurSignedPreKey(ourPreKey)\
.setOurRatchetKey(ourPreKey)\
.setOurOneTimePreKey(None)\
.setTheirIdentityKey(message.getIdentityKey())\
.setTheirBaseKey(message.getBaseKey())
if not sessionRecord.isFresh():
sessionRecord.archiveCurrentState()
RatchetingSession.initializeSessionAsBob(sessionRecord.getSessionState(),
message.getMessageVersion(),
parameters.create())
sessionRecord.getSessionState().setLocalRegistrationId(self.identityKeyStore.getLocalRegistrationId())
sessionRecord.getSessionState().setRemoteRegistrationId(message.getRegistrationId())
sessionRecord.getSessionState().setAliceBaseKey(message.getBaseKey().serialize())
if message.getPreKeyId() != Medium.MAX_VALUE:
return message.getPreKeyId()
else:
return None | :type sessionRecord: SessionRecord
:type message: PreKeyWhisperMessage | entailment |
def processV3(self, sessionRecord, message):
"""
:param sessionRecord:
:param message:
:type message: PreKeyWhisperMessage
:return:
"""
if sessionRecord.hasSessionState(message.getMessageVersion(), message.getBaseKey().serialize()):
logger.warn("We've already setup a session for this V3 message, letting bundled message fall through...")
return None
ourSignedPreKey = self.signedPreKeyStore.loadSignedPreKey(message.getSignedPreKeyId()).getKeyPair()
parameters = BobAxolotlParameters.newBuilder()
parameters.setTheirBaseKey(message.getBaseKey())\
.setTheirIdentityKey(message.getIdentityKey())\
.setOurIdentityKey(self.identityKeyStore.getIdentityKeyPair())\
.setOurSignedPreKey(ourSignedPreKey)\
.setOurRatchetKey(ourSignedPreKey)
if message.getPreKeyId() is not None:
parameters.setOurOneTimePreKey(self.preKeyStore.loadPreKey(message.getPreKeyId()).getKeyPair())
else:
parameters.setOurOneTimePreKey(None)
if not sessionRecord.isFresh():
sessionRecord.archiveCurrentState()
RatchetingSession.initializeSessionAsBob(sessionRecord.getSessionState(),
message.getMessageVersion(),
parameters.create())
sessionRecord.getSessionState().setLocalRegistrationId(self.identityKeyStore.getLocalRegistrationId())
sessionRecord.getSessionState().setRemoteRegistrationId(message.getRegistrationId())
sessionRecord.getSessionState().setAliceBaseKey(message.getBaseKey().serialize())
if message.getPreKeyId() is not None and message.getPreKeyId() != Medium.MAX_VALUE:
return message.getPreKeyId()
else:
return None | :param sessionRecord:
:param message:
:type message: PreKeyWhisperMessage
:return: | entailment |
def processPreKeyBundle(self, preKey):
"""
:type preKey: PreKeyBundle
"""
if not self.identityKeyStore.isTrustedIdentity(self.recipientId, preKey.getIdentityKey()):
raise UntrustedIdentityException(self.recipientId, preKey.getIdentityKey())
if preKey.getSignedPreKey() is not None and\
not Curve.verifySignature(preKey.getIdentityKey().getPublicKey(),
preKey.getSignedPreKey().serialize(),
preKey.getSignedPreKeySignature()):
raise InvalidKeyException("Invalid signature on device key!")
if preKey.getSignedPreKey() is None and preKey.getPreKey() is None:
raise InvalidKeyException("Both signed and unsigned prekeys are absent!")
supportsV3 = preKey.getSignedPreKey() is not None
sessionRecord = self.sessionStore.loadSession(self.recipientId, self.deviceId)
ourBaseKey = Curve.generateKeyPair()
theirSignedPreKey = preKey.getSignedPreKey() if supportsV3 else preKey.getPreKey()
theirOneTimePreKey = preKey.getPreKey()
theirOneTimePreKeyId = preKey.getPreKeyId() if theirOneTimePreKey is not None else None
parameters = AliceAxolotlParameters.newBuilder()
parameters.setOurBaseKey(ourBaseKey)\
.setOurIdentityKey(self.identityKeyStore.getIdentityKeyPair())\
.setTheirIdentityKey(preKey.getIdentityKey())\
.setTheirSignedPreKey(theirSignedPreKey)\
.setTheirRatchetKey(theirSignedPreKey)\
.setTheirOneTimePreKey(theirOneTimePreKey if supportsV3 else None)
if not sessionRecord.isFresh():
sessionRecord.archiveCurrentState()
RatchetingSession.initializeSessionAsAlice(sessionRecord.getSessionState(),
3 if supportsV3 else 2,
parameters.create())
sessionRecord.getSessionState().setUnacknowledgedPreKeyMessage(theirOneTimePreKeyId,
preKey.getSignedPreKeyId(),
ourBaseKey.getPublicKey())
sessionRecord.getSessionState().setLocalRegistrationId(self.identityKeyStore.getLocalRegistrationId())
sessionRecord.getSessionState().setRemoteRegistrationId(preKey.getRegistrationId())
sessionRecord.getSessionState().setAliceBaseKey(ourBaseKey.getPublicKey().serialize())
self.sessionStore.storeSession(self.recipientId, self.deviceId, sessionRecord)
self.identityKeyStore.saveIdentity(self.recipientId, preKey.getIdentityKey()) | :type preKey: PreKeyBundle | entailment |
def process(self, senderKeyName, senderKeyDistributionMessage):
"""
:type senderKeyName: SenderKeyName
:type senderKeyDistributionMessage: SenderKeyDistributionMessage
"""
senderKeyRecord = self.senderKeyStore.loadSenderKey(senderKeyName)
senderKeyRecord.addSenderKeyState(senderKeyDistributionMessage.getId(),
senderKeyDistributionMessage.getIteration(),
senderKeyDistributionMessage.getChainKey(),
senderKeyDistributionMessage.getSignatureKey())
self.senderKeyStore.storeSenderKey(senderKeyName, senderKeyRecord) | :type senderKeyName: SenderKeyName
:type senderKeyDistributionMessage: SenderKeyDistributionMessage | entailment |
def create(self, senderKeyName):
"""
:type senderKeyName: SenderKeyName
"""
try:
senderKeyRecord = self.senderKeyStore.loadSenderKey(senderKeyName)
if senderKeyRecord.isEmpty():
senderKeyRecord.setSenderKeyState(KeyHelper.generateSenderKeyId(),
0,
KeyHelper.generateSenderKey(),
KeyHelper.generateSenderSigningKey())
self.senderKeyStore.storeSenderKey(senderKeyName, senderKeyRecord)
state = senderKeyRecord.getSenderKeyState()
return SenderKeyDistributionMessage(state.getKeyId(),
state.getSenderChainKey().getIteration(),
state.getSenderChainKey().getSeed(),
state.getSigningKeyPublic())
except (InvalidKeyException, InvalidKeyIdException) as e:
raise AssertionError(e) | :type senderKeyName: SenderKeyName | entailment |
def initializeSession(sessionState, sessionVersion, parameters):
"""
:type sessionState: SessionState
:type sessionVersion: int
:type parameters: SymmetricAxolotlParameters
"""
if RatchetingSession.isAlice(parameters.getOurBaseKey().getPublicKey(), parameters.getTheirBaseKey()):
aliceParameters = AliceAxolotlParameters.newBuilder()
aliceParameters.setOurBaseKey(parameters.getOurBaseKey()) \
.setOurIdentityKey(parameters.getOurIdentityKey()) \
.setTheirRatchetKey(parameters.getTheirRatchetKey()) \
.setTheirIdentityKey(parameters.getTheirIdentityKey()) \
.setTheirSignedPreKey(parameters.getTheirBaseKey()) \
.setTheirOneTimePreKey(None)
RatchetingSession.initializeSessionAsAlice(sessionState, sessionVersion, aliceParameters.create())
else:
bobParameters = BobAxolotlParameters.newBuilder()
bobParameters.setOurIdentityKey(parameters.getOurIdentityKey()) \
.setOurRatchetKey(parameters.getOurRatchetKey()) \
.setOurSignedPreKey(parameters.getOurBaseKey()) \
.setOurOneTimePreKey(None) \
.setTheirBaseKey(parameters.getTheirBaseKey()) \
.setTheirIdentityKey(parameters.getTheirIdentityKey())
RatchetingSession.initializeSessionAsBob(sessionState, sessionVersion, bobParameters.create()) | :type sessionState: SessionState
:type sessionVersion: int
:type parameters: SymmetricAxolotlParameters | entailment |
def initializeSessionAsAlice(sessionState, sessionVersion, parameters):
"""
:type sessionState: SessionState
:type sessionVersion: int
:type parameters: AliceAxolotlParameters
"""
sessionState.setSessionVersion(sessionVersion)
sessionState.setRemoteIdentityKey(parameters.getTheirIdentityKey())
sessionState.setLocalIdentityKey(parameters.getOurIdentityKey().getPublicKey())
sendingRatchetKey = Curve.generateKeyPair()
secrets = bytearray()
if sessionVersion >= 3:
secrets.extend(RatchetingSession.getDiscontinuityBytes())
secrets.extend(Curve.calculateAgreement(parameters.getTheirSignedPreKey(),
parameters.getOurIdentityKey().getPrivateKey()))
secrets.extend(Curve.calculateAgreement(parameters.getTheirIdentityKey().getPublicKey(),
parameters.getOurBaseKey().getPrivateKey()))
secrets.extend(Curve.calculateAgreement(parameters.getTheirSignedPreKey(),
parameters.getOurBaseKey().getPrivateKey()))
if sessionVersion >= 3 and parameters.getTheirOneTimePreKey() is not None:
secrets.extend(Curve.calculateAgreement(parameters.getTheirOneTimePreKey(),
parameters.getOurBaseKey().getPrivateKey()))
derivedKeys = RatchetingSession.calculateDerivedKeys(sessionVersion, secrets)
sendingChain = derivedKeys.getRootKey().createChain(parameters.getTheirRatchetKey(), sendingRatchetKey)
sessionState.addReceiverChain(parameters.getTheirRatchetKey(), derivedKeys.getChainKey())
sessionState.setSenderChain(sendingRatchetKey, sendingChain[1])
sessionState.setRootKey(sendingChain[0]) | :type sessionState: SessionState
:type sessionVersion: int
:type parameters: AliceAxolotlParameters | entailment |
def initializeSessionAsBob(sessionState, sessionVersion, parameters):
"""
:type sessionState: SessionState
:type sessionVersion: int
:type parameters: BobAxolotlParameters
"""
sessionState.setSessionVersion(sessionVersion)
sessionState.setRemoteIdentityKey(parameters.getTheirIdentityKey())
sessionState.setLocalIdentityKey(parameters.getOurIdentityKey().getPublicKey())
secrets = bytearray()
if sessionVersion >= 3:
secrets.extend(RatchetingSession.getDiscontinuityBytes())
secrets.extend(Curve.calculateAgreement(parameters.getTheirIdentityKey().getPublicKey(),
parameters.getOurSignedPreKey().getPrivateKey()))
secrets.extend(Curve.calculateAgreement(parameters.getTheirBaseKey(),
parameters.getOurIdentityKey().getPrivateKey()))
secrets.extend(Curve.calculateAgreement(parameters.getTheirBaseKey(),
parameters.getOurSignedPreKey().getPrivateKey()))
if sessionVersion >= 3 and parameters.getOurOneTimePreKey() is not None:
secrets.extend(Curve.calculateAgreement(parameters.getTheirBaseKey(),
parameters.getOurOneTimePreKey().getPrivateKey()))
derivedKeys = RatchetingSession.calculateDerivedKeys(sessionVersion, secrets)
sessionState.setSenderChain(parameters.getOurRatchetKey(), derivedKeys.getChainKey())
sessionState.setRootKey(derivedKeys.getRootKey()) | :type sessionState: SessionState
:type sessionVersion: int
:type parameters: BobAxolotlParameters | entailment |
def addSenderKeyState(self, id, iteration, chainKey, signatureKey):
"""
:type id: int
:type iteration: int
:type chainKey: bytearray
:type signatureKey: ECPublicKey
"""
self.senderKeyStates.append(SenderKeyState(id, iteration, chainKey, signatureKey)) | :type id: int
:type iteration: int
:type chainKey: bytearray
:type signatureKey: ECPublicKey | entailment |
def setSenderKeyState(self, id, iteration, chainKey, signatureKey):
"""
:type id: int
:type iteration: int
:type chainKey: bytearray
:type signatureKey: ECKeyPair
"""
del self.senderKeyStates[:]
self.senderKeyStates.append(SenderKeyState(id, iteration, chainKey, signatureKeyPair=signatureKey)) | :type id: int
:type iteration: int
:type chainKey: bytearray
:type signatureKey: ECKeyPair | entailment |
def encrypt(self, paddedMessage):
"""
:type paddedMessage: str
"""
# TODO: make this less ugly and python 2 and 3 compatible
# paddedMessage = bytearray(paddedMessage.encode() if (sys.version_info >= (3, 0) and not type(paddedMessage) in (bytes, bytearray)) or type(paddedMessage) is unicode else paddedMessage)
if (sys.version_info >= (3, 0) and
not type(paddedMessage) in (bytes, bytearray)) or type(paddedMessage) is unicode:
paddedMessage = bytearray(paddedMessage.encode())
else:
paddedMessage = bytearray(paddedMessage)
sessionRecord = self.sessionStore.loadSession(self.recipientId, self.deviceId)
sessionState = sessionRecord.getSessionState()
chainKey = sessionState.getSenderChainKey()
messageKeys = chainKey.getMessageKeys()
senderEphemeral = sessionState.getSenderRatchetKey()
previousCounter = sessionState.getPreviousCounter()
sessionVersion = sessionState.getSessionVersion()
ciphertextBody = self.getCiphertext(sessionVersion, messageKeys, paddedMessage)
ciphertextMessage = WhisperMessage(sessionVersion, messageKeys.getMacKey(),
senderEphemeral, chainKey.getIndex(),
previousCounter, ciphertextBody,
sessionState.getLocalIdentityKey(),
sessionState.getRemoteIdentityKey())
if sessionState.hasUnacknowledgedPreKeyMessage():
items = sessionState.getUnacknowledgedPreKeyMessageItems()
localRegistrationid = sessionState.getLocalRegistrationId()
ciphertextMessage = PreKeyWhisperMessage(sessionVersion, localRegistrationid, items.getPreKeyId(),
items.getSignedPreKeyId(), items.getBaseKey(),
sessionState.getLocalIdentityKey(),
ciphertextMessage)
sessionState.setSenderChainKey(chainKey.getNextChainKey())
self.sessionStore.storeSession(self.recipientId, self.deviceId, sessionRecord)
return ciphertextMessage | :type paddedMessage: str | entailment |
def decryptMsg(self, ciphertext, textMsg=True):
"""
:type ciphertext: WhisperMessage
:type textMsg: Bool set this to False if you are decrypting bytes
instead of string
"""
if not self.sessionStore.containsSession(self.recipientId, self.deviceId):
raise NoSessionException("No session for: %s, %s" % (self.recipientId, self.deviceId))
sessionRecord = self.sessionStore.loadSession(self.recipientId, self.deviceId)
plaintext = self.decryptWithSessionRecord(sessionRecord, ciphertext)
self.sessionStore.storeSession(self.recipientId, self.deviceId, sessionRecord)
return plaintext | :type ciphertext: WhisperMessage
:type textMsg: Bool set this to False if you are decrypting bytes
instead of string | entailment |
def decryptPkmsg(self, ciphertext, textMsg=True):
"""
:type ciphertext: PreKeyWhisperMessage
"""
sessionRecord = self.sessionStore.loadSession(self.recipientId, self.deviceId)
unsignedPreKeyId = self.sessionBuilder.process(sessionRecord, ciphertext)
plaintext = self.decryptWithSessionRecord(sessionRecord, ciphertext.getWhisperMessage())
# callback.handlePlaintext(plaintext)
self.sessionStore.storeSession(self.recipientId, self.deviceId, sessionRecord)
if unsignedPreKeyId is not None:
self.preKeyStore.removePreKey(unsignedPreKeyId)
return plaintext | :type ciphertext: PreKeyWhisperMessage | entailment |
def decryptWithSessionRecord(self, sessionRecord, cipherText):
"""
:type sessionRecord: SessionRecord
:type cipherText: WhisperMessage
"""
previousStates = sessionRecord.getPreviousSessionStates()
exceptions = []
try:
sessionState = SessionState(sessionRecord.getSessionState())
plaintext = self.decryptWithSessionState(sessionState, cipherText)
sessionRecord.setState(sessionState)
return plaintext
except InvalidMessageException as e:
exceptions.append(e)
for i in range(0, len(previousStates)):
previousState = previousStates[i]
try:
promotedState = SessionState(previousState)
plaintext = self.decryptWithSessionState(promotedState, cipherText)
previousStates.pop(i)
sessionRecord.promoteState(promotedState)
return plaintext
except InvalidMessageException as e:
exceptions.append(e)
raise InvalidMessageException("No valid sessions", exceptions) | :type sessionRecord: SessionRecord
:type cipherText: WhisperMessage | entailment |
def getCiphertext(self, version, messageKeys, plainText):
"""
:type version: int
:type messageKeys: MessageKeys
:type plainText: bytearray
"""
cipher = None
if version >= 3:
cipher = self.getCipher(messageKeys.getCipherKey(), messageKeys.getIv())
else:
cipher = self.getCipher_v2(messageKeys.getCipherKey(), messageKeys.getCounter())
return cipher.encrypt(bytes(plainText)) | :type version: int
:type messageKeys: MessageKeys
:type plainText: bytearray | entailment |
def calculateAgreement(publicKey, privateKey):
"""
:type publicKey: ECPublicKey
:type privateKey: ECPrivateKey
"""
if publicKey.getType() != privateKey.getType():
raise InvalidKeyException("Public and private keys must be of the same type!")
if publicKey.getType() == Curve.DJB_TYPE:
return _curve.calculateAgreement(privateKey.getPrivateKey(), publicKey.getPublicKey())
else:
raise InvalidKeyException("Unknown type: %s" % publicKey.getType()) | :type publicKey: ECPublicKey
:type privateKey: ECPrivateKey | entailment |
def verifySignature(ecPublicSigningKey, message, signature):
"""
:type ecPublicSigningKey: ECPublicKey
:type message: bytearray
:type signature: bytearray
"""
if ecPublicSigningKey.getType() == Curve.DJB_TYPE:
result = _curve.verifySignature(ecPublicSigningKey.getPublicKey(), message, signature)
return result == 0
else:
raise InvalidKeyException("Unknown type: %s" % ecPublicSigningKey.getType()) | :type ecPublicSigningKey: ECPublicKey
:type message: bytearray
:type signature: bytearray | entailment |
def calculateSignature(privateSigningKey, message):
"""
:type privateSigningKey: ECPrivateKey
:type message: bytearray
"""
if privateSigningKey.getType() == Curve.DJB_TYPE:
rand = os.urandom(64)
res = _curve.calculateSignature(rand, privateSigningKey.getPrivateKey(), message)
return res
else:
raise InvalidKeyException("Unknown type: %s" % privateSigningKey.getType()) | :type privateSigningKey: ECPrivateKey
:type message: bytearray | entailment |
def encrypt(self, paddedPlaintext):
"""
:type paddedPlaintext: str
"""
# TODO: make this less ugly and python 2 and 3 compatible
# paddedMessage = bytearray(paddedMessage.encode() if (sys.version_info >= (3, 0) and not type(paddedMessage) in (bytes, bytearray)) or type(paddedMessage) is unicode else paddedMessage)
if (sys.version_info >= (3, 0) and
not type(paddedPlaintext) in (bytes, bytearray)) or type(paddedPlaintext) is unicode:
paddedPlaintext = bytearray(paddedPlaintext.encode())
else:
paddedPlaintext = bytearray(paddedPlaintext)
try:
record = self.senderKeyStore.loadSenderKey(self.senderKeyName)
senderKeyState = record.getSenderKeyState()
senderKey = senderKeyState.getSenderChainKey().getSenderMessageKey()
ciphertext = self.getCipherText(senderKey.getIv(), senderKey.getCipherKey(), paddedPlaintext)
senderKeyMessage = SenderKeyMessage(senderKeyState.getKeyId(),
senderKey.getIteration(),
ciphertext,
senderKeyState.getSigningKeyPrivate())
senderKeyState.setSenderChainKey(senderKeyState.getSenderChainKey().getNext())
self.senderKeyStore.storeSenderKey(self.senderKeyName, record)
return senderKeyMessage.serialize()
except InvalidKeyIdException as e:
raise NoSessionException(e) | :type paddedPlaintext: str | entailment |
def decrypt(self, senderKeyMessageBytes):
"""
:type senderKeyMessageBytes: bytearray
"""
try:
record = self.senderKeyStore.loadSenderKey(self.senderKeyName)
if record.isEmpty():
raise NoSessionException("No sender key for: %s" % self.senderKeyName)
senderKeyMessage = SenderKeyMessage(serialized = bytes(senderKeyMessageBytes))
senderKeyState = record.getSenderKeyState(senderKeyMessage.getKeyId())
senderKeyMessage.verifySignature(senderKeyState.getSigningKeyPublic())
senderKey = self.getSenderKey(senderKeyState, senderKeyMessage.getIteration())
plaintext = self.getPlainText(senderKey.getIv(), senderKey.getCipherKey(), senderKeyMessage.getCipherText())
self.senderKeyStore.storeSenderKey(self.senderKeyName, record)
return plaintext
except (InvalidKeyException, InvalidKeyIdException) as e:
raise InvalidMessageException(e) | :type senderKeyMessageBytes: bytearray | entailment |
def getPlainText(self, iv, key, ciphertext):
"""
:type iv: bytearray
:type key: bytearray
:type ciphertext: bytearray
"""
try:
cipher = AESCipher(key, iv)
plaintext = cipher.decrypt(ciphertext)
if sys.version_info >= (3, 0):
return plaintext.decode()
return plaintext
except Exception as e:
raise InvalidMessageException(e) | :type iv: bytearray
:type key: bytearray
:type ciphertext: bytearray | entailment |
def getCipherText(self, iv, key, plaintext):
"""
:type iv: bytearray
:type key: bytearray
:type plaintext: bytearray
"""
cipher = AESCipher(key, iv)
return cipher.encrypt(bytes(plaintext)) | :type iv: bytearray
:type key: bytearray
:type plaintext: bytearray | entailment |
def setPendingKeyExchange(self, sequence, ourBaseKey, ourRatchetKey, ourIdentityKey):
"""
:type sequence: int
:type ourBaseKey: ECKeyPair
:type ourRatchetKey: ECKeyPair
:type ourIdentityKey: IdentityKeyPair
"""
structure = self.sessionStructure.PendingKeyExchange()
structure.sequence = sequence
structure.localBaseKey = ourBaseKey.getPublicKey().serialize()
structure.localBaseKeyPrivate = ourBaseKey.getPrivateKey().serialize()
structure.localRatchetKey = ourRatchetKey.getPublicKey().serialize()
structure.localRatchetKeyPrivate = ourRatchetKey.getPrivateKey().serialize()
structure.localIdentityKey = ourIdentityKey.getPublicKey().serialize()
structure.localIdentityKeyPrivate = ourIdentityKey.getPrivateKey().serialize()
self.sessionStructure.pendingKeyExchange.MergeFrom(structure) | :type sequence: int
:type ourBaseKey: ECKeyPair
:type ourRatchetKey: ECKeyPair
:type ourIdentityKey: IdentityKeyPair | entailment |
def setUnacknowledgedPreKeyMessage(self, preKeyId, signedPreKeyId, baseKey):
"""
:type preKeyId: int
:type signedPreKeyId: int
:type baseKey: ECPublicKey
"""
self.sessionStructure.pendingPreKey.signedPreKeyId = signedPreKeyId
self.sessionStructure.pendingPreKey.baseKey = baseKey.serialize()
if preKeyId is not None:
self.sessionStructure.pendingPreKey.preKeyId = preKeyId | :type preKeyId: int
:type signedPreKeyId: int
:type baseKey: ECPublicKey | entailment |
def generateIdentityKeyPair():
"""
Generate an identity key pair. Clients should only do this once,
at install time.
@return the generated IdentityKeyPair.
"""
keyPair = Curve.generateKeyPair()
publicKey = IdentityKey(keyPair.getPublicKey())
serialized = '0a21056e8936e8367f768a7bba008ade7cf58407bdc7a6aae293e2c' \
'b7c06668dcd7d5e12205011524f0c15467100dd603e0d6020f4d293' \
'edfbcd82129b14a88791ac81365c'
serialized = binascii.unhexlify(serialized.encode())
identityKeyPair = IdentityKeyPair(publicKey, keyPair.getPrivateKey())
return identityKeyPair | Generate an identity key pair. Clients should only do this once,
at install time.
@return the generated IdentityKeyPair. | entailment |
def generatePreKeys(start, count):
"""
Generate a list of PreKeys. Clients should do this at install time, and
subsequently any time the list of PreKeys stored on the server runs low.
PreKey IDs are shorts, so they will eventually be repeated. Clients should
store PreKeys in a circular buffer, so that they are repeated as infrequently
as possible.
@param start The starting PreKey ID, inclusive.
@param count The number of PreKeys to generate.
@return the list of generated PreKeyRecords.
"""
results = []
start -= 1
for i in range(0, count):
preKeyId = ((start + i) % (Medium.MAX_VALUE - 1)) + 1
results.append(PreKeyRecord(preKeyId, Curve.generateKeyPair()))
return results | Generate a list of PreKeys. Clients should do this at install time, and
subsequently any time the list of PreKeys stored on the server runs low.
PreKey IDs are shorts, so they will eventually be repeated. Clients should
store PreKeys in a circular buffer, so that they are repeated as infrequently
as possible.
@param start The starting PreKey ID, inclusive.
@param count The number of PreKeys to generate.
@return the list of generated PreKeyRecords. | entailment |
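A brief sketch of the install-time flow the two helpers above describe, calling them as the plain functions they are defined as here; how the records are persisted is left to the application's stores, and record.getId() is assumed to return the preKeyId passed to the PreKeyRecord constructor.

identity_key_pair = generateIdentityKeyPair()    # once, at install time
pre_keys = generatePreKeys(start=1, count=100)   # replenish whenever the server runs low
first_ids = [record.getId() for record in pre_keys[:3]]   # [1, 2, 3]; IDs wrap below Medium.MAX_VALUE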
def choices(cls, blank=False):
""" Choices for Enum
:return: List of tuples (<value>, <human-readable value>)
:rtype: list
"""
choices = sorted([(key, value) for key, value in cls.values.items()], key=lambda x: x[0])
if blank:
choices.insert(0, ('', Enum.Value('', None, '', cls)))
return choices | Choices for Enum
:return: List of tuples (<value>, <human-readable value>)
:rtype: list | entailment |
def get(cls, name_or_numeric):
""" Get Enum.Value object matching the value argument.
:param name_or_numeric: Integer value or attribute name
:type name_or_numeric: int or str
:rtype: Enum.Value
"""
if isinstance(name_or_numeric, six.string_types):
name_or_numeric = getattr(cls, name_or_numeric.upper())
return cls.values.get(name_or_numeric) | Get Enum.Value object matching the value argument.
:param name_or_numeric: Integer value or attribute name
:type name_or_numeric: int or str
:rtype: Enum.Value | entailment |
def items(cls):
"""
:return: List of tuples consisting of every enum value in the form [('NAME', value), ...]
:rtype: list
"""
items = [(value.name, key) for key, value in cls.values.items()]
return sorted(items, key=lambda x: x[1]) | :return: List of tuples consisting of every enum value in the form [('NAME', value), ...]
:rtype: list | entailment |
def is_valid_transition(cls, from_value, to_value):
""" Will check if to_value is a valid transition from from_value. Returns true if it is a valid transition.
:param from_value: Start transition point
:param to_value: End transition point
:type from_value: int
:type to_value: int
:return: Success flag
:rtype: bool
"""
try:
return from_value == to_value or from_value in cls.transition_origins(to_value)
except KeyError:
return False | Will check if to_value is a valid transition from from_value. Returns true if it is a valid transition.
:param from_value: Start transition point
:param to_value: End transition point
:type from_value: int
:type to_value: int
:return: Success flag
:rtype: bool | entailment |
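A hypothetical enum to illustrate the classmethods above; it assumes the enum metaclass populates values from the integer attributes (with each Value's name matching the attribute) and that transition_origins reads the _transitions mapping, neither of which is shown in this excerpt.

class OrderStatus(Enum):
    PENDING = 0
    SHIPPED = 1
    CANCELLED = 2
    _transitions = {SHIPPED: (PENDING,), CANCELLED: (PENDING,)}

OrderStatus.items()                      # [('PENDING', 0), ('SHIPPED', 1), ('CANCELLED', 2)]
OrderStatus.get('shipped')               # name lookup is upper-cased; returns the Enum.Value for 1
OrderStatus.is_valid_transition(0, 1)    # True: PENDING -> SHIPPED is listed as allowed
OrderStatus.is_valid_transition(1, 0)    # False: nothing is allowed to go back to PENDING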
def _setup_validation(self, sender, **kwargs):
"""
Use a custom setter for the field to validate the new value against the old one.
The current value is set as '_enum_[att_name]' on the model instance.
"""
att_name = self.get_attname()
private_att_name = '_enum_%s' % att_name
enum = self.enum
def set_enum(self, new_value):
if hasattr(self, private_att_name):
# Fetch previous value from private enum attribute.
old_value = getattr(self, private_att_name)
else:
# First setattr no previous value on instance.
old_value = new_value
# Update private enum attribute with new value
setattr(self, private_att_name, new_value)
self.__dict__[att_name] = new_value
# Run validation for new value.
validators.validate_valid_transition(enum, old_value, new_value)
def get_enum(self):
return getattr(self, private_att_name)
def delete_enum(self):
self.__dict__[att_name] = None
return setattr(self, private_att_name, None)
if not sender._meta.abstract:
setattr(sender, att_name, property(get_enum, set_enum, delete_enum)) | Use a custom setter for the field to validate the new value against the old one.
The current value is set as '_enum_[att_name]' on the model instance. | entailment |
def validate_valid_transition(enum, from_value, to_value):
"""
Validate that to_value is a valid choice and that to_value is a valid transition from from_value.
"""
validate_available_choice(enum, to_value)
if hasattr(enum, '_transitions') and not enum.is_valid_transition(from_value, to_value):
message = _(six.text_type('{enum} can not go from "{from_value}" to "{to_value}"'))
raise InvalidStatusOperationError(message.format(
enum=enum.__name__,
from_value=enum.name(from_value),
to_value=enum.name(to_value) or to_value
)) | Validate that to_value is a valid choice and that to_value is a valid transition from from_value. | entailment |
def validate_available_choice(enum, to_value):
"""
Validate that to_value is defined as a value in enum.
"""
if to_value is None:
return
if type(to_value) is not int:
try:
to_value = int(to_value)
except ValueError:
message_str = "'{value}' cannot be converted to int"
message = _(six.text_type(message_str))
raise InvalidStatusOperationError(message.format(value=to_value))
if to_value not in list(dict(enum.choices()).keys()):
message = _(six.text_type('Select a valid choice. {value} is not one of the available choices.'))
raise InvalidStatusOperationError(message.format(value=to_value)) | Validate that to_value is defined as a value in enum. | entailment |
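Continuing the hypothetical OrderStatus example, the two validators behave as follows.

validate_available_choice(OrderStatus, '1')       # ok: '1' is coerced to int and is a valid choice
validate_valid_transition(OrderStatus, 0, 1)      # ok: PENDING -> SHIPPED is an allowed transition
try:
    validate_valid_transition(OrderStatus, 2, 1)  # CANCELLED -> SHIPPED
except InvalidStatusOperationError:
    pass  # raised: 2 is not among the assumed transition origins of 1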