def convert_end_to_limit(self, blank_step, end, step_index, step_name_list, step_type, step_flow_ctrl):
"""
Takes a normal ending condition from a maccor procedure and converts it to an equivalent
limit for an arbin schedule file.
Args:
blank_step (OrderedDict): the arbin step that is being populated
end (OrderedDict): the ending condition from the maccor to convert to arbin limit
step_index (int): the index of the current step being converted
step_name_list (list): the list of labels for the steps
step_type (str): the type of step being converted so that the limit can be set
appropriately
step_flow_ctrl (dict): a dictionary of the loop steps (keys) and the goto
steps (values)
Returns:
dict: the converted limit
"""
ARBIN_SCHEMA = loadfn(os.path.join(PROTOCOL_SCHEMA_DIR, "arbin_schedule_schema.yaml"))
limit = ARBIN_SCHEMA['step_blank_limit']
limit['m_bStepLimit'] = "1"
limit['m_bLogDataLimit'] = "1"
if end['Step'] == str(int(step_index) + 2).zfill(3):
limit['m_szGotoStep'] = 'Next Step'
else:
limit['m_szGotoStep'] = step_name_list[int(end['Step']) - 1]
if end['EndType'] == 'Voltage':
limit['Equation0_szLeft'] = 'PV_CHAN_Voltage'
limit['Equation0_szCompareSign'] = end['Oper'].replace(' ', '')
limit['Equation0_szRight'] = end['Value']
elif end['EndType'] == 'Current' and blank_step['m_szStepCtrlType'] == "CCCV":
limit['Equation0_szLeft'] = 'PV_CHAN_CV_Stage_Current'
if step_type == 'Charge':
limit['Equation0_szRight'] = end['Value']
limit['Equation0_szCompareSign'] = end['Oper'].replace(' ', '')
elif step_type == 'Dischrge':
limit['Equation0_szRight'] = '-' + end['Value']
limit['Equation0_szCompareSign'] = end['Oper'].replace(' ', '').replace('<', '>')
else:
raise ValueError("Unable to convert end to limit for EndType:{} and Ctrl:{}".
format(end['EndType'], blank_step['m_szStepCtrlType']))
elif end['EndType'] == 'Current' and blank_step['m_szStepCtrlType'] == "Voltage(V)":
limit['Equation0_szLeft'] = 'PV_CHAN_Current'
if step_type == 'Charge':
limit['Equation0_szRight'] = end['Value']
limit['Equation0_szCompareSign'] = end['Oper'].replace(' ', '')
elif step_type == 'Dischrge':
limit['Equation0_szRight'] = '-' + end['Value']
limit['Equation0_szCompareSign'] = end['Oper'].replace(' ', '').replace('<', '>')
else:
raise ValueError("Unable to convert end to limit for EndType:{} and Ctrl:{}".
format(end['EndType'], blank_step['m_szStepCtrlType']))
elif end['EndType'] == 'StepTime':
limit['Equation0_szLeft'] = 'PV_CHAN_Step_Time'
limit['Equation0_szCompareSign'] = '>'
if '.' in end['Value']:
nofrag, frag = end['Value'].split(".")
frag = frag[:6] # truncate to microseconds
frag += (6 - len(frag)) * '0' # add 0s
elapsed = datetime.strptime(nofrag.replace('::', '00:00:0'),
"%H:%M:%S").replace(microsecond=int(frag)) - \
datetime.strptime("00:00:00", "%H:%M:%S")
else:
elapsed = datetime.strptime(end['Value'].replace('::', '00:00:0'), "%H:%M:%S") - \
datetime.strptime("00:00:00", "%H:%M:%S")
limit['Equation0_szRight'] = str(elapsed.total_seconds())
elif end['EndType'] == 'Loop Cnt':
loop_counter = int(re.search(r'\d+', step_type).group())
limit['Equation0_szLeft'] = 'TC_Counter{}'.format(loop_counter)
limit['Equation0_szCompareSign'] = end['Oper'].replace(' ', '')
limit['Equation0_szRight'] = end['Value']
elif end['EndType'] == 'Loop Addendum':
loop_counter = int(re.search(r'\d+', step_type).group())
limit['m_szGotoStep'] = step_flow_ctrl[step_index]
limit['Equation0_szLeft'] = 'TC_Counter{}'.format(loop_counter)
limit['Equation0_szCompareSign'] = '<'
limit['Equation0_szRight'] = end['Value']
else:
raise ValueError("Unable to set end for type {}".format(end['EndType']))
return limit

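# Illustrative sketch (not part of the BEEP source): how a Maccor voltage end
# condition maps onto the Arbin Equation0_* fields populated above. The OrderedDict
# literal and the pre-seeded `limit` dict below are hypothetical stand-ins for the
# schema-loaded step_blank_limit.
from collections import OrderedDict

end = OrderedDict([('EndType', 'Voltage'), ('Oper', ' >= '), ('Value', '4.2'), ('Step', '003')])
limit = {'m_bStepLimit': "1", 'm_bLogDataLimit': "1", 'm_szGotoStep': 'Next Step'}
if end['EndType'] == 'Voltage':
    limit['Equation0_szLeft'] = 'PV_CHAN_Voltage'
    limit['Equation0_szCompareSign'] = end['Oper'].replace(' ', '')   # ' >= ' -> '>='
    limit['Equation0_szRight'] = end['Value']                         # '4.2'
print(limit['Equation0_szCompareSign'], limit['Equation0_szRight'])   # >= 4.2
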
def convert_report_to_logging_limit(self, report):
"""
Takes the reporting conditions for the maccor step and converts them to the logging
limits for the arbin step.
Args:
report (OrderedDict): the maccor condition for recording values
Returns:
dict: a logging limit that corresponds to the recording conditions for
maccor report
"""
ARBIN_SCHEMA = loadfn(os.path.join(PROTOCOL_SCHEMA_DIR, "arbin_schedule_schema.yaml"))
limit = ARBIN_SCHEMA['step_blank_limit']
limit['m_bStepLimit'] = "0"
limit['m_bLogDataLimit'] = "1"
limit['m_szGotoStep'] = 'Next Step'
if report['ReportType'] == 'Voltage':
limit['Equation0_szLeft'] = 'DV_Voltage'
limit['Equation0_szCompareSign'] = '>'
limit['Equation0_szRight'] = report['Value']
elif report['ReportType'] == 'Current':
limit['Equation0_szLeft'] = 'DV_Current'
limit['Equation0_szRight'] = report['Value']
limit['Equation0_szCompareSign'] = '>'
elif report['ReportType'] == 'StepTime':
limit['Equation0_szLeft'] = 'DV_Time'
limit['Equation0_szCompareSign'] = '>'
if '.' in report['Value']:
nofrag, frag = report['Value'].split(".")
frag = frag[:6] # truncate to microseconds
frag += (6 - len(frag)) * '0' # add 0s
elapsed = datetime.strptime(nofrag.replace('::', '00:00:0'),
"%H:%M:%S").replace(microsecond=int(frag)) - \
datetime.strptime("00:00:00", "%H:%M:%S")
else:
elapsed = datetime.strptime(report['Value'].replace('::', '00:00:0'), "%H:%M:%S") - \
datetime.strptime("00:00:00", "%H:%M:%S")
limit['Equation0_szRight'] = str(elapsed.total_seconds())
return limit

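# Illustrative sketch (not part of the BEEP source): the elapsed-time conversion
# shared by both limit builders above, turning a "HH:MM:SS[.frac]" string into the
# seconds string written to Equation0_szRight. The sample value is made up and the
# Maccor '::' shorthand handled in the original code is omitted here.
from datetime import datetime

value = "00:05:30.25"
if '.' in value:
    nofrag, frag = value.split(".")
    frag = frag[:6].ljust(6, '0')   # truncate/pad the fraction to microseconds
    elapsed = (datetime.strptime(nofrag, "%H:%M:%S").replace(microsecond=int(frag))
               - datetime.strptime("00:00:00", "%H:%M:%S"))
else:
    elapsed = (datetime.strptime(value, "%H:%M:%S")
               - datetime.strptime("00:00:00", "%H:%M:%S"))
print(str(elapsed.total_seconds()))   # 330.25
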
def from_run(cls, input_filename, feature_dir, processed_cycler_run, params_dict=None):
"""
This method contains the workflow for the creation of the feature class
Since the workflow should be the same for all of the feature classes, this
method should not be overridden in any of the derived classes. If the class
can be created (feature generation succeeds, etc.) then the class is returned.
Otherwise the return value is False
Args:
input_filename (str): path to the input data from processed cycler run
feature_dir (str): path to the base directory for the feature sets.
processed_cycler_run (beep.structure.ProcessedCyclerRun): data from cycler run
params_dict (dict): dictionary of parameters governing how the ProcessedCyclerRun object
gets featurized. These could be filters for column or row operations
Returns:
(beep.featurize.BeepFeatures): class object for the feature set
"""
if cls.validate_data(processed_cycler_run, params_dict):
output_filename = cls.get_feature_object_name_and_path(input_filename, feature_dir)
feature_object = cls.features_from_processed_cycler_run(processed_cycler_run, params_dict)
metadata = cls.metadata_from_processed_cycler_run(processed_cycler_run, params_dict)
return cls(output_filename, feature_object, metadata)
else:
return False

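# Illustrative usage sketch (not part of the BEEP source): the call pattern from_run()
# is designed for, assuming a structured run has already been serialized to disk and
# that a concrete subclass such as DeltaQFastCharge (referenced later in this module)
# is importable. The file path is hypothetical.
from monty.serialization import loadfn

processed_cycler_run = loadfn("run_0001_structure.json")   # hypothetical structured run
featurizer = DeltaQFastCharge.from_run("run_0001_structure.json",
                                       "data-share/features/",
                                       processed_cycler_run)
if featurizer:
    print("generated feature object:", featurizer.name)
else:
    print("input data failed validation; nothing generated")
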
def validate_data(cls, processed_cycler_run, params_dict=None):
"""
This function determines if the input data has the necessary attributes for
creation of this feature class. It should test for all of the possible reasons
that feature generation would fail for this particular input data.
Args:
processed_cycler_run (beep.structure.ProcessedCyclerRun): data from cycler run
params_dict (dict): dictionary of parameters governing how the ProcessedCyclerRun object
gets featurized. These could be filters for column or row operations
Returns:
bool: True/False indication of ability to proceed with feature generation
"""
conditions = []
if not (hasattr(processed_cycler_run, 'diagnostic_summary') and
hasattr(processed_cycler_run, 'diagnostic_interpolated')):
return False
if processed_cycler_run.diagnostic_summary.empty:
return False
else:
conditions.append(any(['rpt' in x for x in processed_cycler_run.diagnostic_summary.cycle_type.unique()]))
return all(conditions)

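# Illustrative sketch (not part of the BEEP source): the attribute and emptiness checks
# above, exercised on a hypothetical stand-in object; a real ProcessedCyclerRun carries
# these frames itself.
from types import SimpleNamespace
import pandas as pd

fake_run = SimpleNamespace(
    diagnostic_summary=pd.DataFrame({"cycle_type": ["rpt_0.2C", "hppc", "reset"]}),
    diagnostic_interpolated=pd.DataFrame(),
)
has_both = hasattr(fake_run, "diagnostic_summary") and hasattr(fake_run, "diagnostic_interpolated")
has_rpt = any("rpt" in x for x in fake_run.diagnostic_summary.cycle_type.unique())
print(has_both and not fake_run.diagnostic_summary.empty and has_rpt)   # True
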
def features_from_processed_cycler_run(cls, processed_cycler_run, params_dict=None):
"""
Args:
processed_cycler_run (beep.structure.ProcessedCyclerRun)
params_dict (dict): dictionary of parameters governing how the ProcessedCyclerRun object
gets featurized. These could be filters for column or row operations
Returns:
pd.DataFrame containing features based on gaussian fits to dQdV features in rpt cycles
"""
if params_dict is None:
params_dict=FEATURE_HYPERPARAMS[cls.class_feature_name]
if ((params_dict['rpt_type'] == 'rpt_0.2C') and (params_dict['charge_y_n'] == 1)):
max_nr_peaks = 4
cwt_range_input = np.arange(10, 30)
elif ((params_dict['rpt_type'] == 'rpt_0.2C') and (params_dict['charge_y_n'] == 0)):
max_nr_peaks = 4
cwt_range_input = np.arange(10, 30)
elif ((params_dict['rpt_type'] == 'rpt_1C') and (params_dict['charge_y_n'] == 1)):
max_nr_peaks = 4
cwt_range_input = np.arange(10, 30)
elif ((params_dict['rpt_type'] == 'rpt_1C') and (params_dict['charge_y_n'] == 0)):
max_nr_peaks = 3
cwt_range_input = np.arange(10, 30)
elif ((params_dict['rpt_type'] == 'rpt_2C') and (params_dict['charge_y_n'] == 1)):
max_nr_peaks = 4
cwt_range_input = np.arange(10, 30)
elif ((params_dict['rpt_type'] == 'rpt_2C') and (params_dict['charge_y_n'] == 0)):
max_nr_peaks = 3
cwt_range_input = np.arange(10, 50)
else:
raise ValueError("Unsupported rpt_type/charge_y_n combination: {}/{}".format(
params_dict['rpt_type'], params_dict['charge_y_n']))
peak_fit_df_ref = featurizer_helpers.generate_dQdV_peak_fits(processed_cycler_run,
diag_nr=params_dict['diag_ref'],
charge_y_n=params_dict['charge_y_n'],
rpt_type=params_dict['rpt_type'],
plotting_y_n=params_dict['plotting_y_n'],
max_nr_peaks=max_nr_peaks,
cwt_range = cwt_range_input)
peak_fit_df = featurizer_helpers.generate_dQdV_peak_fits(processed_cycler_run,
diag_nr=params_dict['diag_nr'],
charge_y_n=params_dict['charge_y_n'],
rpt_type=params_dict['rpt_type'],
plotting_y_n=params_dict['plotting_y_n'],
max_nr_peaks=max_nr_peaks,
cwt_range = cwt_range_input)
return 1 + (peak_fit_df - peak_fit_df_ref) / peak_fit_df_ref

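# Illustrative sketch (not part of the BEEP source): the rpt_type / charge_y_n branching
# above rewritten as a lookup table, making the peak-count and CWT-width choices explicit.
# The values mirror the branches; the dict name itself is an assumption.
import numpy as np

DQDV_FIT_SETTINGS = {
    ('rpt_0.2C', 1): (4, np.arange(10, 30)),
    ('rpt_0.2C', 0): (4, np.arange(10, 30)),
    ('rpt_1C', 1): (4, np.arange(10, 30)),
    ('rpt_1C', 0): (3, np.arange(10, 30)),
    ('rpt_2C', 1): (4, np.arange(10, 30)),
    ('rpt_2C', 0): (3, np.arange(10, 50)),
}
max_nr_peaks, cwt_range_input = DQDV_FIT_SETTINGS[('rpt_1C', 0)]
print(max_nr_peaks, cwt_range_input[:3])   # 3 [10 11 12]
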
def validate_data(cls, processed_cycler_run, params_dict=None):
"""
This function determines if the input data has the necessary attributes for
creation of this feature class. It should test for all of the possible reasons
that feature generation would fail for this particular input data.
Args:
processed_cycler_run (beep.structure.ProcessedCyclerRun): data from cycler run
params_dict (dict): dictionary of parameters governing how the ProcessedCyclerRun object
gets featurized. These could be filters for column or row operations
Returns:
bool: True/False indication of ability to proceed with feature generation
"""
conditions = []
if not (hasattr(processed_cycler_run, 'diagnostic_summary') and
hasattr(processed_cycler_run, 'diagnostic_interpolated')):
return False
if processed_cycler_run.diagnostic_summary.empty:
return False
else:
conditions.append(any(['hppc' in x for x in processed_cycler_run.diagnostic_summary.cycle_type.unique()]))
return all(conditions)

def features_from_processed_cycler_run(cls, processed_cycler_run, params_dict=None):
"""
This method calculates features based on voltage, diffusion and resistance changes in hppc cycles.
Note: This function calls get_dr_df; if the cell does not reach the state-of-charge window from 20% to
10%, that call will fail and raise errors. This only happens after cycle 37 and on fast-charging cells.
This function also calls get_v_diff, which takes an argument soc_window. For stronger correlation,
choose a low state of charge, which corresponds to soc_window = 8. However, as with the resistance
feature, at cycle 142 and beyond soc_window = 8 might fail on fast-charged cells. Lower soc_window
values (7 or below) should not have this problem, but they will not give correlations as high.
Args:
processed_cycler_run (beep.structure.ProcessedCyclerRun)
params_dict (dict): dictionary of parameters governing how the ProcessedCyclerRun object
gets featurized. These could be filters for column or row operations
Returns:
dataframe of features based on voltage and resistance changes over a SOC window in hppc cycles
"""
if params_dict is None:
params_dict=FEATURE_HYPERPARAMS[cls.class_feature_name]
# diffusion features
diffusion_features = featurizer_helpers.get_diffusion_features(processed_cycler_run,
params_dict['diag_pos'])
hppc_r = pd.DataFrame()
# the 9 by 6 dataframe
df_dr = featurizer_helpers.get_dr_df(processed_cycler_run, params_dict['diag_pos'])
# transform this dataframe to be 1 by 54
columns = df_dr.columns
for column in columns:
for r in range(len(df_dr[column])):
name = column + str(r)
hppc_r[name] = [df_dr[column][r]]
# the variance of ocv features
hppc_ocv = featurizer_helpers.get_hppc_ocv(processed_cycler_run, params_dict['diag_pos'])
# the v_diff features
v_diff = featurizer_helpers.get_v_diff(processed_cycler_run, params_dict['diag_pos'],
params_dict['soc_window'])
# merge everything together as a final result dataframe
return pd.concat([hppc_r, hppc_ocv, v_diff, diffusion_features], axis=1)

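# Illustrative sketch (not part of the BEEP source): the reshaping loop above, shown on a
# small made-up resistance table (2x3 instead of 9x6), flattened into a single wide row
# whose column names are "<original column><row index>".
import pandas as pd

df_dr = pd.DataFrame({"r_ch": [0.010, 0.012], "r_dis": [0.015, 0.018], "r_rest": [0.020, 0.025]})
hppc_r = pd.DataFrame()
for column in df_dr.columns:
    for r in range(len(df_dr[column])):
        hppc_r[column + str(r)] = [df_dr[column][r]]
print(list(hppc_r.columns))   # ['r_ch0', 'r_ch1', 'r_dis0', 'r_dis1', 'r_rest0', 'r_rest1']
print(hppc_r.shape)           # (1, 6)
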
def validate_data(cls, processed_cycler_run, params_dict=None):
"""
This function returns whether it is viable to compute the relaxation features.
Will return True if all the SOC windows for the HPPC are present for both
the 1st and 2nd diagnostic cycles, and False otherwise.
Args:
processed_cycler_run(beep.structure.ProcessedCyclerRun)
params_dict (dict): dictionary of parameters governing how the ProcessedCyclerRun object
gets featurized. These could be filters for column or row operations
Returns:
(boolean): True if all SOC window available in both diagnostic cycles. False otherwise.
"""
if params_dict is None:
params_dict = FEATURE_HYPERPARAMS[cls.class_feature_name]
conditions = []
if not (hasattr(processed_cycler_run, 'diagnostic_summary') and
hasattr(processed_cycler_run, 'diagnostic_interpolated')):
return False
if processed_cycler_run.diagnostic_summary.empty:
return False
if not any(['hppc' in x for x in processed_cycler_run.diagnostic_summary.cycle_type.unique()]):
return False
# chooses the first and the second diagnostic cycle
for hppc_chosen in [0, 1]:
# Getting just the HPPC cycles
hppc_diag_cycles = processed_cycler_run.diagnostic_interpolated[
processed_cycler_run.diagnostic_interpolated.cycle_type == "hppc"]
# Getting unique and ordered cycle index list for HPPC cycles, and choosing the hppc cycle
hppc_cycle_list = list(set(hppc_diag_cycles.cycle_index))
hppc_cycle_list.sort()
# Getting unique and ordered Regular Step List (Non-unique identifier)
reg_step_list = hppc_diag_cycles[hppc_diag_cycles.cycle_index == hppc_cycle_list[hppc_chosen]].step_index
reg_step_list = list(set(reg_step_list))
reg_step_list.sort()
# The value of 1 for regular step corresponds to all of the relaxation curves in the hppc
reg_step_relax = 1
# Getting unique and ordered Step Counter List (unique identifier)
step_count_list = hppc_diag_cycles[(hppc_diag_cycles.cycle_index == hppc_cycle_list[hppc_chosen]) &
(hppc_diag_cycles.step_index == reg_step_list[
reg_step_relax])].step_index_counter
step_count_list = list(set(step_count_list))
step_count_list.sort()
# The first one isn't a proper relaxation curve (it comes out of CV), so we ignore it
step_count_list = step_count_list[1:]
conditions.append(len(step_count_list) >= params_dict['n_soc_windows'])
return all(conditions)

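# Illustrative sketch (not part of the BEEP source): the "unique and ordered" selection
# pattern used above, on a hypothetical miniature slice of diagnostic_interpolated,
# keeping only the relaxation step and dropping the first counter (which comes out of CV).
import pandas as pd

hppc = pd.DataFrame({
    "cycle_index":        [2, 2, 2, 2, 2, 2],
    "step_index":         [0, 1, 1, 1, 1, 1],
    "step_index_counter": [5, 6, 6, 8, 10, 10],
})
step_count_list = sorted(set(hppc[hppc.step_index == 1].step_index_counter))
step_count_list = step_count_list[1:]   # ignore the first relaxation
print(step_count_list)                  # [8, 10]
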
def features_from_processed_cycler_run(cls, processed_cycler_run, params_dict=None):
"""
This function returns all of the relaxation features in a dataframe for a given processed cycler run.
Args:
processed_cycler_run(beep.structure.ProcessedCyclerRun): ProcessedCyclerRun object for the cell
you want the diagnostic features for.
params_dict (dict): dictionary of parameters governing how the ProcessedCyclerRun object
gets featurized. These could be filters for column or row operations
Returns:
featureDf (pd.DataFrame): Columns are either SOC{#%}_degrad{#%}, where the first #% is the
SOC % and the second #% is the % of the final voltage value of the relaxation curve at which
the time is taken, or var_{#%}, which is the variance of the other features taken at a
certain % of the final voltage value of the relaxation curve.
"""
if params_dict is None:
params_dict = FEATURE_HYPERPARAMS[cls.class_feature_name]
relax_feature_array = featurizer_helpers.get_relaxation_features(processed_cycler_run,
params_dict['hppc_list'])
col_names = []
full_feature_array = []
for i, percentage in enumerate(params_dict['percentage_list']):
col_names.append("var_{0}%".format(percentage))
full_feature_array.append(np.var(relax_feature_array[:, i]))
for j, soc in enumerate(params_dict['soc_list']):
col_names.append("SOC{0}%_degrad{1}%".format(soc, percentage))
full_feature_array.append(relax_feature_array[j, i])
return pd.DataFrame(dict(zip(col_names, full_feature_array)), index=[0])

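# Illustrative sketch (not part of the BEEP source): how the nested loop above lays a
# (n_soc x n_percentage) relaxation array out into one wide row. The 2x2 array and the
# soc/percentage lists are made-up stand-ins for get_relaxation_features output and the
# hyperparameter lists.
import numpy as np
import pandas as pd

relax_feature_array = np.array([[10.0, 20.0],     # rows: SOC windows
                                [12.0, 24.0]])    # cols: % of final voltage
percentage_list, soc_list = [50, 80], [90, 40]
col_names, full_feature_array = [], []
for i, percentage in enumerate(percentage_list):
    col_names.append("var_{0}%".format(percentage))
    full_feature_array.append(np.var(relax_feature_array[:, i]))
    for j, soc in enumerate(soc_list):
        col_names.append("SOC{0}%_degrad{1}%".format(soc, percentage))
        full_feature_array.append(relax_feature_array[j, i])
df = pd.DataFrame(dict(zip(col_names, full_feature_array)), index=[0])
print(df.columns.tolist())   # ['var_50%', 'SOC90%_degrad50%', 'SOC40%_degrad50%', 'var_80%', ...]
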
def validate_data(cls, processed_cycler_run, params_dict=None):
"""
This function determines if the input data has the necessary attributes for
creation of this feature class. It should test for all of the possible reasons
that feature generation would fail for this particular input data.
Args:
processed_cycler_run (beep.structure.ProcessedCyclerRun): data from cycler run
params_dict (dict): dictionary of parameters governing how the ProcessedCyclerRun object
gets featurized. These could be filters for column or row operations
Returns:
bool: True/False indication of ability to proceed with feature generation
"""
if params_dict is None:
params_dict = FEATURE_HYPERPARAMS[cls.class_feature_name]
conditions = []
if not (hasattr(processed_cycler_run, 'diagnostic_summary') and
hasattr(processed_cycler_run, 'diagnostic_interpolated')):
return False
if processed_cycler_run.diagnostic_summary.empty:
return False
else:
df = processed_cycler_run.diagnostic_summary
df = df[df.cycle_type == params_dict['diagnostic_cycle_type']]
conditions.append(df.cycle_index.nunique() >= max(params_dict['cycle_comp_num']) + 1)
return all(conditions)

def validate_data(cls, processed_cycler_run, params_dict=None):
"""
This function determines if the input data has the necessary attributes for
creation of this feature class. It should test for all of the possible reasons
that feature generation would fail for this particular input data.
Args:
processed_cycler_run (beep.structure.ProcessedCyclerRun): data from cycler run
params_dict (dict): dictionary of parameters governing how the ProcessedCyclerRun object
gets featurized. These could be filters for column or row operations
Returns:
bool: True/False indication of ability to proceed with feature generation
"""
if params_dict is None:
params_dict = FEATURE_HYPERPARAMS[cls.class_feature_name]
conditions = []
if 'cycle_index' in processed_cycler_run.summary.columns:
conditions.append(processed_cycler_run.summary.cycle_index.max() > params_dict['final_pred_cycle'])
conditions.append(processed_cycler_run.summary.cycle_index.min() <= params_dict['init_pred_cycle'])
else:
conditions.append(len(processed_cycler_run.summary.index) > params_dict['final_pred_cycle'])
return all(conditions)

def features_from_processed_cycler_run(cls, processed_cycler_run, params_dict=None):
"""
Generate features listed in early prediction manuscript, primarily related to the
so called delta Q feature
Args:
processed_cycler_run (beep.structure.ProcessedCyclerRun): data from cycler run
params_dict (dict): dictionary of parameters governing how the ProcessedCyclerRun object
gets featurized. These could be filters for column or row operations
Returns:
pd.DataFrame: features indicative of degradation, derived from the input data
"""
if params_dict is None:
params_dict=FEATURE_HYPERPARAMS[cls.class_feature_name]
assert params_dict['mid_pred_cycle'] > 10 # Sufficient cycles for analysis
assert params_dict['final_pred_cycle'] > params_dict['mid_pred_cycle'] # Must have final_pred_cycle > mid_pred_cycle
i_final = params_dict['final_pred_cycle'] - 1 # python indexing
i_mid = params_dict['mid_pred_cycle'] - 1
i_ini = params_dict['init_pred_cycle'] - 1
summary = processed_cycler_run.summary
params_dict['n_nominal_cycles'] = 40 # For nominal capacity, use median discharge capacity of first n cycles
if 'step_type' in processed_cycler_run.cycles_interpolated.columns:
interpolated_df = processed_cycler_run.cycles_interpolated[
processed_cycler_run.cycles_interpolated.step_type == 'discharge']
else:
interpolated_df = processed_cycler_run.cycles_interpolated
X = pd.DataFrame(np.zeros((1, 20)))
labels = []
# Discharge capacity, cycle 2 = Q(n=2)
X[0] = summary.discharge_capacity[1]
labels.append("discharge_capacity_cycle_2")
# Max discharge capacity - discharge capacity, cycle 2 = max_n(Q(n)) - Q(n=2)
X[1] = max(summary.discharge_capacity[np.arange(i_final + 1)] - summary.discharge_capacity[1])
labels.append("max_discharge_capacity_difference")
# Discharge capacity, cycle 100 = Q(n=100)
X[2] = summary.discharge_capacity[i_final]
labels.append("discharge_capacity_cycle_100")
# Feature representing time-temperature integral over cycles 2 to 100
X[3] = np.nansum(summary.time_temperature_integrated[np.arange(i_final + 1)])
labels.append("integrated_time_temperature_cycles_1:100")
# Mean of charge times of first 5 cycles
X[4] = np.nanmean(summary.charge_duration[1:6])
labels.append("charge_time_cycles_1:5")
# Descriptors based on capacity loss between cycles 10 and 100.
Qd_final = interpolated_df.discharge_capacity[interpolated_df.cycle_index == i_final]
Qd_10 = interpolated_df.discharge_capacity[interpolated_df.cycle_index == 9]
Vd = interpolated_df.voltage[interpolated_df.cycle_index == i_ini]
Qd_diff = Qd_final.values - Qd_10.values
# If DeltaQ(V) is not an empty array, compute summary stats, else initialize with np.nan
# Cells discharged rapidly over a narrow voltage window can end up with no interpolated discharge steps
if len(Qd_diff):
X[5] = np.log10(np.abs(np.nanmin(Qd_diff))) # Minimum
X[6] = np.log10(np.abs(np.nanmean(Qd_diff))) # Mean
X[7] = np.log10(np.abs(np.nanvar(Qd_diff))) # Variance
X[8] = np.log10(np.abs(skew(Qd_diff))) # Skewness
X[9] = np.log10(np.abs(kurtosis(Qd_diff))) # Kurtosis
X[10] = np.log10(np.abs(Qd_diff[0])) # First difference
else:
X[5:11] = np.nan
labels.append("abs_min_discharge_capacity_difference_cycles_2:100")
labels.append("abs_mean_discharge_capacity_difference_cycles_2:100")
labels.append("abs_variance_discharge_capacity_difference_cycles_2:100")
labels.append("abs_skew_discharge_capacity_difference_cycles_2:100")
labels.append("abs_kurtosis_discharge_capacity_difference_cycles_2:100")
labels.append("abs_first_discharge_capacity_difference_cycles_2:100")
X[11] = max(summary.temperature_maximum[list(range(1, i_final + 1))]) # Max T
labels.append("max_temperature_cycles_1:100")
X[12] = min(summary.temperature_minimum[list(range(1, i_final + 1))]) # Min T
labels.append("min_temperature_cycles_1:100")
# Slope and intercept of linear fit to discharge capacity as a fn of cycle #, cycles 2 to 100
X[13], X[14] = np.polyfit(
list(range(1, i_final + 1)),
summary.discharge_capacity[list(range(1, i_final + 1))], 1)
labels.append("slope_discharge_capacity_cycle_number_2:100")
labels.append("intercept_discharge_capacity_cycle_number_2:100")
# Slope and intercept of linear fit to discharge capacity as a fn of cycle #, cycles 91 to 100
X[15], X[16] = np.polyfit(
list(range(i_mid, i_final + 1)),
summary.discharge_capacity[list(range(i_mid, i_final + 1))], 1)
labels.append("slope_discharge_capacity_cycle_number_91:100")
labels.append("intercept_discharge_capacity_cycle_number_91:100")
IR_trend = summary.dc_internal_resistance[list(range(1, i_final + 1))]
if any(v == 0 for v in IR_trend):
IR_trend[IR_trend == 0] = np.nan
# Internal resistance minimum
X[17] = np.nanmin(IR_trend)
labels.append("min_internal_resistance_cycles_2:100")
# Internal resistance at cycle 2
X[18] = summary.dc_internal_resistance[1]
labels.append("internal_resistance_cycle_2")
# Internal resistance at cycle 100 - cycle 2
X[19] = summary.dc_internal_resistance[i_final] - summary.dc_internal_resistance[1]
labels.append("internal_resistance_difference_cycles_2:100")
# Nominal capacity
X[20] = np.median(summary.discharge_capacity.iloc[0:params_dict['n_nominal_cycles']])
labels.append("nominal_capacity_by_median")
X.columns = labels
return X

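# Illustrative sketch (not part of the BEEP source): the core "delta Q" statistics computed
# above, on two made-up interpolated discharge-capacity curves sampled over the same voltage
# grid (cycle 10 vs. the final prediction cycle).
import numpy as np
from scipy.stats import skew, kurtosis

Qd_10 = np.linspace(1.10, 0.0, 100)                            # Ah, hypothetical cycle-10 curve
Qd_final = Qd_10 - 0.03 * np.linspace(0.0, 1.0, 100) ** 2      # hypothetical faded cycle-100 curve
Qd_diff = Qd_final - Qd_10
delta_q_features = {
    "log_abs_min": np.log10(np.abs(np.nanmin(Qd_diff))),
    "log_abs_mean": np.log10(np.abs(np.nanmean(Qd_diff))),
    "log_abs_var": np.log10(np.abs(np.nanvar(Qd_diff))),
    "log_abs_skew": np.log10(np.abs(skew(Qd_diff))),
    "log_abs_kurtosis": np.log10(np.abs(kurtosis(Qd_diff))),
}
print(round(delta_q_features["log_abs_min"], 3))   # about -1.523, i.e. a 30 mAh worst-case fade
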
def validate_data(cls, processed_cycler_run, params_dict=None):
"""
This function determines if the input data has the necessary attributes for
creation of this feature class. It should test for all of the possible reasons
that feature generation would fail for this particular input data.
Args:
processed_cycler_run (beep.structure.ProcessedCyclerRun): data from cycler run
params_dict (dict): dictionary of parameters governing how the ProcessedCyclerRun object
gets featurized. These could be filters for column or row operations
Returns:
bool: True/False indication of ability to proceed with feature generation
"""
if params_dict is None:
params_dict = FEATURE_HYPERPARAMS[cls.class_feature_name]
conditions = []
cap = processed_cycler_run.summary.discharge_capacity
conditions.append(cap.min()/cap.max() < params_dict['thresh_max_cap'])
return all(conditions)

def features_from_processed_cycler_run(cls, processed_cycler_run, params_dict=None):
"""
Calculate the outcomes from the input data. In particular, the number of cycles
where we expect to reach certain thresholds of capacity loss
Args:
processed_cycler_run (beep.structure.ProcessedCyclerRun): data from cycler run
params_dict (dict): dictionary of parameters governing how the ProcessedCyclerRun object
gets featurized. These could be filters for column or row operations
Returns:
pd.DataFrame: cycles at which capacity/energy degradation exceeds thresholds
"""
if params_dict is None:
params_dict = FEATURE_HYPERPARAMS[cls.class_feature_name]
y = processed_cycler_run.cycles_to_reach_set_capacities(params_dict['thresh_max_cap'],
params_dict['thresh_min_cap'],
params_dict['interval_cap'])
return y

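# Illustrative sketch (not part of the BEEP source): the idea behind
# cycles_to_reach_set_capacities on a made-up fade curve - for each fractional capacity
# threshold, report the first cycle whose normalized capacity drops below it.
import numpy as np
import pandas as pd

discharge_capacity = pd.Series(np.linspace(1.10, 0.90, 201))   # hypothetical fade over 201 cycles
normalized = discharge_capacity / discharge_capacity.iloc[0]
thresholds = np.arange(0.98, 0.88, -0.03)                      # thresh_max_cap down toward thresh_min_cap
cycles_to_reach = {round(th, 2): int((normalized < th).idxmax()) for th in thresholds}
print(cycles_to_reach)   # {0.98: 23, 0.95: 56, 0.92: 89, 0.89: 122}
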
def validate_data(cls, processed_cycler_run, params_dict=None):
"""
This function determines if the input data has the necessary attributes for
creation of this feature class. It should test for all of the possible reasons
that feature generation would fail for this particular input data.
Args:
processed_cycler_run (beep.structure.ProcessedCyclerRun): data from cycler run
params_dict (dict): dictionary of parameters governing how the ProcessedCyclerRun object
gets featurized. These could be filters for column or row operations
Returns:
bool: True/False indication of ability to proceed with feature generation
"""
if not (hasattr(processed_cycler_run, 'diagnostic_summary') and
hasattr(processed_cycler_run, 'diagnostic_interpolated')):
return False
if processed_cycler_run.diagnostic_summary.empty:
return False
else:
return True

def features_from_processed_cycler_run(cls, processed_cycler_run, params_dict=None):
"""
Args:
processed_cycler_run (beep.structure.ProcessedCyclerRun): data from cycler run
params_dict (dict): dictionary of parameters governing how the ProcessedCyclerRun object
gets featurized. These could be filters for column or row operations
Returns:
pd.DataFrame: fractional capacity/energy remaining for each diagnostic cycle type and metric
"""
if params_dict is None:
params_dict=FEATURE_HYPERPARAMS[cls.class_feature_name]
cycle_types = processed_cycler_run.diagnostic_summary.cycle_type.unique()
X = pd.DataFrame()
for quantity in params_dict['quantities']:
for cycle_type in cycle_types:
summary_diag_cycle_type = \
featurizer_helpers.get_fractional_quantity_remaining(processed_cycler_run,
quantity, cycle_type)
summary_diag_cycle_type['cycle_type'] = cycle_type
summary_diag_cycle_type['metric'] = quantity
X = pd.concat([X, summary_diag_cycle_type])
return X

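# Illustrative sketch (not part of the BEEP source): the accumulation pattern above, tagging
# each per-cycle-type summary with 'cycle_type' and 'metric' labels and stacking everything
# into one long-format frame. get_frac() is a made-up stand-in for
# featurizer_helpers.get_fractional_quantity_remaining.
import pandas as pd

def get_frac(quantity, cycle_type):
    return pd.DataFrame({"cycle_index": [10, 110], "fractional_metric": [1.00, 0.97]})

frames = []
for quantity in ["discharge_energy", "discharge_capacity"]:
    for cycle_type in ["rpt_0.2C", "hppc"]:
        part = get_frac(quantity, cycle_type)
        part["cycle_type"] = cycle_type
        part["metric"] = quantity
        frames.append(part)
X = pd.concat(frames, ignore_index=True)
print(X.shape)   # (8, 4)
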
def process_file_list_from_json(file_list_json, processed_dir='data-share/features/'):
"""
Function to take a json file containing processed cycler run file locations,
extract features, dump the processed file into a predetermined directory,
and return a jsonable dict of feature file locations.
Args:
file_list_json (str): json string or json filename corresponding
to a dictionary with a file_list attribute,
if this string ends with ".json", a json file is assumed
and loaded, otherwise interpreted as a json string.
processed_dir (str): location for processed cycler run output files
to be placed.
Returns:
str: json string of feature files (with key "file_list").
"""
# Get file list and validity from json, if ends with .json,
# assume it's a file, if not assume it's a json string
if file_list_json.endswith(".json"):
file_list_data = loadfn(file_list_json)
else:
file_list_data = json.loads(file_list_json)
# Setup Events
events = KinesisEvents(service='DataAnalyzer', mode=file_list_data['mode'])
# Add root path to processed_dir
processed_dir = os.path.join(os.environ.get("BEEP_PROCESSING_DIR", "/"),
processed_dir)
if not os.path.exists(processed_dir):
os.makedirs(processed_dir)
file_list = file_list_data['file_list']
run_ids = file_list_data['run_list']
processed_run_list = []
processed_result_list = []
processed_message_list = []
processed_paths_list = []
for path, run_id in zip(file_list, run_ids):
logger.info('run_id=%s featurizing=%s', str(run_id), path, extra=s)
processed_cycler_run = loadfn(path)
featurizer_classes = [RPTdQdVFeatures, HPPCResistanceVoltageFeatures,
HPPCRelaxationFeatures, DiagnosticSummaryStats,
DeltaQFastCharge, TrajectoryFastCharge, DiagnosticProperties]
for featurizer_class in featurizer_classes:
if featurizer_class.class_feature_name in FEATURE_HYPERPARAMS.keys():
params_dict = FEATURE_HYPERPARAMS[featurizer_class.class_feature_name]
else:
params_dict = None
featurizer = featurizer_class.from_run(path, processed_dir, processed_cycler_run, params_dict)
if featurizer:
dumpfn(featurizer, featurizer.name)
processed_paths_list.append(featurizer.name)
processed_run_list.append(run_id)
processed_result_list.append("success")
processed_message_list.append({'comment': '',
'error': ''})
logger.info('Successfully generated %s', featurizer.name, extra=s)
else:
processed_paths_list.append(path)
processed_run_list.append(run_id)
processed_result_list.append("incomplete")
processed_message_list.append({'comment': 'Insufficient or incorrect data for featurization',
'error': ''})
logger.info('Unable to featurize %s', path, extra=s)
output_data = {"file_list": processed_paths_list,
"run_list": processed_run_list,
"result_list": processed_result_list,
"message_list": processed_message_list
}
events.put_analyzing_event(output_data, 'featurizing', 'complete')
# Return jsonable file list
return json.dumps(output_data)
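# A minimal sketch of the payload shape that process_file_list_from_json() expects; the path and
# run id below are illustrative only, and 'mode' is assumed to be whatever KinesisEvents accepts.
import json
example_payload = json.dumps({
    "mode": "test",                         # consumed by KinesisEvents
    "file_list": ["/tmp/PCR_run_0.json"],   # hypothetical processed cycler run file
    "run_list": [0],                        # one run id per file
})
# process_file_list_from_json(example_payload) would then iterate over the (path, run_id) pairs.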
Python | def Instance():
"""Return the instance reference. Create it if it doesn't exist
This method is a static method because it does not use any object
"""
if GameStateImpl.__instance is None:
GameStateImpl.__instance = super(GameStateImpl, GameStateImpl).__new__(GameStateImpl)
GameStateImpl.__instance.__init__()
GameStateImpl.__instance.SetName("Playing")
#print "GAMESTATE Playing State creating __instance object {}".format(GameStateImpl.__instance)
#print "GAMESTATE Playing State getting __instance {}".format(GameStateImpl.__instance)
return GameStateImpl.__instance
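# The __new__/__init__ dance above is a lazily created module-level singleton; a generic,
# self-contained sketch of the same pattern without the game-specific SetName() call:
class Singleton(object):
    __instance = None

    @staticmethod
    def Instance():
        if Singleton.__instance is None:
            Singleton.__instance = super(Singleton, Singleton).__new__(Singleton)
            Singleton.__instance.__init__()
        return Singleton.__instance

assert Singleton.Instance() is Singleton.Instance()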
Python | def loadModelTransform(self, matRot=matrix.Matrix.matIdent(), matTrans=matrix.Matrix.matIdent()):
''' Load/overwrite a transformation matrix for the object
NOTE: this function is NOT recursive
'''
for i in range(0, len(matRot.v)):
self.matRot.v[i] = matRot.v[i] # We're hand-writing a deep copy
self.matTrans.v[i] = matTrans.v[i]
Python | def composeModelTransform(self, matRot=matrix.Matrix.matIdent(), matTrans=matrix.Matrix.matIdent()):
''' Compose a transformation matrix for the object
NOTE: this function is NOT recursive
'''
self.matRot = matrix.mMultmat( matRot, self.matRot )
self.matTrans = matrix.mMultmat( matTrans, self.matTrans )
Python | def updateModelTransform(self, obj_ref=None, composed_xform=matrix.Matrix.matIdent()):
''' Update _xpoints based ONLY on the "model" transforms -- translation and rotation
This function updates the geometry of the bike, whether or not it is being drawn. This function is
used for computing where the bike is, in the world, for collision detection, etc.
'''
if obj_ref is None:
obj_ref = self
matRot = matrix.mMultmat( matrix.Matrix.matRotZ(obj_ref.thz * DEGTORAD), matrix.Matrix.matRotY(obj_ref.thy * DEGTORAD) )
matRot = matrix.mMultmat( matRot, matrix.Matrix.matRotX(obj_ref.thx * DEGTORAD) )
matTrans = matrix.Matrix.matTrans(obj_ref.position.x, obj_ref.position.y, obj_ref.position.z)
local_composed_xform = matrix.mMultmat(composed_xform, matTrans) # Working from outside in, multiply the incoming composed matrix by the newly computed transformation for the object
local_composed_xform = matrix.mMultmat(local_composed_xform, matRot)
local_composed_xform = matrix.mMultmat(local_composed_xform, obj_ref.matTrans) # Now, multiply the new-new composed matrix (incoming composed * newly computed) by the currently existing composed matrix
local_composed_xform = matrix.mMultmat(local_composed_xform, obj_ref.matRot)
#print "Child objects:{}".format(obj_ref.children)
if obj_ref.children:
for _, child_obj in obj_ref.children.iteritems():
self.updateModelTransform(child_obj, local_composed_xform)
# if no children, then compute final transformation matrix and render
del obj_ref._xpoints[:]
for point in obj_ref.points:
p = matrix.mMultvec(local_composed_xform, vector.Vector(point.x, point.y, point.z, 1.0)) # Use a vector, as required by pymkfmath's matrix multiplication api (and don't forget to set homogeneous coord to 1)
#print "{}: point ({}, {}, {}) -> _xpoint ({}, {}, {})".format(obj_ref, point.x, point.y, point.z, p.x, p.y, p.z)
obj_ref._xpoints.append(Point3D(p.x, p.y, p.z)) # Then convert back to Point3D to comply with the original code for this game (TODO make some synergies between Point3D and Vector)
# TODO optimize some stuff. Here, you construct Vectors for matrix mult, but then you construct Point3Ds. You do the same thing again in draw().. wasteful
obj_ref.collisionGeom.computeBounds(obj_ref)
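# The traversal above boils down to composing the parent transform with each child transform
# before applying it to the child's points. A self-contained numpy sketch of that idea (not using
# pymkfmath), with 4x4 homogeneous matrices and the same translate-then-rotate composition order:
import numpy as np

def trans(tx, ty, tz):
    m = np.eye(4); m[:3, 3] = (tx, ty, tz); return m

def rot_z(deg):
    a = np.radians(deg); c, s = np.cos(a), np.sin(a)
    m = np.eye(4); m[0, 0] = c; m[0, 1] = -s; m[1, 0] = s; m[1, 1] = c; return m

parent = trans(10, 0, 0) @ rot_z(90)        # parent frame: rotate first, then place at x=10
child = trans(-22, -5, 0)                   # child offset, e.g. a rear-wheel position
point = np.array([1.0, 0.0, 0.0, 1.0])      # homogeneous coordinate set to 1
print((parent @ child @ point)[:3])         # the point expressed in world space: [15. -21. 0.]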
Python | def resetModelTransform(self, obj_ref=None):
''' Recursively reset all models' Euler orientation to 0 degrees '''
if obj_ref is None:
obj_ref = self
obj_ref.thx = 0.0
obj_ref.thy = 0.0
obj_ref.thz = 0.0
if obj_ref.children:
for _, child_obj in obj_ref.children.iteritems():
self.resetModelTransform(child_obj)
Python | def Init(self):
""" Initialize bike object
NOTE: This replaces QBASIC InitBike()
"""
# TODO clean up Init(). There should be a separate model loading that occurs when the Bike object is created, but not every time the bike is reset (e.g. when it crash-lands)
dirname = os.path.dirname( sys.argv[0] )
with open(os.path.normpath("/".join((dirname, "../data/bike_model.json"))), 'r') as fd:
raw_bike_model = json.load(fd)
#logging.debug("raw_bike_model:{}".format(raw_bike_model))
# Construct the bike model
self._position = vector.Vector(0.0, 0.0, 0.0, 1.0) # TODO seriously.. either incorporate Point3D into the engine or don't..
self.model.position = Point3D(0.0, 0.0, 0.0) # Force a new obj, not simply a ref to self.position (could also write a "copy" function, similar to a copy constructor, but Python doesn't have copy constructors)
self.model.children['frame'] = Wireframe()
self.model.children['frame'].collisionGeom = aabb.AABB()
for item in raw_bike_model['frame_point_data']:
pt = Point3D( item[0], item[1], item[2])
self.model.children['frame'].addPoint(pt)
# Also copy the line data
self.model.children['frame'].lines.extend(raw_bike_model['frame_line_data']) # Could've also used addLine() by iterating through my line data and calling addLine(), one by one
self.model.children['frame'].colors['frame_color'] = (0, 64, 128) # NOTE/TODO load colors as part of character select
self.model.children['frame'].colors['saddle_color'] = (92, 92, 92) # NOTE/TODO load colors as part of character select
# Now, do the handlebar
self.model.children['handlebar'] = Wireframe()
self.model.children['handlebar'].collisionGeom = aabb.AABB()
for item in raw_bike_model['handlebar_point_data']:
pt = Point3D( item[0], item[1], item[2] )
self.model.children['handlebar'].addPoint(pt)
self.model.children['handlebar'].lines.extend(raw_bike_model['handlebar_line_data'])
self.model.children['handlebar'].colors['handlebar_color'] = (0, 128, 255) # NOTE/TODO load colors as part of character select
self.model.children['handlebar'].colors['grip_color'] = (198, 198, 18) # NOTE/TODO load colors as part of character select
# Rear tire
self.model.children['frame'].children['wheel'] = Wireframe()
self.model.children['frame'].children['wheel'].collisionGeom = aabb.AABB()
self.model.children['frame'].children['wheel'].position = Point3D(-22,-5,0)
for item in raw_bike_model['wheel_point_data']:
pt = Point3D( item[0], item[1], item[2] )
self.model.children['frame'].children['wheel'].addPoint(pt)
self.model.children['frame'].children['wheel'].lines.extend(raw_bike_model['wheel_line_data'])
self.model.children['frame'].children['wheel'].colors['wheel_color'] = (64, 64, 64) # NOTE/TODO load colors as part of character select
self.model.children['frame'].children['wheel'].colors['spoke_color'] = (224, 224, 12) # NOTE/TODO load colors as part of character select
# Front tire
self.model.children['handlebar'].children['wheel'] = Wireframe()
self.model.children['handlebar'].children['wheel'].collisionGeom = aabb.AABB()
self.model.children['handlebar'].children['wheel'].position = Point3D(0,-5,0)
for item in raw_bike_model['wheel_point_data']:
pt = Point3D( item[0], item[1], item[2] )
self.model.children['handlebar'].children['wheel'].addPoint(pt)
self.model.children['handlebar'].children['wheel'].lines.extend(raw_bike_model['wheel_line_data'])
self.model.children['handlebar'].children['wheel'].colors['wheel_color'] = (64, 64, 64) # NOTE/TODO load colors as part of character select
self.model.children['handlebar'].children['wheel'].colors['spoke_color'] = (242, 12, 224) # NOTE/TODO load colors as part of character select
self.model.updateModelTransform() # This is necessary to compute transformed points, to be able to draw the bike right away
# Now that the model is 'transformed' and the _xpoints arrays are populated, compute the collision geom boundaries
self.model.children['frame'].collisionGeom.computeBounds(self.model.children['frame']) # TODO fix the janky function prototype for the computeBounds calls
self.model.children['frame'].children['wheel'].collisionGeom.computeBounds(self.model.children['frame'].children['wheel'])
self.model.children['handlebar'].collisionGeom.computeBounds(self.model.children['handlebar']) # TODO optimize the computeBounds calls, maybe, by taking in a transformation of the already existing computed bounds? (i.e. calculate once, then simply transform the calculated box?)
self.model.children['handlebar'].children['wheel'].collisionGeom.computeBounds(self.model.children['handlebar'].children['wheel'])
# Calculate the wheel radius, to be used for angular velocity calculation
dimension_min = sys.maxint - 1
dimension_max = -sys.maxint + 1
for point in raw_bike_model['wheel_point_data']: # NOTE you could do this loop above, when loading model, but separating it out here makes it clearer that we're simply calculating the wheel's radius
dimension_min = min(dimension_min, point[0])
dimension_max = max(dimension_max, point[0])
#import pdb; pdb.set_trace()
self.wheelAngVel['handlebar'].radius = (dimension_max - dimension_min) / 2.0 # NOTE that this radius need not be accurate - it's used only to compute the wheels' behavior
self.wheelAngVel['frame'].radius = (dimension_max - dimension_min) / 2.0
# TODO: remove duplicate inits. We have these vars in the constructor for reference; they should be managed outside the constructor
self.crashed = False
self.inAir = False
self.tricking = False
self.trickPhase = 1
##TODO finish converting the commented-out code to either be in a bike member function, or otherwise wherever it belongs
###BikeStyle = 5
##
### TODO Bike styles and such should be initialized elsewhere (perhaps in general game init)
##RESTORE BikeColorData
##FOR n = 1 TO BikeStyle
## READ BikeCol(1), BikeCol(2), BikeCol(3), BikeCol(4)
##NEXT n
##
##RESTORE BikerData
##FOR n = 1 TO BikeStyle
## READ self.rider.maxspd, self.rider.jump, self.rider.pump, self.rider.turn
##NEXT n
##
### TODO move trick point initialization to be with level initialization
##RESTORE TrickPointData
##READ TotNumTricks
##FOR n = 1 TO TotNumTricks
## READ self.gamestatsRef.trickPointValue(n)
## self.gamestatsRef.timesUsed[n] = 0
##NEXT n
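# The wheel radius computed at the end of Init() is simply half the x-extent of the wheel's point
# cloud; a tiny standalone illustration with made-up wheel points:
wheel_points = [(-8, 0, 0), (8, 0, 0), (0, 8, 0), (0, -8, 0)]   # hypothetical data
xs = [p[0] for p in wheel_points]
radius = (max(xs) - min(xs)) / 2.0
assert radius == 8.0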
Python | def update(self, dt_s, bike):
""" Update the level manager (e.g., things like curRamp and such)
Note: This takes in the bike object so it can know the bike's _position and track current ramp, and such.
To be more general, this function can maybe take in a dict of gameobjects
"""
# TODO decide -- do we want LevelManager to have an update function? Or do we want to 'manually' update the level by calling checkRamp from the game loop?
pass
Python | def Instance():
"""Return the instance reference. Create it if it doesn't exist
This method is a static method because it does not use any object
"""
if GameStateImpl.__instance is None:
GameStateImpl.__instance = super(GameStateImpl, GameStateImpl).__new__(GameStateImpl)
GameStateImpl.__instance.__init__()
GameStateImpl.__instance.SetName("MainMenu")
#print "GAMESTATE Playing State creating __instance object {}".format(GameStateImpl.__instance)
#print "GAMESTATE Playing State getting __instance {}".format(GameStateImpl.__instance)
return GameStateImpl.__instance
Python | def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
"""Trains a linear regression model of multiple features.
Args:
features: pandas DataFrame of features
targets: pandas DataFrame of targets
batch_size: Size of batches to be passed to the model
shuffle: True or False. Whether to shuffle the data.
num_epochs: Number of epochs for which data should be repeated. None = repeat indefinitely
Returns:
Tuple of (features, labels) for next data batch
"""
# Convert pandas data into a dict of np arrays.
features = {key:np.array(value) for key,value in dict(features).items()}
# Construct a dataset, and configure batching/repeating.
ds = Dataset.from_tensor_slices((features,targets)) # warning: 2GB limit
ds = ds.batch(batch_size).repeat(num_epochs)
# Shuffle the data, if specified.
if shuffle:
ds = ds.shuffle(10000)
# Return the next batch of data.
features, labels = ds.make_one_shot_iterator().get_next()
return features, labels
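# The first line of my_input_fn() turns a DataFrame into the {column: ndarray} mapping that
# tf.data expects for dict-structured features; a standalone look at just that step:
import numpy as np
import pandas as pd

df = pd.DataFrame({"rooms": [2, 3, 4], "age": [10, 5, 1]})
features = {key: np.array(value) for key, value in dict(df).items()}
print(features["rooms"])   # [2 3 4]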
Python | def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adafactor does not support sparse gradients.')
state = self.state[p]
grad_shape = grad.shape
factored, use_first_moment = self._get_options(group, grad_shape)
# State Initialization
if len(state) == 0:
state['step'] = 0
if use_first_moment:
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(grad)
if factored:
state['exp_avg_sq_row'] = torch.zeros(grad_shape[:-1]).type_as(grad)
state['exp_avg_sq_col'] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).type_as(grad)
else:
state['exp_avg_sq'] = torch.zeros_like(grad)
state['RMS'] = 0
state['step'] += 1
state['RMS'] = self._rms(p.data)
lr = self._get_lr(group, state)
beta2t = 1.0 - math.pow(state['step'], group['decay_rate'])
update = (grad**2) + group['eps'][0]
if factored:
exp_avg_sq_row = state['exp_avg_sq_row']
exp_avg_sq_col = state['exp_avg_sq_col']
exp_avg_sq_row.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-1))
exp_avg_sq_col.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-2))
# Approximation of exponential moving average of square of gradient
self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col, update)
update.mul_(grad)
else:
exp_avg_sq = state['exp_avg_sq']
exp_avg_sq.mul_(beta2t).add_(1.0 - beta2t, update)
torch.rsqrt(exp_avg_sq, out=update).mul_(grad)
update.div_(max(1.0, self._rms(update) / group['clip_threshold']))
update.mul_(lr)
if use_first_moment:
exp_avg = state['exp_avg']
exp_avg.mul_(group['beta1']).add_(1 - group['beta1'], update)
update = exp_avg
if group['weight_decay'] != 0:
p.data.add_(-group['weight_decay'] * lr, p.data)
p.data.add_(-update)
return loss
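# The factored branch above keeps only row and column statistics of the squared-gradient EMA.
# A numpy sketch of the underlying rank-1 approximation (outer product of the row and column
# means, rescaled by the overall mean); the exact form used by self._approx_sq_grad is not shown
# in this snippet, but the reconstruction below reproduces the true row and column means exactly:
import numpy as np

v = np.random.rand(4, 6) ** 2              # stand-in for grad**2 + eps
row = v.mean(axis=1)                       # analogous to exp_avg_sq_row
col = v.mean(axis=0)                       # analogous to exp_avg_sq_col
approx = np.outer(row, col) / v.mean()     # rank-1 reconstruction of v
assert np.allclose(approx.mean(axis=1), row) and np.allclose(approx.mean(axis=0), col)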
Python | def configure_optimizers(self):
"Prepare optimizer and schedule (linear warmup and decay)"
model = self.model
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.hparams.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
if self.optim_name == 'adam':
optimizer = AdamW(
optimizer_grouped_parameters,
lr=self.hparams.learning_rate,
eps=self.hparams.adam_epsilon
)
elif self.optim_name == 'adafactor':
optimizer = Adafactor(
optimizer_grouped_parameters,
lr=self.hparams.learning_rate,
fix_lr_step=self.hparams.fix_lr_step
)
else:
raise NotImplementedError
self.opt = optimizer
return [optimizer]
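# A quick check of the no_decay grouping with a toy torch module; nn.Linear exposes 'weight' and
# 'bias', so the bias lands in the zero-weight-decay group:
import torch.nn as nn

toy = nn.Linear(4, 2)
no_decay = ["bias", "LayerNorm.weight"]
decayed = [n for n, _ in toy.named_parameters() if not any(nd in n for nd in no_decay)]
undecayed = [n for n, _ in toy.named_parameters() if any(nd in n for nd in no_decay)]
print(decayed, undecayed)   # ['weight'] ['bias']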
Python | def image_cb(self, msg):
"""Identifies red lights in the incoming camera image and publishes the index
of the waypoint closest to the red light's stop line to /traffic_waypoint
Args:
msg (Image): image from car-mounted camera
"""
self.has_image = True
self.camera_image = msg
curr_time = int(round(time.time() * 1000))
if (curr_time - self.prev_time) > 500:
self.prev_time = curr_time
light_wp, state = self.process_traffic_lights()
'''
Debouncer / state holder:
- a state has to appear two cycles in a row (= 1 sec) to be accepted as the current state (reduces flickering of TL states).
(this also applies to leaving the unknown state). EXCEPTION: If red appears, it is recognized immediately without waiting a cycle.
- if there is an unknown state for longer than 6 cycles in a row, then the current state will also go to unknown.
'''
wait_for_unknown = 5
wait_for_light_change = 1
if self.latest_tl_state == state or state == TrafficLight.RED:
self.debounce_cntr += 1
if self.debounce_cntr >= wait_for_light_change and state != TrafficLight.UNKNOWN:
self.debounced_tl_state = state
elif self.debounce_cntr >= wait_for_unknown and state == TrafficLight.UNKNOWN:
self.debounced_tl_state = TrafficLight.UNKNOWN
else:
self.debounce_cntr = 0
self.latest_tl_state = state
if self.debounced_tl_state != TrafficLight.RED:
light_wp = -1
# note: The video says to publish the stopline waypoint, however, the written instruction
# tells us to publish the index of the traffic light waypoint (ranging from 0...NUM_OF_TRAFFIC_LIGHTS-1)
# However, the planner is not interested in the index of the traffic light but in the index of the stop line
# where it is supposed to stop.
# rospy.logerr('published stopline idx: %d', light_wp)
if self.debounced_tl_state == TrafficLight.RED:
rospy.logwarn('closest_light_index:%d, RED', light_wp)
if self.debounced_tl_state == TrafficLight.YELLOW:
rospy.logwarn('closest_light_index:%d, YELLOW', light_wp)
if self.debounced_tl_state == TrafficLight.GREEN:
rospy.logwarn('closest_light_index:%d, GREEN', light_wp)
if self.debounced_tl_state == TrafficLight.UNKNOWN:
rospy.logwarn('Traffic Light State unknown')
self.upcoming_red_light_pub.publish(Int32(light_wp))
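# The debouncer above can be exercised in isolation. A stripped-down stand-in for the state
# holder (the RED/GREEN/UNKNOWN constants are placeholders here, not the styx_msgs values):
RED, GREEN, UNKNOWN = 0, 2, 4

class Debouncer(object):
    def __init__(self):
        self.latest, self.debounced, self.cntr = None, UNKNOWN, 0

    def update(self, state, wait_for_light_change=1, wait_for_unknown=5):
        if self.latest == state or state == RED:
            self.cntr += 1
            if self.cntr >= wait_for_light_change and state != UNKNOWN:
                self.debounced = state
            elif self.cntr >= wait_for_unknown and state == UNKNOWN:
                self.debounced = UNKNOWN
        else:
            self.cntr = 0
        self.latest = state
        return self.debounced

d = Debouncer()
print([d.update(s) for s in [GREEN, GREEN, RED]])   # [4, 2, 0]: GREEN needs a repeat, RED is immediate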
Python | def process_traffic_lights(self):
"""Finds closest visible traffic light, if one exists, and determines its
location and color
Returns:
int: index of the waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
#rospy.logwarn("self.pose: {}".format(self.pose))
#rospy.logwarn("self.waypoints: {}".format(self.waypoints))
#rospy.logwarn("self.stop_wp_list: {}".format(self.stop_wp_list[0]))
if self.training_bagfile_only:
'''
The training_bagfile_only option allows launching the tl state detection even though
there is no pose available (the current bag files don't have pose data).
'''
state = self.get_light_state()
return -1, state
elif self.pose and self.waypoints and self.stop_wp_list:
car_x = self.pose.pose.position.x
car_y = self.pose.pose.position.y
car_wp = self.get_closest_waypoint([car_x, car_y])
min_delta_index = float('inf')
closest_light_index = None
tl_idx = None
for i in range(len(self.stop_wp_list)):
delta_index = self.stop_wp_list[i] - car_wp
if delta_index < 0:
delta_index += len(self.waypoints)
if delta_index < LOOK_AHEAD and delta_index < min_delta_index:
min_delta_index = delta_index
closest_light_index = self.stop_wp_list[i]
tl_idx = i
# rospy.logerr('closest_light_index:%d',closest_light_index)
if closest_light_index is not None:  # explicit None check, so a stop line at waypoint 0 is not treated as "no light"
state = self.get_light_state()
# rospy.logerr('light index:%d state:%d',closest_light_index, state)
return closest_light_index, state
return -1, TrafficLight.UNKNOWN
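# The delta_index wrap-around above is plain modular arithmetic over the waypoint list; a tiny
# standalone check with a hypothetical 100-waypoint track:
num_waypoints = 100
car_wp, stop_wp = 95, 3
delta_index = stop_wp - car_wp
if delta_index < 0:
    delta_index += num_waypoints
assert delta_index == (stop_wp - car_wp) % num_waypoints == 8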
Python | def control(self, lin_velocity_x, ang_velocity_z, curr_lin_velocity_x, curr_ang_velocity_z, dbw_enabled, t_now):
'''
Method executes the actual controller implementation. It needs to be called cyclically and fed with the latest
requested and measured linear velocity and yaw rate, timestamped with t_now (since the input signals do not carry
timestamps, there is considerable uncertainty; however, the difference between t_now and the previous call should
approximate the interval between the signals' sample points).
'''
throttle = 0. # range 0...1 (no acceleration to max acceleration)
brake = 0. # brake torque in Nm, higher value => stronger braking (only positive range)
steer = 0. # in radian (rule: si-units everywhere...)
max_brake_force = 800. # Nm
# there is some jitter in the measured velocity, therefore it needs to be filtered.
curr_lin_velocity_x = self.lpf.filt(curr_lin_velocity_x)
# keep using the steer controller even if dbw is not enabled (the controller does not have internal states,
# so nothing can get messed up).
steer = self.lat_ctrl.get_steering(lin_velocity_x, ang_velocity_z, curr_lin_velocity_x)
t_d = None
if self.t_past is not None:
t_d = t_now - self.t_past
self.t_past = t_now
if dbw_enabled is True and t_d is not None:
# t delta between the last and current sample is required in order to integrate / differentiate properly
vel_err = lin_velocity_x-curr_lin_velocity_x
throttle = self.lon_ctrl.step(error=vel_err, sample_time=t_d)
if curr_lin_velocity_x < 0.1 and lin_velocity_x < 0.1:
'''
If there is more or less no request for velocity, it is assumed that the vehicle shall
stop. Therefore use the handbrake if the vehicle reaches the actual standstill
(defined by velocity < 0.1). This state can be left by requesting higher velocity.
'''
brake = max_brake_force
throttle = 0.
elif throttle < -0.05 and vel_err < 0.:
'''
If the difference between requested and actual velocity is negative ('vel_err < 0.'), then the vehicle must
reduce its speed. Moreover, we wait for a negative throttle (the controller should follow the request), otherwise
the time between switching from acceleration to braking is more or less 0.
Note 1: The corr_factor was introduced due to testing with the script "dbw_test.py". There, the car always seems
to use much more brake force than the implementation here. The corr_factor corrects this difference.
Note 2: Waiting for the throttle to become negative is somewhat risky since it is not clear how much the brake
request is delayed.
'''
corr_factor = 2.5
deceleration = min(abs(self.max_lon_decel), abs(vel_err))
brake = abs(self.vehicle_mass * deceleration * self.wheel_radius) * corr_factor
throttle = 0.
elif throttle > 0.05 and vel_err > 0.:
'''
If the controller is planning to accelerate, just make sure there is no brake being used at the same time.
'''
brake = 0.
else:
'''
If the throttle is somewhere between -0.05 and 0.05, then neither the brake nor the throttle should be
used to avoid using both at the same time.
'''
brake = 0.
throttle = 0.
else:
# reset the internal I-value to prevent the I-part running amok when disengaged.
self.lon_ctrl.reset()
#rospy.logerr("Linear Velocity: {}".format(lin_velocity_x))
#rospy.logerr("Angular Velocity: {}".format(ang_velocity_z))
#rospy.logerr("Current Linear Velocity: {}".format(curr_lin_velocity_x))
#rospy.logerr("Current Angular Velocity: {}".format(steer))
#rospy.logerr("Throttle: {}".format(throttle))
#rospy.logerr("Brake: {}".format(brake))
return throttle, brake, steer
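# The braking branch converts a desired deceleration into a wheel torque via mass * decel *
# wheel_radius; a standalone number check with made-up vehicle parameters (2.5 is the tuning
# corr_factor from the comment above, not a physical constant):
vehicle_mass = 1736.35    # kg, hypothetical
wheel_radius = 0.2413     # m, hypothetical
max_lon_decel = 5.0       # m/s^2, hypothetical
vel_err = -2.0            # requested minus actual velocity, m/s
deceleration = min(abs(max_lon_decel), abs(vel_err))
brake = abs(vehicle_mass * deceleration * wheel_radius) * 2.5
print(round(brake, 1))    # 2094.9 Nm with these numbers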
Python | def pick_best_box(self, min_score, boxes, scores, classes):
"""Return a box with the highest confidence"""
n = len(classes)
idxs = []
max_score = -1
for i in range(n):
if scores[i] >= max_score and scores[i] >= min_score:
idxs = [i]
max_score = scores[i]
filtered_box = boxes[idxs, ...]
filtered_score = scores[idxs, ...]
filtered_class = classes[idxs, ...]
return filtered_box, filtered_score, filtered_class
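# pick_best_box() indexes with a list so that a single winner keeps a leading axis (shape (1, 4)
# rather than (4,)); a standalone numpy illustration of that indexing behaviour:
import numpy as np

boxes = np.array([[0.1, 0.1, 0.5, 0.5], [0.2, 0.2, 0.9, 0.9]])
scores = np.array([0.3, 0.8])
idxs = [int(np.argmax(scores))]                      # here equivalent to the scan above
print(boxes[idxs, ...].shape, boxes[1, ...].shape)   # (1, 4) (4,)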
Python | def draw_boxes(self, image, boxes, classes, scores, thickness=4):
"""Draw bounding boxes on the image"""
draw = ImageDraw.Draw(image)
for i in range(len(boxes)):
bot, left, top, right = boxes[i, ...]
color = (int(classes[i])//2*255, int(classes[i])%2*255, 0)
draw.line(
[(left, top), (left, bot), (right, bot), (right, top), (left, top)],
width=thickness,
fill=color,
)
draw.text((left, top), "{}".format(round(1E4*scores[i])/100))
Python | def classifier_debug_helper(self, image, box, cl, score):
""" adds the identified box to the image """
bridge = CvBridge()
"""
with passthrough encoding, rqt_image_view reports:
ImageView.callback_image() could not convert image from '8UC3' to 'rgb8' ([8UC3] is not
a color format. but [rgb8] is. The conversion does not make sense).
=> therefore the encoding is set explicitly
"""
image = self.draw_box(image, box, cl, score, thickness=4)
image_message = bridge.cv2_to_imgmsg(
image, encoding="rgb8"
)
self.debug_output_stream.publish(image_message)
Python | def draw_box(self, image, box, class_, score, thickness=4):
"""Draw bounding boxes on the image"""
draw_img = Img.fromarray(image)
draw = ImageDraw.Draw(draw_img)
width, height = draw_img.size
bot = box[0] * height
left = box[1] * width
top = box[2] * height
right = box[3] * width
if score > self.threshold:
color = (int(class_)//2*255, int(class_)%2*255, 0)
draw.line(
[(left, top), (left, bot), (right, bot), (right, top), (left, top)],
width=thickness,
fill=color,
)
draw.text((left, top), "{}".format(round(1E4*score)/100))
return np.array(draw_img)
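# draw_box() expects normalized [bot, left, top, right] coordinates and scales them by the image
# size; the conversion on its own, with a hypothetical 800x600 image:
width, height = 800, 600
box = [0.55, 0.30, 0.40, 0.35]   # normalized (bot, left, top, right)
bot, left, top, right = box[0] * height, box[1] * width, box[2] * height, box[3] * width
print(bot, left, top, right)     # 330.0 240.0 240.0 280.0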
Python | def classify_light_color(self, rgb_image):
"""
Full pipeline of classifying the traffic light color from the traffic light image
:param rgb_image: the RGB image array (height,width, RGB channel)
:return: the color index ['red', 'yellow', 'green', '_', 'unknown']
"""
hue_1d_deg = self.get_masked_hue_values(rgb_image)
if len(hue_1d_deg) == 0:
return 0
hue_1d_rad = self.convert_to_hue_angle(hue_1d_deg)
return self.classify_color_by_range(hue_1d_rad)
Python | def classify_color_by_range(self, hue_value):
"""
Determine the color (red, yellow or green) in a hue value array
:param hue_value: hue_value is radians
:return: the color index ['red', 'yellow', 'green', '_', 'unknown']
"""
red_index, green_index, yellow_index = self.get_rgy_color_mask(hue_value)
color_counts = np.array([np.sum(red_index) / len(hue_value),
np.sum(yellow_index) / len(hue_value),
np.sum(green_index) / len(hue_value)])
# TODO: this could use a nicer approach
color_text = [2, 3, 1]
min_index = np.argmax(color_counts)
return color_text[min_index]
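# The per-color voting above can be illustrated with plain numpy and some assumed hue ranges
# (the real ranges live in get_rgy_color_mask, which is not shown here): hue in radians, red
# near 0, yellow near pi/3, green near 2*pi/3.
import numpy as np

hue = np.array([0.05, 0.1, 1.9, 2.0, 2.1])          # mostly "green" samples
red = np.abs(hue) < 0.35
yellow = np.abs(hue - np.pi / 3) < 0.35
green = np.abs(hue - 2 * np.pi / 3) < 0.35
counts = np.array([red.sum(), yellow.sum(), green.sum()]) / float(len(hue))
print([2, 3, 1][int(np.argmax(counts))])            # 1, i.e. the value this class maps to green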
Python | def twist_cmd_cb(self, msg):
'''
TODO: Describe why only the x velocity and yaw rate shall be used.
'''
# for debugging purposes:
if self.lin_velocity_x_cmd is None:
rospy.loginfo("First twist command received...")
self.lin_velocity_x_cmd = msg.twist.linear.x
self.ang_velocity_yaw_cmd = msg.twist.angular.z
Python | def car_curr_vel_cb(self, msg):
'''
Ego motion of the car measured as a feedback for the steering.
TODO: Describe why only the x velocity and yaw rate shall be used.
'''
# for debugging purposes:
if self.lin_velocity_x_feedback is None:
rospy.loginfo("First vehicle speed feedback received...")
self.lin_velocity_x_feedback = msg.twist.linear.x
self.ang_velocity_yaw_feedback = msg.twist.angular.z
Python | def current_bokeh_tornado_server(doc = None):
"""Returns the current bokeh.server.tornado.BokehTornado instance, or
the one associated with a particular bokeh Document"""
if doc is None:
doc = bokeh.plotting.curdoc()
host = doc.session_context.request.headers["Host"]
doc_port = None
if ":" in host:
doc_port = int(host.split(":")[-1])
for s, handler in doc.session_context.server_context.application_context.io_loop.handlers.values():
if not hasattr(s, "getsockname"): continue
if doc_port is not None and s.getsockname()[1] != doc_port: continue
connection_handler = handler.__closure__[0].cell_contents # tornado.httpserver.HTTPServer._handle_connection
http_server = connection_handler.__self__ # tornado.httpserver.HTTPServer
bokeh_tornado = http_server.request_callback # bokeh.server.tornado.BokehTornado
return bokeh_tornado
Python | def register_module(self, name=None, force=False, module=None):
"""Register a module.
A record will be added to `self._module_dict`, whose key is the class
name or the specified name, and value is the class itself.
It can be used as a decorator or a normal function.
Example:
>>> backbones = Registry('backbone')
>>> @backbones.register_module()
>>> class ResNet:
>>> pass
>>> backbones = Registry('backbone')
>>> @backbones.register_module(name='mnet')
>>> class MobileNet:
>>> pass
>>> backbones = Registry('backbone')
>>> class ResNet:
>>> pass
>>> backbones.register_module(ResNet)
Args:
name (str | None): The module name to be registered. If not
specified, the class name will be used.
force (bool, optional): Whether to override an existing class with
the same name. Default: False.
module (type): Module class to be registered.
"""
if not isinstance(force, bool):
raise TypeError(f'force must be a boolean, but got {type(force)}')
# NOTE: This is a workaround to be compatible with the old API,
# while it may introduce unexpected bugs.
if isinstance(name, type):
return self.deprecated_register_module(name, force=force)
# use it as a normal method: x.register_module(module=SomeClass)
if module is not None:
self._register_module(
module_class=module, module_name=name, force=force)
return module
# raise the error ahead of time
if not (name is None or isinstance(name, str)):
raise TypeError(f'name must be a str, but got {type(name)}')
# use it as a decorator: @x.register_module()
def _register(cls):
self._register_module(
module_class=cls, module_name=name, force=force)
return cls
return _register |
Python | def prepare_packet(self):
"""input stream callback function
TODO first do it
1. check input stream timestamp is ready
2. get correct packet from input_stream to default_context
3. if default_context is ready, add self.run to schedule queue"""
flag = False
packet_timestamp = -1
if self._default_run_condition == 'one or more':
for index, stream in enumerate(self.input_streams):
# drop any expired packets
while len(stream) and stream.get() < self:
expire_package = stream.popleft()
logger.warning('[%s] dropped expired packet [%s]', self, expire_package)
if len(stream) and stream.get() >= self:
packet = stream.popleft()
stream_mirror = self._default_context.inputs()[index]
stream_mirror.add_packet(packet)
flag = True
packet_timestamp = packet.timestamp
# TODO prepare update accept timestamp
if flag:
self.timestamp = packet_timestamp
logger.debug('%s update timestamp:%s', self, self.timestamp)
return flag |
Python | def on_group_view_change(cluster: InnoDBCluster, members: list, view_id_changed: bool) -> None:
"""
Triggered from the GroupMonitor whenever the membership view changes.
This handler should react to changes that wouldn't be noticed by regular
pod and cluster events.
It also updates cluster status in the pods and cluster objects.
"""
c = ClusterController(cluster)
c.on_group_view_change(members, view_id_changed) |
Python | def on_pod_create(body: Body, logger: Logger, **kwargs):
"""
Handle MySQL server Pod creation, which can happen when:
- cluster is being first created
- cluster is being scaled up (more members added)
"""
# TODO ensure that the pod is owned by us
pod = MySQLPod.from_json(body)
# check general assumption
assert not pod.deleting
logger.info(f"POD CREATED: pod={pod.name} ContainersReady={pod.check_condition('ContainersReady')} Ready={pod.check_condition('Ready')} gate[configured]={pod.get_member_readiness_gate('configured')}")
configured = pod.get_member_readiness_gate("configured")
if not configured:
# TODO add extra diagnostics about why the pod is not ready yet, for
# example, unbound volume claims, initconf not finished etc
raise kopf.TemporaryError(f"Sidecar of {pod.name} is not yet configured", delay=10)
# If we are here all containers have started. This means, that if we are initializing
# the database from a donor (cloning) the sidecar has already started a seed instance
# and cloned from the donor into it (see initdb.py::start_clone_seed_pod())
cluster = pod.get_cluster()
logger.info(f"CLUSTER DELETING={cluster.deleting}")
assert cluster
with ClusterMutex(cluster, pod):
first_pod = pod.index == 0 and not cluster.get_create_time()
if first_pod:
cluster_objects.on_first_cluster_pod_created(cluster, logger)
g_group_monitor.monitor_cluster(
cluster, on_group_view_change, logger)
cluster_ctl = ClusterController(cluster)
cluster_ctl.on_pod_created(pod, logger)
# Remember how many restarts happened as of now
g_ephemeral_pod_state.set(
pod, "mysql-restarts", pod.get_container_restarts("mysql")) |
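The handler above is presumably registered through a kopf decorator; a hedged sketch of that wiring, where the label selector is hypothetical and depends on how the operator labels its server pods.

import kopf

@kopf.on.create("", "v1", "pods", labels={"component": "mysqld"})  # selector is an assumption
def handle_pod_create(body, logger, **kwargs):
    ...  # the real handler body is the on_pod_create shown above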
Python | def on_pod_event(event, body: Body, logger: Logger, **kwargs):
"""
Handle low-level MySQL server pod events. The events we're interested in are:
- when a container restarts in a Pod (e.g. because of mysqld crash)
"""
# TODO ensure that the pod is owned by us
while True:
try:
pod = MySQLPod.from_json(body)
member_info = pod.get_membership_info()
ready = pod.check_containers_ready()
if pod.phase != "Running" or pod.deleting or not member_info:
logger.debug(
f"ignored pod event: pod={pod.name} containers_ready={ready} deleting={pod.deleting} phase={pod.phase} member_info={member_info}")
return
mysql_restarts = pod.get_container_restarts("mysql")
event = ""
if g_ephemeral_pod_state.get(pod, "mysql-restarts") != mysql_restarts:
event = "mysql-restarted"
containers = [
f"{c.name}={'ready' if c.ready else 'not-ready'}" for c in pod.status.container_statuses]
conditions = [
f"{c.type}={c.status}" for c in pod.status.conditions]
logger.info(f"POD EVENT {event}: pod={pod.name} containers_ready={ready} deleting={pod.deleting} phase={pod.phase} member_info={member_info} restarts={mysql_restarts} containers={containers} conditions={conditions}")
cluster = pod.get_cluster()
if not cluster:
logger.info(
f"Ignoring event for pod {pod.name} belonging to a deleted cluster")
return
with ClusterMutex(cluster, pod):
cluster_ctl = ClusterController(cluster)
# Check if a container in the pod restarted
if ready and event == "mysql-restarted":
cluster_ctl.on_pod_restarted(pod, logger)
g_ephemeral_pod_state.set(
pod, "mysql-restarts", mysql_restarts)
# Check if we should refresh the cluster status
status = cluster_ctl.probe_status_if_needed(pod, logger)
if status == diagnose.ClusterDiagStatus.UNKNOWN:
raise kopf.TemporaryError(
f"Cluster has unreachable members. status={status}", delay=15)
break
except kopf.TemporaryError as e:
# TODO review this
# Manually handle retries, the event handler isn't getting called again
# by kopf (maybe a bug or maybe we're using it wrong)
logger.info(f"{e}: retrying after {e.delay} seconds")
if e.delay:
time.sleep(e.delay)
continue |
Python | def on_pod_delete(body: Body, logger: Logger, **kwargs):
"""
Handle MySQL server Pod deletion, which can happen when:
- cluster is being scaled down (members being removed)
- cluster is being deleted
- user deletes a pod by hand
"""
# TODO ensure that the pod is owned by us
pod = MySQLPod.from_json(body)
# check general assumption
assert pod.deleting
# removeInstance the pod
cluster = pod.get_cluster()
if cluster:
with ClusterMutex(cluster, pod):
cluster_ctl = ClusterController(cluster)
cluster_ctl.on_pod_deleted(pod, body, logger)
if pod.index == 0 and cluster.deleting:
cluster_objects.on_last_cluster_pod_removed(cluster, logger)
else:
pod.remove_member_finalizer(body)
logger.error(f"Owner cluster for {pod.name} does not exist anymore") |
Python | def prepare_backup_secrets(spec: InnoDBClusterSpec) -> dict:
"""
Secrets for authenticating backup tool with MySQL.
"""
backup_user = utils.b64encode(config.BACKUP_USER_NAME)
backup_pwd = utils.b64encode(utils.generate_password())
# We use a separate secrets object for the backup, so that we don't need to
# give access for the main secret to backup instances.
# No need to namespace it. A namespaced secret will be created by the caller
tmpl = f"""
apiVersion: v1
kind: Secret
metadata:
name: {spec.name}-backup
labels:
tier: mysql
mysql.oracle.com/cluster: {spec.name}
app.kubernetes.io/name: mysql-innodbcluster
app.kubernetes.io/instance: idc-{spec.name}
app.kubernetes.io/managed-by: mysql-operator
app.kubernetes.io/created-by: mysql-operator
data:
backupUsername: {backup_user}
backupPassword: {backup_pwd}
"""
return yaml.safe_load(tmpl) |
Python | def LoadFiles (self):
"""Clear the files list and then read from the file to repopulate it."""
self.Files = {}
# Open file
with open(CONTENT_FILE) as csv_file:
# Initialise the CSV file reader
csv_reader = csv.reader(csv_file, delimiter=",")
# Loop through and save the contents to the Files attribute
for row in csv_reader:
self.Files[row[0]] = row[1] |
Python | def PlaySoundByTitle (self, title, loop=False):
"""Gets the filename when given a title and plays the file"""
# Get file name, then run the PlaySound Method
filename = self.Files[title]
self.PlaySound(filename, loop=loop) |
Python | def SoundGenerator (self, max=PAGE_LIMIT, skip=0):
"""Yields filenames that are stored in Files"""
if max > len(self.Files)-skip: # If there are fewer files left than the max amount, loop through all of them
# Iters is used to skip the given amount of iterations
iters = 0
for title, value in self.Files.items():
if iters < skip: # Skip if we haven't passed the skip amount.
iters += 1
continue
yield title
iters += 1
else: # Only loop through until there are the max amount displayed.
# Iters is used to see if we have gone over the max amount.
iters = 0
for title, value in self.Files.items():
if iters < skip:
iters += 1
continue
yield title
iters += 1
# Exit if we have gone over the max amount.
if iters >= max+skip:
break |
Python | def DeleteEntry (self, entryName, deleteAudioFile=False):
"""Delete an entry from the CONTENT_FILE file and then reload the list."""
# to do this, we create a temporary file and copy every row into it from the current file unless that row
# has the same name as the given name.
tempfile = NamedTemporaryFile(delete=False, mode="w")
# Open csv file and initialise the reader
with open(CONTENT_FILE) as csvFile:
reader = csv.reader(csvFile, delimiter=",")
# loop through each element in the current file, checking if the name matches the given name
# write the row to the temp file if it does match, otherwise ignore that line
for row in reader:
if row[0] == entryName:
# The name matches; delete the audio file if requested, otherwise simply skip writing this row to the new file.
if deleteAudioFile:
# Delete the audio file as well, since it was requested.
osRem("Files/Audio/" + str(row[1]))
else:
# Write the data to the temporary file
tempfile.write(",".join(row) + "\n")
# Close the temp file and then move it
tempfile.close()
shutil.move(tempfile.name, CONTENT_FILE)
# Re-load the files
self.LoadFiles() |
Python | def AddEntry (self, files: tuple):
"""Add an audio entry to the content file, move the file to the audio folder, then reload"""
for file in files:
# split the filename from the path
filename = file.split("/")[-1]
# Move the file to the audio folder
shutil.move(file, "Files/Audio/" + str(filename))
# Write the new entry to the content file setting the title as the filename without the extension
with open(CONTENT_FILE, "a") as content:
content.write(filename.replace(".wav", "") + "," + filename + "\n")
# Reload the files list
self.LoadFiles() |
Python | def RenameEntry (self, curName, newName):
"""Rename a current entry to a new name"""
# to do this, a temp file is created, then copy every row from the current file into it, changing the value if
# the title matches
tempfile = NamedTemporaryFile(delete=False, mode="w")
# Open csv file and initialise the reader
with open(CONTENT_FILE) as csvFile:
reader = csv.reader(csvFile, delimiter=",")
# loop through each element, add if it does not match the curName and switch if it does match
for row in reader:
if row[0] == curName:
# The names match so switch the values
row[0] = newName
# Write to the temporary file
tempfile.write(",".join(row) + "\n")
# Close the temp file and then overwrite the CONTENT_FILE with it
tempfile.close()
shutil.move(tempfile.name, CONTENT_FILE)
# Re-load the files
self.LoadFiles() |
Python | def MoveEntryUp (self, title):
"""Moves the audio file entry with the given title up one slot"""
# Loop through the file until we find an element with the given title
with open(CONTENT_FILE) as csvFile:
reader = csv.reader(csvFile, delimiter=",")
# Save the line that the element was on and the data of that line
line = 0
rowData = []
for row in reader:
if row[0] == title:
rowData = row
break
line += 1
# If the file isn't the first element, then do the swapping
if line > 0:
# Create a temp file to write to.
tempfile = NamedTemporaryFile(mode="w", delete=False)
with open(CONTENT_FILE) as csvFile:
reader = csv.reader(csvFile, delimiter=",")
# Deduct 1 from the swapline as this is the line we need to swap with.
swapLine = line - 1
line = -1 # Line starts at -1 as it is updated at the top of the loop
# (incrementing at the end of the loop did not work correctly here).
for row in reader:
line += 1
# If we are on the swap line, then write the saved data, then the data that it is swapped with
if line == swapLine:
tempfile.write(",".join(rowData) + "\n")
tempfile.write(",".join(row) + "\n")
elif line == swapLine + 1:
# Skip the line the data was originally on
continue
else:
# Just write the data as this didn't need to be swapped out.
tempfile.write(",".join(row) + "\n")
# Close file and then overwrite the CONTENT_FILE
tempfile.close()
shutil.move(tempfile.name, CONTENT_FILE)
# Re-load the files
self.LoadFiles() |
Python | def MoveEntryDown(self, title):
"""Moves the audio file entry with the given title down one slot"""
with open(CONTENT_FILE) as csvFile:
reader = csv.reader(csvFile, delimiter=",")
iters = 0
rowData = []
for row in reader:
if row[0] == title:
rowData = row
break
iters += 1
with open(CONTENT_FILE) as fle: rows = sum(1 for _ in fle)
# If the item found isn't the last element then run
if iters < rows - 1: # -1 as the rows needs to become the index value
tempfile = NamedTemporaryFile(mode="w", delete=False)
with open(CONTENT_FILE) as csvFile:
reader = csv.reader(csvFile, delimiter=",")
# Start at -1, matching MoveEntryUp, since the counter is incremented at the top of the loop
line = -1
for row in reader:
line += 1
if line == iters:
continue
elif line == iters + 1:
tempfile.write(",".join(row) + "\n")
tempfile.write(",".join(rowData) + "\n")
else:
tempfile.write(",".join(row) + "\n")
tempfile.close()
shutil.move(tempfile.name, CONTENT_FILE)
# Re-load the files
self.LoadFiles() |
Python | def showPage(self, pageName):
"""This function brings the chosen page to the top."""
page = self.Pages[pageName]
page.tkraise()
page.PageUpdate()
page.focus_set() |
Python | def PageUpdate(self):
"""Run an entire page update, mostly used by the controller when changing pages."""
self.PageNumber = 0
self.loadNames(self.PageNumber) |
Python | def loadNames (self, pageNumber=0):
"""Loads the names into the buttons"""
row = 0
col = 0
# Loop through all the titles and then update the button to reflect the title.
for title in self.controller.AudioManager.SoundGenerator(skip=pageNumber*PAGE_LIMIT):
if len(title) > 19:
dispTitle = title[0:17] + "..."
else:
dispTitle = title
self.Buttons[row][col].configure(text=dispTitle)
self.Buttons[row][col].configure(command=lambda t=title: self.playSoundName(t))
# Increase the column
col += 1
# If the column num is greater than the columns for this page, then reset to zero and increase the row.
if col >= self.COLS:
row += 1
col = 0
# if the rows are greater than the max amount for this page, break out the loop as all is filled.
if row >= self.ROWS:
break
else:
# Check if it never looped or if the rows have been completed. return False if so
# return False shows that this page had no elements
if row >= self.ROWS or (col == 0 and row == 0):
return False
# Fill in the rest of the buttons with a placeholder text and clear the command
while row < self.ROWS:
if col >= self.COLS:
col = 0
row += 1
if row >= self.ROWS:
break
self.Buttons[row][col].configure(text="*****")
self.Buttons[row][col].configure(command=lambda: None)
col += 1
# Return true to show that this page has elements
return True |
Python | def nextPage(self):
"""Try to change page, if it works page number increases"""
if self.loadNames(pageNumber=self.PageNumber+1):
self.PageNumber += 1
self.PageText.configure(text="Page: " + str(self.PageNumber+1)) |
Python | def prevPage(self):
"""If the page isn't already 0, decrease the page and then decrease the page number"""
if self.PageNumber >= 1:
self.PageNumber -= 1
self.PageText.configure(text="Page: " + str(self.PageNumber + 1))
self.loadNames(pageNumber=self.PageNumber) |
Python | def LoopClick(self):
"""Change the loop variable so that the sounds been looped"""
# Switch the loop variable to the inverse of its current state.
self.doLoop = not self.doLoop
# Update the button colour so that the user knows the state.
if self.doLoop:
self.loopBtn.configure(bg="#ee0000")
else:
self.loopBtn.configure(bg="#eeeeee") |
Python | def PageUpdate(self):
"""Run an entire page update, mostly used by the controller when changing pages."""
# Reset the page number to the start and then re-load the buttons
self.PageNumber = 0
self.loadButtons(self.PageNumber) |
Python | def loadButtons(self, pageNumber=0):
"""load the names of the buttons"""
# Clear all the current elements
for widget in self.buttonsPanel.winfo_children():
widget.destroy()
row = 0
# Loop through all the available titles for the given page and create a label and relevant buttons
for title in self.controller.AudioManager.SoundGenerator(max=self.maxPerPage, skip=pageNumber*self.maxPerPage):
# Audio title text
tk.Label(self.buttonsPanel, text=title, font=FONTS["l"]).grid(row=row, column=0, sticky="nsew")
# Remove Button
tk.Button(self.buttonsPanel, text="Remove", font=FONTS["m"],
command=lambda t=title: self.DeleteElement(t)).grid(row=row, column=1, sticky="nsew")
# Rename Button
tk.Button(self.buttonsPanel, text="Rename", font=FONTS["m"],
command=lambda t=title: self.renameElement(t)).grid(row=row, column=2, sticky="nsew")
# Move up button
tk.Button(self.buttonsPanel, text="Move Up", font=FONTS["m"],
command=lambda t=title: self.MoveElementUp(t)).grid(row=row, column=3, sticky="nsew")
# Move down button
tk.Button(self.buttonsPanel, text="Move Down", font=FONTS["m"],
command=lambda t=title: self.MoveElementDown(t)).grid(row=row, column=4, sticky="nsew")
# Increase the row
row += 1
# Tell the user that there are no audio files if none have been found!
if pageNumber == 0 and row == 0:
txt = "No audio files are available.\nAdd one now by pressing the 'Add new audio' button below."
tk.Label(self.buttonsPanel,
text=txt).grid(row=0, column=0, columnspan=3)
return False
elif row == 0:
# Go back a page as there is nothing on this page.
self.prevPage()
return False
# Return value indicates whether somethings been found or not
return True |
Python | def DeleteAllElements (self):
"""Remove all elements after confirming the user wants to."""
# Ask the user if they are sure.
result = tkAskYesNo("Delete ",
"Are you sure you want to delete all elements?\nThis can NOT be undone.", icon="warning")
if result: # Run the delete function if user confirms
self.controller.AudioManager.DeleteAllEntries()
else: # Tell the user nothing has happened if they cancel
tkShowInfo("Update!", "Nothing has been deleted!")
# Update the page
self.PageUpdate() |
Python | def DeleteElement(self, elementName):
"""Deletes an audio entry from the content file."""
# Ask the user if they are sure.
result = tkAskYesNo("Delete " + elementName,
"Are you sure you want to delete {0}?\nThis can not be undone.".format(elementName), icon="warning")
# If the user confirms they are sure, run the delete, otherwise notify them that nothing was done.
if result:
self.controller.AudioManager.DeleteEntry(elementName)
else:
tkShowInfo("Update!", "{0} has NOT been deleted!".format(elementName))
# Re-load the buttons
self.PageUpdate() |
Python | def renameElement(self, elementName):
"""Get the new name of an item and change it"""
# Update the UI then get the user inp and wait for them to interact with the pop up.
self.controller.update()
inp = widgets.GetInput(self.controller, question="New name: ", font=FONTS["l"])
self.controller.wait_window(inp.top)
# If there is no input data, then user cancelled. If not run the rename function in AudioManager
if inp.Data is None:
return None
else:
self.controller.AudioManager.RenameEntry(elementName, inp.Data)
# Re-load buttons
self.loadButtons(self.PageNumber) |
Python | def nextPage(self):
"""Move to the next page if there are elements there."""
# Update the buttons with the new page number, if it is successful, update the page number
if self.loadButtons(self.PageNumber+1):
self.PageNumber += 1
else:
# If you couldn't update, re-load the current page
self.loadButtons(self.PageNumber)
# Update the page number text value
self.PageText.configure(text="Page: " + str(self.PageNumber+1)) |
Python | def prevPage(self):
"""Move to the previous page if not already on the 1st (0th) page"""
# If the page number is greater than the minimum (0), then decrease it
if self.PageNumber > 0:
self.PageNumber -= 1
# Re-load the buttons
self.loadButtons(self.PageNumber)
# Update the page number text value
self.PageText.configure(text="Page: " + str(self.PageNumber+1)) |
Python | def _align_objs(objs, how="outer", sort=None):
"""Align a set of Series or Dataframe objects.
Parameters
----------
objs : list of DataFrame, Series, or Index
how : How to handle indexes on other axis (or axes),
similar to join in concat
sort : Whether to sort the resulting Index
Returns
-------
A list of reindexed and aligned objects
ready for concatenation
"""
# Check if multiindex then check if indexes match. GenericIndex
# returns ndarray tuple of bools requiring additional filter.
# Then check for duplicate index value.
i_objs = iter(objs)
first = next(i_objs)
not_matching_index = any(
not first.index.equals(rest.index) for rest in i_objs
)
if not_matching_index:
if not all(o.index.is_unique for o in objs):
raise ValueError("cannot reindex on an axis with duplicate labels")
index = objs[0].index
name = index.name
final_index = _get_combined_index(
[obj.index for obj in objs], intersect=how == "inner", sort=sort
)
final_index.name = name
return [
obj.reindex(final_index)
if not final_index.equals(obj.index)
else obj
for obj in objs
]
else:
if sort:
if not first.index.is_monotonic_increasing:
final_index = first.index.sort_values()
return [obj.reindex(final_index) for obj in objs]
return objs |
Python | def _normalize_series_and_dataframe(objs, axis):
"""Convert any cudf.Series objects in objs to DataFrames in place."""
# Default to naming series by a numerical id if they are not named.
sr_name = 0
for idx, o in enumerate(objs):
if isinstance(o, cudf.Series):
if axis == 1:
name = o.name
if name is None:
name = sr_name
sr_name += 1
else:
name = sr_name
objs[idx] = o.to_frame(name=name) |
Python | def _pivot(df, index, columns):
"""
Reorganize the values of the DataFrame according to the given
index and columns.
Parameters
----------
df : DataFrame
index : cudf.Index
Index labels of the result
columns : cudf.Index
Column labels of the result
"""
columns_labels, columns_idx = columns._encode()
index_labels, index_idx = index._encode()
column_labels = columns_labels.to_pandas().to_flat_index()
# the result of pivot always has a multicolumn
result = cudf.core.column_accessor.ColumnAccessor(
multiindex=True, level_names=(None,) + columns._data.names
)
def as_tuple(x):
return x if isinstance(x, tuple) else (x,)
for v in df:
names = [as_tuple(v) + as_tuple(name) for name in column_labels]
nrows = len(index_labels)
ncols = len(names)
num_elements = nrows * ncols
if num_elements > 0:
col = df._data[v]
scatter_map = (columns_idx * np.int32(nrows)) + index_idx
target = cudf.core.frame.Frame(
{
None: cudf.core.column.column_empty_like(
col, masked=True, newsize=nrows * ncols
)
}
)
target._data[None][scatter_map] = col
result_frames = target._split(range(nrows, nrows * ncols, nrows))
result.update(
{
name: next(iter(f._columns))
for name, f in zip(names, result_frames)
}
)
return cudf.DataFrame._from_data(
result, index=cudf.Index(index_labels, name=index.name)
) |
Python | def pivot(data, index=None, columns=None, values=None):
"""
Return reshaped DataFrame organized by the given index and column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame.
Parameters
----------
index : column name, optional
Column used to construct the index of the result.
columns : column name, optional
Column used to construct the columns of the result.
values : column name or list of column names, optional
Column(s) whose values are rearranged to produce the result.
If not specified, all remaining columns of the DataFrame
are used.
Returns
-------
DataFrame
Examples
--------
>>> a = cudf.DataFrame()
>>> a['a'] = [1, 1, 2, 2]
>>> a['b'] = ['a', 'b', 'a', 'b']
>>> a['c'] = [1, 2, 3, 4]
>>> a.pivot(index='a', columns='b')
c
b a b
a
1 1 2
2 3 4
Pivot with missing values in result:
>>> a = cudf.DataFrame()
>>> a['a'] = [1, 1, 2]
>>> a['b'] = [1, 2, 3]
>>> a['c'] = ['one', 'two', 'three']
>>> a.pivot(index='a', columns='b')
c
b 1 2 3
a
1 one two <NA>
2 <NA> <NA> three
"""
df = data
if values is None:
values = df._columns_view(
col for col in df._column_names if col not in (index, columns)
)
else:
values = df._columns_view(values)
if index is None:
index = df.index
else:
index = cudf.core.index.Index(df.loc[:, index])
columns = cudf.Index(df.loc[:, columns])
# Create a DataFrame composed of columns from both
# columns and index
columns_index = {}
columns_index = {
i: col
for i, col in enumerate(
itertools.chain(index._data.columns, columns._data.columns)
)
}
columns_index = cudf.DataFrame(columns_index)
# Check that each row is unique:
if len(columns_index) != len(columns_index.drop_duplicates()):
raise ValueError("Duplicate index-column pairs found. Cannot reshape.")
return _pivot(values, index, columns) |
Python | def _get_unique(column, dummy_na):
"""
Returns unique values in a column, if
dummy_na is False, nan's are also dropped.
"""
if isinstance(column, cudf.core.column.CategoricalColumn):
unique = column.categories
else:
unique = column.unique()
if not dummy_na:
if np.issubdtype(unique.dtype, np.floating):
unique = unique.nans_to_nulls()
unique = unique.dropna()
return unique |
Python | def _one_hot_encode_column(
column: ColumnBase,
categories: ColumnBase,
prefix: Optional[str],
prefix_sep: Optional[str],
dtype: Optional[Dtype],
) -> Dict[str, ColumnBase]:
"""Encode a single column with one hot encoding. The return dictionary
contains pairs of (category, encodings). The keys may be prefixed with
`prefix`, separated with category name with `prefix_sep`. The encoding
columns maybe coerced into `dtype`.
"""
if isinstance(column, CategoricalColumn):
if column.size == column.null_count:
column = column_empty_like(categories, newsize=column.size)
else:
column = column._get_decategorized_column()
if column.size * categories.size >= np.iinfo("int32").max:
raise ValueError(
"Size limitation exceeded: column.size * category.size < "
"np.iinfo('int32').max. Consider reducing size of category"
)
data = one_hot_encode(column, categories)
if prefix is not None and prefix_sep is not None:
data = {f"{prefix}{prefix_sep}{col}": enc for col, enc in data.items()}
if dtype:
data = {k: v.astype(dtype) for k, v in data.items()}
return data |
Python | def _decode_type(
cls: Type,
header: dict,
frames: list,
is_valid_class: Callable[[Type, Type], bool] = operator.is_,
) -> Tuple[dict, list, Type]:
"""Decode metadata-encoded type and check validity
Parameters
----------
cls : type
class performing deserialization
header : dict
metadata for deserialization
frames : list
buffers containing data for deserialization
is_valid_class : Callable
function to call to check if the encoded class type is valid for
serialization by `cls` (default is to check type equality), called
as `is_valid_class(decoded_class, cls)`.
Returns
-------
tuple
Tuple of validated headers, frames, and the decoded class
constructor.
Raises
------
AssertionError
if the number of frames doesn't match the count encoded in the
headers, or `is_valid_class` is not true.
"""
assert header["frame_count"] == len(frames), (
f"Deserialization expected {header['frame_count']} frames, "
f"but received {len(frames)}."
)
klass = pickle.loads(header["type-serialized"])
assert is_valid_class(
klass, cls
), f"Header-encoded {klass=} does not match decoding {cls=}."
return header, frames, klass |
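A hedged sketch of the validation above with a toy class and a hand-built header. The ToyColumn class is an illustrative assumption, the "type-serialized" and "frame_count" keys come from the function itself, and the sketch assumes the function can be called directly with a class as its first argument, as its signature here suggests:

import pickle

class ToyColumn:
    pass

frames = [b"payload"]
header = {"type-serialized": pickle.dumps(ToyColumn), "frame_count": len(frames)}

# the default is_valid_class is identity, so decoding with the same class passes
hdr, frm, klass = _decode_type(ToyColumn, header, frames)
assert klass is ToyColumn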
Python | def is_categorical_dtype(obj):
"""Check whether an array-like or dtype is of the Categorical dtype.
Parameters
----------
obj : array-like or dtype
The array-like or dtype to check.
Returns
-------
bool
Whether or not the array-like or dtype is of a categorical dtype.
"""
if obj is None:
return False
if isinstance(
obj,
(
pd_CategoricalDtype,
cudf.CategoricalDtype,
cudf.core.index.CategoricalIndex,
cudf.core.column.CategoricalColumn,
pd.Categorical,
pd.CategoricalIndex,
),
):
return True
# Note that we cannot directly use `obj in (...)` because that triggers
# equality as well as identity checks and pandas extension dtypes won't
# allow converting that equality check to a boolean; `__nonzero__` is
# disabled because they treat dtypes as "array-like".
if any(
obj is t
for t in (
cudf.CategoricalDtype,
pd_CategoricalDtype,
pd_CategoricalDtypeType,
)
):
return True
if isinstance(obj, (np.ndarray, np.dtype)):
return False
if isinstance(obj, str) and obj == "category":
return True
if isinstance(
obj,
(
cudf.Index,
cudf.Series,
cudf.core.column.ColumnBase,
pd.Index,
pd.Series,
),
):
return is_categorical_dtype(obj.dtype)
if hasattr(obj, "type"):
if obj.type is pd_CategoricalDtypeType:
return True
# TODO: A lot of the above checks are probably redundant and should be
# farmed out to this function here instead.
return pd_types.is_categorical_dtype(obj) |
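Illustrative calls using pandas inputs, which the isinstance checks above accept alongside cudf objects; pandas inputs are used so no device data needs to be created, though running it still requires a cudf installation:

import pandas as pd

print(is_categorical_dtype(pd.Series(["a", "b"], dtype="category")))  # True
print(is_categorical_dtype("category"))                               # True
print(is_categorical_dtype(pd.Series([1, 2, 3])))                     # False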
Python | def is_list_dtype(obj):
"""Check whether an array-like or dtype is of the list dtype.
Parameters
----------
obj : array-like or dtype
The array-like or dtype to check.
Returns
-------
bool
Whether or not the array-like or dtype is of the list dtype.
"""
return (
type(obj) is cudf.core.dtypes.ListDtype
or obj is cudf.core.dtypes.ListDtype
or type(obj) is cudf.core.column.ListColumn
or obj is cudf.core.column.ListColumn
or (isinstance(obj, str) and obj == cudf.core.dtypes.ListDtype.name)
or (hasattr(obj, "dtype") and is_list_dtype(obj.dtype))
) |
Python | def is_struct_dtype(obj):
"""Check whether an array-like or dtype is of the struct dtype.
Parameters
----------
obj : array-like or dtype
The array-like or dtype to check.
Returns
-------
bool
Whether or not the array-like or dtype is of the struct dtype.
"""
# TODO: This behavior is currently inconsistent for interval types. The
# actual class IntervalDtype will return False, but instances (e.g.
# IntervalDtype(int)) will return True. For now this is not being changed
# since the interval dtype is being modified as part of the array refactor,
# but this behavior should be made consistent afterwards.
return (
isinstance(obj, cudf.core.dtypes.StructDtype)
or obj is cudf.core.dtypes.StructDtype
or (isinstance(obj, str) and obj == cudf.core.dtypes.StructDtype.name)
or (hasattr(obj, "dtype") and is_struct_dtype(obj.dtype))
) |
Python | def is_decimal_dtype(obj):
"""Check whether an array-like or dtype is of the decimal dtype.
Parameters
----------
obj : array-like or dtype
The array-like or dtype to check.
Returns
-------
bool
Whether or not the array-like or dtype is of the decimal dtype.
"""
return (
is_decimal32_dtype(obj)
or is_decimal64_dtype(obj)
or is_decimal128_dtype(obj)
) |
Python | def is_interval_dtype(obj):
"""Check whether an array-like or dtype is of the interval dtype.
Parameters
----------
obj : array-like or dtype
The array-like or dtype to check.
Returns
-------
bool
Whether or not the array-like or dtype is of the interval dtype.
"""
# TODO: Should there be any branch in this function that calls
# pd.api.types.is_interval_dtype?
return (
isinstance(
obj,
(
cudf.core.dtypes.IntervalDtype,
pd.core.dtypes.dtypes.IntervalDtype,
),
)
or obj is cudf.core.dtypes.IntervalDtype
or (
isinstance(obj, str) and obj == cudf.core.dtypes.IntervalDtype.name
)
or (hasattr(obj, "dtype") and is_interval_dtype(obj.dtype))
) |
Python | def values_host(self) -> "np.ndarray":
"""
Return a numpy representation of the Column.
"""
if len(self) == 0:
return np.array([], dtype=self.dtype)
if self.has_nulls():
raise ValueError("Column must have no nulls.")
return self.data_array_view.copy_to_host() |
Python | def values(self) -> "cupy.ndarray":
"""
Return a CuPy representation of the Column.
"""
if len(self) == 0:
return cupy.array([], dtype=self.dtype)
if self.has_nulls():
raise ValueError("Column must have no nulls.")
return cupy.asarray(self.data_array_view) |
Python | def nullmask(self) -> Buffer:
"""The gpu buffer for the null-mask"""
if not self.nullable:
raise ValueError("Column has no null mask")
return self.mask_array_view |
Python | def take(
self: T, indices: ColumnBase, nullify: bool = False, check_bounds=True
) -> T:
"""Return Column by taking values from the corresponding *indices*.
Skip bounds checking if check_bounds is False.
Set rows to null for all out of bound indices if nullify is `True`.
"""
# Handle zero size
if indices.size == 0:
return cast(T, column_empty_like(self, newsize=0))
# TODO: For performance, the check and conversion of gather map should
# be done by the caller. This check will be removed in future release.
if not is_integer_dtype(indices.dtype):
indices = indices.astype("int32")
if not libcudf.copying._gather_map_is_valid(
indices, len(self), check_bounds, nullify
):
raise IndexError("Gather map index is out of bounds.")
return libcudf.copying.gather([self], indices, nullify=nullify)[
0
]._with_type_metadata(self.dtype) |
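A conceptual NumPy sketch of the gather semantics described above: out-of-bounds indices either raise or become nulls when nullify is requested. Illustrative only; the actual work is done by libcudf.copying.gather on the GPU, and NaN stands in for null here:

import numpy as np

def gather_sketch(values, indices, nullify=False, check_bounds=True):
    values = np.asarray(values, dtype=float)
    indices = np.asarray(indices)
    out_of_bounds = (indices < -len(values)) | (indices >= len(values))
    if out_of_bounds.any():
        if not nullify and check_bounds:
            raise IndexError("Gather map index is out of bounds.")
        out = np.full(len(indices), np.nan)      # NaN marks nullified rows
        out[~out_of_bounds] = values[indices[~out_of_bounds]]
        return out
    return values[indices]

print(gather_sketch([10, 20, 30], [2, 0]))                # [30. 10.]
print(gather_sketch([10, 20, 30], [2, 5], nullify=True))  # [30. nan]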
Python | def isin(self, values: Sequence) -> ColumnBase:
"""Check whether values are contained in the Column.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a TypeError. Instead, turn a single string into a list
of one element.
Returns
-------
result: Column
Column of booleans indicating if each element is in values.
"""
try:
lhs, rhs = self._process_values_for_isin(values)
res = lhs._isin_earlystop(rhs)
if res is not None:
return res
except ValueError:
# pandas functionally returns all False when cleansing via
# typecasting fails
return full(len(self), False, dtype="bool")
return lhs._obtain_isin_result(rhs) |
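Typical use goes through the public Series API, which requires a CUDA-capable GPU; per the docstring above, a bare string should be wrapped in a list:

import cudf

s = cudf.Series(["a", "b", "c"])
print(s.isin(["a", "x"]).to_pandas().tolist())  # [True, False, False]
# s.isin("a") would raise; pass ["a"] instead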
Python | def _isin_earlystop(self, rhs: ColumnBase) -> Union[ColumnBase, None]:
"""
Helper function for `isin` that determines whether an
early-stop result can be returned.
"""
if self.dtype != rhs.dtype:
if self.null_count and rhs.null_count:
return self.isnull()
else:
return cudf.core.column.full(len(self), False, dtype="bool")
elif self.null_count == 0 and (rhs.null_count == len(rhs)):
return cudf.core.column.full(len(self), False, dtype="bool")
else:
return None |
Python | def as_mask(self) -> Buffer:
"""Convert booleans to bitmask
Returns
-------
Buffer
"""
if self.has_nulls():
raise ValueError("Column must have no nulls.")
return bools_to_mask(self) |
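A rough CPU analogue of the boolean-to-bitmask conversion: eight booleans packed per byte, least-significant bit first, matching Arrow-style validity masks. Illustrative only; bools_to_mask performs this on the GPU:

import numpy as np

bools = np.array([True, False, True, True, False, False, True, True, True])
mask = np.packbits(bools, bitorder="little")
print(mask)  # [205   1]  ->  0b11001101, 0b00000001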
Python | def unique(self) -> ColumnBase:
"""
Get unique values in the data
"""
# TODO: We could avoid performing `drop_duplicates` for
# columns with values that already are unique.
# Few things to note before we can do this optimization is
# the following issue resolved:
# https://github.com/rapidsai/cudf/issues/5286
return drop_duplicates([self], keep="first")[0] |
Python | def _reduce(
self, op: str, skipna: bool = None, min_count: int = 0, *args, **kwargs
) -> ScalarLike:
"""Compute {op} of column values.
skipna : bool
Whether or not na values must be skipped.
min_count : int, default 0
The minimum number of entries for the reduction, otherwise the
reduction returns NaN.
"""
preprocessed = self._process_for_reduction(
skipna=skipna, min_count=min_count
)
if isinstance(preprocessed, ColumnBase):
return libcudf.reduce.reduce(op, preprocessed, **kwargs)
return preprocessed |
Python | def _reduction_result_dtype(self, reduction_op: str) -> Dtype:
"""
Determine the correct dtype to pass to libcudf based on
the input dtype, data dtype, and specific reduction op
"""
return self.dtype |
Python | def column_empty(
row_count: int, dtype: Dtype = "object", masked: bool = False
) -> ColumnBase:
"""Allocate a new column like the given row_count and dtype."""
dtype = cudf.dtype(dtype)
children = () # type: Tuple[ColumnBase, ...]
if is_struct_dtype(dtype):
data = None
children = tuple(
column_empty(row_count, field_dtype)
for field_dtype in dtype.fields.values()
)
elif is_list_dtype(dtype):
data = None
children = (
full(row_count + 1, 0, dtype="int32"),
column_empty(row_count, dtype=dtype.element_type),
)
elif is_categorical_dtype(dtype):
data = None
children = (
build_column(
data=Buffer.empty(row_count * cudf.dtype("int32").itemsize),
dtype="int32",
),
)
elif dtype.kind in "OU" and not is_decimal_dtype(dtype):
data = None
children = (
full(row_count + 1, 0, dtype="int32"),
build_column(
data=Buffer.empty(row_count * cudf.dtype("int8").itemsize),
dtype="int8",
),
)
else:
data = Buffer.empty(row_count * dtype.itemsize)
if masked:
mask = create_null_mask(row_count, state=MaskState.ALL_NULL)
else:
mask = None
return build_column(
data, dtype, mask=mask, size=row_count, children=children
) |
Python | def build_column(
data: Union[Buffer, None],
dtype: Dtype,
*,
size: int = None,
mask: Buffer = None,
offset: int = 0,
null_count: int = None,
children: Tuple[ColumnBase, ...] = (),
) -> ColumnBase:
"""
Build a Column of the appropriate type from the given parameters
Parameters
----------
data : Buffer
The data buffer (can be None if constructing certain Column
types like StringColumn, ListColumn, or CategoricalColumn)
dtype
The dtype associated with the Column to construct
mask : Buffer, optional
The mask buffer
size : int, optional
offset : int, optional
children : tuple, optional
"""
dtype = cudf.dtype(dtype)
if _is_non_decimal_numeric_dtype(dtype):
assert data is not None
return cudf.core.column.NumericalColumn(
data=data,
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
)
if is_categorical_dtype(dtype):
if not len(children) == 1:
raise ValueError(
"Must specify exactly one child column for CategoricalColumn"
)
if not isinstance(children[0], ColumnBase):
raise TypeError("children must be a tuple of Columns")
return cudf.core.column.CategoricalColumn(
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
children=children,
)
elif dtype.type is np.datetime64:
if data is None:
raise TypeError("Must specify data buffer")
return cudf.core.column.DatetimeColumn(
data=data,
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
)
elif dtype.type is np.timedelta64:
if data is None:
raise TypeError("Must specify data buffer")
return cudf.core.column.TimeDeltaColumn(
data=data,
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
)
elif dtype.type in (np.object_, np.str_):
return cudf.core.column.StringColumn(
mask=mask,
size=size,
offset=offset,
children=children,
null_count=null_count,
)
elif is_list_dtype(dtype):
return cudf.core.column.ListColumn(
size=size,
dtype=dtype,
mask=mask,
offset=offset,
null_count=null_count,
children=children,
)
elif is_interval_dtype(dtype):
return cudf.core.column.IntervalColumn(
dtype=dtype,
mask=mask,
size=size,
offset=offset,
children=children,
null_count=null_count,
)
elif is_struct_dtype(dtype):
if size is None:
raise TypeError("Must specify size")
return cudf.core.column.StructColumn(
data=data,
dtype=dtype,
size=size,
offset=offset,
mask=mask,
null_count=null_count,
children=children,
)
elif is_decimal64_dtype(dtype):
if size is None:
raise TypeError("Must specify size")
return cudf.core.column.Decimal64Column(
data=data,
size=size,
offset=offset,
dtype=dtype,
mask=mask,
null_count=null_count,
children=children,
)
elif is_decimal32_dtype(dtype):
if size is None:
raise TypeError("Must specify size")
return cudf.core.column.Decimal32Column(
data=data,
size=size,
offset=offset,
dtype=dtype,
mask=mask,
null_count=null_count,
children=children,
)
elif is_decimal128_dtype(dtype):
if size is None:
raise TypeError("Must specify size")
return cudf.core.column.Decimal128Column(
data=data,
size=size,
offset=offset,
dtype=dtype,
mask=mask,
null_count=null_count,
children=children,
)
elif is_interval_dtype(dtype):
return cudf.core.column.IntervalColumn(
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
children=children,
)
else:
raise TypeError(f"Unrecognized dtype: {dtype}") | def build_column(
data: Union[Buffer, None],
dtype: Dtype,
*,
size: int = None,
mask: Buffer = None,
offset: int = 0,
null_count: int = None,
children: Tuple[ColumnBase, ...] = (),
) -> ColumnBase:
"""
Build a Column of the appropriate type from the given parameters
Parameters
----------
data : Buffer
The data buffer (can be None if constructing certain Column
types like StringColumn, ListColumn, or CategoricalColumn)
dtype
The dtype associated with the Column to construct
mask : Buffer, optional
The mask buffer
size : int, optional
offset : int, optional
children : tuple, optional
"""
dtype = cudf.dtype(dtype)
if _is_non_decimal_numeric_dtype(dtype):
assert data is not None
return cudf.core.column.NumericalColumn(
data=data,
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
)
if is_categorical_dtype(dtype):
if not len(children) == 1:
raise ValueError(
"Must specify exactly one child column for CategoricalColumn"
)
if not isinstance(children[0], ColumnBase):
raise TypeError("children must be a tuple of Columns")
return cudf.core.column.CategoricalColumn(
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
children=children,
)
elif dtype.type is np.datetime64:
if data is None:
raise TypeError("Must specify data buffer")
return cudf.core.column.DatetimeColumn(
data=data,
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
)
elif dtype.type is np.timedelta64:
if data is None:
raise TypeError("Must specify data buffer")
return cudf.core.column.TimeDeltaColumn(
data=data,
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
)
elif dtype.type in (np.object_, np.str_):
return cudf.core.column.StringColumn(
mask=mask,
size=size,
offset=offset,
children=children,
null_count=null_count,
)
elif is_list_dtype(dtype):
return cudf.core.column.ListColumn(
size=size,
dtype=dtype,
mask=mask,
offset=offset,
null_count=null_count,
children=children,
)
elif is_interval_dtype(dtype):
return cudf.core.column.IntervalColumn(
dtype=dtype,
mask=mask,
size=size,
offset=offset,
children=children,
null_count=null_count,
)
elif is_struct_dtype(dtype):
if size is None:
raise TypeError("Must specify size")
return cudf.core.column.StructColumn(
data=data,
dtype=dtype,
size=size,
offset=offset,
mask=mask,
null_count=null_count,
children=children,
)
elif is_decimal64_dtype(dtype):
if size is None:
raise TypeError("Must specify size")
return cudf.core.column.Decimal64Column(
data=data,
size=size,
offset=offset,
dtype=dtype,
mask=mask,
null_count=null_count,
children=children,
)
elif is_decimal32_dtype(dtype):
if size is None:
raise TypeError("Must specify size")
return cudf.core.column.Decimal32Column(
data=data,
size=size,
offset=offset,
dtype=dtype,
mask=mask,
null_count=null_count,
children=children,
)
elif is_decimal128_dtype(dtype):
if size is None:
raise TypeError("Must specify size")
return cudf.core.column.Decimal128Column(
data=data,
size=size,
offset=offset,
dtype=dtype,
mask=mask,
null_count=null_count,
children=children,
)
elif is_interval_dtype(dtype):
return cudf.core.column.IntervalColumn(
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
children=children,
)
else:
raise TypeError(f"Unrecognized dtype: {dtype}") |
Python | def _make_copy_replacing_NaT_with_null(column):
"""Return a copy with NaT values replaced with nulls."""
if np.issubdtype(column.dtype, np.timedelta64):
na_value = np.timedelta64("NaT", column.time_unit)
elif np.issubdtype(column.dtype, np.datetime64):
na_value = np.datetime64("NaT", column.time_unit)
else:
raise ValueError("This type does not support replacing NaT with null.")
null = column_empty_like(column, masked=True, newsize=1)
out_col = cudf._lib.replace.replace(
column,
build_column(
Buffer(np.array([na_value], dtype=column.dtype).view("|u1")),
dtype=column.dtype,
),
null,
)
return out_col |
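A CPU illustration of what the replacement achieves: NaT entries are the ones that become nulls. Here NaT is simply detected with np.isnat; the function above performs the equivalent substitution on the GPU via cudf's replace:

import numpy as np

arr = np.array(["2021-01-01", "NaT", "2021-01-03"], dtype="datetime64[ns]")
print(np.isnat(arr))  # [False  True False] -> positions that end up null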
Python | def _construct_array(
arbitrary: Any, dtype: Optional[Dtype]
) -> Union[np.ndarray, cupy.ndarray]:
"""
Construct a CuPy or NumPy array from `arbitrary`
"""
try:
dtype = dtype if dtype is None else cudf.dtype(dtype)
arbitrary = cupy.asarray(arbitrary, dtype=dtype)
except (TypeError, ValueError):
native_dtype = dtype
if (
dtype is None
and not cudf._lib.scalar._is_null_host_scalar(arbitrary)
and infer_dtype(arbitrary)
in (
"mixed",
"mixed-integer",
)
):
native_dtype = "object"
arbitrary = np.asarray(
arbitrary,
dtype=native_dtype
if native_dtype is None
else np.dtype(native_dtype),
)
return arbitrary |
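A simplified, CPU-only sketch of the fallback logic: try the requested dtype first, then fall back to object dtype when conversion fails. It mirrors the intent of the function above without cupy or the cudf scalar checks:

import numpy as np

def construct_array_sketch(arbitrary, dtype=None):
    try:
        return np.asarray(arbitrary, dtype=dtype)
    except (TypeError, ValueError):
        return np.asarray(arbitrary, dtype=object)

print(construct_array_sketch([1, 2, 3], dtype="int64").dtype)   # int64
print(construct_array_sketch([1, "a"], dtype="float64").dtype)  # object (fallback)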
Python | def arange(
start: Union[int, float],
stop: Union[int, float] = None,
step: Union[int, float] = 1,
dtype=None,
) -> cudf.core.column.NumericalColumn:
"""
Returns a column with evenly spaced values within a given interval.
Values are generated within the half-open interval [start, stop).
The first three arguments are mapped like the range built-in function,
i.e. start and step are optional.
Parameters
----------
start : int/float
Start of the interval.
stop : int/float, default is None
Stop of the interval.
step : int/float, default 1
Step width between each pair of consecutive values.
dtype : default None
Data type specifier. It is inferred from other arguments by default.
Returns
-------
cudf.core.column.NumericalColumn
Examples
--------
>>> import cudf
>>> col = cudf.core.column.arange(2, 7, 1, dtype='int16')
>>> col
<cudf.core.column.numerical.NumericalColumn object at 0x7ff7998f8b90>
>>> cudf.Series(col)
0 2
1 3
2 4
3 5
4 6
dtype: int16
"""
if stop is None:
stop = start
start = 0
if step is None:
step = 1
size = len(range(int(start), int(stop), int(step)))
if size == 0:
return as_column([], dtype=dtype)
return libcudf.filling.sequence(
size,
as_device_scalar(start, dtype=dtype),
as_device_scalar(step, dtype=dtype),
) |
Python | def import_tracks(path, numFrames, export=False):
"""
Import all tracked paths (using blender motionExport.py) from specified folder and join them to a single array.
Optionally, allows for export of created array containing all tracks into single .csv file
:param path: location of exported .csv tracks
:param numFrames: number of total analysed frames
:param export: boolean, writes .csv file of all combined tracks if True
:return: array of all imported tracks; rows: frames, columns: X / Y coordinates of each individual track.
The first column consists of the frame numbers for easier readability if exported as a single file.
"""
print("importing tracks...")
files = []
tracks = np.empty([numFrames + 1, 1]) # create array for all tracks
tracks[:, 0] = np.arange(start=1, stop=numFrames + 2, step=1, dtype=int) # insert frame numbers
imported = 0
# r=root, d=directories, f = files
for r, d, f in os.walk(path):
for file in f:
if '.csv' in file:
files.append(os.path.join(r, file))
# for each new track create two "zeros" columns
# zeros are handled as nonexistent instances
tracks = np.append(tracks, np.zeros([numFrames + 1, 2]), axis=1)
with open(files[imported]) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=';')
line_count = 0
next(csv_reader, None) # skip the headers
for row in csv_reader:
# tracks.insert())
tracks[int(row[0]) - 1, imported * 2 + 1] = int(row[1])
tracks[int(row[0]) - 1, imported * 2 + 2] = int(row[2])
line_count += 1
print("imported", str(file), f' with {line_count} points.')
imported += 1
tracks = tracks.astype(int)
if export:
export_path = path + "_all_tracks.csv"
np.savetxt(export_path, tracks, delimiter=",")
print("\nSuccessfully combined the tracks of", imported, "individuals for training and display!")
return tracks |
Python | def display_video(cap, tracks, show=(0, math.inf), scale=1.0, target_size=100):
"""
Function displays imported footage with tracking results as overlay
:param cap: Imported video file
:param tracks: all imported tracks as a single array, created with import_tracks
:param show: tuple of desired displayed frames
:param scale: single float to up- or downscale resolution of display
"""
tracks = (scale * tracks).astype(int) # rescale pixel values of tracks
# frame counter
frame_num = show[0]
# define the size of each tracking rectangle
target_size *= scale
# get frame rate of imported footage
fps = cap.get(cv2.CAP_PROP_FPS)
# fix the seed for the same set of randomly assigned colours for each track
np.random.seed(seed=0)
colours = np.random.randint(low=0, high=255, size=((math.floor(((tracks.shape[1]) - 1) / 2)), 3))
print("\nDisplaying tracked footage!\npress 'q' to end display")
# skip to desired start frame
# Property identifier of cv2.CV_CAP_PROP_POS_FRAMES is 1, thus the first entry is 1
cap.set(1, show[0])
# set font for info display on frame
font = cv2.FONT_HERSHEY_SIMPLEX
while True: # run until no more frames are available
time_prev = time.time()
# return single frame (ret = boolean, frame = image)
ret, frame = cap.read()
if not ret:
break
# scale down the video
new_height = int(np.shape(frame)[0] * scale)
new_width = int(np.shape(frame)[1] * scale)
frame = cv2.resize(frame, (new_width, new_height))
# iterate through all columns and draw rectangles for all non 0 values
for track in range(math.floor(((tracks.shape[1]) - 1) / 2)):
if tracks[frame_num, track * 2 + 1] != 0:
# the tracks are read as centres
target_centre = np.asarray([tracks[frame_num, track * 2 + 1], tracks[frame_num, track * 2 + 2]])
# invert y axis, to fit openCV convention ( lower left -> (x=0,y=0) )
target_centre[1] = new_height - target_centre[1]
# define the starting and ending point of the bounding box rectangle, defined by "target_size"
px_start = target_centre - np.asarray([math.floor(target_size / 2), math.floor(target_size / 2)])
px_end = target_centre + np.asarray([math.floor(target_size / 2), math.floor(target_size / 2)])
# draw the defined rectangle of the track on top of the frame
cv2.rectangle(frame, (px_start[0], px_start[1]), (px_end[0], px_end[1]),
(int(colours[track, 0]), int(colours[track, 1]), int(colours[track, 2])), 2)
# write out track number of each active track
cv2.putText(frame, "track: " + str(track),
(int(target_centre[0] - target_size / 2), int(target_centre[1] - target_size / 2 - 10)),
font, 0.3, (int(colours[track, 0]), int(colours[track, 1]), int(colours[track, 2])), 1,
cv2.LINE_AA)
cv2.putText(frame, "frame: " + str(frame_num), (int(new_width / 2) - 100, 35),
font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
cv2.imshow('original frame', frame)
if frame_num > show[1]:
break
# enforce constant frame rate during display
time_to_process = (time.time() - time_prev) # compute elapsed time to enforce constant frame rate (if possible)
if time_to_process < 1 / fps:
time.sleep((1 / fps) - time_to_process)
# press q to quit, i.e. exit the display
if cv2.waitKey(1) & 0xFF == ord('q'):
break
frame_num += 1
cv2.destroyAllWindows()
# always reset frame from capture at the end to avoid incorrect skips during access
cap.set(1, 0)
print("\nReached last frame of specified video or ended by user input.\n") | def display_video(cap, tracks, show=(0, math.inf), scale=1.0, target_size=100):
"""
Function displays imported footage with tracking results as overlay
:param cap: Imported video file
:param tracks: all imported tracks as a single array, created with import_tracks
:param show: tuple of desired displayed frames
:param scale: single float to up- or downscale resolution of display
"""
tracks = (scale * tracks).astype(int) # rescale pixel values of tracks
# frame counter
frame_num = show[0]
# define the size of each tracking rectangle
target_size *= scale
# get frame rate of imported footage
fps = cap.get(cv2.CAP_PROP_FPS)
# fix the seed for the same set of randomly assigned colours for each track
np.random.seed(seed=0)
colours = np.random.randint(low=0, high=255, size=((math.floor(((tracks.shape[1]) - 1) / 2)), 3))
print("\nDisplaying tracked footage!\npress 'q' to end display")
# skip to desired start frame
# Property identifier of cv2.CV_CAP_PROP_POS_FRAMES is 1, thus the first entry is 1
cap.set(1, show[0])
# set font from info display on frame
font = cv2.FONT_HERSHEY_SIMPLEX
while True: # run until no more frames are available
time_prev = time.time()
# return single frame (ret = boolean, frame = image)
ret, frame = cap.read()
if not ret:
break
# scale down the video
new_height = int(np.shape(frame)[0] * scale)
new_width = int(np.shape(frame)[1] * scale)
frame = cv2.resize(frame, (new_width, new_height))
# iterate through all columns and draw rectangles for all non 0 values
for track in range(math.floor(((tracks.shape[1]) - 1) / 2)):
if tracks[frame_num, track * 2 + 1] != 0:
# the tracks are read as centres
target_centre = np.asarray([tracks[frame_num, track * 2 + 1], tracks[frame_num, track * 2 + 2]])
# invert y axis, to fit openCV convention ( lower left -> (x=0,y=0) )
target_centre[1] = new_height - target_centre[1]
# define the starting and ending point of the bounding box rectangle, defined by "target_size"
px_start = target_centre - np.asarray([math.floor(target_size / 2), math.floor(target_size / 2)])
px_end = target_centre + np.asarray([math.floor(target_size / 2), math.floor(target_size / 2)])
# draw the defined rectangle of the track on top of the frame
cv2.rectangle(frame, (px_start[0], px_start[1]), (px_end[0], px_end[1]),
(int(colours[track, 0]), int(colours[track, 1]), int(colours[track, 2])), 2)
# write out track number of each active track
cv2.putText(frame, "track: " + str(track),
(int(target_centre[0] - target_size / 2), int(target_centre[1] - target_size / 2 - 10)),
font, 0.3, (int(colours[track, 0]), int(colours[track, 1]), int(colours[track, 2])), 1,
cv2.LINE_AA)
cv2.putText(frame, "frame: " + str(frame_num), (int(new_width / 2) - 100, 35),
font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
cv2.imshow('original frame', frame)
if frame_num > show[1]:
break
# enforce constant frame rate during display
time_to_process = (time.time() - time_prev) # compute elapsed time to enforce constant frame rate (if possible)
if time_to_process < 1 / fps:
time.sleep((1 / fps) - time_to_process)
# press q to quit, i.e. exit the display
if cv2.waitKey(1) & 0xFF == ord('q'):
break
frame_num += 1
cv2.destroyAllWindows()
# always reset frame from capture at the end to avoid incorrect skips during access
cap.set(1, 0)
print("\nReached last frame of specified video or ended by user input.\n") |
Python | def extractPatches(frame_no, frames, tracks, patch_size=128, BW=True):
"""
extracts images patches for stacks and detections during TRAINING
:param frame_no: desired frame of NEW detections
:param frames: list of frames
:param tracks: array of all labelled tracks imported
:param patch_size: resolution (width / height in px) of extracted patches
:param BW: Boolean, if True returns the patches in black and white
:return: stacks of previous tracks and detections of new instances
"""
stacks = []
stacks_label = []
stacks_pos = []
detections = []
detections_label = []
detections_pos = []
# convert images to black and white if required
if BW:
for img in range(len(frames)):
frames[img] = cv2.cvtColor(frames[img], cv2.COLOR_BGR2GRAY)
blank_image = np.zeros((patch_size, patch_size), np.uint8)
else:
# coloured images require 3 channels
blank_image = np.zeros((patch_size, patch_size, 3), np.uint8)
# by default no images should be blank, exception occurs at the beginning of the footage when there are only
# detections and no initialized tracks yet.
num_empty_img = 0
# insert blank images to fill stacks which would otherwise be too small
blank_stack = []
if frame_no - len(frames) < 0:
num_empty_img = len(frames) - frame_no
for img in range(num_empty_img):
blank_stack.append(blank_image)
# iterate over all available tracks, step size of two, as X and Y are given
for track in range(1, tracks.shape[1], 2):
stack = []
pos = []
no_detection = 0
# iterate over all imported frames
for img in range(len(frames) - num_empty_img):
if tracks[frame_no + (img - len(frames) + num_empty_img), track] != 0:
# the tracks are read as centres
target_centre = np.asarray([tracks[frame_no + (img - len(frames) + num_empty_img), track],
tracks[frame_no + (img - len(frames) + num_empty_img), track + 1]])
# invert y axis, to fit openCV convention ( lower left -> (x=0,y=0) )
target_centre[1] = frames[0].shape[0] - target_centre[1]
# define the starting and ending point of the bounding box rectangle, defined by "target_size"
px_start = target_centre - np.asarray([math.floor(patch_size / 2), math.floor(patch_size / 2)])
px_end = target_centre + np.asarray([math.floor(patch_size / 2), math.floor(patch_size / 2)])
# extract the defined rectangle of the track from the frame and save to the stack
stack.append(frames[img][px_start[1]:px_end[1], px_start[0]:px_end[0]])
# save the position of each patch within the stack
pos.append(target_centre)
else:
# if no detection can be found, insert a black image instead
stack.append(blank_image)
# in case of a blank image / no defined patch the position is set to 0,0
pos.append((0, 0))
no_detection += 1
# only return stacks which are active (i.e. at least one detection)
if no_detection != len(frames):
# add stack label to identify the stack later on
label = int(track / 2 + 0.5)
# set the newest entry of the stack as a detection for training purposes, retaining the label
if np.bitwise_xor(stack[-1], blank_image).any():
detections.append(stack[-1])
detections_label.append(label)
detections_pos.append(pos[-1])
# remove the last entry of the stack, to only have it represented as a new detection
del stack[-1]
del pos[-1]
# only return stacks if they are not empty without the newest detection
if no_detection + 1 != len(frames):
stacks.append(stack)
stacks_label.append(label)
stacks_pos.append(pos)
# convert all outputs to numpy arrays
stacks = np.array(stacks)
stacks_label = np.array(stacks_label)
stacks_pos = np.array(stacks_pos)
detections = np.array(detections)
detections_label = np.array(detections_label)
detections_pos = np.array(detections_pos)
return stacks, stacks_label, stacks_pos, detections, detections_label, detections_pos |
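A sketch of assembling a short frame buffer ending just before the detection frame and extracting training patches; the file name is a placeholder, tracks is assumed to come from import_tracks as above, and the buffer length of 5 is an arbitrary illustrative choice:

cap = cv2.VideoCapture("recording.mp4")
frame_no, buffer_len = 100, 5
cap.set(1, frame_no - buffer_len)        # buffer covers frames 95..99
frames = [cap.read()[1] for _ in range(buffer_len)]
stacks, s_lbl, s_pos, dets, d_lbl, d_pos = extractPatches(frame_no, frames, tracks)
print(len(stacks), len(dets))            # active stacks vs. new detections at frame 100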
Python | def create(path=None):
"""# Generate a new key pair
Generates a secp256k1 ECDSA private/public key pair to be used in the API authentications
## Parameters (optional):
path [string]: path to save the keys .pem files. No files will be saved if this parameter isn't provided
## Return:
private and public key pems
"""
private = PrivateKey()
public = private.publicKey()
private_pem = private.toPem()
public_pem = public.toPem()
if path is not None:
if not os_path.exists(path):
makedirs(path)
with open(os_path.join(path, "private-key.pem"), "w") as file:
file.write(private_pem)
with open(os_path.join(path, "public-key.pem"), "w") as file:
file.write(public_pem)
return private_pem, public_pem |
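Example usage: generate a key pair in memory, or pass a directory (a placeholder name here) to also write the PEM files to disk:

private_pem, public_pem = create()             # PEM strings only, nothing written
private_pem, public_pem = create(path="keys")  # also writes keys/private-key.pem and keys/public-key.pem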
Python | def openTunTap():
'''
\brief Open a TUN/TAP interface and switch it to TUN mode.
\return The handler of the interface, which can be used for later
read/write operations.
'''
# retrieve the ComponentId from the TUN/TAP interface
componentId = get_tuntap_ComponentId()
print('componentId = {0}'.format(componentId))
# create a win32file for manipulating the TUN/TAP interface
tuntap = win32file.CreateFile(
r'\\.\Global\%s.tap' % componentId,
win32file.GENERIC_READ | win32file.GENERIC_WRITE,
win32file.FILE_SHARE_READ | win32file.FILE_SHARE_WRITE,
None,
win32file.OPEN_EXISTING,
win32file.FILE_ATTRIBUTE_SYSTEM | win32file.FILE_FLAG_OVERLAPPED,
None
)
print('tuntap = {0}'.format(tuntap.handle))
# have Windows consider the interface now connected
win32file.DeviceIoControl(
tuntap,
TAP_IOCTL_SET_MEDIA_STATUS,
'\x00\x00\x00\x00',
None
)
# prepare the parameter passed to the TAP_IOCTL_CONFIG_TUN command.
# This needs to be a 12-character long string representing
# - the tun interface's IPv4 address (4 characters)
# - the tun interface's IPv4 network address (4 characters)
# - the tun interface's IPv4 network mask (4 characters)
configTunParam = []
configTunParam += TUN_IPv4_ADDRESS
configTunParam += TUN_IPv4_NETWORK
configTunParam += TUN_IPv4_NETMASK
configTunParam = ''.join([chr(b) for b in configTunParam])
# switch to TUN mode (by default the interface runs in TAP mode)
win32file.DeviceIoControl(
tuntap,
TAP_IOCTL_CONFIG_TUN,
configTunParam,
None
)
# return the handler of the TUN interface
return tuntap |