Dataset columns:
body: string, length 26 to 98.2k
body_hash: int64, -9,222,864,604,528,158,000 to 9,221,803,474B
docstring: string, length 1 to 16.8k
path: string, length 5 to 230
name: string, length 1 to 96
repository_name: string, length 7 to 89
lang: string, 1 distinct value (python)
body_without_docstring: string, length 20 to 98.2k

Sample records follow. Each record lists the body field first, then body_hash, docstring, path, name, repository_name, lang, and body_without_docstring.
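A minimal sketch of loading and inspecting records with the schema above, using the Hugging Face datasets library. The dataset identifier and split name are hypothetical placeholders, since this extract does not name the dataset.

# Minimal sketch, assuming the column names listed above.
# "example-org/python-functions" is a placeholder id; substitute the real repository id.
from datasets import load_dataset

ds = load_dataset("example-org/python-functions", split="train")  # split name assumed

row = ds[0]
print(row["name"], "from", row["repository_name"], "at", row["path"])
print(row["lang"])                     # single class: python
print(row["docstring"][:120])          # flattened docstring text
print(row["body"][:200])               # full function source, docstring included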
def load_gt_obj(self): ' load bbox ground truth from files either via the provided label directory or list of label files' files = os.listdir(self.label_dir) files = list(filter((lambda x: x.endswith(self.label_ext)), files)) if (len(files) == 0): raise RuntimeError(('error: no label files found in %s' % self.label_dir)) for label_file in files: objects_per_image = list() with open(os.path.join(self.label_dir, label_file), 'rt') as flabel: for row in csv.reader(flabel, delimiter=self.label_delimiter): if (len(row) == 0): continue if (len(row) < 15): raise ValueError(('Invalid label format in "%s"' % os.path.join(self.label_dir, label_file))) gt = GroundTruthObj() gt.stype = row[0].lower() gt.truncated = float(row[1]) gt.occlusion = int(row[2]) gt.angle = float(row[3]) gt.bbox.xl = float(row[4]) gt.bbox.yt = float(row[5]) gt.bbox.xr = float(row[6]) gt.bbox.yb = float(row[7]) gt.height = float(row[8]) gt.width = float(row[9]) gt.length = float(row[10]) gt.locx = float(row[11]) gt.locy = float(row[12]) gt.locz = float(row[13]) gt.roty = float(row[14]) gt.set_type() box_dimensions = [(gt.bbox.xr - gt.bbox.xl), (gt.bbox.yb - gt.bbox.yt)] if (self.min_box_size is not None): if (not all(((x >= self.min_box_size) for x in box_dimensions))): gt.stype = '' gt.object = ObjectType.Dontcare objects_per_image.append(gt) key = os.path.splitext(label_file)[0] self.update_objects_all(key, objects_per_image)
body_hash: -6,172,390,386,588,916,000
docstring:
load bbox ground truth from files either via the provided label directory or list of label files
path: digits/extensions/data/objectDetection/utils.py
name: load_gt_obj
repository_name: dcmartin/digits
lang: python
body_without_docstring:
def load_gt_obj(self): ' ' files = os.listdir(self.label_dir) files = list(filter((lambda x: x.endswith(self.label_ext)), files)) if (len(files) == 0): raise RuntimeError(('error: no label files found in %s' % self.label_dir)) for label_file in files: objects_per_image = list() with open(os.path.join(self.label_dir, label_file), 'rt') as flabel: for row in csv.reader(flabel, delimiter=self.label_delimiter): if (len(row) == 0): continue if (len(row) < 15): raise ValueError(('Invalid label format in "%s"' % os.path.join(self.label_dir, label_file))) gt = GroundTruthObj() gt.stype = row[0].lower() gt.truncated = float(row[1]) gt.occlusion = int(row[2]) gt.angle = float(row[3]) gt.bbox.xl = float(row[4]) gt.bbox.yt = float(row[5]) gt.bbox.xr = float(row[6]) gt.bbox.yb = float(row[7]) gt.height = float(row[8]) gt.width = float(row[9]) gt.length = float(row[10]) gt.locx = float(row[11]) gt.locy = float(row[12]) gt.locz = float(row[13]) gt.roty = float(row[14]) gt.set_type() box_dimensions = [(gt.bbox.xr - gt.bbox.xl), (gt.bbox.yb - gt.bbox.yt)] if (self.min_box_size is not None): if (not all(((x >= self.min_box_size) for x in box_dimensions))): gt.stype = '' gt.object = ObjectType.Dontcare objects_per_image.append(gt) key = os.path.splitext(label_file)[0] self.update_objects_all(key, objects_per_image)
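In the record above, body_without_docstring blanks the docstring to ' ' rather than deleting it. Below is a hedged sketch of one way such a field could be derived; this is an assumption, not the dataset's documented preprocessing, and it presumes the original multi-line source (the bodies in this extract have had their newlines flattened onto single lines).

# Hedged sketch: derive a body_without_docstring-style value from a function's source.
# Assumes ordinary multi-line Python source and Python 3.9+ (for ast.unparse).
import ast

def strip_docstring(body: str) -> str:
    tree = ast.parse(body)
    func = tree.body[0]  # the record's single function definition
    first = func.body[0] if func.body else None
    # A docstring is a leading string-constant expression statement.
    if isinstance(first, ast.Expr) and isinstance(first.value, ast.Constant) and isinstance(first.value.value, str):
        first.value.value = ' '  # blank it out rather than remove it, matching the record above
    return ast.unparse(tree)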
def generate_mesh(meshing_dir, params): '\n Launch Mesh Generator to generate mesh.\n\n @param meshing_dir: the meshing directory\n @param params: the meshing parameters\n @return: the mesh generation log content\n @raise TypeError: if any input parameter is not of required type\n @raise ValueError: if any input parameter is None/empty, or any field of MeshingParameters is not\n of valid value\n @raise ServiceError: if error occurs during generating mesh\n ' signature = (__name__ + '.generate_mesh()') helper.log_entrance(_LOGGER, signature, {'meshing_dir': meshing_dir, 'params': params}) helper.check_not_none_nor_empty(meshing_dir, 'meshing_dir') helper.check_is_directory(meshing_dir, 'meshing_dir') helper.check_type_value(params, 'params', MeshingParameters, False) helper.check_not_none_nor_empty(params.infile, 'params.infile') helper.check_is_file(params.infile, 'params.infile') helper.check_not_none_nor_empty(params.outfile, 'params.outfile') helper.check_not_none_nor_empty(params.maxh, 'params.maxh') helper.check_not_none_nor_empty(params.minh, 'params.minh') helper.check_not_none_nor_empty(params.fineness, 'params.fineness') helper.check_not_none_nor_empty(params.grading, 'params.grading') helper.check_not_none_nor_empty(params.usetolerance, 'params.usetolerance') if (params.usetolerance == '1'): helper.check_not_none_nor_empty(params.tolerance, 'params.tolerance') try: config_file_path = os.path.join(meshing_dir, _CONFIG_FILE_NAME) log_file_path = os.path.join(meshing_dir, _LOG_FILE_NAME) with open(config_file_path, 'w') as f: f.write('\n'.join((('%s: %s' % item) for item in vars(params).items() if (item[1] is not None)))) with open(log_file_path, 'w') as log_file: _LOGGER.debug('Start mesh generator in subprocess.') subprocess.call(MESH_GENERATOR_BIN, cwd=meshing_dir, stdout=log_file) _LOGGER.debug('End mesh generator in subprocess.') with open(log_file_path, 'r') as log_file: ret = log_file.read() helper.log_exit(_LOGGER, signature, [ret]) return ret except Exception as e: helper.log_exception(_LOGGER, signature, e) raise ServiceError(('Error occurs when generating mesh. Caused by:\n' + unicode(str(e))))
body_hash: -555,151,353,747,243,800
docstring:
Launch Mesh Generator to generate mesh. @param meshing_dir: the meshing directory @param params: the meshing parameters @return: the mesh generation log content @raise TypeError: if any input parameter is not of required type @raise ValueError: if any input parameter is None/empty, or any field of MeshingParameters is not of valid value @raise ServiceError: if error occurs during generating mesh
path: source/openwarpgui/openwarp/services.py
name: generate_mesh
repository_name: rhydar/Test
lang: python
body_without_docstring:
def generate_mesh(meshing_dir, params): '\n Launch Mesh Generator to generate mesh.\n\n @param meshing_dir: the meshing directory\n @param params: the meshing parameters\n @return: the mesh generation log content\n @raise TypeError: if any input parameter is not of required type\n @raise ValueError: if any input parameter is None/empty, or any field of MeshingParameters is not\n of valid value\n @raise ServiceError: if error occurs during generating mesh\n ' signature = (__name__ + '.generate_mesh()') helper.log_entrance(_LOGGER, signature, {'meshing_dir': meshing_dir, 'params': params}) helper.check_not_none_nor_empty(meshing_dir, 'meshing_dir') helper.check_is_directory(meshing_dir, 'meshing_dir') helper.check_type_value(params, 'params', MeshingParameters, False) helper.check_not_none_nor_empty(params.infile, 'params.infile') helper.check_is_file(params.infile, 'params.infile') helper.check_not_none_nor_empty(params.outfile, 'params.outfile') helper.check_not_none_nor_empty(params.maxh, 'params.maxh') helper.check_not_none_nor_empty(params.minh, 'params.minh') helper.check_not_none_nor_empty(params.fineness, 'params.fineness') helper.check_not_none_nor_empty(params.grading, 'params.grading') helper.check_not_none_nor_empty(params.usetolerance, 'params.usetolerance') if (params.usetolerance == '1'): helper.check_not_none_nor_empty(params.tolerance, 'params.tolerance') try: config_file_path = os.path.join(meshing_dir, _CONFIG_FILE_NAME) log_file_path = os.path.join(meshing_dir, _LOG_FILE_NAME) with open(config_file_path, 'w') as f: f.write('\n'.join((('%s: %s' % item) for item in vars(params).items() if (item[1] is not None)))) with open(log_file_path, 'w') as log_file: _LOGGER.debug('Start mesh generator in subprocess.') subprocess.call(MESH_GENERATOR_BIN, cwd=meshing_dir, stdout=log_file) _LOGGER.debug('End mesh generator in subprocess.') with open(log_file_path, 'r') as log_file: ret = log_file.read() helper.log_exit(_LOGGER, signature, [ret]) return ret except Exception as e: helper.log_exception(_LOGGER, signature, e) raise ServiceError(('Error occurs when generating mesh. Caused by:\n' + unicode(str(e))))
def simulate(simulation_dir, params): '\n Run simulation.\n\n @param simulation_dir: the simulation directory\n @param params: the simulation parameters\n @return: the simulation log content\n @raise TypeError: if any input parameter is not of required type\n @raise ValueError: if any input parameter is None/empty, or any field of SimulationParameters is not\n of valid value\n @raise ServiceError: if any other error occurred when launching the simulation\n ' signature = (__name__ + '.simulate()') helper.log_entrance(_LOGGER, signature, {'simulation_dir': simulation_dir, 'params': params}) helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir') helper.check_is_directory(simulation_dir, 'simulation_dir') helper.check_type_value(params, 'params', SimulationParameters, False) helper.check_not_none_nor_empty(params.rho, 'params.rho') helper.check_not_none_nor_empty(params.g, 'params.g') helper.check_not_none_nor_empty(params.depth, 'params.depth') helper.check_not_none_nor_empty(params.xeff, 'params.xeff') helper.check_not_none_nor_empty(params.yeff, 'params.yeff') helper.check_not_none_nor_empty(params.wave_frequencies, 'params.wave_frequencies') helper.check_not_none_nor_empty(params.min_wave_frequencies, 'params.min_wave_frequencies') helper.check_not_none_nor_empty(params.max_wave_frequencies, 'params.max_wave_frequencies') helper.check_not_none_nor_empty(params.wave_directions, 'params.wave_directions') helper.check_not_none_nor_empty(params.min_wave_directions, 'params.min_wave_directions') helper.check_not_none_nor_empty(params.max_wave_direction, 'params.max_wave_direction') helper.check_not_none_nor_empty(params.indiq_solver, 'params.indiq_solver') helper.check_not_none_nor_empty(params.ires, 'params.ires') helper.check_not_none_nor_empty(params.tol_gmres, 'params.tol_gmres') helper.check_not_none_nor_empty(params.max_iterations, 'params.max_iterations') helper.check_not_none_nor_empty(params.save_potential, 'params.save_potential') helper.check_not_none_nor_empty(params.green_tabulation_numx, 'params.green_tabulation_numx') helper.check_not_none_nor_empty(params.green_tabulation_numz, 'params.green_tabulation_numz') helper.check_not_none_nor_empty(params.green_tabulation_simpson_npoints, 'params.green_tabulation_simpson_npoints') helper.check_not_none_nor_empty(params.use_ode_influence_coefficients, 'params.use_ode_influence_coefficients') helper.check_not_none_nor_empty(params.use_higher_order, 'params.use_higher_order') helper.check_not_none_nor_empty(params.num_panel_higher_order, 'params.num_panel_higher_order') helper.check_not_none_nor_empty(params.b_spline_order, 'params.b_spline_order') helper.check_not_none_nor_empty(params.use_dipoles_implementation, 'params.use_dipoles_implementation') helper.check_not_none_nor_empty(params.thin_panels, 'params.thin_panels') helper.check_not_none_nor_empty(params.compute_drift_forces, 'params.compute_drift_forces') helper.check_not_none_nor_empty(params.remove_irregular_frequencies, 'params.remove_irregular_frequencies') helper.check_not_none_nor_empty(params.compute_yaw_moment, 'params.compute_yaw_moment') helper.check_type_value(params.floating_bodies, 'params.floating_bodies', list, True) if (params.floating_bodies is not None): for body in params.floating_bodies: helper.check_type_value(body, 'params.floating_bodies item', FloatingBody, False) helper.check_not_none_nor_empty(body.mesh_file, 'body.mesh_file') helper.check_not_none_nor_empty(body.points, 'body.points') helper.check_not_none_nor_empty(body.panels, 'body.panels') 
helper.check_not_none_nor_empty(body.degrees_of_freedom, 'body.degrees_of_freedom') helper.check_not_none_nor_empty(body.resulting_generalised_forces, 'body.resulting_generalised_forces') helper.check_not_none_nor_empty(body.additional_info_lines, 'body.additional_info_lines') try: with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), 'a') as hdf5_data: utility.write_calculations(params, hdf5_data) os.mkdir(os.path.join(simulation_dir, 'results')) simulation_log_path = os.path.join(simulation_dir, 'simulation_log.txt') custom_config = {'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'), 'NEMOH_CALCULATIONS_FILE': None, 'NEMOH_INPUT_FILE': None, 'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'), 'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'), 'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'), 'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'), 'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'), 'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'), 'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'), 'GREEN_TABULATION_NUMX': int(params.green_tabulation_numx), 'GREEN_TABULATION_NUMZ': int(params.green_tabulation_numz), 'GREEN_TABULATION_SIMPSON_NPOINTS': int(params.green_tabulation_simpson_npoints), 'USE_ODE_INFLUENCE_COEFFICIENTS': bool(int(params.use_ode_influence_coefficients)), 'USE_HIGHER_ORDER': bool(int(params.use_higher_order)), 'NUM_PANEL_HIGHER_ORDER': int(params.num_panel_higher_order), 'B_SPLINE_ORDER': int(params.b_spline_order), 'USE_DIPOLES_IMPLEMENTATION': bool(int(params.use_dipoles_implementation)), 'THIN_PANELS': [int(i) for i in params.thin_panels.split()], 'COMPUTE_DRIFT_FORCES': bool(int(params.compute_drift_forces)), 'COMPUTE_YAW_MOMENT': bool(int(params.compute_yaw_moment)), 'REMOVE_IRREGULAR_FREQUENCIES': bool(int(params.remove_irregular_frequencies))} _LOGGER.debug('Start preProcessor function.') run_thread(preprocessor.preprocess, (custom_config,), simulation_log_path) _LOGGER.debug('End preProcessor function.') _LOGGER.debug('Start solver function.') output = run_thread(solver.solve, (custom_config,), None) with open(simulation_log_path, 'a') as log_file: log_file.write(output) _LOGGER.debug('End solver function.') with open(simulation_log_path, 'r') as log_file: ret = log_file.read() helper.log_exit(_LOGGER, signature, [ret]) return ret except Exception as e: helper.log_exception(_LOGGER, signature, e) raise ServiceError(('Error occurs when doing simulation. Caused by:\n' + unicode(str(e))))
body_hash: 119,379,641,207,331,200
docstring:
Run simulation. @param simulation_dir: the simulation directory @param params: the simulation parameters @return: the simulation log content @raise TypeError: if any input parameter is not of required type @raise ValueError: if any input parameter is None/empty, or any field of SimulationParameters is not of valid value @raise ServiceError: if any other error occurred when launching the simulation
path: source/openwarpgui/openwarp/services.py
name: simulate
repository_name: rhydar/Test
lang: python
body_without_docstring:
def simulate(simulation_dir, params): '\n Run simulation.\n\n @param simulation_dir: the simulation directory\n @param params: the simulation parameters\n @return: the simulation log content\n @raise TypeError: if any input parameter is not of required type\n @raise ValueError: if any input parameter is None/empty, or any field of SimulationParameters is not\n of valid value\n @raise ServiceError: if any other error occurred when launching the simulation\n ' signature = (__name__ + '.simulate()') helper.log_entrance(_LOGGER, signature, {'simulation_dir': simulation_dir, 'params': params}) helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir') helper.check_is_directory(simulation_dir, 'simulation_dir') helper.check_type_value(params, 'params', SimulationParameters, False) helper.check_not_none_nor_empty(params.rho, 'params.rho') helper.check_not_none_nor_empty(params.g, 'params.g') helper.check_not_none_nor_empty(params.depth, 'params.depth') helper.check_not_none_nor_empty(params.xeff, 'params.xeff') helper.check_not_none_nor_empty(params.yeff, 'params.yeff') helper.check_not_none_nor_empty(params.wave_frequencies, 'params.wave_frequencies') helper.check_not_none_nor_empty(params.min_wave_frequencies, 'params.min_wave_frequencies') helper.check_not_none_nor_empty(params.max_wave_frequencies, 'params.max_wave_frequencies') helper.check_not_none_nor_empty(params.wave_directions, 'params.wave_directions') helper.check_not_none_nor_empty(params.min_wave_directions, 'params.min_wave_directions') helper.check_not_none_nor_empty(params.max_wave_direction, 'params.max_wave_direction') helper.check_not_none_nor_empty(params.indiq_solver, 'params.indiq_solver') helper.check_not_none_nor_empty(params.ires, 'params.ires') helper.check_not_none_nor_empty(params.tol_gmres, 'params.tol_gmres') helper.check_not_none_nor_empty(params.max_iterations, 'params.max_iterations') helper.check_not_none_nor_empty(params.save_potential, 'params.save_potential') helper.check_not_none_nor_empty(params.green_tabulation_numx, 'params.green_tabulation_numx') helper.check_not_none_nor_empty(params.green_tabulation_numz, 'params.green_tabulation_numz') helper.check_not_none_nor_empty(params.green_tabulation_simpson_npoints, 'params.green_tabulation_simpson_npoints') helper.check_not_none_nor_empty(params.use_ode_influence_coefficients, 'params.use_ode_influence_coefficients') helper.check_not_none_nor_empty(params.use_higher_order, 'params.use_higher_order') helper.check_not_none_nor_empty(params.num_panel_higher_order, 'params.num_panel_higher_order') helper.check_not_none_nor_empty(params.b_spline_order, 'params.b_spline_order') helper.check_not_none_nor_empty(params.use_dipoles_implementation, 'params.use_dipoles_implementation') helper.check_not_none_nor_empty(params.thin_panels, 'params.thin_panels') helper.check_not_none_nor_empty(params.compute_drift_forces, 'params.compute_drift_forces') helper.check_not_none_nor_empty(params.remove_irregular_frequencies, 'params.remove_irregular_frequencies') helper.check_not_none_nor_empty(params.compute_yaw_moment, 'params.compute_yaw_moment') helper.check_type_value(params.floating_bodies, 'params.floating_bodies', list, True) if (params.floating_bodies is not None): for body in params.floating_bodies: helper.check_type_value(body, 'params.floating_bodies item', FloatingBody, False) helper.check_not_none_nor_empty(body.mesh_file, 'body.mesh_file') helper.check_not_none_nor_empty(body.points, 'body.points') helper.check_not_none_nor_empty(body.panels, 'body.panels') 
helper.check_not_none_nor_empty(body.degrees_of_freedom, 'body.degrees_of_freedom') helper.check_not_none_nor_empty(body.resulting_generalised_forces, 'body.resulting_generalised_forces') helper.check_not_none_nor_empty(body.additional_info_lines, 'body.additional_info_lines') try: with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), 'a') as hdf5_data: utility.write_calculations(params, hdf5_data) os.mkdir(os.path.join(simulation_dir, 'results')) simulation_log_path = os.path.join(simulation_dir, 'simulation_log.txt') custom_config = {'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'), 'NEMOH_CALCULATIONS_FILE': None, 'NEMOH_INPUT_FILE': None, 'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'), 'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'), 'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'), 'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'), 'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'), 'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'), 'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'), 'GREEN_TABULATION_NUMX': int(params.green_tabulation_numx), 'GREEN_TABULATION_NUMZ': int(params.green_tabulation_numz), 'GREEN_TABULATION_SIMPSON_NPOINTS': int(params.green_tabulation_simpson_npoints), 'USE_ODE_INFLUENCE_COEFFICIENTS': bool(int(params.use_ode_influence_coefficients)), 'USE_HIGHER_ORDER': bool(int(params.use_higher_order)), 'NUM_PANEL_HIGHER_ORDER': int(params.num_panel_higher_order), 'B_SPLINE_ORDER': int(params.b_spline_order), 'USE_DIPOLES_IMPLEMENTATION': bool(int(params.use_dipoles_implementation)), 'THIN_PANELS': [int(i) for i in params.thin_panels.split()], 'COMPUTE_DRIFT_FORCES': bool(int(params.compute_drift_forces)), 'COMPUTE_YAW_MOMENT': bool(int(params.compute_yaw_moment)), 'REMOVE_IRREGULAR_FREQUENCIES': bool(int(params.remove_irregular_frequencies))} _LOGGER.debug('Start preProcessor function.') run_thread(preprocessor.preprocess, (custom_config,), simulation_log_path) _LOGGER.debug('End preProcessor function.') _LOGGER.debug('Start solver function.') output = run_thread(solver.solve, (custom_config,), None) with open(simulation_log_path, 'a') as log_file: log_file.write(output) _LOGGER.debug('End solver function.') with open(simulation_log_path, 'r') as log_file: ret = log_file.read() helper.log_exit(_LOGGER, signature, [ret]) return ret except Exception as e: helper.log_exception(_LOGGER, signature, e) raise ServiceError(('Error occurs when doing simulation. Caused by:\n' + unicode(str(e))))
def postprocess(simulation_dir, params): '\n Run post-processing.\n\n @param simulation_dir: the simulation directory\n @param params: the post-processing parameters\n @return: the post-processing log content\n @raise TypeError: if any input parameter is not of required type\n @raise ValueError: if any input parameter is None/empty, or any field of PostprocessingParameters is not\n of valid value\n @raise ServiceError: if error occurs during launching the post-processing\n ' signature = (__name__ + '.postprocess()') helper.log_entrance(_LOGGER, signature, {'simulation_dir': simulation_dir, 'params': params}) helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir') helper.check_is_directory(simulation_dir, 'simulation_dir') helper.check_type_value(params, 'params', PostprocessingParameters, False) helper.check_type_value(params.irf, 'params.irf', list, False) for irf_item in params.irf: helper.check_not_none_nor_empty(irf_item, 'irf_item') helper.check_not_none_nor_empty(params.show_pressure, 'params.show_pressure') helper.check_type_value(params.kochin_function, 'params.kochin_function', list, False) for kochin_function_item in params.kochin_function: helper.check_not_none_nor_empty(kochin_function_item, 'kochin_function_item') helper.check_type_value(params.free_surface_elevation, 'params.free_surface_elevation', list, False) for elevation_item in params.free_surface_elevation: helper.check_not_none_nor_empty(elevation_item, 'elevation_item') try: with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), 'a') as hdf5_data: utility.write_postprocessing_section(params, hdf5_data) postprocessing_log_path = os.path.join(simulation_dir, 'postprocessing_log.txt') custom_config = {'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'), 'NEMOH_CALCULATIONS_FILE': None, 'NEMOH_INPUT_FILE': None, 'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'), 'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'), 'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'), 'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'), 'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'), 'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'), 'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'), 'GREEN_TABULATION_NUMX': 328, 'GREEN_TABULATION_NUMZ': 46, 'GREEN_TABULATION_SIMPSON_NPOINTS': 251, 'USE_ODE_INFLUENCE_COEFFICIENTS': False, 'USE_HIGHER_ORDER': False, 'NUM_PANEL_HIGHER_ORDER': 1, 'B_SPLINE_ORDER': 1, 'USE_DIPOLES_IMPLEMENTATION': False, 'THIN_PANELS': [(- 1)], 'COMPUTE_DRIFT_FORCES': False, 'COMPUTE_YAW_MOMENT': False, 'REMOVE_IRREGULAR_FREQUENCIES': False} _LOGGER.debug('Start postProcessor function.') run_thread(postprocessor.postprocess, (custom_config,), postprocessing_log_path) _LOGGER.debug('End postProcessor in subprocess.') with open(postprocessing_log_path, 'r') as log_file: ret = log_file.read() helper.log_exit(_LOGGER, signature, [ret]) return ret except Exception as e: helper.log_exception(_LOGGER, signature, e) raise ServiceError(('Error occurs when running postprocess. Caused by:\n' + unicode(str(e))))
body_hash: 5,305,011,815,453,659,000
docstring:
Run post-processing. @param simulation_dir: the simulation directory @param params: the post-processing parameters @return: the post-processing log content @raise TypeError: if any input parameter is not of required type @raise ValueError: if any input parameter is None/empty, or any field of PostprocessingParameters is not of valid value @raise ServiceError: if error occurs during launching the post-processing
path: source/openwarpgui/openwarp/services.py
name: postprocess
repository_name: rhydar/Test
lang: python
body_without_docstring:
def postprocess(simulation_dir, params): '\n Run post-processing.\n\n @param simulation_dir: the simulation directory\n @param params: the post-processing parameters\n @return: the post-processing log content\n @raise TypeError: if any input parameter is not of required type\n @raise ValueError: if any input parameter is None/empty, or any field of PostprocessingParameters is not\n of valid value\n @raise ServiceError: if error occurs during launching the post-processing\n ' signature = (__name__ + '.postprocess()') helper.log_entrance(_LOGGER, signature, {'simulation_dir': simulation_dir, 'params': params}) helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir') helper.check_is_directory(simulation_dir, 'simulation_dir') helper.check_type_value(params, 'params', PostprocessingParameters, False) helper.check_type_value(params.irf, 'params.irf', list, False) for irf_item in params.irf: helper.check_not_none_nor_empty(irf_item, 'irf_item') helper.check_not_none_nor_empty(params.show_pressure, 'params.show_pressure') helper.check_type_value(params.kochin_function, 'params.kochin_function', list, False) for kochin_function_item in params.kochin_function: helper.check_not_none_nor_empty(kochin_function_item, 'kochin_function_item') helper.check_type_value(params.free_surface_elevation, 'params.free_surface_elevation', list, False) for elevation_item in params.free_surface_elevation: helper.check_not_none_nor_empty(elevation_item, 'elevation_item') try: with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), 'a') as hdf5_data: utility.write_postprocessing_section(params, hdf5_data) postprocessing_log_path = os.path.join(simulation_dir, 'postprocessing_log.txt') custom_config = {'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'), 'NEMOH_CALCULATIONS_FILE': None, 'NEMOH_INPUT_FILE': None, 'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'), 'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'), 'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'), 'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'), 'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'), 'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'), 'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'), 'GREEN_TABULATION_NUMX': 328, 'GREEN_TABULATION_NUMZ': 46, 'GREEN_TABULATION_SIMPSON_NPOINTS': 251, 'USE_ODE_INFLUENCE_COEFFICIENTS': False, 'USE_HIGHER_ORDER': False, 'NUM_PANEL_HIGHER_ORDER': 1, 'B_SPLINE_ORDER': 1, 'USE_DIPOLES_IMPLEMENTATION': False, 'THIN_PANELS': [(- 1)], 'COMPUTE_DRIFT_FORCES': False, 'COMPUTE_YAW_MOMENT': False, 'REMOVE_IRREGULAR_FREQUENCIES': False} _LOGGER.debug('Start postProcessor function.') run_thread(postprocessor.postprocess, (custom_config,), postprocessing_log_path) _LOGGER.debug('End postProcessor in subprocess.') with open(postprocessing_log_path, 'r') as log_file: ret = log_file.read() helper.log_exit(_LOGGER, signature, [ret]) return ret except Exception as e: helper.log_exception(_LOGGER, signature, e) raise ServiceError(('Error occurs when running postprocess. Caused by:\n' + unicode(str(e))))
def visualize(simulation_dir): '\n Launch ParaView to visualize simulation results.\n\n @param simulation_dir: the simulation directory\n @raise TypeError: if any input parameter is not of required type\n @raise ValueError: if any input parameter is None/empty\n @raise ServiceError: if error occurs during launching the ParaView\n ' signature = (__name__ + '.visualize()') helper.log_entrance(_LOGGER, signature, {'simulation_dir': simulation_dir}) helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir') helper.check_is_directory(simulation_dir, 'simulation_dir') try: files = [] for f in os.listdir(os.path.join(simulation_dir, 'results')): for ext in VISUALIZATION_FILE_EXTENSIONS: if fnmatch.fnmatch(f, ('*.' + ext)): files.append(os.path.join(simulation_dir, 'results', f)) if (len(files) == 0): raise ServiceError('There is no accepted file to visualize.') _LOGGER.debug('List of files to load:') _LOGGER.debug(str(files)) paraview_script = os.path.join(os.path.join(simulation_dir, 'results'), 'load_data.py') prepare_paraview_script(paraview_script, files) _LOGGER.debug('Start launching ParaView in subprocess.') subprocess.Popen([PARAVIEW_BIN, (('--script=' + paraview_script) + '')]) _LOGGER.debug('End launching ParaView in subprocess.') helper.log_exit(_LOGGER, signature, None) except Exception as e: helper.log_exception(_LOGGER, signature, e) raise ServiceError(('Error occurs when launching the ParaView. Caused by:\n' + unicode(str(e))))
body_hash: -6,515,608,174,183,623,000
docstring:
Launch ParaView to visualize simulation results. @param simulation_dir: the simulation directory @raise TypeError: if any input parameter is not of required type @raise ValueError: if any input parameter is None/empty @raise ServiceError: if error occurs during launching the ParaView
path: source/openwarpgui/openwarp/services.py
name: visualize
repository_name: rhydar/Test
lang: python
body_without_docstring:
def visualize(simulation_dir): '\n Launch ParaView to visualize simulation results.\n\n @param simulation_dir: the simulation directory\n @raise TypeError: if any input parameter is not of required type\n @raise ValueError: if any input parameter is None/empty\n @raise ServiceError: if error occurs during launching the ParaView\n ' signature = (__name__ + '.visualize()') helper.log_entrance(_LOGGER, signature, {'simulation_dir': simulation_dir}) helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir') helper.check_is_directory(simulation_dir, 'simulation_dir') try: files = [] for f in os.listdir(os.path.join(simulation_dir, 'results')): for ext in VISUALIZATION_FILE_EXTENSIONS: if fnmatch.fnmatch(f, ('*.' + ext)): files.append(os.path.join(simulation_dir, 'results', f)) if (len(files) == 0): raise ServiceError('There is no accepted file to visualize.') _LOGGER.debug('List of files to load:') _LOGGER.debug(str(files)) paraview_script = os.path.join(os.path.join(simulation_dir, 'results'), 'load_data.py') prepare_paraview_script(paraview_script, files) _LOGGER.debug('Start launching ParaView in subprocess.') subprocess.Popen([PARAVIEW_BIN, (('--script=' + paraview_script) + '')]) _LOGGER.debug('End launching ParaView in subprocess.') helper.log_exit(_LOGGER, signature, None) except Exception as e: helper.log_exception(_LOGGER, signature, e) raise ServiceError(('Error occurs when launching the ParaView. Caused by:\n' + unicode(str(e))))
def prepare_paraview_script(script_path, files): '\n Prepare a script to be run by ParaView from a template.\n\n @param script_path: path of the new script to create\n @param files: a list of data files path\n @raise Exception: to its caller if any error occurs\n ' with open(PARAVIEW_SCRIPT_TEMPLATE, 'r') as fin: with open(script_path, 'w') as fout: for line in fin.readlines(): fout.write((line.rstrip().replace('<parameter_files>', str(files)) + '\n'))
body_hash: 8,403,920,437,899,173,000
docstring:
Prepare a script to be run by ParaView from a template. @param script_path: path of the new script to create @param files: a list of data files path @raise Exception: to its caller if any error occurs
path: source/openwarpgui/openwarp/services.py
name: prepare_paraview_script
repository_name: rhydar/Test
lang: python
body_without_docstring:
def prepare_paraview_script(script_path, files): '\n Prepare a script to be run by ParaView from a template.\n\n @param script_path: path of the new script to create\n @param files: a list of data files path\n @raise Exception: to its caller if any error occurs\n ' with open(PARAVIEW_SCRIPT_TEMPLATE, 'r') as fin: with open(script_path, 'w') as fout: for line in fin.readlines(): fout.write((line.rstrip().replace('<parameter_files>', str(files)) + '\n'))
def wrapper_io(func, fd, args, return_dict): '\n Run a function while redirecting its output to a file descriptor\n Args:\n func: A python function to run\n fd: a file descriptor\n args: A tuple containing argument for the function\n return_dict: Dictionary where to put the result of the function\n\n ' return_dict['output'] = '' with warnings.catch_warnings(): warnings.simplefilter('ignore') if fd: with Silence(stdout=fd, stderr=os.devnull, mode='a'): return_dict['output'] = func(*args) else: return_dict['output'] = func(*args)
body_hash: 5,322,863,883,814,633,000
docstring:
Run a function while redirecting its output to a file descriptor Args: func: A python function to run fd: a file descriptor args: A tuple containing argument for the function return_dict: Dictionary where to put the result of the function
path: source/openwarpgui/openwarp/services.py
name: wrapper_io
repository_name: rhydar/Test
lang: python
body_without_docstring:
def wrapper_io(func, fd, args, return_dict): '\n Run a function while redirecting its output to a file descriptor\n Args:\n func: A python function to run\n fd: a file descriptor\n args: A tuple containing argument for the function\n return_dict: Dictionary where to put the result of the function\n\n ' return_dict['output'] = '' with warnings.catch_warnings(): warnings.simplefilter('ignore') if fd: with Silence(stdout=fd, stderr=os.devnull, mode='a'): return_dict['output'] = func(*args) else: return_dict['output'] = func(*args)
def run_thread(func, args, fd): '\n Run a python function in a thread and wait for it to complete.\n Redirect its output to fd\n Args:\n func: A python function to run\n args: A tuple containing argument for the function\n fd: a file descriptor\n ' manager = Manager() return_dict = manager.dict() p = Process(target=wrapper_io, args=(func, fd, args, return_dict)) p.start() p.join() return return_dict['output']
body_hash: 7,948,268,067,498,178,000
docstring:
Run a python function in a thread and wait for it to complete. Redirect its output to fd Args: func: A python function to run args: A tuple containing argument for the function fd: a file descriptor
path: source/openwarpgui/openwarp/services.py
name: run_thread
repository_name: rhydar/Test
lang: python
body_without_docstring:
def run_thread(func, args, fd): '\n Run a python function in a thread and wait for it to complete.\n Redirect its output to fd\n Args:\n func: A python function to run\n args: A tuple containing argument for the function\n fd: a file descriptor\n ' manager = Manager() return_dict = manager.dict() p = Process(target=wrapper_io, args=(func, fd, args, return_dict)) p.start() p.join() return return_dict['output']
def writeline_if_not_none(fout, data): '\n Write one line to the specified file if data is not None.\n\n @param fout: the file object to write line in\n @param data: the data to write as line\n ' if (data is not None): fout.write((str(data) + '\n'))
body_hash: -718,766,003,065,456,600
docstring:
Write one line to the specified file if data is not None. @param fout: the file object to write line in @param data: the data to write as line
path: source/openwarpgui/openwarp/services.py
name: writeline_if_not_none
repository_name: rhydar/Test
lang: python
body_without_docstring:
def writeline_if_not_none(fout, data): '\n Write one line to the specified file if data is not None.\n\n @param fout: the file object to write line in\n @param data: the data to write as line\n ' if (data is not None): fout.write((str(data) + '\n'))
def prepare_dir(prefix): '\n Prepare a directory, the directory will be a sub-directory of USER_DATA_DIRECTORY with current timestamp\n prefixed given prefix as the directory name.\n\n @param prefix: the directory prefix\n @return: the meshing/simulation directory full path\n @raise TypeError: if any input parameter is not of required type\n @raise ValueError: if any input parameter is None/empty\n @raise ServiceError: if any error occurred when preparing the directory\n ' signature = (__name__ + '.prepare_dir()') helper.log_entrance(_LOGGER, signature, {'prefix': prefix}) helper.check_not_none_nor_empty(prefix, 'prefix') try: run_dir = os.path.join(USER_DATA_DIRECTORY, (((prefix + time.strftime('%Y%m%d%H%M%S')) + '_') + uuid.uuid1().hex)) os.makedirs(run_dir) helper.log_exit(_LOGGER, signature, [run_dir]) return run_dir except Exception as e: helper.log_exception(_LOGGER, signature, e) raise ServiceError(('Error occurs when preparing the directory. Caused by:\n' + unicode(str(e))))
body_hash: -1,163,555,207,973,630,700
docstring:
Prepare a directory, the directory will be a sub-directory of USER_DATA_DIRECTORY with current timestamp prefixed given prefix as the directory name. @param prefix: the directory prefix @return: the meshing/simulation directory full path @raise TypeError: if any input parameter is not of required type @raise ValueError: if any input parameter is None/empty @raise ServiceError: if any error occurred when preparing the directory
path: source/openwarpgui/openwarp/services.py
name: prepare_dir
repository_name: rhydar/Test
lang: python
body_without_docstring:
def prepare_dir(prefix): '\n Prepare a directory, the directory will be a sub-directory of USER_DATA_DIRECTORY with current timestamp\n prefixed given prefix as the directory name.\n\n @param prefix: the directory prefix\n @return: the meshing/simulation directory full path\n @raise TypeError: if any input parameter is not of required type\n @raise ValueError: if any input parameter is None/empty\n @raise ServiceError: if any error occurred when preparing the directory\n ' signature = (__name__ + '.prepare_dir()') helper.log_entrance(_LOGGER, signature, {'prefix': prefix}) helper.check_not_none_nor_empty(prefix, 'prefix') try: run_dir = os.path.join(USER_DATA_DIRECTORY, (((prefix + time.strftime('%Y%m%d%H%M%S')) + '_') + uuid.uuid1().hex)) os.makedirs(run_dir) helper.log_exit(_LOGGER, signature, [run_dir]) return run_dir except Exception as e: helper.log_exception(_LOGGER, signature, e) raise ServiceError(('Error occurs when preparing the directory. Caused by:\n' + unicode(str(e))))
def __init__(self, stdout=os.devnull, stderr=os.devnull, mode='w'): '\n Initialize\n Args:\n self: The class itself\n stdout: the descriptor or file name where to redirect stdout\n stdout: the descriptor or file name where to redirect stdout\n mode: the output descriptor or file mode\n ' self.outfiles = (stdout, stderr) self.combine = (stdout == stderr) self.mode = mode
body_hash: 7,559,297,123,415,256,000
docstring:
Initialize Args: self: The class itself stdout: the descriptor or file name where to redirect stdout stdout: the descriptor or file name where to redirect stdout mode: the output descriptor or file mode
path: source/openwarpgui/openwarp/services.py
name: __init__
repository_name: rhydar/Test
lang: python
body_without_docstring:
def __init__(self, stdout=os.devnull, stderr=os.devnull, mode='w'): '\n Initialize\n Args:\n self: The class itself\n stdout: the descriptor or file name where to redirect stdout\n stdout: the descriptor or file name where to redirect stdout\n mode: the output descriptor or file mode\n ' self.outfiles = (stdout, stderr) self.combine = (stdout == stderr) self.mode = mode
def __enter__(self): '\n Enter the context\n Args:\n self: The class itself\n ' import sys self.sys = sys self.saved_streams = saved_streams = (sys.__stdout__, sys.__stderr__) self.fds = fds = [s.fileno() for s in saved_streams] self.saved_fds = map(os.dup, fds) for s in saved_streams: s.flush() if self.combine: null_streams = ([open(self.outfiles[0], self.mode, 0)] * 2) if (self.outfiles[0] != os.devnull): (sys.stdout, sys.stderr) = map(os.fdopen, fds, (['w'] * 2), ([0] * 2)) else: null_streams = [open(f, self.mode, 0) for f in self.outfiles] self.null_fds = null_fds = [s.fileno() for s in null_streams] self.null_streams = null_streams map(os.dup2, null_fds, fds)
body_hash: 2,806,386,025,515,811,000
docstring:
Enter the context Args: self: The class itself
path: source/openwarpgui/openwarp/services.py
name: __enter__
repository_name: rhydar/Test
lang: python
body_without_docstring:
def __enter__(self): '\n Enter the context\n Args:\n self: The class itself\n ' import sys self.sys = sys self.saved_streams = saved_streams = (sys.__stdout__, sys.__stderr__) self.fds = fds = [s.fileno() for s in saved_streams] self.saved_fds = map(os.dup, fds) for s in saved_streams: s.flush() if self.combine: null_streams = ([open(self.outfiles[0], self.mode, 0)] * 2) if (self.outfiles[0] != os.devnull): (sys.stdout, sys.stderr) = map(os.fdopen, fds, (['w'] * 2), ([0] * 2)) else: null_streams = [open(f, self.mode, 0) for f in self.outfiles] self.null_fds = null_fds = [s.fileno() for s in null_streams] self.null_streams = null_streams map(os.dup2, null_fds, fds)
def __exit__(self, *args): '\n Exit the context\n Args:\n self: The class itself\n args: other arguments\n ' sys = self.sys for s in self.saved_streams: s.flush() map(os.dup2, self.saved_fds, self.fds) (sys.stdout, sys.stderr) = self.saved_streams for s in self.null_streams: s.close() for fd in self.saved_fds: os.close(fd) return False
body_hash: 3,997,459,935,956,700,000
docstring:
Exit the context Args: self: The class itself args: other arguments
path: source/openwarpgui/openwarp/services.py
name: __exit__
repository_name: rhydar/Test
lang: python
body_without_docstring:
def __exit__(self, *args): '\n Exit the context\n Args:\n self: The class itself\n args: other arguments\n ' sys = self.sys for s in self.saved_streams: s.flush() map(os.dup2, self.saved_fds, self.fds) (sys.stdout, sys.stderr) = self.saved_streams for s in self.null_streams: s.close() for fd in self.saved_fds: os.close(fd) return False
def get_token(self): '\n Generates an Azure SAS token for pre-authorizing a file upload.\n\n Returns a tuple in the following format: (token_dict, object_name), where\n - token_dict has a `token` key which contains the SAS token as a string\n - object_name is a string\n ' account = self.CloudStorageAccount(account_name=self.account_name, account_key=self.storage_key) bbs = account.create_block_blob_service() object_name = self.object_name() sas_token = bbs.generate_blob_shared_access_signature(self.container_name, object_name, permission=self.BlobPermissions.CREATE, expiry=(datetime.utcnow() + self.timeout), protocol='https') return ({'token': sas_token}, object_name)
body_hash: 4,943,684,934,307,843,000
docstring:
Generates an Azure SAS token for pre-authorizing a file upload. Returns a tuple in the following format: (token_dict, object_name), where - token_dict has a `token` key which contains the SAS token as a string - object_name is a string
path: atst/domain/csp/file_uploads.py
name: get_token
repository_name: philip-dds/atst
lang: python
body_without_docstring:
def get_token(self): '\n Generates an Azure SAS token for pre-authorizing a file upload.\n\n Returns a tuple in the following format: (token_dict, object_name), where\n - token_dict has a `token` key which contains the SAS token as a string\n - object_name is a string\n ' account = self.CloudStorageAccount(account_name=self.account_name, account_key=self.storage_key) bbs = account.create_block_blob_service() object_name = self.object_name() sas_token = bbs.generate_blob_shared_access_signature(self.container_name, object_name, permission=self.BlobPermissions.CREATE, expiry=(datetime.utcnow() + self.timeout), protocol='https') return ({'token': sas_token}, object_name)
def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None): "\n args:\n root - path to the lasot dataset.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\n videos with subscripts -1, -3, and -5 from each class will be used for training.\n split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\n vid_ids or split option can be used at a time.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n " root = (env_settings().lasot_dir if (root is None) else root) super().__init__('LaSOT', root, image_loader) self.class_list = [f for f in os.listdir(self.root)] self.class_to_id = {cls_name: cls_id for (cls_id, cls_name) in enumerate(self.class_list)} self.sequence_list = self._build_sequence_list(vid_ids, split) if (data_fraction is not None): self.sequence_list = random.sample(self.sequence_list, int((len(self.sequence_list) * data_fraction))) self.seq_per_class = self._build_class_list()
body_hash: 8,845,800,174,379,827,000
docstring:
args: root - path to the lasot dataset. image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the videos with subscripts -1, -3, and -5 from each class will be used for training. split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of vid_ids or split option can be used at a time. data_fraction - Fraction of dataset to be used. The complete dataset is used by default
path: Stark-main/external/AR/ltr/dataset/lasot.py
name: __init__
repository_name: 2021-DGSW-Ensemble/Ensemble-AI
lang: python
body_without_docstring:
def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None): "\n args:\n root - path to the lasot dataset.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\n videos with subscripts -1, -3, and -5 from each class will be used for training.\n split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\n vid_ids or split option can be used at a time.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n " root = (env_settings().lasot_dir if (root is None) else root) super().__init__('LaSOT', root, image_loader) self.class_list = [f for f in os.listdir(self.root)] self.class_to_id = {cls_name: cls_id for (cls_id, cls_name) in enumerate(self.class_list)} self.sequence_list = self._build_sequence_list(vid_ids, split) if (data_fraction is not None): self.sequence_list = random.sample(self.sequence_list, int((len(self.sequence_list) * data_fraction))) self.seq_per_class = self._build_class_list()
def __init__(self, args, base_dir=Path.db_root_dir('pascal'), split='train'): '\n :param base_dir: path to VOC dataset directory\n :param split: train/val\n :param transform: transform to apply\n ' super().__init__() self._base_dir = base_dir self._image_dir = os.path.join(self._base_dir, 'JPEGImages') self._cat_dir = os.path.join(self._base_dir, 'SegmentationClass') self._sp_dir = os.path.join(self._base_dir, 'super_pixel') if isinstance(split, str): self.split = [split] else: split.sort() self.split = split self.args = args _splits_dir = os.path.join(self._base_dir, 'ImageSets', 'Segmentation') self.im_ids = [] self.images = [] self.categories = [] self.super_pixel = [] for splt in self.split: with open(os.path.join(os.path.join(_splits_dir, (splt + '.txt'))), 'r') as f: lines = f.read().splitlines() for (ii, line) in enumerate(lines): _image = os.path.join(self._image_dir, (line + '.jpg')) _cat = os.path.join(self._cat_dir, (line + '.png')) _sp = os.path.join(self._sp_dir, (line + '.ppm.jpg')) assert os.path.isfile(_image) assert os.path.isfile(_cat) assert os.path.isfile(_sp) self.im_ids.append(line) self.images.append(_image) self.categories.append(_cat) self.super_pixel.append(_sp) assert (len(self.images) == len(self.categories)) print('Number of images in {}: {:d}'.format(split, len(self.images)))
body_hash: -1,031,724,533,508,999,200
docstring:
:param base_dir: path to VOC dataset directory :param split: train/val :param transform: transform to apply
path: dataloaders/datasets/pascal.py
name: __init__
repository_name: ChenyanWu/seg_super_pixel
lang: python
body_without_docstring:
def __init__(self, args, base_dir=Path.db_root_dir('pascal'), split='train'): '\n :param base_dir: path to VOC dataset directory\n :param split: train/val\n :param transform: transform to apply\n ' super().__init__() self._base_dir = base_dir self._image_dir = os.path.join(self._base_dir, 'JPEGImages') self._cat_dir = os.path.join(self._base_dir, 'SegmentationClass') self._sp_dir = os.path.join(self._base_dir, 'super_pixel') if isinstance(split, str): self.split = [split] else: split.sort() self.split = split self.args = args _splits_dir = os.path.join(self._base_dir, 'ImageSets', 'Segmentation') self.im_ids = [] self.images = [] self.categories = [] self.super_pixel = [] for splt in self.split: with open(os.path.join(os.path.join(_splits_dir, (splt + '.txt'))), 'r') as f: lines = f.read().splitlines() for (ii, line) in enumerate(lines): _image = os.path.join(self._image_dir, (line + '.jpg')) _cat = os.path.join(self._cat_dir, (line + '.png')) _sp = os.path.join(self._sp_dir, (line + '.ppm.jpg')) assert os.path.isfile(_image) assert os.path.isfile(_cat) assert os.path.isfile(_sp) self.im_ids.append(line) self.images.append(_image) self.categories.append(_cat) self.super_pixel.append(_sp) assert (len(self.images) == len(self.categories)) print('Number of images in {}: {:d}'.format(split, len(self.images)))
def image_psf(image, stars, size=15, normalize=False, return_cutouts=False): '\n Get global psf from image using photutils routines\n\n Parameters\n ----------\n image: np.ndarray or path\n stars: np.ndarray\n stars positions with shape (n,2)\n size: int\n size of the cuts around stars (in pixels)\n normalize: bool, optional\n weather to normalize the cutout, default is False\n\n Returns\n -------\n np.ndarray of shape (size, size)\n\n ' (_, cuts) = cutouts(image, stars, size=size) cuts = cuts.data if normalize: cuts = [(c / np.sum(c)) for c in cuts] if return_cutouts: return (np.median(cuts, axis=0), cuts) else: return np.median(cuts, axis=0)
body_hash: 6,398,578,586,264,584,000
docstring:
Get global psf from image using photutils routines Parameters ---------- image: np.ndarray or path stars: np.ndarray stars positions with shape (n,2) size: int size of the cuts around stars (in pixels) normalize: bool, optional weather to normalize the cutout, default is False Returns ------- np.ndarray of shape (size, size)
path: prose/blocks/psf.py
name: image_psf
repository_name: lgrcia/prose
lang: python
body_without_docstring:
def image_psf(image, stars, size=15, normalize=False, return_cutouts=False): '\n Get global psf from image using photutils routines\n\n Parameters\n ----------\n image: np.ndarray or path\n stars: np.ndarray\n stars positions with shape (n,2)\n size: int\n size of the cuts around stars (in pixels)\n normalize: bool, optional\n weather to normalize the cutout, default is False\n\n Returns\n -------\n np.ndarray of shape (size, size)\n\n ' (_, cuts) = cutouts(image, stars, size=size) cuts = cuts.data if normalize: cuts = [(c / np.sum(c)) for c in cuts] if return_cutouts: return (np.median(cuts, axis=0), cuts) else: return np.median(cuts, axis=0)
def cutouts(image, stars, size=15): 'Custom version to extract stars cutouts\n\n Parameters\n ----------\n Parameters\n ----------\n image: np.ndarray or path\n stars: np.ndarray\n stars positions with shape (n,2)\n size: int\n size of the cuts around stars (in pixels), by default 15\n\n Returns\n -------\n np.ndarray of shape (size, size)\n \n ' if isinstance(image, str): image = fits.getdata(image) warnings.simplefilter('ignore') if (np.shape(stars) > (1, 2)): stars_tbl = Table([stars[:, 0], stars[:, 1], np.arange(len(stars))], names=['x', 'y', 'id']) stars = extract_stars(NDData(data=image), stars_tbl, size=size) idxs = np.array([s.id_label for s in stars]) return (idxs, stars) else: stars_tbl = Table(data=np.array([stars[0][0], stars[0][1]]), names=['x', 'y']) stars = extract_stars(NDData(data=image), stars_tbl, size=size) return stars
body_hash: -796,233,181,387,293,200
docstring:
Custom version to extract stars cutouts Parameters ---------- Parameters ---------- image: np.ndarray or path stars: np.ndarray stars positions with shape (n,2) size: int size of the cuts around stars (in pixels), by default 15 Returns ------- np.ndarray of shape (size, size)
path: prose/blocks/psf.py
name: cutouts
repository_name: lgrcia/prose
lang: python
body_without_docstring:
def cutouts(image, stars, size=15): 'Custom version to extract stars cutouts\n\n Parameters\n ----------\n Parameters\n ----------\n image: np.ndarray or path\n stars: np.ndarray\n stars positions with shape (n,2)\n size: int\n size of the cuts around stars (in pixels), by default 15\n\n Returns\n -------\n np.ndarray of shape (size, size)\n \n ' if isinstance(image, str): image = fits.getdata(image) warnings.simplefilter('ignore') if (np.shape(stars) > (1, 2)): stars_tbl = Table([stars[:, 0], stars[:, 1], np.arange(len(stars))], names=['x', 'y', 'id']) stars = extract_stars(NDData(data=image), stars_tbl, size=size) idxs = np.array([s.id_label for s in stars]) return (idxs, stars) else: stars_tbl = Table(data=np.array([stars[0][0], stars[0][1]]), names=['x', 'y']) stars = extract_stars(NDData(data=image), stars_tbl, size=size) return stars
def moments(data): 'Returns (height, x, y, width_x, width_y)\n the gaussian parameters of a 2D distribution by calculating its\n moments ' height = data.max() background = data.min() data = (data - np.min(data)) total = data.sum() (x, y) = np.indices(data.shape) x = ((x * data).sum() / total) y = ((y * data).sum() / total) col = data[:, int(y)] width_x = np.sqrt((abs((((np.arange(col.size) - y) ** 2) * col)).sum() / col.sum())) row = data[int(x), :] width_y = np.sqrt((abs((((np.arange(row.size) - x) ** 2) * row)).sum() / row.sum())) width_x /= gaussian_sigma_to_fwhm width_y /= gaussian_sigma_to_fwhm return (height, x, y, width_x, width_y, 0.0, background)
body_hash: -6,192,023,492,880,741,000
docstring:
Returns (height, x, y, width_x, width_y) the gaussian parameters of a 2D distribution by calculating its moments
path: prose/blocks/psf.py
name: moments
repository_name: lgrcia/prose
lang: python
body_without_docstring:
def moments(data): 'Returns (height, x, y, width_x, width_y)\n the gaussian parameters of a 2D distribution by calculating its\n moments ' height = data.max() background = data.min() data = (data - np.min(data)) total = data.sum() (x, y) = np.indices(data.shape) x = ((x * data).sum() / total) y = ((y * data).sum() / total) col = data[:, int(y)] width_x = np.sqrt((abs((((np.arange(col.size) - y) ** 2) * col)).sum() / col.sum())) row = data[int(x), :] width_y = np.sqrt((abs((((np.arange(row.size) - x) ** 2) * row)).sum() / row.sum())) width_x /= gaussian_sigma_to_fwhm width_y /= gaussian_sigma_to_fwhm return (height, x, y, width_x, width_y, 0.0, background)
def _correct_folder(folder: str) -> str: "Ensures the folder follows a standard.\n\n Pathlib.parent in the root folder results in '.', whereas in other places\n we should use '' for the root folder. This function makes sure the root\n folder is always empty string.\n\n Args:\n folder: the folder to be corrected.\n\n Returns:\n The corrected folder.\n " if (folder == '.'): return '' return folder
body_hash: 5,241,296,615,931,418,000
docstring:
Ensures the folder follows a standard. Pathlib.parent in the root folder results in '.', whereas in other places we should use '' for the root folder. This function makes sure the root folder is always empty string. Args: folder: the folder to be corrected. Returns: The corrected folder.
path: tensorflow_datasets/core/github_api/github_path.py
name: _correct_folder
repository_name: YangDong2002/datasets
lang: python
body_without_docstring:
def _correct_folder(folder: str) -> str: "Ensures the folder follows a standard.\n\n Pathlib.parent in the root folder results in '.', whereas in other places\n we should use '' for the root folder. This function makes sure the root\n folder is always empty string.\n\n Args:\n folder: the folder to be corrected.\n\n Returns:\n The corrected folder.\n " if (folder == '.'): return '' return folder
def _parse_github_path(path: str) -> Tuple[(str, str, str)]: 'Parse the absolute github path.\n\n Args:\n path: The full github path.\n\n Returns:\n repo: The repository identifiant.\n branch: Repository branch.\n subpath: The inner path.\n\n Raises:\n ValueError: If the path is invalid\n ' err_msg = f'Invalid github path: {path}. Expected format: `github://<owner>/<name>/tree/<branch>[/<path>]`.' if (not path.startswith(_URI_PREFIX)): raise ValueError(err_msg) if path.endswith('/'): raise ValueError((err_msg + ' Trailing `/` not supported.')) parts = path[len(_URI_PREFIX):].split('/') if (len(parts) < 4): raise ValueError(err_msg) (owner, repo, tree, branch, *subpath) = parts if (tree != 'tree'): raise ValueError((err_msg + ". `/blob/` isn't accepted. Only `/tree/`.")) return (f'{owner}/{repo}', branch, '/'.join(subpath))
body_hash: 7,302,151,068,288,946,000
docstring:
Parse the absolute github path. Args: path: The full github path. Returns: repo: The repository identifiant. branch: Repository branch. subpath: The inner path. Raises: ValueError: If the path is invalid
tensorflow_datasets/core/github_api/github_path.py
_parse_github_path
YangDong2002/datasets
python
def _parse_github_path(path: str) -> Tuple[(str, str, str)]: 'Parse the absolute github path.\n\n Args:\n path: The full github path.\n\n Returns:\n repo: The repository identifiant.\n branch: Repository branch.\n subpath: The inner path.\n\n Raises:\n ValueError: If the path is invalid\n ' err_msg = f'Invalid github path: {path}. Expected format: `github://<owner>/<name>/tree/<branch>[/<path>]`.' if (not path.startswith(_URI_PREFIX)): raise ValueError(err_msg) if path.endswith('/'): raise ValueError((err_msg + ' Trailing `/` not supported.')) parts = path[len(_URI_PREFIX):].split('/') if (len(parts) < 4): raise ValueError(err_msg) (owner, repo, tree, branch, *subpath) = parts if (tree != 'tree'): raise ValueError((err_msg + ". `/blob/` isn't accepted. Only `/tree/`.")) return (f'{owner}/{repo}', branch, '/'.join(subpath))
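A standalone sketch of the split `_parse_github_path` performs, assuming `_URI_PREFIX` is `'github://'`; the example path is hypothetical.

```python
# Hypothetical prefix and path, mirroring the parsing in _parse_github_path above.
_URI_PREFIX = 'github://'
path = 'github://tensorflow/datasets/tree/master/tensorflow_datasets/core'

owner, repo, tree, branch, *subpath = path[len(_URI_PREFIX):].split('/')
assert tree == 'tree'           # only /tree/ URLs are accepted, not /blob/
print(f'{owner}/{repo}', branch, '/'.join(subpath))
# -> tensorflow/datasets master tensorflow_datasets/core
```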
def query(self, url: str) -> JsonValue: 'Launches a Github API query and returns the result.' headers = {} if self._token: headers['Authorization'] = f'token {self._token}' resp = requests.get(url, headers=headers) if (resp.status_code != 200): raise FileNotFoundError(f'''Request failed: Request: {url} Error: {resp.status_code} Reason: {resp.content}''') return resp.json()
2,435,902,409,758,191,000
Launches a Github API query and returns the result.
tensorflow_datasets/core/github_api/github_path.py
query
YangDong2002/datasets
python
def query(self, url: str) -> JsonValue: headers = {} if self._token: headers['Authorization'] = f'token {self._token}' resp = requests.get(url, headers=headers) if (resp.status_code != 200): raise FileNotFoundError(f'Request failed: Request: {url} Error: {resp.status_code} Reason: {resp.content}') return resp.json()
def query_tree(self, repo: str, branch: str) -> JsonValue: 'Queries a repository tree.\n\n See https://docs.github.com/en/rest/reference/git#trees\n\n Args:\n repo: the repository\n branch: the branch for which to get the tree\n\n Returns:\n JSON dict with the tree.\n ' url = f'https://api.github.com/repos/{repo}/git/trees/{branch}?recursive=1' return self.query(url)
-6,298,859,450,915,724,000
Queries a repository tree. See https://docs.github.com/en/rest/reference/git#trees Args: repo: the repository branch: the branch for which to get the tree Returns: JSON dict with the tree.
tensorflow_datasets/core/github_api/github_path.py
query_tree
YangDong2002/datasets
python
def query_tree(self, repo: str, branch: str) -> JsonValue: 'Queries a repository tree.\n\n See https://docs.github.com/en/rest/reference/git#trees\n\n Args:\n repo: the repository\n branch: the branch for which to get the tree\n\n Returns:\n JSON dict with the tree.\n ' url = f'https://api.github.com/repos/{repo}/git/trees/{branch}?recursive=1' return self.query(url)
@classmethod def from_json(cls, value) -> '_GithubTree': 'Parses a GithubTree from the given JSON.' if ((not isinstance(value, dict)) or ('tree' not in value)): raise ValueError(f'Github API response not supported: {value}') files_per_folder: MutableMapping[(str, Set[str])] = {} for element in value['tree']: github_element = _GithubElement.from_path(path=pathlib.PurePosixPath(element['path']), is_folder=(element['type'] == 'tree')) if (element['type'] in {'blob', 'tree'}): files_per_folder.setdefault(github_element.parent_folder, set()) files_per_folder[github_element.parent_folder].add(github_element) return _GithubTree(files_per_folder=files_per_folder)
4,277,081,197,266,624,500
Parses a GithubTree from the given JSON.
tensorflow_datasets/core/github_api/github_path.py
from_json
YangDong2002/datasets
python
@classmethod def from_json(cls, value) -> '_GithubTree': if ((not isinstance(value, dict)) or ('tree' not in value)): raise ValueError(f'Github API response not supported: {value}') files_per_folder: MutableMapping[(str, Set[str])] = {} for element in value['tree']: github_element = _GithubElement.from_path(path=pathlib.PurePosixPath(element['path']), is_folder=(element['type'] == 'tree')) if (element['type'] in {'blob', 'tree'}): files_per_folder.setdefault(github_element.parent_folder, set()) files_per_folder[github_element.parent_folder].add(github_element) return _GithubTree(files_per_folder=files_per_folder)
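A simplified, self-contained sketch of the grouping idea in `from_json`: entries from a hypothetical GitHub trees payload are keyed by their parent folder, using plain sets instead of `_GithubElement`.

```python
import pathlib

# Hypothetical, trimmed-down payload shaped like the GitHub git/trees API response.
tree_json = {
    'tree': [
        {'path': 'tensorflow_datasets', 'type': 'tree'},
        {'path': 'tensorflow_datasets/core/__init__.py', 'type': 'blob'},
    ]
}

files_per_folder = {}
for element in tree_json['tree']:
    p = pathlib.PurePosixPath(element['path'])
    parent = '' if str(p.parent) == '.' else str(p.parent)   # same normalization as _correct_folder
    files_per_folder.setdefault(parent, set()).add(p.name)

print(files_per_folder)
# {'': {'tensorflow_datasets'}, 'tensorflow_datasets/core': {'__init__.py'}}
```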
@staticmethod @functools.lru_cache(maxsize=None) def from_cache(repo: str, branch: str) -> '_GithubTree': 'Factory which caches the entire Github tree.' tree_json = GithubApi().query_tree(repo, branch) assert (not tree_json.get('truncated', False)) return _GithubTree.from_json(tree_json)
-7,261,570,799,538,644,000
Factory which caches the entire Github tree.
tensorflow_datasets/core/github_api/github_path.py
from_cache
YangDong2002/datasets
python
@staticmethod @functools.lru_cache(maxsize=None) def from_cache(repo: str, branch: str) -> '_GithubTree': tree_json = GithubApi().query_tree(repo, branch) assert (not tree_json.get('truncated', False)) return _GithubTree.from_json(tree_json)
@classmethod def from_repo(cls, repo: str, branch: str='master') -> 'GithubPath': "Factory to creates a GithubPath from a repo name.\n\n Args:\n repo: Repo name (e.g. `tensorflow/datasets`)\n branch: Branch name (e.g. `master`, 'v1.2.0', 'EXAMPLE_KEY'). Default to\n master.\n\n Returns:\n github_path: The repository root dir at head\n " return cls(f'github://{repo}/tree/{branch}')
6,186,761,111,058,657,000
Factory to creates a GithubPath from a repo name. Args: repo: Repo name (e.g. `tensorflow/datasets`) branch: Branch name (e.g. `master`, 'v1.2.0', 'EXAMPLE_KEY'). Default to master. Returns: github_path: The repository root dir at head
tensorflow_datasets/core/github_api/github_path.py
from_repo
YangDong2002/datasets
python
@classmethod def from_repo(cls, repo: str, branch: str='master') -> 'GithubPath': "Factory to creates a GithubPath from a repo name.\n\n Args:\n repo: Repo name (e.g. `tensorflow/datasets`)\n branch: Branch name (e.g. `master`, 'v1.2.0', 'EXAMPLE_KEY'). Default to\n master.\n\n Returns:\n github_path: The repository root dir at head\n " return cls(f'github://{repo}/tree/{branch}')
@property def subpath(self) -> str: 'The inner path (e.g. `core/__init__.py`).' return self._metadata.subpath
-8,259,573,421,062,299,000
The inner path (e.g. `core/__init__.py`).
tensorflow_datasets/core/github_api/github_path.py
subpath
YangDong2002/datasets
python
@property def subpath(self) -> str: return self._metadata.subpath
@property def repo(self) -> str: 'The repository identifier (e.g. `tensorflow/datasets`).' return self._metadata.repo
-4,497,430,041,800,673,000
The repository identifier (e.g. `tensorflow/datasets`).
tensorflow_datasets/core/github_api/github_path.py
repo
YangDong2002/datasets
python
@property def repo(self) -> str: return self._metadata.repo
@property def branch(self) -> str: 'The branch (e.g. `master`, `v2`, `43bbad116df`,...).' return self._metadata.branch
6,229,227,912,246,695,000
The branch (e.g. `master`, `v2`, `43bbad116df`,...).
tensorflow_datasets/core/github_api/github_path.py
branch
YangDong2002/datasets
python
@property def branch(self) -> str: return self._metadata.branch
def as_raw_url(self) -> str: 'Returns the raw content url (https://raw.githubusercontent.com).' return f'https://raw.githubusercontent.com/{self.repo}/{self.branch}/{self.subpath}'
-6,023,169,806,691,042,000
Returns the raw content url (https://raw.githubusercontent.com).
tensorflow_datasets/core/github_api/github_path.py
as_raw_url
YangDong2002/datasets
python
def as_raw_url(self) -> str: return f'https://raw.githubusercontent.com/{self.repo}/{self.branch}/{self.subpath}'
def as_human_friendly_url(self) -> str: 'Returns the human friendly url.' return f'https://github.com/{self.repo}/blob/{self.branch}/{self.subpath}'
-315,517,247,223,268,350
Returns the human friendly url.
tensorflow_datasets/core/github_api/github_path.py
as_human_friendly_url
YangDong2002/datasets
python
def as_human_friendly_url(self) -> str: return f'https://github.com/{self.repo}/blob/{self.branch}/{self.subpath}'
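A small sketch of the two URL flavours built by `as_raw_url` and `as_human_friendly_url`, using a hypothetical repo, branch, and subpath.

```python
# Hypothetical repo/branch/subpath; the two URLs differ only in host and the blob/ segment.
repo, branch, subpath = 'tensorflow/datasets', 'master', 'tensorflow_datasets/core/__init__.py'

raw_url = f'https://raw.githubusercontent.com/{repo}/{branch}/{subpath}'      # plain file content
human_url = f'https://github.com/{repo}/blob/{branch}/{subpath}'              # rendered web UI page
print(raw_url)
print(human_url)
```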
def iterdir(self) -> Iterator['GithubPath']: 'Yields the sub-paths.' if (not self.is_dir()): raise NotADirectoryError(f'{self.subpath} is not a directory.') for filename in self.github_tree.files_per_folder[self.subpath]: (yield (self / filename.name))
-7,273,975,935,590,726,000
Yields the sub-paths.
tensorflow_datasets/core/github_api/github_path.py
iterdir
YangDong2002/datasets
python
def iterdir(self) -> Iterator['GithubPath']: if (not self.is_dir()): raise NotADirectoryError(f'{self.subpath} is not a directory.') for filename in self.github_tree.files_per_folder[self.subpath]: (yield (self / filename.name))
def is_dir(self) -> bool: 'Returns True if the path is a directory or submodule.' return self.github_tree.is_folder(self.subpath)
-7,686,961,024,988,698,000
Returns True if the path is a directory or submodule.
tensorflow_datasets/core/github_api/github_path.py
is_dir
YangDong2002/datasets
python
def is_dir(self) -> bool: return self.github_tree.is_folder(self.subpath)
def is_file(self) -> bool: 'Returns True if the path is a file.' return self.github_tree.is_file(pathlib.PurePosixPath(self.subpath))
-4,208,411,006,704,021,500
Returns True if the path is a file.
tensorflow_datasets/core/github_api/github_path.py
is_file
YangDong2002/datasets
python
def is_file(self) -> bool: return self.github_tree.is_file(pathlib.PurePosixPath(self.subpath))
def exists(self) -> bool: 'Returns True if the path exists.' return (self.is_dir() or self.is_file())
3,038,122,737,878,214,700
Returns True if the path exists.
tensorflow_datasets/core/github_api/github_path.py
exists
YangDong2002/datasets
python
def exists(self) -> bool: return (self.is_dir() or self.is_file())
def read_bytes(self) -> bytes: 'Returns the file content as bytes.' url = self.as_raw_url() return get_content(url)
-303,842,329,040,540,740
Returns the file content as bytes.
tensorflow_datasets/core/github_api/github_path.py
read_bytes
YangDong2002/datasets
python
def read_bytes(self) -> bytes: url = self.as_raw_url() return get_content(url)
def read_text(self, encoding: Optional[str]=None) -> str: 'Returns the file content as string.' return self.read_bytes().decode(encoding=(encoding or 'utf-8'))
-1,727,593,445,862,952,400
Returns the file content as string.
tensorflow_datasets/core/github_api/github_path.py
read_text
YangDong2002/datasets
python
def read_text(self, encoding: Optional[str]=None) -> str: return self.read_bytes().decode(encoding=(encoding or 'utf-8'))
def copy(self, dst: utils.PathLike, overwrite: bool=False) -> utils.ReadWritePath: 'Copy the current file to the given destination.\n\n Args:\n dst: Target file. It can be any PathLike compatible path (e.g. `gs://...`)\n overwrite: Whether the file should be overwritten or not\n\n Returns:\n The new created file.\n\n Raises:\n FileExistsError: If `overwrite` is false and destination exists.\n ' dst = utils.as_path(dst) if ((not overwrite) and dst.exists()): raise FileExistsError(f'Cannot copy {self}. Destination {dst} exists.') dst.write_bytes(self.read_bytes()) return dst
-5,535,817,320,094,609,000
Copy the current file to the given destination. Args: dst: Target file. It can be any PathLike compatible path (e.g. `gs://...`) overwrite: Whether the file should be overwritten or not Returns: The new created file. Raises: FileExistsError: If `overwrite` is false and destination exists.
tensorflow_datasets/core/github_api/github_path.py
copy
YangDong2002/datasets
python
def copy(self, dst: utils.PathLike, overwrite: bool=False) -> utils.ReadWritePath: 'Copy the current file to the given destination.\n\n Args:\n dst: Target file. It can be any PathLike compatible path (e.g. `gs://...`)\n overwrite: Whether the file should be overwritten or not\n\n Returns:\n The new created file.\n\n Raises:\n FileExistsError: If `overwrite` is false and destination exists.\n ' dst = utils.as_path(dst) if ((not overwrite) and dst.exists()): raise FileExistsError(f'Cannot copy {self}. Destination {dst} exists.') dst.write_bytes(self.read_bytes()) return dst
def glance_detail(request): '\n OpenStack specific action to get image details from Glance\n :param request: HTTPRequest\n :return: rendered HTML\n ' required_fields = set(['imageId']) if (not required_fields.issubset(request.POST)): return render(request, 'ajax/ajaxError.html', {'error': 'Invalid Parameters in POST'}) image_id = request.POST['imageId'] image = get_object_or_404(Image, pk=image_id) if openstackUtils.connect_to_openstack(): glance_id = openstackUtils.get_image_id_for_name(image.name) glance_json = dict() if (glance_id is not None): glance_json = openstackUtils.get_glance_image_detail(glance_id) logger.debug(('glance json of %s is' % glance_id)) logger.debug(glance_json) logger.debug('---') return render(request, 'images/glance_detail.html', {'image': glance_json, 'image_id': image_id, 'glance_id': glance_id, 'openstack_host': configuration.openstack_host}) else: return render(request, 'error.html', {'error': 'Could not connect to OpenStack'})
1,670,277,878,723,460,400
OpenStack specific action to get image details from Glance :param request: HTTPRequest :return: rendered HTML
images/views.py
glance_detail
Juniper/wistar
python
def glance_detail(request): '\n OpenStack specific action to get image details from Glance\n :param request: HTTPRequest\n :return: rendered HTML\n ' required_fields = set(['imageId']) if (not required_fields.issubset(request.POST)): return render(request, 'ajax/ajaxError.html', {'error': 'Invalid Parameters in POST'}) image_id = request.POST['imageId'] image = get_object_or_404(Image, pk=image_id) if openstackUtils.connect_to_openstack(): glance_id = openstackUtils.get_image_id_for_name(image.name) glance_json = dict() if (glance_id is not None): glance_json = openstackUtils.get_glance_image_detail(glance_id) logger.debug(('glance json of %s is' % glance_id)) logger.debug(glance_json) logger.debug('---') return render(request, 'images/glance_detail.html', {'image': glance_json, 'image_id': image_id, 'glance_id': glance_id, 'openstack_host': configuration.openstack_host}) else: return render(request, 'error.html', {'error': 'Could not connect to OpenStack'})
def import_from_glance(request, glance_id): '\n Creates a local db entry for the glance image\n Everything in Wistar depends on a db entry in the Images table\n If you have an existing openstack cluster, you may want to import those\n images here without having to physically copy the images to local disk\n :param request: HTTPRequest object\n :param glance_id: id of the glance image to import\n :return: redirect to /images/image_id\n ' if openstackUtils.connect_to_openstack(): image_details = openstackUtils.get_glance_image_detail(glance_id) image = Image() image.description = 'Imported from Glance' image.name = image_details['name'] image.type = 'blank' image.save() logger.debug('All done') return HttpResponseRedirect(('/images/%s' % image.id)) context = {'error': 'Could not connect to OpenStack'} return render(request, 'error.html', context)
9,054,538,078,449,184,000
Creates a local db entry for the glance image Everything in Wistar depends on a db entry in the Images table If you have an existing openstack cluster, you may want to import those images here without having to physically copy the images to local disk :param request: HTTPRequest object :param glance_id: id of the glance image to import :return: redirect to /images/image_id
images/views.py
import_from_glance
Juniper/wistar
python
def import_from_glance(request, glance_id): '\n Creates a local db entry for the glance image\n Everything in Wistar depends on a db entry in the Images table\n If you have an existing openstack cluster, you may want to import those\n images here without having to physically copy the images to local disk\n :param request: HTTPRequest object\n :param glance_id: id of the glance image to import\n :return: redirect to /images/image_id\n ' if openstackUtils.connect_to_openstack(): image_details = openstackUtils.get_glance_image_detail(glance_id) image = Image() image.description = 'Imported from Glance' image.name = image_details['name'] image.type = 'blank' image.save() logger.debug('All done') return HttpResponseRedirect(('/images/%s' % image.id)) context = {'error': 'Could not connect to OpenStack'} return render(request, 'error.html', context)
def copy_bits(self, dest): 'copies the bits of the image to the provided destination address' desc = _NuiSurfaceDesc() PlanarImage._GetLevelDesc(self, 0, ctypes.byref(desc)) rect = _NuiLockedRect() PlanarImage._LockRect(self, 0, ctypes.byref(rect), None, 0) ctypes.memmove(dest, rect.bits, (desc.height * rect.pitch)) PlanarImage._UnlockRect(self, 0)
-4,890,728,892,160,774,000
copies the bits of the image to the provided destination address
pykinect/nui/structs.py
copy_bits
howieraem/KinectActionDetection
python
def copy_bits(self, dest): desc = _NuiSurfaceDesc() PlanarImage._GetLevelDesc(self, 0, ctypes.byref(desc)) rect = _NuiLockedRect() PlanarImage._LockRect(self, 0, ctypes.byref(rect), None, 0) ctypes.memmove(dest, rect.bits, (desc.height * rect.pitch)) PlanarImage._UnlockRect(self, 0)
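A toy illustration of the `ctypes.memmove` pattern used by `copy_bits`, with made-up `height` and `pitch` values and plain ctypes arrays standing in for the Kinect surface buffers.

```python
import ctypes

height, pitch = 4, 16                          # hypothetical surface layout
n = height * pitch

src = (ctypes.c_ubyte * n)(*range(n))          # stands in for rect.bits (the locked surface)
dst = (ctypes.c_ubyte * n)()                   # caller-provided destination buffer

# Same call shape as memmove(dest, rect.bits, desc.height * rect.pitch) in copy_bits.
ctypes.memmove(dst, src, n)
assert bytes(dst) == bytes(src)
```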
def calculate_bone_orientations(self): 'Calculate bone orientations for a skeleton.\n\n The function calculates hierarchical and absolute joint angles for the skeleton, which can\n be used in animating an avatar (Avateering). The HipCenter joint is the root of the hierarchy,\n and describes an absolute rotation in the right-hand camera coordinate system. All other\n joints describe rotations relative to their parent joint orientation. The angles are returned\n in the same order as the joints are defined.\n\n Returns a sequence of SkeletonBoneOrientation objects.' arr = (SkeletonBoneOrientation * JointId.Count)() _NuiSkeletonCalculateBoneOrientations(self, arr) return tuple(arr)
2,338,955,707,845,455,400
Calculate bone orientations for a skeleton. The function calculates hierarchical and absolute joint angles for the skeleton, which can be used in animating an avatar (Avateering). The HipCenter joint is the root of the hierarchy, and describes an absolute rotation in the right-hand camera coordinate system. All other joints describe rotations relative to their parent joint orientation. The angles are returned in the same order as the joints are defined. Returns a sequence of SkeletonBoneOrientation objects.
pykinect/nui/structs.py
calculate_bone_orientations
howieraem/KinectActionDetection
python
def calculate_bone_orientations(self): 'Calculate bone orientations for a skeleton.\n\n The function calculates hierarchical and absolute joint angles for the skeleton, which can\n be used in animating an avatar (Avateering). The HipCenter joint is the root of the hierarchy,\n and describes an absolute rotation in the right-hand camera coordinate system. All other\n joints describe rotations relative to their parent joint orientation. The angles are returned\n in the same order as the joints are defined.\n\n Returns a sequence of SkeletonBoneOrientation objects.' arr = (SkeletonBoneOrientation * JointId.Count)() _NuiSkeletonCalculateBoneOrientations(self, arr) return tuple(arr)
@pytest.mark.parametrize('query_results, results_count', [([], 0), (MOCK_QUERY_RESULTS, len(MOCK_QUERY_RESULTS)), (MOCK_QUERY_RESULTS[:2], 2)]) def test_formats_count(app_instance, mocker, query_results, results_count): 'Test that results match high-level expectations.' query = mocker.patch('AIPscan.Data.report_data._formats_count_query') query.return_value = query_results get_ss = mocker.patch('AIPscan.Data._get_storage_service') get_ss.return_value = MOCK_STORAGE_SERVICE test_location = create_test_storage_location() get_location = mocker.patch('AIPscan.Data._get_storage_location') get_location.return_value = test_location report = report_data.formats_count(storage_service_id=MOCK_STORAGE_SERVICE_ID, start_date=datetime.min, end_date=datetime.max, storage_location_id=test_location.id) assert (report[fields.FIELD_STORAGE_NAME] == MOCK_STORAGE_SERVICE_NAME) assert (report[fields.FIELD_STORAGE_LOCATION] == test_location.description) assert (len(report[fields.FIELD_FORMATS]) == results_count)
-1,554,558,207,751,655,700
Test that results match high-level expectations.
AIPscan/Data/tests/test_formats_count.py
test_formats_count
artefactual-labs/AIPscan
python
@pytest.mark.parametrize('query_results, results_count', [([], 0), (MOCK_QUERY_RESULTS, len(MOCK_QUERY_RESULTS)), (MOCK_QUERY_RESULTS[:2], 2)]) def test_formats_count(app_instance, mocker, query_results, results_count): query = mocker.patch('AIPscan.Data.report_data._formats_count_query') query.return_value = query_results get_ss = mocker.patch('AIPscan.Data._get_storage_service') get_ss.return_value = MOCK_STORAGE_SERVICE test_location = create_test_storage_location() get_location = mocker.patch('AIPscan.Data._get_storage_location') get_location.return_value = test_location report = report_data.formats_count(storage_service_id=MOCK_STORAGE_SERVICE_ID, start_date=datetime.min, end_date=datetime.max, storage_location_id=test_location.id) assert (report[fields.FIELD_STORAGE_NAME] == MOCK_STORAGE_SERVICE_NAME) assert (report[fields.FIELD_STORAGE_LOCATION] == test_location.description) assert (len(report[fields.FIELD_FORMATS]) == results_count)
@pytest.mark.parametrize('test_format', [mock_result for mock_result in MOCK_QUERY_RESULTS]) def test_formats_count_elements(app_instance, mocker, test_format): 'Test that structure of versions data matches expectations.' mock_query = mocker.patch('AIPscan.Data.report_data._formats_count_query') mock_query.return_value = [test_format] mock_get_ss_name = mocker.patch('AIPscan.Data._get_storage_service') mock_get_ss_name.return_value = MOCK_STORAGE_SERVICE report = report_data.formats_count(MOCK_STORAGE_SERVICE_ID, datetime.min, datetime.max) report_format = report[fields.FIELD_FORMATS][0] assert (test_format.file_format == report_format.get(fields.FIELD_FORMAT)) assert (test_format.file_count == report_format.get(fields.FIELD_COUNT)) assert (test_format.total_size == report_format.get(fields.FIELD_SIZE))
6,305,090,356,362,009,000
Test that structure of versions data matches expectations.
AIPscan/Data/tests/test_formats_count.py
test_formats_count_elements
artefactual-labs/AIPscan
python
@pytest.mark.parametrize('test_format', [mock_result for mock_result in MOCK_QUERY_RESULTS]) def test_formats_count_elements(app_instance, mocker, test_format): mock_query = mocker.patch('AIPscan.Data.report_data._formats_count_query') mock_query.return_value = [test_format] mock_get_ss_name = mocker.patch('AIPscan.Data._get_storage_service') mock_get_ss_name.return_value = MOCK_STORAGE_SERVICE report = report_data.formats_count(MOCK_STORAGE_SERVICE_ID, datetime.min, datetime.max) report_format = report[fields.FIELD_FORMATS][0] assert (test_format.file_format == report_format.get(fields.FIELD_FORMAT)) assert (test_format.file_count == report_format.get(fields.FIELD_COUNT)) assert (test_format.total_size == report_format.get(fields.FIELD_SIZE))
@pytest.mark.parametrize('start_date, end_date, format_count, total_file_count, total_file_size', [(None, None, 2, 3, TOTAL_FILE_SIZE), (DATE_BEFORE_AIP_1, None, 2, 3, TOTAL_FILE_SIZE), (AIP_1_CREATION_DATE, None, 2, 3, TOTAL_FILE_SIZE), (DATE_AFTER_AIP_1, None, 2, 2, JPEG_1_02_FILE_SIZE), (None, DATE_BEFORE_AIP_2, 1, 1, JPEG_1_01_FILE_SIZE), (None, AIP_2_CREATION_DATE, 2, 3, TOTAL_FILE_SIZE), (None, DATE_AFTER_AIP_2, 2, 3, TOTAL_FILE_SIZE), ('2019-01-01', '2019-01-02', 0, 0, 0), (True, 'NOT A DATE', 2, 3, TOTAL_FILE_SIZE)]) def test_formats_count_contents(app_with_populated_format_versions, start_date, end_date, format_count, total_file_count, total_file_size): 'Test that content of response matches expectations.\n\n This integration test uses a pre-populated fixture to verify that\n the database access layer of our endpoint returns what we expect.\n ' results = report_data.formats_count(storage_service_id=1, start_date=parse_datetime_bound(start_date), end_date=parse_datetime_bound(end_date, upper=True)) formats = results[fields.FIELD_FORMATS] assert (len(formats) == format_count) assert (sum((format_.get(fields.FIELD_COUNT, 0) for format_ in formats)) == total_file_count) assert (sum((format_.get(fields.FIELD_SIZE, 0) for format_ in formats)) == total_file_size)
-4,245,950,110,821,830,000
Test that content of response matches expectations. This integration test uses a pre-populated fixture to verify that the database access layer of our endpoint returns what we expect.
AIPscan/Data/tests/test_formats_count.py
test_formats_count_contents
artefactual-labs/AIPscan
python
@pytest.mark.parametrize('start_date, end_date, format_count, total_file_count, total_file_size', [(None, None, 2, 3, TOTAL_FILE_SIZE), (DATE_BEFORE_AIP_1, None, 2, 3, TOTAL_FILE_SIZE), (AIP_1_CREATION_DATE, None, 2, 3, TOTAL_FILE_SIZE), (DATE_AFTER_AIP_1, None, 2, 2, JPEG_1_02_FILE_SIZE), (None, DATE_BEFORE_AIP_2, 1, 1, JPEG_1_01_FILE_SIZE), (None, AIP_2_CREATION_DATE, 2, 3, TOTAL_FILE_SIZE), (None, DATE_AFTER_AIP_2, 2, 3, TOTAL_FILE_SIZE), ('2019-01-01', '2019-01-02', 0, 0, 0), (True, 'NOT A DATE', 2, 3, TOTAL_FILE_SIZE)]) def test_formats_count_contents(app_with_populated_format_versions, start_date, end_date, format_count, total_file_count, total_file_size): 'Test that content of response matches expectations.\n\n This integration test uses a pre-populated fixture to verify that\n the database access layer of our endpoint returns what we expect.\n ' results = report_data.formats_count(storage_service_id=1, start_date=parse_datetime_bound(start_date), end_date=parse_datetime_bound(end_date, upper=True)) formats = results[fields.FIELD_FORMATS] assert (len(formats) == format_count) assert (sum((format_.get(fields.FIELD_COUNT, 0) for format_ in formats)) == total_file_count) assert (sum((format_.get(fields.FIELD_SIZE, 0) for format_ in formats)) == total_file_size)
def __init__(self, ct_logs, db, cert_db, temp_db_factory, monitor_state_dir, agent=None, state_keeper_class=None): 'Initialize from a CtLogs proto.' threading.Thread.__init__(self) self.__monitors = [] self.__db = db if (not agent): agent = twisted_client.Agent(reactor) if (not state_keeper_class): state_keeper_class = state.StateKeeper for log in ct_logs.ctlog: if ((not log.log_server) or (not log.log_id) or (not log.public_key_info)): raise RuntimeError(('Cannot start monitor: log proto has missing or empty fields: %s' % log)) temp_db = temp_db_factory.create_storage(log.log_server) client = log_client.AsyncLogClient(agent, log.log_server, temp_db) hasher = merkle.TreeHasher() verifier = verify.LogVerifier(log.public_key_info, merkle.MerkleVerifier(hasher)) log_id_urlsafe = log.log_id.replace('/', '_').replace('+', '-') state_keeper = state_keeper_class(((monitor_state_dir + '/') + log_id_urlsafe)) log_key = db.get_log_id(log.log_server) self.__monitors.append(monitor.Monitor(client, verifier, hasher, db, cert_db, log_key, state_keeper)) self.__last_update_start_time = 0 self.__stopped = False self.__called_later = None
-455,337,136,996,825,500
Initialize from a CtLogs proto.
vendor/github.com/google/certificate-transparency/python/ct/client/prober.py
__init__
DavadDi/archon
python
def __init__(self, ct_logs, db, cert_db, temp_db_factory, monitor_state_dir, agent=None, state_keeper_class=None): threading.Thread.__init__(self) self.__monitors = [] self.__db = db if (not agent): agent = twisted_client.Agent(reactor) if (not state_keeper_class): state_keeper_class = state.StateKeeper for log in ct_logs.ctlog: if ((not log.log_server) or (not log.log_id) or (not log.public_key_info)): raise RuntimeError(('Cannot start monitor: log proto has missing or empty fields: %s' % log)) temp_db = temp_db_factory.create_storage(log.log_server) client = log_client.AsyncLogClient(agent, log.log_server, temp_db) hasher = merkle.TreeHasher() verifier = verify.LogVerifier(log.public_key_info, merkle.MerkleVerifier(hasher)) log_id_urlsafe = log.log_id.replace('/', '_').replace('+', '-') state_keeper = state_keeper_class(((monitor_state_dir + '/') + log_id_urlsafe)) log_key = db.get_log_id(log.log_server) self.__monitors.append(monitor.Monitor(client, verifier, hasher, db, cert_db, log_key, state_keeper)) self.__last_update_start_time = 0 self.__stopped = False self.__called_later = None
def prettify(self, elem): '\n Return a pretty-printed XML string for the Element.\n ' rough_string = ElementTree.tostring(elem, 'utf8') root = etree.fromstring(rough_string) return etree.tostring(root, pretty_print=True, encoding=ENCODE_METHOD).replace(' '.encode(), '\t'.encode()) 'reparsed = minidom.parseString(rough_string)\n return reparsed.toprettyxml(indent="\t", encoding=ENCODE_METHOD)'
-2,985,847,206,361,713,700
Return a pretty-printed XML string for the Element.
libs/pascal_voc_io.py
prettify
yuxluo/umtri_video_label
python
def prettify(self, elem): '\n \n ' rough_string = ElementTree.tostring(elem, 'utf8') root = etree.fromstring(rough_string) return etree.tostring(root, pretty_print=True, encoding=ENCODE_METHOD).replace(' '.encode(), '\t'.encode()) 'reparsed = minidom.parseString(rough_string)\n return reparsed.toprettyxml(indent="\t", encoding=ENCODE_METHOD)'
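A tiny standalone example of the round-trip `prettify` performs (serialize with `ElementTree`, re-parse and indent with `lxml`); it assumes `lxml` is installed and hard-codes the `utf8` encoding in place of `ENCODE_METHOD`.

```python
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement
from lxml import etree

top = Element('annotation')
SubElement(top, 'folder').text = 'images'

# Serialize with the stdlib, then re-parse with lxml to get an indented byte string.
rough = ElementTree.tostring(top, 'utf8')
pretty = etree.tostring(etree.fromstring(rough), pretty_print=True, encoding='utf8')
print(pretty.decode('utf8'))
```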
def genXML(self): '\n Return XML root\n ' if ((self.filename is None) or (self.foldername is None) or (self.imgSize is None)): return None top = Element('annotation') if self.verified: top.set('verified', 'yes') folder = SubElement(top, 'data_set') folder.text = self.foldername size_part = SubElement(top, 'size') width = SubElement(size_part, 'width') height = SubElement(size_part, 'height') depth = SubElement(size_part, 'depth') width.text = str(self.imgSize[1]) height.text = str(self.imgSize[0]) if (len(self.imgSize) == 3): depth.text = str(self.imgSize[2]) else: depth.text = '1' return top
-2,343,235,265,172,610,600
Return XML root
libs/pascal_voc_io.py
genXML
yuxluo/umtri_video_label
python
def genXML(self): '\n \n ' if ((self.filename is None) or (self.foldername is None) or (self.imgSize is None)): return None top = Element('annotation') if self.verified: top.set('verified', 'yes') folder = SubElement(top, 'data_set') folder.text = self.foldername size_part = SubElement(top, 'size') width = SubElement(size_part, 'width') height = SubElement(size_part, 'height') depth = SubElement(size_part, 'depth') width.text = str(self.imgSize[1]) height.text = str(self.imgSize[0]) if (len(self.imgSize) == 3): depth.text = str(self.imgSize[2]) else: depth.text = '1' return top
def log_to_dataframe(log_file, regex, headers, logformat): ' Function to transform log file to dataframe ' log_messages = [] linecount = 0 with open(log_file, 'r') as fin: for line in fin.readlines(): try: match = regex.search(line.strip()) message = [match.group(header) for header in headers] log_messages.append(message) linecount += 1 except Exception as e: pass logdf = pd.DataFrame(log_messages, columns=headers) logdf.insert(0, 'LineId', None) logdf['LineId'] = [(i + 1) for i in range(linecount)] return logdf
2,983,973,701,135,370,000
Function to transform log file to dataframe
logparser/SLCT/SLCT.py
log_to_dataframe
LogAnalysisTeam/logparser
python
def log_to_dataframe(log_file, regex, headers, logformat): ' ' log_messages = [] linecount = 0 with open(log_file, 'r') as fin: for line in fin.readlines(): try: match = regex.search(line.strip()) message = [match.group(header) for header in headers] log_messages.append(message) linecount += 1 except Exception as e: pass logdf = pd.DataFrame(log_messages, columns=headers) logdf.insert(0, 'LineId', None) logdf['LineId'] = [(i + 1) for i in range(linecount)] return logdf
def generate_logformat_regex(logformat): ' \n Function to generate regular expression to split log messages\n ' headers = [] splitters = re.split('(<[^<>]+>)', logformat) regex = '' for k in range(len(splitters)): if ((k % 2) == 0): splitter = re.sub(' +', '\\s+', splitters[k]) regex += splitter else: header = splitters[k].strip('<').strip('>') regex += ('(?P<%s>.*?)' % header) headers.append(header) regex = re.compile((('^' + regex) + '$')) return (headers, regex)
76,824,784,785,815,180
Function to generate regular expression to split log messages
logparser/SLCT/SLCT.py
generate_logformat_regex
LogAnalysisTeam/logparser
python
def generate_logformat_regex(logformat): ' \n \n ' headers = [] splitters = re.split('(<[^<>]+>)', logformat) regex = '' for k in range(len(splitters)): if ((k % 2) == 0): splitter = re.sub(' +', '\\s+', splitters[k]) regex += splitter else: header = splitters[k].strip('<').strip('>') regex += ('(?P<%s>.*?)' % header) headers.append(header) regex = re.compile((('^' + regex) + '$')) return (headers, regex)
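A short sketch of what `generate_logformat_regex` produces for a hypothetical `<...>` format string, and how one log line is split into the named groups that `log_to_dataframe` reads.

```python
import re

logformat = '<Date> <Time> <Level>: <Content>'          # hypothetical format string
headers, regex = generate_logformat_regex(logformat)

print(headers)        # ['Date', 'Time', 'Level', 'Content']
print(regex.pattern)  # ^(?P<Date>.*?)\s+(?P<Time>.*?)\s+(?P<Level>.*?):\s+(?P<Content>.*?)$

match = regex.search('2024-01-01 12:00:01 INFO: block served to client')
print([match.group(h) for h in headers])
# ['2024-01-01', '12:00:01', 'INFO', 'block served to client']
```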
@commands.command(hidden=True) @commands.is_owner() async def commandstats(self, ctx, limit=20): 'Shows command stats.\n\n Use a negative number for bottom instead of top.\n This is only for the current session.\n ' counter = self.bot.command_stats width = len(max(counter, key=len)) total = sum(counter.values()) if (limit > 0): common = counter.most_common(limit) else: common = counter.most_common()[limit:] output = '\n'.join((f'{k:<{width}}: {c}' for (k, c) in common)) (await ctx.send(f'''``` {output} ```'''))
-6,335,433,249,142,624,000
Shows command stats. Use a negative number for bottom instead of top. This is only for the current session.
cogs/stats.py
commandstats
ymypengueni/RoboDanny
python
@commands.command(hidden=True) @commands.is_owner() async def commandstats(self, ctx, limit=20): 'Shows command stats.\n\n Use a negative number for bottom instead of top.\n This is only for the current session.\n ' counter = self.bot.command_stats width = len(max(counter, key=len)) total = sum(counter.values()) if (limit > 0): common = counter.most_common(limit) else: common = counter.most_common()[limit:] output = '\n'.join((f'{k:<{width}}: {c}' for (k, c) in common)) (await ctx.send(f'``` {output} ```'))
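A standalone illustration of the `Counter` formatting inside `commandstats` (most-common entries padded to the widest command name), with made-up usage counts.

```python
from collections import Counter

counter = Counter({'tag': 42, 'help': 17, 'commandstats': 3})   # hypothetical usage counts
width = len(max(counter, key=len))                               # widest key sets the column width

output = '\n'.join(f'{k:<{width}}: {c}' for k, c in counter.most_common(2))
print(output)
# tag         : 42
# help        : 17
```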
@commands.command() async def uptime(self, ctx): 'Tells you how long the bot has been up for.' (await ctx.send(f'Uptime: **{self.get_bot_uptime()}**'))
1,641,823,471,392,304,400
Tells you how long the bot has been up for.
cogs/stats.py
uptime
ymypengueni/RoboDanny
python
@commands.command() async def uptime(self, ctx): (await ctx.send(f'Uptime: **{self.get_bot_uptime()}**'))
@commands.command() async def about(self, ctx): 'Tells you information about the bot itself.' revision = self.get_last_commits() embed = discord.Embed(description=('Latest Changes:\n' + revision)) embed.title = 'Official Bot Server Invite' embed.url = 'https://discord.gg/DWEaqMy' embed.colour = discord.Colour.blurple() owner = self.bot.get_user(self.bot.owner_id) embed.set_author(name=str(owner), icon_url=owner.avatar_url) total_members = 0 total_online = 0 offline = discord.Status.offline for member in self.bot.get_all_members(): total_members += 1 if (member.status is not offline): total_online += 1 total_unique = len(self.bot.users) text = 0 voice = 0 guilds = 0 for guild in self.bot.guilds: guilds += 1 for channel in guild.channels: if isinstance(channel, discord.TextChannel): text += 1 elif isinstance(channel, discord.VoiceChannel): voice += 1 embed.add_field(name='Members', value=f'''{total_members} total {total_unique} unique {total_online} unique online''') embed.add_field(name='Channels', value=f'''{(text + voice)} total {text} text {voice} voice''') memory_usage = (self.process.memory_full_info().uss / (1024 ** 2)) cpu_usage = (self.process.cpu_percent() / psutil.cpu_count()) embed.add_field(name='Process', value=f'''{memory_usage:.2f} MiB {cpu_usage:.2f}% CPU''') version = pkg_resources.get_distribution('discord.py').version embed.add_field(name='Guilds', value=guilds) embed.add_field(name='Commands Run', value=sum(self.bot.command_stats.values())) embed.add_field(name='Uptime', value=self.get_bot_uptime(brief=True)) embed.set_footer(text=f'Made with discord.py v{version}', icon_url='http://i.imgur.com/5BFecvA.png') embed.timestamp = datetime.datetime.utcnow() (await ctx.send(embed=embed))
-2,914,060,566,776,610,300
Tells you information about the bot itself.
cogs/stats.py
about
ymypengueni/RoboDanny
python
@commands.command() async def about(self, ctx): revision = self.get_last_commits() embed = discord.Embed(description=('Latest Changes:\n' + revision)) embed.title = 'Official Bot Server Invite' embed.url = 'https://discord.gg/DWEaqMy' embed.colour = discord.Colour.blurple() owner = self.bot.get_user(self.bot.owner_id) embed.set_author(name=str(owner), icon_url=owner.avatar_url) total_members = 0 total_online = 0 offline = discord.Status.offline for member in self.bot.get_all_members(): total_members += 1 if (member.status is not offline): total_online += 1 total_unique = len(self.bot.users) text = 0 voice = 0 guilds = 0 for guild in self.bot.guilds: guilds += 1 for channel in guild.channels: if isinstance(channel, discord.TextChannel): text += 1 elif isinstance(channel, discord.VoiceChannel): voice += 1 embed.add_field(name='Members', value=f'{total_members} total {total_unique} unique {total_online} unique online') embed.add_field(name='Channels', value=f'{(text + voice)} total {text} text {voice} voice') memory_usage = (self.process.memory_full_info().uss / (1024 ** 2)) cpu_usage = (self.process.cpu_percent() / psutil.cpu_count()) embed.add_field(name='Process', value=f'{memory_usage:.2f} MiB {cpu_usage:.2f}% CPU') version = pkg_resources.get_distribution('discord.py').version embed.add_field(name='Guilds', value=guilds) embed.add_field(name='Commands Run', value=sum(self.bot.command_stats.values())) embed.add_field(name='Uptime', value=self.get_bot_uptime(brief=True)) embed.set_footer(text=f'Made with discord.py v{version}', icon_url='http://i.imgur.com/5BFecvA.png') embed.timestamp = datetime.datetime.utcnow() (await ctx.send(embed=embed))
@commands.group(invoke_without_command=True) @commands.guild_only() @commands.cooldown(1, 30.0, type=commands.BucketType.member) async def stats(self, ctx, *, member: discord.Member=None): 'Tells you command usage stats for the server or a member.' async with ctx.typing(): if (member is None): (await self.show_guild_stats(ctx)) else: (await self.show_member_stats(ctx, member))
8,762,986,668,212,130,000
Tells you command usage stats for the server or a member.
cogs/stats.py
stats
ymypengueni/RoboDanny
python
@commands.group(invoke_without_command=True) @commands.guild_only() @commands.cooldown(1, 30.0, type=commands.BucketType.member) async def stats(self, ctx, *, member: discord.Member=None): async with ctx.typing(): if (member is None): (await self.show_guild_stats(ctx)) else: (await self.show_member_stats(ctx, member))
@stats.command(name='global') @commands.is_owner() async def stats_global(self, ctx): 'Global all time command statistics.' query = 'SELECT COUNT(*) FROM commands;' total = (await ctx.db.fetchrow(query)) e = discord.Embed(title='Command Stats', colour=discord.Colour.blurple()) e.description = f'{total[0]} commands used.' lookup = ('🥇', '🥈', '🥉', '🏅', '🏅') query = 'SELECT command, COUNT(*) AS "uses"\n FROM commands\n GROUP BY command\n ORDER BY "uses" DESC\n LIMIT 5;\n ' records = (await ctx.db.fetch(query)) value = '\n'.join((f'{lookup[index]}: {command} ({uses} uses)' for (index, (command, uses)) in enumerate(records))) e.add_field(name='Top Commands', value=value, inline=False) query = 'SELECT guild_id, COUNT(*) AS "uses"\n FROM commands\n GROUP BY guild_id\n ORDER BY "uses" DESC\n LIMIT 5;\n ' records = (await ctx.db.fetch(query)) value = [] for (index, (guild_id, uses)) in enumerate(records): if (guild_id is None): guild = 'Private Message' else: guild = self.censor_object((self.bot.get_guild(guild_id) or f'<Unknown {guild_id}>')) emoji = lookup[index] value.append(f'{emoji}: {guild} ({uses} uses)') e.add_field(name='Top Guilds', value='\n'.join(value), inline=False) query = 'SELECT author_id, COUNT(*) AS "uses"\n FROM commands\n GROUP BY author_id\n ORDER BY "uses" DESC\n LIMIT 5;\n ' records = (await ctx.db.fetch(query)) value = [] for (index, (author_id, uses)) in enumerate(records): user = self.censor_object((self.bot.get_user(author_id) or f'<Unknown {author_id}>')) emoji = lookup[index] value.append(f'{emoji}: {user} ({uses} uses)') e.add_field(name='Top Users', value='\n'.join(value), inline=False) (await ctx.send(embed=e))
-1,137,157,467,025,001,100
Global all time command statistics.
cogs/stats.py
stats_global
ymypengueni/RoboDanny
python
@stats.command(name='global') @commands.is_owner() async def stats_global(self, ctx): query = 'SELECT COUNT(*) FROM commands;' total = (await ctx.db.fetchrow(query)) e = discord.Embed(title='Command Stats', colour=discord.Colour.blurple()) e.description = f'{total[0]} commands used.' lookup = ('🥇', '🥈', '🥉', '🏅', '🏅') query = 'SELECT command, COUNT(*) AS "uses"\n FROM commands\n GROUP BY command\n ORDER BY "uses" DESC\n LIMIT 5;\n ' records = (await ctx.db.fetch(query)) value = '\n'.join((f'{lookup[index]}: {command} ({uses} uses)' for (index, (command, uses)) in enumerate(records))) e.add_field(name='Top Commands', value=value, inline=False) query = 'SELECT guild_id, COUNT(*) AS "uses"\n FROM commands\n GROUP BY guild_id\n ORDER BY "uses" DESC\n LIMIT 5;\n ' records = (await ctx.db.fetch(query)) value = [] for (index, (guild_id, uses)) in enumerate(records): if (guild_id is None): guild = 'Private Message' else: guild = self.censor_object((self.bot.get_guild(guild_id) or f'<Unknown {guild_id}>')) emoji = lookup[index] value.append(f'{emoji}: {guild} ({uses} uses)') e.add_field(name='Top Guilds', value='\n'.join(value), inline=False) query = 'SELECT author_id, COUNT(*) AS "uses"\n FROM commands\n GROUP BY author_id\n ORDER BY "uses" DESC\n LIMIT 5;\n ' records = (await ctx.db.fetch(query)) value = [] for (index, (author_id, uses)) in enumerate(records): user = self.censor_object((self.bot.get_user(author_id) or f'<Unknown {author_id}>')) emoji = lookup[index] value.append(f'{emoji}: {user} ({uses} uses)') e.add_field(name='Top Users', value='\n'.join(value), inline=False) (await ctx.send(embed=e))
@stats.command(name='today') @commands.is_owner() async def stats_today(self, ctx): 'Global command statistics for the day.' query = "SELECT failed, COUNT(*) FROM commands WHERE used > (CURRENT_TIMESTAMP - INTERVAL '1 day') GROUP BY failed;" total = (await ctx.db.fetch(query)) failed = 0 success = 0 question = 0 for (state, count) in total: if (state is False): success += count elif (state is True): failed += count else: question += count e = discord.Embed(title='Last 24 Hour Command Stats', colour=discord.Colour.blurple()) e.description = f'{((failed + success) + question)} commands used today. ({success} succeeded, {failed} failed, {question} unknown)' lookup = ('🥇', '🥈', '🥉', '🏅', '🏅') query = 'SELECT command, COUNT(*) AS "uses"\n FROM commands\n WHERE used > (CURRENT_TIMESTAMP - INTERVAL \'1 day\')\n GROUP BY command\n ORDER BY "uses" DESC\n LIMIT 5;\n ' records = (await ctx.db.fetch(query)) value = '\n'.join((f'{lookup[index]}: {command} ({uses} uses)' for (index, (command, uses)) in enumerate(records))) e.add_field(name='Top Commands', value=value, inline=False) query = 'SELECT guild_id, COUNT(*) AS "uses"\n FROM commands\n WHERE used > (CURRENT_TIMESTAMP - INTERVAL \'1 day\')\n GROUP BY guild_id\n ORDER BY "uses" DESC\n LIMIT 5;\n ' records = (await ctx.db.fetch(query)) value = [] for (index, (guild_id, uses)) in enumerate(records): if (guild_id is None): guild = 'Private Message' else: guild = self.censor_object((self.bot.get_guild(guild_id) or f'<Unknown {guild_id}>')) emoji = lookup[index] value.append(f'{emoji}: {guild} ({uses} uses)') e.add_field(name='Top Guilds', value='\n'.join(value), inline=False) query = 'SELECT author_id, COUNT(*) AS "uses"\n FROM commands\n WHERE used > (CURRENT_TIMESTAMP - INTERVAL \'1 day\')\n GROUP BY author_id\n ORDER BY "uses" DESC\n LIMIT 5;\n ' records = (await ctx.db.fetch(query)) value = [] for (index, (author_id, uses)) in enumerate(records): user = self.censor_object((self.bot.get_user(author_id) or f'<Unknown {author_id}>')) emoji = lookup[index] value.append(f'{emoji}: {user} ({uses} uses)') e.add_field(name='Top Users', value='\n'.join(value), inline=False) (await ctx.send(embed=e))
4,463,294,225,893,276,000
Global command statistics for the day.
cogs/stats.py
stats_today
ymypengueni/RoboDanny
python
@stats.command(name='today') @commands.is_owner() async def stats_today(self, ctx): query = "SELECT failed, COUNT(*) FROM commands WHERE used > (CURRENT_TIMESTAMP - INTERVAL '1 day') GROUP BY failed;" total = (await ctx.db.fetch(query)) failed = 0 success = 0 question = 0 for (state, count) in total: if (state is False): success += count elif (state is True): failed += count else: question += count e = discord.Embed(title='Last 24 Hour Command Stats', colour=discord.Colour.blurple()) e.description = f'{((failed + success) + question)} commands used today. ({success} succeeded, {failed} failed, {question} unknown)' lookup = ('🥇', '🥈', '🥉', '🏅', '🏅') query = 'SELECT command, COUNT(*) AS "uses"\n FROM commands\n WHERE used > (CURRENT_TIMESTAMP - INTERVAL \'1 day\')\n GROUP BY command\n ORDER BY "uses" DESC\n LIMIT 5;\n ' records = (await ctx.db.fetch(query)) value = '\n'.join((f'{lookup[index]}: {command} ({uses} uses)' for (index, (command, uses)) in enumerate(records))) e.add_field(name='Top Commands', value=value, inline=False) query = 'SELECT guild_id, COUNT(*) AS "uses"\n FROM commands\n WHERE used > (CURRENT_TIMESTAMP - INTERVAL \'1 day\')\n GROUP BY guild_id\n ORDER BY "uses" DESC\n LIMIT 5;\n ' records = (await ctx.db.fetch(query)) value = [] for (index, (guild_id, uses)) in enumerate(records): if (guild_id is None): guild = 'Private Message' else: guild = self.censor_object((self.bot.get_guild(guild_id) or f'<Unknown {guild_id}>')) emoji = lookup[index] value.append(f'{emoji}: {guild} ({uses} uses)') e.add_field(name='Top Guilds', value='\n'.join(value), inline=False) query = 'SELECT author_id, COUNT(*) AS "uses"\n FROM commands\n WHERE used > (CURRENT_TIMESTAMP - INTERVAL \'1 day\')\n GROUP BY author_id\n ORDER BY "uses" DESC\n LIMIT 5;\n ' records = (await ctx.db.fetch(query)) value = [] for (index, (author_id, uses)) in enumerate(records): user = self.censor_object((self.bot.get_user(author_id) or f'<Unknown {author_id}>')) emoji = lookup[index] value.append(f'{emoji}: {user} ({uses} uses)') e.add_field(name='Top Users', value='\n'.join(value), inline=False) (await ctx.send(embed=e))
@commands.command(hidden=True) @commands.is_owner() async def bothealth(self, ctx): 'Various bot health monitoring tools.' HEALTHY = discord.Colour(value=4437377) UNHEALTHY = discord.Colour(value=15747399) WARNING = discord.Colour(value=15769159) total_warnings = 0 embed = discord.Embed(title='Bot Health Report', colour=HEALTHY) pool = self.bot.pool total_waiting = len(pool._queue._getters) current_generation = pool._generation description = [f'Total `Pool.acquire` Waiters: {total_waiting}', f'Current Pool Generation: {current_generation}', f'Connections In Use: {(len(pool._holders) - pool._queue.qsize())}'] questionable_connections = 0 connection_value = [] for (index, holder) in enumerate(pool._holders, start=1): generation = holder._generation in_use = (holder._in_use is not None) is_closed = ((holder._con is None) or holder._con.is_closed()) display = f'gen={holder._generation} in_use={in_use} closed={is_closed}' questionable_connections += any((in_use, (generation != current_generation))) connection_value.append(f'<Holder i={index} {display}>') joined_value = '\n'.join(connection_value) embed.add_field(name='Connections', value=f'''```py {joined_value} ```''', inline=False) spam_control = self.bot.spam_control being_spammed = [str(key) for (key, value) in spam_control._cache.items() if (value._tokens == 0)] description.append(f"Current Spammers: {(', '.join(being_spammed) if being_spammed else 'None')}") description.append(f'Questionable Connections: {questionable_connections}') total_warnings += questionable_connections if being_spammed: embed.colour = WARNING total_warnings += 1 try: task_retriever = asyncio.Task.all_tasks except AttributeError: task_retriever = asyncio.all_tasks else: all_tasks = task_retriever(loop=self.bot.loop) event_tasks = [t for t in all_tasks if (('Client._run_event' in repr(t)) and (not t.done()))] cogs_directory = os.path.dirname(__file__) tasks_directory = os.path.join('discord', 'ext', 'tasks', '__init__.py') inner_tasks = [t for t in all_tasks if ((cogs_directory in repr(t)) or (tasks_directory in repr(t)))] bad_inner_tasks = ', '.join((hex(id(t)) for t in inner_tasks if (t.done() and (t._exception is not None)))) total_warnings += bool(bad_inner_tasks) embed.add_field(name='Inner Tasks', value=f'''Total: {len(inner_tasks)} Failed: {(bad_inner_tasks or 'None')}''') embed.add_field(name='Events Waiting', value=f'Total: {len(event_tasks)}', inline=False) command_waiters = len(self._data_batch) is_locked = self._batch_lock.locked() description.append(f'Commands Waiting: {command_waiters}, Batch Locked: {is_locked}') yesterday = (datetime.datetime.utcnow() - datetime.timedelta(days=1)) total_resumes = sum((1 for dt in self._resumes if (dt > yesterday))) identifies = {shard_id: sum((1 for dt in dates if (dt > yesterday))) for (shard_id, dates) in self._identifies.items()} absolute_total_identifies = sum(identifies.values()) resume_info_builder = [f'Total RESUMEs: {total_resumes}', f'Total IDENTIFYs: {absolute_total_identifies}'] for (shard_id, total) in identifies.items(): resume_info_builder.append(f'Shard ID {shard_id} IDENTIFYs: {total}') if (absolute_total_identifies >= (len(self.bot.shards) * 5)): total_warnings += 1 embed.colour = WARNING embed.add_field(name='Gateway (last 24 hours)', value='\n'.join(resume_info_builder), inline=False) memory_usage = (self.process.memory_full_info().uss / (1024 ** 2)) cpu_usage = (self.process.cpu_percent() / psutil.cpu_count()) embed.add_field(name='Process', value=f'''{memory_usage:.2f} MiB {cpu_usage:.2f}% CPU''', inline=False) global_rate_limit = (not self.bot.http._global_over.is_set()) description.append(f'Global Rate Limit: {global_rate_limit}') if (command_waiters >= 8): total_warnings += 1 embed.colour = WARNING if (global_rate_limit or (total_warnings >= 9)): embed.colour = UNHEALTHY embed.set_footer(text=f'{total_warnings} warning(s)') embed.description = '\n'.join(description) (await ctx.send(embed=embed))
2,352,962,447,775,214,600
Various bot health monitoring tools.
cogs/stats.py
bothealth
ymypengueni/RoboDanny
python
@commands.command(hidden=True) @commands.is_owner() async def bothealth(self, ctx): HEALTHY = discord.Colour(value=4437377) UNHEALTHY = discord.Colour(value=15747399) WARNING = discord.Colour(value=15769159) total_warnings = 0 embed = discord.Embed(title='Bot Health Report', colour=HEALTHY) pool = self.bot.pool total_waiting = len(pool._queue._getters) current_generation = pool._generation description = [f'Total `Pool.acquire` Waiters: {total_waiting}', f'Current Pool Generation: {current_generation}', f'Connections In Use: {(len(pool._holders) - pool._queue.qsize())}'] questionable_connections = 0 connection_value = [] for (index, holder) in enumerate(pool._holders, start=1): generation = holder._generation in_use = (holder._in_use is not None) is_closed = ((holder._con is None) or holder._con.is_closed()) display = f'gen={holder._generation} in_use={in_use} closed={is_closed}' questionable_connections += any((in_use, (generation != current_generation))) connection_value.append(f'<Holder i={index} {display}>') joined_value = '\n'.join(connection_value) embed.add_field(name='Connections', value=f'```py {joined_value} ```', inline=False) spam_control = self.bot.spam_control being_spammed = [str(key) for (key, value) in spam_control._cache.items() if (value._tokens == 0)] description.append(f"Current Spammers: {(', '.join(being_spammed) if being_spammed else 'None')}") description.append(f'Questionable Connections: {questionable_connections}') total_warnings += questionable_connections if being_spammed: embed.colour = WARNING total_warnings += 1 try: task_retriever = asyncio.Task.all_tasks except AttributeError: task_retriever = asyncio.all_tasks else: all_tasks = task_retriever(loop=self.bot.loop) event_tasks = [t for t in all_tasks if (('Client._run_event' in repr(t)) and (not t.done()))] cogs_directory = os.path.dirname(__file__) tasks_directory = os.path.join('discord', 'ext', 'tasks', '__init__.py') inner_tasks = [t for t in all_tasks if ((cogs_directory in repr(t)) or (tasks_directory in repr(t)))] bad_inner_tasks = ', '.join((hex(id(t)) for t in inner_tasks if (t.done() and (t._exception is not None)))) total_warnings += bool(bad_inner_tasks) embed.add_field(name='Inner Tasks', value=f'Total: {len(inner_tasks)} Failed: {(bad_inner_tasks or 'None')}') embed.add_field(name='Events Waiting', value=f'Total: {len(event_tasks)}', inline=False) command_waiters = len(self._data_batch) is_locked = self._batch_lock.locked() description.append(f'Commands Waiting: {command_waiters}, Batch Locked: {is_locked}') yesterday = (datetime.datetime.utcnow() - datetime.timedelta(days=1)) total_resumes = sum((1 for dt in self._resumes if (dt > yesterday))) identifies = {shard_id: sum((1 for dt in dates if (dt > yesterday))) for (shard_id, dates) in self._identifies.items()} absolute_total_identifies = sum(identifies.values()) resume_info_builder = [f'Total RESUMEs: {total_resumes}', f'Total IDENTIFYs: {absolute_total_identifies}'] for (shard_id, total) in identifies.items(): resume_info_builder.append(f'Shard ID {shard_id} IDENTIFYs: {total}') if (absolute_total_identifies >= (len(self.bot.shards) * 5)): total_warnings += 1 embed.colour = WARNING embed.add_field(name='Gateway (last 24 hours)', value='\n'.join(resume_info_builder), inline=False) memory_usage = (self.process.memory_full_info().uss / (1024 ** 2)) cpu_usage = (self.process.cpu_percent() / psutil.cpu_count()) embed.add_field(name='Process', value=f'{memory_usage:.2f} MiB {cpu_usage:.2f}% CPU', inline=False) global_rate_limit = (not self.bot.http._global_over.is_set()) description.append(f'Global Rate Limit: {global_rate_limit}') if (command_waiters >= 8): total_warnings += 1 embed.colour = WARNING if (global_rate_limit or (total_warnings >= 9)): embed.colour = UNHEALTHY embed.set_footer(text=f'{total_warnings} warning(s)') embed.description = '\n'.join(description) (await ctx.send(embed=embed))
@commands.command(hidden=True, aliases=['cancel_task']) @commands.is_owner() async def debug_task(self, ctx, memory_id: hex_value): 'Debug a task by a memory location.' task = object_at(memory_id) if ((task is None) or (not isinstance(task, asyncio.Task))): return (await ctx.send(f'Could not find Task object at {hex(memory_id)}.')) if (ctx.invoked_with == 'cancel_task'): task.cancel() return (await ctx.send(f'Cancelled task object {task!r}.')) paginator = commands.Paginator(prefix='```py') fp = io.StringIO() frames = len(task.get_stack()) paginator.add_line(f'# Total Frames: {frames}') task.print_stack(file=fp) for line in fp.getvalue().splitlines(): paginator.add_line(line) for page in paginator.pages: (await ctx.send(page))
6,734,935,313,058,193,000
Debug a task by a memory location.
cogs/stats.py
debug_task
ymypengueni/RoboDanny
python
@commands.command(hidden=True, aliases=['cancel_task']) @commands.is_owner() async def debug_task(self, ctx, memory_id: hex_value): task = object_at(memory_id) if ((task is None) or (not isinstance(task, asyncio.Task))): return (await ctx.send(f'Could not find Task object at {hex(memory_id)}.')) if (ctx.invoked_with == 'cancel_task'): task.cancel() return (await ctx.send(f'Cancelled task object {task!r}.')) paginator = commands.Paginator(prefix='```py') fp = io.StringIO() frames = len(task.get_stack()) paginator.add_line(f'# Total Frames: {frames}') task.print_stack(file=fp) for line in fp.getvalue().splitlines(): paginator.add_line(line) for page in paginator.pages: (await ctx.send(page))
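`object_at` and `hex_value` are project-specific helpers not shown in this slice; the sketch below only demonstrates the stack-dumping half of `debug_task` (`get_stack()` plus `print_stack(file=...)`) on an ordinary asyncio task.

```python
import asyncio
import io

async def worker():
    await asyncio.sleep(10)

async def main():
    task = asyncio.create_task(worker())
    await asyncio.sleep(0)                     # let the task start and suspend
    fp = io.StringIO()
    print(f'# Total Frames: {len(task.get_stack())}')
    task.print_stack(file=fp)                  # same capture step debug_task performs before paginating
    print(fp.getvalue())
    task.cancel()

asyncio.run(main())
```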
@commands.group(hidden=True, invoke_without_command=True) @commands.is_owner() async def command_history(self, ctx): 'Command history.' query = 'SELECT\n CASE failed\n WHEN TRUE THEN command || \' [!]\'\n ELSE command\n END AS "command",\n to_char(used, \'Mon DD HH12:MI:SS AM\') AS "invoked",\n author_id,\n guild_id\n FROM commands\n ORDER BY used DESC\n LIMIT 15;\n ' (await self.tabulate_query(ctx, query))
-2,068,843,010,320,469,000
Command history.
cogs/stats.py
command_history
ymypengueni/RoboDanny
python
@commands.group(hidden=True, invoke_without_command=True) @commands.is_owner() async def command_history(self, ctx): query = 'SELECT\n CASE failed\n WHEN TRUE THEN command || \' [!]\'\n ELSE command\n END AS "command",\n to_char(used, \'Mon DD HH12:MI:SS AM\') AS "invoked",\n author_id,\n guild_id\n FROM commands\n ORDER BY used DESC\n LIMIT 15;\n ' (await self.tabulate_query(ctx, query))
@command_history.command(name='for') @commands.is_owner() async def command_history_for(self, ctx, days: typing.Optional[int]=7, *, command: str): 'Command history for a command.' query = 'SELECT *, t.success + t.failed AS "total"\n FROM (\n SELECT guild_id,\n SUM(CASE WHEN failed THEN 0 ELSE 1 END) AS "success",\n SUM(CASE WHEN failed THEN 1 ELSE 0 END) AS "failed"\n FROM commands\n WHERE command=$1\n AND used > (CURRENT_TIMESTAMP - $2::interval)\n GROUP BY guild_id\n ) AS t\n ORDER BY "total" DESC\n LIMIT 30;\n ' (await self.tabulate_query(ctx, query, command, datetime.timedelta(days=days)))
-4,612,241,515,661,905,000
Command history for a command.
cogs/stats.py
command_history_for
ymypengueni/RoboDanny
python
@command_history.command(name='for') @commands.is_owner() async def command_history_for(self, ctx, days: typing.Optional[int]=7, *, command: str): query = 'SELECT *, t.success + t.failed AS "total"\n FROM (\n SELECT guild_id,\n SUM(CASE WHEN failed THEN 0 ELSE 1 END) AS "success",\n SUM(CASE WHEN failed THEN 1 ELSE 0 END) AS "failed"\n FROM commands\n WHERE command=$1\n AND used > (CURRENT_TIMESTAMP - $2::interval)\n GROUP BY guild_id\n ) AS t\n ORDER BY "total" DESC\n LIMIT 30;\n ' (await self.tabulate_query(ctx, query, command, datetime.timedelta(days=days)))
@command_history.command(name='guild', aliases=['server']) @commands.is_owner() async def command_history_guild(self, ctx, guild_id: int): 'Command history for a guild.' query = 'SELECT\n CASE failed\n WHEN TRUE THEN command || \' [!]\'\n ELSE command\n END AS "command",\n channel_id,\n author_id,\n used\n FROM commands\n WHERE guild_id=$1\n ORDER BY used DESC\n LIMIT 15;\n ' (await self.tabulate_query(ctx, query, guild_id))
8,836,294,313,546,763,000
Command history for a guild.
cogs/stats.py
command_history_guild
ymypengueni/RoboDanny
python
@command_history.command(name='guild', aliases=['server']) @commands.is_owner() async def command_history_guild(self, ctx, guild_id: int): query = 'SELECT\n CASE failed\n WHEN TRUE THEN command || \' [!]\'\n ELSE command\n END AS "command",\n channel_id,\n author_id,\n used\n FROM commands\n WHERE guild_id=$1\n ORDER BY used DESC\n LIMIT 15;\n ' (await self.tabulate_query(ctx, query, guild_id))
@command_history.command(name='user', aliases=['member']) @commands.is_owner() async def command_history_user(self, ctx, user_id: int): 'Command history for a user.' query = 'SELECT\n CASE failed\n WHEN TRUE THEN command || \' [!]\'\n ELSE command\n END AS "command",\n guild_id,\n used\n FROM commands\n WHERE author_id=$1\n ORDER BY used DESC\n LIMIT 20;\n ' (await self.tabulate_query(ctx, query, user_id))
6,525,985,542,778,223,000
Command history for a user.
cogs/stats.py
command_history_user
ymypengueni/RoboDanny
python
@command_history.command(name='user', aliases=['member']) @commands.is_owner() async def command_history_user(self, ctx, user_id: int): query = 'SELECT\n CASE failed\n WHEN TRUE THEN command || \' [!]\'\n ELSE command\n END AS "command",\n guild_id,\n used\n FROM commands\n WHERE author_id=$1\n ORDER BY used DESC\n LIMIT 20;\n ' (await self.tabulate_query(ctx, query, user_id))
@command_history.command(name='log') @commands.is_owner() async def command_history_log(self, ctx, days=7): 'Command history log for the last N days.' query = 'SELECT command, COUNT(*)\n FROM commands\n WHERE used > (CURRENT_TIMESTAMP - $1::interval)\n GROUP BY command\n ORDER BY 2 DESC\n ' all_commands = {c.qualified_name: 0 for c in self.bot.walk_commands()} records = (await ctx.db.fetch(query, datetime.timedelta(days=days))) for (name, uses) in records: if (name in all_commands): all_commands[name] = uses as_data = sorted(all_commands.items(), key=(lambda t: t[1]), reverse=True) table = formats.TabularData() table.set_columns(['Command', 'Uses']) table.add_rows((tup for tup in as_data)) render = table.render() embed = discord.Embed(title='Summary', colour=discord.Colour.green()) embed.set_footer(text='Since').timestamp = (datetime.datetime.utcnow() - datetime.timedelta(days=days)) top_ten = '\n'.join((f'{command}: {uses}' for (command, uses) in records[:10])) bottom_ten = '\n'.join((f'{command}: {uses}' for (command, uses) in records[(- 10):])) embed.add_field(name='Top 10', value=top_ten) embed.add_field(name='Bottom 10', value=bottom_ten) unused = ', '.join((name for (name, uses) in as_data if (uses == 0))) if (len(unused) > 1024): unused = 'Way too many...' embed.add_field(name='Unused', value=unused, inline=False) (await ctx.send(embed=embed, file=discord.File(io.BytesIO(render.encode()), filename='full_results.txt')))
-7,858,791,333,496,422,000
Command history log for the last N days.
cogs/stats.py
command_history_log
ymypengueni/RoboDanny
python
@command_history.command(name='log') @commands.is_owner() async def command_history_log(self, ctx, days=7): query = 'SELECT command, COUNT(*)\n FROM commands\n WHERE used > (CURRENT_TIMESTAMP - $1::interval)\n GROUP BY command\n ORDER BY 2 DESC\n ' all_commands = {c.qualified_name: 0 for c in self.bot.walk_commands()} records = (await ctx.db.fetch(query, datetime.timedelta(days=days))) for (name, uses) in records: if (name in all_commands): all_commands[name] = uses as_data = sorted(all_commands.items(), key=(lambda t: t[1]), reverse=True) table = formats.TabularData() table.set_columns(['Command', 'Uses']) table.add_rows((tup for tup in as_data)) render = table.render() embed = discord.Embed(title='Summary', colour=discord.Colour.green()) embed.set_footer(text='Since').timestamp = (datetime.datetime.utcnow() - datetime.timedelta(days=days)) top_ten = '\n'.join((f'{command}: {uses}' for (command, uses) in records[:10])) bottom_ten = '\n'.join((f'{command}: {uses}' for (command, uses) in records[(- 10):])) embed.add_field(name='Top 10', value=top_ten) embed.add_field(name='Bottom 10', value=bottom_ten) unused = ', '.join((name for (name, uses) in as_data if (uses == 0))) if (len(unused) > 1024): unused = 'Way too many...' embed.add_field(name='Unused', value=unused, inline=False) (await ctx.send(embed=embed, file=discord.File(io.BytesIO(render.encode()), filename='full_results.txt')))
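Several commands above render query results through formats.TabularData, which lives elsewhere in the repository. The sketch below shows one plausible way such a fixed-width table renderer could work, purely as an illustration (the column sizing and grid-style borders are assumptions):

class TabularData:
    def __init__(self):
        self._columns = []
        self._rows = []

    def set_columns(self, columns):
        self._columns = list(columns)

    def add_rows(self, rows):
        self._rows.extend([str(value) for value in row] for row in rows)

    def render(self):
        # Compute one width per column, then draw a simple grid around the cells.
        widths = [len(c) for c in self._columns]
        for row in self._rows:
            for i, value in enumerate(row):
                widths[i] = max(widths[i], len(value))
        sep = '+{}+'.format('+'.join('-' * (w + 2) for w in widths))

        def draw(cells):
            return '|' + '|'.join(' {} '.format(c.ljust(w)) for c, w in zip(cells, widths)) + '|'

        lines = [sep, draw(self._columns), sep]
        lines.extend(draw(row) for row in self._rows)
        lines.append(sep)
        return '\n'.join(lines)

table = TabularData()
table.set_columns(['Command', 'Uses'])
table.add_rows([('ping', 120), ('help', 47)])
print(table.render())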
@command_history.command(name='cog') @commands.is_owner() async def command_history_cog(self, ctx, days: typing.Optional[int]=7, *, cog: str=None): 'Command history for a cog or grouped by a cog.' interval = datetime.timedelta(days=days) if (cog is not None): cog = self.bot.get_cog(cog) if (cog is None): return (await ctx.send(f'Unknown cog: {cog}')) query = 'SELECT *, t.success + t.failed AS "total"\n FROM (\n SELECT command,\n SUM(CASE WHEN failed THEN 0 ELSE 1 END) AS "success",\n SUM(CASE WHEN failed THEN 1 ELSE 0 END) AS "failed"\n FROM commands\n WHERE command = any($1::text[])\n AND used > (CURRENT_TIMESTAMP - $2::interval)\n GROUP BY command\n ) AS t\n ORDER BY "total" DESC\n LIMIT 30;\n ' return (await self.tabulate_query(ctx, query, [c.qualified_name for c in cog.walk_commands()], interval)) query = 'SELECT *, t.success + t.failed AS "total"\n FROM (\n SELECT command,\n SUM(CASE WHEN failed THEN 0 ELSE 1 END) AS "success",\n SUM(CASE WHEN failed THEN 1 ELSE 0 END) AS "failed"\n FROM commands\n WHERE used > (CURRENT_TIMESTAMP - $1::interval)\n GROUP BY command\n ) AS t;\n ' class Count(): __slots__ = ('success', 'failed', 'total') def __init__(self): self.success = 0 self.failed = 0 self.total = 0 def add(self, record): self.success += record['success'] self.failed += record['failed'] self.total += record['total'] data = defaultdict(Count) records = (await ctx.db.fetch(query, interval)) for record in records: command = self.bot.get_command(record['command']) if ((command is None) or (command.cog is None)): data['No Cog'].add(record) else: data[command.cog.qualified_name].add(record) table = formats.TabularData() table.set_columns(['Cog', 'Success', 'Failed', 'Total']) data = sorted([(cog, e.success, e.failed, e.total) for (cog, e) in data.items()], key=(lambda t: t[(- 1)]), reverse=True) table.add_rows(data) render = table.render() (await ctx.safe_send(f'''``` {render} ```'''))
-2,708,958,935,261,370,000
Command history for a cog or grouped by a cog.
cogs/stats.py
command_history_cog
ymypengueni/RoboDanny
python
@command_history.command(name='cog') @commands.is_owner() async def command_history_cog(self, ctx, days: typing.Optional[int]=7, *, cog: str=None): interval = datetime.timedelta(days=days) if (cog is not None): cog = self.bot.get_cog(cog) if (cog is None): return (await ctx.send(f'Unknown cog: {cog}')) query = 'SELECT *, t.success + t.failed AS "total"\n FROM (\n SELECT command,\n SUM(CASE WHEN failed THEN 0 ELSE 1 END) AS "success",\n SUM(CASE WHEN failed THEN 1 ELSE 0 END) AS "failed"\n FROM commands\n WHERE command = any($1::text[])\n AND used > (CURRENT_TIMESTAMP - $2::interval)\n GROUP BY command\n ) AS t\n ORDER BY "total" DESC\n LIMIT 30;\n ' return (await self.tabulate_query(ctx, query, [c.qualified_name for c in cog.walk_commands()], interval)) query = 'SELECT *, t.success + t.failed AS "total"\n FROM (\n SELECT command,\n SUM(CASE WHEN failed THEN 0 ELSE 1 END) AS "success",\n SUM(CASE WHEN failed THEN 1 ELSE 0 END) AS "failed"\n FROM commands\n WHERE used > (CURRENT_TIMESTAMP - $1::interval)\n GROUP BY command\n ) AS t;\n ' class Count(): __slots__ = ('success', 'failed', 'total') def __init__(self): self.success = 0 self.failed = 0 self.total = 0 def add(self, record): self.success += record['success'] self.failed += record['failed'] self.total += record['total'] data = defaultdict(Count) records = (await ctx.db.fetch(query, interval)) for record in records: command = self.bot.get_command(record['command']) if ((command is None) or (command.cog is None)): data['No Cog'].add(record) else: data[command.cog.qualified_name].add(record) table = formats.TabularData() table.set_columns(['Cog', 'Success', 'Failed', 'Total']) data = sorted([(cog, e.success, e.failed, e.total) for (cog, e) in data.items()], key=(lambda t: t[(- 1)]), reverse=True) table.add_rows(data) render = table.render() (await ctx.safe_send(f'``` {render} ```'))
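The per-cog aggregation in command_history_cog boils down to folding rows of success/failed counts into a defaultdict keyed by cog name. A stripped-down, self-contained version of that pattern with made-up sample rows (the real rows come from the SQL query above):

from collections import defaultdict

class Count:
    __slots__ = ('success', 'failed', 'total')

    def __init__(self):
        self.success = 0
        self.failed = 0
        self.total = 0

    def add(self, record):
        self.success += record['success']
        self.failed += record['failed']
        # In the real query "total" is computed in SQL; here we derive it.
        self.total += record['success'] + record['failed']

records = [
    {'command': 'ping', 'success': 10, 'failed': 1},
    {'command': 'tag create', 'success': 4, 'failed': 2},
    {'command': 'ping', 'success': 3, 'failed': 0},
]
command_to_cog = {'ping': 'Meta', 'tag create': 'Tags'}

data = defaultdict(Count)
for record in records:
    cog = command_to_cog.get(record['command'], 'No Cog')
    data[cog].add(record)

rows = sorted(((cog, c.success, c.failed, c.total) for cog, c in data.items()),
              key=lambda t: t[-1], reverse=True)
print(rows)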
def test_get_points_within_radius_of_cameras(): 'Verify that points that fall outside of 10 meter radius of two camera poses.\n\n Cameras are placed at (0,0,0) and (10,0,0).\n ' wTi0 = Pose3(Rot3(), np.zeros(3)) wTi1 = Pose3(Rot3(), np.array([10.0, 0, 0])) wTi_list = [wTi0, wTi1] points_3d = np.array([[(- 15), 0, 0], [0, 15, 0], [(- 5), 0, 0], [15, 0, 0], [25, 0, 0]]) radius = 10.0 nearby_points_3d = geometry_comparisons.get_points_within_radius_of_cameras(wTi_list, points_3d, radius) expected_nearby_points_3d = np.array([[(- 5), 0, 0], [15, 0, 0]]) np.testing.assert_allclose(nearby_points_3d, expected_nearby_points_3d)
-5,620,679,280,221,742,000
Verify that points that fall outside of 10 meter radius of two camera poses. Cameras are placed at (0,0,0) and (10,0,0).
tests/utils/test_geometry_comparisons.py
test_get_points_within_radius_of_cameras
yuancaimaiyi/gtsfm
python
def test_get_points_within_radius_of_cameras(): 'Verify that points that fall outside of 10 meter radius of two camera poses.\n\n Cameras are placed at (0,0,0) and (10,0,0).\n ' wTi0 = Pose3(Rot3(), np.zeros(3)) wTi1 = Pose3(Rot3(), np.array([10.0, 0, 0])) wTi_list = [wTi0, wTi1] points_3d = np.array([[(- 15), 0, 0], [0, 15, 0], [(- 5), 0, 0], [15, 0, 0], [25, 0, 0]]) radius = 10.0 nearby_points_3d = geometry_comparisons.get_points_within_radius_of_cameras(wTi_list, points_3d, radius) expected_nearby_points_3d = np.array([[(- 5), 0, 0], [15, 0, 0]]) np.testing.assert_allclose(nearby_points_3d, expected_nearby_points_3d)
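The test above pins down the expected behaviour: keep any 3d point that lies within the given radius of at least one camera centre. A rough numpy-only sketch of that filtering, using plain translation vectors in place of gtsam Pose3 objects (the None-returning edge cases follow the later tests; this is an illustration, not the project's implementation):

import numpy as np

def get_points_within_radius_of_cameras(camera_centers, points_3d, radius):
    # camera_centers: (M, 3) array, points_3d: (N, 3) array.
    # Returns points within `radius` of at least one camera, or None on degenerate input.
    camera_centers = np.asarray(camera_centers, dtype=float)
    points_3d = np.asarray(points_3d, dtype=float)
    if radius <= 0 or len(camera_centers) == 0 or len(points_3d) == 0:
        return None
    # Pairwise distances between every point and every camera centre.
    dists = np.linalg.norm(points_3d[:, None, :] - camera_centers[None, :, :], axis=2)
    keep = (dists < radius).any(axis=1)
    return points_3d[keep]

centers = np.array([[0.0, 0, 0], [10.0, 0, 0]])
points = np.array([[-15.0, 0, 0], [0, 15, 0], [-5.0, 0, 0], [15.0, 0, 0], [25.0, 0, 0]])
print(get_points_within_radius_of_cameras(centers, points, 10.0))  # [[-5. 0. 0.] [15. 0. 0.]]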
def test_get_points_within_radius_of_cameras_negative_radius(): 'Catch degenerate input.' wTi0 = Pose3(Rot3(), np.zeros(3)) wTi1 = Pose3(Rot3(), np.array([10.0, 0, 0])) wTi_list = [wTi0, wTi1] points_3d = np.array([[(- 15), 0, 0], [0, 15, 0], [(- 5), 0, 0], [15, 0, 0], [25, 0, 0]]) radius = (- 5) nearby_points_3d = geometry_comparisons.get_points_within_radius_of_cameras(wTi_list, points_3d, radius) assert (nearby_points_3d is None), 'Non-positive radius is not allowed'
-7,282,060,333,028,995,000
Catch degenerate input.
tests/utils/test_geometry_comparisons.py
test_get_points_within_radius_of_cameras_negative_radius
yuancaimaiyi/gtsfm
python
def test_get_points_within_radius_of_cameras_negative_radius(): wTi0 = Pose3(Rot3(), np.zeros(3)) wTi1 = Pose3(Rot3(), np.array([10.0, 0, 0])) wTi_list = [wTi0, wTi1] points_3d = np.array([[(- 15), 0, 0], [0, 15, 0], [(- 5), 0, 0], [15, 0, 0], [25, 0, 0]]) radius = (- 5) nearby_points_3d = geometry_comparisons.get_points_within_radius_of_cameras(wTi_list, points_3d, radius) assert (nearby_points_3d is None), 'Non-positive radius is not allowed'
def test_get_points_within_radius_of_cameras_no_points(): 'Catch degenerate input.' wTi0 = Pose3(Rot3(), np.zeros(3)) wTi1 = Pose3(Rot3(), np.array([10.0, 0, 0])) wTi_list = [wTi0, wTi1] points_3d = np.zeros((0, 3)) radius = 10.0 nearby_points_3d = geometry_comparisons.get_points_within_radius_of_cameras(wTi_list, points_3d, radius) assert (nearby_points_3d is None), 'At least one 3d point must be provided'
1,175,814,159,659,153,700
Catch degenerate input.
tests/utils/test_geometry_comparisons.py
test_get_points_within_radius_of_cameras_no_points
yuancaimaiyi/gtsfm
python
def test_get_points_within_radius_of_cameras_no_points(): wTi0 = Pose3(Rot3(), np.zeros(3)) wTi1 = Pose3(Rot3(), np.array([10.0, 0, 0])) wTi_list = [wTi0, wTi1] points_3d = np.zeros((0, 3)) radius = 10.0 nearby_points_3d = geometry_comparisons.get_points_within_radius_of_cameras(wTi_list, points_3d, radius) assert (nearby_points_3d is None), 'At least one 3d point must be provided'
def test_get_points_within_radius_of_cameras_no_poses(): 'Catch degenerate input.' wTi_list = [] points_3d = np.array([[(- 15), 0, 0], [0, 15, 0], [(- 5), 0, 0], [15, 0, 0], [25, 0, 0]]) radius = 10.0 nearby_points_3d = geometry_comparisons.get_points_within_radius_of_cameras(wTi_list, points_3d, radius) assert (nearby_points_3d is None), 'At least one camera pose must be provided'
-8,889,441,781,887,105,000
Catch degenerate input.
tests/utils/test_geometry_comparisons.py
test_get_points_within_radius_of_cameras_no_poses
yuancaimaiyi/gtsfm
python
def test_get_points_within_radius_of_cameras_no_poses(): wTi_list = [] points_3d = np.array([[(- 15), 0, 0], [0, 15, 0], [(- 5), 0, 0], [15, 0, 0], [25, 0, 0]]) radius = 10.0 nearby_points_3d = geometry_comparisons.get_points_within_radius_of_cameras(wTi_list, points_3d, radius) assert (nearby_points_3d is None), 'At least one camera pose must be provided'
def test_align_rotations(self): 'Tests the alignment of rotations.' input_list = [Rot3.RzRyRx(np.deg2rad(0), np.deg2rad((- 10)), 0), Rot3.RzRyRx(np.deg2rad(0), np.deg2rad(30), 0)] ref_list = [Rot3.RzRyRx(np.deg2rad(0), np.deg2rad(80), 0), Rot3.RzRyRx(np.deg2rad(0), np.deg2rad((- 40)), 0)] computed = geometry_comparisons.align_rotations(input_list, ref_list) expected = [Rot3.RzRyRx(0, np.deg2rad(80), 0), Rot3.RzRyRx(0, np.deg2rad(120), 0)] self.__assert_equality_on_rot3s(computed, expected)
-3,114,668,187,556,526,000
Tests the alignment of rotations.
tests/utils/test_geometry_comparisons.py
test_align_rotations
yuancaimaiyi/gtsfm
python
def test_align_rotations(self): input_list = [Rot3.RzRyRx(np.deg2rad(0), np.deg2rad((- 10)), 0), Rot3.RzRyRx(np.deg2rad(0), np.deg2rad(30), 0)] ref_list = [Rot3.RzRyRx(np.deg2rad(0), np.deg2rad(80), 0), Rot3.RzRyRx(np.deg2rad(0), np.deg2rad((- 40)), 0)] computed = geometry_comparisons.align_rotations(input_list, ref_list) expected = [Rot3.RzRyRx(0, np.deg2rad(80), 0), Rot3.RzRyRx(0, np.deg2rad(120), 0)] self.__assert_equality_on_rot3s(computed, expected)
def test_align_poses_after_sim3_transform(self): 'Test for alignment of poses after applying a SIM3 transformation.' translation_shift = np.array([5, 10, (- 5)]) rotation_shift = Rot3.RzRyRx(0, 0, np.deg2rad(30)) scaling_factor = 0.7 transform = Similarity3(rotation_shift, translation_shift, scaling_factor) ref_list = [transform.transformFrom(x) for x in sample_poses.CIRCLE_TWO_EDGES_GLOBAL_POSES] computed_poses = geometry_comparisons.align_poses_sim3(sample_poses.CIRCLE_TWO_EDGES_GLOBAL_POSES, ref_list) self.__assert_equality_on_pose3s(computed_poses, sample_poses.CIRCLE_TWO_EDGES_GLOBAL_POSES)
5,758,185,141,826,801,000
Test for alignment of poses after applying a SIM3 transformation.
tests/utils/test_geometry_comparisons.py
test_align_poses_after_sim3_transform
yuancaimaiyi/gtsfm
python
def test_align_poses_after_sim3_transform(self): translation_shift = np.array([5, 10, (- 5)]) rotation_shift = Rot3.RzRyRx(0, 0, np.deg2rad(30)) scaling_factor = 0.7 transform = Similarity3(rotation_shift, translation_shift, scaling_factor) ref_list = [transform.transformFrom(x) for x in sample_poses.CIRCLE_TWO_EDGES_GLOBAL_POSES] computed_poses = geometry_comparisons.align_poses_sim3(sample_poses.CIRCLE_TWO_EDGES_GLOBAL_POSES, ref_list) self.__assert_equality_on_pose3s(computed_poses, sample_poses.CIRCLE_TWO_EDGES_GLOBAL_POSES)
def test_align_poses_on_panorama_after_sim3_transform(self): 'Test for alignment of poses after applying a forward motion transformation.' translation_shift = np.array([0, 5, 0]) rotation_shift = Rot3.RzRyRx(0, 0, np.deg2rad(30)) scaling_factor = 1.0 aTi_list = sample_poses.PANORAMA_GLOBAL_POSES bSa = Similarity3(rotation_shift, translation_shift, scaling_factor) bTi_list = [bSa.transformFrom(x) for x in aTi_list] aTi_list_ = geometry_comparisons.align_poses_sim3(aTi_list, bTi_list) self.__assert_equality_on_pose3s(aTi_list_, aTi_list)
-5,600,359,633,403,404,000
Test for alignment of poses after applying a forward motion transformation.
tests/utils/test_geometry_comparisons.py
test_align_poses_on_panorama_after_sim3_transform
yuancaimaiyi/gtsfm
python
def test_align_poses_on_panorama_after_sim3_transform(self): translation_shift = np.array([0, 5, 0]) rotation_shift = Rot3.RzRyRx(0, 0, np.deg2rad(30)) scaling_factor = 1.0 aTi_list = sample_poses.PANORAMA_GLOBAL_POSES bSa = Similarity3(rotation_shift, translation_shift, scaling_factor) bTi_list = [bSa.transformFrom(x) for x in aTi_list] aTi_list_ = geometry_comparisons.align_poses_sim3(aTi_list, bTi_list) self.__assert_equality_on_pose3s(aTi_list_, aTi_list)
@patch('gtsfm.utils.geometry_comparisons.align_rotations', return_value=[Rot3.RzRyRx(0, np.deg2rad(32), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 22))), Rot3.RzRyRx(0, 0, np.deg2rad(83))]) def test_compare_rotations_with_all_valid_rot3s_success(self, align_rotations_mocked): 'Tests the comparison results on list of rotations.' aRi_list = [Rot3.RzRyRx(0, np.deg2rad(25), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 20))), Rot3.RzRyRx(0, 0, np.deg2rad(80))] bRi_list = [Rot3.RzRyRx(0, np.deg2rad(31), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 22))), Rot3.RzRyRx(0, 0, np.deg2rad(77.5))] self.assertTrue(geometry_comparisons.compare_rotations(aRi_list, bRi_list, 10)) align_rotations_mocked.assert_called_once()
-6,894,803,049,350,792,000
Tests the comparison results on list of rotations.
tests/utils/test_geometry_comparisons.py
test_compare_rotations_with_all_valid_rot3s_success
yuancaimaiyi/gtsfm
python
@patch('gtsfm.utils.geometry_comparisons.align_rotations', return_value=[Rot3.RzRyRx(0, np.deg2rad(32), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 22))), Rot3.RzRyRx(0, 0, np.deg2rad(83))]) def test_compare_rotations_with_all_valid_rot3s_success(self, align_rotations_mocked): aRi_list = [Rot3.RzRyRx(0, np.deg2rad(25), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 20))), Rot3.RzRyRx(0, 0, np.deg2rad(80))] bRi_list = [Rot3.RzRyRx(0, np.deg2rad(31), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 22))), Rot3.RzRyRx(0, 0, np.deg2rad(77.5))] self.assertTrue(geometry_comparisons.compare_rotations(aRi_list, bRi_list, 10)) align_rotations_mocked.assert_called_once()
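The compare_rotations tests above replace gtsfm.utils.geometry_comparisons.align_rotations with a canned return value so that only the comparison logic is exercised. The same unittest.mock pattern in a tiny self-contained form (the module and function names are invented for the demo; the key point is that patch targets the name where it is looked up):

from unittest.mock import patch

def fetch_answer():
    return 41

def answer_is_42():
    return fetch_answer() == 42

@patch('__main__.fetch_answer', return_value=42)
def test_answer(mocked_fetch):
    # The decorator injects the mock as the first argument.
    assert answer_is_42()
    mocked_fetch.assert_called_once()

test_answer()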
@patch('gtsfm.utils.geometry_comparisons.align_rotations', return_value=[Rot3.RzRyRx(0, np.deg2rad(32), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 22))), Rot3.RzRyRx(0, 0, np.deg2rad(83))]) def test_compare_rotations_with_all_valid_rot3s_failure(self, align_rotations_mocked): 'Tests the comparison results on list of rotations.' aRi_list = [Rot3.RzRyRx(0, np.deg2rad(25), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 20))), Rot3.RzRyRx(0, 0, np.deg2rad(80))] bRi_list = [Rot3.RzRyRx(0, np.deg2rad(31), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 22))), Rot3.RzRyRx(0, 0, np.deg2rad(77.5))] self.assertFalse(geometry_comparisons.compare_rotations(aRi_list, bRi_list, 5)) align_rotations_mocked.assert_called_once()
4,278,046,596,358,366,000
Tests the comparison results on list of rotations.
tests/utils/test_geometry_comparisons.py
test_compare_rotations_with_all_valid_rot3s_failure
yuancaimaiyi/gtsfm
python
@patch('gtsfm.utils.geometry_comparisons.align_rotations', return_value=[Rot3.RzRyRx(0, np.deg2rad(32), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 22))), Rot3.RzRyRx(0, 0, np.deg2rad(83))]) def test_compare_rotations_with_all_valid_rot3s_failure(self, align_rotations_mocked): aRi_list = [Rot3.RzRyRx(0, np.deg2rad(25), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 20))), Rot3.RzRyRx(0, 0, np.deg2rad(80))] bRi_list = [Rot3.RzRyRx(0, np.deg2rad(31), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 22))), Rot3.RzRyRx(0, 0, np.deg2rad(77.5))] self.assertFalse(geometry_comparisons.compare_rotations(aRi_list, bRi_list, 5)) align_rotations_mocked.assert_called_once()
@patch('gtsfm.utils.geometry_comparisons.align_rotations', return_value=[Rot3.RzRyRx(0, np.deg2rad(25), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 20)))]) def test_compare_rotations_with_nones_at_same_indices(self, align_rotations_mocked): 'Tests the comparison results on list of rotations.' list1 = [Rot3.RzRyRx(0, np.deg2rad(25), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 20))), None] list2 = [Rot3.RzRyRx(0, np.deg2rad(31), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 22))), None] threshold_degrees = 10 self.assertTrue(geometry_comparisons.compare_rotations(list1, list2, threshold_degrees)) align_rotations_mocked.assert_called_once()
5,023,609,743,041,675,000
Tests the comparison results on list of rotations.
tests/utils/test_geometry_comparisons.py
test_compare_rotations_with_nones_at_same_indices
yuancaimaiyi/gtsfm
python
@patch('gtsfm.utils.geometry_comparisons.align_rotations', return_value=[Rot3.RzRyRx(0, np.deg2rad(25), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 20)))]) def test_compare_rotations_with_nones_at_same_indices(self, align_rotations_mocked): list1 = [Rot3.RzRyRx(0, np.deg2rad(25), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 20))), None] list2 = [Rot3.RzRyRx(0, np.deg2rad(31), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 22))), None] threshold_degrees = 10 self.assertTrue(geometry_comparisons.compare_rotations(list1, list2, threshold_degrees)) align_rotations_mocked.assert_called_once()
@patch('gtsfm.utils.geometry_comparisons.align_rotations', return_value=None) def test_compare_rotations_with_nones_at_different_indices(self, aligned_rotations_mocked): 'Tests the comparison results on list of rotations.' list1 = [Rot3.RzRyRx(0, np.deg2rad(25), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 20))), None] list2 = [Rot3.RzRyRx(0, np.deg2rad(31), 0), None, Rot3.RzRyRx(0, 0, np.deg2rad((- 22)))] self.assertFalse(geometry_comparisons.compare_rotations(list1, list2, 10)) aligned_rotations_mocked.assert_not_called()
2,320,318,608,087,057,000
Tests the comparison results on list of rotations.
tests/utils/test_geometry_comparisons.py
test_compare_rotations_with_nones_at_different_indices
yuancaimaiyi/gtsfm
python
@patch('gtsfm.utils.geometry_comparisons.align_rotations', return_value=None) def test_compare_rotations_with_nones_at_different_indices(self, aligned_rotations_mocked): list1 = [Rot3.RzRyRx(0, np.deg2rad(25), 0), Rot3.RzRyRx(0, 0, np.deg2rad((- 20))), None] list2 = [Rot3.RzRyRx(0, np.deg2rad(31), 0), None, Rot3.RzRyRx(0, 0, np.deg2rad((- 22)))] self.assertFalse(geometry_comparisons.compare_rotations(list1, list2, 10)) aligned_rotations_mocked.assert_not_called()
def test_compute_relative_rotation_angle(self): 'Tests the relative angle between two rotations.' R_1 = Rot3.RzRyRx(0, np.deg2rad(45), np.deg2rad(22.5)) R_2 = Rot3.RzRyRx(0, np.deg2rad(90), np.deg2rad(22.5)) computed_deg = geometry_comparisons.compute_relative_rotation_angle(R_1, R_2) expected_deg = 45 np.testing.assert_allclose(computed_deg, expected_deg, rtol=0.001, atol=0.001)
7,784,699,289,404,444,000
Tests the relative angle between two rotations.
tests/utils/test_geometry_comparisons.py
test_compute_relative_rotation_angle
yuancaimaiyi/gtsfm
python
def test_compute_relative_rotation_angle(self): R_1 = Rot3.RzRyRx(0, np.deg2rad(45), np.deg2rad(22.5)) R_2 = Rot3.RzRyRx(0, np.deg2rad(90), np.deg2rad(22.5)) computed_deg = geometry_comparisons.compute_relative_rotation_angle(R_1, R_2) expected_deg = 45 np.testing.assert_allclose(computed_deg, expected_deg, rtol=0.001, atol=0.001)
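compute_relative_rotation_angle evidently returns the geodesic angle between two rotations, which for rotation matrices can be recovered from the trace of R1^T R2. A numpy-only check of the 45-degree case in the test above, using pure y-axis rotations (this is the standard formula, not necessarily how gtsfm computes it internally):

import numpy as np

def rot_y(deg):
    # Rotation matrix about the y axis by the given angle in degrees.
    t = np.deg2rad(deg)
    return np.array([[np.cos(t), 0, np.sin(t)],
                     [0, 1, 0],
                     [-np.sin(t), 0, np.cos(t)]])

def relative_rotation_angle_deg(R1, R2):
    # Angle of the relative rotation R1^T R2, via trace(R) = 1 + 2*cos(theta).
    R = R1.T @ R2
    cos_theta = np.clip((np.trace(R) - 1.0) / 2.0, -1.0, 1.0)
    return np.rad2deg(np.arccos(cos_theta))

print(relative_rotation_angle_deg(rot_y(45), rot_y(90)))  # ~45.0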
def test_compute_relative_unit_translation_angle(self): 'Tests the relative angle between two unit-translations.' U_1 = Unit3(np.array([1, 0, 0])) U_2 = Unit3(np.array([0.5, 0.5, 0])) computed_deg = geometry_comparisons.compute_relative_unit_translation_angle(U_1, U_2) expected_deg = 45 self.assertAlmostEqual(computed_deg, expected_deg, places=3)
-7,068,969,421,371,341,000
Tests the relative angle between two unit-translations.
tests/utils/test_geometry_comparisons.py
test_compute_relative_unit_translation_angle
yuancaimaiyi/gtsfm
python
def test_compute_relative_unit_translation_angle(self): U_1 = Unit3(np.array([1, 0, 0])) U_2 = Unit3(np.array([0.5, 0.5, 0])) computed_deg = geometry_comparisons.compute_relative_unit_translation_angle(U_1, U_2) expected_deg = 45 self.assertAlmostEqual(computed_deg, expected_deg, places=3)
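The unit-translation comparison is just the angle between two direction vectors. For reference, the 45-degree expectation in the test falls out of the normalized dot product (an independent check rather than gtsfm's own code):

import numpy as np

def angle_between_deg(u, v):
    # Angle between two direction vectors, independent of their magnitudes.
    u = np.asarray(u, dtype=float)
    v = np.asarray(v, dtype=float)
    cos_a = np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))
    return np.rad2deg(np.arccos(np.clip(cos_a, -1.0, 1.0)))

print(angle_between_deg([1, 0, 0], [0.5, 0.5, 0]))  # ~45.0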
def test_align_poses_sim3_ignore_missing(self): 'Consider a simple cases with 4 poses in a line. Suppose SfM only recovers 2 of the 4 poses.' wT0 = Pose3(Rot3(np.eye(3)), np.zeros(3)) wT1 = Pose3(Rot3(np.eye(3)), np.ones(3)) wT2 = Pose3(Rot3(np.eye(3)), (np.ones(3) * 2)) wT3 = Pose3(Rot3(np.eye(3)), (np.ones(3) * 3)) aTi_list = [wT0, wT1, wT2, wT3] bTi_list = [None, wT1, None, wT3] aTi_list_ = geometry_comparisons.align_poses_sim3_ignore_missing(aTi_list, bTi_list) assert (aTi_list_[0] is None) assert (aTi_list_[2] is None) self.__assert_equality_on_pose3s(computed=[aTi_list_[1], aTi_list_[3]], expected=[aTi_list[1], aTi_list[3]])
8,555,432,123,078,173,000
Consider a simple cases with 4 poses in a line. Suppose SfM only recovers 2 of the 4 poses.
tests/utils/test_geometry_comparisons.py
test_align_poses_sim3_ignore_missing
yuancaimaiyi/gtsfm
python
def test_align_poses_sim3_ignore_missing(self): wT0 = Pose3(Rot3(np.eye(3)), np.zeros(3)) wT1 = Pose3(Rot3(np.eye(3)), np.ones(3)) wT2 = Pose3(Rot3(np.eye(3)), (np.ones(3) * 2)) wT3 = Pose3(Rot3(np.eye(3)), (np.ones(3) * 3)) aTi_list = [wT0, wT1, wT2, wT3] bTi_list = [None, wT1, None, wT3] aTi_list_ = geometry_comparisons.align_poses_sim3_ignore_missing(aTi_list, bTi_list) assert (aTi_list_[0] is None) assert (aTi_list_[2] is None) self.__assert_equality_on_pose3s(computed=[aTi_list_[1], aTi_list_[3]], expected=[aTi_list[1], aTi_list[3]])
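The ignore-missing variant in this test appears to align only the poses present in both lists and to leave None entries untouched. A generic sketch of such a wrapper, written against an arbitrary align function so it runs without gtsam (this structure is inferred from the test, not copied from the library):

def align_poses_ignore_missing(aTi_list, bTi_list, align_fn):
    # Collect indices where both lists have a pose, align on that subset,
    # then scatter the aligned poses back, keeping None where input was None.
    valid = [i for i, (a, b) in enumerate(zip(aTi_list, bTi_list))
             if a is not None and b is not None]
    aligned_subset = align_fn([aTi_list[i] for i in valid], [bTi_list[i] for i in valid])
    out = [None] * len(aTi_list)
    for i, pose in zip(valid, aligned_subset):
        out[i] = pose
    return out

# Demo with trivial "poses" (3-vectors) and an identity alignment.
a = [(0, 0, 0), (1, 1, 1), (2, 2, 2), (3, 3, 3)]
b = [None, (1, 1, 1), None, (3, 3, 3)]
print(align_poses_ignore_missing(a, b, align_fn=lambda x, y: x))
# -> [None, (1, 1, 1), None, (3, 3, 3)]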
def rsync_snapshots(machine_name, simulation_directory_from='', simulation_directory_to='.', snapshot_indices=snapshot_indices_keep): "\n Use rsync to copy snapshot file[s].\n\n Parameters\n ----------\n machine_name : str : 'pfe', 'stampede', 'bw', 'peloton'\n directory_from : str : directory to copy from\n directory_to : str : local directory to put snapshots\n snapshot_indices : int or list : index[s] of snapshots to transfer\n " snapshot_name_base = 'snap*_{:03d}*' directory_from = (ut.io.get_path(simulation_directory_from) + 'output/') directory_to = (ut.io.get_path(simulation_directory_to) + 'output/.') if np.isscalar(snapshot_indices): snapshot_indices = [snapshot_indices] snapshot_path_names = '' for snapshot_index in snapshot_indices: snapshot_path_names += ((directory_from + snapshot_name_base.format(snapshot_index)) + ' ') command = 'rsync -ahvP --size-only ' command += '{}:"{}" {}'.format(machine_name, snapshot_path_names, directory_to) print('\n* executing:\n{}\n'.format(command)) os.system(command)
-6,493,826,717,357,637,000
Use rsync to copy snapshot file[s]. Parameters ---------- machine_name : str : 'pfe', 'stampede', 'bw', 'peloton' directory_from : str : directory to copy from directory_to : str : local directory to put snapshots snapshot_indices : int or list : index[s] of snapshots to transfer
students_final_projects/group-f/gizmo_analysis/gizmo_file.py
rsync_snapshots
UAPH4582/PH482_582
python
def rsync_snapshots(machine_name, simulation_directory_from='', simulation_directory_to='.', snapshot_indices=snapshot_indices_keep): "\n Use rsync to copy snapshot file[s].\n\n Parameters\n ----------\n machine_name : str : 'pfe', 'stampede', 'bw', 'peloton'\n directory_from : str : directory to copy from\n directory_to : str : local directory to put snapshots\n snapshot_indices : int or list : index[s] of snapshots to transfer\n " snapshot_name_base = 'snap*_{:03d}*' directory_from = (ut.io.get_path(simulation_directory_from) + 'output/') directory_to = (ut.io.get_path(simulation_directory_to) + 'output/.') if np.isscalar(snapshot_indices): snapshot_indices = [snapshot_indices] snapshot_path_names = '' for snapshot_index in snapshot_indices: snapshot_path_names += ((directory_from + snapshot_name_base.format(snapshot_index)) + ' ') command = 'rsync -ahvP --size-only ' command += '{}:"{}" {}'.format(machine_name, snapshot_path_names, directory_to) print('\n* executing:\n{}\n'.format(command)) os.system(command)
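For a sense of what rsync_snapshots assembles before calling os.system, here is the command built for two snapshot indices with a made-up machine name and source directory (nothing is executed; this only reproduces the string formatting above):

snapshot_name_base = 'snap*_{:03d}*'
directory_from = 'simulations/m12i_res7100/output/'  # hypothetical path for the demo
directory_to = 'output/.'

snapshot_path_names = ''
for snapshot_index in [590, 600]:
    snapshot_path_names += directory_from + snapshot_name_base.format(snapshot_index) + ' '

command = 'rsync -ahvP --size-only '
command += '{}:"{}" {}'.format('peloton', snapshot_path_names, directory_to)
print(command)
# rsync -ahvP --size-only peloton:"simulations/m12i_res7100/output/snap*_590* simulations/m12i_res7100/output/snap*_600* " output/.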
def rsync_simulation_files(machine_name, directory_from='/oldscratch/projects/xsede/GalaxiesOnFIRE', directory_to='.'): "\n Use rsync to copy simulation files.\n\n Parameters\n ----------\n machine_name : str : 'pfe', 'stampede', 'bw', 'peloton'\n directory_from : str : directory to copy from\n directory_to : str : directory to put files\n " excludes = ['output/', 'restartfiles/', 'ewald_spc_table_64_dbl.dat', 'spcool_tables/', 'TREECOOL', 'energy.txt', 'balance.txt', 'GasReturn.txt', 'HIIheating.txt', 'MomWinds.txt', 'SNeIIheating.txt', '*.ics', 'snapshot_scale-factors.txt', 'submit_gizmo*.py', '*.bin', '*.particles', '*.bak', '*.err', '*.pyc', '*.o', '*.pro', '*.perl', '.ipynb_checkpoints', '.slurm', '.DS_Store', '*~', '._*', '#*#'] directory_from = ((machine_name + ':') + ut.io.get_path(directory_from)) directory_to = ut.io.get_path(directory_to) command = 'rsync -ahvP --size-only ' arguments = '' for exclude in excludes: arguments += '--exclude="{}" '.format(exclude) command += ((((arguments + directory_from) + ' ') + directory_to) + '.') print('\n* executing:\n{}\n'.format(command)) os.system(command)
-2,482,469,648,714,984,400
Use rsync to copy simulation files. Parameters ---------- machine_name : str : 'pfe', 'stampede', 'bw', 'peloton' directory_from : str : directory to copy from directory_to : str : directory to put files
students_final_projects/group-f/gizmo_analysis/gizmo_file.py
rsync_simulation_files
UAPH4582/PH482_582
python
def rsync_simulation_files(machine_name, directory_from='/oldscratch/projects/xsede/GalaxiesOnFIRE', directory_to='.'): "\n Use rsync to copy simulation files.\n\n Parameters\n ----------\n machine_name : str : 'pfe', 'stampede', 'bw', 'peloton'\n directory_from : str : directory to copy from\n directory_to : str : directory to put files\n " excludes = ['output/', 'restartfiles/', 'ewald_spc_table_64_dbl.dat', 'spcool_tables/', 'TREECOOL', 'energy.txt', 'balance.txt', 'GasReturn.txt', 'HIIheating.txt', 'MomWinds.txt', 'SNeIIheating.txt', '*.ics', 'snapshot_scale-factors.txt', 'submit_gizmo*.py', '*.bin', '*.particles', '*.bak', '*.err', '*.pyc', '*.o', '*.pro', '*.perl', '.ipynb_checkpoints', '.slurm', '.DS_Store', '*~', '._*', '#*#'] directory_from = ((machine_name + ':') + ut.io.get_path(directory_from)) directory_to = ut.io.get_path(directory_to) command = 'rsync -ahvP --size-only ' arguments = '' for exclude in excludes: arguments += '--exclude="{}" '.format(exclude) command += ((((arguments + directory_from) + ' ') + directory_to) + '.') print('\n* executing:\n{}\n'.format(command)) os.system(command)
def delete_snapshots(snapshot_directory='output', snapshot_index_limits=[1, 599], delete_halos=False): '\n Delete all snapshots in given directory within snapshot_index_limits,\n except for those in snapshot_indices_keep list.\n\n Parameters\n ----------\n snapshot_directory : str : directory of snapshots\n snapshot_index_limits : list : min and max snapshot indices to delete\n delete_halos : bool : whether to delete halo catalog files at same snapshot times\n ' snapshot_name_base = 'snap*_{:03d}*' if (not snapshot_directory): snapshot_directory = 'output/' halo_name_base = 'halos_{:03d}*' halo_directory = 'halo/rockstar_dm/catalog/' if (snapshot_directory[(- 1)] != '/'): snapshot_directory += '/' if ((snapshot_index_limits is None) or (not len(snapshot_index_limits))): snapshot_index_limits = [1, 599] snapshot_indices = np.arange(snapshot_index_limits[0], (snapshot_index_limits[1] + 1)) print() for snapshot_index in snapshot_indices: if (snapshot_index not in snapshot_indices_keep): snapshot_name = (snapshot_directory + snapshot_name_base.format(snapshot_index)) print('* deleting: {}'.format(snapshot_name)) os.system('rm -rf {}'.format(snapshot_name)) if delete_halos: halo_name = (halo_directory + halo_name_base.format(snapshot_index)) print('* deleting: {}'.format(halo_name)) os.system('rm -rf {}'.format(halo_name)) print()
3,725,941,081,509,534,000
Delete all snapshots in given directory within snapshot_index_limits, except for those in snapshot_indices_keep list. Parameters ---------- snapshot_directory : str : directory of snapshots snapshot_index_limits : list : min and max snapshot indices to delete delete_halos : bool : whether to delete halo catalog files at same snapshot times
students_final_projects/group-f/gizmo_analysis/gizmo_file.py
delete_snapshots
UAPH4582/PH482_582
python
def delete_snapshots(snapshot_directory='output', snapshot_index_limits=[1, 599], delete_halos=False): '\n Delete all snapshots in given directory within snapshot_index_limits,\n except for those in snapshot_indices_keep list.\n\n Parameters\n ----------\n snapshot_directory : str : directory of snapshots\n snapshot_index_limits : list : min and max snapshot indices to delete\n delete_halos : bool : whether to delete halo catalog files at same snapshot times\n ' snapshot_name_base = 'snap*_{:03d}*' if (not snapshot_directory): snapshot_directory = 'output/' halo_name_base = 'halos_{:03d}*' halo_directory = 'halo/rockstar_dm/catalog/' if (snapshot_directory[(- 1)] != '/'): snapshot_directory += '/' if ((snapshot_index_limits is None) or (not len(snapshot_index_limits))): snapshot_index_limits = [1, 599] snapshot_indices = np.arange(snapshot_index_limits[0], (snapshot_index_limits[1] + 1)) print() for snapshot_index in snapshot_indices: if (snapshot_index not in snapshot_indices_keep): snapshot_name = (snapshot_directory + snapshot_name_base.format(snapshot_index)) print('* deleting: {}'.format(snapshot_name)) os.system('rm -rf {}'.format(snapshot_name)) if delete_halos: halo_name = (halo_directory + halo_name_base.format(snapshot_index)) print('* deleting: {}'.format(halo_name)) os.system('rm -rf {}'.format(halo_name)) print()
def compress_snapshots(self, directory='output', directory_out='', snapshot_index_limits=[0, 600], thread_number=1): '\n Compress all snapshots in input directory.\n\n Parameters\n ----------\n directory : str : directory of snapshots\n directory_out : str : directory to write compressed snapshots\n snapshot_index_limits : list : min and max snapshot indices to compress\n syncronize : bool : whether to synchronize parallel tasks,\n wait for each thread bundle to complete before starting new bundle\n ' snapshot_indices = np.arange(snapshot_index_limits[0], (snapshot_index_limits[1] + 1)) args_list = [(directory, directory_out, snapshot_index) for snapshot_index in snapshot_indices] ut.io.run_in_parallel(self.compress_snapshot, args_list, thread_number=thread_number)
-3,041,412,715,459,912,000
Compress all snapshots in input directory. Parameters ---------- directory : str : directory of snapshots directory_out : str : directory to write compressed snapshots snapshot_index_limits : list : min and max snapshot indices to compress syncronize : bool : whether to synchronize parallel tasks, wait for each thread bundle to complete before starting new bundle
students_final_projects/group-f/gizmo_analysis/gizmo_file.py
compress_snapshots
UAPH4582/PH482_582
python
def compress_snapshots(self, directory='output', directory_out='', snapshot_index_limits=[0, 600], thread_number=1): '\n Compress all snapshots in input directory.\n\n Parameters\n ----------\n directory : str : directory of snapshots\n directory_out : str : directory to write compressed snapshots\n snapshot_index_limits : list : min and max snapshot indices to compress\n syncronize : bool : whether to synchronize parallel tasks,\n wait for each thread bundle to complete before starting new bundle\n ' snapshot_indices = np.arange(snapshot_index_limits[0], (snapshot_index_limits[1] + 1)) args_list = [(directory, directory_out, snapshot_index) for snapshot_index in snapshot_indices] ut.io.run_in_parallel(self.compress_snapshot, args_list, thread_number=thread_number)
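compress_snapshots hands the per-snapshot work to ut.io.run_in_parallel, which is not shown here. One common way to provide such a helper is a thread pool over argument tuples; the following is a guess at the general shape, not the gizmo_analysis implementation:

from concurrent.futures import ThreadPoolExecutor

def run_in_parallel(func, args_list, thread_number=1):
    # Apply func to each tuple of arguments, optionally across worker threads.
    if thread_number <= 1:
        return [func(*args) for args in args_list]
    with ThreadPoolExecutor(max_workers=thread_number) as pool:
        futures = [pool.submit(func, *args) for args in args_list]
        return [f.result() for f in futures]

def describe(directory, directory_out, snapshot_index):
    # Stand-in for compress_snapshot: just report what would be processed.
    return '{} -> {} [{}]'.format(directory, directory_out or directory, snapshot_index)

args_list = [('output', '', i) for i in (598, 599, 600)]
print(run_in_parallel(describe, args_list, thread_number=2))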
def compress_snapshot(self, directory='output', directory_out='', snapshot_index=600, analysis_directory='~/analysis', python_executable='python3'): '\n Compress single snapshot (which may be multiple files) in input directory.\n\n Parameters\n ----------\n directory : str : directory of snapshot\n directory_out : str : directory to write compressed snapshot\n snapshot_index : int : index of snapshot\n analysis_directory : str : directory of analysis code\n ' executable = '{} {}/manipulate_hdf5/compactify_hdf5.py -L 0'.format(python_executable, analysis_directory) snapshot_name_base = 'snap*_{:03d}*' if (directory[(- 1)] != '/'): directory += '/' if (directory_out and (directory_out[(- 1)] != '/')): directory_out += '/' path_file_names = glob.glob((directory + snapshot_name_base.format(snapshot_index))) if len(path_file_names): if ('snapdir' in path_file_names[0]): path_file_names = glob.glob((path_file_names[0] + '/*')) path_file_names.sort() for path_file_name in path_file_names: if directory_out: path_file_name_out = path_file_name.replace(directory, directory_out) else: path_file_name_out = path_file_name executable_i = '{} -o {} {}'.format(executable, path_file_name_out, path_file_name) self.say('executing: {}'.format(executable_i)) os.system(executable_i)
4,972,527,731,161,589,000
Compress single snapshot (which may be multiple files) in input directory. Parameters ---------- directory : str : directory of snapshot directory_out : str : directory to write compressed snapshot snapshot_index : int : index of snapshot analysis_directory : str : directory of analysis code
students_final_projects/group-f/gizmo_analysis/gizmo_file.py
compress_snapshot
UAPH4582/PH482_582
python
def compress_snapshot(self, directory='output', directory_out='', snapshot_index=600, analysis_directory='~/analysis', python_executable='python3'): '\n Compress single snapshot (which may be multiple files) in input directory.\n\n Parameters\n ----------\n directory : str : directory of snapshot\n directory_out : str : directory to write compressed snapshot\n snapshot_index : int : index of snapshot\n analysis_directory : str : directory of analysis code\n ' executable = '{} {}/manipulate_hdf5/compactify_hdf5.py -L 0'.format(python_executable, analysis_directory) snapshot_name_base = 'snap*_{:03d}*' if (directory[(- 1)] != '/'): directory += '/' if (directory_out and (directory_out[(- 1)] != '/')): directory_out += '/' path_file_names = glob.glob((directory + snapshot_name_base.format(snapshot_index))) if len(path_file_names): if ('snapdir' in path_file_names[0]): path_file_names = glob.glob((path_file_names[0] + '/*')) path_file_names.sort() for path_file_name in path_file_names: if directory_out: path_file_name_out = path_file_name.replace(directory, directory_out) else: path_file_name_out = path_file_name executable_i = '{} -o {} {}'.format(executable, path_file_name_out, path_file_name) self.say('executing: {}'.format(executable_i)) os.system(executable_i)
def test_compression(self, snapshot_indices='all', simulation_directory='.', snapshot_directory='output', compression_level=0): '\n Read headers from all snapshot files in simulation_directory to check whether files have\n been compressed.\n ' header_compression_name = 'compression.level' simulation_directory = ut.io.get_path(simulation_directory) snapshot_directory = ut.io.get_path(snapshot_directory) Read = gizmo_io.ReadClass() compression_wrong_snapshots = [] compression_none_snapshots = [] if ((snapshot_indices is None) or (snapshot_indices == 'all')): (_path_file_names, snapshot_indices) = Read.get_snapshot_file_names_indices((simulation_directory + snapshot_directory)) elif np.isscalar(snapshot_indices): snapshot_indices = [snapshot_indices] for snapshot_index in snapshot_indices: header = Read.read_header('index', snapshot_index, simulation_directory, verbose=False) if (header_compression_name in header): if ((compression_level is not None) and (header[header_compression_name] != compression_level)): compression_wrong_snapshots.append(snapshot_index) else: compression_none_snapshots.append(snapshot_index) self.say('* tested {} snapshots: {} - {}'.format(len(snapshot_indices), min(snapshot_indices), max(snapshot_indices))) self.say('* {} are uncompressed'.format(len(compression_none_snapshots))) if len(compression_none_snapshots): self.say('{}'.format(compression_none_snapshots)) self.say('* {} have wrong compression (level != {})'.format(len(compression_wrong_snapshots), compression_level)) if len(compression_wrong_snapshots): self.say('{}'.format(compression_wrong_snapshots))
6,285,998,842,586,060,000
Read headers from all snapshot files in simulation_directory to check whether files have been compressed.
students_final_projects/group-f/gizmo_analysis/gizmo_file.py
test_compression
UAPH4582/PH482_582
python
def test_compression(self, snapshot_indices='all', simulation_directory='.', snapshot_directory='output', compression_level=0): '\n Read headers from all snapshot files in simulation_directory to check whether files have\n been compressed.\n ' header_compression_name = 'compression.level' simulation_directory = ut.io.get_path(simulation_directory) snapshot_directory = ut.io.get_path(snapshot_directory) Read = gizmo_io.ReadClass() compression_wrong_snapshots = [] compression_none_snapshots = [] if ((snapshot_indices is None) or (snapshot_indices == 'all')): (_path_file_names, snapshot_indices) = Read.get_snapshot_file_names_indices((simulation_directory + snapshot_directory)) elif np.isscalar(snapshot_indices): snapshot_indices = [snapshot_indices] for snapshot_index in snapshot_indices: header = Read.read_header('index', snapshot_index, simulation_directory, verbose=False) if (header_compression_name in header): if ((compression_level is not None) and (header[header_compression_name] != compression_level)): compression_wrong_snapshots.append(snapshot_index) else: compression_none_snapshots.append(snapshot_index) self.say('* tested {} snapshots: {} - {}'.format(len(snapshot_indices), min(snapshot_indices), max(snapshot_indices))) self.say('* {} are uncompressed'.format(len(compression_none_snapshots))) if len(compression_none_snapshots): self.say('{}'.format(compression_none_snapshots)) self.say('* {} have wrong compression (level != {})'.format(len(compression_wrong_snapshots), compression_level)) if len(compression_wrong_snapshots): self.say('{}'.format(compression_wrong_snapshots))
def submit_transfer(self, simulation_path_directory='.', snapshot_directory='output', batch_file_name='globus_batch.txt', machine_name='peloton'): "\n Submit globus transfer of simulation files.\n Must initiate from Stampede.\n\n Parameters\n ----------\n simulation_path_directory : str : '.' or full path + directory of simulation\n snapshot_directory : str : directory of snapshot files within simulation_directory\n batch_file_name : str : name of file to write\n machine_name : str : name of machine transfering files to\n " simulation_path_directory = ut.io.get_path(simulation_path_directory) if (simulation_path_directory == './'): simulation_path_directory = os.getcwd() if (simulation_path_directory[(- 1)] != '/'): simulation_path_directory += '/' command = 'globus transfer $(globus bookmark show stampede){}'.format(simulation_path_directory[1:]) path_directories = simulation_path_directory.split('/') simulation_directory = path_directories[(- 2)] if (machine_name == 'peloton'): if ('elvis' in simulation_directory): directory_to = 'm12_elvis' else: directory_to = simulation_directory.split('_')[0] directory_to += (('/' + simulation_directory) + '/') command += ' $(globus bookmark show peloton-scratch){}'.format(directory_to) command += ' --sync-level=checksum --preserve-mtime --verify-checksum' command += ' --label "{}" --batch < {}'.format(simulation_directory, batch_file_name) self.write_batch_file(simulation_path_directory, snapshot_directory, batch_file_name) self.say('* executing:\n{}\n'.format(command)) os.system(command)
8,423,068,657,635,517,000
Submit globus transfer of simulation files. Must initiate from Stampede. Parameters ---------- simulation_path_directory : str : '.' or full path + directory of simulation snapshot_directory : str : directory of snapshot files within simulation_directory batch_file_name : str : name of file to write machine_name : str : name of machine transfering files to
students_final_projects/group-f/gizmo_analysis/gizmo_file.py
submit_transfer
UAPH4582/PH482_582
python
def submit_transfer(self, simulation_path_directory='.', snapshot_directory='output', batch_file_name='globus_batch.txt', machine_name='peloton'): "\n Submit globus transfer of simulation files.\n Must initiate from Stampede.\n\n Parameters\n ----------\n simulation_path_directory : str : '.' or full path + directory of simulation\n snapshot_directory : str : directory of snapshot files within simulation_directory\n batch_file_name : str : name of file to write\n machine_name : str : name of machine transfering files to\n " simulation_path_directory = ut.io.get_path(simulation_path_directory) if (simulation_path_directory == './'): simulation_path_directory = os.getcwd() if (simulation_path_directory[(- 1)] != '/'): simulation_path_directory += '/' command = 'globus transfer $(globus bookmark show stampede){}'.format(simulation_path_directory[1:]) path_directories = simulation_path_directory.split('/') simulation_directory = path_directories[(- 2)] if (machine_name == 'peloton'): if ('elvis' in simulation_directory): directory_to = 'm12_elvis' else: directory_to = simulation_directory.split('_')[0] directory_to += (('/' + simulation_directory) + '/') command += ' $(globus bookmark show peloton-scratch){}'.format(directory_to) command += ' --sync-level=checksum --preserve-mtime --verify-checksum' command += ' --label "{}" --batch < {}'.format(simulation_directory, batch_file_name) self.write_batch_file(simulation_path_directory, snapshot_directory, batch_file_name) self.say('* executing:\n{}\n'.format(command)) os.system(command)
def write_batch_file(self, simulation_directory='.', snapshot_directory='output', file_name='globus_batch.txt'): '\n Write batch file that sets files to transfer via globus.\n\n Parameters\n ----------\n simulation_directory : str : directory of simulation\n snapshot_directory : str : directory of snapshot files within simulation_directory\n file_name : str : name of batch file to write\n ' simulation_directory = ut.io.get_path(simulation_directory) snapshot_directory = ut.io.get_path(snapshot_directory) transfer_string = '' transfer_items = ['gizmo/', 'gizmo_config.sh', 'gizmo_parameters.txt', 'gizmo_parameters.txt-usedvalues', 'gizmo.out.txt', 'snapshot_times.txt', 'notes.txt', 'track/', 'halo/rockstar_dm/catalog_hdf5/'] for transfer_item in transfer_items: if os.path.exists((simulation_directory + transfer_item)): command = '{} {}' if (transfer_item[(- 1)] == '/'): transfer_item = transfer_item[:(- 1)] command += ' --recursive' command = (command.format(transfer_item, transfer_item) + '\n') transfer_string += command transfer_items = glob.glob((simulation_directory + 'initial_condition*/*')) for transfer_item in transfer_items: if ('.ics' not in transfer_item): transfer_item = transfer_item.replace(simulation_directory, '') command = '{} {}\n'.format(transfer_item, transfer_item) transfer_string += command for snapshot_index in snapshot_indices_keep: snapshot_name = '{}snapdir_{:03d}'.format(snapshot_directory, snapshot_index) if os.path.exists((simulation_directory + snapshot_name)): snapshot_string = '{} {} --recursive\n'.format(snapshot_name, snapshot_name) transfer_string += snapshot_string snapshot_name = '{}snapshot_{:03d}.hdf5'.format(snapshot_directory, snapshot_index) if os.path.exists((simulation_directory + snapshot_name)): snapshot_string = '{} {}\n'.format(snapshot_name, snapshot_name) transfer_string += snapshot_string with open(file_name, 'w') as file_out: file_out.write(transfer_string)
-8,373,690,995,233,226,000
Write batch file that sets files to transfer via globus. Parameters ---------- simulation_directory : str : directory of simulation snapshot_directory : str : directory of snapshot files within simulation_directory file_name : str : name of batch file to write
students_final_projects/group-f/gizmo_analysis/gizmo_file.py
write_batch_file
UAPH4582/PH482_582
python
def write_batch_file(self, simulation_directory='.', snapshot_directory='output', file_name='globus_batch.txt'): '\n Write batch file that sets files to transfer via globus.\n\n Parameters\n ----------\n simulation_directory : str : directory of simulation\n snapshot_directory : str : directory of snapshot files within simulation_directory\n file_name : str : name of batch file to write\n ' simulation_directory = ut.io.get_path(simulation_directory) snapshot_directory = ut.io.get_path(snapshot_directory) transfer_string = '' transfer_items = ['gizmo/', 'gizmo_config.sh', 'gizmo_parameters.txt', 'gizmo_parameters.txt-usedvalues', 'gizmo.out.txt', 'snapshot_times.txt', 'notes.txt', 'track/', 'halo/rockstar_dm/catalog_hdf5/'] for transfer_item in transfer_items: if os.path.exists((simulation_directory + transfer_item)): command = '{} {}' if (transfer_item[(- 1)] == '/'): transfer_item = transfer_item[:(- 1)] command += ' --recursive' command = (command.format(transfer_item, transfer_item) + '\n') transfer_string += command transfer_items = glob.glob((simulation_directory + 'initial_condition*/*')) for transfer_item in transfer_items: if ('.ics' not in transfer_item): transfer_item = transfer_item.replace(simulation_directory, '') command = '{} {}\n'.format(transfer_item, transfer_item) transfer_string += command for snapshot_index in snapshot_indices_keep: snapshot_name = '{}snapdir_{:03d}'.format(snapshot_directory, snapshot_index) if os.path.exists((simulation_directory + snapshot_name)): snapshot_string = '{} {} --recursive\n'.format(snapshot_name, snapshot_name) transfer_string += snapshot_string snapshot_name = '{}snapshot_{:03d}.hdf5'.format(snapshot_directory, snapshot_index) if os.path.exists((simulation_directory + snapshot_name)): snapshot_string = '{} {}\n'.format(snapshot_name, snapshot_name) transfer_string += snapshot_string with open(file_name, 'w') as file_out: file_out.write(transfer_string)
def __init__(self): '\n Initialise service and configuration\n ' logger.info('Initialised Backend-Service - Ready for gRPC Calls.')
-7,262,744,825,607,908,000
Initialise service and configuration
backend-service/backend_service/service.py
__init__
dgildeh/otel-python-cloud-run
python
def __init__(self): '\n \n ' logger.info('Initialised Backend-Service - Ready for gRPC Calls.')
def test_parse_nexus_tree(self): 'parse_nexus_tree returns a dnd string and a translation table list' (Trans_table, dnd) = parse_nexus_tree(Nexus_tree) self.assertEqual(dnd['tree PAUP_1'], '(1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),17),(8,(12,(14,16))))),13))),20)),21);') self.assertEqual(Trans_table['1'], 'outgroup25') self.assertEqual(Trans_table['2'], 'AF078391l') self.assertEqual(Trans_table['3'], 'AF078211af') self.assertEqual(Trans_table['4'], 'AF078393l') self.assertEqual(Trans_table['5'], 'AF078187af') self.assertEqual(Trans_table['6'], 'AF078320l') self.assertEqual(Trans_table['21'], 'outgroup258') self.assertEqual(Trans_table['20'], 'AF078179af') self.assertEqual(Trans_table['19'], 'AF078251af') (Trans_table, dnd) = parse_nexus_tree(Nexus_tree_2) self.assertEqual(Trans_table, None) self.assertEqual(dnd['tree nj'], '((((((((((YA10260L1:0.01855,SARAG06_Y:0.00367):0.01965,(((YA270L1G0:0.01095,SARAD10_Y:0.00699):0.01744,YA270L1A0:0.04329):0.00028,((YA165L1C1:0.01241,SARAA02_Y:0.02584):0.00213,((YA165L1H0:0.00092,SARAF10_Y:-0.00092):0.00250,(YA165L1A0:0.00177,SARAH10_Y:0.01226):0.00198):0.00131):0.00700):0.01111):0.11201,(YA160L1F0:0.00348,SARAG01_Y:-0.00122):0.13620):0.01202,((((YRM60L1D0:0.00357,(YRM60L1C0:0.00477,SARAE10_Y:-0.00035):0.00086):0.00092,SARAE03_Y:0.00126):0.00125,SARAC11_Y:0.00318):0.00160,YRM60L1H0:0.00593):0.09975):0.07088,SARAA01_Y:0.02880):0.00190,SARAB04_Y:0.05219):0.00563,YRM60L1E0:0.06099):0.00165,(YRM60L1H0:0.00450,SARAF11_Y:0.01839):0.00288):0.00129,YRM60L1B1:0.00713):0.00194,(YRM60L1G0:0.00990,(YA165L1G0:0.00576,(YA160L1G0:0.01226,SARAA11_Y:0.00389):0.00088):0.00300):0.00614,SARAC06_Y:0.00381);')
-4,173,345,485,592,903,700
parse_nexus_tree returns a dnd string and a translation table list
tests/test_parse/test_nexus.py
test_parse_nexus_tree
tla256/cogent3
python
def test_parse_nexus_tree(self): (Trans_table, dnd) = parse_nexus_tree(Nexus_tree) self.assertEqual(dnd['tree PAUP_1'], '(1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),17),(8,(12,(14,16))))),13))),20)),21);') self.assertEqual(Trans_table['1'], 'outgroup25') self.assertEqual(Trans_table['2'], 'AF078391l') self.assertEqual(Trans_table['3'], 'AF078211af') self.assertEqual(Trans_table['4'], 'AF078393l') self.assertEqual(Trans_table['5'], 'AF078187af') self.assertEqual(Trans_table['6'], 'AF078320l') self.assertEqual(Trans_table['21'], 'outgroup258') self.assertEqual(Trans_table['20'], 'AF078179af') self.assertEqual(Trans_table['19'], 'AF078251af') (Trans_table, dnd) = parse_nexus_tree(Nexus_tree_2) self.assertEqual(Trans_table, None) self.assertEqual(dnd['tree nj'], '((((((((((YA10260L1:0.01855,SARAG06_Y:0.00367):0.01965,(((YA270L1G0:0.01095,SARAD10_Y:0.00699):0.01744,YA270L1A0:0.04329):0.00028,((YA165L1C1:0.01241,SARAA02_Y:0.02584):0.00213,((YA165L1H0:0.00092,SARAF10_Y:-0.00092):0.00250,(YA165L1A0:0.00177,SARAH10_Y:0.01226):0.00198):0.00131):0.00700):0.01111):0.11201,(YA160L1F0:0.00348,SARAG01_Y:-0.00122):0.13620):0.01202,((((YRM60L1D0:0.00357,(YRM60L1C0:0.00477,SARAE10_Y:-0.00035):0.00086):0.00092,SARAE03_Y:0.00126):0.00125,SARAC11_Y:0.00318):0.00160,YRM60L1H0:0.00593):0.09975):0.07088,SARAA01_Y:0.02880):0.00190,SARAB04_Y:0.05219):0.00563,YRM60L1E0:0.06099):0.00165,(YRM60L1H0:0.00450,SARAF11_Y:0.01839):0.00288):0.00129,YRM60L1B1:0.00713):0.00194,(YRM60L1G0:0.00990,(YA165L1G0:0.00576,(YA160L1G0:0.01226,SARAA11_Y:0.00389):0.00088):0.00300):0.00614,SARAC06_Y:0.00381);')
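The later tests spell out the shape of the intermediate data these NEXUS parsers work with: a Translate block of '\t\tNUMBER NAME,' lines terminated by '\t\t;', and tree lines of the form "tree NAME = [&R] (...);". A toy version of the two final parsing steps, inferred from those expectations rather than copied from cogent3:

def parse_trans_table(lines):
    # Map the numeric labels in a NEXUS Translate block to taxon names,
    # stopping at the terminating ';' and stripping quotes and trailing commas.
    table = {}
    for line in lines:
        line = line.strip()
        if line == ';':
            break
        number, name = line.split(None, 1)
        table[number] = name.rstrip(',').strip("'")
    return table

def parse_dnd(lines):
    # Map each tree name to its newick string, dropping the [&R]/[&U] comment.
    trees = {}
    for line in lines:
        name_part, _, newick = line.partition('=')
        newick = newick.strip()
        if newick.startswith('['):
            newick = newick.split(']', 1)[1].strip()
        trees[name_part.strip()] = newick
    return trees

print(parse_trans_table(['\t\t1 outgroup25,', '\t\t2 AF078391l,', '\t\t;']))  # {'1': 'outgroup25', '2': 'AF078391l'}
print(parse_dnd(['tree PAUP_1 = [&R] (1,(2,3));']))  # {'tree PAUP_1': '(1,(2,3));'}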
def test_parse_nexus_tree_sq(self): 'remove single quotes from tree and translate tables' (Trans_table, dnd) = parse_nexus_tree(Nexus_tree_3) self.assertEqual(dnd['tree PAUP_1'], '(1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),17),(8,(12,(14,16))))),13))),20)),21);') self.assertEqual(Trans_table['1'], 'outgroup25') self.assertEqual(Trans_table['2'], 'AF078391l') self.assertEqual(Trans_table['3'], 'AF078211af') self.assertEqual(Trans_table['4'], 'AF078393l') self.assertEqual(Trans_table['5'], 'AF078187af') self.assertEqual(Trans_table['6'], 'AF078320l') self.assertEqual(Trans_table['21'], 'outgroup258') self.assertEqual(Trans_table['20'], 'AF078179af') self.assertEqual(Trans_table['19'], 'AF078251af')
-1,260,149,600,308,407,300
remove single quotes from tree and translate tables
tests/test_parse/test_nexus.py
test_parse_nexus_tree_sq
tla256/cogent3
python
def test_parse_nexus_tree_sq(self): (Trans_table, dnd) = parse_nexus_tree(Nexus_tree_3) self.assertEqual(dnd['tree PAUP_1'], '(1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),17),(8,(12,(14,16))))),13))),20)),21);') self.assertEqual(Trans_table['1'], 'outgroup25') self.assertEqual(Trans_table['2'], 'AF078391l') self.assertEqual(Trans_table['3'], 'AF078211af') self.assertEqual(Trans_table['4'], 'AF078393l') self.assertEqual(Trans_table['5'], 'AF078187af') self.assertEqual(Trans_table['6'], 'AF078320l') self.assertEqual(Trans_table['21'], 'outgroup258') self.assertEqual(Trans_table['20'], 'AF078179af') self.assertEqual(Trans_table['19'], 'AF078251af')
def test_get_tree_info(self): 'get_tree_info returns the Nexus file section that describes the tree' result = get_tree_info(Nexus_tree) self.assertEqual(len(result), 33) self.assertEqual(result[0], 'Begin trees; [Treefile saved Wednesday, May 5, 2004 5:02 PM]') self.assertEqual(result[31], 'tree PAUP_1 = [&R] (1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),17),(8,(12,(14,16))))),13))),20)),21);')
2,344,120,915,064,810,500
get_tree_info returns the Nexus file section that describes the tree
tests/test_parse/test_nexus.py
test_get_tree_info
tla256/cogent3
python
def test_get_tree_info(self):
    result = get_tree_info(Nexus_tree)
    self.assertEqual(len(result), 33)
    self.assertEqual(result[0], 'Begin trees; [Treefile saved Wednesday, May 5, 2004 5:02 PM]')
    self.assertEqual(result[31], 'tree PAUP_1 = [&R] (1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),17),(8,(12,(14,16))))),13))),20)),21);')
def test_split_tree_info(self):
    'split_tree_info splits lines into header, Trans_table, and dnd'
    tree_info = get_tree_info(Nexus_tree)
    (header, trans_table, dnd) = split_tree_info(tree_info)
    self.assertEqual(len(header), 9)
    self.assertEqual(len(trans_table), 22)
    self.assertEqual(len(dnd), 2)
    self.assertEqual(header[0], 'Begin trees; [Treefile saved Wednesday, May 5, 2004 5:02 PM]')
    self.assertEqual(header[8], '\tTranslate')
    self.assertEqual(trans_table[0], '\t\t1 outgroup25,')
    self.assertEqual(trans_table[21], '\t\t;')
    self.assertEqual(dnd[0], 'tree PAUP_1 = [&R] (1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),17),(8,(12,(14,16))))),13))),20)),21);')
-4,046,928,395,436,165,600
split_tree_info splits lines into header, Trans_table, and dnd
tests/test_parse/test_nexus.py
test_split_tree_info
tla256/cogent3
python
def test_split_tree_info(self):
    tree_info = get_tree_info(Nexus_tree)
    (header, trans_table, dnd) = split_tree_info(tree_info)
    self.assertEqual(len(header), 9)
    self.assertEqual(len(trans_table), 22)
    self.assertEqual(len(dnd), 2)
    self.assertEqual(header[0], 'Begin trees; [Treefile saved Wednesday, May 5, 2004 5:02 PM]')
    self.assertEqual(header[8], '\tTranslate')
    self.assertEqual(trans_table[0], '\t\t1 outgroup25,')
    self.assertEqual(trans_table[21], '\t\t;')
    self.assertEqual(dnd[0], 'tree PAUP_1 = [&R] (1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),17),(8,(12,(14,16))))),13))),20)),21);')
def test_parse_trans_table(self):
    'parse_trans_table returns a dict with the taxa names indexed by number'
    tree_info = get_tree_info(Nexus_tree)
    (header, trans_table, dnd) = split_tree_info(tree_info)
    Trans_table = parse_trans_table(trans_table)
    self.assertEqual(len(Trans_table), 21)
    self.assertEqual(Trans_table['1'], 'outgroup25')
    self.assertEqual(Trans_table['2'], 'AF078391l')
    self.assertEqual(Trans_table['3'], 'AF078211af')
    self.assertEqual(Trans_table['4'], 'AF078393l')
    self.assertEqual(Trans_table['5'], 'AF078187af')
    self.assertEqual(Trans_table['6'], 'AF078320l')
    self.assertEqual(Trans_table['21'], 'outgroup258')
    self.assertEqual(Trans_table['20'], 'AF078179af')
    self.assertEqual(Trans_table['19'], 'AF078251af')
-6,303,950,845,564,525,000
parse_trans_table returns a dict with the taxa names indexed by number
tests/test_parse/test_nexus.py
test_parse_trans_table
tla256/cogent3
python
def test_parse_trans_table(self):
    tree_info = get_tree_info(Nexus_tree)
    (header, trans_table, dnd) = split_tree_info(tree_info)
    Trans_table = parse_trans_table(trans_table)
    self.assertEqual(len(Trans_table), 21)
    self.assertEqual(Trans_table['1'], 'outgroup25')
    self.assertEqual(Trans_table['2'], 'AF078391l')
    self.assertEqual(Trans_table['3'], 'AF078211af')
    self.assertEqual(Trans_table['4'], 'AF078393l')
    self.assertEqual(Trans_table['5'], 'AF078187af')
    self.assertEqual(Trans_table['6'], 'AF078320l')
    self.assertEqual(Trans_table['21'], 'outgroup258')
    self.assertEqual(Trans_table['20'], 'AF078179af')
    self.assertEqual(Trans_table['19'], 'AF078251af')
def test_parse_dnd(self):
    'parse_dnd returns a dict with dnd indexed by tree name'
    tree_info = get_tree_info(Nexus_tree)
    (header, trans_table, dnd) = split_tree_info(tree_info)
    dnd_dict = parse_dnd(dnd)
    self.assertEqual(dnd_dict['tree PAUP_1'], '(1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),17),(8,(12,(14,16))))),13))),20)),21);')
5,552,166,177,345,554,000
parse_dnd returns a dict with dnd indexed by tree name
tests/test_parse/test_nexus.py
test_parse_dnd
tla256/cogent3
python
def test_parse_dnd(self):
    tree_info = get_tree_info(Nexus_tree)
    (header, trans_table, dnd) = split_tree_info(tree_info)
    dnd_dict = parse_dnd(dnd)
    self.assertEqual(dnd_dict['tree PAUP_1'], '(1,(2,(((3,4),(5,(((((6,10),9),(11,18)),((((7,15),19),17),(8,(12,(14,16))))),13))),20)),21);')
def test_get_BL_table(self):
    'get_BL_table returns the section of the log file w/ the BL table'
    BL_table = get_BL_table(PAUP_log)
    self.assertEqual(len(BL_table), 40)
    self.assertEqual(BL_table[0], ' 40 root 0 0 0')
    self.assertEqual(BL_table[39], 'outgroup258 (21)* 40 45 27 67')
4,894,002,121,790,787,000
get_BL_table returns the section of the log file w/ the BL table
tests/test_parse/test_nexus.py
test_get_BL_table
tla256/cogent3
python
def test_get_BL_table(self):
    BL_table = get_BL_table(PAUP_log)
    self.assertEqual(len(BL_table), 40)
    self.assertEqual(BL_table[0], ' 40 root 0 0 0')
    self.assertEqual(BL_table[39], 'outgroup258 (21)* 40 45 27 67')
def test_find_fields(self):
    'find_fields takes BL table line and returns field names mapped to info'
    result = find_fields(line1)
    self.assertEqual(result['taxa'], '40')
    self.assertEqual(result['bl'], '0')
    self.assertEqual(result['parent'], 'root')
-6,646,046,789,345,280,000
find_fields takes BL table line and returns field names mapped to info
tests/test_parse/test_nexus.py
test_find_fields
tla256/cogent3
python
def test_find_fields(self):
    result = find_fields(line1)
    self.assertEqual(result['taxa'], '40')
    self.assertEqual(result['bl'], '0')
    self.assertEqual(result['parent'], 'root')
def test_parse_taxa(self):
    'parse_taxa should return the taxa # from a taxa_field from find_fields'
    result1 = find_fields(line1)
    result2 = find_fields(line2)
    result3 = find_fields(line3)
    result4 = find_fields(line4)
    self.assertEqual(parse_taxa(result1['taxa']), '40')
    self.assertEqual(parse_taxa(result2['taxa']), '1')
    self.assertEqual(parse_taxa(result3['taxa']), '39')
    self.assertEqual(parse_taxa(result4['taxa']), '2')
-8,151,969,477,806,218,000
parse_taxa should return the taxa # from a taxa_field from find_fields
tests/test_parse/test_nexus.py
test_parse_taxa
tla256/cogent3
python
def test_parse_taxa(self):
    result1 = find_fields(line1)
    result2 = find_fields(line2)
    result3 = find_fields(line3)
    result4 = find_fields(line4)
    self.assertEqual(parse_taxa(result1['taxa']), '40')
    self.assertEqual(parse_taxa(result2['taxa']), '1')
    self.assertEqual(parse_taxa(result3['taxa']), '39')
    self.assertEqual(parse_taxa(result4['taxa']), '2')
def test_parse_PAUP_log(self):
    'parse_PAUP_log extracts branch length info from a PAUP log file'
    BL_dict = parse_PAUP_log(PAUP_log)
    self.assertEqual(len(BL_dict), 40)
    self.assertEqual(BL_dict['1'], ('40', 40))
    self.assertEqual(BL_dict['40'], ('root', 0))
    self.assertEqual(BL_dict['39'], ('40', 57))
    self.assertEqual(BL_dict['2'], ('39', 56))
    self.assertEqual(BL_dict['26'], ('34', 5))
    self.assertEqual(BL_dict['21'], ('40', 45))
-7,684,371,037,230,970,000
parse_PAUP_log extracts branch length info from a PAUP log file
tests/test_parse/test_nexus.py
test_parse_PAUP_log
tla256/cogent3
python
def test_parse_PAUP_log(self):
    BL_dict = parse_PAUP_log(PAUP_log)
    self.assertEqual(len(BL_dict), 40)
    self.assertEqual(BL_dict['1'], ('40', 40))
    self.assertEqual(BL_dict['40'], ('root', 0))
    self.assertEqual(BL_dict['39'], ('40', 57))
    self.assertEqual(BL_dict['2'], ('39', 56))
    self.assertEqual(BL_dict['26'], ('34', 5))
    self.assertEqual(BL_dict['21'], ('40', 45))
def test_align_with_comments(self):
    'correctly handle an alignment block containing comments'
    parser = MinimalNexusAlignParser('data/nexus_comments.nex')
    got = {n: s for (n, s) in parser}
    expect = {'Ephedra': 'TTAAGCCATGCATGTCTAAGTATGAACTAATTCCAAACGGTGA',
              'Gnetum': 'TTAAGCCATGCATGTCTATGTACGAACTAATC-AGAACGGTGA',
              'Welwitschia': 'TTAAGCCATGCACGTGTAAGTATGAACTAGTC-GAAACGGTGA',
              'Ginkgo': 'TTAAGCCATGCATGTGTAAGTATGAACTCTTTACAGACTGTGA',
              'Pinus': 'TTAAGCCATGCATGTCTAAGTATGAACTAATTGCAGACTGTGA'}
    self.assertEqual(got, expect)
-8,109,012,507,756,125,000
correctly handle an alignment block containing comments
tests/test_parse/test_nexus.py
test_align_with_comments
tla256/cogent3
python
def test_align_with_comments(self):
    parser = MinimalNexusAlignParser('data/nexus_comments.nex')
    got = {n: s for (n, s) in parser}
    expect = {'Ephedra': 'TTAAGCCATGCATGTCTAAGTATGAACTAATTCCAAACGGTGA',
              'Gnetum': 'TTAAGCCATGCATGTCTATGTACGAACTAATC-AGAACGGTGA',
              'Welwitschia': 'TTAAGCCATGCACGTGTAAGTATGAACTAGTC-GAAACGGTGA',
              'Ginkgo': 'TTAAGCCATGCATGTGTAAGTATGAACTCTTTACAGACTGTGA',
              'Pinus': 'TTAAGCCATGCATGTCTAAGTATGAACTAATTGCAGACTGTGA'}
    self.assertEqual(got, expect)
def test_align_with_spaced_seqs(self):
    'correctly handle an alignment block with spaces in seqs'
    parser = MinimalNexusAlignParser('data/nexus_dna.nex')
    seqs = {n: s for (n, s) in parser}
    self.assertEqual(len(seqs), 10)
    lengths = set((len(seqs[n]) for n in seqs))
    self.assertEqual(lengths, {705})
-6,359,138,814,970,381,000
correctly handle an alignment block with spaces in seqs
tests/test_parse/test_nexus.py
test_align_with_spaced_seqs
tla256/cogent3
python
def test_align_with_spaced_seqs(self):
    parser = MinimalNexusAlignParser('data/nexus_dna.nex')
    seqs = {n: s for (n, s) in parser}
    self.assertEqual(len(seqs), 10)
    lengths = set((len(seqs[n]) for n in seqs))
    self.assertEqual(lengths, {705})
def test_align_from_mixed(self):
    'correctly handle a file with tree and alignment block'
    parser = MinimalNexusAlignParser('data/nexus_mixed.nex')
    got = {n: s for (n, s) in parser}
    expect = {'fish': 'ACATAGAGGGTACCTCTAAG',
              'frog': 'ACATAGAGGGTACCTCTAAG',
              'snake': 'ACATAGAGGGTACCTCTAAG',
              'mouse': 'ACATAGAGGGTACCTCTAAG'}
    self.assertEqual(got, expect)
4,272,722,143,379,951,000
correctly handle a file with tree and alignment block
tests/test_parse/test_nexus.py
test_align_from_mixed
tla256/cogent3
python
def test_align_from_mixed(self):
    parser = MinimalNexusAlignParser('data/nexus_mixed.nex')
    got = {n: s for (n, s) in parser}
    expect = {'fish': 'ACATAGAGGGTACCTCTAAG',
              'frog': 'ACATAGAGGGTACCTCTAAG',
              'snake': 'ACATAGAGGGTACCTCTAAG',
              'mouse': 'ACATAGAGGGTACCTCTAAG'}
    self.assertEqual(got, expect)