Python
def inform_about_app_extras(extras: Sequence[str]) -> NoReturn:
    """Inform the user about required package extras and exit."""
    exec_name = Path(sys.argv[0]).stem
    extras_str = ",".join(extras)
    logger.error(
        "\nYou have to install the"
        f' {extras_str} {"extra" if len(extras) == 1 else "extras"} for {exec_name} to'
        ' work.\nWith pip, you can do it with something like: "pip3 install'
        f' taskwarrior-syncall[{extras_str}]"\nExiting.'
    )
    sys.exit(1)
Python
def fetch_from_pass_manager(password_path: str) -> str:
    """Gpg-decrypt and read the contents of a password file.

    The path should be either relative to the password store directory or a
    full path.
    """
    logger.debug(f"Attempting to read {password_path} from UNIX Password Store...")
    pass_dir = valid_path(os.environ.get("PASSWORD_STORE_DIR", "~/.password-store"))
    if str(password_path).startswith(str(pass_dir)):
        path = Path(password_path)
    else:
        path = pass_dir / password_path
    pass_full_path = path.with_suffix(".gpg")

    try:
        passwd = read_gpg_token(pass_full_path)
    except subprocess.CalledProcessError as err:
        logger.error(
            "\n".join(
                [
                    f"Couldn't read {password_path} from pass\n\nFull path: {pass_full_path}",
                    non_empty("stdout", err.stdout.decode("utf-8"), join_with=": "),
                    non_empty("stderr", err.stderr.decode("utf-8"), join_with=": "),
                ]
            )
        )
        sys.exit(1)

    return passwd
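A one-line usage sketch, assuming the helper above is in scope and the store contains an entry at the given path (the entry name here is hypothetical):

Python
# Resolved against $PASSWORD_STORE_DIR (default: ~/.password-store) and
# gpg-decrypted; logs an error and exits if decryption fails.
token = fetch_from_pass_manager("notion.so/token")  # hypothetical entry name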
Python
def _note_has_label(self, note: TopLevelNode, label: Label) -> bool:
    """True if the given Google Keep note has the given label."""
    for la in note.labels.all():
        if label == la:
            return True

    return False
Python
def _note_has_label_str(self, note: TopLevelNode, label_str: str) -> bool:
    """True if the given Google Keep note has the given label."""
    for la in note.labels.all():
        if label_str == la.name:
            return True

    return False
Python
def _create_note(self, note_title: str) -> GKeepList:
    """Create a new note (list of items) in Google Keep.

    Applies the predefined label to the note - if one was provided during
    initialization.
    """
    li = self._keep.createList(note_title)
    if self._notes_label is not None:
        li.labels.add(self._notes_label)

    return li
Python
def notion_simple_todo() -> NotionTodoBlockItem:
    """Simple to_do block returned by Notion Python SDK.

    - Unarchived (not deleted)
    - Unchecked (not completed)
    """
    return {
        "object": "block",
        "id": "7de89eb6-4ee1-472c-abcd-8231049e9d8d",
        "created_time": "2021-11-04T19:07:00.000Z",
        "last_edited_time": "2021-12-04T10:01:00.000Z",
        "has_children": False,
        "archived": False,
        "type": "to_do",
        "to_do": {
            "text": [
                {
                    "type": "text",
                    "text": {"content": "Lacinato kale", "link": None},
                    "annotations": {
                        "bold": False,
                        "italic": False,
                        "strikethrough": False,
                        "underline": False,
                        "code": False,
                        "color": "default",
                    },
                    "plain_text": "Lacinato kale",
                    "href": None,
                }
            ],
            "checked": False,
        },
    }
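A small sketch of how such a fixture might be consumed in a test, assuming the function above is in scope (NotionTodoBlockItem is assumed to be a type alias for the returned dict):

Python
block = notion_simple_todo()
# Reassemble the visible text from the rich-text segments.
text = "".join(segment["plain_text"] for segment in block["to_do"]["text"])
assert text == "Lacinato kale"
assert block["to_do"]["checked"] is False
assert block["archived"] is False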
Python
def notion_chained_todo() -> NotionTodoBlockItem:
    """More complex to_do block returned by Notion Python SDK.

    Represents a todo with the following text (markdown notation in use):
    "Bringing it *back* with *style* and *glamour*"
    """
    return {
        "object": "block",
        "id": "9146e728-d7c4-4678-bab4-377a3991ebb8",
        "created_time": "2021-11-04T19:07:00.000Z",
        "last_edited_time": "2021-12-04T11:30:00.000Z",
        "has_children": False,
        "archived": False,
        "type": "to_do",
        "to_do": {
            "text": [
                {
                    "type": "text",
                    "text": {"content": "Bringing it ", "link": None},
                    "annotations": {
                        "bold": False,
                        "italic": False,
                        "strikethrough": False,
                        "underline": False,
                        "code": False,
                        "color": "default",
                    },
                    "plain_text": "Bringing it ",
                    "href": None,
                },
                {
                    "type": "text",
                    "text": {"content": "back", "link": None},
                    "annotations": {
                        "bold": True,
                        "italic": False,
                        "strikethrough": False,
                        "underline": False,
                        "code": False,
                        "color": "default",
                    },
                    "plain_text": "back",
                    "href": None,
                },
                {
                    "type": "text",
                    "text": {"content": " with ", "link": None},
                    "annotations": {
                        "bold": False,
                        "italic": False,
                        "strikethrough": False,
                        "underline": False,
                        "code": False,
                        "color": "default",
                    },
                    "plain_text": " with ",
                    "href": None,
                },
                {
                    "type": "text",
                    "text": {"content": "style", "link": None},
                    "annotations": {
                        "bold": False,
                        "italic": False,
                        "strikethrough": False,
                        "underline": False,
                        "code": True,
                        "color": "default",
                    },
                    "plain_text": "style",
                    "href": None,
                },
                {
                    "type": "text",
                    "text": {"content": " and ", "link": None},
                    "annotations": {
                        "bold": False,
                        "italic": False,
                        "strikethrough": False,
                        "underline": False,
                        "code": False,
                        "color": "default",
                    },
                    "plain_text": " and ",
                    "href": None,
                },
                {
                    "type": "text",
                    "text": {"content": "glamour", "link": None},
                    "annotations": {
                        "bold": False,
                        "italic": False,
                        "strikethrough": False,
                        "underline": False,
                        "code": True,
                        "color": "default",
                    },
                    "plain_text": "glamour",
                    "href": None,
                },
            ],
            "checked": False,
        },
    }
Python
def with_temporary_mappings(m):
    """Allow temporary localized altering of type mappings."""
    def f(orig):
        def every(self, *args):
            global _types
            o = _types.copy()
            for k, v in m.items():
                if v:
                    _types[k] = v
                else:
                    del _types[k]
            try:
                return orig(self, *args)
            finally:
                _types = o
        return every
    return f
Python
def _run_embedded_tx(*args, **kwargs):
    """Run the embedded tx executable with the list of positional arguments.

    All keyword arguments are forwarded to the subprocess.run function.

    Return:
        subprocess.CompletedProcess object with the following attributes:
        args, returncode, stdout, stderr.
    """
    with path(__name__, TX_EXE) as tx_cli:
        return subprocess.run([str(tx_cli)] + list(args), **kwargs)
Python
def subroutinize(
    otf: ttLib.TTFont,
    cff_version: Optional[int] = None,
    keep_glyph_names: bool = True,
    inplace: bool = True,
) -> ttLib.TTFont:
    """Run subroutinizer on a FontTools TTFont's 'CFF ' or 'CFF2' table.

    Args:
        otf (TTFont): the input CFF-flavored OTF as a FontTools TTFont. It
            should contain either a 'CFF ' or a 'CFF2' table.
        cff_version (Optional[int]): the output table format version, 1 for
            'CFF ', 2 for 'CFF2'. By default, it's the same as the input
            table format.
        keep_glyph_names (bool): CFF 1.0 stores the postscript glyph names
            and uses the more compact post table format 3.0. CFF2 does not
            contain glyph names. When converting from CFF to CFF2, the post
            table will be set to format 2.0 to preserve the glyph names. If
            you prefer instead to drop all glyph names and keep the post
            format 3.0, set keep_glyph_names=False.
        inplace (bool): whether to create a copy or modify the input font.
            By default the input font is modified.

    Returns:
        The modified font containing the subroutinized CFF or CFF2 table.
        This will be a different TTFont object if inplace=False.

    Raises:
        cffsubr.Error if the font doesn't contain a 'CFF ' or 'CFF2' table,
        or if the subroutinization process fails.
    """
    input_format = _sniff_cff_table_format(otf)
    if cff_version is None:
        output_format = input_format
    else:
        output_format = CFFTableTag.from_version(cff_version)

    if not inplace:
        otf = copy.deepcopy(otf)

    # ensure the glyph order is decompiled before the CFF table is replaced
    _ = otf.getGlyphOrder()

    buf = io.BytesIO()
    otf.save(buf)
    otf_data = buf.getvalue()

    compressed_cff_data = _tx_subroutinize(otf_data, output_format)

    cff_table = ttLib.newTable(output_format)
    cff_table.decompile(compressed_cff_data, otf)

    del otf[input_format]
    otf[output_format] = cff_table

    if (
        input_format == CFFTableTag.CFF
        and output_format == CFFTableTag.CFF2
        and keep_glyph_names
    ):
        # set 'post' to format 2 to keep the glyph names dropped from CFF2
        set_post_table_format(otf, 2.0)
    elif input_format == CFFTableTag.CFF2 and output_format == CFFTableTag.CFF:
        # set 'post' to format 3 so CFF glyph names are not stored twice
        # TODO convert to CID when keep_glyph_names=False?
        set_post_table_format(otf, 3.0)

    return otf
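A minimal usage sketch, assuming these functions are exposed by the cffsubr package they appear to come from; the font paths are hypothetical:

Python
from fontTools import ttLib

import cffsubr  # assumed package exposing subroutinize() and has_subroutines()

font = ttLib.TTFont("MyFont.otf")  # hypothetical CFF-flavored input font
font = cffsubr.subroutinize(font, inplace=False)  # leave the original object intact
assert cffsubr.has_subroutines(font)
font.save("MyFont.subr.otf")  # hypothetical output path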
Python
def has_subroutines(otf: ttLib.TTFont) -> bool:
    """Return True if the font's CFF or CFF2 table contains any subroutines."""
    table_tag = _sniff_cff_table_format(otf)
    top_dict = otf[table_tag].cff.topDictIndex[0]
    all_subrs = [top_dict.GlobalSubrs]
    if hasattr(top_dict, "FDArray"):
        all_subrs.extend(
            fd.Private.Subrs for fd in top_dict.FDArray if hasattr(fd.Private, "Subrs")
        )
    elif hasattr(top_dict.Private, "Subrs"):
        all_subrs.append(top_dict.Private.Subrs)
    return any(all_subrs)
Python
def desubroutinize(otf: ttLib.TTFont, inplace=True) -> ttLib.TTFont:
    """Remove all subroutines from the font.

    Args:
        otf (ttLib.TTFont): the input font object.
        inplace (bool): whether to create a copy or modify the input font.
            By default the input font is modified.

    Returns:
        The modified font containing the desubroutinized CFF or CFF2 table.
        This will be a different TTFont object if inplace=False.

    Raises:
        cffsubr.Error if the font doesn't contain 'CFF ' or 'CFF2' table,
        or if desubroutinization process fails.
    """
    # the 'desubroutinize' method is dynamically added to the CFF table class
    # as a side-effect of importing the fontTools.subset.cff module...
    from fontTools.subset import cff as _

    if not inplace:
        otf = copy.deepcopy(otf)

    table_tag = _sniff_cff_table_format(otf)
    try:
        otf[table_tag].desubroutinize()
    except Exception as e:
        raise Error("Desubroutinization failed") from e

    return otf
Python
def main(args=None):
    """Compress OpenType Font's CFF or CFF2 table by computing subroutines."""
    parser = argparse.ArgumentParser("cffsubr", description=main.__doc__)
    parser.add_argument(
        "input_file", help="input font file. Must contain either CFF or CFF2 table"
    )
    output_group = parser.add_mutually_exclusive_group()
    output_group.add_argument(
        "-o",
        "--output-file",
        default=None,
        help="optional path to output file. By default, dump binary data to stdout",
    )
    output_group.add_argument(
        "-i",
        "--inplace",
        action="store_true",
        help="whether to overwrite the input file",
    )
    parser.add_argument(
        "-f",
        "--cff-version",
        default=None,
        type=int,
        choices=(1, 2),
        help="output CFF table format version",
    )
    parser.add_argument(
        "-N",
        "--no-glyph-names",
        dest="keep_glyph_names",
        action="store_false",
        help="whether to drop postscript glyph names when converting from CFF to CFF2.",
    )
    parser.add_argument(
        "-d",
        "--desubroutinize",
        action="store_true",
        help="Don't subroutinize, instead remove all subroutines (if any).",
    )
    options = parser.parse_args(args)

    if options.inplace:
        options.output_file = options.input_file
    elif not options.output_file:
        options.output_file = sys.stdout.buffer

    # Load TTFont lazily by default assuming output != input; load non-lazily if -i
    # option is passed, so that fontTools lets us overwrite the input file.
    lazy = True if not options.inplace else None
    with ttLib.TTFont(options.input_file, lazy=lazy) as font:
        if options.desubroutinize:
            cffsubr.desubroutinize(font)
        else:
            cffsubr.subroutinize(font, options.cff_version, options.keep_glyph_names)
        font.save(options.output_file)
Python
def flip_label(target, ratio, pattern=0):
    """Induce label noise by randomly corrupting labels.

    :param target: list or array of labels
    :param ratio: float: noise ratio
    :param pattern: flag to choose which type of noise.
        0 or mod(pattern, #classes) == 0 = symmetric
        positive int = asymmetric (shift by a fixed class offset)
        -1 = flip (to class 0)
    :return: corrupted labels and a 0/1 mask marking which entries changed
    """
    assert 0 <= ratio < 1

    target = np.array(target).astype(int)
    label = target.copy()
    n_class = len(np.unique(label))

    if type(pattern) is int:
        for i in range(label.shape[0]):
            # symmetric noise
            if (pattern % n_class) == 0:
                p1 = ratio / (n_class - 1) * np.ones(n_class)
                p1[label[i]] = 1 - ratio
                label[i] = np.random.choice(n_class, p=p1)
            elif pattern > 0:
                # asymmetric noise
                label[i] = np.random.choice(
                    [label[i], (target[i] + pattern) % n_class],
                    p=[1 - ratio, ratio],
                )
            else:
                # flip noise
                label[i] = np.random.choice([label[i], 0], p=[1 - ratio, ratio])
    elif type(pattern) is str:
        raise ValueError

    mask = np.array([int(x != y) for (x, y) in zip(target, label)])

    return label, mask
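A quick sketch of how the helper might be exercised, assuming numpy is imported as np and flip_label is in scope:

Python
import numpy as np

np.random.seed(0)  # for a reproducible corruption pattern
clean = np.random.randint(0, 10, size=1000)  # 10-class toy labels
noisy, mask = flip_label(clean, ratio=0.2, pattern=0)  # symmetric noise
print(mask.mean())  # empirical noise rate, should be close to 0.2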
Python
def cluster_accuracy(y_true, y_predicted, cluster_number=None):
    """Calculate clustering accuracy after using the linear_sum_assignment
    function in SciPy to determine reassignments.

    :param y_true: list of true cluster numbers, an integer array 0-indexed
    :param y_predicted: list of predicted cluster numbers, an integer array 0-indexed
    :param cluster_number: number of clusters, if None then calculated from input
    :return: reassignment dictionary, clustering accuracy
    """
    if cluster_number is None:
        cluster_number = (
            max(y_predicted.max(), y_true.max()) + 1
        )  # assume labels are 0-indexed
    count_matrix = np.zeros((cluster_number, cluster_number), dtype=np.int64)
    for i in range(y_predicted.size):
        count_matrix[y_predicted[i], y_true[i]] += 1

    row_ind, col_ind = linear_sum_assignment(count_matrix.max() - count_matrix)
    reassignment = dict(zip(row_ind, col_ind))
    accuracy = count_matrix[row_ind, col_ind].sum() / y_predicted.size
    return reassignment, accuracy
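A minimal sketch of the function in use, assuming the same imports the snippet relies on (numpy as np, scipy's linear_sum_assignment):

Python
import numpy as np
from scipy.optimize import linear_sum_assignment

y_true = np.array([0, 0, 1, 1, 2, 2])
y_pred = np.array([1, 1, 0, 0, 2, 2])  # clusters 0 and 1 are swapped
reassignment, accuracy = cluster_accuracy(y_true, y_pred)
print(reassignment)  # {0: 1, 1: 0, 2: 2} -- the optimal relabeling
print(accuracy)      # 1.0, since the swap is fully recoverable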
Python
def r2score(actual: np.ndarray, predicted: np.ndarray):
    """Return the coefficient of determination R^2 of the prediction."""
    u = ((actual - predicted) ** 2).sum()
    # total sum of squares is taken around the mean of the actual values
    v = ((actual - actual.mean()) ** 2).sum()
    return 1 - u / (v + EPSILON)
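A tiny check of the metric, assuming numpy as np and a small EPSILON constant as in the snippet:

Python
import numpy as np

EPSILON = 1e-12  # assumed small constant to avoid division by zero

actual = np.array([1.0, 2.0, 3.0, 4.0])
predicted = np.array([1.1, 1.9, 3.2, 3.8])
print(r2score(actual, predicted))  # 0.98, close to 1.0 for a good fit
print(r2score(actual, np.full_like(actual, actual.mean())))  # ~0.0 for the mean predictor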
Python
def reset_rng(func):
    """ decorator wrapper to reset the numpy rng """
    @functools.wraps(func)
    def wrapper_decorator(*args, **kwargs):
        np.random.seed(SEED)
        print(f'Reset RNG. Seed:{SEED}.')
        value = func(*args, **kwargs)
        # Do something after
        return value
    return wrapper_decorator
Python
def mixup_data(x, y, alpha=1.0, device='cuda'):
    '''Returns mixed inputs, pairs of targets, and lambda'''
    if alpha > 0:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1

    batch_size = x.size()[0]
    if device == 'cuda':
        index = torch.randperm(batch_size).cuda()
    else:
        index = torch.randperm(batch_size)

    mixed_x = lam * x + (1 - lam) * x[index, :]
    y_a, y_b = y, y[index]
    return mixed_x, y_a, y_b, lam
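A short sketch of the helper in a training step, assuming mixup_data and its numpy import are in scope; the logits tensor here is a stand-in for a model's output, and the lam-weighted loss is the usual mixup formulation:

Python
import torch
import torch.nn.functional as F

x = torch.randn(8, 3, 32, 32)   # toy batch of images
y = torch.randint(0, 10, (8,))  # toy labels
mixed_x, y_a, y_b, lam = mixup_data(x, y, alpha=1.0, device='cpu')

logits = torch.randn(8, 10, requires_grad=True)  # stand-in for model(mixed_x)
# standard mixup loss: interpolate the losses of the two label sets
loss = lam * F.cross_entropy(logits, y_a) + (1 - lam) * F.cross_entropy(logits, y_b)
loss.backward()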
Python
def mixup_data_Boot(x, y, alpha=1.0, device='cuda'):
    '''Returns mixed inputs, pairs of targets, lambda, and the permutation index'''
    if alpha > 0:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1

    batch_size = x.size()[0]
    if device == 'cuda':
        index = torch.randperm(batch_size).to(device)
    else:
        index = torch.randperm(batch_size)

    mixed_x = lam * x + (1 - lam) * x[index, :]
    y_a, y_b = y, y[index]
    return mixed_x, y_a, y_b, lam, index
Python
def mixup_data_beta(x, y, B, device='cuda'):
    '''Returns mixed inputs, pairs of targets, and the permutation index'''
    batch_size = x.size()[0]
    if device == 'cuda':
        index = torch.randperm(batch_size).cuda()
    else:
        index = torch.randperm(batch_size)

    lam = ((1 - B) + (1 - B[index]))
    fac1 = ((1 - B) / lam)
    fac2 = ((1 - B[index]) / lam)
    for _ in range(x.dim() - 1):
        fac1.unsqueeze_(1)
        fac2.unsqueeze_(1)
    mixed_x = fac1 * x + fac2 * x[index, :]
    y_a, y_b = y, y[index]
    return mixed_x, y_a, y_b, index
Python
def gen_items(stac_link):
    """Generate STAC Items from STAC Catalog entrypoint."""
    cat = Catalog.open(stac_link)

    # Check if this catalog is the root: compare ids, since root() returns
    # a Catalog object rather than an id string.
    if cat.id == cat.root().id:
        for child in cat.children():
            for item in child.items():
                yield item
    else:
        for item in cat.items():
            yield item
Python
def add_child(self, command):
    """ Set a command as a child. """
    if command in self.children:
        return

    self.children.append(command)
    command.add_parent(self)
Python
def add_parent(self, command):
    """ Set a command as a parent. """
    if self.parent is None:
        self.parent = command
    elif self.parent == command:
        return
    else:
        raise InputError("There is already a parent")
Python
@contextmanager  # used via 'with _raw_mode(sys.stdin):' below; needs contextlib.contextmanager
def _raw_mode(file):
    """ Put the terminal into raw mode so single key-press events can be read. """
    old_attrs = termios.tcgetattr(file.fileno())
    new_attrs = old_attrs[:]
    new_attrs[3] = new_attrs[3] & ~(termios.ECHO | termios.ICANON)
    try:
        termios.tcsetattr(file.fileno(), termios.TCSADRAIN, new_attrs)
        yield
    finally:
        termios.tcsetattr(file.fileno(), termios.TCSADRAIN, old_attrs)
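A short interactive sketch of the context manager in use, assuming the decorated helper above and a POSIX terminal:

Python
import sys

# Read a single keypress without echo or line buffering.
with _raw_mode(sys.stdin):
    ch = sys.stdin.read(1)
print("You pressed:", repr(ch))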
Python
def redmine_input(prompt='', complete_command=None, history=False):
    """ Customized input function for redmine shell. """
    if complete_command is None:
        complete_command = []

    # TODO: inline
    sys.stdout.write(prompt)
    sys.stdout.flush()

    with _raw_mode(sys.stdin):
        def rewrite(new, old):
            origin_len = len(old)
            sys.stdout.write('\r{}\r'.format(' ' * (origin_len + len(prompt))))
            sys.stdout.write(prompt + ''.join(new))
            sys.stdout.flush()

        def complete(buf):
            target = ''.join(buf).strip()
            if not target:
                sys.stdout.write('\r{}\r'.format(' ' * (len(buf) + len(prompt))))
                for command in complete_command:
                    print(command)
                sys.stdout.write(prompt)
                sys.stdout.flush()
                return []

            str_len = len(target)
            filtered = [x for x in complete_command if len(x) >= str_len]
            filtered = [x for x in filtered if x.startswith(target) is True]
            if filtered:
                min_cmd = sorted(filtered)[0]
                if min_cmd == target:
                    return list(target)

                i = start = len(target)
                until = len(min_cmd)
                while start <= i < until:
                    compare = filtered[0][i]
                    is_diff = False
                    for cmd in filtered:
                        if compare != cmd[i]:
                            is_diff = True
                            break
                    if is_diff is True:
                        break
                    i += 1
                return list(min_cmd[:i])
            else:
                return buf

        def finder(buf):
            target = ''.join(buf)
            lookup = []
            for cmd in complete_command:
                if cmd.startswith(target) is True:
                    lookup.append(cmd)

            if lookup:
                sys.stdout.write('\r{}\r'.format(' ' * (len(buf) + len(prompt))))
                print("---------- CMDS ---------")
                for cmd in lookup:
                    print(cmd)
                sys.stdout.write(prompt + ''.join(target))
                sys.stdout.flush()

        def ctrl_d(keyword):
            raise EOFError

        def ctrl_p(keyword):  # chr(16) # history up
            if keyword['history'] is True:
                old = keyword['type_buf']
                cmd = keyword['history_move'].move_up()
                if cmd is None:
                    pass
                else:
                    new = list(cmd)
                    rewrite(new, old)
                    keyword['type_buf'] = new
            return State.CONTINUE

        def ctrl_j(keyword):  # chr(14) # Ctrl + j # history down
            if keyword['history'] is True:
                old = keyword['type_buf']
                cmd = keyword['history_move'].move_down()
                if cmd is None:
                    new = ['']
                else:
                    new = list(cmd)
                rewrite(new, old)
                keyword['type_buf'] = new
            return State.CONTINUE

        def ctrl_l(keyword):  # chr(12) # Ctrl + l
            return State.CONTINUE

        def ctrl_h(keyword):  # chr(8) # Ctrl + h
            old = keyword['type_buf']
            new = keyword['type_buf'][:-1]
            rewrite(new, old)
            keyword['type_buf'] = new
            return State.CONTINUE

        def tab(keyword):  # chr(9) # Tab
            old = keyword['type_buf']
            new = complete(old)
            if new:
                if ''.join(new) == ''.join(old):
                    finder(new)
                else:
                    rewrite(new, old)
                    keyword['type_buf'] = new
            return State.CONTINUE

        def newline(keyword):  # chr(10) # Newline
            print("")
            return ''.join(keyword['type_buf'])

        def backspace(keyword):  # chr(127) # Backspace
            old = keyword['type_buf']
            new = keyword['type_buf'][:-1]
            rewrite(new, old)
            keyword['type_buf'] = new
            return State.CONTINUE

        def normal(keyword):
            keyword['type_buf'].append(keyword['char'])
            rewrite(keyword['type_buf'], keyword['type_buf'])
            return State.CONTINUE

        def other(keyword):
            return State.CONTINUE

        keyword = {'prompt': prompt,
                   'complete_command': complete_command,
                   'history': history}
        keyword['type_buf'] = []
        keyword['history_move'] = HistoryMove(History.instance().load())

        special_key_handlers = {chr(4): ctrl_d,
                                chr(16): ctrl_p,
                                chr(14): ctrl_j,
                                # MacOS uses 13 as ctrl-j
                                chr(13): ctrl_j,
                                chr(12): ctrl_l,
                                chr(8): ctrl_h,
                                chr(9): tab,
                                chr(10): newline,
                                chr(127): backspace}

        while True:
            char = sys.stdin.read(1)
            if not char:
                break

            if char in special_key_handlers:
                handler = special_key_handlers[char]
            elif 41 <= ord(char) <= 176 or ord(char) == 32:
                handler = normal
            else:
                handler = other

            keyword['char'] = char
            ret = handler(keyword)
            if ret == State.CONTINUE:
                continue
            elif ret == State.BREAK:
                break
            else:
                return ret
Python
def _setup_script_path(self):
    ''' Setup the script directories Tree. '''
    try:
        os.mkdir(self.script_path)
    except FileExistsError:
        pass

    try:
        os.mkdir(self.script_key_path)
    except FileExistsError:
        pass
Python
def _load_script(self):
    ''' Load Scripts from the path. '''
    for dpath, dnames, _ in os.walk(self.script_key_path):
        for dname in dnames:
            try:
                index = int(dname)
            except ValueError:
                continue

            self.script[index] = {}
            path = '/'.join([dpath, str(index)])
            self.script[index]['path'] = path

            title_path = '/'.join([path, 'title'])
            try:
                with open(title_path, 'r') as file_obj:
                    title = file_obj.read().strip()
            except FileNotFoundError:
                title = ''
            self.script[index]['title'] = title

            memo_path = '/'.join([path, 'memo'])
            try:
                with open(memo_path, 'r') as file_obj:
                    memo = file_obj.read()
            except FileNotFoundError:
                memo = ''
            self.script[index]['memo'] = memo

        # Only walk one depth.
        break
Python
def help_user_input(self, init_content, editor=DEFAULT_EDITOR, _temp=None):
    """Write the user input message to a temporary file and return it.

    Params:
        init_content: First shown messages when it opened.
        editor      : the type of editor.
        _temp       : Use external tempfile object. It should be non-delete
                      mode (delete=False).
    """
    if _temp is None:
        tmp_file = tempfile.NamedTemporaryFile(suffix='.txt', delete=False)
    else:
        if _temp.delete is True:
            raise InputError("Tempfile should be non-delete mode.")
        tmp_file = _temp

    try:
        # Initialize Tempfile.
        tmp_file.write(init_content)
        tmp_file.truncate()
        tmp_file.flush()
        # Mac OS Issue
        # Vim on Mac OS sometimes doesn't save the edited file until the
        # "close()" function is called.
        tmp_file.close()

        # Show the file to the user.
        name = tmp_file.name
        if editor == 'code':
            cmd = '{} {} --wait'.format(editor, name)
            proc = subprocess.Popen(cmd, shell=True)
            exit_code = proc.wait()  # wait until editing is done
            print('vscode edit done ({})'.format(exit_code))
        else:
            # default (vi)
            cmd = '{} {}'.format(editor, name)
            os.system(cmd)

        # Read the tempfile
        with open(name, 'r') as f:
            data = f.read()
        return data
    except Exception:
        os.unlink(tmp_file.name)
        return None
Python
def help_edit_description(self, issue):
    """ Edit description by using editor. """
    desc = self.help_get_description(issue)
    enc_content = desc.encode()
    cnt = self.help_user_input(enc_content)
    print(cnt)
    ret = self.help_ask_write_issue()
    if not ret:
        return True

    self.help_update_description(issue, cnt)
Python
def help_ask_issue_number(self):
    """ Interactively get issue number from user. """
    from redmine_shell.shell.input import redmine_input

    while True:
        try:
            issue = int(redmine_input("Issue number?: ").strip())
        except ValueError:
            print("Input Wrong Number")
            return None
        except EOFError:
            print("")
            return None
        except KeyboardInterrupt:
            print("")
            return None

        answer = self.help_confirm_issue_number(issue)
        if answer == 'y':
            break

    return issue
Python
def help_confirm_issue_number(self, issue):
    ''' Confirm the issue's subject is correct. '''
    from redmine_shell.shell.input import redmine_input

    try:
        tmp_issue_ins = self.issue.get(issue)
    except exceptions.ResourceNotFoundError:
        print("Invalid issue number: out of range.")
        return None

    answer = redmine_input(
        "[#{} {}] -> (y/n)".format(tmp_issue_ins.id, tmp_issue_ins.subject))
    return answer
Python
def version_check(cls):
    """ Check current redmine_shell is updated or outdated. """
    print("--------------------- Program Check -----------------------")
    helper = RedmineHelper(VERSION_CHECK_SERVER)
    data = helper.help_redmine(
        helper.wiki_page.get, "Wiki", project_id="test", timeout=3)
    if data is None:
        print("CANNOT CONNECT {} SERVER [TIMEOUT]".format(VERSION_CHECK_SERVER))
        return

    wiki = data.text
    versions = []
    for line in wiki.split('\n'):
        if line.startswith('h3. '):
            version = line.replace('h3. ', '').strip()
            versions.append(version)

    try:
        latest_version = versions[0]
    except IndexError:
        print("NO VERSION")
        return

    if VERSION == latest_version:
        print(VERSION_CHECK_FORMAT.format("UPDATED"))
    elif VERSION in versions:
        # OUTDATED
        print(UPDATE_RECOMMAND_FORMAT.format(latest_version))
        print("--> RELEASE NOTE")
        releases = wiki.split('---')
        latest_release = releases[0].strip()
        for line in latest_release.split('\n'):
            if line.startswith('h3. '):
                continue
            print(' ' + line)
    else:
        print(VERSION_CHECK_FORMAT.format("INVALID"))
        print(UPDATE_WARNING_MESSAGE)
Python
def list_sibling(self):
    """ Return the list of sibling commands. """
    if self.current.parent is None:
        return None

    children = self.current.parent.get_children()
    if len(children) == 1 and children[0] == self.current:
        return None

    return children
Python
def list_children(self):
    """ Return the list of children commands. """
    children = self.current.get_children()
    ret = []
    for child in children:
        ret.append(child.name)
    return ret
Python
def find_child(self, key):
    """ Find a child of the current command that matches the key. """
    if key.isdigit():
        idx = int(key)
        children = self.current.get_children()
        if idx < 0 or idx >= len(children):
            return None
        return children[idx]

    children = self.current.get_children()
    for child in children:
        if child.name == key:
            return child
    return None
Python
def rollback(self, goto_root=False):
    """ Roll back to the previous state after executing a command. """
    if goto_root is True:
        self.goto_root()
    else:
        self._goto_prev()
Python
def run(self, shell):
    """ EXECUTE TYPE COMMAND SHOULD BE OVERRIDDEN """
    commands = self.load()
    print("============================")
    if commands:
        print("\n".join(commands))
    else:
        print("No history")
    print("============================")
Python
def test(self):
    """Test simultaneous breakpoints in multiple threads."""
    self.build(dictionary=self.getBuildFlags())
    exe = self.getBuildArtifact("a.out")
    self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)

    # This should create a breakpoint in the main thread.
    lldbutil.run_break_set_by_file_and_line(
        self, "main.cpp", self.breakpoint, num_expected_locations=1)

    # Run the program.
    self.runCmd("run", RUN_SUCCEEDED)

    # The stop reason of the thread should be breakpoint.
    # The breakpoint may be hit in either thread 2 or thread 3.
    self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
                substrs=['stopped', 'stop reason = breakpoint'])

    # Get the target process
    target = self.dbg.GetSelectedTarget()
    process = target.GetProcess()

    # Get the number of threads
    num_threads = process.GetNumThreads()

    # Make sure we see all three threads
    self.assertTrue(
        num_threads >= 3,
        'Number of expected threads and actual threads do not match.')

    # Get the thread objects
    thread1 = process.GetThreadAtIndex(0)
    thread2 = process.GetThreadAtIndex(1)
    thread3 = process.GetThreadAtIndex(2)

    # Make sure all three threads are stopped
    self.assertTrue(
        thread1.IsStopped(),
        "Primary thread didn't stop during breakpoint")
    self.assertTrue(
        thread2.IsStopped(),
        "Secondary thread didn't stop during breakpoint")
    self.assertTrue(
        thread3.IsStopped(),
        "Tertiary thread didn't stop during breakpoint")

    # Delete the first breakpoint then continue
    self.runCmd("breakpoint delete 1")

    # Run to completion
    self.runCmd("continue")

    # At this point, the inferior process should have exited.
    self.assertEqual(process.GetState(), lldb.eStateExited, PROCESS_EXITED)
Python
def build_mock_model(path, signature):
    """Build and save the mock model with the given signature"""
    module = tf.Module()

    # We have to set this useless variable in order for the TF C API to
    # correctly intake it
    module.var = tf.Variable(0.)

    def action(*inputs):
        s = tf.reduce_sum([tf.cast(x, tf.float32) for x in tf.nest.flatten(inputs)])
        return {signature['output']: float('inf') + s + module.var}

    module.action = tf.function()(action)
    action = {'action': module.action.get_concrete_function(signature['inputs'])}
    tf.saved_model.save(module, path, signatures=action)

    output_spec_path = get_output_spec_path(path)
    with open(output_spec_path, 'w') as f:
        print(f'Writing output spec to {output_spec_path}.')
        f.write(signature['output_spec'])
Python
def test(self):
    """Test with 5 watchpoint and breakpoint threads."""
    self.build(dictionary=self.getBuildFlags())
    self.do_thread_actions(num_watchpoint_threads=5, num_breakpoint_threads=5)
Python
def cleanup_source(app, docname, source):
    """ Cleans up source files generated by automodapi. """
    # Don't clean up anything besides automodapi-generated sources.
    if automodapi_toctreedirnm not in docname:
        return
    processed = source[0]

    # Don't show the list of inheritance info as there is no inheritance in
    # the SBI API. This avoids all the repeated text on all doc pages that a
    # class inherits from 'object'.
    processed = processed.replace(":show-inheritance:", "")
    # Remove the SWIG generated 'thisown' attribute. It just bloats the
    # generated documentation and users shouldn't fiddle with the value anyway.
    processed = re.sub(r'~SB[a-zA-Z]+\.thisown', "", processed)
    processed = processed.replace(" .. autoattribute:: thisown", "")

    # After removing 'thisown', many objects don't have any attributes left.
    # Remove all now empty attribute summary/documentation sections with
    # some rather ugly regex.
    processed = empty_attr_summary.sub('.. rubric::', processed)
    processed = empty_attr_documentation.sub('.. rubric::', processed)

    # Replace the original source with the processed one (source is a single
    # element list).
    source[0] = processed
Python
def _get_steps(self, builderIR):
    """Generate a list of debugger steps from a test case."""
    debugger_controller = self._init_debugger_controller()
    debugger_controller = run_debugger_subprocess(
        debugger_controller, self.context.working_directory.path)
    steps = debugger_controller.step_collection
    steps.builder = builderIR
    return steps
Python
def _get_results_path(self, test_name): """Returns the path to the test results directory for the test denoted by test_name. """ return os.path.join(self.context.options.results_directory, self._get_results_basename(test_name))
Python
def _get_results_text_path(self, test_name):
    """Returns the path to the results .txt file for the test denoted by
    test_name.
    """
    test_results_path = self._get_results_path(test_name)
    return '{}.txt'.format(test_results_path)
Python
def _get_results_pickle_path(self, test_name):
    """Returns the path to the results .dextIR file for the test denoted by
    test_name.
    """
    test_results_path = self._get_results_path(test_name)
    return '{}.dextIR'.format(test_results_path)
Python
def _record_steps(self, test_name, steps):
    """Write the set of steps out to the test's .txt and .dextIR results
    files.
    """
    output_text_path = self._get_results_text_path(test_name)
    with open(output_text_path, 'w') as fp:
        self.context.o.auto(str(steps), stream=Stream(fp))

    output_dextIR_path = self._get_results_pickle_path(test_name)
    with open(output_dextIR_path, 'wb') as fp:
        pickle.dump(steps, fp, protocol=pickle.HIGHEST_PROTOCOL)
Python
def _record_score(self, test_name, heuristic): """Write out the test's heuristic score to the results .txt file. """ output_text_path = self._get_results_text_path(test_name) with open(output_text_path, 'a') as fp: self.context.o.auto(heuristic.verbose_output, stream=Stream(fp))
Python
def test(self): """ Test GDB remote fallback to 'p' packet when 'g' packet does not include all registers. """ class MyResponder(MockGDBServerResponder): def qXferRead(self, obj, annex, offset, length): if annex == "target.xml": return """<?xml version="1.0"?> <!DOCTYPE feature SYSTEM "gdb-target.dtd"> <target> <architecture>arm</architecture> <feature name="org.gnu.gdb.arm.m-profile"> <reg name="r0" bitsize="32" type="uint32" group="general"/> <reg name="r1" bitsize="32" type="uint32" group="general"/> <reg name="r2" bitsize="32" type="uint32" group="general"/> <reg name="r3" bitsize="32" type="uint32" group="general"/> <reg name="r4" bitsize="32" type="uint32" group="general"/> <reg name="r5" bitsize="32" type="uint32" group="general"/> <reg name="r6" bitsize="32" type="uint32" group="general"/> <reg name="r7" bitsize="32" type="uint32" group="general"/> <reg name="r8" bitsize="32" type="uint32" group="general"/> <reg name="r9" bitsize="32" type="uint32" group="general"/> <reg name="r10" bitsize="32" type="uint32" group="general"/> <reg name="r11" bitsize="32" type="uint32" group="general"/> <reg name="r12" bitsize="32" type="uint32" group="general"/> <reg name="sp" bitsize="32" type="data_ptr" group="general"/> <reg name="lr" bitsize="32" type="uint32" group="general"/> <reg name="pc" bitsize="32" type="code_ptr" group="general"/> <reg name="xpsr" bitsize="32" regnum="25" type="uint32" group="general"/> <reg name="MSP" bitsize="32" regnum="26" type="uint32" group="general"/> <reg name="PSP" bitsize="32" regnum="27" type="uint32" group="general"/> <reg name="PRIMASK" bitsize="32" regnum="28" type="uint32" group="general"/> <reg name="BASEPRI" bitsize="32" regnum="29" type="uint32" group="general"/> <reg name="FAULTMASK" bitsize="32" regnum="30" type="uint32" group="general"/> <reg name="CONTROL" bitsize="32" regnum="31" type="uint32" group="general"/> </feature> </target>""", False else: return None, False def readRegister(self, regnum): if regnum == 31: return "cdcc8c3f00000000" return "E01" def readRegisters(self): return "20000000f8360020001000002fcb0008f8360020a0360020200c0020000000000000000000000000000000000000000000000000b87f0120b7d100082ed2000800000001" def haltReason(self): return "S05" def qfThreadInfo(self): return "mdead" def qC(self): return "" def qSupported(self, client_supported): return "PacketSize=4000;qXfer:memory-map:read-;QStartNoAckMode+;qXfer:threads:read+;hwbreak+;qXfer:features:read+" def QThreadSuffixSupported(self): return "OK" def QListThreadsInStopReply(self): return "OK" self.server.responder = MyResponder() if self.TraceOn(): self.runCmd("log enable gdb-remote packets") self.addTearDownHook( lambda: self.runCmd("log disable gdb-remote packets")) self.dbg.SetDefaultArchitecture("armv7em") target = self.dbg.CreateTargetWithFileAndArch(None, None) process = self.connect(target) if self.TraceOn(): interp = self.dbg.GetCommandInterpreter() result = lldb.SBCommandReturnObject() interp.HandleCommand("target list", result) print(result.GetOutput()) r0_valobj = process.GetThreadAtIndex( 0).GetFrameAtIndex(0).FindRegister("r0") self.assertEqual(r0_valobj.GetValueAsUnsigned(), 0x20) pc_valobj = process.GetThreadAtIndex( 0).GetFrameAtIndex(0).FindRegister("pc") self.assertEqual(pc_valobj.GetValueAsUnsigned(), 0x0800d22e) pc_valobj = process.GetThreadAtIndex( 0).GetFrameAtIndex(0).FindRegister("CONTROL") self.assertEqual(pc_valobj.GetValueAsUnsigned(), 0x3f8ccccd)
Python
def esc(text):
    """Escape any html in the given text."""
    text = re.sub(r'&', '&amp;', text)
    text = re.sub(r'<', '&lt;', text)
    text = re.sub(r'>', '&gt;', text)
    def link_if_exists(m):
        name = m.group(1)
        url = 'https://clang.llvm.org/doxygen/classclang_1_1%s.html' % name
        if url not in doxygen_probes:
            try:
                print('Probing %s...' % url)
                urlopen(url)
                doxygen_probes[url] = True
            except Exception:
                doxygen_probes[url] = False
        if doxygen_probes[url]:
            return r'Matcher&lt;<a href="%s">%s</a>&gt;' % (url, name)
        else:
            return m.group(0)
    text = re.sub(
        r'Matcher&lt;([^\*&]+)&gt;', link_if_exists, text)
    return text
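One detail in esc() above deserves a note: the order of the three substitutions matters, since '&' has to be escaped before the '<' and '>' replacements introduce new ampersands. A standalone sketch of just the escaping step (independent of the generator's globals):

import re

def esc_basic(text):
    # Escape '&' first; doing it last would mangle the entities added below.
    text = re.sub(r'&', '&amp;', text)
    text = re.sub(r'<', '&lt;', text)
    text = re.sub(r'>', '&gt;', text)
    return text

print(esc_basic('Matcher<T&>'))  # Matcher&lt;T&amp;&gt;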
Python
def strip_doxygen(comment):
    r"""Returns the given comment without \-escaped words."""
    # If there is only a doxygen keyword in the line, delete the whole line.
    comment = re.sub(r'^\\[^\s]+\n', r'', comment, flags=re.M)
    # If there is a doxygen \see command, change the \see prefix into "See also:".
    # FIXME: it would be better to turn this into a link to the target instead.
    comment = re.sub(r'\\see', r'See also:', comment)
    # Delete the doxygen command and the following whitespace.
    comment = re.sub(r'\\[^\s]+\s+', r'', comment)
    return comment
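A quick demonstration of the three substitutions, assuming strip_doxygen() above is in scope; the sample comment is made up:

comment = "\\brief\nMatches declarations.\n\\see functionDecl"
print(strip_doxygen(comment))
# Matches declarations.
# See also: functionDecl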
Python
def unify_arguments(args): """Gets rid of anything the user doesn't care about in the argument list.""" args = re.sub(r'internal::', r'', args) args = re.sub(r'extern const\s+(.*)&', r'\1 ', args) args = re.sub(r'&', r' ', args) args = re.sub(r'(^|\s)M\d?(\s)', r'\1Matcher<*>\2', args) args = re.sub(r'BindableMatcher', r'Matcher', args) args = re.sub(r'const Matcher', r'Matcher', args) return args
Python
def add_matcher(result_type, name, args, comment, is_dyncast=False):
    """Adds a matcher to one of our categories."""
    if name == 'id':
        # FIXME: Figure out whether we want to support the 'id' matcher.
        return
    matcher_id = '%s%d' % (name, ids[name])
    ids[name] += 1
    args = unify_arguments(args)
    result_type = unify_type(result_type)

    docs_result_type = esc('Matcher<%s>' % result_type)

    if name == 'mapAnyOf':
        args = "nodeMatcherFunction..."
        docs_result_type = "<em>unspecified</em>"

    matcher_html = TD_TEMPLATE % {
        'result': docs_result_type,
        'name': name,
        'args': esc(args),
        'comment': esc(strip_doxygen(comment)),
        'id': matcher_id,
    }
    if is_dyncast:
        matchers = node_matchers
        lookup = result_type + name
    # Use a heuristic to figure out whether a matcher is a narrowing or
    # traversal matcher. By default, matchers that take other matchers as
    # arguments (and are not node matchers) do traversal. We specifically
    # exclude known narrowing matchers that also take other matchers as
    # arguments.
    elif ('Matcher<' not in args or
          name in ['allOf', 'anyOf', 'anything', 'unless', 'mapAnyOf']):
        matchers = narrowing_matchers
        lookup = result_type + name + esc(args)
    else:
        matchers = traversal_matchers
        lookup = result_type + name + esc(args)

    if matchers.get(lookup) is None or len(matchers.get(lookup)) < len(matcher_html):
        matchers[lookup] = matcher_html
Python
def sort_table(matcher_type, matcher_map): """Returns the sorted html table for the given row map.""" table = '' for key in sorted(matcher_map.keys()): table += matcher_map[key] + '\n' return ('<!-- START_%(type)s_MATCHERS -->\n' + '%(table)s' + '<!--END_%(type)s_MATCHERS -->') % { 'type': matcher_type, 'table': table, }
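For illustration, assuming sort_table() above is in scope (the row HTML is made up), rows come out in sorted key order between the START/END markers used for splicing the docs page:

rows = {'bMatcher': '<tr>...b...</tr>', 'aMatcher': '<tr>...a...</tr>'}
print(sort_table('DECL', rows))
# <!-- START_DECL_MATCHERS -->
# <tr>...a...</tr>
# <tr>...b...</tr>
# <!--END_DECL_MATCHERS -->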
Python
def buildDsym(self, sender=None, architecture=None, compiler=None, dictionary=None, testdir=None, testname=None): """Build the binaries with dsym debug info.""" commands = [] commands.append( self.getMake(testdir, testname) + [ "MAKE_DSYM=YES", self.getArchCFlags(architecture), self.getArchSpec(architecture), self.getCCSpec(compiler), self.getExtraMakeArgs(), self.getSDKRootSpec(), self.getModuleCacheSpec(), "all", self.getCmdLine(dictionary) ]) self.runBuildCommands(commands, sender=sender) # True signifies that we can handle building dsym. return True
Python
def _get_valid_commands(): """Return all top level DExTer test commands. Returns: { name (str): command (class) } """ return { DexDeclareFile.get_name() : DexDeclareFile, DexExpectProgramState.get_name() : DexExpectProgramState, DexExpectStepKind.get_name() : DexExpectStepKind, DexExpectStepOrder.get_name() : DexExpectStepOrder, DexExpectWatchType.get_name() : DexExpectWatchType, DexExpectWatchValue.get_name() : DexExpectWatchValue, DexLabel.get_name() : DexLabel, DexLimitSteps.get_name() : DexLimitSteps, DexUnreachable.get_name() : DexUnreachable, DexWatch.get_name() : DexWatch }
Python
def _get_command_name(command_raw: str) -> str:
    """Return the command name by splitting up the DExTer command contained
    in command_raw on the first opening parenthesis and stripping any
    trailing whitespace.
    """
    return command_raw.split('(', 1)[0].rstrip()
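Two examples, assuming _get_command_name() above is in scope; the command text is made up:

print(_get_command_name('DexExpectWatchValue("x", 3, on_line=12)'))
# DexExpectWatchValue
print(_get_command_name('DexLabel ('))
# DexLabel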
Python
def _merge_subcommands(command_name: str, valid_commands: dict) -> dict: """Merge valid_commands and command_name's subcommands into a new dict. Returns: { name (str): command (class) } """ subcommands = valid_commands[command_name].get_subcommands() if subcommands: return { **valid_commands, **subcommands } return valid_commands
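A standalone illustration of the {**a, **b} merge used above: on key collisions the right-hand (subcommand) entry wins. The command names here are illustrative only.

base = {'DexLimitSteps': 'command'}
subs = {'DexLimitSteps': 'subcommand', 'DexFinishTest': 'subcommand'}
print({**base, **subs})
# {'DexLimitSteps': 'subcommand', 'DexFinishTest': 'subcommand'}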
Python
def _search_line_for_cmd_end(line: str, start: int, paren_balance: int) -> (int, int):
    """Find the end of a command by looking for balanced parentheses.

    Args:
        line: String to scan.
        start: Index into `line` to start looking.
        paren_balance: paren_balance after the previous call.

    Note:
        On the first call `start` should point at the opening parenthesis and
        `paren_balance` should be set to 0. Subsequent calls should pass in
        the returned `paren_balance`.

    Returns:
        ( end, paren_balance )
        Where end is 1 + the index of the last char in the command or, if the
        parentheses are not balanced, the end of the line.

        paren_balance will be 0 when the parentheses are balanced.
    """
    for end in range(start, len(line)):
        ch = line[end]
        if ch == '(':
            paren_balance += 1
        elif ch == ')':
            paren_balance -= 1
            if paren_balance == 0:
                break
    end += 1
    return (end, paren_balance)
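A worked example of the continuation handling, assuming _search_line_for_cmd_end() above is in scope; the command text is made up:

line1 = 'DexExpectWatchValue("x", 1,'
end, balance = _search_line_for_cmd_end(line1, line1.find('('), 0)
assert balance == 1  # the command is still open at the end of line1
line2 = '                    on_line=12)'
end, balance = _search_line_for_cmd_end(line2, 0, balance)
assert balance == 0 and line2[end - 1] == ')'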
Python
def _find_all_commands_in_lines(self, lines): """Use DExTer parsing methods to find all the mock commands in lines. Returns: { cmd_name: { (path, line): command_obj } } """ cmds, declared_files = _find_all_commands_in_file(__file__, lines, self.valid_commands, None) return cmds
Python
def _find_all_mock_values_in_lines(self, lines): """Use DExTer parsing methods to find all mock command values in lines. Returns: values (list(str)): MockCmd values found in lines. """ cmds = self._find_all_commands_in_lines(lines) mocks = cmds.get(TestParseCommand.MockCmd.get_name(), None) return [v.value for v in mocks.values()] if mocks else []
Python
def create_during_step_base(self, step_cmd, step_stop_reason):
    """Test thread creation while stepping using the given step command."""
    exe = self.getBuildArtifact("a.out")
    self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)

    # Get the target process
    target = self.dbg.GetSelectedTarget()

    # This should create a breakpoint in the stepping thread.
    self.bkpt = target.BreakpointCreateByLocation("main.cpp", self.breakpoint)

    # Run the program.
    self.runCmd("run", RUN_SUCCEEDED)

    process = target.GetProcess()

    # The stop reason of the thread should be breakpoint.
    stepping_thread = lldbutil.get_one_thread_stopped_at_breakpoint(process, self.bkpt)
    self.assertTrue(stepping_thread.IsValid(), "We stopped at the right breakpoint")

    # Get the number of threads
    num_threads = process.GetNumThreads()

    # Make sure we see only two threads
    self.assertEqual(
        num_threads, 2,
        'Number of expected threads and actual threads do not match.')

    # Get the thread objects
    thread1 = process.GetThreadAtIndex(0)
    thread2 = process.GetThreadAtIndex(1)

    current_line = self.breakpoint
    # Keep stepping until we've reached our designated continue point
    while current_line != self.continuepoint:
        if stepping_thread != process.GetSelectedThread():
            process.SetSelectedThread(stepping_thread)

        self.runCmd(step_cmd)

        frame = stepping_thread.GetFrameAtIndex(0)
        current_line = frame.GetLineEntry().GetLine()

        # Make sure we're still where we thought we were
        self.assertTrue(
            current_line >= self.breakpoint,
            "Stepped to unexpected line, " + str(current_line))
        self.assertTrue(
            current_line <= self.continuepoint,
            "Stepped to unexpected line, " + str(current_line))

    # Update the number of threads
    num_threads = process.GetNumThreads()

    # Check to see that we increased the number of threads as expected
    self.assertEqual(
        num_threads, 3,
        'Number of expected threads and actual threads do not match after thread creation.')

    stop_reason = stepping_thread.GetStopReason()
    self.assertEqual(stop_reason, lldb.eStopReasonPlanComplete,
                     "Stopped for plan completion")

    # Run to completion
    self.runCmd("process continue")

    # At this point, the inferior process should have exited.
    self.assertEqual(
        process.GetState(), lldb.eStateExited, PROCESS_EXITED)
Python
def create_debug_adaptor(self, lldbVSCodeEnv=None): '''Create the Visual Studio Code debug adaptor''' self.assertTrue(os.path.exists(self.lldbVSCodeExec), 'lldb-vscode must exist') log_file_path = self.getBuildArtifact('vscode.txt') self.vscode = vscode.DebugAdaptor( executable=self.lldbVSCodeExec, init_commands=self.setUpCommands(), log_file=log_file_path, env=lldbVSCodeEnv)
Python
def verify_breakpoint_hit(self, breakpoint_ids):
    '''Wait for the process we are debugging to stop, and verify we hit
       any breakpoint location in the "breakpoint_ids" array.
       "breakpoint_ids" should be a list of breakpoint ID strings
       (["1", "2"]). The return value from self.set_source_breakpoints()
       or self.set_function_breakpoints() can be passed to this function'''
    stopped_events = self.vscode.wait_for_stopped()
    for stopped_event in stopped_events:
        if 'body' in stopped_event:
            body = stopped_event['body']
            if 'reason' not in body:
                continue
            if body['reason'] != 'breakpoint':
                continue
            if 'description' not in body:
                continue
            # Descriptions for breakpoints will be in the form
            # "breakpoint 1.1", so look for any description that matches
            # ("breakpoint 1.") in the description field as verification
            # that one of the breakpoint locations was hit. VSCode doesn't
            # allow breakpoints to have multiple locations, but LLDB does.
            # So when looking at the description we just want to make sure
            # the right breakpoint matches and not worry about the actual
            # location.
            description = body['description']
            for breakpoint_id in breakpoint_ids:
                match_desc = 'breakpoint %s.' % (breakpoint_id)
                if match_desc in description:
                    return
    self.fail("breakpoint not hit")
Python
def verify_exception_breakpoint_hit(self, filter_label): '''Wait for the process we are debugging to stop, and verify the stop reason is 'exception' and that the description matches 'filter_label' ''' stopped_events = self.vscode.wait_for_stopped() for stopped_event in stopped_events: if 'body' in stopped_event: body = stopped_event['body'] if 'reason' not in body: continue if body['reason'] != 'exception': continue if 'description' not in body: continue description = body['description'] if filter_label == description: return True return False
Python
def attach(self, program=None, pid=None, waitFor=None, trace=None,
           initCommands=None, preRunCommands=None, stopCommands=None,
           exitCommands=None, attachCommands=None, coreFile=None,
           disconnectAutomatically=True, terminateCommands=None,
           postRunCommands=None):
    '''Initialize the VSCode debug adaptor and attach to the process.
    '''
    # Make sure we disconnect and terminate the VSCode debug adaptor even
    # if we throw an exception during the test case.
    def cleanup():
        if disconnectAutomatically:
            self.vscode.request_disconnect(terminateDebuggee=True)
        self.vscode.terminate()

    # Execute the cleanup function during test case tear down.
    self.addTearDownHook(cleanup)
    # Initialize and launch the program
    self.vscode.request_initialize()
    response = self.vscode.request_attach(
        program=program, pid=pid, waitFor=waitFor, trace=trace,
        initCommands=initCommands, preRunCommands=preRunCommands,
        stopCommands=stopCommands, exitCommands=exitCommands,
        attachCommands=attachCommands, terminateCommands=terminateCommands,
        coreFile=coreFile, postRunCommands=postRunCommands)
    if not (response and response['success']):
        message = response['message'] if response else 'no response'
        self.fail('attach failed (%s)' % (message))
Python
def build_and_launch(self, program, args=None, cwd=None, env=None, stopOnEntry=False, disableASLR=True, disableSTDIO=False, shellExpandArguments=False, trace=False, initCommands=None, preRunCommands=None, stopCommands=None, exitCommands=None, terminateCommands=None, sourcePath=None, debuggerRoot=None, runInTerminal=False, disconnectAutomatically=True, postRunCommands=None, lldbVSCodeEnv=None): '''Build the default Makefile target, create the VSCode debug adaptor, and launch the process. ''' self.build_and_create_debug_adaptor(lldbVSCodeEnv) self.assertTrue(os.path.exists(program), 'executable must exist') return self.launch(program, args, cwd, env, stopOnEntry, disableASLR, disableSTDIO, shellExpandArguments, trace, initCommands, preRunCommands, stopCommands, exitCommands, terminateCommands, sourcePath, debuggerRoot, runInTerminal=runInTerminal, disconnectAutomatically=disconnectAutomatically, postRunCommands=postRunCommands)
Python
def test(self):
    """Test two watchpoint-triggering threads and one breakpoint thread
    (started with a 1 second delay)."""
    self.build(dictionary=self.getBuildFlags())
    self.do_thread_actions(
        num_watchpoint_threads=2,
        num_delay_breakpoint_threads=1)
Python
def read_packet(f, verbose=False, trace_file=None): '''Decode a JSON packet that starts with the content length and is followed by the JSON bytes from a file 'f'. Returns None on EOF. ''' line = f.readline().decode("utf-8") if len(line) == 0: return None # EOF. # Watch for line that starts with the prefix prefix = 'Content-Length: ' if line.startswith(prefix): # Decode length of JSON bytes if verbose: print('content: "%s"' % (line)) length = int(line[len(prefix):]) if verbose: print('length: "%u"' % (length)) # Skip empty line line = f.readline() if verbose: print('empty: "%s"' % (line)) # Read JSON bytes json_str = f.read(length) if verbose: print('json: "%s"' % (json_str)) if trace_file: trace_file.write('from adaptor:\n%s\n' % (json_str)) # Decode the JSON bytes into a python dictionary return json.loads(json_str) raise Exception("unexpected malformed message from lldb-vscode: " + line)
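The framing read_packet() parses can be reproduced with an in-memory stream. A sketch, assuming read_packet() above is in scope:

import io
import json

body = json.dumps({'type': 'event', 'event': 'initialized'}).encode('utf-8')
frame = b'Content-Length: ' + str(len(body)).encode('utf-8') + b'\r\n\r\n' + body
print(read_packet(io.BytesIO(frame)))
# {'type': 'event', 'event': 'initialized'}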
Python
def handle_recv_packet(self, packet): '''Called by the read thread that is waiting for all incoming packets to store the incoming packet in "self.recv_packets" in a thread safe way. This function will then signal the "self.recv_condition" to indicate a new packet is available. Returns True if the caller should keep calling this function for more packets. ''' # If EOF, notify the read thread by enqueuing a None. if not packet: self.enqueue_recv_packet(None) return False # Check the packet to see if is an event packet keepGoing = True packet_type = packet['type'] if packet_type == 'event': event = packet['event'] body = None if 'body' in packet: body = packet['body'] # Handle the event packet and cache information from these packets # as they come in if event == 'output': # Store any output we receive so clients can retrieve it later. category = body['category'] output = body['output'] self.output_condition.acquire() if category in self.output: self.output[category] += output else: self.output[category] = output self.output_condition.notify() self.output_condition.release() # no need to add 'output' event packets to our packets list return keepGoing elif event == 'process': # When a new process is attached or launched, remember the # details that are available in the body of the event self.process_event_body = body elif event == 'stopped': # Each thread that stops with a reason will send a # 'stopped' event. We need to remember the thread stop # reasons since the 'threads' command doesn't return # that information. self._process_stopped() tid = body['threadId'] self.thread_stop_reasons[tid] = body elif event == 'breakpoint': # Breakpoint events come in when a breakpoint has locations # added or removed. Keep track of them so we can look for them # in tests. self.breakpoint_events.append(packet) # no need to add 'breakpoint' event packets to our packets list return keepGoing elif event.startswith('progress'): # Progress events come in as 'progressStart', 'progressUpdate', # and 'progressEnd' events. Keep these around in case test # cases want to verify them. self.progress_events.append(packet) # No need to add 'progress' event packets to our packets list. return keepGoing elif packet_type == 'response': if packet['command'] == 'disconnect': keepGoing = False self.enqueue_recv_packet(packet) return keepGoing
Python
def send_packet(self, command_dict, set_sequence=True): '''Take the "command_dict" python dictionary and encode it as a JSON string and send the contents as a packet to the VSCode debug adaptor''' # Set the sequence ID for this command automatically if set_sequence: command_dict['seq'] = self.sequence self.sequence += 1 # Encode our command dictionary as a JSON string json_str = json.dumps(command_dict, separators=(',', ':')) if self.trace_file: self.trace_file.write('to adaptor:\n%s\n' % (json_str)) length = len(json_str) if length > 0: # Send the encoded JSON packet and flush the 'send' file self.send.write(self.encode_content(json_str)) self.send.flush()
Python
def recv_packet(self, filter_type=None, filter_event=None, timeout=None): '''Get a JSON packet from the VSCode debug adaptor. This function assumes a thread that reads packets is running and will deliver any received packets by calling handle_recv_packet(...). This function will wait for the packet to arrive and return it when it does.''' while True: try: self.recv_condition.acquire() packet = None while True: for (i, curr_packet) in enumerate(self.recv_packets): if not curr_packet: raise EOFError packet_type = curr_packet['type'] if filter_type is None or packet_type in filter_type: if (filter_event is None or (packet_type == 'event' and curr_packet['event'] in filter_event)): packet = self.recv_packets.pop(i) break if packet: break # Sleep until packet is received len_before = len(self.recv_packets) self.recv_condition.wait(timeout) len_after = len(self.recv_packets) if len_before == len_after: return None # Timed out return packet except EOFError: return None finally: self.recv_condition.release() return None
Python
def send_recv(self, command):
    '''Send a command python dictionary as JSON and receive the JSON
       response. Validates that the response is the correct sequence and
       command in the reply. Any events that are received are added to the
       events list in this object'''
    self.send_packet(command)
    while True:
        response_or_request = self.recv_packet(filter_type=['response', 'request'])
        if response_or_request is None:
            desc = 'no response for "%s"' % (command['command'])
            raise ValueError(desc)
        if response_or_request['type'] == 'response':
            self.validate_response(command, response_or_request)
            return response_or_request
        else:
            if response_or_request['command'] == 'runInTerminal':
                subprocess.Popen(response_or_request['arguments']['args'],
                                 env=response_or_request['arguments']['env'])
                self.send_packet({
                    "type": "response",
                    "seq": -1,
                    "request_seq": response_or_request['seq'],
                    "success": True,
                    "command": "runInTerminal",
                    "body": {}
                }, set_sequence=False)
            else:
                desc = 'unknown reverse request "%s"' % (response_or_request['command'])
                raise ValueError(desc)
Python
def request_threads(self): '''Request a list of all threads and combine any information from any "stopped" events since those contain more information about why a thread actually stopped. Returns an array of thread dictionaries with information about all threads''' command_dict = { 'command': 'threads', 'type': 'request', 'arguments': {} } response = self.send_recv(command_dict) body = response['body'] # Fill in "self.threads" correctly so that clients that call # self.get_threads() or self.get_thread_id(...) can get information # on threads when the process is stopped. if 'threads' in body: self.threads = body['threads'] for thread in self.threads: # Copy the thread dictionary so we can add key/value pairs to # it without affecting the original info from the "threads" # command. tid = thread['id'] if tid in self.thread_stop_reasons: thread_stop_info = self.thread_stop_reasons[tid] copy_keys = ['reason', 'description', 'text'] for key in copy_keys: if key in thread_stop_info: thread[key] = thread_stop_info[key] else: self.threads = None return response
Python
def validate(categories, exact_match): """ For each category in categories, ensure that it's a valid category (if exact_match is false, unique prefixes are also accepted). If a category is invalid, print a message and quit. If all categories are valid, return the list of categories. Prefixes are expanded in the returned list. """ result = [] for category in categories: origCategory = category if category not in all_categories and not exact_match: category = unique_string_match(category, all_categories) if (category not in all_categories) or category is None: print( "fatal error: category '" + origCategory + "' is not a valid category") print("if you have added a new category, please edit test_categories.py, adding your new category to all_categories") print("else, please specify one or more of the following: " + str(list(all_categories.keys()))) sys.exit(1) result.append(category) return result
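unique_string_match() is defined elsewhere; its presumable behavior (expand a unique prefix, return None when the prefix is ambiguous) can be sketched standalone with hypothetical names:

def unique_prefix(candidate, choices):
    # Return the single choice that starts with candidate, else None.
    matches = [c for c in choices if c.startswith(candidate)]
    return matches[0] if len(matches) == 1 else None

print(unique_prefix('watch', ['watchpoint', 'fork']))  # watchpoint
print(unique_prefix('f', ['fork', 'flakey']))          # None (ambiguous)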
Python
def read_memory_at_address(addr, size): """ Get a memory buffer from the scripted process at a certain address, of a certain size. Args: addr (int): Address from which we should start reading. size (int): Size of the memory to read. Returns: lldb.SBData: An `lldb.SBData` buffer with the target byte size and byte order storing the memory read. """ pass
Python
def test(self):
    """Test two breakpoint-triggering threads and one watchpoint thread."""
    self.build(dictionary=self.getBuildFlags())
    self.do_thread_actions(
        num_breakpoint_threads=2,
        num_watchpoint_threads=1)
Python
def _get_scalar_map(self) -> _ir.AffineMap: """Create an empty affine map used to index a scalar.""" with self.context: return _ir.AffineMap.get( dim_count=self.affine_state.dim_count, symbol_count=self.affine_state.symbol_count, exprs=list())
Python
def from_linalg_op_def( tc_op_def: LinalgOpDef, context: Optional[_ir.Context] = None) -> Sequence["LinalgOpConfig"]: """Expands a LinalgOpDef into corresponding Linalg configured ops.""" # TODO: Many LinalgOpDef patterns need to expand to multiple generics. assert len( tc_op_def.comprehensions) == 1, "Only one comprehension supported" return [ LinalgOpConfig( tc_op_def.metadata, structured_op=LinalgStructuredOpConfig( tc_op_def.comprehensions[0], tc_op_def.domain, tc_op_def.registered_operands.values(), context)), ]
Python
def merge_replacement_files(tmpdir, mergefile):
    """Merge all replacement files in a directory into a single file"""
    # The fixes suggested by clang-tidy >= 4.0.0 are given under
    # the top level key 'Diagnostics' in the output yaml files
    mergekey = "Diagnostics"
    merged = []
    for replacefile in glob.iglob(os.path.join(tmpdir, '*.yaml')):
        with open(replacefile, 'r') as f:
            content = yaml.safe_load(f)
        if not content:
            continue  # Skip empty files.
        merged.extend(content.get(mergekey, []))

    if merged:
        # MainSourceFile: The key is required by the definition inside
        # include/clang/Tooling/ReplacementsYaml.h, but the value
        # is actually never used inside clang-apply-replacements,
        # so we set it to '' here.
        output = {'MainSourceFile': '', mergekey: merged}
        with open(mergefile, 'w') as out:
            yaml.safe_dump(output, out)
    else:
        # Empty the file:
        open(mergefile, 'w').close()
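An end-to-end sketch, assuming merge_replacement_files() above is in scope and PyYAML is installed; the file name and the diagnostic entry are made up:

import os
import tempfile
import yaml

tmpdir = tempfile.mkdtemp()
with open(os.path.join(tmpdir, 'fixes1.yaml'), 'w') as f:
    yaml.safe_dump({'MainSourceFile': 'a.cpp',
                    'Diagnostics': [{'DiagnosticName': 'modernize-use-nullptr'}]}, f)
mergefile = os.path.join(tmpdir, 'merged.yaml')
merge_replacement_files(tmpdir, mergefile)
print(open(mergefile).read())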
Python
def run_tidy(args, tmpdir, build_path, queue, lock, failed_files): """Takes filenames out of queue and runs clang-tidy on them.""" while True: name = queue.get() invocation = get_tidy_invocation(name, args.clang_tidy_binary, args.checks, tmpdir, build_path, args.header_filter, args.allow_enabling_alpha_checkers, args.extra_arg, args.extra_arg_before, args.quiet, args.config, args.line_filter) proc = subprocess.Popen(invocation, stdout=subprocess.PIPE, stderr=subprocess.PIPE) output, err = proc.communicate() if proc.returncode != 0: if proc.returncode < 0: msg = "%s: terminated by signal %d\n" % (name, -proc.returncode) err += msg.encode('utf-8') failed_files.append(name) with lock: sys.stdout.write(' '.join(invocation) + '\n' + output.decode('utf-8')) if len(err) > 0: sys.stdout.flush() sys.stderr.write(err.decode('utf-8')) queue.task_done()
Python
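run_tidy above is one worker in a thread pool fed by a shared queue, with a lock serializing output. A stripped-down, runnable sketch of the same queue/lock/daemon-thread pattern, with the real get_tidy_invocation command line replaced by a placeholder echo (POSIX-only); the names task_queue, stdout_lock, and worker are made up here:

import queue
import subprocess
import sys
import threading

task_queue = queue.Queue()
stdout_lock = threading.Lock()
failed = []

def worker():
    while True:
        name = task_queue.get()
        # Placeholder for the real clang-tidy invocation.
        proc = subprocess.run(['echo', name], capture_output=True)
        if proc.returncode != 0:
            failed.append(name)
        with stdout_lock:
            # The lock keeps output from different workers from interleaving.
            sys.stdout.write(proc.stdout.decode('utf-8'))
        task_queue.task_done()

for _ in range(4):
    threading.Thread(target=worker, daemon=True).start()
for f in ['a.cpp', 'b.cpp']:
    task_queue.put(f)
task_queue.join()  # Blocks until every task_done() has been called.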
def _remove_cxx_namespace(typename):
    """Removes the libc++-specific namespace from the type.

    Arguments:
      typename(string): A type, such as std::__u::something.

    Returns:
      A string without the libc++ specific part, such as std::something.
    """
    return re.sub("std::__.*?::", "std::", typename)
Python
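The substitution above is easy to check interactively; a quick sketch:

import re

# Both the __1 and __u inner namespaces collapse to plain std::.
print(re.sub("std::__.*?::", "std::", "std::__1::basic_string"))  # std::basic_string
print(re.sub("std::__.*?::", "std::", "std::__u::vector<int>"))   # std::vector<int>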
def _remove_generics(typename): """Remove generics part of the type. Assumes typename is not empty. Arguments: typename(string): A type such as std::my_collection<element>. Returns: The prefix up to the generic part, such as std::my_collection. """ match = re.match("^([^<]+)", typename) return match.group(1)
Python
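Likewise for _remove_generics; the match is anchored at the start and stops at the first '<', so nested template arguments never matter:

import re

match = re.match("^([^<]+)", "std::map<int, std::vector<int>>")
print(match.group(1))  # std::map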
def _prettify_typename(gdb_type):
    """Returns a pretty name for the type, or None if no name can be found.

    Arguments:
      gdb_type(gdb.Type): A type object.

    Returns:
      A string, with typedefs and libc++ namespaces stripped, and common
      substitutions applied.
    """
    type_without_typedefs = gdb_type.strip_typedefs()
    typename = type_without_typedefs.name or type_without_typedefs.tag or \
        str(type_without_typedefs)
    result = _remove_cxx_namespace(typename)
    for find_str, subst_str in _common_substitutions:
        result = re.sub(find_str, subst_str, result)
    return result
Python
def _typename_for_nth_generic_argument(gdb_type, n):
    """Returns a pretty string for the nth argument of the given type.

    Arguments:
      gdb_type(gdb.Type): A type object, such as the one for std::map<int, int>
      n: The (zero indexed) index of the argument to return.

    Returns:
      A string for the nth argument, such as "std::string"
    """
    element_type = gdb_type.template_argument(n)
    return _prettify_typename(element_type)
Python
def _get_base_subobject(child_class_value, index=0): """Returns the object's value in the form of the parent class at index. This function effectively casts the child_class_value to the base_class's type, but the type-to-cast to is stored in the field at index, and once we know the field, we can just return the data. Args: child_class_value: the value to cast index: the parent class index Raises: Exception: field at index was not a base-class field. """ field = child_class_value.type.fields()[index] if not field.is_base_class: raise Exception("Not a base-class field.") return child_class_value[field]
Python
def _get_short_size(self, short_field, short_size): """Short size depends on both endianness and a compile-time define.""" # If the padding field is present after all this indirection, then string # was compiled with _LIBCPP_ABI_ALTERNATE_STRING_LAYOUT defined. field = short_field.type.fields()[1].type.fields()[0] libcpp_abi_alternate_string_layout = field.name and "__padding" in field.name # This logical structure closely follows the original code (which is clearer # in C++). Keep them parallel to make them easier to compare. if libcpp_abi_alternate_string_layout: if _libcpp_big_endian: return short_size >> 1 else: return short_size elif _libcpp_big_endian: return short_size else: return short_size >> 1
Python
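The branch structure in _get_short_size reduces to a two-variable decision. A standalone sketch that tabulates it, where plain booleans stand in for the padding-field probe and the _libcpp_big_endian global (both are derived from debug info in the real printer):

def short_size(raw, alternate_layout, big_endian):
    # Mirrors _get_short_size: the size byte is shifted right by one
    # in exactly two of the four layout/endianness combinations.
    if alternate_layout:
        return raw >> 1 if big_endian else raw
    return raw if big_endian else raw >> 1

for alt in (False, True):
    for be in (False, True):
        print(alt, be, short_size(10, alt, be))  # 5, 10, 10, 5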
def to_string(self):
    """Build a python string from the data whether stored inline or separately."""
    value_field = _value_of_pair_first(self.val["__r_"])
    short_field = value_field["__s"]
    short_size = short_field["__size_"]
    if short_size == 0:
        return ""
    short_mask = self.val["__short_mask"]
    # Counterintuitive to compare the size and short_mask to see if the string
    # is long, but that's the way the implementation does it. Note that
    # __is_long() doesn't use _get_short_size in C++.
    is_long = short_size & short_mask
    if is_long:
        long_field = value_field["__l"]
        data = long_field["__data_"]
        size = long_field["__size_"]
    else:
        data = short_field["__data_"]
        size = self._get_short_size(short_field, short_size)
    if hasattr(data, "lazy_string"):
        return data.lazy_string(length=size)
    return data.string(length=size)
Python
def to_string(self):  # pylint: disable=g-bad-name
    """GDB calls this to compute the pretty-printed form."""
    ptr = self.val["__data"]
    length = self.val["__size"]
    print_length = length
    # We print more than just a simple string (i.e. we also print
    # "of length %d"). Thus we can't use the "string" display_hint,
    # and thus we have to handle "print elements" ourselves.
    # For reference's sake, gdb ensures limit == None or limit > 0.
    limit = gdb.parameter("print elements")
    if limit is not None:
        print_length = min(print_length, limit)
    # FIXME: Passing ISO-8859-1 here isn't always correct.
    string = ptr.string("ISO-8859-1", "ignore", print_length)
    if length > print_length:
        string += "..."
    return "std::string_view of length %d: \"%s\"" % (length, string)
Python
def to_string(self):
    """Returns self as a string."""
    typename = _remove_generics(_prettify_typename(self.val.type))
    pointee_type = _remove_generics(
        _prettify_typename(self.val.type.template_argument(0)))
    if not self.addr:
        return "%s is nullptr" % typename
    refcount = self.val["__cntrl_"]
    if refcount != 0:
        try:
            usecount = refcount["__shared_owners_"] + 1
            weakcount = refcount["__shared_weak_owners_"]
            if usecount == 0:
                state = "expired, weak %d" % weakcount
            else:
                state = "count %d, weak %d" % (usecount, weakcount)
        except:
            # Debug info for a class with virtual functions is emitted
            # in the same place as its key function. That means that
            # for std::shared_ptr, __shared_owners_ is emitted into
            # libcxx.[so|a] itself, rather than into the shared_ptr
            # instantiation point. So if libcxx.so was built without
            # debug info, these fields will be missing.
            state = "count ?, weak ? (libc++ missing debug info)"
    return "%s<%s> %s containing" % (typename, pointee_type, state)
Python
def _calculate_block_size(self, element_type):
    """Calculates the number of elements in a full block."""
    size = element_type.sizeof
    # Copied from struct __deque_block_size implementation of libcxx.
    # Use integer division: under Python 3, "/" would yield a float.
    return 4096 // size if size < 256 else 16
Python
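Since _calculate_block_size depends only on the element size, its effect is easy to tabulate; a standalone sketch with the method lifted out as a free function:

def deque_block_size(element_size):
    # Same rule as above: 4096-byte blocks for small elements,
    # otherwise a fixed 16 elements per block.
    return 4096 // element_size if element_size < 256 else 16

for size in (1, 4, 8, 64, 256, 1024):
    print(size, deque_block_size(size))  # 4096, 1024, 512, 64, 16, 16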
def parent(self, node):
    """Return the parent of node, if it exists."""
    # If this is the root, then from the algorithm's point of view, it has no
    # parent.
    if node == self.root:
        return None

    # We don't have enough information to tell if this is the end_node (which
    # doesn't have a __parent_ field), or the root (which doesn't have a parent
    # from the algorithm's point of view), so cast_type may not be correct for
    # this particular node. Use heuristics.

    # The end_node's left child is the root. Note that when printing iterators
    # in isolation, the root is unknown.
    if self.left_child(node) == self.root:
        return None

    parent = node.cast(self.cast_type).dereference()["__parent_"]
    # If the value at the offset of __parent_ doesn't look like a valid pointer,
    # then assume that node is the end_node (and therefore has no parent).
    # End_node type has a pointer embedded, so should have pointer alignment.
    if addr_as_long(parent) % _void_pointer_type.alignof:
        return None
    # This is ugly, but the only other option is to dereference an invalid
    # pointer. 0x8000 is fairly arbitrary, but has had good results in
    # practice. If there was a way to tell if a pointer is invalid without
    # actually dereferencing it and spewing error messages, that would be ideal.
    if parent < 0x8000:
        return None
    return parent
Python
def _traverse(self): """Traverses the binary search tree in order.""" current = self.util.root skip_left_child = False while True: if not skip_left_child and self.util.left_child(current): current = self.util.left_child(current) continue skip_left_child = False for key_value in self._get_key_value(current): yield "", key_value right_child = self.util.right_child(current) if right_child: current = right_child continue while self.util.is_right_child(current): current = self.util.parent(current) if self.util.is_left_child(current): current = self.util.parent(current) skip_left_child = True continue break
Python
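The traversal above is a stackless in-order walk that relies on parent pointers instead of recursion. A self-contained sketch of the same algorithm over a toy node class (no gdb involved; the Node class and helper predicates are made up for illustration):

class Node:
    def __init__(self, key, left=None, right=None):
        self.key, self.left, self.right = key, left, right
        self.parent = None
        for child in (left, right):
            if child is not None:
                child.parent = self

def in_order(root):
    """Stackless in-order traversal via parent pointers, mirroring _traverse."""
    def is_left(n):
        return n.parent is not None and n.parent.left is n
    def is_right(n):
        return n.parent is not None and n.parent.right is n
    if root is None:
        return
    current, skip_left = root, False
    while True:
        if not skip_left and current.left:
            current = current.left
            continue
        skip_left = False
        yield current.key
        if current.right:
            current = current.right
            continue
        # Climb out of any right subtree, then step up once more out of a
        # left subtree, marking it as already visited.
        while is_right(current):
            current = current.parent
        if is_left(current):
            current = current.parent
            skip_left = True
            continue
        break

tree = Node(4, Node(2, Node(1), Node(3)), Node(6, Node(5), Node(7)))
print(list(in_order(tree)))  # [1, 2, 3, 4, 5, 6, 7]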
def test(self):
    """Test breakpoint handling after a thread join."""
    self.build(dictionary=self.getBuildFlags())
    exe = self.getBuildArtifact("a.out")
    self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)

    # This should create a breakpoint in the main thread.
    lldbutil.run_break_set_by_file_and_line(
        self, "main.cpp", self.breakpoint, num_expected_locations=1)

    # The breakpoint list should show 1 location.
    self.expect(
        "breakpoint list -f",
        "Breakpoint location shown correctly",
        substrs=[
            "1: file = 'main.cpp', line = %d, exact_match = 0, locations = 1" %
            self.breakpoint])

    # Run the program.
    self.runCmd("run", RUN_SUCCEEDED)

    # The stop reason of the thread should be breakpoint.
    self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
                substrs=['stopped', 'stop reason = breakpoint'])

    # Get the target process
    target = self.dbg.GetSelectedTarget()
    process = target.GetProcess()

    # The exit probably occurred during breakpoint handling, but it isn't
    # guaranteed. The main thing we're testing here is that the debugger
    # handles this cleanly in some way.

    # Get the number of threads
    num_threads = process.GetNumThreads()

    # Make sure we see at least six threads
    self.assertTrue(
        num_threads >= 6,
        'Number of expected threads and actual threads do not match.')

    # Make sure all threads are stopped
    for i in range(0, num_threads):
        self.assertTrue(
            process.GetThreadAtIndex(i).IsStopped(),
            "Thread {0} didn't stop during breakpoint.".format(i))

    # Run to completion
    self.runCmd("continue")

    # If the process hasn't exited, collect some information
    if process.GetState() != lldb.eStateExited:
        self.runCmd("thread list")
        self.runCmd("process status")

    # At this point, the inferior process should have exited.
    self.assertEqual(process.GetState(), lldb.eStateExited, PROCESS_EXITED)
Python
def subprocess_error_handler(msg: str):
    """
    Builds a decorator that prints an error message and prevents
    CompilationDriver from continuing when a called subprocess exits
    with a non-zero status
    """
    def decorator(func):
        def decorated(self, *args, **kwargs):
            if self.ok:
                try:
                    return func(self, *args, **kwargs)
                except subprocess.CalledProcessError:
                    print(msg, file=sys.stderr)
                    self.ok = False
        return decorated
    return decorator
Python
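A minimal sketch of the decorator above in action, assuming it is in scope; the ToyDriver class and the failing command (false, which always exits non-zero) are made up and assume a POSIX environment:

import subprocess
import sys

class ToyDriver:
    def __init__(self):
        self.ok = True

    @subprocess_error_handler("link step failed")
    def link(self):
        subprocess.run(["false"], check=True)  # Raises CalledProcessError.

    @subprocess_error_handler("strip step failed")
    def strip(self):
        print("never reached once self.ok is False")

driver = ToyDriver()
driver.link()   # Prints "link step failed" to stderr and sets ok = False.
driver.strip()  # Skipped entirely: the guard sees ok is False.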
def _link_multi_inputs(self):
    """Link all input files into a single .bc"""
    llvm_link = self.clang_path / "llvm-link"
    args = [str(llvm_link), *self.inputs, "-o", str(self.before_opt_src)]
    self._dump_cmd("00-link_multi_inputs.cmd", args)
    subprocess.run(args, check=True)
Python
def _run_preparation(self):
    """Run the various sycl->HLS conversion passes"""
    # We try to avoid as many optimizations as possible to give Vitis
    # the opportunity to use its custom optimizations.
    outstem = self.outstem
    self.prepared_bc = (
        self.tmpdir / f"{outstem}-kernels-prepared.bc"
    )
    opt_options = ["--sycl-vxx", "-preparesycl", "-globaldce"]
    if not self.hls_flow:
        opt_options.extend([
            "-inline", "-infer-address-spaces",
            "-flat-address-space=0", "-globaldce"
        ])
    opt_options.extend([
        "-instcombine", "-domtree", "-argpromotion", "-deadargelim",
        "-globalopt", "-domtree", "-inline", "-instcombine",
        "-domtree", "-argpromotion", "-deadargelim",
        "-inSPIRation", "-o", f"{self.prepared_bc}"
    ])
    opt = self.clang_path / "opt"
    args = [opt, *opt_options, self.before_opt_src]
    self._dump_cmd("01-run_preparation.cmd", args)
    proc = subprocess.run(args, check=True, capture_output=True)
    if bytes("SYCL_VXX_UNSUPPORTED_SPIR_BUILTINS", "ascii") in proc.stderr:
        print("Unsupported SPIR builtins found: stopping compilation")
        self.ok = False
Python
def _asm_ir(self): """Assemble downgraded IR to bitcode using Vitis llvm-as""" args = [ self.vitis_clang_bin / "llvm-as", self.downgraded_ir, "-o", self.vpp_llvm_input ] self._dump_cmd("05-asm_ir.cmd", args) subprocess.run(args, check=True)
Python
def _link_kernels(self):
    """Call v++ to link all kernels into one .xclbin"""
    vpp = self.vitis_bin_dir / "v++"
    link_config = environ.get('SYCL_VXX_LINK_CONFIG')
    command = [
        vpp, "--target", self.vitis_mode,
        "--advanced.param", "compiler.hlsDataflowStrictMode=off",
        "--platform", self.xilinx_platform,
        "--temp_dir", self.tmpdir / 'vxx_link_tmp',
        "--log_dir", self.tmpdir / 'vxx_link_log',
        "--report_dir", self.tmpdir / 'vxx_link_report',
        "--save-temps", "-l", "-o", self.outpath
    ]
    if link_config is not None and Path(link_config).is_file():
        command.extend(("--config", Path(link_config).resolve()))
    for kernelprop in self.kernel_properties['kernels']:
        targets = dict()
        for mem_assign in kernelprop["bundle_hw_mapping"]:
            command.extend((
                "--connectivity.sp",
                "{}_1.m_axi_{}:{}".format(
                    kernelprop["name"],
                    mem_assign["maxi_bundle_name"],
                    mem_assign["target_bank"]
                )
            ))
            targets[mem_assign["maxi_bundle_name"]] = mem_assign["target_bank"]
        for arg_assign in kernelprop["arg_bundle_mapping"]:
            arg_name = arg_assign["arg_name"]
            target = targets[arg_assign["maxi_bundle_name"]]
            command.extend((
                "--connectivity.sp",
                "{}_1.{}:{}".format(
                    kernelprop["name"], arg_name, target
                )
            ))
    command.extend(self.extra_link_args)
    command.extend(self.compiled_kernels)
    self._dump_cmd("07-vxxlink.cmd", command)
    subprocess.run(command, check=True)
Python
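The nested loops above turn the kernel_properties mapping into --connectivity.sp flags. A standalone sketch of just that transformation, with a made-up single-kernel properties dict (the key names follow the function above; the values are invented):

kernel_properties = {
    "kernels": [{
        "name": "vadd",
        "bundle_hw_mapping": [
            {"maxi_bundle_name": "gmem0", "target_bank": "DDR[0]"},
        ],
        "arg_bundle_mapping": [
            {"arg_name": "a", "maxi_bundle_name": "gmem0"},
        ],
    }]
}

flags = []
for kernelprop in kernel_properties["kernels"]:
    targets = {}
    # One flag per memory bundle, remembering bundle -> bank.
    for mem in kernelprop["bundle_hw_mapping"]:
        flags += ["--connectivity.sp", "{}_1.m_axi_{}:{}".format(
            kernelprop["name"], mem["maxi_bundle_name"], mem["target_bank"])]
        targets[mem["maxi_bundle_name"]] = mem["target_bank"]
    # One flag per argument, resolved through the bundle it rides on.
    for arg in kernelprop["arg_bundle_mapping"]:
        flags += ["--connectivity.sp", "{}_1.{}:{}".format(
            kernelprop["name"], arg["arg_name"],
            targets[arg["maxi_bundle_name"]])]

print(flags)
# ['--connectivity.sp', 'vadd_1.m_axi_gmem0:DDR[0]',
#  '--connectivity.sp', 'vadd_1.a:DDR[0]']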
def killProcessAndChildren(pid):
    """This function kills a process with ``pid`` and all its running children
    (recursively). On most platforms it is implemented with the psutil module,
    which provides a simple, platform-neutral implementation.

    TODO: Reimplement this without using psutil on all platforms so we can
          remove our dependency on it.
    """
    if platform.system() == 'AIX':
        subprocess.call('kill -kill $(ps -o pid= -L{})'.format(pid), shell=True)
    else:
        import psutil
        try:
            psutilProc = psutil.Process(pid)
            # Handle the different psutil API versions
            try:
                # psutil >= 2.x
                children_iterator = psutilProc.children(recursive=True)
            except AttributeError:
                # psutil 1.x
                children_iterator = psutilProc.get_children(recursive=True)
            for child in children_iterator:
                try:
                    child.kill()
                except psutil.NoSuchProcess:
                    pass
            psutilProc.kill()
        except psutil.NoSuchProcess:
            pass
Python
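A small demonstration of the psutil branch above, assuming psutil is installed and a POSIX system (sleep is used as a placeholder workload); it spawns a child and kills it through the same recursive-children API:

import subprocess
import psutil

child = subprocess.Popen(["sleep", "60"])  # Made-up long-running child.
proc = psutil.Process(child.pid)
for descendant in proc.children(recursive=True):
    descendant.kill()  # Kill grandchildren first, as in the function above.
proc.kill()
child.wait()
print("exited with", child.returncode)  # Negative: terminated by signal.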
def breakpoint_ignore_count_python(self): """Use Python APIs to set breakpoint ignore count.""" target, process, thread, bkpt = lldbutil.run_to_source_breakpoint(self, self.stop_in_main, lldb.SBFileSpec("main.c")) # Now create a breakpoint on main.c by name 'c'. breakpoint = target.BreakpointCreateByName('c', 'a.out') self.assertTrue(breakpoint and breakpoint.GetNumLocations() == 1, VALID_BREAKPOINT) # Get the breakpoint location from breakpoint after we verified that, # indeed, it has one location. location = breakpoint.GetLocationAtIndex(0) self.assertTrue(location and location.IsEnabled(), VALID_BREAKPOINT_LOCATION) # Set the ignore count on the breakpoint location. location.SetIgnoreCount(2) self.assertEqual(location.GetIgnoreCount(), 2, "SetIgnoreCount() works correctly") # Now continue and hit our breakpoint on c: process.Continue() # Frame#0 should be on main.c:37, frame#1 should be on main.c:25, and # frame#2 should be on main.c:48. # lldbutil.print_stacktraces(process) from lldbsuite.test.lldbutil import get_stopped_thread thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint) self.assertTrue( thread.IsValid(), "There should be a thread stopped due to breakpoint") frame0 = thread.GetFrameAtIndex(0) frame1 = thread.GetFrameAtIndex(1) frame2 = thread.GetFrameAtIndex(2) self.assertTrue(frame0.GetLineEntry().GetLine() == self.line1 and frame1.GetLineEntry().GetLine() == self.line3 and frame2.GetLineEntry().GetLine() == self.line4, STOPPED_DUE_TO_BREAKPOINT_IGNORE_COUNT) # The hit count for the breakpoint should be 3. self.assertEqual(breakpoint.GetHitCount(), 3)