'''
Convert Google Code .wiki files into .tex formatted files.

Output is designed to be included within a larger TeX project; it is
not standalone.
'''

import sys
import re
import codecs

print(sys.argv)

'''
A "rule" is a begin tag, an end tag, and how to reformat the inner text (function)
'''

def encase(pre, post, strip=False):
    """Return a function that prepends pre and postpends post"""
    def f(txt):
        if strip:
            return pre + txt.strip() + post
        else:
            return pre + txt + post
    return f

def constant(text):
    def f(txt):
        return text
    return f

def encase_with_rules(pre, post, rules, strip=False):
    def f(txt):
        if strip:
            return pre + apply_rules(txt, rules).strip() + post
        else:
            return pre + apply_rules(txt, rules) + post
    return f

def encase_escape_underscore(pre, post):
    def f(txt):
        txt = sub(r'_', r'\_', txt)
        return pre + txt + post
    return f

def sub(pat, repl, txt):
    """Substitute in repl for pat in txt, txt can be multiple lines"""
    return re.compile(pat, re.MULTILINE).sub(repl, txt)

def process_list(rules):
    def f(txt):
        txt = ' *' + txt  # was removed to match begin tag of list
        res = '\\begin{itemize}\n'
        for ln in txt.split('\n'):
            # Convert " *" to "\item "
            ln = sub(r'^ \*', r'\\item ', ln)
            res += apply_rules(ln, rules) + '\n'
        res += '\\end{itemize}\n'
        return res
    return f

def process_link(rules):
    def f(txt):
        lst = txt.split(' ')
        lnk = lst[0]
        desc = apply_rules(' '.join(lst[1:]), rules)
        if lnk[:7] == 'http://':
            desc = apply_rules(' '.join(lst[1:]), rules)
            return r'\href{' + lnk + r'}{' + desc + r'}'
        if len(lst) > 1:
            return r'\href{}{' + desc + r'}'
        return r'\href{}{' + lnk + r'}'
    return f

# Some rules can be used inside some other rules (backticks in section names)
link_rules = [
    ['_', '', constant(r'\_')],
]
section_rules = [
    ['`', '`', encase_escape_underscore(r'\texttt{', r'}')],
]
item_rules = [
    ['`', '`', encase(r'\verb|', r'|')],
    ['[', ']', process_link(link_rules)],
]

# Main rules for Latex formatting
rules = [
    ['{{{', '}}}', encase(r'\begin{lstlisting}[language=c++]', r'\end{lstlisting}')],
    ['[', ']', process_link(link_rules)],
    [' *', '\n\n', process_list(item_rules)],
    ['"', '"', encase("``", "''")],
    ['`', '`', encase(r'\verb|', r'|')],
    ['*', '*', encase(r'\emph{', r'}')],
    ['_', '_', encase(r'\emph{', r'}')],
    ['==', '==', encase_with_rules(r'\section{', r'}', section_rules, True)],
    ['=', '=', encase_with_rules(r'\chapter{', r'}', section_rules, True)],
    ['(e.g. f(x) -> y and f(x,y) -> ', 'z)',
     constant(r'(e.g. $f(x)\to y$ and $f(x,y)\to z$)')],
]

def match_rules(txt, rules):
    """Find rule that first matches in txt"""
    # Find first begin tag
    first_begin_loc = 10e100
    matching_rule = None
    for rule in rules:
        begin_tag, end_tag, func = rule
        loc = txt.find(begin_tag)
        if loc > -1 and loc < first_begin_loc:
            first_begin_loc = loc
            matching_rule = rule
    return (matching_rule, first_begin_loc)

def apply_rules(txt, rules):
    """Apply set of rules to give txt, return transformed version of txt"""
    matching_rule, first_begin_loc = match_rules(txt, rules)
    if matching_rule is None:
        return txt
    begin_tag, end_tag, func = matching_rule
    end_loc = txt.find(end_tag, first_begin_loc + 1)
    if end_loc == -1:
        sys.exit('Could not find end tag {0} after position {1}'.format(
            end_tag, first_begin_loc + 1))
    inner_txt = txt[first_begin_loc + len(begin_tag) : end_loc]
    # Copy characters up until begin tag
    # Then have output of rule function on inner text
    new_txt_start = txt[:first_begin_loc] + func(inner_txt)
    # Follow with the remaining processed text
    remaining_txt = txt[end_loc + len(end_tag):]
    return new_txt_start + apply_rules(remaining_txt, rules)

def split_sections(contents):
    """Given one string of all file contents, return list of sections

    Return format is list of pairs, each pair has section title
    and list of lines.  Result is ordered as the original input.
    """
    res = []
    cur_section = ''
    section = []
    for ln in contents.split('\n'):
        if len(ln) > 0 and ln[0] == '=':
            # remove = formatting from line
            section_title = sub(r'^\=+ (.*) \=+', r'\1', ln)
            res.append((cur_section, section))
            cur_section = section_title
            section = [ln]
        else:
            section.append(ln)
    res.append((cur_section, section))
    return res

def filter_sections(splitinput, removelst):
    """Take split input and remove sections in removelst"""
    res = []
    for sectname, sectcontents in splitinput:
        if sectname in removelst:
            pass
        else:
            res.extend(sectcontents)
    # convert to single string for output
    return '\n'.join(res)

def main():
    infile = codecs.open(sys.argv[1], encoding='utf-8')
    outfile = codecs.open(sys.argv[2], mode='w', encoding='utf-8')

    contents = infile.read()

    # Remove first three lines
    contents = '\n'.join(contents.split('\n')[3:])

    # Split sections and filter out some of them
    sections = split_sections(contents)
    contents = filter_sections(sections, ['Introduction', 'Prerequisites',
                                          'Simple Example'])

    # Convert to latex format
    contents = apply_rules(contents, rules)

    infile.close()

    outfile.write(contents)
    outfile.close()

    return 0

if __name__ == '__main__':
    sys.exit(main())
cccl-main
thrust/internal/scripts/wiki2tex.py
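A minimal sketch of the rule engine above in action, assuming the script has been saved as an importable module named wiki2tex.py; the input string is made up:

# Hypothetical usage; `wiki2tex` is the module above on sys.path.
from wiki2tex import apply_rules, rules

wiki = 'Use `thrust::sort` with _fancy_ iterators.'
print(apply_rules(wiki, rules))
# -> Use \verb|thrust::sort| with \emph{fancy} iterators.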
#! /usr/bin/env python
# -*- coding: utf-8 -*-

###############################################################################
# Copyright (c) 2018 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################

from sys import exit

from os.path import join, dirname, basename, realpath

from csv import DictReader as csv_dict_reader

from subprocess import Popen

from argparse import ArgumentParser as argument_parser

###############################################################################

def printable_cmd(c):
  """Converts a `list` of `str`s representing a shell command to a printable
  `str`."""
  return " ".join(map(lambda e: '"' + str(e) + '"', c))

###############################################################################

def print_file(p):
  """Open the path `p` and print its contents to `stdout`."""
  print "********************************************************************************"
  with open(p) as f:
    for line in f:
      print line,
  print "********************************************************************************"

###############################################################################

ap = argument_parser(
  description = (
    "CUDA Eris driver script: runs a benchmark suite multiple times, combines "
    "the results, and outputs them in the CUDA Eris performance result format."
  )
)

ap.add_argument(
  "-b", "--benchmark",
  help = ("The location of the benchmark suite executable to run."),
  type = str,
  default = join(dirname(realpath(__file__)), "bench"),
  metavar = "R"
)

ap.add_argument(
  "-p", "--postprocess",
  help = ("The location of the postprocessing script to run to combine the "
          "results."),
  type = str,
  default = join(dirname(realpath(__file__)), "combine_benchmark_results.py"),
  metavar = "R"
)

ap.add_argument(
  "-r", "--runs",
  help = ("Run the benchmark suite `R` times."),
  type = int, default = 5,
  metavar = "R"
)

args = ap.parse_args()

if args.runs <= 0:
  print "ERROR: `--runs` must be greater than `0`."
  ap.print_help()
  exit(1)

BENCHMARK_EXE = args.benchmark
BENCHMARK_NAME = basename(BENCHMARK_EXE)
POSTPROCESS_EXE = args.postprocess
OUTPUT_FILE_NAME = lambda i: BENCHMARK_NAME + "_" + str(i) + ".csv"
COMBINED_OUTPUT_FILE_NAME = BENCHMARK_NAME + "_combined.csv"

###############################################################################

print '&&&& RUNNING {0}'.format(BENCHMARK_NAME)

print '#### RUNS {0}'.format(args.runs)

###############################################################################

print '#### CMD {0}'.format(BENCHMARK_EXE)

for i in xrange(args.runs):
  with open(OUTPUT_FILE_NAME(i), "w") as output_file:
    print '#### RUN {0} OUTPUT -> {1}'.format(i, OUTPUT_FILE_NAME(i))

    p = None

    try:
      p = Popen(BENCHMARK_EXE, stdout = output_file, stderr = output_file)
      p.communicate()
    except OSError as ex:
      print_file(OUTPUT_FILE_NAME(i))
      print '#### ERROR Caught OSError `{0}`.'.format(ex)
      print '&&&& FAILED {0}'.format(BENCHMARK_NAME)
      exit(-1)

  print_file(OUTPUT_FILE_NAME(i))

  if p.returncode != 0:
    print '#### ERROR Process exited with code {0}.'.format(p.returncode)
    print '&&&& FAILED {0}'.format(BENCHMARK_NAME)
    exit(p.returncode)

###############################################################################

post_cmd = [POSTPROCESS_EXE]

# Add dependent variable options.
post_cmd += ["-dSTL Average Walltime,STL Walltime Uncertainty,STL Trials"]
post_cmd += ["-dSTL Average Throughput,STL Throughput Uncertainty,STL Trials"]
post_cmd += ["-dThrust Average Walltime,Thrust Walltime Uncertainty,Thrust Trials"]
post_cmd += ["-dThrust Average Throughput,Thrust Throughput Uncertainty,Thrust Trials"]

post_cmd += [OUTPUT_FILE_NAME(i) for i in range(args.runs)]

print '#### CMD {0}'.format(printable_cmd(post_cmd))

with open(COMBINED_OUTPUT_FILE_NAME, "w") as output_file:
  p = None

  try:
    p = Popen(post_cmd, stdout = output_file, stderr = output_file)
    p.communicate()
  except OSError as ex:
    print_file(COMBINED_OUTPUT_FILE_NAME)
    print '#### ERROR Caught OSError `{0}`.'.format(ex)
    print '&&&& FAILED {0}'.format(BENCHMARK_NAME)
    exit(-1)

print_file(COMBINED_OUTPUT_FILE_NAME)

if p.returncode != 0:
  print '#### ERROR Process exited with code {0}.'.format(p.returncode)
  print '&&&& FAILED {0}'.format(BENCHMARK_NAME)
  exit(p.returncode)

with open(COMBINED_OUTPUT_FILE_NAME) as input_file:
  reader = csv_dict_reader(input_file)

  variable_units = reader.next() # Get units header row.

  distinguishing_variables = reader.fieldnames

  measured_variables = [
    ("STL Average Throughput", "+"),
    ("Thrust Average Throughput", "+")
  ]

  for record in reader:
    for variable, directionality in measured_variables:
      # Don't monitor regressions for STL implementations, nvbug 28980890:
      if "STL" in variable:
        continue

      print "&&&& PERF {0}_{1}_{2}bit_{3}mib_{4} {5} {6}{7}".format(
        record["Algorithm"],
        record["Element Type"],
        record["Element Size"],
        record["Total Input Size"],
        variable.replace(" ", "_").lower(),
        record[variable],
        directionality,
        variable_units[variable]
      )

###############################################################################

print '&&&& PASSED {0}'.format(BENCHMARK_NAME)
cccl-main
thrust/internal/scripts/eris_perf.py
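A minimal sketch of the Eris perf line the CSV loop above emits; every record value here is made up:

# Sketch only; all values are assumptions, mirroring the format string above.
record = {"Algorithm": "sort", "Element Type": "int", "Element Size": "32",
          "Total Input Size": "256"}
print("&&&& PERF {0}_{1}_{2}bit_{3}mib_{4} {5} {6}{7}".format(
    record["Algorithm"], record["Element Type"], record["Element Size"],
    record["Total Input Size"], "thrust_average_throughput",
    "12.3", "+", "GB/s"))
# -> &&&& PERF sort_int_32bit_256mib_thrust_average_throughput 12.3 +GB/s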
'''
Helper script for creating a header file that includes all of
Thrust's public headers.  This is useful, for instance, to quickly
check that all the thrust headers obey proper syntax or are warning
free.

This script simply outputs a list of C-style #include's to the standard
output--this should be redirected to a header file by the caller.
'''

import sys
import os
import re
from stat import *

thrustdir = sys.argv[1]

def find_headers(base_dir, rel_dir, exclude = ['\B']):
    '''
    Recursively find all *.h files inside base_dir/rel_dir,
    except any that match the exclude regexp list
    '''
    assert(type(exclude) == list)
    full_dir = base_dir + '/' + rel_dir
    result = []
    for f in os.listdir(full_dir):
        rel_file = rel_dir + '/' + f
        for e in exclude:
            if re.match(e, rel_file):
                break
        else:
            if f.endswith('.h'):
                result.append(rel_file)
            elif S_ISDIR(os.stat(full_dir + '/' + f).st_mode):
                result.extend(find_headers(base_dir, rel_file, exclude))
    return result

print('/* File is generated by ' + sys.argv[0] + ' */')

exclude_re = ['.*/detail$', 'thrust/iterator', 'thrust/random', 'thrust/system/tbb']
headers = find_headers(thrustdir, 'thrust', exclude_re)

if len(headers) == 0:
    print('#error no include files found\n')

print('#define THRUST_CPP11_REQUIRED_NO_ERROR')
print('#define THRUST_CPP14_REQUIRED_NO_ERROR')
print('#define THRUST_MODERN_GCC_REQUIRED_NO_ERROR')

for h in headers:
    print('#include <' + h + '>')

exit()
cccl-main
thrust/internal/build/warningstester_create_uber_header.py
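A small demonstration of the exclusion logic above: find_headers() uses re.match, which anchors at the start of the relative path, so a bare directory prefix prunes that whole subtree. The sample paths are illustrative:

import re
exclude_re = ['.*/detail$', 'thrust/iterator', 'thrust/random', 'thrust/system/tbb']
print(bool(re.match('thrust/iterator', 'thrust/iterator')))   # True: directory pruned
print(bool(re.match('.*/detail$', 'thrust/detail')))          # True: detail dirs pruned
print(bool(re.match('thrust/system/tbb', 'thrust/sort.h')))   # False: header kept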
import gdb
import sys

if sys.version_info[0] > 2:
    Iterator = object
else:
    # "Polyfill" for Python2 Iterator interface
    class Iterator:
        def next(self):
            return self.__next__()


class ThrustVectorPrinter(gdb.printing.PrettyPrinter):
    "Print a thrust::*_vector"

    class _host_accessible_iterator(Iterator):
        def __init__(self, start, size):
            self.item = start
            self.size = size
            self.count = 0

        def __iter__(self):
            return self

        def __next__(self):
            if self.count >= self.size:
                raise StopIteration
            elt = self.item.dereference()
            count = self.count
            self.item = self.item + 1
            self.count = self.count + 1
            return ('[%d]' % count, elt)

    class _cuda_iterator(Iterator):
        def __init__(self, start, size):
            self.item = start
            self.size = size
            self.count = 0
            self.buffer = None
            self.sizeof = self.item.dereference().type.sizeof
            self.buffer_start = 0
            # At most 1 MB or size, at least 1
            self.buffer_size = min(size, max(1, 2 ** 20 // self.sizeof))
            self.buffer = gdb.parse_and_eval(
                '(void*)malloc(%s)' % (self.buffer_size * self.sizeof))
            self.buffer.fetch_lazy()
            self.buffer_count = self.buffer_size
            self.update_buffer()

        def update_buffer(self):
            # Refill the host-side staging buffer only once it is exhausted.
            if self.buffer_count >= self.buffer_size:
                self.buffer_item = gdb.parse_and_eval(
                    hex(self.buffer)).cast(self.item.type)
                self.buffer_count = 0
                self.buffer_start = self.count
                device_addr = hex(self.item.dereference().address)
                buffer_addr = hex(self.buffer)
                size = min(self.buffer_size,
                           self.size - self.buffer_start) * self.sizeof
                status = gdb.parse_and_eval(
                    '(cudaError)cudaMemcpy(%s, %s, %d, cudaMemcpyDeviceToHost)'
                    % (buffer_addr, device_addr, size))
                if status != 0:
                    raise gdb.MemoryError(
                        'memcpy from device failed: %s' % status)

        def __del__(self):
            gdb.parse_and_eval('(void)free(%s)' % hex(self.buffer)).fetch_lazy()

        def __iter__(self):
            return self

        def __next__(self):
            if self.count >= self.size:
                raise StopIteration
            self.update_buffer()
            elt = self.buffer_item.dereference()
            self.buffer_item = self.buffer_item + 1
            self.buffer_count = self.buffer_count + 1
            count = self.count
            self.item = self.item + 1
            self.count = self.count + 1
            return ('[%d]' % count, elt)

    def __init__(self, val):
        self.val = val
        self.pointer = val['m_storage']['m_begin']['m_iterator']
        self.size = int(val['m_size'])
        self.capacity = int(val['m_storage']['m_size'])
        self.is_device_vector = str(self.pointer.type).startswith("thrust::device_ptr")
        if self.is_device_vector:
            self.pointer = self.pointer['m_iterator']
        self.is_cuda_vector = "cuda" in str(val['m_storage']['m_allocator'])

    def children(self):
        if self.is_cuda_vector:
            return self._cuda_iterator(self.pointer, self.size)
        else:
            return self._host_accessible_iterator(self.pointer, self.size)

    def to_string(self):
        typename = str(self.val.type)
        return ('%s of length %d, capacity %d' % (typename, self.size, self.capacity))

    def display_hint(self):
        return 'array'


class ThrustCUDAReferencePrinter(gdb.printing.PrettyPrinter):
    "Print a thrust::device_reference that resides in CUDA memory space"

    def __init__(self, val):
        self.val = val
        self.pointer = val['ptr']['m_iterator']
        self.type = self.pointer.dereference().type
        sizeof = self.type.sizeof
        self.buffer = gdb.parse_and_eval('(void*)malloc(%s)' % sizeof)
        device_addr = hex(self.pointer)
        buffer_addr = hex(self.buffer)
        status = gdb.parse_and_eval(
            '(cudaError)cudaMemcpy(%s, %s, %d, cudaMemcpyDeviceToHost)' % (
                buffer_addr, device_addr, sizeof))
        if status != 0:
            raise gdb.MemoryError('memcpy from device failed: %s' % status)
        self.buffer_val = gdb.parse_and_eval(
            hex(self.buffer)).cast(self.pointer.type).dereference()

    def __del__(self):
        gdb.parse_and_eval('(void)free(%s)' % hex(self.buffer)).fetch_lazy()

    def children(self):
        return []

    def to_string(self):
        typename = str(self.val.type)
        return ('(%s) @%s: %s' % (typename, self.pointer, self.buffer_val))

    def display_hint(self):
        return None


class ThrustHostAccessibleReferencePrinter(gdb.printing.PrettyPrinter):
    def __init__(self, val):
        self.val = val
        self.pointer = val['ptr']['m_iterator']

    def children(self):
        return []

    def to_string(self):
        typename = str(self.val.type)
        return ('(%s) @%s: %s' % (typename, self.pointer,
                                  self.pointer.dereference()))

    def display_hint(self):
        return None


def lookup_thrust_type(val):
    if not str(val.type.unqualified()).startswith('thrust::'):
        return None
    suffix = str(val.type.unqualified())[8:]
    if suffix.startswith('host_vector') or suffix.startswith('device_vector'):
        return ThrustVectorPrinter(val)
    elif int(gdb.VERSION.split(".")[0]) >= 10 and suffix.startswith('device_reference'):
        # look for tag in type name
        if "cuda" in "".join(str(field.type) for field in val["ptr"].type.fields()):
            return ThrustCUDAReferencePrinter(val)
        return ThrustHostAccessibleReferencePrinter(val)
    return None


gdb.pretty_printers.append(lookup_thrust_type)
cccl-main
thrust/scripts/gdb-pretty-printers.py
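A hypothetical cuda-gdb session using these printers; the variable name and printed output are illustrative only, not captured logs:

# Sketch of interactive usage (run inside cuda-gdb, not standalone Python):
#   (cuda-gdb) source thrust/scripts/gdb-pretty-printers.py
#   (cuda-gdb) print vec
#   $1 = thrust::device_vector<int> of length 4, capacity 4 = {...}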
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""Commands used to automate testing gdb pretty printers.

This script is part of a larger framework to test gdb pretty printers. It
runs the program, detects test cases, checks them, and prints results.

See gdb_pretty_printer_test.sh.cpp on how to write a test case.
"""

from __future__ import print_function
import re
import gdb

test_failures = 0


class CheckResult(gdb.Command):

    def __init__(self):
        super(CheckResult, self).__init__(
            "print_and_compare", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        try:
            # Stack frame is:
            #   0. StopForDebugger
            #   1. ComparePrettyPrintToChars or ComparePrettyPrintToRegex
            #   2. TestCase
            compare_frame = gdb.newest_frame().older()
            testcase_frame = compare_frame.older()
            test_loc = testcase_frame.find_sal()
            # Use interactive commands in the correct context to get the pretty
            # printed version
            value_str = self._get_value_string(compare_frame, testcase_frame)

            # Ignore the convenience variable name and newline
            value = value_str[value_str.find("= ") + 2:-1]
            gdb.newest_frame().select()

            expectation_val = compare_frame.read_var("expectation")
            if "PrettyPrintToRegex" in compare_frame.name():
                check_literal = expectation_val.string()
                test_fails = not re.match(check_literal, value)
            else:
                check_literal_string = expectation_val.string(encoding="utf-8")
                check_literal = str(check_literal_string.encode("utf-8"))
                test_fails = value != check_literal

            if test_fails:
                global test_failures
                print("FAIL: " + test_loc.symtab.filename +
                      ":" + str(test_loc.line))
                print("GDB printed:")
                print("   " + value)
                print("Value should match:")
                print("   " + check_literal)
                test_failures += 1
            else:
                print("PASS: " + test_loc.symtab.filename +
                      ":" + str(test_loc.line))
        except RuntimeError as e:
            # At this point, lots of different things could be wrong, so don't
            # try to recover or figure it out. Don't exit either, because then
            # it's impossible to debug the framework itself.
            print("FAIL: Something is wrong in the test framework.")
            print(str(e))
            test_failures += 1

    def _get_value_string(self, compare_frame, testcase_frame):
        compare_frame.select()
        if "ComparePrettyPrint" in compare_frame.name():
            return gdb.execute("p value", to_string=True)
        value_str = str(compare_frame.read_var("value"))
        clean_expression_str = value_str.strip("'\"")
        testcase_frame.select()
        return gdb.execute("p " + clean_expression_str, to_string=True)


def exit_handler(event=None):
    global test_failures
    if test_failures:
        print("FAILED %d cases" % test_failures)
    exit(test_failures)


# Start code executed at load time

# Disable terminal paging
gdb.execute("set height 0")
gdb.execute("set python print-stack full")
test_failures = 0
CheckResult()
test_bp = gdb.Breakpoint("StopForDebugger")
test_bp.enabled = True
test_bp.silent = True
test_bp.commands = "print_and_compare\ncontinue"
# "run" won't return if the program exits; ensure the script regains control.
gdb.events.exited.connect(exit_handler)
gdb.execute("run")
# If the program didn't exit, something went wrong, but we don't
# know what. Fail on exit.
test_failures += 1
exit_handler(None)
cccl-main
libcudacxx/libcxx/test/pretty_printers/gdb_pretty_printer_test.py
import sys
import os
import socket
import stat

# Ensure that this is being run on a specific platform
assert sys.platform.startswith('linux') or sys.platform.startswith('darwin') \
    or sys.platform.startswith('cygwin') or sys.platform.startswith('freebsd') \
    or sys.platform.startswith('netbsd')


def env_path():
    ep = os.environ.get('LIBCXX_FILESYSTEM_DYNAMIC_TEST_ROOT')
    assert ep is not None
    ep = os.path.realpath(ep)
    assert os.path.isdir(ep)
    return ep

env_path_global = env_path()

# Make sure we don't try and write outside of env_path.
# All paths used should be sanitized
def sanitize(p):
    p = os.path.realpath(p)
    if os.path.commonprefix([env_path_global, p]):
        return p
    assert False

"""
Some of the tests restrict permissions to induce failures.
Before we delete the test environment, we have to walk it and re-raise the
permissions.
"""
def clean_recursive(root_p):
    if not os.path.islink(root_p):
        os.chmod(root_p, 0o777)
    for ent in os.listdir(root_p):
        p = os.path.join(root_p, ent)
        if os.path.islink(p) or not os.path.isdir(p):
            os.remove(p)
        else:
            assert os.path.isdir(p)
            clean_recursive(p)
            os.rmdir(p)


def init_test_directory(root_p):
    root_p = sanitize(root_p)
    assert not os.path.exists(root_p)
    os.makedirs(root_p)


def destroy_test_directory(root_p):
    root_p = sanitize(root_p)
    clean_recursive(root_p)
    os.rmdir(root_p)


def create_file(fname, size):
    with open(sanitize(fname), 'w') as f:
        f.write('c' * size)


def create_dir(dname):
    os.mkdir(sanitize(dname))


def create_symlink(source, link):
    os.symlink(sanitize(source), sanitize(link))


def create_hardlink(source, link):
    os.link(sanitize(source), sanitize(link))


def create_fifo(source):
    os.mkfifo(sanitize(source))


def create_socket(source):
    sock = socket.socket(socket.AF_UNIX)
    sanitized_source = sanitize(source)
    # AF_UNIX sockets may have very limited path length, so split it
    # into chdir call (with technically unlimited length) followed
    # by bind() relative to the directory
    os.chdir(os.path.dirname(sanitized_source))
    sock.bind(os.path.basename(sanitized_source))


if __name__ == '__main__':
    command = " ".join(sys.argv[1:])
    eval(command)
    sys.exit(0)
cccl-main
libcudacxx/libcxx/test/support/filesystem_dynamic_test_helper.py
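Because the helper eval()s its joined argv, test drivers pass a single Python expression on the command line. A hypothetical invocation; the paths are assumptions and must resolve under LIBCXX_FILESYSTEM_DYNAMIC_TEST_ROOT:

# Sketch of driver-side usage (shell form shown in comments):
#   LIBCXX_FILESYSTEM_DYNAMIC_TEST_ROOT=/tmp/fs-root \
#       python filesystem_dynamic_test_helper.py "create_file('/tmp/fs-root/f1', 42)"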
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

from argparse import ArgumentParser
import sys


def print_and_exit(msg):
    sys.stderr.write(msg + '\n')
    sys.exit(1)


def main():
    parser = ArgumentParser(
        description="Concatenate two files into a single file")
    parser.add_argument(
        '-o', '--output', dest='output', required=True,
        help='The output file. stdout is used if not given',
        type=str, action='store')
    parser.add_argument(
        'files', metavar='files', nargs='+',
        help='The files to concatenate')
    args = parser.parse_args()
    if len(args.files) < 2:
        print_and_exit('fewer than 2 inputs provided')
    data = ''
    for filename in args.files:
        with open(filename, 'r') as f:
            data += f.read()
        if len(data) != 0 and data[-1] != '\n':
            data += '\n'
    assert len(data) > 0, "cannot cat empty files"
    with open(args.output, 'w') as f:
        f.write(data)


if __name__ == '__main__':
    main()
    sys.exit(0)
cccl-main
libcudacxx/libcxx/utils/cat_files.py
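The loop above appends a newline after any input that lacks one, so concatenated files never run together. A minimal sketch of that rule with made-up contents:

# Sketch only; the chunk strings are assumptions.
data = ''
for chunk in ['first file', 'second file\n']:
    data += chunk
    if len(data) != 0 and data[-1] != '\n':
        data += '\n'
print(repr(data))  # 'first file\nsecond file\n'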
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

"""
Generate a linker script that links libc++ to the proper ABI library.
An example script for c++abi would look like "INPUT(libc++.so.1 -lc++abi)".
"""

import argparse
import os
import sys


def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--dryrun", help="Don't write any output",
                        action="store_true", default=False)
    parser.add_argument("--rename", action="store_true", default=False,
                        help="Rename the output as input so we can replace it")
    parser.add_argument("--input", help="Path to libc++ library", required=True)
    parser.add_argument("--output", help="Path to libc++ linker script",
                        required=True)
    parser.add_argument("libraries", nargs="+",
                        help="List of libraries libc++ depends on")
    args = parser.parse_args()

    # Use the relative path for the libc++ library.
    libcxx = os.path.relpath(args.input, os.path.dirname(args.output))

    # Prepare the list of public libraries to link.
    public_libs = ['-l%s' % l for l in args.libraries]

    # Generate the linker script contents.
    contents = "INPUT(%s)" % ' '.join([libcxx] + public_libs)

    if args.dryrun:
        print("GENERATING SCRIPT: '%s' as file %s" % (contents, args.output))
        return 0

    # Remove the existing libc++ symlink if it exists.
    if os.path.islink(args.output):
        os.unlink(args.output)

    # Replace it with the linker script.
    with open(args.output, 'w') as f:
        f.write(contents + "\n")

    return 0


if __name__ == '__main__':
    sys.exit(main())
cccl-main
libcudacxx/libcxx/utils/gen_link_script.py
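A minimal sketch of the INPUT(...) line the script writes, using made-up paths and ABI libraries:

# Sketch only; paths and library names are assumptions.
import os
libcxx = os.path.relpath('/usr/lib/libc++.so.1', os.path.dirname('/usr/lib/libc++.so'))
public_libs = ['-l%s' % l for l in ['c++abi', 'pthread']]
print("INPUT(%s)" % ' '.join([libcxx] + public_libs))
# -> INPUT(libc++.so.1 -lc++abi -lpthread)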
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
sym_extract - Extract and output a list of symbols from a shared library.
"""
from argparse import ArgumentParser
from libcxx.sym_check import extract, util


def main():
    parser = ArgumentParser(
        description='Extract a list of symbols from a shared library.')
    parser.add_argument('library', metavar='shared-lib', type=str,
                        help='The library to extract symbols from')
    parser.add_argument('-o', '--output', dest='output',
                        help='The output file. stdout is used if not given',
                        type=str, action='store', default=None)
    parser.add_argument('--names-only', dest='names_only',
                        help='Output only the name of the symbol',
                        action='store_true', default=False)
    parser.add_argument('--only-stdlib-symbols', dest='only_stdlib',
                        help="Filter all symbols not related to the stdlib",
                        action='store_true', default=False)
    parser.add_argument('--defined-only', dest='defined_only',
                        help="Filter all symbols that are not defined",
                        action='store_true', default=False)
    parser.add_argument('--undefined-only', dest='undefined_only',
                        help="Filter all symbols that are defined",
                        action='store_true', default=False)

    args = parser.parse_args()
    assert not (args.undefined_only and args.defined_only)
    if args.output is not None:
        print('Extracting symbols from %s to %s.'
              % (args.library, args.output))
    syms = extract.extract_symbols(args.library)
    if args.only_stdlib:
        syms, other_syms = util.filter_stdlib_symbols(syms)
    filter = lambda x: x
    if args.defined_only:
        filter = lambda l: list([x for x in l if x['is_defined']])
    if args.undefined_only:
        filter = lambda l: list([x for x in l if not x['is_defined']])
    util.write_syms(syms, out=args.output, names_only=args.names_only,
                    filter=filter)


if __name__ == '__main__':
    main()
cccl-main
libcudacxx/libcxx/utils/sym_extract.py
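A hypothetical invocation; the library path and output file name are assumptions:

#   python sym_extract.py --only-stdlib-symbols --defined-only \
#       -o syms.txt /usr/lib/libc++.so.1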
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
sym_diff - Compare two symbol lists and output the differences.
"""

from argparse import ArgumentParser
import sys
from libcxx.sym_check import diff, util


def main():
    parser = ArgumentParser(
        description='Extract a list of symbols from a shared library.')
    parser.add_argument(
        '--names-only', dest='names_only',
        help='Only print symbol names', action='store_true', default=False)
    parser.add_argument(
        '--removed-only', dest='removed_only',
        help='Only print removed symbols', action='store_true', default=False)
    parser.add_argument('--only-stdlib-symbols', dest='only_stdlib',
                        help="Filter all symbols not related to the stdlib",
                        action='store_true', default=False)
    parser.add_argument('--strict', dest='strict',
                        help="Exit with a non-zero status if any symbols "
                             "differ",
                        action='store_true', default=False)
    parser.add_argument(
        '-o', '--output', dest='output',
        help='The output file. stdout is used if not given',
        type=str, action='store', default=None)
    parser.add_argument(
        '--demangle', dest='demangle', action='store_true', default=False)
    parser.add_argument(
        'old_syms', metavar='old-syms', type=str,
        help='The file containing the old symbol list or a library')
    parser.add_argument(
        'new_syms', metavar='new-syms', type=str,
        help='The file containing the new symbol list or a library')
    args = parser.parse_args()

    old_syms_list = util.extract_or_load(args.old_syms)
    new_syms_list = util.extract_or_load(args.new_syms)

    if args.only_stdlib:
        old_syms_list, _ = util.filter_stdlib_symbols(old_syms_list)
        new_syms_list, _ = util.filter_stdlib_symbols(new_syms_list)

    added, removed, changed = diff.diff(old_syms_list, new_syms_list)
    if args.removed_only:
        added = {}
    report, is_break, is_different = diff.report_diff(
        added, removed, changed, names_only=args.names_only,
        demangle=args.demangle)
    if args.output is None:
        print(report)
    else:
        with open(args.output, 'w') as f:
            f.write(report + '\n')
    exit_code = 1 if is_break or (args.strict and is_different) else 0
    sys.exit(exit_code)


if __name__ == '__main__':
    main()
cccl-main
libcudacxx/libcxx/utils/sym_diff.py
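A hypothetical invocation comparing a baseline symbol list against a freshly built library (file names are assumptions); with --strict, any difference yields a non-zero exit code:

#   python sym_diff.py --only-stdlib-symbols --strict \
#       baseline_syms.txt build/lib/libc++.so.1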
#!/usr/bin/env python

import os
import tempfile


def get_libcxx_paths():
    utils_path = os.path.dirname(os.path.abspath(__file__))
    script_name = os.path.basename(__file__)
    assert os.path.exists(utils_path)
    src_root = os.path.dirname(utils_path)
    include_path = os.path.join(src_root, 'include')
    assert os.path.exists(include_path)
    docs_path = os.path.join(src_root, 'docs')
    assert os.path.exists(docs_path)
    macro_test_path = os.path.join(src_root, 'test', 'std', 'language.support',
                                   'support.limits', 'support.limits.general')
    assert os.path.exists(macro_test_path)
    assert os.path.exists(os.path.join(macro_test_path,
                                       'version.version.pass.cpp'))
    return script_name, src_root, include_path, docs_path, macro_test_path


script_name, source_root, include_path, docs_path, macro_test_path = get_libcxx_paths()


def has_header(h):
    h_path = os.path.join(include_path, h)
    return os.path.exists(h_path)


def add_version_header(tc):
    tc["headers"].append("version")
    return tc


feature_test_macros = sorted([ add_version_header(x) for x in [
  # C++14 macros
  {"name": "__cpp_lib_integer_sequence", "values": {"c++14": 201304L}, "headers": ["utility"]},
  {"name": "__cpp_lib_exchange_function", "values": {"c++14": 201304L}, "headers": ["utility"]},
  {"name": "__cpp_lib_tuples_by_type", "values": {"c++14": 201304L}, "headers": ["utility", "tuple"]},
  {"name": "__cpp_lib_tuple_element_t", "values": {"c++14": 201402L}, "headers": ["tuple"]},
  {"name": "__cpp_lib_make_unique", "values": {"c++14": 201304L}, "headers": ["memory"]},
  {"name": "__cpp_lib_transparent_operators", "values": {"c++14": 201210L, "c++17": 201510L},
   "headers": ["functional"]},
  {"name": "__cpp_lib_integral_constant_callable", "values": {"c++14": 201304L}, "headers": ["type_traits"]},
  {"name": "__cpp_lib_transformation_trait_aliases", "values": {"c++14": 201304L}, "headers": ["type_traits"]},
  {"name": "__cpp_lib_result_of_sfinae", "values": {"c++14": 201210L}, "headers": ["functional", "type_traits"]},
  {"name": "__cpp_lib_is_final", "values": {"c++14": 201402L}, "headers": ["type_traits"]},
  {"name": "__cpp_lib_is_null_pointer", "values": {"c++14": 201309L}, "headers": ["type_traits"]},
  {"name": "__cpp_lib_chrono_udls", "values": {"c++14": 201304L}, "headers": ["chrono"]},
  {"name": "__cpp_lib_string_udls", "values": {"c++14": 201304L}, "headers": ["string"]},
  {"name": "__cpp_lib_generic_associative_lookup", "values": {"c++14": 201304L}, "headers": ["map", "set"]},
  {"name": "__cpp_lib_null_iterators", "values": {"c++14": 201304L}, "headers": ["iterator"]},
  {"name": "__cpp_lib_make_reverse_iterator", "values": {"c++14": 201402L}, "headers": ["iterator"]},
  {"name": "__cpp_lib_robust_nonmodifying_seq_ops", "values": {"c++14": 201304L}, "headers": ["algorithm"]},
  {"name": "__cpp_lib_complex_udls", "values": {"c++14": 201309L}, "headers": ["complex"]},
  {"name": "__cpp_lib_quoted_string_io", "values": {"c++14": 201304L}, "headers": ["iomanip"]},
  {"name": "__cpp_lib_shared_timed_mutex", "values": {"c++14": 201402L}, "headers": ["shared_mutex"],
   "depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)",
   "internal_depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)"},
  # C++17 macros
  {"name": "__cpp_lib_atomic_is_always_lock_free", "values": {"c++17": 201603L}, "headers": ["atomic"],
   "depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)",
   "internal_depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)"},
  {"name": "__cpp_lib_filesystem", "values": {"c++17": 201703L}, "headers": ["filesystem"]},
  {"name": "__cpp_lib_invoke", "values": {"c++17": 201411L}, "headers": ["functional"]},
  {"name": "__cpp_lib_void_t", "values": {"c++17": 201411L}, "headers": ["type_traits"]},
  {"name": "__cpp_lib_node_extract", "values": {"c++17": 201606L},
   "headers": ["map", "set", "unordered_map", "unordered_set"]},
  {"name": "__cpp_lib_byte", "values": {"c++17": 201603L}, "headers": ["cstddef"]},
  {"name": "__cpp_lib_hardware_interference_size", "values": {"c++17": 201703L}, "headers": ["new"]},
  {"name": "__cpp_lib_launder", "values": {"c++17": 201606L}, "headers": ["new"]},
  {"name": "__cpp_lib_uncaught_exceptions", "values": {"c++17": 201411L}, "headers": ["exception"]},
  {"name": "__cpp_lib_as_const", "values": {"c++17": 201510L}, "headers": ["utility"]},
  {"name": "__cpp_lib_make_from_tuple", "values": {"c++17": 201606L}, "headers": ["tuple"]},
  {"name": "__cpp_lib_apply", "values": {"c++17": 201603L}, "headers": ["tuple"]},
  {"name": "__cpp_lib_optional", "values": {"c++17": 201606L}, "headers": ["optional"]},
  {"name": "__cpp_lib_variant", "values": {"c++17": 201606L}, "headers": ["variant"]},
  {"name": "__cpp_lib_any", "values": {"c++17": 201606L}, "headers": ["any"]},
  {"name": "__cpp_lib_addressof_constexpr", "values": {"c++17": 201603L}, "headers": ["memory"],
   "depends": "TEST_HAS_BUILTIN(__builtin_addressof) || TEST_GCC_VER >= 700",
   "internal_depends": "defined(_LIBCUDACXX_ADDRESSOF)"},
  {"name": "__cpp_lib_raw_memory_algorithms", "values": {"c++17": 201606L}, "headers": ["memory"]},
  {"name": "__cpp_lib_enable_shared_from_this", "values": {"c++17": 201603L}, "headers": ["memory"]},
  {"name": "__cpp_lib_shared_ptr_weak_type", "values": {"c++17": 201606L}, "headers": ["memory"]},
  {"name": "__cpp_lib_shared_ptr_arrays", "values": {"c++17": 201611L}, "headers": ["memory"],
   "unimplemented": True},
  {"name": "__cpp_lib_memory_resource", "values": {"c++17": 201603L}, "headers": ["memory_resource"],
   "unimplemented": True},
  {"name": "__cpp_lib_boyer_moore_searcher", "values": {"c++17": 201603L}, "headers": ["functional"],
   "unimplemented": True},
  {"name": "__cpp_lib_not_fn", "values": {"c++17": 201603L}, "headers": ["functional"]},
  {"name": "__cpp_lib_bool_constant", "values": {"c++17": 201505L}, "headers": ["type_traits"]},
  {"name": "__cpp_lib_type_trait_variable_templates", "values": {"c++17": 201510L}, "headers": ["type_traits"]},
  {"name": "__cpp_lib_logical_traits", "values": {"c++17": 201510L}, "headers": ["type_traits"]},
  {"name": "__cpp_lib_is_swappable", "values": {"c++17": 201603L}, "headers": ["type_traits"]},
  {"name": "__cpp_lib_is_invocable", "values": {"c++17": 201703L}, "headers": ["type_traits"]},
  {"name": "__cpp_lib_has_unique_object_representations", "values": {"c++17": 201606L},
   "headers": ["type_traits"],
   "depends": "TEST_HAS_BUILTIN_IDENTIFIER(__has_unique_object_representations) || TEST_GCC_VER >= 700",
   "internal_depends": "defined(_LIBCUDACXX_HAS_UNIQUE_OBJECT_REPRESENTATIONS)"},
  {"name": "__cpp_lib_is_aggregate", "values": {"c++17": 201703L}, "headers": ["type_traits"],
   "depends": "TEST_HAS_BUILTIN_IDENTIFIER(__is_aggregate) || TEST_GCC_VER_NEW >= 7001",
   "internal_depends": "!defined(_LIBCUDACXX_HAS_NO_IS_AGGREGATE)"},
  {"name": "__cpp_lib_chrono", "values": {"c++17": 201611L}, "headers": ["chrono"]},
  {"name": "__cpp_lib_execution", "values": {"c++17": 201603L}, "headers": ["execution"],
   "unimplemented": True},
  {"name": "__cpp_lib_parallel_algorithm", "values": {"c++17": 201603L},
   "headers": ["algorithm", "numeric"], "unimplemented": True},
  {"name": "__cpp_lib_to_chars", "values": {"c++17": 201611L}, "headers": ["utility"],
   "unimplemented": True},
  {"name": "__cpp_lib_string_view", "values": {"c++17": 201606L}, "headers": ["string", "string_view"]},
  {"name": "__cpp_lib_allocator_traits_is_always_equal", "values": {"c++17": 201411L},
   "headers": ["memory", "scoped_allocator", "string", "deque", "forward_list", "list",
               "vector", "map", "set", "unordered_map", "unordered_set"]},
  {"name": "__cpp_lib_incomplete_container_elements", "values": {"c++17": 201505L},
   "headers": ["forward_list", "list", "vector"]},
  {"name": "__cpp_lib_map_try_emplace", "values": {"c++17": 201411L}, "headers": ["map"]},
  {"name": "__cpp_lib_unordered_map_try_emplace", "values": {"c++17": 201411L}, "headers": ["unordered_map"]},
  {"name": "__cpp_lib_array_constexpr", "values": {"c++17": 201603L}, "headers": ["iterator", "array"]},
  {"name": "__cpp_lib_nonmember_container_access", "values": {"c++17": 201411L},
   "headers": ["iterator", "array", "deque", "forward_list", "list", "map", "regex",
               "set", "string", "unordered_map", "unordered_set", "vector"]},
  {"name": "__cpp_lib_sample", "values": {"c++17": 201603L}, "headers": ["algorithm"]},
  {"name": "__cpp_lib_clamp", "values": {"c++17": 201603L}, "headers": ["algorithm"]},
  {"name": "__cpp_lib_gcd_lcm", "values": {"c++17": 201606L}, "headers": ["numeric"]},
  {"name": "__cpp_lib_hypot", "values": {"c++17": 201603L}, "headers": ["cmath"]},
  {"name": "__cpp_lib_math_special_functions", "values": {"c++17": 201603L}, "headers": ["cmath"],
   "unimplemented": True},
  {"name": "__cpp_lib_shared_mutex", "values": {"c++17": 201505L}, "headers": ["shared_mutex"],
   "depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)",
   "internal_depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)"},
  {"name": "__cpp_lib_scoped_lock", "values": {"c++17": 201703L}, "headers": ["mutex"]},
  # C++2a
  {"name": "__cpp_lib_char8_t", "values": {"c++2a": 201811L},
   "headers": ["atomic", "filesystem", "istream", "limits", "locale", "ostream",
               "string", "string_view"],
   "depends": "defined(__cpp_char8_t)",
   "internal_depends": "!defined(_LIBCUDACXX_NO_HAS_CHAR8_T)"},
  {"name": "__cpp_lib_erase_if", "values": {"c++2a": 201811L},
   "headers": ["string", "deque", "forward_list", "list", "vector", "map", "set",
               "unordered_map", "unordered_set"]},
  {"name": "__cpp_lib_destroying_delete", "values": {"c++2a": 201806L}, "headers": ["new"],
   "depends": "TEST_STD_VER > 17"
              " && defined(__cpp_impl_destroying_delete)"
              " && __cpp_impl_destroying_delete >= 201806L",
   "internal_depends": "_LIBCUDACXX_STD_VER > 17"
                       " && defined(__cpp_impl_destroying_delete)"
                       " && __cpp_impl_destroying_delete >= 201806L"},
  {"name": "__cpp_lib_three_way_comparison", "values": {"c++2a": 201711L}, "headers": ["compare"],
   "unimplemented": True},
  {"name": "__cpp_lib_concepts", "values": {"c++14": 202002L}, "headers": ["concepts"]},
  {"name": "__cpp_lib_constexpr_swap_algorithms", "values": {"c++2a": 201806L}, "headers": ["algorithm"],
   "unimplemented": True},
  {"name": "__cpp_lib_constexpr_misc", "values": {"c++2a": 201811L},
   "headers": ["array", "functional", "iterator", "string_view", "tuple", "utility"],
   "unimplemented": True},
  {"name": "__cpp_lib_bind_front", "values": {"c++17": 201907L}, "headers": ["functional"]},
  {"name": "__cpp_lib_is_constant_evaluated", "values": {"c++2a": 201811L}, "headers": ["type_traits"],
   "depends": "TEST_HAS_BUILTIN(__builtin_is_constant_evaluated) || TEST_GCC_VER >= 900",
   "internal_depends": "defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED)"},
  {"name": "__cpp_lib_list_remove_return_type", "values": {"c++2a": 201806L},
   "headers": ["forward_list", "list"], "unimplemented": True},
  {"name": "__cpp_lib_generic_unordered_lookup", "values": {"c++2a": 201811L},
   "headers": ["unordered_map", "unordered_set"], "unimplemented": True},
  {"name": "__cpp_lib_ranges", "values": {"c++2a": 201811L},
   "headers": ["algorithm", "functional", "iterator", "memory", "ranges"],
   "unimplemented": True},
  {"name": "__cpp_lib_bit_cast", "values": {"c++2a": 201806L}, "headers": ["bit"],
   "unimplemented": True},
  {"name": "__cpp_lib_atomic_ref", "values": {"c++2a": 201806L}, "headers": ["atomic"],
   "unimplemented": True,
   "depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)",
   "internal_depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)"},
  {"name": "__cpp_lib_interpolate", "values": {"c++2a": 201902L}, "headers": ["numeric"]},
]], key=lambda tc: tc["name"])


def get_std_dialects():
    std_dialects = ['c++14', 'c++17', 'c++2a']
    return list(std_dialects)


def get_first_std(d):
    for s in get_std_dialects():
        if s in d.keys():
            return s
    return None


def get_last_std(d):
    rev_dialects = get_std_dialects()
    rev_dialects.reverse()
    for s in rev_dialects:
        if s in d.keys():
            return s
    return None


def get_std_before(d, std):
    std_dialects = get_std_dialects()
    candidates = std_dialects[0:std_dialects.index(std)]
    candidates.reverse()
    for cand in candidates:
        if cand in d.keys():
            return cand
    return None


def get_value_before(d, std):
    new_std = get_std_before(d, std)
    if new_std is None:
        return None
    return d[new_std]


def get_for_std(d, std):
    # This catches the C++11 case for which there should be no defined feature
    # test macros.
    std_dialects = get_std_dialects()
    if std not in std_dialects:
        return None
    # Find the value for the newest C++ dialect between C++14 and std
    std_list = list(std_dialects[0:std_dialects.index(std)+1])
    std_list.reverse()
    for s in std_list:
        if s in d.keys():
            return d[s]
    return None


"""
  Functions to produce the <version> header
"""

def produce_macros_definition_for_std(std):
    result = ""
    indent = 56
    for tc in feature_test_macros:
        if std not in tc["values"]:
            continue
        inner_indent = 1
        if 'depends' in tc.keys():
            assert 'internal_depends' in tc.keys()
            result += "# if %s\n" % tc["internal_depends"]
            inner_indent += 2
        if get_value_before(tc["values"], std) is not None:
            assert 'depends' not in tc.keys()
            result += "# undef %s\n" % tc["name"]
        line = "#%sdefine %s" % ((" " * inner_indent), tc["name"])
        line += " " * (indent - len(line))
        line += "%sL" % tc["values"][std]
        if 'unimplemented' in tc.keys():
            line = "// " + line
        result += line
        result += "\n"
        if 'depends' in tc.keys():
            result += "# endif\n"
    return result


def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    for i in range(0, len(l), n):
        yield l[i:i + n]


def produce_version_synopsis():
    indent = 56
    header_indent = 56 + len("20XXYYL ")
    result = ""
    def indent_to(s, val):
        if len(s) >= val:
            return s
        s += " " * (val - len(s))
        return s
    line = indent_to("Macro name", indent) + "Value"
    line = indent_to(line, header_indent) + "Headers"
    result += line + "\n"
    for tc in feature_test_macros:
        prev_defined_std = get_last_std(tc["values"])
        line = "{name: <{indent}}{value}L ".format(name=tc['name'], indent=indent,
                                                   value=tc["values"][prev_defined_std])
        headers = list(tc["headers"])
        headers.remove("version")
        for chunk in chunks(headers, 3):
            line = indent_to(line, header_indent)
            chunk = ['<%s>' % header for header in chunk]
            line += ' '.join(chunk)
            result += line
            result += "\n"
            line = ""
        while True:
            prev_defined_std = get_std_before(tc["values"], prev_defined_std)
            if prev_defined_std is None:
                break
            result += "%s%sL // %s\n" % (indent_to("", indent),
                                         tc["values"][prev_defined_std],
                                         prev_defined_std.replace("c++", "C++"))
    return result


def produce_version_header():
    template="""// -*- C++ -*-
//===--------------------------- version ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef _LIBCUDACXX_VERSIONH
#define _LIBCUDACXX_VERSIONH

/*
  version synopsis

{synopsis}

*/

#include <__config>

#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
#pragma GCC system_header
#endif

#if _LIBCUDACXX_STD_VER > 11
{cxx14_macros}
#endif

#if _LIBCUDACXX_STD_VER > 14
{cxx17_macros}
#endif

#if _LIBCUDACXX_STD_VER > 17
{cxx2a_macros}
#endif

#endif // _LIBCUDACXX_VERSIONH
"""
    return template.format(
        synopsis=produce_version_synopsis().strip(),
        cxx14_macros=produce_macros_definition_for_std('c++14').strip(),
        cxx17_macros=produce_macros_definition_for_std('c++17').strip(),
        cxx2a_macros=produce_macros_definition_for_std('c++2a').strip())


"""
  Functions to produce test files
"""

test_types = {
  "undefined": """
# ifdef {name}
#   error "{name} should not be defined before {std_first}"
# endif
""",

  "depends": """
# if {depends}
#   ifndef {name}
#     error "{name} should be defined in {std}"
#   endif
#   if {name} != {value}
#     error "{name} should have the value {value} in {std}"
#   endif
# else
#   ifdef {name}
#     error "{name} should not be defined when {depends} is not defined!"
#   endif
# endif
""",

  "unimplemented": """
# if !defined(_LIBCUDACXX_VERSION)
#   ifndef {name}
#     error "{name} should be defined in {std}"
#   endif
#   if {name} != {value}
#     error "{name} should have the value {value} in {std}"
#   endif
# else // _LIBCUDACXX_VERSION
#   ifdef {name}
#     error "{name} should not be defined because it is unimplemented in libc++!"
#   endif
# endif
""",

  "defined": """
# ifndef {name}
#   error "{name} should be defined in {std}"
# endif
# if {name} != {value}
#   error "{name} should have the value {value} in {std}"
# endif
""",
}

def generate_std_test(test_list, std):
    result = ""
    for tc in test_list:
        val = get_for_std(tc["values"], std)
        if val is not None:
            val = "%sL" % val
        if val is None:
            result += test_types["undefined"].format(name=tc["name"],
                                                     std_first=get_first_std(tc["values"]))
        elif 'unimplemented' in tc.keys():
            result += test_types["unimplemented"].format(name=tc["name"],
                                                         value=val, std=std)
        elif "depends" in tc.keys():
            result += test_types["depends"].format(name=tc["name"], value=val,
                                                   std=std, depends=tc["depends"])
        else:
            result += test_types["defined"].format(name=tc["name"],
                                                   value=val, std=std)
    return result


def generate_synopsis(test_list):
    max_name_len = max([len(tc["name"]) for tc in test_list])
    indent = max_name_len + 8
    def mk_line(prefix, suffix):
        return "{prefix: <{max_len}}{suffix}\n".format(prefix=prefix, suffix=suffix,
                                                       max_len=indent)
    result = ""
    result += mk_line("/* Constant", "Value")
    for tc in test_list:
        prefix = " %s" % tc["name"]
        for std in [s for s in get_std_dialects() if s in tc["values"].keys()]:
            result += mk_line(prefix, "%sL [%s]" % (tc["values"][std],
                                                    std.replace("c++", "C++")))
            prefix = ""
    result += "*/"
    return result


def is_threading_header_unsafe_to_include(h):
    # NOTE: "<mutex>" does not blow up when included without threads.
    return h in ['atomic', 'shared_mutex']


def produce_tests():
    headers = set([h for tc in feature_test_macros for h in tc["headers"]])
    for h in headers:
        test_list = [tc for tc in feature_test_macros if h in tc["headers"]]
        if not has_header(h):
            for tc in test_list:
                assert 'unimplemented' in tc.keys()
            continue
        test_tags = ""
        if is_threading_header_unsafe_to_include(h):
            test_tags += '\n// UNSUPPORTED: libcpp-has-no-threads\n'
        test_body = \
"""//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// WARNING: This test was generated by {script_name}
// and should not be edited manually.
{test_tags}
// <{header}>

// Test the feature test macros defined by <{header}>

{synopsis}

#include <{header}>
#include "test_macros.h"

#if TEST_STD_VER < 14

{cxx11_tests}

#elif TEST_STD_VER == 14

{cxx14_tests}

#elif TEST_STD_VER == 17

{cxx17_tests}

#elif TEST_STD_VER > 17

{cxx2a_tests}

#endif // TEST_STD_VER > 17

int main(int, char**) {{ return 0; }}
""".format(script_name=script_name,
           header=h,
           test_tags=test_tags,
           synopsis=generate_synopsis(test_list),
           cxx11_tests=generate_std_test(test_list, 'c++11').strip(),
           cxx14_tests=generate_std_test(test_list, 'c++14').strip(),
           cxx17_tests=generate_std_test(test_list, 'c++17').strip(),
           cxx2a_tests=generate_std_test(test_list, 'c++2a').strip())
        test_name = "{header}.version.pass.cpp".format(header=h)
        out_path = os.path.join(macro_test_path, test_name)
        with open(out_path, 'w') as f:
            f.write(test_body)


"""
  Produce documentation for the feature test macros
"""

def make_widths(grid):
    widths = []
    for i in range(0, len(grid[0])):
        cell_width = 2 + max(reduce(lambda x,y: x+y,
                                    [[len(row[i])] for row in grid], []))
        widths += [cell_width]
    return widths


def create_table(grid, indent):
    indent_str = ' '*indent
    col_widths = make_widths(grid)
    num_cols = len(grid[0])
    result = indent_str + add_divider(col_widths, 2)
    header_flag = 2
    for row_i in xrange(0, len(grid)):
        row = grid[row_i]
        result = result + indent_str + ' '.join([pad_cell(row[i], col_widths[i])
                                                 for i in range(0, len(row))]) + '\n'
        is_cxx_header = row[0].startswith('**')
        if row_i == len(grid) - 1:
            header_flag = 2
        result = result + indent_str + add_divider(col_widths,
                                                   1 if is_cxx_header else header_flag)
        header_flag = 0
    return result


def add_divider(widths, header_flag):
    if header_flag == 2:
        return ' '.join(['='*w for w in widths]) + '\n'
    if header_flag == 1:
        return '-'.join(['-'*w for w in widths]) + '\n'
    else:
        return ' '.join(['-'*w for w in widths]) + '\n'


def pad_cell(s, length, left_align=True):
    padding = ((length - len(s)) * ' ')
    return s + padding


def get_status_table():
    table = [["Macro Name", "Value"]]
    for std in get_std_dialects():
        table += [["**" + std.replace("c++", "C++ ") + "**", ""]]
        for tc in feature_test_macros:
            if std not in tc["values"].keys():
                continue
            value = "``%sL``" % tc["values"][std]
            if 'unimplemented' in tc.keys():
                value = '*unimplemented*'
            table += [["``%s``" % tc["name"], value]]
    return table


def produce_docs():
    doc_str = """.. _FeatureTestMacroTable:

==========================
Feature Test Macro Support
==========================

.. contents::
   :local:

Overview
========

This file documents the feature test macros currently supported by libc++.

.. _feature-status:

Status
======

.. table:: Current Status
   :name: feature-status-table
   :widths: auto

{status_tables}

""".format(status_tables=create_table(get_status_table(), 4))
    table_doc_path = os.path.join(docs_path, 'FeatureTestMacroTable.rst')
    with open(table_doc_path, 'w') as f:
        f.write(doc_str)


def main():
    with tempfile.NamedTemporaryFile(mode='w', prefix='version.',
                                     delete=False) as tmp_file:
        print("producing new <version> header as %s" % tmp_file.name)
        tmp_file.write(produce_version_header())
    produce_tests()
    produce_docs()


if __name__ == '__main__':
    main()
cccl-main
libcudacxx/libcxx/utils/generate_feature_test_macro_components.py
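A minimal sketch of the column-56 '# define' layout that produce_macros_definition_for_std() emits; the macro name and value are taken from the table above:

# Sketch only, mirroring the line-building logic in the generator.
indent = 56
name, value = "__cpp_lib_make_unique", 201304
line = "# define %s" % name
line += " " * (indent - len(line))
line += "%sL" % value
print(line)
# -> # define __cpp_lib_make_unique                         201304L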
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
sym_match - Match all symbols in a list against a list of regexes.
"""
from argparse import ArgumentParser
import sys
from libcxx.sym_check import util, match, extract


def main():
    parser = ArgumentParser(
        description='Extract a list of symbols from a shared library.')
    parser.add_argument(
        '--blacklist', dest='blacklist',
        type=str, action='store', default=None)
    parser.add_argument(
        'symbol_list', metavar='symbol_list', type=str,
        help='The file containing the old symbol list')
    parser.add_argument(
        'regexes', metavar='regexes', default=[], nargs='*',
        help='The file containing the new symbol list or a library')

    args = parser.parse_args()
    if not args.regexes and args.blacklist is None:
        sys.stderr.write('Either a regex or a blacklist must be specified.\n')
        sys.exit(1)
    if args.blacklist:
        search_list = util.read_blacklist(args.blacklist)
    else:
        search_list = args.regexes

    symbol_list = util.extract_or_load(args.symbol_list)

    matching_count, report = match.find_and_report_matching(
        symbol_list, search_list)
    sys.stdout.write(report)
    if matching_count != 0:
        print('%d matching symbols found...' % matching_count)


if __name__ == '__main__':
    main()
cccl-main
libcudacxx/libcxx/utils/sym_match.py
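A hypothetical invocation flagging blacklisted symbols in an extracted list; both file names are assumptions:

#   python sym_match.py --blacklist blacklist.txt syms.txt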
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

from argparse import ArgumentParser
from ctypes.util import find_library
import distutils.spawn
import glob
import tempfile
import os
import shutil
import subprocess
import signal
import sys

temp_directory_root = None

def exit_with_cleanups(status):
    if temp_directory_root is not None:
        shutil.rmtree(temp_directory_root)
    sys.exit(status)

def print_and_exit(msg):
    sys.stderr.write(msg + '\n')
    exit_with_cleanups(1)

def find_and_diagnose_missing(lib, search_paths):
    if os.path.exists(lib):
        return os.path.abspath(lib)
    if not lib.startswith('lib') or not lib.endswith('.a'):
        print_and_exit(("input file '%s' does not name a static library. "
                        "It should start with 'lib' and end with '.a'") % lib)
    for sp in search_paths:
        assert type(sp) is list and len(sp) == 1
        path = os.path.join(sp[0], lib)
        if os.path.exists(path):
            return os.path.abspath(path)
    print_and_exit("input '%s' does not exist" % lib)


def execute_command(cmd, cwd=None):
    """
    Execute a command, capture and return its output.
    """
    kwargs = {
        'stdin': subprocess.PIPE,
        'stdout': subprocess.PIPE,
        'stderr': subprocess.PIPE,
        'cwd': cwd,
        'universal_newlines': True
    }
    p = subprocess.Popen(cmd, **kwargs)
    out, err = p.communicate()
    exitCode = p.wait()
    if exitCode == -signal.SIGINT:
        raise KeyboardInterrupt
    return out, err, exitCode


def execute_command_verbose(cmd, cwd=None, verbose=False):
    """
    Execute a command and print its output on failure.
    """
    out, err, exitCode = execute_command(cmd, cwd=cwd)
    if exitCode != 0 or verbose:
        report = "Command: %s\n" % ' '.join(["'%s'" % a for a in cmd])
        if exitCode != 0:
            report += "Exit Code: %d\n" % exitCode
        if out:
            report += "Standard Output:\n--\n%s--" % out
        if err:
            report += "Standard Error:\n--\n%s--" % err
        if exitCode != 0:
            report += "\n\nFailed!"
        sys.stderr.write('%s\n' % report)
        if exitCode != 0:
            exit_with_cleanups(exitCode)
    return out


def main():
    parser = ArgumentParser(
        description="Merge multiple archives into a single library")
    parser.add_argument(
        '-v', '--verbose', dest='verbose', action='store_true', default=False)
    parser.add_argument(
        '-o', '--output', dest='output', required=True,
        help='The output file. stdout is used if not given',
        type=str, action='store')
    parser.add_argument(
        '-L', dest='search_paths',
        help='Paths to search for the libraries along', action='append',
        nargs=1)
    parser.add_argument(
        '--ar', dest='ar_exe', required=False,
        help='The ar executable to use, finds \'ar\' in the path if not given',
        type=str, action='store')
    parser.add_argument(
        '--use-libtool', dest='use_libtool', action='store_true', default=False)
    parser.add_argument(
        '--libtool', dest='libtool_exe', required=False,
        help='The libtool executable to use, finds \'libtool\' in the path if not given',
        type=str, action='store')
    parser.add_argument(
        'archives', metavar='archives', nargs='+',
        help='The archives to merge')

    args = parser.parse_args()

    ar_exe = args.ar_exe
    if not ar_exe:
        ar_exe = distutils.spawn.find_executable('ar')
    if not ar_exe:
        print_and_exit("failed to find 'ar' executable")

    if args.use_libtool:
        libtool_exe = args.libtool_exe
        if not libtool_exe:
            libtool_exe = distutils.spawn.find_executable('libtool')
        if not libtool_exe:
            print_and_exit("failed to find 'libtool' executable")

    if len(args.archives) < 2:
        print_and_exit('fewer than 2 inputs provided')
    archives = [find_and_diagnose_missing(ar, args.search_paths)
                for ar in args.archives]
    print ('Merging archives: %s' % archives)
    if not os.path.exists(os.path.dirname(args.output)):
        print_and_exit("output path doesn't exist: '%s'" % args.output)

    global temp_directory_root
    temp_directory_root = tempfile.mkdtemp('.libcxx.merge.archives')

    files = []
    for arc in archives:
        execute_command_verbose([ar_exe, 'x', arc],
                                cwd=temp_directory_root, verbose=args.verbose)
        out = execute_command_verbose([ar_exe, 't', arc])
        files.extend(out.splitlines())

    if args.use_libtool:
        files = [f for f in files if not f.startswith('__.SYMDEF')]
        execute_command_verbose([libtool_exe, '-static', '-o', args.output] + files,
                                cwd=temp_directory_root, verbose=args.verbose)
    else:
        execute_command_verbose([ar_exe, 'rcs', args.output] + files,
                                cwd=temp_directory_root, verbose=args.verbose)


if __name__ == '__main__':
    main()
    exit_with_cleanups(0)
cccl-main
libcudacxx/libcxx/utils/merge_archives.py
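A minimal usage sketch for the script above. The paths and archive names here are hypothetical; it assumes 'ar' is on PATH, both input archives exist under build/lib, and the output directory already exists.

# Hypothetical invocation of merge_archives.py from another Python process.
import subprocess
import sys

cmd = [
    sys.executable, 'merge_archives.py',
    '-o', 'build/libc++merged.a',    # output archive; its directory must exist
    '-L', 'build/lib',               # search path used to resolve the inputs
    'libc++.a', 'libc++abi.a',       # at least two input archives are required
]
subprocess.check_call(cmd)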
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

"""not.py is a utility for inverting the return code of commands.
It acts similar to llvm/utils/not.
ex: python /path/to/not.py ' echo hello
    echo $? // (prints 1)
"""

import subprocess
import sys


def which_cannot_find_program(prog):
    # Allow for import errors on distutils.spawn
    try:
        import distutils.spawn
        # Keep the original name around so the error message can report it.
        found = distutils.spawn.find_executable(prog[0])
        if found is None:
            sys.stderr.write('Failed to find program %s' % prog[0])
            return True
        return False
    except:
        return False


def main():
    argv = list(sys.argv)
    del argv[0]
    if len(argv) > 0 and argv[0] == '--crash':
        del argv[0]
        expectCrash = True
    else:
        expectCrash = False
    if len(argv) == 0:
        return 1
    if which_cannot_find_program(argv[0]):
        return 1
    rc = subprocess.call(argv)
    if rc < 0:
        return 0 if expectCrash else 1
    if expectCrash:
        return 1
    return rc == 0


if __name__ == '__main__':
    exit(main())
cccl-main
libcudacxx/libcxx/utils/not.py
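A minimal usage sketch for not.py. It assumes a POSIX system where the 'true' and 'false' utilities are on PATH; the inversion is visible in the exit codes.

# Hypothetical invocation of not.py, showing the inverted return codes.
import subprocess
import sys

rc = subprocess.call([sys.executable, 'not.py', 'false'])
assert rc == 0   # the wrapped command failed, so not.py reports success

rc = subprocess.call([sys.executable, 'not.py', 'true'])
assert rc == 1   # the wrapped command succeeded, so not.py reports failure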
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

import platform
import os
import libcxx.util


class CXXCompiler(object):
    CM_Default = 0
    CM_PreProcess = 1
    CM_Compile = 2
    CM_Link = 3

    def __init__(self, path, first_arg,
                 flags=None, compile_flags=None, link_flags=None,
                 warning_flags=None, verify_supported=None,
                 verify_flags=None, use_verify=False,
                 modules_flags=None, use_modules=False,
                 use_ccache=False, use_warnings=False,
                 compile_env=None, cxx_type=None, cxx_version=None):
        self.source_lang = 'c++'
        self.path = path
        self.first_arg = first_arg or ''
        self.flags = list(flags or [])
        self.compile_flags = list(compile_flags or [])
        self.link_flags = list(link_flags or [])
        self.warning_flags = list(warning_flags or [])
        self.verify_supported = verify_supported
        self.use_verify = use_verify
        self.verify_flags = list(verify_flags or [])
        assert not use_verify or verify_supported
        assert not use_verify or verify_flags is not None
        self.modules_flags = list(modules_flags or [])
        self.use_modules = use_modules
        assert not use_modules or modules_flags is not None
        self.use_ccache = use_ccache
        self.use_warnings = use_warnings
        if compile_env is not None:
            self.compile_env = dict(compile_env)
        else:
            self.compile_env = None
        self.type = cxx_type
        self.version = cxx_version
        if self.type is None or self.version is None:
            self._initTypeAndVersion()

    def isVerifySupported(self):
        if self.verify_supported is None:
            self.verify_supported = self.hasCompileFlag(
                ['-Xclang', '-verify-ignore-unexpected'])
            if self.verify_supported:
                self.verify_flags = [
                    '-Xclang', '-verify',
                    '-Xclang', '-verify-ignore-unexpected=note',
                    '-ferror-limit=1024'
                ]
        return self.verify_supported

    def useVerify(self, value=True):
        self.use_verify = value
        assert not self.use_verify or self.verify_flags is not None

    def useModules(self, value=True):
        self.use_modules = value
        assert not self.use_modules or self.modules_flags is not None

    def useCCache(self, value=True):
        self.use_ccache = value

    def useWarnings(self, value=True):
        self.use_warnings = value

    def _initTypeAndVersion(self):
        # Get compiler type and version
        try:
            macros = self.dumpMacros()
            compiler_type = None
            major_ver = minor_ver = patchlevel = None
            self.is_nvrtc = False

            if '__NVCC__' in macros.keys():
                compiler_type = 'nvcc'
                major_ver = macros['__CUDACC_VER_MAJOR__']
                minor_ver = macros['__CUDACC_VER_MINOR__']
                patchlevel = macros['__CUDACC_VER_BUILD__']
                if '__LIBCUDACXX_NVRTC_TEST__' in macros.keys():
                    self.is_nvrtc = True
            elif '__NVCOMPILER' in macros.keys():
                compiler_type = 'nvhpc'
                # nvhpc, unfortunately, adds an extra space between the macro
                # name and macro value in their macro dump mode.
                major_ver = macros['__NVCOMPILER'].strip()
                minor_ver = macros['___NVCOMPILER_MINOR__'].strip()
                patchlevel = macros['___NVCOMPILER_PATCHLEVEL__'].strip()
            elif '__INTEL_COMPILER' in macros.keys():
                compiler_type = 'icc'
                # Use integer division so the version components stay ints.
                major_ver = int(macros['__INTEL_COMPILER']) // 100
                minor_ver = (int(macros['__INTEL_COMPILER']) % 100) // 10
                patchlevel = int(macros['__INTEL_COMPILER']) % 10
            elif '__clang__' in macros.keys():
                compiler_type = 'clang'
                # Treat Apple's LLVM fork differently.
                if '__apple_build_version__' in macros.keys():
                    compiler_type = 'apple-clang'
                major_ver = macros['__clang_major__']
                minor_ver = macros['__clang_minor__']
                patchlevel = macros['__clang_patchlevel__']
            elif '__GNUC__' in macros.keys():
                compiler_type = 'gcc'
                major_ver = macros['__GNUC__']
                minor_ver = macros['__GNUC_MINOR__']
                patchlevel = macros['__GNUC_PATCHLEVEL__']

            if '__cplusplus' in macros.keys():
                cplusplus = macros['__cplusplus']
                if cplusplus[-1] == 'L':
                    cplusplus = cplusplus[:-1]
                cpp_standard = int(cplusplus)

                if cpp_standard <= 199711:
                    default_dialect = "c++03"
                elif cpp_standard <= 201103:
                    default_dialect = "c++11"
                elif cpp_standard <= 201402:
                    default_dialect = "c++14"
                elif cpp_standard <= 201703:
                    default_dialect = "c++17"
                else:
                    default_dialect = "c++20"
            else:
                default_dialect = "c++03"

            self.type = compiler_type
            self.version = (major_ver, minor_ver, patchlevel)
            self.default_dialect = default_dialect
        except:
            (self.type, self.version, self.default_dialect, self.is_nvrtc) = \
                self.dumpVersion()

        if self.type == 'nvcc':
            # Treat C++ as CUDA when the compiler is NVCC.
            self.source_lang = 'cu'

    def _basicCmd(self, source_files, out, mode=CM_Default, flags=[],
                  input_is_cxx=False):
        cmd = []
        if self.use_ccache \
                and not mode == self.CM_Link \
                and not mode == self.CM_PreProcess:
            cmd += [os.environ.get('CMAKE_CXX_COMPILER_LAUNCHER')]
        cmd += [self.path] + ([self.first_arg] if self.first_arg != '' else [])
        if out is not None:
            cmd += ['-o', out]
        if input_is_cxx:
            cmd += ['-x', self.source_lang]
        if isinstance(source_files, list):
            cmd += source_files
        elif isinstance(source_files, str):
            cmd += [source_files]
        else:
            raise TypeError('source_files must be a string or list')
        if mode == self.CM_PreProcess:
            cmd += ['-E']
        elif mode == self.CM_Compile:
            cmd += ['-c']
        cmd += self.flags
        if self.use_verify:
            cmd += self.verify_flags
            assert mode in [self.CM_Default, self.CM_Compile]
        if self.use_modules:
            cmd += self.modules_flags
        if mode != self.CM_Link:
            cmd += self.compile_flags
            if self.use_warnings:
                cmd += self.warning_flags
        if mode != self.CM_PreProcess and mode != self.CM_Compile:
            cmd += self.link_flags
        cmd += flags
        return cmd

    def preprocessCmd(self, source_files, out=None, flags=[]):
        return self._basicCmd(source_files, out, flags=flags,
                              mode=self.CM_PreProcess,
                              input_is_cxx=True)

    def compileCmd(self, source_files, out=None, flags=[]):
        return self._basicCmd(source_files, out, flags=flags,
                              mode=self.CM_Compile,
                              input_is_cxx=True) + ['-c']

    def linkCmd(self, source_files, out=None, flags=[]):
        return self._basicCmd(source_files, out, flags=flags,
                              mode=self.CM_Link)

    def compileLinkCmd(self, source_files, out=None, flags=[]):
        return self._basicCmd(source_files, out, flags=flags)

    def preprocess(self, source_files, out=None, flags=[], cwd=None):
        cmd = self.preprocessCmd(source_files, out, flags)
        out, err, rc = libcxx.util.executeCommand(cmd, env=self.compile_env,
                                                  cwd=cwd)
        return cmd, out, err, rc

    def compile(self, source_files, out=None, flags=[], cwd=None):
        cmd = self.compileCmd(source_files, out, flags)
        out, err, rc = libcxx.util.executeCommand(cmd, env=self.compile_env,
                                                  cwd=cwd)
        return cmd, out, err, rc

    def link(self, source_files, out=None, flags=[], cwd=None):
        cmd = self.linkCmd(source_files, out, flags)
        out, err, rc = libcxx.util.executeCommand(cmd, env=self.compile_env,
                                                  cwd=cwd)
        return cmd, out, err, rc

    def compileLink(self, source_files, out=None, flags=[], cwd=None):
        cmd = self.compileLinkCmd(source_files, out, flags)
        out, err, rc = libcxx.util.executeCommand(cmd, env=self.compile_env,
                                                  cwd=cwd)
        return cmd, out, err, rc

    def compileLinkTwoSteps(self, source_file, out=None, object_file=None,
                            flags=[], cwd=None):
        if not isinstance(source_file, str):
            raise TypeError('This function only accepts a single input file')
        if object_file is None:
            # Create, use and delete a temporary object file if none is given.
            with_fn = lambda: libcxx.util.guardedTempFilename(suffix='.o')
        else:
            # Otherwise wrap the filename in a context manager function.
            with_fn = lambda: libcxx.util.nullContext(object_file)
        with with_fn() as object_file:
            cc_cmd, cc_stdout, cc_stderr, rc = self.compile(
                source_file, object_file, flags=flags, cwd=cwd)
            if rc != 0:
                return cc_cmd, cc_stdout, cc_stderr, rc
            link_cmd, link_stdout, link_stderr, rc = self.link(
                object_file, out=out, flags=flags, cwd=cwd)
            return (cc_cmd + ['&&'] + link_cmd, cc_stdout + link_stdout,
                    cc_stderr + link_stderr, rc)

    def dumpVersion(self, flags=[], cwd=None):
        dumpversion_cpp = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "dumpversion.cpp")
        with_fn = lambda: libcxx.util.guardedTempFilename(suffix=".exe")
        with with_fn() as exe:
            cmd, out, err, rc = self.compileLink([dumpversion_cpp], out=exe,
                                                 flags=flags, cwd=cwd)
            if rc != 0:
                return ("unknown", (0, 0, 0), "c++03", False)
            out, err, rc = libcxx.util.executeCommand(exe, env=self.compile_env,
                                                      cwd=cwd)
            version = None
            try:
                version = eval(out)
            except:
                pass
            if not (isinstance(version, tuple) and 4 == len(version)):
                version = ("unknown", (0, 0, 0), "c++03", False)
            return version

    def dumpMacros(self, source_files=None, flags=[], cwd=None):
        if source_files is None:
            source_files = os.devnull
        flags = ['-dM'] + flags
        cmd, out, err, rc = self.preprocess(source_files, flags=flags, cwd=cwd)
        if rc != 0:
            flags = ['-Xcompiler'] + flags
            cmd, out, err, rc = self.preprocess(source_files, flags=flags,
                                                cwd=cwd)
        if rc != 0:
            return cmd, out, err, rc
        parsed_macros = {}
        lines = [l.strip() for l in out.split('\n') if l.strip()]
        for l in lines:
            # NVHPC also outputs the file contents from -E -dM for some
            # reason; handle that.
            if not l.startswith('#define '):
                if '__NVCOMPILER' not in parsed_macros.keys():
                    assert False, "a line not starting with '#define' " \
                                  "encountered in predefined macro dump"
                else:
                    continue
            l = l[len('#define '):]
            macro, _, value = l.partition(' ')
            parsed_macros[macro] = value
        return parsed_macros

    def getTriple(self):
        if self.type == "msvc":
            return "x86_64-pc-windows-msvc"
        cmd = [self.path] + self.flags + ['-dumpmachine']
        return libcxx.util.capture(cmd).strip()

    def hasCompileFlag(self, flag):
        if isinstance(flag, list):
            flags = list(flag)
        else:
            flags = [flag]
        # Add -Werror to ensure that an unrecognized flag causes a non-zero
        # exit code. -Werror is supported on all known non-nvcc compiler types.
        if self.type is not None and self.type != 'nvcc' and self.type != 'msvc':
            flags += ['-Werror', '-fsyntax-only']
        empty_cpp = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 "empty.cpp")
        cmd, out, err, rc = self.compile(empty_cpp, out=os.devnull,
                                         flags=flags)
        if out.find('flag is not supported with the configured host compiler') != -1:
            return False
        if err.find('flag is not supported with the configured host compiler') != -1:
            return False
        return rc == 0

    def addFlagIfSupported(self, flag):
        if isinstance(flag, list):
            flags = list(flag)
        else:
            flags = [flag]
        if self.hasCompileFlag(flags):
            self.flags += flags
            return True
        else:
            return False

    def addCompileFlagIfSupported(self, flag):
        if isinstance(flag, list):
            flags = list(flag)
        else:
            flags = [flag]
        if self.hasCompileFlag(flags):
            self.compile_flags += flags
            return True
        else:
            return False

    def hasWarningFlag(self, flag):
        """
        hasWarningFlag - Test if the compiler supports a given warning flag.
        Unlike addCompileFlagIfSupported, this function detects when
        "-Wno-<warning>" flags are unsupported. If flag is a
        "-Wno-<warning>" GCC will not emit an unknown option diagnostic unless
        another error is triggered during compilation.
        """
        assert isinstance(flag, str)
        assert flag.startswith('-W')
        if not flag.startswith('-Wno-'):
            return self.hasCompileFlag(flag)
        flags = ['-Werror', flag]
        old_use_warnings = self.use_warnings
        self.useWarnings(False)
        cmd = self.compileCmd('-', os.devnull, flags)
        self.useWarnings(old_use_warnings)
        # Remove '-v' because it will cause the command line invocation
        # to be printed as part of the error output.
        # TODO(EricWF): Are there other flags we need to worry about?
        if '-v' in cmd:
            cmd.remove('-v')
        out, err, rc = libcxx.util.executeCommand(
            cmd, input=libcxx.util.to_bytes('#error\n'))
        assert rc != 0
        if flag in err:
            return False
        return True

    def addWarningFlagIfSupported(self, flag):
        if self.hasWarningFlag(flag):
            if flag not in self.warning_flags:
                self.warning_flags += [flag]
            return True
        return False
cccl-main
libcudacxx/libcxx/utils/libcxx/compiler.py
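A minimal usage sketch for the CXXCompiler class above. It assumes clang++ is installed, the script runs from a checkout where the libcxx package is importable, and 'test.cpp' is a hypothetical source file.

# Hypothetical use of CXXCompiler to probe a flag and build a test program.
from libcxx.compiler import CXXCompiler

cxx = CXXCompiler('clang++', None)
print(cxx.type, cxx.version)           # detected via dumpMacros(), e.g. 'clang'
if cxx.hasCompileFlag('-std=c++17'):   # probed by compiling empty.cpp
    cxx.compile_flags += ['-std=c++17']
cmd, out, err, rc = cxx.compileLink(['test.cpp'], out='test.exe')
assert rc == 0, err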
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

from contextlib import contextmanager
import errno
import os
import platform
import signal
import subprocess
import sys
import tempfile
import threading


# FIXME: Most of these functions are cribbed from LIT
def to_bytes(str):
    # Encode to UTF-8 to get binary data.
    if isinstance(str, bytes):
        return str
    return str.encode('utf-8')

def to_string(bytes):
    if isinstance(bytes, str):
        return bytes
    return to_bytes(bytes)

def convert_string(bytes):
    try:
        return to_string(bytes.decode('utf-8'))
    except AttributeError:  # 'str' object has no attribute 'decode'.
        return str(bytes)
    except UnicodeError:
        return str(bytes)

def cleanFile(filename):
    try:
        os.remove(filename)
    except OSError:
        pass


@contextmanager
def guardedTempFilename(suffix='', prefix='', dir=None):
    # Creates and yields a temporary filename within a with statement. The
    # file is removed upon scope exit.
    handle, name = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir)
    os.close(handle)
    yield name
    cleanFile(name)


@contextmanager
def guardedFilename(name):
    # Yields a filename within a with statement. The file is removed upon
    # scope exit.
    yield name
    cleanFile(name)


@contextmanager
def nullContext(value):
    # Yields a variable within a with statement. No action is taken upon
    # scope exit.
    yield value


def makeReport(cmd, out, err, rc):
    report = "Command: %s\n" % cmd
    report += "Exit Code: %d\n" % rc
    if out:
        report += "Standard Output:\n--\n%s--\n" % out
    if err:
        report += "Standard Error:\n--\n%s--\n" % err
    report += '\n'
    return report


def capture(args, env=None):
    """capture(command) - Run the given command (or argv list) in a shell and
    return the standard output. Raises a CalledProcessError if the command
    exits with a non-zero status."""
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         env=env)
    out, err = p.communicate()
    out = convert_string(out)
    err = convert_string(err)
    if p.returncode != 0:
        raise subprocess.CalledProcessError(cmd=args,
                                            returncode=p.returncode,
                                            output="{}\n{}".format(out, err))
    return out


def which(command, paths=None):
    """which(command, [paths]) - Look up the given command in the paths string
    (or the PATH environment variable, if unspecified)."""
    if paths is None:
        paths = os.environ.get('PATH', '')

    # Check for absolute match first.
    if os.path.isfile(command):
        return command

    # Would be nice if Python had a lib function for this.
    if not paths:
        paths = os.defpath

    # Get suffixes to search.
    # On Cygwin, 'PATHEXT' may exist but it should not be used.
    if os.pathsep == ';':
        pathext = os.environ.get('PATHEXT', '').split(';')
    else:
        pathext = ['']

    # Search the paths...
    for path in paths.split(os.pathsep):
        for ext in pathext:
            p = os.path.join(path, command + ext)
            if os.path.exists(p) and not os.path.isdir(p):
                return p

    return None


def checkToolsPath(dir, tools):
    for tool in tools:
        if not os.path.exists(os.path.join(dir, tool)):
            return False
    return True


def whichTools(tools, paths):
    for path in paths.split(os.pathsep):
        if checkToolsPath(path, tools):
            return path
    return None


def mkdir_p(path):
    """mkdir_p(path) - Make the "path" directory, if it does not exist; this
    will also make directories for any missing parent directories."""
    if not path or os.path.exists(path):
        return

    parent = os.path.dirname(path)
    if parent != path:
        mkdir_p(parent)

    try:
        os.mkdir(path)
    except OSError:
        e = sys.exc_info()[1]
        # Ignore EEXIST, which may occur during a race condition.
        if e.errno != errno.EEXIST:
            raise


class ExecuteCommandTimeoutException(Exception):
    def __init__(self, msg, out, err, exitCode):
        assert isinstance(msg, str)
        assert isinstance(out, str)
        assert isinstance(err, str)
        assert isinstance(exitCode, int)
        self.msg = msg
        self.out = out
        self.err = err
        self.exitCode = exitCode


# Close extra file handles on UNIX (on Windows this cannot be done while
# also redirecting input).
kUseCloseFDs = not (platform.system() == 'Windows')

def executeCommand(command, cwd=None, env=None, input=None, timeout=0):
    """
    Execute command ``command`` (list of arguments or string) with
    * working directory ``cwd`` (str), use None to use the current working
      directory
    * environment ``env`` (dict), use None for none
    * Input to the command ``input`` (str), use None to pass no input.
    * Max execution time ``timeout`` (int) seconds. Use 0 for no timeout.

    Returns a tuple (out, err, exitCode) where
    * ``out`` (str) is the standard output of running the command
    * ``err`` (str) is the standard error of running the command
    * ``exitCode`` (int) is the exitCode of running the command

    If the timeout is hit an ``ExecuteCommandTimeoutException``
    is raised.
    """
    if input is not None:
        input = to_bytes(input)
    p = subprocess.Popen(command, cwd=cwd,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         env=env, close_fds=kUseCloseFDs)
    timerObject = None
    # FIXME: Because of the way nested function scopes work in Python 2.x we
    # need to use a reference to a mutable object rather than a plain
    # bool. In Python 3 we could use the "nonlocal" keyword but we need
    # to support Python 2 as well.
    hitTimeOut = [False]
    try:
        if timeout > 0:
            def killProcess():
                # We may be invoking a shell so we need to kill the
                # process and all its children.
                hitTimeOut[0] = True
                killProcessAndChildren(p.pid)

            timerObject = threading.Timer(timeout, killProcess)
            timerObject.start()

        out, err = p.communicate(input=input)
        exitCode = p.wait()
    finally:
        if timerObject is not None:
            timerObject.cancel()

    # Ensure the resulting output is always of string type.
    out = convert_string(out)
    err = convert_string(err)

    if hitTimeOut[0]:
        raise ExecuteCommandTimeoutException(
            msg='Reached timeout of {} seconds'.format(timeout),
            out=out,
            err=err,
            exitCode=exitCode
        )

    # Detect Ctrl-C in subprocess.
    if exitCode == -signal.SIGINT:
        raise KeyboardInterrupt

    return out, err, exitCode


def killProcessAndChildren(pid):
    """
    This function kills a process with ``pid`` and all its running children
    (recursively). It is currently implemented using the psutil module which
    provides a simple platform neutral implementation.

    TODO: Reimplement this without using psutil so we can remove
    our dependency on it.
    """
    if platform.system() == 'AIX':
        subprocess.call('kill -kill $(ps -o pid= -L{})'.format(pid),
                        shell=True)
    else:
        import psutil
        try:
            psutilProc = psutil.Process(pid)
            # Handle the different psutil API versions
            try:
                # psutil >= 2.x
                children_iterator = psutilProc.children(recursive=True)
            except AttributeError:
                # psutil 1.x
                children_iterator = psutilProc.get_children(recursive=True)
            for child in children_iterator:
                try:
                    child.kill()
                except psutil.NoSuchProcess:
                    pass
            psutilProc.kill()
        except psutil.NoSuchProcess:
            pass


def executeCommandVerbose(cmd, *args, **kwargs):
    """
    Execute a command and print its output on failure.
    """
    out, err, exitCode = executeCommand(cmd, *args, **kwargs)
    if exitCode != 0:
        report = makeReport(cmd, out, err, exitCode)
        report += "\n\nFailed!"
        sys.stderr.write('%s\n' % report)
    return out, err, exitCode
cccl-main
libcudacxx/libcxx/utils/libcxx/util.py
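A minimal usage sketch for executeCommand above. The command is hypothetical and assumes a POSIX 'echo' on PATH; the timeout path raises the exception type defined in this module.

# Hypothetical use of executeCommand with a timeout.
from libcxx.util import executeCommand, ExecuteCommandTimeoutException

try:
    out, err, rc = executeCommand(['echo', 'hello'], timeout=5)
    print(rc, out.strip())            # 0 hello
except ExecuteCommandTimeoutException as e:
    print('timed out:', e.msg)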
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

"""libcxx python utilities"""

__author__ = 'Eric Fiselier'
__email__ = '[email protected]'
__versioninfo__ = (0, 1, 0)
__version__ = ' '.join(str(v) for v in __versioninfo__) + 'dev'

__all__ = []
cccl-main
libcudacxx/libcxx/utils/libcxx/__init__.py
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

import os
import inspect


def trace_function(function, log_calls, log_results, label=''):
    def wrapper(*args, **kwargs):
        kwarg_strs = ['{}={}'.format(k, v) for (k, v) in kwargs.items()]
        arg_str = ', '.join([str(a) for a in args] + kwarg_strs)
        call_str = '{}({})'.format(function.func_name, arg_str)

        # Perform the call itself, logging before, after, and anything thrown.
        try:
            if log_calls:
                print('{}: Calling {}'.format(label, call_str))
            res = function(*args, **kwargs)
            if log_results:
                print('{}: {} -> {}'.format(label, call_str, res))
            return res
        except Exception as ex:
            if log_results:
                print('{}: {} raised {}'.format(label, call_str, type(ex)))
            raise ex

    return wrapper


def trace_object(obj, log_calls, log_results, label=''):
    for name, member in inspect.getmembers(obj):
        if inspect.ismethod(member):
            # Skip meta-functions, decorate everything else
            if not member.func_name.startswith('__'):
                setattr(obj, name, trace_function(member, log_calls,
                                                  log_results, label))
    return obj
cccl-main
libcudacxx/libcxx/utils/libcxx/test/tracing.py
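A minimal usage sketch for trace_object above. The class is hypothetical, and since tracing.py reads the Python 2 only 'func_name' attribute, this sketch assumes a Python 2 interpreter.

# Hypothetical tracing of a small object's method calls (Python 2).
from libcxx.test.tracing import trace_object

class Greeter(object):
    def greet(self, name):
        return 'hello ' + name

g = trace_object(Greeter(), log_calls=True, log_results=True, label='demo')
g.greet('world')   # logs the call and its result before returning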
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

import locale
import os
import platform
import pkgutil
import pipes
import re
import shlex
import shutil
import sys

from libcxx.compiler import CXXCompiler
from libcxx.test.target_info import make_target_info
from libcxx.test.executor import *
from libcxx.test.tracing import *
import libcxx.util


def loadSiteConfig(lit_config, config, param_name, env_name):
    # We haven't loaded the site specific configuration (the user is
    # probably trying to run on a test file directly, and either the site
    # configuration hasn't been created by the build system, or we are in an
    # out-of-tree build situation).
    site_cfg = lit_config.params.get(param_name,
                                     os.environ.get(env_name))
    if not site_cfg:
        lit_config.warning('No site specific configuration file found!'
                           ' Running the tests in the default configuration.')
    elif not os.path.isfile(site_cfg):
        lit_config.fatal(
            "Specified site configuration file does not exist: '%s'" %
            site_cfg)
    else:
        lit_config.note('using site specific configuration at %s' % site_cfg)
        ld_fn = lit_config.load_config

        # Null out the load_config function so that lit.site.cfg doesn't
        # recursively load a config even if it tries.
        # TODO: This is one hell of a hack. Fix it.
        def prevent_reload_fn(*args, **kwargs):
            pass
        lit_config.load_config = prevent_reload_fn
        ld_fn(config, site_cfg)
        lit_config.load_config = ld_fn


# Extract the value of a numeric macro such as __cplusplus or a feature-test
# macro.
def intMacroValue(token):
    return int(token.rstrip('LlUu'))


class Configuration(object):
    # pylint: disable=redefined-outer-name
    def __init__(self, lit_config, config):
        self.lit_config = lit_config
        self.config = config
        self.is_windows = platform.system() == 'Windows'
        self.cxx = None
        self.cxx_is_clang_cl = None
        self.cxx_stdlib_under_test = None
        self.project_obj_root = None
        self.libcxx_src_root = None
        self.libcxx_obj_root = None
        self.cxx_library_root = None
        self.cxx_runtime_root = None
        self.abi_library_root = None
        self.link_shared = self.get_lit_bool('enable_shared', default=True)
        self.debug_build = self.get_lit_bool('debug_build', default=False)
        self.exec_env = dict(os.environ)
        self.use_target = False
        self.use_system_cxx_lib = False
        self.use_clang_verify = False
        self.long_tests = None
        self.execute_external = False

    def get_lit_conf(self, name, default=None):
        val = self.lit_config.params.get(name, None)
        if val is None:
            val = getattr(self.config, name, None)
            if val is None:
                val = default
        return val

    def get_lit_bool(self, name, default=None, env_var=None):
        def check_value(value, var_name):
            if value is None:
                return default
            if isinstance(value, bool):
                return value
            if not isinstance(value, str):
                raise TypeError('expected bool or string')
            if value.lower() in ('1', 'true'):
                return True
            if value.lower() in ('', '0', 'false'):
                return False
            self.lit_config.fatal(
                "parameter '{}' should be true or false".format(var_name))

        conf_val = self.get_lit_conf(name)
        if env_var is not None and env_var in os.environ and \
                os.environ[env_var] is not None:
            val = os.environ[env_var]
            if conf_val is not None:
                self.lit_config.warning(
                    'Environment variable %s=%s is overriding explicit '
                    '--param=%s=%s' % (env_var, val, name, conf_val))
            return check_value(val, env_var)
        return check_value(conf_val, name)

    def get_modules_enabled(self):
        return self.get_lit_bool('enable_modules',
                                 default=False,
                                 env_var='LIBCXX_ENABLE_MODULES')

    def make_static_lib_name(self, name):
        """Return the full filename for the specified library name"""
        if self.is_windows:
            assert name == 'c++'  # Only allow libc++ to use this function for now.
            return 'lib' + name + '.lib'
        else:
            return 'lib' + name + '.a'

    def configure(self):
        self.configure_executor()
        self.configure_use_system_cxx_lib()
        self.configure_target_info()
        self.configure_cxx()
        self.configure_triple()
        self.configure_deployment()
        self.configure_src_root()
        self.configure_obj_root()
        self.configure_cxx_stdlib_under_test()
        self.configure_cxx_library_root()
        self.configure_use_clang_verify()
        self.configure_use_thread_safety()
        self.configure_no_execute()
        self.configure_execute_external()
        self.configure_ccache()
        self.configure_compile_flags()
        self.configure_filesystem_compile_flags()
        self.configure_link_flags()
        self.configure_env()
        self.configure_color_diagnostics()
        self.configure_debug_mode()
        self.configure_warnings()
        self.configure_sanitizer()
        self.configure_coverage()
        self.configure_modules()
        self.configure_coroutines()
        self.configure_substitutions()
        self.configure_features()

    def print_config_info(self):
        # Print the final compile and link flags.
        self.lit_config.note('Using compiler: %s' % self.cxx.path)
        self.lit_config.note('Using flags: %s' % self.cxx.flags)
        if self.cxx.use_modules:
            self.lit_config.note('Using modules flags: %s' %
                                 self.cxx.modules_flags)
        self.lit_config.note('Using compile flags: %s'
                             % self.cxx.compile_flags)
        if len(self.cxx.warning_flags):
            self.lit_config.note('Using warnings: %s' % self.cxx.warning_flags)
        self.lit_config.note('Using link flags: %s' % self.cxx.link_flags)
        # Print as list to prevent "set([...])" from being printed.
        self.lit_config.note('Using available_features: %s' %
                             list(self.config.available_features))
        show_env_vars = {}
        for k, v in self.exec_env.items():
            if k not in os.environ or os.environ[k] != v:
                show_env_vars[k] = v
        self.lit_config.note('Adding environment variables: %r'
                             % show_env_vars)
        sys.stderr.flush()  # Force flushing to avoid broken output on Windows

    def get_test_format(self):
        from libcxx.test.format import LibcxxTestFormat
        return LibcxxTestFormat(
            self.cxx,
            self.use_clang_verify,
            self.execute_external,
            self.executor,
            exec_env=self.exec_env)

    def configure_executor(self):
        exec_str = self.get_lit_conf('executor', "None")
        te = eval(exec_str)
        if te:
            self.lit_config.note("Using executor: %r" % exec_str)
            if self.lit_config.useValgrind:
                # We have no way of knowing where in the chain the
                # ValgrindExecutor is supposed to go. It is likely
                # that the user wants it at the end, but we have no
                # way of getting at that easily.
                self.lit_config.fatal("Cannot infer how to create a Valgrind "
                                      " executor.")
        else:
            te = LocalExecutor()
            if self.lit_config.useValgrind:
                te = ValgrindExecutor(self.lit_config.valgrindArgs, te)
        self.executor = te

    def configure_target_info(self):
        self.target_info = make_target_info(self)

    def configure_cxx(self):
        # Gather various compiler parameters.
        cxx = self.get_lit_conf('cxx_under_test')
        cxx_first_arg = self.get_lit_conf('cxx_first_arg')
        self.cxx_is_clang_cl = cxx is not None and \
            os.path.basename(cxx) == 'clang-cl.exe'
        # If no specific cxx_under_test was given, attempt to infer it as
        # clang++.
        if cxx is None or self.cxx_is_clang_cl:
            search_paths = self.config.environment['PATH']
            if cxx is not None and os.path.isabs(cxx):
                search_paths = os.path.dirname(cxx)
            clangxx = libcxx.util.which('clang++', search_paths)
            if clangxx:
                cxx = clangxx
                self.lit_config.note(
                    "inferred cxx_under_test as: %r" % cxx)
            elif self.cxx_is_clang_cl:
                self.lit_config.fatal('Failed to find clang++ substitution for'
                                      ' clang-cl')
        if not cxx:
            self.lit_config.fatal('must specify user parameter cxx_under_test '
                                  '(e.g., --param=cxx_under_test=clang++)')
        self.cxx = CXXCompiler(cxx, cxx_first_arg) if not self.cxx_is_clang_cl else \
            self._configure_clang_cl(cxx)
        cxx_type = self.cxx.type
        if cxx_type is not None:
            assert self.cxx.version is not None
            maj_v, min_v, patch_v = self.cxx.version
            self.config.available_features.add(cxx_type)
            self.config.available_features.add('%s-%s' % (cxx_type, maj_v))
            self.config.available_features.add('%s-%s.%s' % (
                cxx_type, maj_v, min_v))
            self.config.available_features.add('%s-%s.%s.%s' % (
                cxx_type, maj_v, min_v, patch_v))
        self.lit_config.note("detected cxx.type as: {}".format(
            self.cxx.type))
        self.lit_config.note("detected cxx.version as: {}".format(
            self.cxx.version))
        self.lit_config.note("detected cxx.default_dialect as: {}".format(
            self.cxx.default_dialect))
        self.lit_config.note("detected cxx.is_nvrtc as: {}".format(
            self.cxx.is_nvrtc))
        self.cxx.compile_env = dict(os.environ)
        # 'CCACHE_CPP2' prevents ccache from stripping comments while
        # preprocessing. This is required to prevent stripping of '-verify'
        # comments.
        self.cxx.compile_env['CCACHE_CPP2'] = '1'

        if self.cxx.type == 'nvcc' and not self.cxx.is_nvrtc:
            nvcc_host_compiler = self.get_lit_conf('nvcc_host_compiler')
            if len(nvcc_host_compiler.strip()) == 0:
                if platform.system() == 'Darwin':
                    nvcc_host_compiler = 'clang'
                elif platform.system() == 'Windows':
                    nvcc_host_compiler = 'cl.exe'
                else:
                    nvcc_host_compiler = 'gcc'

            self.host_cxx = CXXCompiler(nvcc_host_compiler, None)
            self.host_cxx_type = self.host_cxx.type
            if self.host_cxx_type is not None:
                assert self.host_cxx.version is not None
                maj_v, min_v, _ = self.host_cxx.version
                self.config.available_features.add(self.host_cxx_type)
                self.config.available_features.add('%s-%s' % (
                    self.host_cxx_type, maj_v))
                self.config.available_features.add('%s-%s.%s' % (
                    self.host_cxx_type, maj_v, min_v))
            self.lit_config.note("detected host_cxx.type as: {}".format(
                self.host_cxx.type))
            self.lit_config.note("detected host_cxx.version as: {}".format(
                self.host_cxx.version))
            self.lit_config.note("detected host_cxx.default_dialect as: {}".format(
                self.host_cxx.default_dialect))
            self.lit_config.note("detected host_cxx.is_nvrtc as: {}".format(
                self.host_cxx.is_nvrtc))

        if 'icc' in self.config.available_features:
            self.cxx.link_flags += ['-lirc']

    def _configure_clang_cl(self, clang_path):
        def _split_env_var(var):
            return [p.strip() for p in os.environ.get(var, '').split(';')
                    if p.strip()]

        def _prefixed_env_list(var, prefix):
            from itertools import chain
            return list(chain.from_iterable((prefix, path)
                                            for path in _split_env_var(var)))

        assert self.cxx_is_clang_cl
        flags = []
        compile_flags = _prefixed_env_list('INCLUDE', '-isystem')
        link_flags = _prefixed_env_list('LIB', '-L')
        for path in _split_env_var('LIB'):
            self.add_path(self.exec_env, path)
        return CXXCompiler(clang_path, None, flags=flags,
                           compile_flags=compile_flags,
                           link_flags=link_flags)

    def _dump_macros_verbose(self, *args, **kwargs):
        macros_or_error = self.cxx.dumpMacros(*args, **kwargs)
        if isinstance(macros_or_error, tuple):
            cmd, out, err, rc = macros_or_error
            report = libcxx.util.makeReport(cmd, out, err, rc)
            report += "Compiler failed unexpectedly when dumping macros!"
            self.lit_config.fatal(report)
            return None
        assert isinstance(macros_or_error, dict)
        return macros_or_error

    def configure_src_root(self):
        self.libcxx_src_root = self.get_lit_conf(
            'libcxx_src_root', os.path.dirname(self.config.test_source_root))

    def configure_obj_root(self):
        self.project_obj_root = self.get_lit_conf('project_obj_root')
        self.libcxx_obj_root = self.get_lit_conf('libcxx_obj_root')
        if not self.libcxx_obj_root and self.project_obj_root is not None:
            possible_roots = [
                os.path.join(self.project_obj_root, 'libcxx'),
                os.path.join(self.project_obj_root, 'projects', 'libcxx'),
                os.path.join(self.project_obj_root, 'runtimes', 'libcxx'),
            ]
            for possible_root in possible_roots:
                if os.path.isdir(possible_root):
                    self.libcxx_obj_root = possible_root
                    break
            else:
                self.libcxx_obj_root = self.project_obj_root

    def configure_cxx_library_root(self):
        self.cxx_library_root = self.get_lit_conf('cxx_library_root',
                                                  self.libcxx_obj_root)
        self.cxx_runtime_root = self.get_lit_conf('cxx_runtime_root',
                                                  self.cxx_library_root)

    def configure_use_system_cxx_lib(self):
        # This test suite supports testing against either the system library or
        # the locally built one; the former mode is useful for testing ABI
        # compatibility between the current headers and a shipping dynamic
        # library.
        # Default to testing against the locally built libc++ library.
        self.use_system_cxx_lib = self.get_lit_conf('use_system_cxx_lib')
        if self.use_system_cxx_lib == 'true':
            self.use_system_cxx_lib = True
        elif self.use_system_cxx_lib == 'false':
            self.use_system_cxx_lib = False
        elif self.use_system_cxx_lib:
            assert os.path.isdir(self.use_system_cxx_lib), \
                "the specified use_system_cxx_lib parameter (%s) is not a valid directory" % self.use_system_cxx_lib
            self.use_system_cxx_lib = os.path.abspath(self.use_system_cxx_lib)
        self.lit_config.note(
            "inferred use_system_cxx_lib as: %r" % self.use_system_cxx_lib)

    def configure_cxx_stdlib_under_test(self):
        self.cxx_stdlib_under_test = self.get_lit_conf(
            'cxx_stdlib_under_test', 'libc++')
        if self.cxx_stdlib_under_test not in \
                ['libc++', 'libstdc++', 'msvc', 'cxx_default']:
            self.lit_config.fatal(
                'unsupported value for "cxx_stdlib_under_test": %s'
                % self.cxx_stdlib_under_test)
        self.config.available_features.add(self.cxx_stdlib_under_test)
        if self.cxx_stdlib_under_test == 'libstdc++':
            self.config.available_features.add('libstdc++')
            # Manually enable the experimental and filesystem tests for
            # libstdc++ if the options aren't present.
            # FIXME this is a hack.
            if self.get_lit_conf('enable_experimental') is None:
                self.config.enable_experimental = 'true'

    def configure_use_clang_verify(self):
        '''If set, run clang with -verify on failing tests.'''
        self.use_clang_verify = self.get_lit_bool('use_clang_verify')
        if self.use_clang_verify is None:
            # NOTE: We do not test for the -verify flag directly because
            # -verify will always exit with non-zero on an empty file.
            self.use_clang_verify = self.cxx.isVerifySupported()
            self.lit_config.note(
                "inferred use_clang_verify as: %r" % self.use_clang_verify)
        if self.use_clang_verify:
            self.config.available_features.add('verify-support')

    def configure_use_thread_safety(self):
        '''If supported, enable -Werror=thread-safety annotations.'''
        has_thread_safety = self.cxx.hasCompileFlag('-Werror=thread-safety')
        if has_thread_safety:
            self.cxx.compile_flags += ['-Werror=thread-safety']
            self.config.available_features.add('thread-safety')
            self.lit_config.note("enabling thread-safety annotations")

    def configure_execute_external(self):
        # Choose between lit's internal shell pipeline runner and a real shell.
        # If LIT_USE_INTERNAL_SHELL is in the environment, we use that as the
        # default value. Otherwise we ask the target_info.
        use_lit_shell_default = os.environ.get('LIT_USE_INTERNAL_SHELL')
        if use_lit_shell_default is not None:
            use_lit_shell_default = use_lit_shell_default != '0'
        else:
            use_lit_shell_default = self.target_info.use_lit_shell_default()
        # Check for the command line parameter using the default value if it
        # is not present.
        use_lit_shell = self.get_lit_bool('use_lit_shell',
                                          use_lit_shell_default)
        self.execute_external = not use_lit_shell

    def configure_no_execute(self):
        if type(self.executor) == NoopExecutor:
            self.config.available_features.add('no_execute')

    def configure_ccache(self):
        use_ccache_default = os.environ.get('CMAKE_CXX_COMPILER_LAUNCHER') is not None
        use_ccache = self.get_lit_bool('use_ccache', use_ccache_default)
        if use_ccache:
            self.cxx.use_ccache = True
            self.lit_config.note('enabling ccache')

    def add_deployment_feature(self, feature):
        (arch, name, version) = self.config.deployment
        self.config.available_features.add('%s=%s-%s' % (feature, arch, name))
        self.config.available_features.add('%s=%s' % (feature, name))
        self.config.available_features.add('%s=%s%s' % (feature, name, version))

    def configure_features(self):
        additional_features = self.get_lit_conf('additional_features')
        if additional_features:
            for f in additional_features.split(','):
                self.config.available_features.add(f.strip())
        self.target_info.add_locale_features(self.config.available_features)

        target_platform = self.target_info.platform()

        # Write an "available feature" that combines the triple when
        # use_system_cxx_lib is enabled. This is so that we can easily write
        # XFAIL markers for tests that are known to fail with versions of
        # libc++ as were shipped with a particular triple.
        if self.use_system_cxx_lib:
            self.config.available_features.add('with_system_cxx_lib')
            self.config.available_features.add(
                'with_system_cxx_lib=%s' % self.config.target_triple)

            # Add subcomponents individually.
            target_components = self.config.target_triple.split('-')
            for component in target_components:
                self.config.available_features.add(
                    'with_system_cxx_lib=%s' % component)

            # Add available features for more generic versions of the target
            # triple attached to with_system_cxx_lib.
            if self.use_deployment:
                self.add_deployment_feature('with_system_cxx_lib')

        # Configure the availability feature. Availability is only enabled
        # with libc++, because other standard libraries do not provide
        # availability markup.
        if self.use_deployment and self.cxx_stdlib_under_test == 'libc++':
            self.config.available_features.add('availability')
            self.add_deployment_feature('availability')

        if platform.system() == 'Darwin':
            self.config.available_features.add('apple-darwin')

        # Insert the platform name into the available features as a lower case.
        self.config.available_features.add(target_platform)

        # Simulator testing can take a really long time for some of these
        # tests so add a feature check so we can REQUIRES: long_tests in them
        self.long_tests = self.get_lit_bool('long_tests')
        if self.long_tests is None:
            # Default to running long tests.
            self.long_tests = True
            self.lit_config.note(
                "inferred long_tests as: %r" % self.long_tests)
        if self.long_tests:
            self.config.available_features.add('long_tests')

        if not self.get_lit_bool('enable_filesystem', default=True):
            self.config.available_features.add('c++filesystem-disabled')
            self.config.available_features.add('dylib-has-no-filesystem')

        # Run a compile test for the -fsized-deallocation flag. This is needed
        # in test/std/language.support/support.dynamic/new.delete
        if self.cxx.hasCompileFlag('-fsized-deallocation'):
            self.config.available_features.add('-fsized-deallocation')

        if self.cxx.hasCompileFlag('-faligned-allocation'):
            self.config.available_features.add('-faligned-allocation')
        else:
            # FIXME remove this once more than just clang-4.0 support
            # C++17 aligned allocation.
            self.config.available_features.add('no-aligned-allocation')

        if self.cxx.hasCompileFlag('-fdelayed-template-parsing'):
            self.config.available_features.add('fdelayed-template-parsing')

        if self.get_lit_bool('has_libatomic', False):
            self.config.available_features.add('libatomic')

        if 'msvc' not in self.config.available_features:
            macros = self._dump_macros_verbose()
            if '__cpp_if_constexpr' not in macros:
                self.config.available_features.add('libcpp-no-if-constexpr')
            if '__cpp_structured_bindings' not in macros:
                self.config.available_features.add('libcpp-no-structured-bindings')
            if '__cpp_deduction_guides' not in macros or \
                    intMacroValue(macros['__cpp_deduction_guides']) < 201611:
                self.config.available_features.add('libcpp-no-deduction-guides')

        if self.is_windows:
            self.config.available_features.add('windows')
            if self.cxx_stdlib_under_test == 'libc++':
                # LIBCXX-WINDOWS-FIXME is the feature name used to XFAIL the
                # initial Windows failures until they can be properly diagnosed
                # and fixed. This allows easier detection of new test failures
                # and regressions. Note: New failures should not be suppressed
                # using this feature. (Also see llvm.org/PR32730)
                self.config.available_features.add('LIBCXX-WINDOWS-FIXME')

        if 'msvc' not in self.config.available_features:
            # Attempt to detect the glibc version by querying for __GLIBC__
            # in 'features.h'.
            macros = self.cxx.dumpMacros(flags=['-include', 'features.h'])
            if isinstance(macros, dict) and '__GLIBC__' in macros:
                maj_v, min_v = (macros['__GLIBC__'], macros['__GLIBC_MINOR__'])
                self.config.available_features.add('glibc')
                self.config.available_features.add('glibc-%s' % maj_v)
                self.config.available_features.add('glibc-%s.%s' % (maj_v, min_v))

        libcxx_gdb = self.get_lit_conf('libcxx_gdb')
        if libcxx_gdb and 'NOTFOUND' not in libcxx_gdb:
            self.config.available_features.add('libcxx_gdb')
            self.cxx.libcxx_gdb = libcxx_gdb

        # Support Objective-C++ only on MacOS and if the compiler supports it.
        if self.target_info.platform() == "darwin" and \
                self.target_info.is_host_macosx() and \
                self.cxx.hasCompileFlag(["-x", "objective-c++", "-fobjc-arc"]):
            self.config.available_features.add("objective-c++")

    def configure_compile_flags(self):
        self.configure_default_compile_flags()
        # Configure extra flags
        compile_flags_str = self.get_lit_conf('compile_flags', '')
        self.cxx.compile_flags += shlex.split(compile_flags_str)
        if self.is_windows:
            # FIXME: Can we remove this?
            self.cxx.compile_flags += ['-D_CRT_SECURE_NO_WARNINGS']
            # Required so that tests using min/max don't fail on Windows,
            # and so that those tests don't have to be changed to tolerate
            # this insanity.
            self.cxx.compile_flags += ['-DNOMINMAX']
            if 'msvc' in self.config.available_features:
                if self.cxx.type == 'nvcc':
                    self.cxx.compile_flags += ['-Xcompiler']
                self.cxx.compile_flags += ['/bigobj']
        additional_flags = self.get_lit_conf('test_compiler_flags')
        if additional_flags:
            self.cxx.compile_flags += shlex.split(additional_flags)
        compute_archs = self.get_lit_conf('compute_archs')
        if self.cxx.is_nvrtc is True:
            self.config.available_features.add("nvrtc")
        if self.cxx.type == 'nvcc':
            self.cxx.compile_flags += ['--extended-lambda']
        if compute_archs and self.cxx.type == 'nvcc':
            pre_sm_32 = False
            pre_sm_60 = False
            pre_sm_70 = False
            pre_sm_90 = False
            compute_archs = [int(a) for a in sorted(shlex.split(compute_archs))]
            for arch in compute_archs:
                if arch < 32: pre_sm_32 = True
                if arch < 60: pre_sm_60 = True
                if arch < 70: pre_sm_70 = True
                if arch < 90: pre_sm_90 = True
                arch_flag = '-gencode=arch=compute_{0},code=sm_{0}'.format(arch)
                self.cxx.compile_flags += [arch_flag]
            enable_compute_future = self.get_lit_conf('enable_compute_future')
            if enable_compute_future:
                arch_flag = '-gencode=arch=compute_{0},code=compute_{0}'.format(arch)
                self.cxx.compile_flags += [arch_flag]
            if pre_sm_32:
                self.config.available_features.add("pre-sm-32")
            if pre_sm_60:
                self.config.available_features.add("pre-sm-60")
            if pre_sm_70:
                self.config.available_features.add("pre-sm-70")
            if pre_sm_90:
                self.config.available_features.add("pre-sm-90")

    def configure_default_compile_flags(self):
        nvcc_host_compiler = self.get_lit_conf('nvcc_host_compiler')
        if nvcc_host_compiler and self.cxx.type == 'nvcc':
            self.cxx.compile_flags += ['-ccbin={0}'.format(nvcc_host_compiler)]

        # Try and get the std version from the command line. Fall back to the
        # default given in lit.site.cfg if it is not present. If no default is
        # present then force c++11.
        std = self.get_lit_conf('std')
        if not std:
            # Choose the newest possible language dialect if none is given.
            possible_stds = ['c++2a', 'c++17', 'c++1z', 'c++14', 'c++11',
                             'c++03']
            if self.cxx.type == 'gcc':
                maj_v, _, _ = self.cxx.version
                maj_v = int(maj_v)
                if maj_v < 7:
                    possible_stds.remove('c++1z')
                    possible_stds.remove('c++17')
                # FIXME: How many C++14 tests actually fail under GCC 5 and 6?
                # Should we XFAIL them individually instead?
                if maj_v < 6:
                    possible_stds.remove('c++14')
            for s in possible_stds:
                cxx = self.cxx
                success = True

                if self.cxx.type == 'nvcc':
                    # NVCC warns, but doesn't error, if the host compiler
                    # doesn't support the dialect. It's also possible that the
                    # host compiler supports the dialect, but NVCC doesn't.

                    # So, first we need to check if NVCC supports the
                    # dialect...
                    if not self.cxx.hasCompileFlag('-std=%s' % s):
                        # If it doesn't, give up on this dialect.
                        success = False

                    # ... then we need to check if host compiler supports the
                    # dialect.
                    cxx = self.host_cxx

                if cxx.type == 'msvc':
                    if not cxx.hasCompileFlag('/std:%s' % s):
                        success = False
                else:
                    if not cxx.hasCompileFlag('-std=%s' % s):
                        success = False

                if success:
                    std = s
                    self.lit_config.note('inferred language dialect as: %s'
                                         % std)
                    break

        if std:
            # We found a dialect flag.
            if self.cxx.type == 'msvc':
                self.cxx.compile_flags += ['/std:{0}'.format(std)]
            else:
                self.cxx.compile_flags += ['-std={0}'.format(std)]
        if not std:
            # There is no dialect flag. This happens with older MSVC.
            if self.cxx.type == 'nvcc':
                std = self.host_cxx.default_dialect
            else:
                std = self.cxx.default_dialect
            self.lit_config.note('using default language dialect: %s' % std)

        # Chain the replacements so each one applies to the previous result.
        std_feature = std.replace('gnu++', 'c++')
        std_feature = std_feature.replace('1z', '17')
        std_feature = std_feature.replace('2a', '20')
        self.config.available_features.add(std_feature)
        # Configure include paths
        self.configure_compile_flags_header_includes()
        self.target_info.add_cxx_compile_flags(self.cxx.compile_flags)
        # Configure feature flags.
        self.configure_compile_flags_exceptions()
        self.configure_compile_flags_rtti()
        self.configure_compile_flags_abi_version()
        enable_32bit = self.get_lit_bool('enable_32bit', False)
        if enable_32bit:
            self.cxx.flags += ['-m32']
        # Use verbose output for better errors
        self.cxx.flags += ['-v']
        sysroot = self.get_lit_conf('sysroot')
        if sysroot:
            self.cxx.flags += ['--sysroot=' + sysroot]
        gcc_toolchain = self.get_lit_conf('gcc_toolchain')
        if gcc_toolchain:
            self.cxx.flags += ['--gcc-toolchain=' + gcc_toolchain]
        # NOTE: the _DEBUG definition must precede the triple check because
        # for the Windows build of libc++, the forced inclusion of a header
        # requires that _DEBUG is defined. Incorrect ordering will result in
        # -target being elided.
        if self.is_windows and self.debug_build:
            self.cxx.compile_flags += ['-D_DEBUG']
        if self.use_target:
            if not self.cxx.addFlagIfSupported(
                    ['--target=' + self.config.target_triple]):
                self.lit_config.warning('use_target is true but --target is '
                                        'not supported by the compiler')
        if self.use_deployment:
            arch, name, version = self.config.deployment
            self.cxx.flags += ['-arch', arch]
            self.cxx.flags += ['-m' + name + '-version-min=' + version]

        # Add includes for support headers used in the tests.
        support_path = os.path.join(self.libcxx_src_root, 'test/support')
        self.cxx.compile_flags += ['-I' + support_path]

        # Add includes for the PSTL headers
        pstl_src_root = self.get_lit_conf('pstl_src_root')
        pstl_obj_root = self.get_lit_conf('pstl_obj_root')
        if pstl_src_root is not None and pstl_obj_root is not None:
            self.cxx.compile_flags += ['-I' + os.path.join(pstl_src_root, 'include')]
            self.cxx.compile_flags += ['-I' + os.path.join(pstl_obj_root, 'generated_headers')]
            self.cxx.compile_flags += ['-I' + os.path.join(pstl_src_root, 'test')]
            self.config.available_features.add('parallel-algorithms')

        # FIXME(EricWF): variant_size.pass.cpp requires a slightly larger
        # template depth with older Clang versions.
        self.cxx.addFlagIfSupported('-ftemplate-depth=270')

    def configure_compile_flags_header_includes(self):
        support_path = os.path.join(self.libcxx_src_root, 'test', 'support')
        self.configure_config_site_header()
        if self.cxx_stdlib_under_test != 'libstdc++' and \
                not self.is_windows:
            self.cxx.compile_flags += [
                '-include', os.path.join(support_path, 'nasty_macros.h')]
        if self.cxx_stdlib_under_test == 'msvc':
            self.cxx.compile_flags += [
                '-include', os.path.join(support_path,
                                         'msvc_stdlib_force_include.h')]
        if self.is_windows and self.debug_build and \
                self.cxx_stdlib_under_test != 'msvc':
            self.cxx.compile_flags += [
                '-include', os.path.join(support_path,
                                         'set_windows_crt_report_mode.h')
            ]
        cxx_headers = self.get_lit_conf('cxx_headers')
        if cxx_headers == '' or (cxx_headers is None
                                 and self.cxx_stdlib_under_test != 'libc++'):
            self.lit_config.note('using the system cxx headers')
            return
        if self.cxx.type != 'nvcc' and self.cxx.type != 'nvhpc':
            self.cxx.compile_flags += ['-nostdinc++']
        if cxx_headers is None:
            cxx_headers = os.path.join(self.libcxx_src_root, 'include')
        if not os.path.isdir(cxx_headers):
            self.lit_config.fatal("cxx_headers='%s' is not a directory."
                                  % cxx_headers)
        self.cxx.compile_flags += ['-I' + cxx_headers]
        if self.libcxx_obj_root is not None:
            cxxabi_headers = os.path.join(self.libcxx_obj_root, 'include',
                                          'c++build')
            if os.path.isdir(cxxabi_headers):
                self.cxx.compile_flags += ['-I' + cxxabi_headers]

    def configure_config_site_header(self):
        # Check for a possible __config_site in the build directory. We
        # use this if it exists.
        if self.libcxx_obj_root is None:
            return
        config_site_header = os.path.join(self.libcxx_obj_root, '__config_site')
        if not os.path.isfile(config_site_header):
            return
        contained_macros = self.parse_config_site_and_add_features(
            config_site_header)
        self.lit_config.note('Using __config_site header %s with macros: %r'
                             % (config_site_header, contained_macros))
        # FIXME: This must come after the call to
        # 'parse_config_site_and_add_features(...)' in order for it to work.
        self.cxx.compile_flags += ['-include', config_site_header]

    def parse_config_site_and_add_features(self, header):
        """ parse_config_site_and_add_features - Deduce and add the test
            features that are implied by the #define's in the __config_site
            header. Return a dictionary containing the macros found in the
            '__config_site' header.
        """
        # MSVC can't dump macros, so we just give up.
        if 'msvc' in self.config.available_features:
            return {}
        # Parse the macro contents of __config_site by dumping the macros
        # using 'c++ -dM -E' and filtering the predefines.
        predefines = self._dump_macros_verbose()
        macros = self._dump_macros_verbose(header)
        feature_macros_keys = set(macros.keys()) - set(predefines.keys())
        feature_macros = {}
        for k in feature_macros_keys:
            feature_macros[k] = macros[k]
        # We expect the header guard to be one of the definitions
        assert '_LIBCUDACXX_CONFIG_SITE' in feature_macros
        del feature_macros['_LIBCUDACXX_CONFIG_SITE']
        # The __config_site header should be non-empty. Otherwise it should
        # have never been emitted by CMake.
        assert len(feature_macros) > 0
        # FIXME: This is a hack that should be fixed using module maps.
        # If modules are enabled then we have to lift all of the definitions
        # in __config_site onto the command line.
        for m in feature_macros:
            define = '-D%s' % m
            if feature_macros[m]:
                define += '=%s' % (feature_macros[m])
            self.cxx.modules_flags += [define]
        if self.cxx.hasCompileFlag('-Wno-macro-redefined'):
            self.cxx.compile_flags += ['-Wno-macro-redefined']
        # Transform each macro name into the feature name used in the tests.
        # Ex. _LIBCUDACXX_HAS_NO_THREADS -> libcpp-has-no-threads
        for m in feature_macros:
            if m == '_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS' or \
                    m == '_LIBCUDACXX_HIDE_FROM_ABI_PER_TU_BY_DEFAULT':
                continue
            if m == '_LIBCUDACXX_ABI_VERSION':
                self.config.available_features.add('libcpp-abi-version-v%s'
                                                   % feature_macros[m])
                continue
            if m == '_LIBCUDACXX_NO_VCRUNTIME':
                self.config.available_features.add('libcpp-no-vcruntime')
                continue
            assert m.startswith('_LIBCUDACXX_HAS_') or \
                m.startswith('_LIBCUDACXX_ABI_')
            m = m.lower()[1:].replace('_', '-')
            self.config.available_features.add(m)
        return feature_macros

    def configure_compile_flags_exceptions(self):
        enable_exceptions = self.get_lit_bool('enable_exceptions', True)
        if not enable_exceptions:
            self.config.available_features.add('libcpp-no-exceptions')
            if 'nvhpc' in self.config.available_features:
                # NVHPC reports all expressions as `noexcept(true)` with its
                # "no exceptions" mode. Override the setting from CMake as
                # a temporary workaround for that.
                pass
            # TODO: I don't know how to shut off exceptions with MSVC.
            elif 'msvc' not in self.config.available_features:
                if self.cxx.type == 'nvcc':
                    self.cxx.compile_flags += ['-Xcompiler']
                self.cxx.compile_flags += ['-fno-exceptions']

    def configure_compile_flags_rtti(self):
        enable_rtti = self.get_lit_bool('enable_rtti', True)
        if not enable_rtti:
            self.config.available_features.add('libcpp-no-rtti')
            if self.cxx.type == 'nvcc':
                self.cxx.compile_flags += ['-Xcompiler']
            if 'nvhpc' in self.config.available_features:
                self.cxx.compile_flags += ['--no_rtti']
            elif 'msvc' in self.config.available_features:
                self.cxx.compile_flags += ['/GR-']
            else:
                self.cxx.compile_flags += ['-fno-rtti']
            self.cxx.compile_flags += ['-D_LIBCUDACXX_NO_RTTI']

    def configure_compile_flags_abi_version(self):
        abi_version = self.get_lit_conf('abi_version', '').strip()
        abi_unstable = self.get_lit_bool('abi_unstable')
        # Only add the ABI version when it is non-default.
        # FIXME(EricWF): Get the ABI version from the "__config_site".
        if abi_version and abi_version != '1':
            self.cxx.compile_flags += ['-D_LIBCUDACXX_ABI_VERSION=' + abi_version]
        if abi_unstable:
            self.config.available_features.add('libcpp-abi-unstable')
            self.cxx.compile_flags += ['-D_LIBCUDACXX_ABI_UNSTABLE']

    def configure_filesystem_compile_flags(self):
        if not self.get_lit_bool('enable_filesystem', default=True):
            return

        static_env = os.path.join(self.libcxx_src_root, 'test', 'std',
                                  'input.output', 'filesystems', 'Inputs',
                                  'static_test_env')
        static_env = os.path.realpath(static_env)
        assert os.path.isdir(static_env)
        self.cxx.compile_flags += ['-DLIBCXX_FILESYSTEM_STATIC_TEST_ROOT="%s"' % static_env]

        dynamic_env = os.path.join(self.config.test_exec_root,
                                   'filesystem', 'Output', 'dynamic_env')
        dynamic_env = os.path.realpath(dynamic_env)
        if not os.path.isdir(dynamic_env):
            os.makedirs(dynamic_env)
        self.cxx.compile_flags += ['-DLIBCXX_FILESYSTEM_DYNAMIC_TEST_ROOT="%s"' % dynamic_env]
        self.exec_env['LIBCXX_FILESYSTEM_DYNAMIC_TEST_ROOT'] = ("%s" % dynamic_env)

        dynamic_helper = os.path.join(self.libcxx_src_root, 'test', 'support',
                                      'filesystem_dynamic_test_helper.py')
        assert os.path.isfile(dynamic_helper)

        self.cxx.compile_flags += ['-DLIBCXX_FILESYSTEM_DYNAMIC_TEST_HELPER="%s %s"'
                                   % (sys.executable, dynamic_helper)]

    def configure_link_flags(self):
        nvcc_host_compiler = self.get_lit_conf('nvcc_host_compiler')
        if nvcc_host_compiler and self.cxx.type == 'nvcc':
            self.cxx.link_flags += ['-ccbin={0}'.format(nvcc_host_compiler)]

        # Configure library path
        self.configure_link_flags_cxx_library_path()
        self.configure_link_flags_abi_library_path()

        # Configure libraries
        if self.cxx_stdlib_under_test == 'libc++':
            if self.get_lit_conf('name') != 'libcu++':
                if 'nvhpc' not in self.config.available_features or not self.cxx.is_nvrtc:
                    if self.cxx.type == 'nvcc':
                        self.cxx.link_flags += ['-Xcompiler']
                    if self.cxx.type != 'nvhpc':
                        self.cxx.link_flags += ['-nodefaultlibs']

                # FIXME: Handle MSVCRT as part of the ABI library handling.
                if self.is_windows and 'msvc' not in self.config.available_features:
                    if self.cxx.type == 'nvcc':
                        self.cxx.link_flags += ['-Xcompiler']
                    self.cxx.link_flags += ['-nostdlib']
                self.configure_link_flags_cxx_library()
                self.configure_link_flags_abi_library()
                self.configure_extra_library_flags()
        elif self.cxx_stdlib_under_test == 'libstdc++':
            self.config.available_features.add('c++experimental')
            self.cxx.link_flags += ['-lstdc++fs', '-lm', '-pthread']
        elif self.cxx_stdlib_under_test == 'msvc':
            # FIXME: Correctly setup debug/release flags here.
            pass
        elif self.cxx_stdlib_under_test == 'cxx_default':
            self.cxx.link_flags += ['-pthread']
        else:
            self.lit_config.fatal('invalid stdlib under test')

        link_flags_str = self.get_lit_conf('link_flags', '')
        self.cxx.link_flags += shlex.split(link_flags_str)

    def configure_link_flags_cxx_library_path(self):
        if not self.use_system_cxx_lib:
            if self.cxx_library_root:
                self.cxx.link_flags += ['-L' + self.cxx_library_root]
                if self.is_windows and self.link_shared:
                    self.add_path(self.cxx.compile_env, self.cxx_library_root)
            if self.cxx_runtime_root:
                if not self.is_windows:
                    if self.cxx.type == 'nvcc':
                        self.cxx.link_flags += ['-Xcompiler',
                                                '"-Wl,-rpath,' + self.cxx_runtime_root + '"']
                    else:
                        self.cxx.link_flags += ['-Wl,-rpath,' +
                                                self.cxx_runtime_root]
                elif self.is_windows and self.link_shared:
                    self.add_path(self.exec_env, self.cxx_runtime_root)
        elif os.path.isdir(str(self.use_system_cxx_lib)):
            self.cxx.link_flags += ['-L' + self.use_system_cxx_lib]
            if not self.is_windows:
                if self.cxx.type == 'nvcc':
                    self.cxx.link_flags += ['-Xcompiler',
                                            '"-Wl,-rpath,' + self.cxx_runtime_root + '"']
                else:
                    self.cxx.link_flags += ['-Wl,-rpath,' +
                                            self.use_system_cxx_lib]
            if self.is_windows and self.link_shared:
                self.add_path(self.cxx.compile_env, self.use_system_cxx_lib)
        additional_flags = self.get_lit_conf('test_linker_flags')
        if additional_flags:
            self.cxx.link_flags += shlex.split(additional_flags)

    def configure_link_flags_abi_library_path(self):
        # Configure ABI library paths.
        self.abi_library_root = self.get_lit_conf('abi_library_path')
        if self.abi_library_root:
            self.cxx.link_flags += ['-L' + self.abi_library_root]
            if not self.is_windows:
                if self.cxx.type == 'nvcc':
                    self.cxx.link_flags += ['-Xcompiler',
                                            '"-Wl,-rpath,' + self.cxx_runtime_root + '"']
                else:
                    self.cxx.link_flags += ['-Wl,-rpath,' + self.abi_library_root]
            else:
                self.add_path(self.exec_env, self.abi_library_root)

    def configure_link_flags_cxx_library(self):
        libcxx_experimental = self.get_lit_bool('enable_experimental',
                                                default=False)
        if libcxx_experimental:
            self.config.available_features.add('c++experimental')
            self.cxx.link_flags += ['-lc++experimental']
        if self.link_shared:
            self.cxx.link_flags += ['-lc++']
        elif self.cxx.type != 'nvcc':
            cxx_library_root = self.get_lit_conf('cxx_library_root')
            if cxx_library_root:
                libname = self.make_static_lib_name('c++')
                abs_path = os.path.join(cxx_library_root, libname)
                assert os.path.exists(abs_path), \
                    "static libc++ library does not exist"
                self.cxx.link_flags += [abs_path]
            else:
                self.cxx.link_flags += ['-lc++']

    def configure_link_flags_abi_library(self):
        cxx_abi = self.get_lit_conf('cxx_abi', 'libcxxabi')
        if cxx_abi == 'libstdc++':
            self.cxx.link_flags += ['-lstdc++']
        elif cxx_abi == 'libsupc++':
            self.cxx.link_flags += ['-lsupc++']
        elif cxx_abi == 'libcxxabi':
            # If the C++ library requires explicitly linking to libc++abi, or
            # if we're testing libc++abi itself (the test configs are shared),
            # then link it.
            testing_libcxxabi = self.get_lit_conf('name', '') == 'libc++abi'
            if self.target_info.allow_cxxabi_link() or testing_libcxxabi:
                libcxxabi_shared = self.get_lit_bool('libcxxabi_shared',
                                                     default=True)
                if libcxxabi_shared:
                    self.cxx.link_flags += ['-lc++abi']
                else:
                    cxxabi_library_root = self.get_lit_conf('abi_library_path')
                    if cxxabi_library_root:
                        libname = self.make_static_lib_name('c++abi')
                        abs_path = os.path.join(cxxabi_library_root, libname)
                        self.cxx.link_flags += [abs_path]
                    else:
                        self.cxx.link_flags += ['-lc++abi']
        elif cxx_abi == 'libcxxrt':
            self.cxx.link_flags += ['-lcxxrt']
        elif cxx_abi == 'vcruntime':
            debug_suffix = 'd' if self.debug_build else ''
            self.cxx.link_flags += ['-l%s%s' % (lib, debug_suffix)
                                    for lib in ['vcruntime', 'ucrt', 'msvcrt']]
        elif cxx_abi == 'none' or cxx_abi == 'default':
            if self.is_windows:
                debug_suffix = 'd' if self.debug_build else ''
                self.cxx.link_flags += ['-lmsvcrt%s' % debug_suffix]
        else:
            self.lit_config.fatal(
                'C++ ABI setting %s unsupported for tests' % cxx_abi)

    def configure_extra_library_flags(self):
        if self.get_lit_bool('cxx_ext_threads', default=False):
            self.cxx.link_flags += ['-lc++external_threads']
        self.target_info.add_cxx_link_flags(self.cxx.link_flags)

    def configure_color_diagnostics(self):
        use_color = self.get_lit_conf('color_diagnostics')
        if use_color is None:
            use_color = os.environ.get('LIBCXX_COLOR_DIAGNOSTICS')
        if use_color is None:
            return
        if use_color != '':
            self.lit_config.fatal('Invalid value for color_diagnostics "%s".'
                                  % use_color)
        color_flag = '-fdiagnostics-color=always'
        # Check if the compiler supports the color diagnostics flag. Issue a
        # warning if it does not since color diagnostics have been requested.
        if not self.cxx.hasCompileFlag(color_flag):
            self.lit_config.warning(
                'color diagnostics have been requested but are not supported '
                'by the compiler')
        else:
            self.cxx.flags += [color_flag]

    def configure_debug_mode(self):
        debug_level = self.get_lit_conf('debug_level', None)
        if not debug_level:
            return
        if debug_level not in ['0', '1']:
            self.lit_config.fatal('Invalid value for debug_level "%s".'
                                  % debug_level)
        self.cxx.compile_flags += ['-D_LIBCUDACXX_DEBUG=%s' % debug_level]

    def configure_warnings(self):
        default_enable_warnings = 'clang' in self.config.available_features or \
            'msvc' in self.config.available_features
        enable_warnings = self.get_lit_bool('enable_warnings',
                                            default_enable_warnings)
        self.cxx.useWarnings(enable_warnings)
        if 'nvcc' in self.config.available_features:
            self.cxx.warning_flags += ['-Xcudafe', '--display_error_number']
            if 'msvc' in self.config.available_features:
                self.cxx.warning_flags += ['-Xcompiler', '/W4',
                                           '-Xcompiler', '/WX']
                # warning C4100: 'quack': unreferenced formal parameter
                self.cxx.warning_flags += ['-Xcompiler', '-wd4100']
                # warning C4127: conditional expression is constant
                self.cxx.warning_flags += ['-Xcompiler', '-wd4127']
                # warning C4180: qualifier applied to function type has no
                # meaning; ignored
                self.cxx.warning_flags += ['-Xcompiler', '-wd4180']
                # warning C4309: 'moo': truncation of constant value
                self.cxx.warning_flags += ['-Xcompiler', '-wd4309']
            else:
                # TODO: Re-enable soon.
                #self.cxx.warning_flags += ['-Xcompiler', '-Wall',
                #                           '-Xcompiler', '-Werror']
                pass
        else:
            self.cxx.warning_flags += [
                '-D_LIBCUDACXX_DISABLE_PRAGMA_GCC_SYSTEM_HEADER',
                '-Wall', '-Wextra', '-Werror'
            ]
            if self.cxx.hasWarningFlag('-Wuser-defined-warnings'):
                self.cxx.warning_flags += ['-Wuser-defined-warnings']
                self.config.available_features.add('diagnose-if-support')
            self.cxx.addWarningFlagIfSupported('-Wshadow')
            self.cxx.addWarningFlagIfSupported('-Wno-unused-command-line-argument')
            self.cxx.addWarningFlagIfSupported('-Wno-attributes')
            self.cxx.addWarningFlagIfSupported('-Wno-pessimizing-move')
            self.cxx.addWarningFlagIfSupported('-Wno-c++11-extensions')
            self.cxx.addWarningFlagIfSupported('-Wno-user-defined-literals')
            self.cxx.addWarningFlagIfSupported('-Wno-noexcept-type')
            self.cxx.addWarningFlagIfSupported('-Wno-aligned-allocation-unavailable')
            # These warnings should be enabled in order to support the MSVC
            # team using the test suite; they enable the warnings below and
            # expect the test suite to be clean.
            self.cxx.addWarningFlagIfSupported('-Wsign-compare')
            self.cxx.addWarningFlagIfSupported('-Wunused-variable')
            self.cxx.addWarningFlagIfSupported('-Wunused-parameter')
            self.cxx.addWarningFlagIfSupported('-Wunreachable-code')
        std = self.get_lit_conf('std', None)
        if std in ['c++98', 'c++03']:
            if 'nvcc' not in self.config.available_features:
                # The '#define static_assert' provided by libc++ in C++03 mode
                # causes an unused local typedef whenever it is used.
                self.cxx.addWarningFlagIfSupported('-Wno-unused-local-typedef')

    def configure_sanitizer(self):
        san = self.get_lit_conf('use_sanitizer', '').strip()
        if san:
            self.target_info.add_sanitizer_features(san,
                                                    self.config.available_features)
            # Search for llvm-symbolizer along the compiler path first
            # and then along the PATH env variable.
symbolizer_search_paths = os.environ.get('PATH', '') cxx_path = libcxx.util.which(self.cxx.path) if cxx_path is not None: symbolizer_search_paths = ( os.path.dirname(cxx_path) + os.pathsep + symbolizer_search_paths) llvm_symbolizer = libcxx.util.which('llvm-symbolizer', symbolizer_search_paths) def add_ubsan(): self.cxx.flags += ['-fsanitize=undefined', '-fno-sanitize=float-divide-by-zero', '-fno-sanitize-recover=all'] self.exec_env['UBSAN_OPTIONS'] = 'print_stacktrace=1' self.config.available_features.add('ubsan') # Setup the sanitizer compile flags self.cxx.flags += ['-g', '-fno-omit-frame-pointer'] if san == 'Address' or san == 'Address;Undefined' or san == 'Undefined;Address': self.cxx.flags += ['-fsanitize=address'] if llvm_symbolizer is not None: self.exec_env['ASAN_SYMBOLIZER_PATH'] = llvm_symbolizer # FIXME: Turn ODR violation back on after PR28391 is resolved # https://bugs.llvm.org/show_bug.cgi?id=28391 self.exec_env['ASAN_OPTIONS'] = 'detect_odr_violation=0' self.config.available_features.add('asan') self.config.available_features.add('sanitizer-new-delete') self.cxx.compile_flags += ['-O1'] if san == 'Address;Undefined' or san == 'Undefined;Address': add_ubsan() elif san == 'Memory' or san == 'MemoryWithOrigins': self.cxx.flags += ['-fsanitize=memory'] if san == 'MemoryWithOrigins': self.cxx.compile_flags += [ '-fsanitize-memory-track-origins'] if llvm_symbolizer is not None: self.exec_env['MSAN_SYMBOLIZER_PATH'] = llvm_symbolizer self.config.available_features.add('msan') self.config.available_features.add('sanitizer-new-delete') self.cxx.compile_flags += ['-O1'] elif san == 'Undefined': add_ubsan() self.cxx.compile_flags += ['-O2'] elif san == 'Thread': self.cxx.flags += ['-fsanitize=thread'] self.config.available_features.add('tsan') self.config.available_features.add('sanitizer-new-delete') else: self.lit_config.fatal('unsupported value for ' 'use_sanitizer: {0}'.format(san)) san_lib = self.get_lit_conf('sanitizer_library') if san_lib: if self.cxx.type == 'nvcc': self.cxx.link_flags += ['-Xcompiler', '"-Wl,-rpath,' + os.path.dirname(san_lib) + '"'] else: self.cxx.link_flags += ['-Wl,-rpath,' + os.path.dirname(san_lib)] def configure_coverage(self): self.generate_coverage = self.get_lit_bool('generate_coverage', False) if self.generate_coverage: self.cxx.flags += ['-g', '--coverage'] self.cxx.compile_flags += ['-O0'] def configure_coroutines(self): if self.cxx.hasCompileFlag('-fcoroutines-ts'): macros = self._dump_macros_verbose(flags=['-fcoroutines-ts']) if '__cpp_coroutines' not in macros: self.lit_config.warning('-fcoroutines-ts is supported but ' '__cpp_coroutines is not defined') # Consider coroutines supported only when the feature test macro # reflects a recent value. 
if intMacroValue(macros['__cpp_coroutines']) >= 201703: self.config.available_features.add('fcoroutines-ts') def configure_modules(self): modules_flags = ['-fmodules'] if platform.system() != 'Darwin': modules_flags += ['-Xclang', '-fmodules-local-submodule-visibility'] supports_modules = self.cxx.hasCompileFlag(modules_flags) enable_modules = self.get_modules_enabled() if enable_modules and not supports_modules: self.lit_config.fatal( '-fmodules is enabled but not supported by the compiler') if not supports_modules: return self.config.available_features.add('modules-support') module_cache = os.path.join(self.config.test_exec_root, 'modules.cache') module_cache = os.path.realpath(module_cache) if os.path.isdir(module_cache): shutil.rmtree(module_cache) os.makedirs(module_cache) self.cxx.modules_flags += modules_flags + \ ['-fmodules-cache-path=' + module_cache] if enable_modules: self.config.available_features.add('-fmodules') self.cxx.useModules() def configure_substitutions(self): sub = self.config.substitutions cxx_path = pipes.quote(self.cxx.path) # Configure compiler substitutions sub.append(('%cxx', cxx_path)) sub.append(('%libcxx_src_root', self.libcxx_src_root)) # Configure flags substitutions flags_str = ' '.join([pipes.quote(f) for f in self.cxx.flags]) compile_flags_str = ' '.join([pipes.quote(f) for f in self.cxx.compile_flags]) link_flags_str = ' '.join([pipes.quote(f) for f in self.cxx.link_flags]) all_flags = '%s %s %s' % (flags_str, compile_flags_str, link_flags_str) sub.append(('%flags', flags_str)) sub.append(('%compile_flags', compile_flags_str)) sub.append(('%link_flags', link_flags_str)) sub.append(('%all_flags', all_flags)) if self.cxx.isVerifySupported(): verify_str = ' ' + ' '.join(self.cxx.verify_flags) + ' ' sub.append(('%verify', verify_str)) # Add compile and link shortcuts compile_str = (cxx_path + ' -o %t.o %s -c ' + flags_str + ' ' + compile_flags_str) link_str = (cxx_path + ' -o %t.exe %t.o ' + flags_str + ' ' + link_flags_str) assert type(link_str) is str build_str = cxx_path + ' -o %t.exe %s ' + all_flags if self.cxx.use_modules: sub.append(('%compile_module', compile_str)) sub.append(('%build_module', build_str)) elif self.cxx.modules_flags is not None: modules_str = ' '.join(self.cxx.modules_flags) + ' ' sub.append(('%compile_module', compile_str + ' ' + modules_str)) sub.append(('%build_module', build_str + ' ' + modules_str)) sub.append(('%compile', compile_str)) sub.append(('%link', link_str)) sub.append(('%build', build_str)) # Configure exec prefix substitutions. # Configure run env substitution. sub.append(('%run', '%t.exe')) # Configure not program substitutions not_py = os.path.join(self.libcxx_src_root, 'utils', 'not.py') not_str = '%s %s ' % (pipes.quote(sys.executable), pipes.quote(not_py)) sub.append(('not ', not_str)) if self.get_lit_conf('libcxx_gdb'): sub.append(('%libcxx_gdb', self.get_lit_conf('libcxx_gdb'))) sub.append(['%syntaxonly', '-fsyntax-only' if self.cxx.type != 'nvhpc' else '']) sub.append(['%noexceptions', '-fno-exceptions' if self.cxx.type != 'nvhpc' else '']) def can_use_deployment(self): # Check if the host is on an Apple platform using clang. if not self.target_info.platform() == "darwin": return False if not self.target_info.is_host_macosx(): return False if not self.cxx.type.endswith('clang'): return False return True def configure_triple(self): # Get or infer the target triple. 
target_triple = self.get_lit_conf('target_triple') self.use_target = self.get_lit_bool('use_target', False) if self.use_target and target_triple: self.lit_config.warning('use_target is true but no triple is specified') # Use deployment if possible. self.use_deployment = not self.use_target and self.can_use_deployment() if self.use_deployment: return # Save the triple (and warn on Apple platforms). self.config.target_triple = target_triple if self.use_target and 'apple' in target_triple: self.lit_config.warning('consider using arch and platform instead' ' of target_triple on Apple platforms') # If no target triple was given, try to infer it from the compiler # under test. if not self.config.target_triple: target_triple = (self.cxx if self.cxx.type != 'nvcc' else self.host_cxx).getTriple() # Drop sub-major version components from the triple, because the # current XFAIL handling expects exact matches for feature checks. # Example: x86_64-apple-darwin14.0.0 -> x86_64-apple-darwin14 # The 5th group handles triples greater than 3 parts # (ex x86_64-pc-linux-gnu). target_triple = re.sub(r'([^-]+)-([^-]+)-([^.]+)([^-]*)(.*)', r'\1-\2-\3\5', target_triple) # linux-gnu is needed in the triple to properly identify linuxes # that use GLIBC. Handle redhat and opensuse triples as special # cases and append the missing `-gnu` portion. if (target_triple.endswith('redhat-linux') or target_triple.endswith('suse-linux')): target_triple += '-gnu' self.config.target_triple = target_triple self.lit_config.note( "inferred target_triple as: %r" % self.config.target_triple) def configure_deployment(self): assert not self.use_deployment is None assert not self.use_target is None if not self.use_deployment: # Warn about ignored parameters. if self.get_lit_conf('arch'): self.lit_config.warning('ignoring arch, using target_triple') if self.get_lit_conf('platform'): self.lit_config.warning('ignoring platform, using target_triple') return assert not self.use_target assert self.target_info.is_host_macosx() # Always specify deployment explicitly on Apple platforms, since # otherwise a platform is picked up from the SDK. If the SDK version # doesn't match the system version, tests that use the system library # may fail spuriously. arch = self.get_lit_conf('arch') if not arch: arch = (self.cxx if self.cxx.type != 'nvcc' else self.host_cxx).getTriple().split('-', 1)[0] self.lit_config.note("inferred arch as: %r" % arch) inferred_platform, name, version = self.target_info.get_platform() if inferred_platform: self.lit_config.note("inferred platform as: %r" % (name + version)) self.config.deployment = (arch, name, version) # Set the target triple for use by lit. self.config.target_triple = arch + '-apple-' + name + version self.lit_config.note( "computed target_triple as: %r" % self.config.target_triple) # If we're testing a system libc++ as opposed to the upstream LLVM one, # take the version of the system libc++ into account to compute which # features are enabled/disabled. Otherwise, disable availability markup, # which is not relevant for non-shipped flavors of libc++. if self.use_system_cxx_lib: # Dylib support for shared_mutex was added in macosx10.12. if name == 'macosx' and version in ('10.%s' % v for v in range(7, 12)): self.config.available_features.add('dylib-has-no-shared_mutex') self.lit_config.note("shared_mutex is not supported by the deployment target") # Throwing bad_optional_access, bad_variant_access and bad_any_cast is # supported starting in macosx10.14. 
if name == 'macosx' and version in ('10.%s' % v for v in range(7, 14)): self.config.available_features.add('dylib-has-no-bad_optional_access') self.lit_config.note("throwing bad_optional_access is not supported by the deployment target") self.config.available_features.add('dylib-has-no-bad_variant_access') self.lit_config.note("throwing bad_variant_access is not supported by the deployment target") self.config.available_features.add('dylib-has-no-bad_any_cast') self.lit_config.note("throwing bad_any_cast is not supported by the deployment target") # Filesystem is support on Apple platforms starting with macosx10.15. if name == 'macosx' and version in ('10.%s' % v for v in range(7, 15)): self.config.available_features.add('dylib-has-no-filesystem') self.lit_config.note("the deployment target does not support <filesystem>") else: self.cxx.flags += ['-D_LIBCUDACXX_DISABLE_AVAILABILITY'] def configure_env(self): self.target_info.configure_env(self.exec_env) def add_path(self, dest_env, new_path): if 'PATH' not in dest_env: dest_env['PATH'] = new_path else: split_char = ';' if self.is_windows else ':' dest_env['PATH'] = '%s%s%s' % (new_path, split_char, dest_env['PATH'])
cccl-main
libcudacxx/libcxx/utils/libcxx/test/config.py
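The triple normalization in configure_triple is compact enough to misread, so here is a standalone sketch (not part of the repository) of what that substitution does; the sample triples below are illustrative only.

import re

def normalize_triple(target_triple):
    # Same substitution as configure_triple: keep arch, vendor, and the OS
    # name up to the first '.', then re-append any trailing '-' components
    # (e.g. '-gnu') captured by the 5th group.
    return re.sub(r'([^-]+)-([^-]+)-([^.]+)([^-]*)(.*)',
                  r'\1-\2-\3\5', target_triple)

if __name__ == '__main__':
    # Sub-major version components are dropped so XFAIL checks match exactly.
    assert normalize_triple('x86_64-apple-darwin14.0.0') == 'x86_64-apple-darwin14'
    # Four-part triples without a version pass through unchanged.
    assert normalize_triple('x86_64-pc-linux-gnu') == 'x86_64-pc-linux-gnu'
    print('triple normalization behaves as documented')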
cccl-main
libcudacxx/libcxx/utils/libcxx/test/__init__.py
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

import copy
import errno
import os
import time
import random

import lit.Test        # pylint: disable=import-error
import lit.TestRunner  # pylint: disable=import-error
from lit.TestRunner import ParserKind, IntegratedTestKeywordParser  # pylint: disable=import-error

from libcxx.test.executor import LocalExecutor
import libcxx.util


class LibcxxTestFormat(object):
    """
    Custom test format handler for use with the test format used by libc++.

    Tests fall into the following categories:
      FOO.pass.cpp    - Executable test which should compile, run, and exit
                        with code 0.
      FOO.fail.cpp    - Negative test case which is expected to fail
                        compilation.
      FOO.runfail.cpp - Negative test case which is expected to compile, run,
                        and exit with a non-zero exit code.
      FOO.sh.cpp      - A test that uses LIT's ShTest format.
    """

    def __init__(self, cxx, use_verify_for_fail, execute_external,
                 executor, exec_env):
        self.cxx = copy.deepcopy(cxx)
        self.use_verify_for_fail = use_verify_for_fail
        self.execute_external = execute_external
        self.executor = executor
        self.exec_env = dict(exec_env)

    @staticmethod
    def _make_custom_parsers():
        return [
            IntegratedTestKeywordParser('FLAKY_TEST.', ParserKind.TAG,
                                        initial_value=False),
            IntegratedTestKeywordParser('MODULES_DEFINES:', ParserKind.LIST,
                                        initial_value=[])
        ]

    @staticmethod
    def _get_parser(key, parsers):
        for p in parsers:
            if p.keyword == key:
                return p
        assert False, "parser not found"

    # TODO: Move this into lit's FileBasedTest
    def getTestsInDirectory(self, testSuite, path_in_suite,
                            litConfig, localConfig):
        source_path = testSuite.getSourcePath(path_in_suite)
        for filename in os.listdir(source_path):
            # Ignore dot files and excluded tests.
            if filename.startswith('.') or filename in localConfig.excludes:
                continue
            filepath = os.path.join(source_path, filename)
            if not os.path.isdir(filepath):
                if any([filename.endswith(ext)
                        for ext in localConfig.suffixes]):
                    yield lit.Test.Test(testSuite, path_in_suite + (filename,),
                                        localConfig)

    def execute(self, test, lit_config):
        while True:
            try:
                return self._execute(test, lit_config)
            except OSError as oe:
                if oe.errno != errno.ETXTBSY:
                    raise
                time.sleep(0.1)

    def _execute(self, test, lit_config):
        name = test.path_in_suite[-1]
        name_root, name_ext = os.path.splitext(name)
        is_libcxx_test = test.path_in_suite[0] == 'libcxx'
        is_sh_test = name_root.endswith('.sh')
        is_pass_test = name.endswith('.pass.cpp') or name.endswith('.pass.mm')
        is_fail_test = name.endswith('.fail.cpp') or name.endswith('.fail.mm')
        is_runfail_test = name.endswith('.runfail.cpp') or \
            name.endswith('.runfail.mm')
        is_objcxx_test = name.endswith('.mm')
        is_objcxx_arc_test = name.endswith('.arc.pass.mm') or \
                             name.endswith('.arc.fail.mm')
        assert is_sh_test or name_ext == '.cpp' or name_ext == '.mm', \
            'non-cpp file must be sh test'

        if test.config.unsupported:
            return (lit.Test.UNSUPPORTED,
                    "A lit.local.cfg marked this unsupported")

        if is_objcxx_test and not \
           'objective-c++' in test.config.available_features:
            return (lit.Test.UNSUPPORTED, "Objective-C++ is not supported")

        parsers = self._make_custom_parsers()
        script = lit.TestRunner.parseIntegratedTestScript(
            test, additional_parsers=parsers, require_script=is_sh_test)
        # Check if a result for the test was returned. If so return that
        # result.
        if isinstance(script, lit.Test.Result):
            return script
        if lit_config.noExecute:
            return lit.Test.Result(lit.Test.PASS)

        # Check that we don't have run lines on tests that don't support them.
        if not is_sh_test and len(script) != 0:
            lit_config.fatal('Unsupported RUN line found in test %s' % name)

        tmpDir, tmpBase = lit.TestRunner.getTempPaths(test)
        substitutions = lit.TestRunner.getDefaultSubstitutions(test, tmpDir,
                                                               tmpBase)
        script = lit.TestRunner.applySubstitutions(script, substitutions)

        test_cxx = copy.deepcopy(self.cxx)
        if is_fail_test:
            test_cxx.useCCache(False)
            test_cxx.useWarnings(False)
        extra_modules_defines = self._get_parser('MODULES_DEFINES:',
                                                 parsers).getValue()
        if '-fmodules' in test.config.available_features:
            test_cxx.compile_flags += [('-D%s' % mdef.strip()) for
                                       mdef in extra_modules_defines]
            test_cxx.addWarningFlagIfSupported('-Wno-macro-redefined')
            # FIXME: libc++ debug tests #define _LIBCUDACXX_ASSERT to override
            # it. If we see this we need to build the test against uniquely
            # built modules.
            if is_libcxx_test:
                with open(test.getSourcePath(), 'rb') as f:
                    contents = f.read()
                if b'#define _LIBCUDACXX_ASSERT' in contents:
                    test_cxx.useModules(False)

        if is_objcxx_test:
            test_cxx.source_lang = 'objective-c++'
            if is_objcxx_arc_test:
                test_cxx.compile_flags += ['-fobjc-arc']
            else:
                test_cxx.compile_flags += ['-fno-objc-arc']
            test_cxx.link_flags += ['-framework', 'Foundation']

        # Dispatch the test based on its suffix.
        if is_sh_test:
            if not isinstance(self.executor, LocalExecutor):
                # We can't run ShTest tests with an executor yet.
                # For now, bail on trying to run them.
                return lit.Test.UNSUPPORTED, 'ShTest format not yet supported'
            test.config.environment = dict(self.exec_env)
            return lit.TestRunner._runShTest(test, lit_config,
                                             self.execute_external, script,
                                             tmpBase)
        elif is_fail_test:
            return self._evaluate_fail_test(test, test_cxx, parsers)
        elif is_pass_test:
            return self._evaluate_pass_test(test, tmpBase, lit_config,
                                            test_cxx, parsers)
        elif is_runfail_test:
            return self._evaluate_pass_test(test, tmpBase, lit_config,
                                            test_cxx, parsers,
                                            run_should_pass=False)
        else:
            assert False, "no other test type is supported"

    def _clean(self, exec_path):  # pylint: disable=no-self-use
        libcxx.util.cleanFile(exec_path)

    def _evaluate_pass_test(self, test, tmpBase, lit_config,
                            test_cxx, parsers, run_should_pass=True):
        execDir = os.path.dirname(test.getExecPath())
        source_path = test.getSourcePath()
        exec_path = tmpBase + '.exe'
        object_path = tmpBase + '.o'
        # Create the output directory if it does not already exist.
        libcxx.util.mkdir_p(os.path.dirname(tmpBase))
        try:
            # Compile the test
            cmd, out, err, rc = test_cxx.compileLinkTwoSteps(
                source_path, out=exec_path, object_file=object_path,
                cwd=execDir)
            compile_cmd = cmd
            if rc != 0:
                report = libcxx.util.makeReport(cmd, out, err, rc)
                report += "Compilation failed unexpectedly!"
                return lit.Test.Result(lit.Test.FAIL, report)
            # Run the test
            local_cwd = os.path.dirname(source_path)
            env = None
            if self.exec_env:
                env = self.exec_env
            # TODO: Only list actually needed files in file_deps.
            # Right now we just mark all of the .dat files in the same
            # directory as dependencies, but it's likely less than that. We
            # should add a `// FILE-DEP: foo.dat` to each test to track this.
            data_files = [os.path.join(local_cwd, f)
                          for f in os.listdir(local_cwd) if f.endswith('.dat')]
            is_flaky = self._get_parser('FLAKY_TEST.', parsers).getValue()
            max_retry = 5 if is_flaky else 1
            for retry_count in range(max_retry):
                cmd, out, err, rc = self.executor.run(exec_path, [exec_path],
                                                      local_cwd, data_files,
                                                      env)
                report = "Compiled With: '%s'\n" % ' '.join(compile_cmd)
                report += libcxx.util.makeReport(cmd, out, err, rc)
                result_expected = (rc == 0) == run_should_pass
                if result_expected:
                    res = lit.Test.PASS if retry_count == 0 else lit.Test.FLAKYPASS
                    return lit.Test.Result(res, report)
                elif retry_count + 1 == max_retry:
                    if run_should_pass:
                        report += "Compiled test failed unexpectedly!"
                    else:
                        report += "Compiled test succeeded unexpectedly!"
                    return lit.Test.Result(lit.Test.FAIL, report)

            assert False  # Unreachable
        finally:
            # Note that cleanup of exec_file happens in `_clean()`. If you
            # override this, cleanup is your responsibility.
            libcxx.util.cleanFile(object_path)
            self._clean(exec_path)

    def _evaluate_fail_test(self, test, test_cxx, parsers):
        source_path = test.getSourcePath()
        # FIXME: lift this detection into LLVM/LIT.
        with open(source_path, 'rb') as f:
            contents = f.read()
        verify_tags = [b'expected-note', b'expected-remark',
                       b'expected-warning', b'expected-error',
                       b'expected-no-diagnostics']
        use_verify = self.use_verify_for_fail and \
            any([tag in contents for tag in verify_tags])
        # FIXME(EricWF): GCC 5 does not evaluate static assertions that
        # are dependent on a template parameter when '-fsyntax-only' is passed.
        # This is fixed in GCC 6. However for now we only pass "-fsyntax-only"
        # when using Clang.
        if test_cxx.type != 'gcc' and test_cxx.type != 'nvcc':
            test_cxx.flags += ['-fsyntax-only']
        if use_verify:
            test_cxx.useVerify()
            test_cxx.useWarnings()
            if '-Wuser-defined-warnings' in test_cxx.warning_flags:
                test_cxx.warning_flags += ['-Wno-error=user-defined-warnings']
        else:
            # We still need to enable certain warnings on .fail.cpp test when
            # -verify isn't enabled. Such as -Werror=unused-result. However,
            # we don't want it enabled too liberally, which might incorrectly
            # allow unrelated failure tests to 'pass'.
            #
            # Therefore, we check if the test was expected to fail because of
            # nodiscard before enabling it.
            test_str_list = [b'ignoring return value', b'nodiscard',
                             b'NODISCARD']
            if any(test_str in contents for test_str in test_str_list):
                test_cxx.flags += ['-Werror=unused-result']
        cmd, out, err, rc = test_cxx.compile(source_path, out=os.devnull)
        check_rc = lambda rc: rc == 0 if use_verify else rc != 0
        report = libcxx.util.makeReport(cmd, out, err, rc)
        if check_rc(rc):
            return lit.Test.Result(lit.Test.PASS, report)
        else:
            report += ('Expected compilation to fail!\n' if not use_verify else
                       'Expected compilation using verify to pass!\n')
            return lit.Test.Result(lit.Test.FAIL, report)
cccl-main
libcudacxx/libcxx/utils/libcxx/test/format.py
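The execute loop above retries on ETXTBSY because a freshly written binary can transiently be reported as "text file busy" on some filesystems. A minimal standalone sketch of the same pattern (not part of the repository); the `action` callable is hypothetical and stands in for self._execute(test, lit_config):

import errno
import time

def run_with_etxtbsy_retry(action, delay=0.1):
    # Keep retrying while the OS reports the executable as busy; re-raise
    # every other OSError unchanged.
    while True:
        try:
            return action()
        except OSError as oe:
            if oe.errno != errno.ETXTBSY:
                raise
            time.sleep(delay)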
from __future__ import absolute_import
import os
import subprocess
import sys

import lit.Test
import lit.TestRunner
import lit.util
from lit.formats.base import TestFormat

kIsWindows = sys.platform in ['win32', 'cygwin']

class GoogleBenchmark(TestFormat):
    def __init__(self, test_sub_dirs, test_suffix, benchmark_args=[]):
        self.benchmark_args = list(benchmark_args)
        self.test_sub_dirs = os.path.normcase(str(test_sub_dirs)).split(';')

        # On Windows, assume tests will also end in '.exe'.
        exe_suffix = str(test_suffix)
        if kIsWindows:
            exe_suffix += '.exe'

        # Also check for .py files for testing purposes.
        self.test_suffixes = {exe_suffix, test_suffix + '.py'}

    def getBenchmarkTests(self, path, litConfig, localConfig):
        """getBenchmarkTests(path) - [name]

        Return the tests available in the benchmark executable.

        Args:
          path: String path to a benchmark executable
          litConfig: LitConfig instance
          localConfig: TestingConfig instance"""

        # TODO: allow splitting tests according to the "benchmark family" so
        # the output for a single family of tests all belongs to the same test
        # target.
        list_test_cmd = [path, '--benchmark_list_tests']
        try:
            output = subprocess.check_output(list_test_cmd,
                                             env=localConfig.environment)
        except subprocess.CalledProcessError as exc:
            litConfig.warning(
                "unable to discover google-benchmarks in %r: %s. Process output: %s"
                % (path, sys.exc_info()[1], exc.output))
            # Stop the generator cleanly; raising StopIteration inside a
            # generator is an error under PEP 479 (Python 3.7+).
            return

        nested_tests = []
        for ln in output.splitlines(False):  # Don't keep newlines.
            ln = lit.util.to_string(ln)
            if not ln.strip():
                continue

            index = 0
            while ln[index*2:index*2+2] == '  ':
                index += 1
            while len(nested_tests) > index:
                nested_tests.pop()

            ln = ln[index*2:]
            if ln.endswith('.'):
                nested_tests.append(ln)
            elif any([name.startswith('DISABLED_')
                      for name in nested_tests + [ln]]):
                # The benchmark runner will internally skip these tests.
                # No need to launch a child process for it.
                continue
            else:
                yield ''.join(nested_tests) + ln

    def getTestsInDirectory(self, testSuite, path_in_suite,
                            litConfig, localConfig):
        source_path = testSuite.getSourcePath(path_in_suite)
        for subdir in self.test_sub_dirs:
            dir_path = os.path.join(source_path, subdir)
            if not os.path.isdir(dir_path):
                continue
            for fn in lit.util.listdir_files(dir_path,
                                             suffixes=self.test_suffixes):
                # Discover the tests in this executable.
                execpath = os.path.join(source_path, subdir, fn)
                testnames = self.getBenchmarkTests(execpath, litConfig,
                                                   localConfig)
                for testname in testnames:
                    testPath = path_in_suite + (subdir, fn, testname)
                    yield lit.Test.Test(testSuite, testPath, localConfig,
                                        file_path=execpath)

    def execute(self, test, litConfig):
        testPath, testName = os.path.split(test.getSourcePath())
        while not os.path.exists(testPath):
            # Handle GTest-style parametrized and typed tests, whose name
            # includes some '/'s.
            testPath, namePrefix = os.path.split(testPath)
            testName = namePrefix + '/' + testName

        cmd = [testPath, '--benchmark_filter=%s$' % testName] + self.benchmark_args

        if litConfig.noExecute:
            return lit.Test.PASS, ''

        try:
            out, err, exitCode = lit.util.executeCommand(
                cmd, env=test.config.environment,
                timeout=litConfig.maxIndividualTestTime)
        except lit.util.ExecuteCommandTimeoutException:
            return (lit.Test.TIMEOUT,
                    'Reached timeout of {} seconds'.format(
                        litConfig.maxIndividualTestTime))

        if exitCode:
            return lit.Test.FAIL, ('exit code: %d\n' % exitCode) + out + err

        passing_test_line = testName
        if passing_test_line not in out:
            msg = ('Unable to find %r in google benchmark output:\n\n%s%s' %
                   (passing_test_line, out, err))
            return lit.Test.UNRESOLVED, msg
        return lit.Test.PASS, err + out
cccl-main
libcudacxx/libcxx/utils/libcxx/test/googlebenchmark.py
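getBenchmarkTests reconstructs full test names from a two-space-indented, googletest-style listing. A standalone sketch (not part of the repository) of that parsing, run on a made-up listing:

def parse_benchmark_listing(output):
    names = []
    nested = []
    for ln in output.splitlines():
        if not ln.strip():
            continue
        # Two spaces of indentation per nesting level.
        depth = 0
        while ln[depth * 2:depth * 2 + 2] == '  ':
            depth += 1
        while len(nested) > depth:
            nested.pop()
        ln = ln[depth * 2:]
        if ln.endswith('.'):
            nested.append(ln)        # a "family" prefix, e.g. 'BM_Sort.'
        elif any(n.startswith('DISABLED_') for n in nested + [ln]):
            continue                 # skipped by the runner itself
        else:
            names.append(''.join(nested) + ln)
    return names

if __name__ == '__main__':
    sample = 'BM_Sort.\n  random\n  sorted\nBM_Copy'
    assert parse_benchmark_listing(sample) == \
        ['BM_Sort.random', 'BM_Sort.sorted', 'BM_Copy']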
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

import platform
import os

from libcxx.test import tracing
from libcxx.util import executeCommand


class Executor(object):
    def run(self, exe_path, cmd, local_cwd, file_deps=None, env=None):
        """Execute a command.
            Be very careful not to change shared state in this function.
            Executor objects are shared between python processes in `lit -jN`.
        Args:
            exe_path: str:    Local path to the executable to be run
            cmd: [str]:       subprocess.call style command
            local_cwd: str:   Local path to the working directory
            file_deps: [str]: Files required by the test
            env: {str: str}:  Environment variables to execute under
        Returns:
            cmd, out, err, exitCode
        """
        raise NotImplementedError


class LocalExecutor(Executor):
    def __init__(self):
        super(LocalExecutor, self).__init__()
        self.is_windows = platform.system() == 'Windows'

    def run(self, exe_path, cmd=None, work_dir='.', file_deps=None, env=None):
        cmd = cmd or [exe_path]
        if work_dir == '.':
            work_dir = os.getcwd()
        out, err, rc = executeCommand(cmd, cwd=work_dir, env=env)
        return (cmd, out, err, rc)


class NoopExecutor(Executor):
    def __init__(self):
        super(NoopExecutor, self).__init__()

    def run(self, exe_path, cmd=None, work_dir='.', file_deps=None, env=None):
        return (cmd, '', '', 0)


class PrefixExecutor(Executor):
    """Prefix an executor with some other command wrapper.

    Most useful for setting ulimits on commands, or running an emulator like
    qemu and valgrind.
    """
    def __init__(self, commandPrefix, chain):
        super(PrefixExecutor, self).__init__()
        self.commandPrefix = commandPrefix
        self.chain = chain

    def run(self, exe_path, cmd=None, work_dir='.', file_deps=None, env=None):
        cmd = cmd or [exe_path]
        return self.chain.run(exe_path, self.commandPrefix + cmd, work_dir,
                              file_deps, env=env)


class PostfixExecutor(Executor):
    """Postfix an executor with some args."""
    def __init__(self, commandPostfix, chain):
        super(PostfixExecutor, self).__init__()
        self.commandPostfix = commandPostfix
        self.chain = chain

    def run(self, exe_path, cmd=None, work_dir='.', file_deps=None, env=None):
        cmd = cmd or [exe_path]
        # exe_path must be forwarded as the first argument; dropping it would
        # shift every argument of the chained executor's run() by one.
        return self.chain.run(exe_path, cmd + self.commandPostfix, work_dir,
                              file_deps, env=env)


class TimeoutExecutor(PrefixExecutor):
    """Execute another action under a timeout.

    Deprecated. http://reviews.llvm.org/D6584 adds timeouts to LIT.
    """
    def __init__(self, duration, chain):
        super(TimeoutExecutor, self).__init__(
            ['timeout', duration], chain)


class RemoteExecutor(Executor):
    def __init__(self):
        self.local_run = executeCommand

    def remote_temp_dir(self):
        return self._remote_temp(True)

    def remote_temp_file(self):
        return self._remote_temp(False)

    def _remote_temp(self, is_dir):
        raise NotImplementedError()

    def copy_in(self, local_srcs, remote_dsts):
        # This could be wrapped up in a tar->scp->untar for performance
        # if there are lots of files to be copied/moved
        for src, dst in zip(local_srcs, remote_dsts):
            self._copy_in_file(src, dst)

    def _copy_in_file(self, src, dst):
        raise NotImplementedError()

    def delete_remote(self, remote):
        try:
            self._execute_command_remote(['rm', '-rf', remote])
        except OSError:
            # TODO: Log failure to delete?
            pass

    def run(self, exe_path, cmd=None, work_dir='.', file_deps=None, env=None):
        target_exe_path = None
        target_cwd = None
        try:
            target_cwd = self.remote_temp_dir()
            target_exe_path = os.path.join(target_cwd, 'libcxx_test.exe')
            if cmd:
                # Replace exe_path with target_exe_path.
                cmd = [c if c != exe_path else target_exe_path for c in cmd]
            else:
                cmd = [target_exe_path]

            srcs = [exe_path]
            dsts = [target_exe_path]
            if file_deps is not None:
                dev_paths = [os.path.join(target_cwd, os.path.basename(f))
                             for f in file_deps]
                srcs.extend(file_deps)
                dsts.extend(dev_paths)
            self.copy_in(srcs, dsts)
            # TODO(jroelofs): capture the copy_in and delete_remote commands,
            # and conjugate them with '&&'s around the first tuple element
            # returned here:
            return self._execute_command_remote(cmd, target_cwd, env)
        finally:
            if target_cwd:
                self.delete_remote(target_cwd)

    def _execute_command_remote(self, cmd, remote_work_dir='.', env=None):
        raise NotImplementedError()


class SSHExecutor(RemoteExecutor):
    def __init__(self, host, username=None):
        super(SSHExecutor, self).__init__()
        self.user_prefix = username + '@' if username else ''
        self.host = host
        self.scp_command = 'scp'
        self.ssh_command = 'ssh'

        # TODO(jroelofs): switch this on some -super-verbose-debug config flag
        if False:
            self.local_run = tracing.trace_function(
                self.local_run, log_calls=True, log_results=True,
                label='ssh_local')

    def _remote_temp(self, is_dir):
        # TODO: detect what the target system is, and use the correct
        # mktemp command for it. (linux and darwin differ here, and I'm
        # sure windows has another way to do it)

        # Not sure how to do suffix on osx yet
        dir_arg = '-d' if is_dir else ''
        cmd = 'mktemp -q {} /tmp/libcxx.XXXXXXXXXX'.format(dir_arg)
        _, temp_path, err, exitCode = self._execute_command_remote([cmd])
        temp_path = temp_path.strip()
        if exitCode != 0:
            raise RuntimeError(err)
        return temp_path

    def _copy_in_file(self, src, dst):
        scp = self.scp_command
        remote = self.host
        remote = self.user_prefix + remote
        cmd = [scp, '-p', src, remote + ':' + dst]
        self.local_run(cmd)

    def _execute_command_remote(self, cmd, remote_work_dir='.', env=None):
        remote = self.user_prefix + self.host
        ssh_cmd = [self.ssh_command, '-oBatchMode=yes', remote]
        if env:
            env_cmd = ['env'] + ['%s="%s"' % (k, v) for k, v in env.items()]
        else:
            env_cmd = []
        remote_cmd = ' '.join(env_cmd + cmd)
        if remote_work_dir != '.':
            remote_cmd = 'cd ' + remote_work_dir + ' && ' + remote_cmd
        out, err, rc = self.local_run(ssh_cmd + [remote_cmd])
        return (remote_cmd, out, err, rc)
cccl-main
libcudacxx/libcxx/utils/libcxx/test/executor.py
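Executors compose by wrapping: each PrefixExecutor prepends its command before delegating inward, so the prefix added closest to the innermost executor ends up at the front of the final command. A usage sketch, assuming the libcxx utils package is importable; it chains a NoopExecutor so nothing is actually executed and only the final command is observable:

from libcxx.test.executor import (NoopExecutor, PrefixExecutor,
                                  TimeoutExecutor)

# Wrap a (hypothetical) qemu prefix inside a timeout wrapper.
executor = TimeoutExecutor('60s',
                           PrefixExecutor(['qemu-x86_64'], NoopExecutor()))
cmd, out, err, rc = executor.run('/tmp/test.exe')
# The timeout wrapper prepends first, then the inner qemu prefix lands in
# front of it, so:
assert cmd == ['qemu-x86_64', 'timeout', '60s', '/tmp/test.exe']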
#===----------------------------------------------------------------------===//
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===//

import importlib
import locale
import os
import platform
import re
import subprocess
import sys

from libcxx.util import executeCommand

class DefaultTargetInfo(object):
    def __init__(self, full_config):
        self.full_config = full_config

    def platform(self):
        return sys.platform.lower().strip()

    def add_locale_features(self, features):
        self.full_config.lit_config.warning(
            "No locales entry for target_system: %s" % self.platform())

    def add_cxx_compile_flags(self, flags): pass
    def add_cxx_link_flags(self, flags): pass
    def configure_env(self, env): pass
    def allow_cxxabi_link(self): return True
    def add_sanitizer_features(self, sanitizer_type, features): pass
    def use_lit_shell_default(self): return False


def test_locale(loc):
    assert loc is not None
    default_locale = locale.setlocale(locale.LC_ALL)
    try:
        locale.setlocale(locale.LC_ALL, loc)
        return True
    except locale.Error:
        return False
    finally:
        locale.setlocale(locale.LC_ALL, default_locale)


def add_common_locales(features, lit_config, is_windows=False):
    # A list of locales needed by the test-suite.
    # The list uses the canonical name for the locale used in the test-suite.
    # TODO: On Linux ISO8859 *may* need to be hyphenated.
    locales = [
        ('en_US.UTF-8', 'English_United States.1252'),
        ('fr_FR.UTF-8', 'French_France.1252'),
        ('ru_RU.UTF-8', 'Russian_Russia.1251'),
        ('zh_CN.UTF-8', 'Chinese_China.936'),
        ('fr_CA.ISO8859-1', 'French_Canada.1252'),
        ('cs_CZ.ISO8859-2', 'Czech_Czech Republic.1250')
    ]
    for loc_id, windows_loc_name in locales:
        loc_name = windows_loc_name if is_windows else loc_id
        if test_locale(loc_name):
            features.add('locale.{0}'.format(loc_id))
        else:
            lit_config.warning('The locale {0} is not supported by '
                               'your platform. Some tests will be '
                               'unsupported.'.format(loc_name))


class DarwinLocalTI(DefaultTargetInfo):
    def __init__(self, full_config):
        super(DarwinLocalTI, self).__init__(full_config)

    def is_host_macosx(self):
        name = subprocess.check_output(['sw_vers', '-productName']).strip()
        return name == "Mac OS X"

    def get_macosx_version(self):
        assert self.is_host_macosx()
        version = subprocess.check_output(
            ['sw_vers', '-productVersion']).strip()
        version = re.sub(r'([0-9]+\.[0-9]+)(\..*)?', r'\1', version)
        return version

    def get_sdk_version(self, name):
        assert self.is_host_macosx()
        cmd = ['xcrun', '--sdk', name, '--show-sdk-path']
        out = None
        try:
            out = subprocess.check_output(cmd).strip()
        except OSError:
            pass
        if not out:
            self.full_config.lit_config.fatal(
                "cannot infer sdk version with: %r" % cmd)
        return re.sub(r'.*/[^0-9]+([0-9.]+)\.sdk', r'\1', out)

    def get_platform(self):
        platform = self.full_config.get_lit_conf('platform')
        if platform:
            platform = re.sub(r'([^0-9]+)([0-9\.]*)', r'\1-\2', platform)
            name, version = tuple(platform.split('-', 1))
        else:
            name = 'macosx'
            version = None

        if version:
            return (False, name, version)

        # Infer the version, either from the SDK or the system itself. For
        # macosx, ignore the SDK version; what matters is what's at
        # /usr/lib/libc++.dylib.
        if name == 'macosx':
            version = self.get_macosx_version()
        else:
            version = self.get_sdk_version(name)
        return (True, name, version)

    def add_locale_features(self, features):
        add_common_locales(features, self.full_config.lit_config)

    def add_cxx_compile_flags(self, flags):
        if self.full_config.use_deployment:
            _, name, _ = self.full_config.config.deployment
            cmd = ['xcrun', '--sdk', name, '--show-sdk-path']
        else:
            cmd = ['xcrun', '--show-sdk-path']
        out, err, exit_code = executeCommand(cmd)
        if exit_code != 0:
            self.full_config.lit_config.warning(
                "Could not determine macOS SDK path! stderr was " + err)
        if exit_code == 0 and out:
            sdk_path = out.strip()
            self.full_config.lit_config.note('using SDKROOT: %r' % sdk_path)
            assert isinstance(sdk_path, str)
            flags += ["-isysroot", sdk_path]

    def add_cxx_link_flags(self, flags):
        flags += ['-lSystem']

    def configure_env(self, env):
        library_paths = []
        # Configure the library path for libc++
        if self.full_config.cxx_runtime_root:
            library_paths += [self.full_config.cxx_runtime_root]
        elif self.full_config.use_system_cxx_lib:
            if os.path.isdir(str(self.full_config.use_system_cxx_lib)):
                library_paths += [self.full_config.use_system_cxx_lib]

        # Configure the abi library path
        if self.full_config.abi_library_root:
            library_paths += [self.full_config.abi_library_root]
        if library_paths:
            env['DYLD_LIBRARY_PATH'] = ':'.join(library_paths)

    def allow_cxxabi_link(self):
        # Don't link libc++abi explicitly on OS X because the symbols
        # should be available in libc++ directly.
        return False


class FreeBSDLocalTI(DefaultTargetInfo):
    def __init__(self, full_config):
        super(FreeBSDLocalTI, self).__init__(full_config)

    def add_locale_features(self, features):
        add_common_locales(features, self.full_config.lit_config)

    def add_cxx_link_flags(self, flags):
        flags += ['-lc', '-lm', '-lpthread', '-lgcc_s', '-lcxxrt']


class NetBSDLocalTI(DefaultTargetInfo):
    def __init__(self, full_config):
        super(NetBSDLocalTI, self).__init__(full_config)

    def add_locale_features(self, features):
        add_common_locales(features, self.full_config.lit_config)

    def add_cxx_link_flags(self, flags):
        flags += ['-lc', '-lm', '-lpthread', '-lgcc_s', '-lc++abi',
                  '-lunwind']


class LinuxLocalTI(DefaultTargetInfo):
    def __init__(self, full_config):
        super(LinuxLocalTI, self).__init__(full_config)

    def platform(self):
        return 'linux'

    def add_locale_features(self, features):
        add_common_locales(features, self.full_config.lit_config)

    def add_cxx_compile_flags(self, flags):
        flags += ['-D__STDC_FORMAT_MACROS',
                  '-D__STDC_LIMIT_MACROS',
                  '-D__STDC_CONSTANT_MACROS']

    def add_cxx_link_flags(self, flags):
        enable_threads = ('libcpp-has-no-threads' not in
                          self.full_config.config.available_features)
        llvm_unwinder = self.full_config.get_lit_bool('llvm_unwinder', False)
        shared_libcxx = self.full_config.get_lit_bool('enable_shared', True)
        flags += ['-lm']
        if not llvm_unwinder:
            flags += ['-lgcc_s', '-lgcc']
        if enable_threads:
            flags += ['-lpthread']
            if not shared_libcxx:
                flags += ['-lrt']
        flags += ['-lc']
        if llvm_unwinder:
            flags += ['-lunwind', '-ldl']
        else:
            flags += ['-lgcc_s']
        builtins_lib = self.full_config.get_lit_conf('builtins_library')
        if builtins_lib:
            flags += [builtins_lib]
        else:
            flags += ['-lgcc']
        use_libatomic = self.full_config.get_lit_bool('use_libatomic', False)
        if use_libatomic:
            flags += ['-latomic']
        san = self.full_config.get_lit_conf('use_sanitizer', '').strip()
        if san:
            # The libraries and their order are taken from the
            # linkSanitizerRuntimeDeps function in
            # clang/lib/Driver/Tools.cpp
            flags += ['-lpthread', '-lrt', '-lm', '-ldl']


class WindowsLocalTI(DefaultTargetInfo):
    def __init__(self, full_config):
        super(WindowsLocalTI, self).__init__(full_config)

    def add_locale_features(self, features):
        add_common_locales(features, self.full_config.lit_config,
                           is_windows=True)

    def use_lit_shell_default(self):
        # Default to the internal shell on Windows, as bash on Windows is
        # usually very slow.
        return True


def make_target_info(full_config):
    default = "libcxx.test.target_info.LocalTI"
    info_str = full_config.get_lit_conf('target_info', default)
    if info_str != default:
        mod_path, _, info = info_str.rpartition('.')
        mod = importlib.import_module(mod_path)
        target_info = getattr(mod, info)(full_config)
        full_config.lit_config.note("inferred target_info as: %r" % info_str)
        return target_info
    target_system = platform.system()
    if target_system == 'Darwin':
        return DarwinLocalTI(full_config)
    if target_system == 'FreeBSD':
        return FreeBSDLocalTI(full_config)
    if target_system == 'NetBSD':
        return NetBSDLocalTI(full_config)
    if target_system == 'Linux':
        return LinuxLocalTI(full_config)
    if target_system == 'Windows':
        return WindowsLocalTI(full_config)
    return DefaultTargetInfo(full_config)
cccl-main
libcudacxx/libcxx/utils/libcxx/test/target_info.py
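test_locale probes a locale by attempting to set it and always restoring the previous one, so the probe leaves no lasting effect on the process. A minimal standalone sketch (not part of the repository) of that probe-and-restore pattern:

import locale

def locale_available(name):
    # Remember the current locale so the probe is side-effect free.
    saved = locale.setlocale(locale.LC_ALL)
    try:
        locale.setlocale(locale.LC_ALL, name)
        return True
    except locale.Error:
        return False
    finally:
        locale.setlocale(locale.LC_ALL, saved)

if __name__ == '__main__':
    for loc in ('en_US.UTF-8', 'no_such_locale.UTF-8'):
        print(loc, '->', locale_available(loc))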
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

import ast
import distutils.spawn
import sys
import re
import libcxx.util

from pprint import pformat


def read_syms_from_list(slist):
    """
    Read a list of symbols from a list of strings.
    Each string is one symbol.
    """
    return [ast.literal_eval(l) for l in slist]


def read_syms_from_file(filename):
    """
    Read a list of symbols in from a file.
    """
    with open(filename, 'r') as f:
        data = f.read()
    return read_syms_from_list(data.splitlines())


def read_blacklist(filename):
    with open(filename, 'r') as f:
        data = f.read()
    lines = [l.strip() for l in data.splitlines() if l.strip()]
    lines = [l for l in lines if not l.startswith('#')]
    return lines


def write_syms(sym_list, out=None, names_only=False, filter=None):
    """
    Write a list of symbols to the file named by out.
    """
    out_str = ''
    out_list = sym_list
    out_list.sort(key=lambda x: x['name'])
    if filter is not None:
        out_list = filter(out_list)
    if names_only:
        out_list = [sym['name'] for sym in out_list]
    for sym in out_list:
        # Use pformat for consistent ordering of keys.
        out_str += pformat(sym, width=100000) + '\n'
    if out is None:
        sys.stdout.write(out_str)
    else:
        with open(out, 'w') as f:
            f.write(out_str)


_cppfilt_exe = distutils.spawn.find_executable('c++filt')


def demangle_symbol(symbol):
    if _cppfilt_exe is None:
        return symbol
    out, _, exit_code = libcxx.util.executeCommandVerbose(
        [_cppfilt_exe], input=symbol)
    if exit_code != 0:
        return symbol
    return out


def is_elf(filename):
    with open(filename, 'rb') as f:
        magic_bytes = f.read(4)
    return magic_bytes == b'\x7fELF'


def is_mach_o(filename):
    with open(filename, 'rb') as f:
        magic_bytes = f.read(4)
    # The file is opened in binary mode, so the magic constants must be
    # bytes literals for the comparison to succeed.
    return magic_bytes in [
        b'\xfe\xed\xfa\xce',  # MH_MAGIC
        b'\xce\xfa\xed\xfe',  # MH_CIGAM
        b'\xfe\xed\xfa\xcf',  # MH_MAGIC_64
        b'\xcf\xfa\xed\xfe',  # MH_CIGAM_64
        b'\xca\xfe\xba\xbe',  # FAT_MAGIC
        b'\xbe\xba\xfe\xca'   # FAT_CIGAM
    ]


def is_library_file(filename):
    if sys.platform == 'darwin':
        return is_mach_o(filename)
    else:
        return is_elf(filename)


def extract_or_load(filename):
    import libcxx.sym_check.extract
    if is_library_file(filename):
        return libcxx.sym_check.extract.extract_symbols(filename)
    return read_syms_from_file(filename)


def adjust_mangled_name(name):
    if not name.startswith('__Z'):
        return name
    return name[1:]


new_delete_std_symbols = [
    '_Znam', '_Znwm', '_ZdaPv', '_ZdaPvm', '_ZdlPv', '_ZdlPvm'
]


cxxabi_symbols = [
    '___dynamic_cast', '___gxx_personality_v0',
    '_ZTIDi', '_ZTIDn', '_ZTIDs', '_ZTIPDi', '_ZTIPDn', '_ZTIPDs',
    '_ZTIPKDi', '_ZTIPKDn', '_ZTIPKDs', '_ZTIPKa', '_ZTIPKb', '_ZTIPKc',
    '_ZTIPKd', '_ZTIPKe', '_ZTIPKf', '_ZTIPKh', '_ZTIPKi', '_ZTIPKj',
    '_ZTIPKl', '_ZTIPKm', '_ZTIPKs', '_ZTIPKt', '_ZTIPKv', '_ZTIPKw',
    '_ZTIPKx', '_ZTIPKy', '_ZTIPa', '_ZTIPb', '_ZTIPc', '_ZTIPd',
    '_ZTIPe', '_ZTIPf', '_ZTIPh', '_ZTIPi', '_ZTIPj', '_ZTIPl',
    '_ZTIPm', '_ZTIPs', '_ZTIPt', '_ZTIPv', '_ZTIPw', '_ZTIPx',
    '_ZTIPy', '_ZTIa', '_ZTIb', '_ZTIc', '_ZTId', '_ZTIe', '_ZTIf',
    '_ZTIh', '_ZTIi', '_ZTIj', '_ZTIl', '_ZTIm', '_ZTIs', '_ZTIt',
    '_ZTIv', '_ZTIw', '_ZTIx', '_ZTIy',
    '_ZTSDi', '_ZTSDn', '_ZTSDs', '_ZTSPDi', '_ZTSPDn', '_ZTSPDs',
    '_ZTSPKDi', '_ZTSPKDn', '_ZTSPKDs', '_ZTSPKa', '_ZTSPKb', '_ZTSPKc',
    '_ZTSPKd', '_ZTSPKe', '_ZTSPKf', '_ZTSPKh', '_ZTSPKi', '_ZTSPKj',
    '_ZTSPKl', '_ZTSPKm', '_ZTSPKs', '_ZTSPKt', '_ZTSPKv', '_ZTSPKw',
    '_ZTSPKx', '_ZTSPKy', '_ZTSPa', '_ZTSPb', '_ZTSPc', '_ZTSPd',
    '_ZTSPe', '_ZTSPf', '_ZTSPh', '_ZTSPi', '_ZTSPj', '_ZTSPl',
    '_ZTSPm', '_ZTSPs', '_ZTSPt', '_ZTSPv', '_ZTSPw', '_ZTSPx',
    '_ZTSPy', '_ZTSa', '_ZTSb', '_ZTSc', '_ZTSd', '_ZTSe', '_ZTSf',
    '_ZTSh', '_ZTSi', '_ZTSj', '_ZTSl', '_ZTSm', '_ZTSs', '_ZTSt',
    '_ZTSv', '_ZTSw', '_ZTSx', '_ZTSy'
]


def is_stdlib_symbol_name(name, sym):
    name = adjust_mangled_name(name)
    if re.search("@GLIBC|@GCC", name):
        # Only when the symbol is defined do we consider it ours.
        return sym['is_defined']
    if re.search('(St[0-9])|(__cxa)|(__cxxabi)', name):
        return True
    if name in new_delete_std_symbols:
        return True
    if name in cxxabi_symbols:
        return True
    if name.startswith('_Z'):
        return True
    return False


def filter_stdlib_symbols(syms):
    stdlib_symbols = []
    other_symbols = []
    for s in syms:
        canon_name = adjust_mangled_name(s['name'])
        if not is_stdlib_symbol_name(canon_name, s):
            other_symbols += [s]
        else:
            stdlib_symbols += [s]
    return stdlib_symbols, other_symbols
cccl-main
libcudacxx/libcxx/utils/libcxx/sym_check/util.py
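A usage sketch (not part of the repository) of is_stdlib_symbol_name, assuming the libcxx utils package is importable; the symbol dicts below are made up, and only the 'is_defined' field matters for the @GLIBC/@GCC case:

from libcxx.sym_check.util import is_stdlib_symbol_name

examples = [
    ('_ZNSt3__16vectorIiEC1Ev', {'is_defined': True}),   # mangled C++ name
    ('_Znwm',                   {'is_defined': True}),   # operator new
    ('memcpy@GLIBC_2.14',       {'is_defined': False}),  # undefined glibc ref
]
for name, sym in examples:
    print(name, '->', is_stdlib_symbol_name(name, sym))
# Expected: True, True, False (an undefined @GLIBC symbol is not ours).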
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

"""libcxx abi symbol checker"""

__author__ = 'Eric Fiselier'
__email__ = '[email protected]'
__versioninfo__ = (0, 1, 0)
__version__ = ' '.join(str(v) for v in __versioninfo__) + 'dev'

__all__ = ['diff', 'extract', 'util']
cccl-main
libcudacxx/libcxx/utils/libcxx/sym_check/__init__.py
# -*- Python -*- vim: set syntax=python tabstop=4 expandtab cc=80:
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
diff - A set of functions for diff-ing two symbol lists.
"""

from libcxx.sym_check import util


def _symbol_difference(lhs, rhs):
    lhs_names = set(((n['name'], n['type']) for n in lhs))
    rhs_names = set(((n['name'], n['type']) for n in rhs))
    diff_names = lhs_names - rhs_names
    return [n for n in lhs if (n['name'], n['type']) in diff_names]


def _find_by_key(sym_list, k):
    for sym in sym_list:
        if sym['name'] == k:
            return sym
    return None


def added_symbols(old, new):
    return _symbol_difference(new, old)


def removed_symbols(old, new):
    return _symbol_difference(old, new)


def changed_symbols(old, new):
    changed = []
    for old_sym in old:
        if old_sym in new:
            continue
        new_sym = _find_by_key(new, old_sym['name'])
        if (new_sym is not None and not new_sym in old
                and old_sym != new_sym):
            changed += [(old_sym, new_sym)]
    return changed


def diff(old, new):
    added = added_symbols(old, new)
    removed = removed_symbols(old, new)
    changed = changed_symbols(old, new)
    return added, removed, changed


def report_diff(added_syms, removed_syms, changed_syms, names_only=False,
                demangle=True):
    def maybe_demangle(name):
        return util.demangle_symbol(name) if demangle else name

    report = ''
    for sym in added_syms:
        report += 'Symbol added: %s\n' % maybe_demangle(sym['name'])
        if not names_only:
            report += '    %s\n\n' % sym
    if added_syms and names_only:
        report += '\n'
    for sym in removed_syms:
        report += 'SYMBOL REMOVED: %s\n' % maybe_demangle(sym['name'])
        if not names_only:
            report += '    %s\n\n' % sym
    if removed_syms and names_only:
        report += '\n'
    if not names_only:
        for sym_pair in changed_syms:
            old_sym, new_sym = sym_pair
            old_str = '\n    OLD SYMBOL: %s' % old_sym
            new_str = '\n    NEW SYMBOL: %s' % new_sym
            report += ('SYMBOL CHANGED: %s%s%s\n\n' %
                       (maybe_demangle(old_sym['name']),
                        old_str, new_str))

    added = bool(len(added_syms) != 0)
    abi_break = bool(len(removed_syms))
    if not names_only:
        abi_break = abi_break or len(changed_syms)
    if added or abi_break:
        report += 'Summary\n'
        report += '    Added:   %d\n' % len(added_syms)
        report += '    Removed: %d\n' % len(removed_syms)
        if not names_only:
            report += '    Changed: %d\n' % len(changed_syms)
        if not abi_break:
            report += 'Symbols added.'
        else:
            report += 'ABI BREAKAGE: SYMBOLS ADDED OR REMOVED!'
    else:
        report += 'Symbols match.'
    is_different = abi_break or bool(len(added_syms)) \
        or bool(len(changed_syms))
    return report, abi_break, is_different
cccl-main
libcudacxx/libcxx/utils/libcxx/sym_check/diff.py
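A usage sketch (not part of the repository) of diff() on two tiny made-up symbol lists, assuming the libcxx utils package is importable. Symbols are dicts keyed at least by 'name' and 'type', matching what read_syms_from_list produces:

from libcxx.sym_check.diff import diff

old = [{'name': '_Zfoo', 'type': 'FUNC'},
       {'name': '_Zbar', 'type': 'FUNC'}]
new = [{'name': '_Zfoo', 'type': 'FUNC'},
       {'name': '_Zbaz', 'type': 'FUNC'}]

added, removed, changed = diff(old, new)
# added   -> the '_Zbaz' entry (present only in new)
# removed -> the '_Zbar' entry (present only in old): an ABI break
# changed -> [] (no symbol kept its name while changing other fields)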
# -*- Python -*- vim: set syntax=python tabstop=4 expandtab cc=80:
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
extract - A set of functions that extract symbol lists from shared libraries.
"""

import distutils.spawn
import os.path
import sys
import re

import libcxx.util
from libcxx.sym_check import util

extract_ignore_names = ['_init', '_fini']

class NMExtractor(object):
    """
    NMExtractor - Extract symbol lists from libraries using nm.
    """

    @staticmethod
    def find_tool():
        """
        Search for the nm executable and return the path.
        """
        return distutils.spawn.find_executable('nm')

    def __init__(self, static_lib):
        """
        Initialize the nm executable and flags that will be used to extract
        symbols from shared libraries.
        """
        self.nm_exe = self.find_tool()
        if self.nm_exe is None:
            # ERROR no NM found
            print("ERROR: Could not find nm")
            sys.exit(1)
        self.static_lib = static_lib
        self.flags = ['-P', '-g']

    def extract(self, lib):
        """
        Extract symbols from a library and return the results as a dict of
        parsed symbols.
        """
        cmd = [self.nm_exe] + self.flags + [lib]
        out, _, exit_code = libcxx.util.executeCommandVerbose(cmd)
        if exit_code != 0:
            raise RuntimeError('Failed to run %s on %s' % (self.nm_exe, lib))
        fmt_syms = (self._extract_sym(l)
                    for l in out.splitlines() if l.strip())
        # Cast symbol to string.
        final_syms = (repr(s) for s in fmt_syms if self._want_sym(s))
        # Make unique and sort strings.
        tmp_list = list(sorted(set(final_syms)))
        # Cast string back to symbol.
        return util.read_syms_from_list(tmp_list)

    def _extract_sym(self, sym_str):
        bits = sym_str.split()
        # Everything we want has at least two columns.
        if len(bits) < 2:
            return None
        new_sym = {
            'name': bits[0],
            'type': bits[1],
            'is_defined': (bits[1].lower() != 'u')
        }
        new_sym['name'] = new_sym['name'].replace('@@', '@')
        new_sym = self._transform_sym_type(new_sym)
        # NM types which we want to save the size for.
        if new_sym['type'] == 'OBJECT' and len(bits) > 3:
            new_sym['size'] = int(bits[3], 16)
        return new_sym

    @staticmethod
    def _want_sym(sym):
        """
        Check that s is a valid symbol that we want to keep.
        """
        if sym is None or len(sym) < 2:
            return False
        if sym['name'] in extract_ignore_names:
            return False
        bad_types = ['t', 'b', 'r', 'd', 'w']
        return (sym['type'] not in bad_types
                and sym['name'] not in ['__bss_start', '_end', '_edata'])

    @staticmethod
    def _transform_sym_type(sym):
        """
        Map the nm single letter output for type to either FUNC or OBJECT.
        If the type is not recognized it is left unchanged.
        """
        func_types = ['T', 'W']
        obj_types = ['B', 'D', 'R', 'V', 'S']
        if sym['type'] in func_types:
            sym['type'] = 'FUNC'
        elif sym['type'] in obj_types:
            sym['type'] = 'OBJECT'
        return sym


class ReadElfExtractor(object):
    """
    ReadElfExtractor - Extract symbol lists from libraries using readelf.
    """

    @staticmethod
    def find_tool():
        """
        Search for the readelf executable and return the path.
        """
        return distutils.spawn.find_executable('readelf')

    def __init__(self, static_lib):
        """
        Initialize the readelf executable and flags that will be used to
        extract symbols from shared libraries.
        """
        self.tool = self.find_tool()
        if self.tool is None:
            # ERROR no readelf found
            print("ERROR: Could not find readelf")
            sys.exit(1)
        # TODO: Support readelf for reading symbols from archives
        assert not static_lib, "ReadElfExtractor does not yet support static libs"
        self.flags = ['--wide', '--symbols']

    def extract(self, lib):
        """
        Extract symbols from a library and return the results as a dict of
        parsed symbols.
        """
        cmd = [self.tool] + self.flags + [lib]
        out, _, exit_code = libcxx.util.executeCommandVerbose(cmd)
        if exit_code != 0:
            raise RuntimeError('Failed to run %s on %s' % (self.tool, lib))
        dyn_syms = self.get_dynsym_table(out)
        return self.process_syms(dyn_syms)

    def process_syms(self, sym_list):
        new_syms = []
        for s in sym_list:
            parts = s.split()
            if not parts:
                continue
            assert len(parts) == 7 or len(parts) == 8 or len(parts) == 9
            if len(parts) == 7:
                continue
            new_sym = {
                'name': parts[7],
                'size': int(parts[2]),
                'type': parts[3],
                'is_defined': (parts[6] != 'UND')
            }
            assert new_sym['type'] in ['OBJECT', 'FUNC', 'NOTYPE', 'TLS']
            if new_sym['name'] in extract_ignore_names:
                continue
            if new_sym['type'] == 'NOTYPE':
                continue
            if new_sym['type'] == 'FUNC':
                del new_sym['size']
            new_syms += [new_sym]
        return new_syms

    def get_dynsym_table(self, out):
        lines = out.splitlines()
        start = -1
        end = -1
        for i in range(len(lines)):
            if lines[i].startswith("Symbol table '.dynsym'"):
                start = i + 2
            if start != -1 and end == -1 and not lines[i].strip():
                end = i + 1
        assert start != -1
        if end == -1:
            end = len(lines)
        return lines[start:end]


def extract_symbols(lib_file, static_lib=None):
    """
    Extract and return a list of symbols extracted from a static or dynamic
    library. The symbols are extracted using NM or readelf. They are then
    filtered and formatted. Finally the symbols are made unique.
    """
    if static_lib is None:
        _, ext = os.path.splitext(lib_file)
        static_lib = True if ext in ['.a'] else False
    if ReadElfExtractor.find_tool() and not static_lib:
        extractor = ReadElfExtractor(static_lib=static_lib)
    else:
        extractor = NMExtractor(static_lib=static_lib)
    return extractor.extract(lib_file)
cccl-main
libcudacxx/libcxx/utils/libcxx/sym_check/extract.py
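NMExtractor._extract_sym consumes POSIX-format 'nm -P -g' output, where each line is roughly "name type [value size]". A standalone sketch (not part of the repository) over fabricated sample lines; it is simplified in that it checks the raw nm type letter directly rather than the transformed FUNC/OBJECT type:

sample_nm_output = """\
_Znwm T 0000000000001000 0000000000000020
_ZTVSt9exception D 0000000000002000 0000000000000028
_ZSt9terminatev U
"""

for line in sample_nm_output.splitlines():
    bits = line.split()
    if len(bits) < 2:
        continue
    sym = {'name': bits[0], 'type': bits[1],
           'is_defined': bits[1].lower() != 'u'}
    # Only object-like entries ('B', 'D', 'R', 'V', 'S') carry a size
    # worth keeping; the size column is hexadecimal.
    if bits[1] in ('B', 'D', 'R', 'V', 'S') and len(bits) > 3:
        sym['size'] = int(bits[3], 16)
    print(sym)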
# -*- Python -*- vim: set syntax=python tabstop=4 expandtab cc=80:
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
match - A set of functions for matching symbols in a list to a list of regexes
"""

import re

def find_and_report_matching(symbol_list, regex_list):
    report = ''
    found_count = 0
    for regex_str in regex_list:
        report += 'Matching regex "%s":\n' % regex_str
        matching_list = find_matching_symbols(symbol_list, regex_str)
        if not matching_list:
            report += '    No matches found\n\n'
            continue
        # else
        found_count += len(matching_list)
        for m in matching_list:
            report += '    MATCHES: %s\n' % m['name']
        report += '\n'
    return found_count, report

def find_matching_symbols(symbol_list, regex_str):
    regex = re.compile(regex_str)
    matching_list = []
    for s in symbol_list:
        if regex.match(s['name']):
            matching_list += [s]
    return matching_list
cccl-main
libcudacxx/libcxx/utils/libcxx/sym_check/match.py
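An illustrative call with made-up mangled names; find_and_report_matching() expects symbol dicts with a 'name' key, as produced by extract.py.

symbols = [{'name': '_ZSt9terminatev'}, {'name': 'memcpy'}]
count, report = find_and_report_matching(symbols, ['_ZSt.*', 'mem.*'])
print('%d matching symbols' % count)  # each regex matches once here
print(report)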
#===----------------------------------------------------------------------===## # # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. # See https://llvm.org/LICENSE.txt for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # #===----------------------------------------------------------------------===## """GDB pretty-printers for libc++. These should work for objects compiled when _LIBCUDACXX_ABI_UNSTABLE is defined and when it is undefined. """ from __future__ import print_function import re import gdb # One under-documented feature of the gdb pretty-printer API # is that clients can call any other member of the API # before they call to_string. # Therefore all self.FIELDs must be set in the pretty-printer's # __init__ function. _void_pointer_type = gdb.lookup_type("void").pointer() _long_int_type = gdb.lookup_type("unsigned long long") _libcpp_big_endian = False def addr_as_long(addr): return int(addr.cast(_long_int_type)) # The size of a pointer in bytes. _pointer_size = _void_pointer_type.sizeof def _remove_cxx_namespace(typename): """Removed libc++ specific namespace from the type. Arguments: typename(string): A type, such as std::__u::something. Returns: A string without the libc++ specific part, such as std::something. """ return re.sub("std::__.*?::", "std::", typename) def _remove_generics(typename): """Remove generics part of the type. Assumes typename is not empty. Arguments: typename(string): A type such as std::my_collection<element>. Returns: The prefix up to the generic part, such as std::my_collection. """ match = re.match("^([^<]+)", typename) return match.group(1) # Some common substitutions on the types to reduce visual clutter (A user who # wants to see the actual details can always use print/r). _common_substitutions = [ ("std::basic_string<char, std::char_traits<char>, std::allocator<char> >", "std::string"), ] def _prettify_typename(gdb_type): """Returns a pretty name for the type, or None if no name can be found. Arguments: gdb_type(gdb.Type): A type object. Returns: A string, without type_defs, libc++ namespaces, and common substitutions applied. """ type_without_typedefs = gdb_type.strip_typedefs() typename = type_without_typedefs.name or type_without_typedefs.tag or \ str(type_without_typedefs) result = _remove_cxx_namespace(typename) for find_str, subst_str in _common_substitutions: result = re.sub(find_str, subst_str, result) return result def _typename_for_nth_generic_argument(gdb_type, n): """Returns a pretty string for the nth argument of the given type. Arguments: gdb_type(gdb.Type): A type object, such as the one for std::map<int, int> n: The (zero indexed) index of the argument to return. Returns: A string for the nth argument, such a "std::string" """ element_type = gdb_type.template_argument(n) return _prettify_typename(element_type) def _typename_with_n_generic_arguments(gdb_type, n): """Return a string for the type with the first n (1, ...) 
generic args.""" base_type = _remove_generics(_prettify_typename(gdb_type)) arg_list = [base_type] template = "%s<" for i in range(n): arg_list.append(_typename_for_nth_generic_argument(gdb_type, i)) template += "%s, " result = (template[:-2] + ">") % tuple(arg_list) return result def _typename_with_first_generic_argument(gdb_type): return _typename_with_n_generic_arguments(gdb_type, 1) class StdTuplePrinter(object): """Print a std::tuple.""" class _Children(object): """Class to iterate over the tuple's children.""" def __init__(self, val): self.val = val self.child_iter = iter(self.val["__base_"].type.fields()) self.count = 0 def __iter__(self): return self def next(self): # child_iter raises StopIteration when appropriate. field_name = self.child_iter.next() child = self.val["__base_"][field_name]["__value_"] self.count += 1 return ("[%d]" % self.count, child) def __init__(self, val): self.val = val def to_string(self): typename = _remove_generics(_prettify_typename(self.val.type)) if not self.val.type.fields(): return "empty %s" % typename return "%s containing" % typename def children(self): if not self.val.type.fields(): return iter(()) return self._Children(self.val) def _get_base_subobject(child_class_value, index=0): """Returns the object's value in the form of the parent class at index. This function effectively casts the child_class_value to the base_class's type, but the type-to-cast to is stored in the field at index, and once we know the field, we can just return the data. Args: child_class_value: the value to cast index: the parent class index Raises: Exception: field at index was not a base-class field. """ field = child_class_value.type.fields()[index] if not field.is_base_class: raise Exception("Not a base-class field.") return child_class_value[field] def _value_of_pair_first(value): """Convenience for _get_base_subobject, for the common case.""" return _get_base_subobject(value, 0)["__value_"] class StdStringPrinter(object): """Print a std::string.""" def _get_short_size(self, short_field, short_size): """Short size depends on both endianness and a compile-time define.""" # If the padding field is present after all this indirection, then string # was compiled with _LIBCUDACXX_ABI_ALTERNATE_STRING_LAYOUT defined. field = short_field.type.fields()[1].type.fields()[0] libcpp_abi_alternate_string_layout = field.name and "__padding" in field.name # This logical structure closely follows the original code (which is clearer # in C++). Keep them parallel to make them easier to compare. if libcpp_abi_alternate_string_layout: if _libcpp_big_endian: return short_size >> 1 else: return short_size elif _libcpp_big_endian: return short_size else: return short_size >> 1 def __init__(self, val): self.val = val def to_string(self): """Build a python string from the data whether stored inline or separately.""" value_field = _value_of_pair_first(self.val["__r_"]) short_field = value_field["__s"] short_size = short_field["__size_"] if short_size == 0: return "" short_mask = self.val["__short_mask"] # Counter intuitive to compare the size and short_mask to see if the string # is long, but that's the way the implementation does it. Note that # __is_long() doesn't use get_short_size in C++. 
is_long = short_size & short_mask if is_long: long_field = value_field["__l"] data = long_field["__data_"] size = long_field["__size_"] else: data = short_field["__data_"] size = self._get_short_size(short_field, short_size) if hasattr(data, "lazy_string"): return data.lazy_string(length=size) return data.string(length=size) def display_hint(self): return "string" class StdUniquePtrPrinter(object): """Print a std::unique_ptr.""" def __init__(self, val): self.val = val self.addr = _value_of_pair_first(self.val["__ptr_"]) self.pointee_type = self.val.type.template_argument(0) def to_string(self): typename = _remove_generics(_prettify_typename(self.val.type)) if not self.addr: return "%s is nullptr" % typename return ("%s<%s> containing" % (typename, _remove_generics(_prettify_typename(self.pointee_type)))) def __iter__(self): if self.addr: yield "__ptr_", self.addr.cast(self.pointee_type.pointer()) def children(self): return self class StdSharedPointerPrinter(object): """Print a std::shared_ptr.""" def __init__(self, val): self.val = val self.addr = self.val["__ptr_"] def to_string(self): """Returns self as a string.""" typename = _remove_generics(_prettify_typename(self.val.type)) pointee_type = _remove_generics( _prettify_typename(self.val.type.template_argument(0))) if not self.addr: return "%s is nullptr" % typename refcount = self.val["__cntrl_"] if refcount != 0: usecount = refcount["__shared_owners_"] + 1 weakcount = refcount["__shared_weak_owners_"] if usecount == 0: state = "expired, weak %d" % weakcount else: state = "count %d, weak %d" % (usecount, weakcount) return "%s<%s> %s containing" % (typename, pointee_type, state) def __iter__(self): if self.addr: yield "__ptr_", self.addr def children(self): return self class StdVectorPrinter(object): """Print a std::vector.""" class _VectorBoolIterator(object): """Class to iterate over the bool vector's children.""" def __init__(self, begin, size, bits_per_word): self.item = begin self.size = size self.bits_per_word = bits_per_word self.count = 0 self.offset = 0 def __iter__(self): return self def next(self): """Retrieve the next element.""" self.count += 1 if self.count > self.size: raise StopIteration entry = self.item.dereference() if entry & (1 << self.offset): outbit = 1 else: outbit = 0 self.offset += 1 if self.offset >= self.bits_per_word: self.item += 1 self.offset = 0 return ("[%d]" % self.count, outbit) class _VectorIterator(object): """Class to iterate over the non-bool vector's children.""" def __init__(self, begin, end): self.item = begin self.end = end self.count = 0 def __iter__(self): return self def next(self): self.count += 1 if self.item == self.end: raise StopIteration entry = self.item.dereference() self.item += 1 return ("[%d]" % self.count, entry) def __init__(self, val): """Set val, length, capacity, and iterator for bool and normal vectors.""" self.val = val self.typename = _remove_generics(_prettify_typename(val.type)) begin = self.val["__begin_"] if self.val.type.template_argument(0).code == gdb.TYPE_CODE_BOOL: self.typename += "<bool>" self.length = self.val["__size_"] bits_per_word = self.val["__bits_per_word"] self.capacity = _value_of_pair_first( self.val["__cap_alloc_"]) * bits_per_word self.iterator = self._VectorBoolIterator( begin, self.length, bits_per_word) else: end = self.val["__end_"] self.length = end - begin self.capacity = _get_base_subobject( self.val["__end_cap_"])["__value_"] - begin self.iterator = self._VectorIterator(begin, end) def to_string(self): return ("%s of length %d, capacity %d" % 
                (self.typename, self.length, self.capacity))

    def children(self):
        return self.iterator

    def display_hint(self):
        return "array"


class StdBitsetPrinter(object):
    """Print a std::bitset."""

    def __init__(self, val):
        self.val = val
        self.n_words = int(self.val["__n_words"])
        self.bits_per_word = int(self.val["__bits_per_word"])
        if self.n_words == 1:
            self.values = [int(self.val["__first_"])]
        else:
            self.values = [int(self.val["__first_"][index])
                           for index in range(self.n_words)]

    def to_string(self):
        typename = _prettify_typename(self.val.type)
        return "%s" % typename

    def _byte_it(self, value):
        index = -1
        while value:
            index += 1
            will_yield = value % 2
            # Floor division: plain '/' would produce floats on Python 3.
            value //= 2
            if will_yield:
                yield index

    def _list_it(self):
        for word_index in range(self.n_words):
            current = self.values[word_index]
            if current:
                for n in self._byte_it(current):
                    yield ("[%d]" % (word_index * self.bits_per_word + n), 1)

    def __iter__(self):
        return self._list_it()

    def children(self):
        return self


class StdDequePrinter(object):
    """Print a std::deque."""

    def __init__(self, val):
        self.val = val
        self.size = int(_value_of_pair_first(val["__size_"]))
        self.start_ptr = self.val["__map_"]["__begin_"]
        self.first_block_start_index = int(self.val["__start_"])
        self.node_type = self.start_ptr.type
        self.block_size = self._calculate_block_size(
            val.type.template_argument(0))

    def _calculate_block_size(self, element_type):
        """Calculates the number of elements in a full block."""
        size = element_type.sizeof
        # Copied from struct __deque_block_size implementation of libcxx.
        # Floor division keeps the result an int on Python 3.
        return 4096 // size if size < 256 else 16

    def _bucket_it(self, start_addr, start_index, end_index):
        for i in range(start_index, end_index):
            yield i, (start_addr.dereference() + i).dereference()

    def _list_it(self):
        """Primary iteration worker."""
        num_emitted = 0
        current_addr = self.start_ptr
        start_index = self.first_block_start_index
        while num_emitted < self.size:
            end_index = min(start_index + self.size - num_emitted,
                            self.block_size)
            for _, elem in self._bucket_it(current_addr, start_index,
                                           end_index):
                yield "", elem
            num_emitted += end_index - start_index
            current_addr = gdb.Value(
                addr_as_long(current_addr) + _pointer_size).cast(self.node_type)
            start_index = 0

    def to_string(self):
        typename = _remove_generics(_prettify_typename(self.val.type))
        if self.size:
            return "%s with %d elements" % (typename, self.size)
        return "%s is empty" % typename

    def __iter__(self):
        return self._list_it()

    def children(self):
        return self

    def display_hint(self):
        return "array"


class StdListPrinter(object):
    """Print a std::list."""

    def __init__(self, val):
        self.val = val
        size_alloc_field = self.val["__size_alloc_"]
        self.size = int(_value_of_pair_first(size_alloc_field))
        dummy_node = self.val["__end_"]
        self.nodetype = gdb.lookup_type(
            re.sub("__list_node_base", "__list_node",
                   str(dummy_node.type.strip_typedefs()))).pointer()
        self.first_node = dummy_node["__next_"]

    def to_string(self):
        typename = _remove_generics(_prettify_typename(self.val.type))
        if self.size:
            return "%s with %d elements" % (typename, self.size)
        return "%s is empty" % typename

    def _list_iter(self):
        current_node = self.first_node
        for _ in range(self.size):
            yield "", current_node.cast(self.nodetype).dereference()["__value_"]
            current_node = current_node.dereference()["__next_"]

    def __iter__(self):
        return self._list_iter()

    def children(self):
        return self if self.nodetype else iter(())

    def display_hint(self):
        return "array"


class StdQueueOrStackPrinter(object):
    """Print a std::queue or std::stack."""

    def __init__(self, val):
        self.val = val
        self.underlying = val["c"]

    def to_string(self):
        typename = _remove_generics(_prettify_typename(self.val.type))
        return "%s wrapping" % typename

    def children(self):
        return iter([("", self.underlying)])

    def display_hint(self):
        return "array"


class StdPriorityQueuePrinter(object):
    """Print a std::priority_queue."""

    def __init__(self, val):
        self.val = val
        self.underlying = val["c"]

    def to_string(self):
        # TODO(tamur): It would be nice to print the top element. The technical
        # difficulty is that, the implementation refers to the underlying
        # container, which is a generic class. libstdcxx pretty printers do not
        # print the top element.
        typename = _remove_generics(_prettify_typename(self.val.type))
        return "%s wrapping" % typename

    def children(self):
        return iter([("", self.underlying)])

    def display_hint(self):
        return "array"


class RBTreeUtils(object):
    """Utility class for std::(multi)map, and std::(multi)set and iterators."""

    def __init__(self, cast_type, root):
        self.cast_type = cast_type
        self.root = root

    def left_child(self, node):
        result = node.cast(self.cast_type).dereference()["__left_"]
        return result

    def right_child(self, node):
        result = node.cast(self.cast_type).dereference()["__right_"]
        return result

    def parent(self, node):
        """Return the parent of node, if it exists."""
        # If this is the root, then from the algorithm's point of view, it has
        # no parent.
        if node == self.root:
            return None

        # We don't have enough information to tell if this is the end_node
        # (which doesn't have a __parent_ field), or the root (which doesn't
        # have a parent from the algorithm's point of view), so cast_type may
        # not be correct for this particular node. Use heuristics.

        # The end_node's left child is the root. Note that when printing
        # iterators in isolation, the root is unknown.
        if self.left_child(node) == self.root:
            return None

        parent = node.cast(self.cast_type).dereference()["__parent_"]
        # If the value at the offset of __parent_ doesn't look like a valid
        # pointer, then assume that node is the end_node (and therefore has no
        # parent). End_node type has a pointer embedded, so should have pointer
        # alignment.
        if addr_as_long(parent) % _void_pointer_type.alignof:
            return None
        # This is ugly, but the only other option is to dereference an invalid
        # pointer. 0x8000 is fairly arbitrary, but has had good results in
        # practice. If there was a way to tell if a pointer is invalid without
        # actually dereferencing it and spewing error messages, that would be
        # ideal.
if parent < 0x8000: return None return parent def is_left_child(self, node): parent = self.parent(node) return parent is not None and self.left_child(parent) == node def is_right_child(self, node): parent = self.parent(node) return parent is not None and self.right_child(parent) == node class AbstractRBTreePrinter(object): """Abstract super class for std::(multi)map, and std::(multi)set.""" def __init__(self, val): self.val = val tree = self.val["__tree_"] self.size = int(_value_of_pair_first(tree["__pair3_"])) dummy_root = tree["__pair1_"] root = _value_of_pair_first(dummy_root)["__left_"] cast_type = self._init_cast_type(val.type) self.util = RBTreeUtils(cast_type, root) def _get_key_value(self, node): """Subclasses should override to return a list of values to yield.""" raise NotImplementedError def _traverse(self): """Traverses the binary search tree in order.""" current = self.util.root skip_left_child = False while True: if not skip_left_child and self.util.left_child(current): current = self.util.left_child(current) continue skip_left_child = False for key_value in self._get_key_value(current): yield "", key_value right_child = self.util.right_child(current) if right_child: current = right_child continue while self.util.is_right_child(current): current = self.util.parent(current) if self.util.is_left_child(current): current = self.util.parent(current) skip_left_child = True continue break def __iter__(self): return self._traverse() def children(self): return self if self.util.cast_type and self.size > 0 else iter(()) def to_string(self): typename = _remove_generics(_prettify_typename(self.val.type)) if self.size: return "%s with %d elements" % (typename, self.size) return "%s is empty" % typename class StdMapPrinter(AbstractRBTreePrinter): """Print a std::map or std::multimap.""" def _init_cast_type(self, val_type): map_it_type = gdb.lookup_type( str(val_type) + "::iterator").strip_typedefs() tree_it_type = map_it_type.template_argument(0) node_ptr_type = tree_it_type.template_argument(1) return node_ptr_type def display_hint(self): return "map" def _get_key_value(self, node): key_value = node.cast(self.util.cast_type).dereference()[ "__value_"]["__cc"] return [key_value["first"], key_value["second"]] class StdSetPrinter(AbstractRBTreePrinter): """Print a std::set.""" def _init_cast_type(self, val_type): set_it_type = gdb.lookup_type( str(val_type) + "::iterator").strip_typedefs() node_ptr_type = set_it_type.template_argument(1) return node_ptr_type def display_hint(self): return "array" def _get_key_value(self, node): key_value = node.cast(self.util.cast_type).dereference()["__value_"] return [key_value] class AbstractRBTreeIteratorPrinter(object): """Abstract super class for std::(multi)map, and std::(multi)set iterator.""" def _initialize(self, val, typename): self.typename = typename self.val = val self.addr = self.val["__ptr_"] cast_type = self.val.type.template_argument(1) self.util = RBTreeUtils(cast_type, None) if self.addr: self.node = self.addr.cast(cast_type).dereference() def _is_valid_node(self): if not self.util.parent(self.addr): return False return self.util.is_left_child(self.addr) or \ self.util.is_right_child(self.addr) def to_string(self): if not self.addr: return "%s is nullptr" % self.typename return "%s " % self.typename def _get_node_value(self, node): raise NotImplementedError def __iter__(self): addr_str = "[%s]" % str(self.addr) if not self._is_valid_node(): yield addr_str, " end()" else: yield addr_str, self._get_node_value(self.node) def children(self): 
return self if self.addr else iter(()) class MapIteratorPrinter(AbstractRBTreeIteratorPrinter): """Print a std::(multi)map iterator.""" def __init__(self, val): self._initialize(val["__i_"], _remove_generics(_prettify_typename(val.type))) def _get_node_value(self, node): return node["__value_"]["__cc"] class SetIteratorPrinter(AbstractRBTreeIteratorPrinter): """Print a std::(multi)set iterator.""" def __init__(self, val): self._initialize(val, _remove_generics(_prettify_typename(val.type))) def _get_node_value(self, node): return node["__value_"] class StdFposPrinter(object): """Print a std::fpos or std::streampos.""" def __init__(self, val): self.val = val def to_string(self): typename = _remove_generics(_prettify_typename(self.val.type)) offset = self.val["__off_"] state = self.val["__st_"] count = state["__count"] value = state["__value"]["__wch"] return "%s with stream offset:%s with state: {count:%s value:%s}" % ( typename, offset, count, value) class AbstractUnorderedCollectionPrinter(object): """Abstract super class for std::unordered_(multi)[set|map].""" def __init__(self, val): self.val = val self.table = val["__table_"] self.sentinel = self.table["__p1_"] self.size = int(_value_of_pair_first(self.table["__p2_"])) node_base_type = self.sentinel.type.template_argument(0) self.cast_type = node_base_type.template_argument(0) def _list_it(self, sentinel_ptr): next_ptr = _value_of_pair_first(sentinel_ptr)["__next_"] while str(next_ptr.cast(_void_pointer_type)) != "0x0": next_val = next_ptr.cast(self.cast_type).dereference() for key_value in self._get_key_value(next_val): yield "", key_value next_ptr = next_val["__next_"] def to_string(self): typename = _remove_generics(_prettify_typename(self.val.type)) if self.size: return "%s with %d elements" % (typename, self.size) return "%s is empty" % typename def _get_key_value(self, node): """Subclasses should override to return a list of values to yield.""" raise NotImplementedError def children(self): return self if self.cast_type and self.size > 0 else iter(()) def __iter__(self): return self._list_it(self.sentinel) class StdUnorderedSetPrinter(AbstractUnorderedCollectionPrinter): """Print a std::unordered_(multi)set.""" def _get_key_value(self, node): return [node["__value_"]] def display_hint(self): return "array" class StdUnorderedMapPrinter(AbstractUnorderedCollectionPrinter): """Print a std::unordered_(multi)map.""" def _get_key_value(self, node): key_value = node["__value_"]["__cc"] return [key_value["first"], key_value["second"]] def display_hint(self): return "map" class AbstractHashMapIteratorPrinter(object): """Abstract class for unordered collection iterators.""" def _initialize(self, val, addr): self.val = val self.typename = _remove_generics(_prettify_typename(self.val.type)) self.addr = addr if self.addr: self.node = self.addr.cast(self.cast_type).dereference() def _get_key_value(self): """Subclasses should override to return a list of values to yield.""" raise NotImplementedError def to_string(self): if not self.addr: return "%s = end()" % self.typename return "%s " % self.typename def children(self): return self if self.addr else iter(()) def __iter__(self): for key_value in self._get_key_value(): yield "", key_value class StdUnorderedSetIteratorPrinter(AbstractHashMapIteratorPrinter): """Print a std::(multi)set iterator.""" def __init__(self, val): self.cast_type = val.type.template_argument(0) self._initialize(val, val["__node_"]) def _get_key_value(self): return [self.node["__value_"]] def display_hint(self): return 
"array" class StdUnorderedMapIteratorPrinter(AbstractHashMapIteratorPrinter): """Print a std::(multi)map iterator.""" def __init__(self, val): self.cast_type = val.type.template_argument(0).template_argument(0) self._initialize(val, val["__i_"]["__node_"]) def _get_key_value(self): key_value = self.node["__value_"]["__cc"] return [key_value["first"], key_value["second"]] def display_hint(self): return "map" def _remove_std_prefix(typename): match = re.match("^std::(.+)", typename) return match.group(1) if match is not None else "" class LibcxxPrettyPrinter(object): """PrettyPrinter object so gdb-commands like 'info pretty-printers' work.""" def __init__(self, name): super(LibcxxPrettyPrinter, self).__init__() self.name = name self.enabled = True self.lookup = { "basic_string": StdStringPrinter, "string": StdStringPrinter, "tuple": StdTuplePrinter, "unique_ptr": StdUniquePtrPrinter, "shared_ptr": StdSharedPointerPrinter, "weak_ptr": StdSharedPointerPrinter, "bitset": StdBitsetPrinter, "deque": StdDequePrinter, "list": StdListPrinter, "queue": StdQueueOrStackPrinter, "stack": StdQueueOrStackPrinter, "priority_queue": StdPriorityQueuePrinter, "map": StdMapPrinter, "multimap": StdMapPrinter, "set": StdSetPrinter, "multiset": StdSetPrinter, "vector": StdVectorPrinter, "__map_iterator": MapIteratorPrinter, "__map_const_iterator": MapIteratorPrinter, "__tree_iterator": SetIteratorPrinter, "__tree_const_iterator": SetIteratorPrinter, "fpos": StdFposPrinter, "unordered_set": StdUnorderedSetPrinter, "unordered_multiset": StdUnorderedSetPrinter, "unordered_map": StdUnorderedMapPrinter, "unordered_multimap": StdUnorderedMapPrinter, "__hash_map_iterator": StdUnorderedMapIteratorPrinter, "__hash_map_const_iterator": StdUnorderedMapIteratorPrinter, "__hash_iterator": StdUnorderedSetIteratorPrinter, "__hash_const_iterator": StdUnorderedSetIteratorPrinter, } self.subprinters = [] for name, subprinter in self.lookup.items(): # Subprinters and names are used only for the rarely used command "info # pretty" (and related), so the name of the first data structure it prints # is a reasonable choice. if subprinter not in self.subprinters: subprinter.name = name self.subprinters.append(subprinter) def __call__(self, val): """Return the pretty printer for a val, if the type is supported.""" # Do not handle any type that is not a struct/class. if val.type.strip_typedefs().code != gdb.TYPE_CODE_STRUCT: return None # Don't attempt types known to be inside libstdcxx. typename = val.type.name or val.type.tag or str(val.type) match = re.match("^std::(__.*?)::", typename) if match is None or match.group(1) in ["__cxx1998", "__debug", "__7", "__g"]: return None # Handle any using declarations or other typedefs. typename = _prettify_typename(val.type) if not typename: return None without_generics = _remove_generics(typename) lookup_name = _remove_std_prefix(without_generics) if lookup_name in self.lookup: return self.lookup[lookup_name](val) return None _libcxx_printer_name = "libcxx_pretty_printer" # These are called for every binary object file, which could be thousands in # certain pathological cases. Limit our pretty printers to the progspace. def _register_libcxx_printers(event): progspace = event.new_objfile.progspace # It would be ideal to get the endianness at print time, but # gdb.execute clears gdb's internal wrap buffer, removing any values # already generated as part of a larger data structure, and there is # no python api to get the endianness. 
    # Mixed-endianness debugging is rare enough that this workaround should
    # be adequate.
    global _libcpp_big_endian
    _libcpp_big_endian = "big endian" in gdb.execute("show endian",
                                                     to_string=True)

    if not getattr(progspace, _libcxx_printer_name, False):
        print("Loading libc++ pretty-printers.")
        gdb.printing.register_pretty_printer(
            progspace, LibcxxPrettyPrinter(_libcxx_printer_name))
        setattr(progspace, _libcxx_printer_name, True)


def _unregister_libcxx_printers(event):
    progspace = event.progspace
    if getattr(progspace, _libcxx_printer_name, False):
        for printer in progspace.pretty_printers:
            if getattr(printer, "name", "none") == _libcxx_printer_name:
                progspace.pretty_printers.remove(printer)
                setattr(progspace, _libcxx_printer_name, False)
                break


def register_libcxx_printer_loader():
    """Register event handlers to load libc++ pretty-printers."""
    gdb.events.new_objfile.connect(_register_libcxx_printers)
    gdb.events.clear_objfiles.connect(_unregister_libcxx_printers)
cccl-main
libcudacxx/libcxx/utils/gdb/libcxx/printers.py
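A sketch of how these printers are typically wired into a gdb session, e.g. from a `python ... end` block in ~/.gdbinit; the checkout path is an assumption.

import sys
sys.path.insert(0, '/path/to/libcudacxx/libcxx/utils/gdb')  # assumed location
from libcxx import printers
printers.register_libcxx_printer_loader()  # defined at the end of the file above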
#! /usr/bin/env python # encoding: utf-8 import argparse import errno import logging import os import platform import re import sys import subprocess import tempfile try: import winreg except ImportError: import _winreg as winreg try: import urllib.request as request except ImportError: import urllib as request try: import urllib.parse as parse except ImportError: import urlparse as parse class EmptyLogger(object): ''' Provides an implementation that performs no logging ''' def debug(self, *k, **kw): pass def info(self, *k, **kw): pass def warn(self, *k, **kw): pass def error(self, *k, **kw): pass def critical(self, *k, **kw): pass def setLevel(self, *k, **kw): pass urls = ( 'http://downloads.sourceforge.net/project/mingw-w64/Toolchains%20' 'targetting%20Win32/Personal%20Builds/mingw-builds/installer/' 'repository.txt', 'http://downloads.sourceforge.net/project/mingwbuilds/host-windows/' 'repository.txt' ) ''' A list of mingw-build repositories ''' def repository(urls = urls, log = EmptyLogger()): ''' Downloads and parse mingw-build repository files and parses them ''' log.info('getting mingw-builds repository') versions = {} re_sourceforge = re.compile(r'http://sourceforge.net/projects/([^/]+)/files') re_sub = r'http://downloads.sourceforge.net/project/\1' for url in urls: log.debug(' - requesting: %s', url) socket = request.urlopen(url) repo = socket.read() if not isinstance(repo, str): repo = repo.decode(); socket.close() for entry in repo.split('\n')[:-1]: value = entry.split('|') version = tuple([int(n) for n in value[0].strip().split('.')]) version = versions.setdefault(version, {}) arch = value[1].strip() if arch == 'x32': arch = 'i686' elif arch == 'x64': arch = 'x86_64' arch = version.setdefault(arch, {}) threading = arch.setdefault(value[2].strip(), {}) exceptions = threading.setdefault(value[3].strip(), {}) revision = exceptions.setdefault(int(value[4].strip()[3:]), re_sourceforge.sub(re_sub, value[5].strip())) return versions def find_in_path(file, path=None): ''' Attempts to find an executable in the path ''' if platform.system() == 'Windows': file += '.exe' if path is None: path = os.environ.get('PATH', '') if type(path) is type(''): path = path.split(os.pathsep) return list(filter(os.path.exists, map(lambda dir, file=file: os.path.join(dir, file), path))) def find_7zip(log = EmptyLogger()): ''' Attempts to find 7zip for unpacking the mingw-build archives ''' log.info('finding 7zip') path = find_in_path('7z') if not path: key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\7-Zip') path, _ = winreg.QueryValueEx(key, 'Path') path = [os.path.join(path, '7z.exe')] log.debug('found \'%s\'', path[0]) return path[0] find_7zip() def unpack(archive, location, log = EmptyLogger()): ''' Unpacks a mingw-builds archive ''' sevenzip = find_7zip(log) log.info('unpacking %s', os.path.basename(archive)) cmd = [sevenzip, 'x', archive, '-o' + location, '-y'] log.debug(' - %r', cmd) with open(os.devnull, 'w') as devnull: subprocess.check_call(cmd, stdout = devnull) def download(url, location, log = EmptyLogger()): ''' Downloads and unpacks a mingw-builds archive ''' log.info('downloading MinGW') log.debug(' - url: %s', url) log.debug(' - location: %s', location) re_content = re.compile(r'attachment;[ \t]*filename=(")?([^"]*)(")?[\r\n]*') stream = request.urlopen(url) try: content = stream.getheader('Content-Disposition') or '' except AttributeError: content = stream.headers.getheader('Content-Disposition') or '' matches = re_content.match(content) if matches: filename = matches.group(2) 
    else:
        parsed = parse.urlparse(stream.geturl())
        filename = os.path.basename(parsed.path)

    try:
        os.makedirs(location)
    except OSError as e:
        if e.errno == errno.EEXIST and os.path.isdir(location):
            pass
        else:
            raise

    archive = os.path.join(location, filename)
    with open(archive, 'wb') as out:
        while True:
            buf = stream.read(1024)
            if not buf:
                break
            out.write(buf)
    unpack(archive, location, log = log)
    os.remove(archive)

    possible = os.path.join(location, 'mingw64')
    if not os.path.exists(possible):
        possible = os.path.join(location, 'mingw32')
        if not os.path.exists(possible):
            raise ValueError('Failed to find unpacked MinGW: ' + possible)
    return possible

def root(location = None, arch = None, version = None, threading = None,
        exceptions = None, revision = None, log = EmptyLogger()):
    '''
    Returns the root folder of a specific version of the mingw-builds variant
    of gcc. Will download the compiler if needed
    '''

    # Get the repository if we don't have all the information
    if not (arch and version and threading and exceptions and revision):
        versions = repository(log = log)

    # Determine some defaults
    version = version or max(versions.keys())
    if not arch:
        arch = platform.machine().lower()
        if arch == 'x86':
            arch = 'i686'
        elif arch == 'amd64':
            arch = 'x86_64'
    if not threading:
        keys = versions[version][arch].keys()
        if 'posix' in keys:
            threading = 'posix'
        elif 'win32' in keys:
            threading = 'win32'
        else:
            # keys() is a view on Python 3; index through a list.
            threading = list(keys)[0]
    if not exceptions:
        keys = versions[version][arch][threading].keys()
        if 'seh' in keys:
            exceptions = 'seh'
        elif 'sjlj' in keys:
            exceptions = 'sjlj'
        else:
            exceptions = list(keys)[0]
    if revision is None:
        revision = max(versions[version][arch][threading][exceptions].keys())
    if not location:
        location = os.path.join(tempfile.gettempdir(), 'mingw-builds')

    # Get the download url
    url = versions[version][arch][threading][exceptions][revision]

    # Tell the user whatzzup
    log.info('finding MinGW %s', '.'.join(str(v) for v in version))
    log.debug(' - arch: %s', arch)
    log.debug(' - threading: %s', threading)
    log.debug(' - exceptions: %s', exceptions)
    log.debug(' - revision: %s', revision)
    log.debug(' - url: %s', url)

    # Store each specific revision differently
    slug = '{version}-{arch}-{threading}-{exceptions}-rev{revision}'
    slug = slug.format(
        version = '.'.join(str(v) for v in version),
        arch = arch,
        threading = threading,
        exceptions = exceptions,
        revision = revision
    )
    if arch == 'x86_64':
        root_dir = os.path.join(location, slug, 'mingw64')
    elif arch == 'i686':
        root_dir = os.path.join(location, slug, 'mingw32')
    else:
        raise ValueError('Unknown MinGW arch: ' + arch)

    # Download if needed
    if not os.path.exists(root_dir):
        downloaded = download(url, os.path.join(location, slug), log = log)
        if downloaded != root_dir:
            raise ValueError('The location of mingw did not match\n%s\n%s'
                % (downloaded, root_dir))

    return root_dir

def str2ver(string):
    '''
    Converts a version string into a tuple
    '''
    try:
        version = tuple(int(v) for v in string.split('.'))
        if len(version) != 3:
            raise ValueError()
    except ValueError:
        raise argparse.ArgumentTypeError(
            'please provide a three digit version string')
    return version

def main():
    '''
    Invoked when the script is run directly by the python interpreter
    '''
    parser = argparse.ArgumentParser(
        description = 'Downloads a specific version of MinGW',
        formatter_class = argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('--location',
        help = 'the location to download the compiler to',
        default = os.path.join(tempfile.gettempdir(), 'mingw-builds'))
    parser.add_argument('--arch', required = True, choices = ['i686',
'x86_64'], help = 'the target MinGW architecture string') parser.add_argument('--version', type = str2ver, help = 'the version of GCC to download') parser.add_argument('--threading', choices = ['posix', 'win32'], help = 'the threading type of the compiler') parser.add_argument('--exceptions', choices = ['sjlj', 'seh', 'dwarf'], help = 'the method to throw exceptions') parser.add_argument('--revision', type=int, help = 'the revision of the MinGW release') group = parser.add_mutually_exclusive_group() group.add_argument('-v', '--verbose', action='store_true', help='increase the script output verbosity') group.add_argument('-q', '--quiet', action='store_true', help='only print errors and warning') args = parser.parse_args() # Create the logger logger = logging.getLogger('mingw') handler = logging.StreamHandler() formatter = logging.Formatter('%(message)s') handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging.INFO) if args.quiet: logger.setLevel(logging.WARN) if args.verbose: logger.setLevel(logging.DEBUG) # Get MinGW root_dir = root(location = args.location, arch = args.arch, version = args.version, threading = args.threading, exceptions = args.exceptions, revision = args.revision, log = logger) sys.stdout.write('%s\n' % os.path.join(root_dir, 'bin')) if __name__ == '__main__': try: main() except IOError as e: sys.stderr.write('IO error: %s\n' % e) sys.exit(1) except OSError as e: sys.stderr.write('OS error: %s\n' % e) sys.exit(1) except KeyboardInterrupt as e: sys.stderr.write('Killed\n') sys.exit(1)
cccl-main
libcudacxx/libcxx/utils/google-benchmark/mingw.py
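A hedged sketch of driving the script as a library rather than from the command line; it assumes the file is importable as `mingw` and will download a toolchain on first use.

import os
import mingw

gcc_root = mingw.root(arch='x86_64', threading='posix', exceptions='seh')
print(os.path.join(gcc_root, 'bin'))  # directory containing the compilers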
import os import ycm_core # These are the compilation flags that will be used in case there's no # compilation database set (by default, one is not set). # CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR. flags = [ '-Wall', '-Werror', '-pedantic-errors', '-std=c++0x', '-fno-strict-aliasing', '-O3', '-DNDEBUG', # ...and the same thing goes for the magic -x option which specifies the # language that the files to be compiled are written in. This is mostly # relevant for c++ headers. # For a C project, you would set this to 'c' instead of 'c++'. '-x', 'c++', '-I', 'include', '-isystem', '/usr/include', '-isystem', '/usr/local/include', ] # Set this to the absolute path to the folder (NOT the file!) containing the # compile_commands.json file to use that instead of 'flags'. See here for # more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html # # Most projects will NOT need to set this to anything; you can just change the # 'flags' list of compilation flags. Notice that YCM itself uses that approach. compilation_database_folder = '' if os.path.exists( compilation_database_folder ): database = ycm_core.CompilationDatabase( compilation_database_folder ) else: database = None SOURCE_EXTENSIONS = [ '.cc' ] def DirectoryOfThisScript(): return os.path.dirname( os.path.abspath( __file__ ) ) def MakeRelativePathsInFlagsAbsolute( flags, working_directory ): if not working_directory: return list( flags ) new_flags = [] make_next_absolute = False path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ] for flag in flags: new_flag = flag if make_next_absolute: make_next_absolute = False if not flag.startswith( '/' ): new_flag = os.path.join( working_directory, flag ) for path_flag in path_flags: if flag == path_flag: make_next_absolute = True break if flag.startswith( path_flag ): path = flag[ len( path_flag ): ] new_flag = path_flag + os.path.join( working_directory, path ) break if new_flag: new_flags.append( new_flag ) return new_flags def IsHeaderFile( filename ): extension = os.path.splitext( filename )[ 1 ] return extension in [ '.h', '.hxx', '.hpp', '.hh' ] def GetCompilationInfoForFile( filename ): # The compilation_commands.json file generated by CMake does not have entries # for header files. So we do our best by asking the db for flags for a # corresponding source file, if any. If one exists, the flags for that file # should be good enough. if IsHeaderFile( filename ): basename = os.path.splitext( filename )[ 0 ] for extension in SOURCE_EXTENSIONS: replacement_file = basename + extension if os.path.exists( replacement_file ): compilation_info = database.GetCompilationInfoForFile( replacement_file ) if compilation_info.compiler_flags_: return compilation_info return None return database.GetCompilationInfoForFile( filename ) def FlagsForFile( filename, **kwargs ): if database: # Bear in mind that compilation_info.compiler_flags_ does NOT return a # python list, but a "list-like" StringVec object compilation_info = GetCompilationInfoForFile( filename ) if not compilation_info: return None final_flags = MakeRelativePathsInFlagsAbsolute( compilation_info.compiler_flags_, compilation_info.compiler_working_dir_ ) else: relative_to = DirectoryOfThisScript() final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to ) return { 'flags': final_flags, 'do_cache': True }
cccl-main
libcudacxx/libcxx/utils/google-benchmark/.ycm_extra_conf.py
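An illustrative call to MakeRelativePathsInFlagsAbsolute() with made-up values: relative paths following (or fused with) -I/-isystem style flags are rewritten against the given working directory.

print(MakeRelativePathsInFlagsAbsolute(
    ['-I', 'include', '-x', 'c++'], '/home/user/project'))
# -> ['-I', '/home/user/project/include', '-x', 'c++']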
#!/usr/bin/env python """ strip_asm.py - Cleanup ASM output for the specified file """ from argparse import ArgumentParser import sys import os import re def find_used_labels(asm): found = set() label_re = re.compile("\s*j[a-z]+\s+\.L([a-zA-Z0-9][a-zA-Z0-9_]*)") for l in asm.splitlines(): m = label_re.match(l) if m: found.add('.L%s' % m.group(1)) return found def normalize_labels(asm): decls = set() label_decl = re.compile("^[.]{0,1}L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)") for l in asm.splitlines(): m = label_decl.match(l) if m: decls.add(m.group(0)) if len(decls) == 0: return asm needs_dot = next(iter(decls))[0] != '.' if not needs_dot: return asm for ld in decls: asm = re.sub("(^|\s+)" + ld + "(?=:|\s)", '\\1.' + ld, asm) return asm def transform_labels(asm): asm = normalize_labels(asm) used_decls = find_used_labels(asm) new_asm = '' label_decl = re.compile("^\.L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)") for l in asm.splitlines(): m = label_decl.match(l) if not m or m.group(0) in used_decls: new_asm += l new_asm += '\n' return new_asm def is_identifier(tk): if len(tk) == 0: return False first = tk[0] if not first.isalpha() and first != '_': return False for i in range(1, len(tk)): c = tk[i] if not c.isalnum() and c != '_': return False return True def process_identifiers(l): """ process_identifiers - process all identifiers and modify them to have consistent names across all platforms; specifically across ELF and MachO. For example, MachO inserts an additional understore at the beginning of names. This function removes that. """ parts = re.split(r'([a-zA-Z0-9_]+)', l) new_line = '' for tk in parts: if is_identifier(tk): if tk.startswith('__Z'): tk = tk[1:] elif tk.startswith('_') and len(tk) > 1 and \ tk[1].isalpha() and tk[1] != 'Z': tk = tk[1:] new_line += tk return new_line def process_asm(asm): """ Strip the ASM of unwanted directives and lines """ new_contents = '' asm = transform_labels(asm) # TODO: Add more things we want to remove discard_regexes = [ re.compile("\s+\..*$"), # directive re.compile("\s*#(NO_APP|APP)$"), #inline ASM re.compile("\s*#.*$"), # comment line re.compile("\s*\.globa?l\s*([.a-zA-Z_][a-zA-Z0-9$_.]*)"), #global directive re.compile("\s*\.(string|asciz|ascii|[1248]?byte|short|word|long|quad|value|zero)"), ] keep_regexes = [ ] fn_label_def = re.compile("^[a-zA-Z_][a-zA-Z0-9_.]*:") for l in asm.splitlines(): # Remove Mach-O attribute l = l.replace('@GOTPCREL', '') add_line = True for reg in discard_regexes: if reg.match(l) is not None: add_line = False break for reg in keep_regexes: if reg.match(l) is not None: add_line = True break if add_line: if fn_label_def.match(l) and len(new_contents) != 0: new_contents += '\n' l = process_identifiers(l) new_contents += l new_contents += '\n' return new_contents def main(): parser = ArgumentParser( description='generate a stripped assembly file') parser.add_argument( 'input', metavar='input', type=str, nargs=1, help='An input assembly file') parser.add_argument( 'out', metavar='output', type=str, nargs=1, help='The output file') args, unknown_args = parser.parse_known_args() input = args.input[0] output = args.out[0] if not os.path.isfile(input): print(("ERROR: input file '%s' does not exist") % input) sys.exit(1) contents = None with open(input, 'r') as f: contents = f.read() new_contents = process_asm(contents) with open(output, 'w') as f: f.write(new_contents) if __name__ == '__main__': main() # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 # kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off; # kate: 
indent-mode python; remove-trailing-spaces modified;
cccl-main
libcudacxx/libcxx/utils/google-benchmark/tools/strip_asm.py
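A tiny illustration of process_asm() on made-up assembly: directives and comment-only lines are dropped, while labels and instructions survive.

sample = "foo:\n    .cfi_startproc\n    movl $1, %eax\n# comment\n    ret\n"
print(process_asm(sample))
# keeps only: foo:, movl $1, %eax, ret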
#!/usr/bin/env python import unittest """ compare.py - versatile benchmark output compare tool """ import argparse from argparse import ArgumentParser import sys import gbench from gbench import util, report from gbench.util import * def check_inputs(in1, in2, flags): """ Perform checking on the user provided inputs and diagnose any abnormalities """ in1_kind, in1_err = classify_input_file(in1) in2_kind, in2_err = classify_input_file(in2) output_file = find_benchmark_flag('--benchmark_out=', flags) output_type = find_benchmark_flag('--benchmark_out_format=', flags) if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file: print(("WARNING: '--benchmark_out=%s' will be passed to both " "benchmarks causing it to be overwritten") % output_file) if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0: print("WARNING: passing optional flags has no effect since both " "inputs are JSON") if output_type is not None and output_type != 'json': print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py`" " is not supported.") % output_type) sys.exit(1) def create_parser(): parser = ArgumentParser( description='versatile benchmark output compare tool') parser.add_argument( '-a', '--display_aggregates_only', dest='display_aggregates_only', action="store_true", help="If there are repetitions, by default, we display everything - the" " actual runs, and the aggregates computed. Sometimes, it is " "desirable to only view the aggregates. E.g. when there are a lot " "of repetitions. Do note that only the display is affected. " "Internally, all the actual runs are still used, e.g. for U test.") utest = parser.add_argument_group() utest.add_argument( '--no-utest', dest='utest', default=True, action="store_false", help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format(report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS)) alpha_default = 0.05 utest.add_argument( "--alpha", dest='utest_alpha', default=alpha_default, type=float, help=("significance level alpha. 
if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)") % alpha_default) subparsers = parser.add_subparsers( help='This tool has multiple modes of operation:', dest='mode') parser_a = subparsers.add_parser( 'benchmarks', help='The most simple use-case, compare all the output of these two benchmarks') baseline = parser_a.add_argument_group( 'baseline', 'The benchmark baseline') baseline.add_argument( 'test_baseline', metavar='test_baseline', type=argparse.FileType('r'), nargs=1, help='A benchmark executable or JSON output file') contender = parser_a.add_argument_group( 'contender', 'The benchmark that will be compared against the baseline') contender.add_argument( 'test_contender', metavar='test_contender', type=argparse.FileType('r'), nargs=1, help='A benchmark executable or JSON output file') parser_a.add_argument( 'benchmark_options', metavar='benchmark_options', nargs=argparse.REMAINDER, help='Arguments to pass when running benchmark executables') parser_b = subparsers.add_parser( 'filters', help='Compare filter one with the filter two of benchmark') baseline = parser_b.add_argument_group( 'baseline', 'The benchmark baseline') baseline.add_argument( 'test', metavar='test', type=argparse.FileType('r'), nargs=1, help='A benchmark executable or JSON output file') baseline.add_argument( 'filter_baseline', metavar='filter_baseline', type=str, nargs=1, help='The first filter, that will be used as baseline') contender = parser_b.add_argument_group( 'contender', 'The benchmark that will be compared against the baseline') contender.add_argument( 'filter_contender', metavar='filter_contender', type=str, nargs=1, help='The second filter, that will be compared against the baseline') parser_b.add_argument( 'benchmark_options', metavar='benchmark_options', nargs=argparse.REMAINDER, help='Arguments to pass when running benchmark executables') parser_c = subparsers.add_parser( 'benchmarksfiltered', help='Compare filter one of first benchmark with filter two of the second benchmark') baseline = parser_c.add_argument_group( 'baseline', 'The benchmark baseline') baseline.add_argument( 'test_baseline', metavar='test_baseline', type=argparse.FileType('r'), nargs=1, help='A benchmark executable or JSON output file') baseline.add_argument( 'filter_baseline', metavar='filter_baseline', type=str, nargs=1, help='The first filter, that will be used as baseline') contender = parser_c.add_argument_group( 'contender', 'The benchmark that will be compared against the baseline') contender.add_argument( 'test_contender', metavar='test_contender', type=argparse.FileType('r'), nargs=1, help='The second benchmark executable or JSON output file, that will be compared against the baseline') contender.add_argument( 'filter_contender', metavar='filter_contender', type=str, nargs=1, help='The second filter, that will be compared against the baseline') parser_c.add_argument( 'benchmark_options', metavar='benchmark_options', nargs=argparse.REMAINDER, help='Arguments to pass when running benchmark executables') return parser def main(): # Parse the command line flags parser = create_parser() args, unknown_args = parser.parse_known_args() if args.mode is None: parser.print_help() exit(1) assert not unknown_args benchmark_options = args.benchmark_options if args.mode == 'benchmarks': test_baseline = args.test_baseline[0].name test_contender = args.test_contender[0].name filter_baseline = '' filter_contender = '' # NOTE: if 
test_baseline == test_contender, you are analyzing the stdev description = 'Comparing %s to %s' % (test_baseline, test_contender) elif args.mode == 'filters': test_baseline = args.test[0].name test_contender = args.test[0].name filter_baseline = args.filter_baseline[0] filter_contender = args.filter_contender[0] # NOTE: if filter_baseline == filter_contender, you are analyzing the # stdev description = 'Comparing %s to %s (from %s)' % ( filter_baseline, filter_contender, args.test[0].name) elif args.mode == 'benchmarksfiltered': test_baseline = args.test_baseline[0].name test_contender = args.test_contender[0].name filter_baseline = args.filter_baseline[0] filter_contender = args.filter_contender[0] # NOTE: if test_baseline == test_contender and # filter_baseline == filter_contender, you are analyzing the stdev description = 'Comparing %s (from %s) to %s (from %s)' % ( filter_baseline, test_baseline, filter_contender, test_contender) else: # should never happen print("Unrecognized mode of operation: '%s'" % args.mode) parser.print_help() exit(1) check_inputs(test_baseline, test_contender, benchmark_options) if args.display_aggregates_only: benchmark_options += ['--benchmark_display_aggregates_only=true'] options_baseline = [] options_contender = [] if filter_baseline and filter_contender: options_baseline = ['--benchmark_filter=%s' % filter_baseline] options_contender = ['--benchmark_filter=%s' % filter_contender] # Run the benchmarks and report the results json1 = json1_orig = gbench.util.run_or_load_benchmark( test_baseline, benchmark_options + options_baseline) json2 = json2_orig = gbench.util.run_or_load_benchmark( test_contender, benchmark_options + options_contender) # Now, filter the benchmarks so that the difference report can work if filter_baseline and filter_contender: replacement = '[%s vs. 
%s]' % (filter_baseline, filter_contender) json1 = gbench.report.filter_benchmark( json1_orig, filter_baseline, replacement) json2 = gbench.report.filter_benchmark( json2_orig, filter_contender, replacement) # Diff and output output_lines = gbench.report.generate_difference_report( json1, json2, args.display_aggregates_only, args.utest, args.utest_alpha) print(description) for ln in output_lines: print(ln) class TestParser(unittest.TestCase): def setUp(self): self.parser = create_parser() testInputs = os.path.join( os.path.dirname( os.path.realpath(__file__)), 'gbench', 'Inputs') self.testInput0 = os.path.join(testInputs, 'test1_run1.json') self.testInput1 = os.path.join(testInputs, 'test1_run2.json') def test_benchmarks_basic(self): parsed = self.parser.parse_args( ['benchmarks', self.testInput0, self.testInput1]) self.assertFalse(parsed.display_aggregates_only) self.assertTrue(parsed.utest) self.assertEqual(parsed.mode, 'benchmarks') self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.test_contender[0].name, self.testInput1) self.assertFalse(parsed.benchmark_options) def test_benchmarks_basic_without_utest(self): parsed = self.parser.parse_args( ['--no-utest', 'benchmarks', self.testInput0, self.testInput1]) self.assertFalse(parsed.display_aggregates_only) self.assertFalse(parsed.utest) self.assertEqual(parsed.utest_alpha, 0.05) self.assertEqual(parsed.mode, 'benchmarks') self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.test_contender[0].name, self.testInput1) self.assertFalse(parsed.benchmark_options) def test_benchmarks_basic_display_aggregates_only(self): parsed = self.parser.parse_args( ['-a', 'benchmarks', self.testInput0, self.testInput1]) self.assertTrue(parsed.display_aggregates_only) self.assertTrue(parsed.utest) self.assertEqual(parsed.mode, 'benchmarks') self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.test_contender[0].name, self.testInput1) self.assertFalse(parsed.benchmark_options) def test_benchmarks_basic_with_utest_alpha(self): parsed = self.parser.parse_args( ['--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1]) self.assertFalse(parsed.display_aggregates_only) self.assertTrue(parsed.utest) self.assertEqual(parsed.utest_alpha, 0.314) self.assertEqual(parsed.mode, 'benchmarks') self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.test_contender[0].name, self.testInput1) self.assertFalse(parsed.benchmark_options) def test_benchmarks_basic_without_utest_with_utest_alpha(self): parsed = self.parser.parse_args( ['--no-utest', '--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1]) self.assertFalse(parsed.display_aggregates_only) self.assertFalse(parsed.utest) self.assertEqual(parsed.utest_alpha, 0.314) self.assertEqual(parsed.mode, 'benchmarks') self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.test_contender[0].name, self.testInput1) self.assertFalse(parsed.benchmark_options) def test_benchmarks_with_remainder(self): parsed = self.parser.parse_args( ['benchmarks', self.testInput0, self.testInput1, 'd']) self.assertFalse(parsed.display_aggregates_only) self.assertTrue(parsed.utest) self.assertEqual(parsed.mode, 'benchmarks') self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.test_contender[0].name, self.testInput1) self.assertEqual(parsed.benchmark_options, ['d']) def test_benchmarks_with_remainder_after_doubleminus(self): parsed = 
self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1, '--', 'e'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.benchmark_options, ['e'])

    def test_filters_basic(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertFalse(parsed.benchmark_options)

    def test_filters_with_remainder(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd', 'e'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertEqual(parsed.benchmark_options, ['e'])

    def test_filters_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd', '--', 'f'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertEqual(parsed.benchmark_options, ['f'])

    def test_benchmarksfiltered_basic(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarksfiltered_with_remainder(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', 'f'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertEqual(parsed.benchmark_options[0], 'f')

    def test_benchmarksfiltered_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', '--', 'g'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertEqual(parsed.benchmark_options[0], 'g')


if __name__ == '__main__':
    # unittest.main()
    main()

# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
cccl-main
libcudacxx/libcxx/utils/google-benchmark/tools/compare.py
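The tests above pin down how compare.py forwards trailing benchmark options: anything left on the command line after the positional arguments ends up in parsed.benchmark_options. A minimal sketch of that argparse pattern, assuming argparse.REMAINDER is used for the trailing arguments (the real parser construction lives earlier in compare.py and wraps the file arguments in argparse.FileType, which is why the tests read .name on them):

import argparse

# Hypothetical reconstruction of the subcommand parser the tests exercise.
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='mode')
bench = subparsers.add_parser('benchmarks')
bench.add_argument('test_baseline')    # the real parser uses argparse.FileType
bench.add_argument('test_contender')
# REMAINDER collects every remaining token for forwarding to the benchmark.
bench.add_argument('benchmark_options', nargs=argparse.REMAINDER)

args = parser.parse_args(['benchmarks', 'a.json', 'b.json', 'e'])
print(args.benchmark_options)  # ['e'], as the tests above expect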
"""util.py - General utilities for running, loading, and processing benchmarks """ import json import os import tempfile import subprocess import sys # Input file type enumeration IT_Invalid = 0 IT_JSON = 1 IT_Executable = 2 _num_magic_bytes = 2 if sys.platform.startswith('win') else 4 def is_executable_file(filename): """ Return 'True' if 'filename' names a valid file which is likely an executable. A file is considered an executable if it starts with the magic bytes for a EXE, Mach O, or ELF file. """ if not os.path.isfile(filename): return False with open(filename, mode='rb') as f: magic_bytes = f.read(_num_magic_bytes) if sys.platform == 'darwin': return magic_bytes in [ b'\xfe\xed\xfa\xce', # MH_MAGIC b'\xce\xfa\xed\xfe', # MH_CIGAM b'\xfe\xed\xfa\xcf', # MH_MAGIC_64 b'\xcf\xfa\xed\xfe', # MH_CIGAM_64 b'\xca\xfe\xba\xbe', # FAT_MAGIC b'\xbe\xba\xfe\xca' # FAT_CIGAM ] elif sys.platform.startswith('win'): return magic_bytes == b'MZ' else: return magic_bytes == b'\x7FELF' def is_json_file(filename): """ Returns 'True' if 'filename' names a valid JSON output file. 'False' otherwise. """ try: with open(filename, 'r') as f: json.load(f) return True except BaseException: pass return False def classify_input_file(filename): """ Return a tuple (type, msg) where 'type' specifies the classified type of 'filename'. If 'type' is 'IT_Invalid' then 'msg' is a human readable string represeting the error. """ ftype = IT_Invalid err_msg = None if not os.path.exists(filename): err_msg = "'%s' does not exist" % filename elif not os.path.isfile(filename): err_msg = "'%s' does not name a file" % filename elif is_executable_file(filename): ftype = IT_Executable elif is_json_file(filename): ftype = IT_JSON else: err_msg = "'%s' does not name a valid benchmark executable or JSON file" % filename return ftype, err_msg def check_input_file(filename): """ Classify the file named by 'filename' and return the classification. If the file is classified as 'IT_Invalid' print an error message and exit the program. """ ftype, msg = classify_input_file(filename) if ftype == IT_Invalid: print("Invalid input file: %s" % msg) sys.exit(1) return ftype def find_benchmark_flag(prefix, benchmark_flags): """ Search the specified list of flags for a flag matching `<prefix><arg>` and if it is found return the arg it specifies. If specified more than once the last value is returned. If the flag is not found None is returned. """ assert prefix.startswith('--') and prefix.endswith('=') result = None for f in benchmark_flags: if f.startswith(prefix): result = f[len(prefix):] return result def remove_benchmark_flags(prefix, benchmark_flags): """ Return a new list containing the specified benchmark_flags except those with the specified prefix. """ assert prefix.startswith('--') and prefix.endswith('=') return [f for f in benchmark_flags if not f.startswith(prefix)] def load_benchmark_results(fname): """ Read benchmark output from a file and return the JSON object. REQUIRES: 'fname' names a file containing JSON benchmark output. """ with open(fname, 'r') as f: return json.load(f) def run_benchmark(exe_name, benchmark_flags): """ Run a benchmark specified by 'exe_name' with the specified 'benchmark_flags'. The benchmark is run directly as a subprocess to preserve real time console output. 
RETURNS: A JSON object representing the benchmark output """ output_name = find_benchmark_flag('--benchmark_out=', benchmark_flags) is_temp_output = False if output_name is None: is_temp_output = True thandle, output_name = tempfile.mkstemp() os.close(thandle) benchmark_flags = list(benchmark_flags) + \ ['--benchmark_out=%s' % output_name] cmd = [exe_name] + benchmark_flags print("RUNNING: %s" % ' '.join(cmd)) exitCode = subprocess.call(cmd) if exitCode != 0: print('TEST FAILED...') sys.exit(exitCode) json_res = load_benchmark_results(output_name) if is_temp_output: os.unlink(output_name) return json_res def run_or_load_benchmark(filename, benchmark_flags): """ Get the results for a specified benchmark. If 'filename' specifies an executable benchmark then the results are generated by running the benchmark. Otherwise 'filename' must name a valid JSON output file, which is loaded and the result returned. """ ftype = check_input_file(filename) if ftype == IT_JSON: return load_benchmark_results(filename) elif ftype == IT_Executable: return run_benchmark(filename, benchmark_flags) else: assert False # This branch is unreachable
cccl-main
libcudacxx/libcxx/utils/google-benchmark/tools/gbench/util.py
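A minimal usage sketch of the helpers above, assuming the module is importable as gbench.util and that 'bm_results.json' is a placeholder results file; run_or_load_benchmark dispatches on the classification returned by check_input_file, so an executable path would work equally well:

from gbench.util import run_or_load_benchmark  # assumed import path

# 'bm_results.json' is a placeholder; passing a benchmark executable instead
# would run it and capture the JSON it writes via --benchmark_out.
results = run_or_load_benchmark('bm_results.json', benchmark_flags=[])
for bench in results.get('benchmarks', []):
    print(bench['name'], bench.get('real_time'))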
"""Google Benchmark tooling""" __author__ = 'Eric Fiselier' __email__ = '[email protected]' __versioninfo__ = (0, 5, 0) __version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev' __all__ = []
cccl-main
libcudacxx/libcxx/utils/google-benchmark/tools/gbench/__init__.py
import unittest """report.py - Utilities for reporting statistics about benchmark results """ import os import re import copy from scipy.stats import mannwhitneyu class BenchmarkColor(object): def __init__(self, name, code): self.name = name self.code = code def __repr__(self): return '%s%r' % (self.__class__.__name__, (self.name, self.code)) def __format__(self, format): return self.code # Benchmark Colors Enumeration BC_NONE = BenchmarkColor('NONE', '') BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m') BC_CYAN = BenchmarkColor('CYAN', '\033[96m') BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m') BC_OKGREEN = BenchmarkColor('OKGREEN', '\033[32m') BC_HEADER = BenchmarkColor('HEADER', '\033[92m') BC_WARNING = BenchmarkColor('WARNING', '\033[93m') BC_WHITE = BenchmarkColor('WHITE', '\033[97m') BC_FAIL = BenchmarkColor('FAIL', '\033[91m') BC_ENDC = BenchmarkColor('ENDC', '\033[0m') BC_BOLD = BenchmarkColor('BOLD', '\033[1m') BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m') UTEST_MIN_REPETITIONS = 2 UTEST_OPTIMAL_REPETITIONS = 9 # Lowest reasonable number, More is better. UTEST_COL_NAME = "_pvalue" def color_format(use_color, fmt_str, *args, **kwargs): """ Return the result of 'fmt_str.format(*args, **kwargs)' after transforming 'args' and 'kwargs' according to the value of 'use_color'. If 'use_color' is False then all color codes in 'args' and 'kwargs' are replaced with the empty string. """ assert use_color is True or use_color is False if not use_color: args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE for arg in args] kwargs = {key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE for key, arg in kwargs.items()} return fmt_str.format(*args, **kwargs) def find_longest_name(benchmark_list): """ Return the length of the longest benchmark name in a given list of benchmark JSON objects """ longest_name = 1 for bc in benchmark_list: if len(bc['name']) > longest_name: longest_name = len(bc['name']) return longest_name def calculate_change(old_val, new_val): """ Return a float representing the decimal change between old_val and new_val. """ if old_val == 0 and new_val == 0: return 0.0 if old_val == 0: return float(new_val - old_val) / (float(old_val + new_val) / 2) return float(new_val - old_val) / abs(old_val) def filter_benchmark(json_orig, family, replacement=""): """ Apply a filter to the json, and only leave the 'family' of benchmarks. """ regex = re.compile(family) filtered = {} filtered['benchmarks'] = [] for be in json_orig['benchmarks']: if not regex.search(be['name']): continue filteredbench = copy.deepcopy(be) # Do NOT modify the old name! filteredbench['name'] = regex.sub(replacement, filteredbench['name']) filtered['benchmarks'].append(filteredbench) return filtered def get_unique_benchmark_names(json): """ While *keeping* the order, give all the unique 'names' used for benchmarks. """ seen = set() uniqued = [x['name'] for x in json['benchmarks'] if x['name'] not in seen and (seen.add(x['name']) or True)] return uniqued def intersect(list1, list2): """ Given two lists, get a new list consisting of the elements only contained in *both of the input lists*, while preserving the ordering. """ return [x for x in list1 if x in list2] def partition_benchmarks(json1, json2): """ While preserving the ordering, find benchmarks with the same names in both of the inputs, and group them. (i.e. 
partition/filter into groups with common name)
    """
    json1_unique_names = get_unique_benchmark_names(json1)
    json2_unique_names = get_unique_benchmark_names(json2)
    names = intersect(json1_unique_names, json2_unique_names)

    partitions = []
    for name in names:
        # Pick the time unit from the first entry of the lhs benchmark.
        time_unit = next(x['time_unit'] for x in json1['benchmarks']
                         if x['name'] == name)
        # Filter by name and time unit.
        lhs = [x for x in json1['benchmarks'] if x['name'] == name and
               x['time_unit'] == time_unit]
        rhs = [x for x in json2['benchmarks'] if x['name'] == name and
               x['time_unit'] == time_unit]
        partitions.append([lhs, rhs])
    return partitions


def extract_field(partition, field_name):
    # The count of elements may be different. We want *all* of them.
    lhs = [x[field_name] for x in partition[0]]
    rhs = [x[field_name] for x in partition[1]]
    return [lhs, rhs]


def print_utest(partition, utest_alpha, first_col_width, use_color=True):
    timings_time = extract_field(partition, 'real_time')
    timings_cpu = extract_field(partition, 'cpu_time')

    min_rep_cnt = min(len(timings_time[0]),
                      len(timings_time[1]),
                      len(timings_cpu[0]),
                      len(timings_cpu[1]))

    # Does *everything* have at least UTEST_MIN_REPETITIONS repetitions?
    if min_rep_cnt < UTEST_MIN_REPETITIONS:
        return []

    def get_utest_color(pval):
        return BC_FAIL if pval >= utest_alpha else BC_OKGREEN

    time_pvalue = mannwhitneyu(
        timings_time[0], timings_time[1], alternative='two-sided').pvalue
    cpu_pvalue = mannwhitneyu(
        timings_cpu[0], timings_cpu[1], alternative='two-sided').pvalue

    dsc = "U Test, Repetitions: {} vs {}".format(
        len(timings_cpu[0]), len(timings_cpu[1]))
    dsc_color = BC_OKGREEN

    if min_rep_cnt < UTEST_OPTIMAL_REPETITIONS:
        dsc_color = BC_WARNING
        dsc += ". WARNING: Results unreliable! {}+ repetitions recommended.".format(
            UTEST_OPTIMAL_REPETITIONS)

    special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{} {}"

    last_name = partition[0][0]['name']
    return [color_format(use_color,
                         special_str,
                         BC_HEADER,
                         "{}{}".format(last_name, UTEST_COL_NAME),
                         first_col_width,
                         get_utest_color(time_pvalue), time_pvalue,
                         get_utest_color(cpu_pvalue), cpu_pvalue,
                         dsc_color, dsc,
                         endc=BC_ENDC)]


def generate_difference_report(
        json1,
        json2,
        display_aggregates_only=False,
        utest=False,
        utest_alpha=0.05,
        use_color=True):
    """
    Calculate and report the difference between each test of two benchmark
    runs specified as 'json1' and 'json2'.
    """
    assert utest is True or utest is False
    first_col_width = find_longest_name(json1['benchmarks'])

    def find_test(name):
        for b in json2['benchmarks']:
            if b['name'] == name:
                return b
        return None

    first_col_width = max(
        first_col_width,
        len('Benchmark'))
    first_col_width += len(UTEST_COL_NAME)
    first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format(
        'Benchmark', 12 + first_col_width)
    output_strs = [first_line, '-' * len(first_line)]

    partitions = partition_benchmarks(json1, json2)
    for partition in partitions:
        # Careful, we may have different repetition count.
        for i in range(min(len(partition[0]), len(partition[1]))):
            bn = partition[0][i]
            other_bench = partition[1][i]

            # *If* we were asked to only display aggregates,
            # and if it is non-aggregate, then skip it.
if display_aggregates_only and 'run_type' in bn and 'run_type' in other_bench: assert bn['run_type'] == other_bench['run_type'] if bn['run_type'] != 'aggregate': continue fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}" def get_color(res): if res > 0.05: return BC_FAIL elif res > -0.07: return BC_WHITE else: return BC_CYAN tres = calculate_change(bn['real_time'], other_bench['real_time']) cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time']) output_strs += [color_format(use_color, fmt_str, BC_HEADER, bn['name'], first_col_width, get_color(tres), tres, get_color(cpures), cpures, bn['real_time'], other_bench['real_time'], bn['cpu_time'], other_bench['cpu_time'], endc=BC_ENDC)] # After processing the whole partition, if requested, do the U test. if utest: output_strs += print_utest(partition, utest_alpha=utest_alpha, first_col_width=first_col_width, use_color=use_color) return output_strs ############################################################################### # Unit tests class TestGetUniqueBenchmarkNames(unittest.TestCase): def load_results(self): import json testInputs = os.path.join( os.path.dirname( os.path.realpath(__file__)), 'Inputs') testOutput = os.path.join(testInputs, 'test3_run0.json') with open(testOutput, 'r') as f: json = json.load(f) return json def test_basic(self): expect_lines = [ 'BM_One', 'BM_Two', 'short', # These two are not sorted 'medium', # These two are not sorted ] json = self.load_results() output_lines = get_unique_benchmark_names(json) print("\n") print("\n".join(output_lines)) self.assertEqual(len(output_lines), len(expect_lines)) for i in range(0, len(output_lines)): self.assertEqual(expect_lines[i], output_lines[i]) class TestReportDifference(unittest.TestCase): def load_results(self): import json testInputs = os.path.join( os.path.dirname( os.path.realpath(__file__)), 'Inputs') testOutput1 = os.path.join(testInputs, 'test1_run1.json') testOutput2 = os.path.join(testInputs, 'test1_run2.json') with open(testOutput1, 'r') as f: json1 = json.load(f) with open(testOutput2, 'r') as f: json2 = json.load(f) return json1, json2 def test_basic(self): expect_lines = [ ['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'], ['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'], ['BM_2xSlower', '+1.0000', '+1.0000', '50', '100', '50', '100'], ['BM_1PercentFaster', '-0.0100', '-0.0100', '100', '99', '100', '99'], ['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'], ['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'], ['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'], ['BM_100xSlower', '+99.0000', '+99.0000', '100', '10000', '100', '10000'], ['BM_100xFaster', '-0.9900', '-0.9900', '10000', '100', '10000', '100'], ['BM_10PercentCPUToTime', '+0.1000', '-0.1000', '100', '110', '100', '90'], ['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'], ['BM_BadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'], ] json1, json2 = self.load_results() output_lines_with_header = generate_difference_report( json1, json2, use_color=False) output_lines = output_lines_with_header[2:] print("\n") print("\n".join(output_lines_with_header)) self.assertEqual(len(output_lines), len(expect_lines)) for i in range(0, len(output_lines)): parts = [x for x in output_lines[i].split(' ') if x] self.assertEqual(len(parts), 7) self.assertEqual(expect_lines[i], parts) class TestReportDifferenceBetweenFamilies(unittest.TestCase): def 
load_result(self): import json testInputs = os.path.join( os.path.dirname( os.path.realpath(__file__)), 'Inputs') testOutput = os.path.join(testInputs, 'test2_run.json') with open(testOutput, 'r') as f: json = json.load(f) return json def test_basic(self): expect_lines = [ ['.', '-0.5000', '-0.5000', '10', '5', '10', '5'], ['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'], ['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'], ['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'], ] json = self.load_result() json1 = filter_benchmark(json, "BM_Z.ro", ".") json2 = filter_benchmark(json, "BM_O.e", ".") output_lines_with_header = generate_difference_report( json1, json2, use_color=False) output_lines = output_lines_with_header[2:] print("\n") print("\n".join(output_lines_with_header)) self.assertEqual(len(output_lines), len(expect_lines)) for i in range(0, len(output_lines)): parts = [x for x in output_lines[i].split(' ') if x] self.assertEqual(len(parts), 7) self.assertEqual(expect_lines[i], parts) class TestReportDifferenceWithUTest(unittest.TestCase): def load_results(self): import json testInputs = os.path.join( os.path.dirname( os.path.realpath(__file__)), 'Inputs') testOutput1 = os.path.join(testInputs, 'test3_run0.json') testOutput2 = os.path.join(testInputs, 'test3_run1.json') with open(testOutput1, 'r') as f: json1 = json.load(f) with open(testOutput2, 'r') as f: json2 = json.load(f) return json1, json2 def test_utest(self): expect_lines = [] expect_lines = [ ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'], ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'], ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'], ['BM_Two_pvalue', '0.6985', '0.6985', 'U', 'Test,', 'Repetitions:', '2', 'vs', '2.', 'WARNING:', 'Results', 'unreliable!', '9+', 'repetitions', 'recommended.'], ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'], ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'], ['short_pvalue', '0.7671', '0.1489', 'U', 'Test,', 'Repetitions:', '2', 'vs', '3.', 'WARNING:', 'Results', 'unreliable!', '9+', 'repetitions', 'recommended.'], ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'], ] json1, json2 = self.load_results() output_lines_with_header = generate_difference_report( json1, json2, utest=True, utest_alpha=0.05, use_color=False) output_lines = output_lines_with_header[2:] print("\n") print("\n".join(output_lines_with_header)) self.assertEqual(len(output_lines), len(expect_lines)) for i in range(0, len(output_lines)): parts = [x for x in output_lines[i].split(' ') if x] self.assertEqual(expect_lines[i], parts) class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly( unittest.TestCase): def load_results(self): import json testInputs = os.path.join( os.path.dirname( os.path.realpath(__file__)), 'Inputs') testOutput1 = os.path.join(testInputs, 'test3_run0.json') testOutput2 = os.path.join(testInputs, 'test3_run1.json') with open(testOutput1, 'r') as f: json1 = json.load(f) with open(testOutput2, 'r') as f: json2 = json.load(f) return json1, json2 def test_utest(self): expect_lines = [] expect_lines = [ ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'], ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'], ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'], ['BM_Two_pvalue', '0.6985', '0.6985', 'U', 'Test,', 'Repetitions:', '2', 'vs', '2.', 'WARNING:', 'Results', 'unreliable!', '9+', 'repetitions', 'recommended.'], ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'], ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'], 
['short_pvalue', '0.7671', '0.1489', 'U', 'Test,', 'Repetitions:', '2', 'vs', '3.', 'WARNING:', 'Results', 'unreliable!', '9+', 'repetitions', 'recommended.'], ] json1, json2 = self.load_results() output_lines_with_header = generate_difference_report( json1, json2, display_aggregates_only=True, utest=True, utest_alpha=0.05, use_color=False) output_lines = output_lines_with_header[2:] print("\n") print("\n".join(output_lines_with_header)) self.assertEqual(len(output_lines), len(expect_lines)) for i in range(0, len(output_lines)): parts = [x for x in output_lines[i].split(' ') if x] self.assertEqual(expect_lines[i], parts) if __name__ == '__main__': unittest.main() # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 # kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off; # kate: indent-mode python; remove-trailing-spaces modified;
cccl-main
libcudacxx/libcxx/utils/google-benchmark/tools/gbench/report.py
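A small sketch of driving generate_difference_report directly with in-memory results; the dicts carry only the fields the code above actually reads (name, time_unit, real_time, cpu_time), and the import path is an assumption, not part of the original file:

from gbench.report import generate_difference_report  # assumed import path

json1 = {'benchmarks': [{'name': 'BM_Foo', 'time_unit': 'ns',
                         'real_time': 100.0, 'cpu_time': 100.0}]}
json2 = {'benchmarks': [{'name': 'BM_Foo', 'time_unit': 'ns',
                         'real_time': 90.0, 'cpu_time': 95.0}]}

# Prints the header, the divider, and one row showing -0.1000 real time
# and -0.0500 cpu time change, per calculate_change above.
for line in generate_difference_report(json1, json2, use_color=False):
    print(line)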
# -*- coding: utf-8 -*- # # libc++ documentation build configuration file. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'libc++' copyright = u'2011-2018, LLVM Project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '10.0' # The full version, including alpha/beta/rc tags. release = '10.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. today_fmt = '%Y-%m-%d' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'friendly' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'haiku' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. 
#html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'libcxxdoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('contents', 'libcxx.tex', u'libcxx Documentation', u'LLVM project', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('contents', 'libc++', u'libc++ Documentation', [u'LLVM project'], 1) ] # If true, show URL addresses after external links. 
#man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('contents', 'libc++', u'libc++ Documentation', u'LLVM project', 'libc++', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # FIXME: Define intersphinx configuration. intersphinx_mapping = {} # -- Options for extensions ---------------------------------------------------- # Enable this if you want TODOs to show up in the generated documentation. todo_include_todos = True
cccl-main
libcudacxx/libcxx/docs/conf.py
# -*- Python -*- vim: set ft=python ts=4 sw=4 expandtab tw=79:

# Configuration file for the 'lit' test runner.
import os
import site

site.addsitedir(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'utils'))
from libcxx.test.googlebenchmark import GoogleBenchmark

# Tell pylint that we know config and lit_config exist somewhere.
if 'PYLINT_IMPORT' in os.environ:
    config = object()
    lit_config = object()

# name: The name of this test suite.
config.name = 'libc++ benchmarks'
config.suffixes = []

config.test_exec_root = os.path.join(config.libcxx_obj_root, 'benchmarks')
config.test_source_root = config.test_exec_root

config.test_format = GoogleBenchmark(test_sub_dirs='.',
                                     test_suffix='.libcxx.out',
                                     benchmark_args=config.benchmark_args)
cccl-main
libcudacxx/libcxx/benchmarks/lit.cfg.py
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""Commands used to automate testing gdb pretty printers.

This script is part of a larger framework to test gdb pretty printers. It
runs the program, detects test cases, checks them, and prints results.

See gdb_pretty_printer_test.sh.cpp on how to write a test case.
"""

from __future__ import print_function
import re
import gdb

test_failures = 0


class CheckResult(gdb.Command):

    def __init__(self):
        super(CheckResult, self).__init__(
            "print_and_compare", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        try:
            # Stack frame is:
            # 0. StopForDebugger
            # 1. ComparePrettyPrintToChars or ComparePrettyPrintToRegex
            # 2. TestCase
            compare_frame = gdb.newest_frame().older()
            testcase_frame = compare_frame.older()
            test_loc = testcase_frame.find_sal()
            # Use interactive commands in the correct context to get the
            # pretty printed version
            value_str = self._get_value_string(compare_frame, testcase_frame)

            # Ignore the convenience variable name and newline
            value = value_str[value_str.find("= ") + 2:-1]
            gdb.newest_frame().select()
            expectation_val = compare_frame.read_var("expectation")
            if "PrettyPrintToRegex" in compare_frame.name():
                check_literal = expectation_val.string()
                test_fails = not re.match(check_literal, value)
            else:
                check_literal_string = expectation_val.string(encoding="utf-8")
                check_literal = str(check_literal_string.encode("utf-8"))
                test_fails = value != check_literal

            if test_fails:
                global test_failures
                print("FAIL: " + test_loc.symtab.filename +
                      ":" + str(test_loc.line))
                print("GDB printed:")
                print(" " + value)
                print("Value should match:")
                print(" " + check_literal)
                test_failures += 1
            else:
                print("PASS: " + test_loc.symtab.filename +
                      ":" + str(test_loc.line))
        except RuntimeError as e:
            # At this point, lots of different things could be wrong, so
            # don't try to recover or figure it out. Don't exit either,
            # because then it's impossible to debug the framework itself.
            print("FAIL: Something is wrong in the test framework.")
            print(str(e))
            test_failures += 1

    def _get_value_string(self, compare_frame, testcase_frame):
        compare_frame.select()
        if "ComparePrettyPrint" in compare_frame.name():
            return gdb.execute("p value", to_string=True)
        value_str = str(compare_frame.read_var("value"))
        clean_expression_str = value_str.strip("'\"")
        testcase_frame.select()
        return gdb.execute("p " + clean_expression_str, to_string=True)


def exit_handler(event=None):
    global test_failures
    if test_failures:
        print("FAILED %d cases" % test_failures)
    exit(test_failures)


# Start code executed at load time

# Disable terminal paging
gdb.execute("set height 0")
gdb.execute("set python print-stack full")
test_failures = 0
CheckResult()
test_bp = gdb.Breakpoint("StopForDebugger")
test_bp.enabled = True
test_bp.silent = True
test_bp.commands = "print_and_compare\ncontinue"
# "run" won't return if the program exits; ensure the script regains control.
gdb.events.exited.connect(exit_handler)
gdb.execute("run")
# If the program didn't exit, something went wrong, but we don't
# know what. Fail on exit.
test_failures += 1
exit_handler(None)
cccl-main
libcudacxx/.upstream-tests/test/pretty_printers/gdb_pretty_printer_test.py
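One plausible way to drive the script above (binary and file names are placeholders; the real harness builds the test program from gdb_pretty_printer_test.sh.cpp): source it into a batch-mode gdb session against the compiled test binary. The script issues the "run" itself and reports PASS/FAIL per test case.

import subprocess

# Placeholder paths; the surrounding test framework supplies the real ones.
subprocess.check_call([
    'gdb', '-quiet', '-batch',
    '-x', 'gdb_pretty_printer_test.py',
    './gdb_pretty_printer_test',
])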
import sys
import os
import socket
import stat

# Ensure that this is being run on a specific platform
assert sys.platform.startswith('linux') or sys.platform.startswith('darwin') \
    or sys.platform.startswith('cygwin') or sys.platform.startswith('freebsd') \
    or sys.platform.startswith('netbsd')


def env_path():
    ep = os.environ.get('LIBCXX_FILESYSTEM_DYNAMIC_TEST_ROOT')
    assert ep is not None
    ep = os.path.realpath(ep)
    assert os.path.isdir(ep)
    return ep


env_path_global = env_path()


# Make sure we don't try to write outside of env_path.
# All paths used should be sanitized
def sanitize(p):
    p = os.path.realpath(p)
    if os.path.commonprefix([env_path_global, p]):
        return p
    assert False


"""
Some of the tests restrict permissions to induce failures.
Before we delete the test environment, we have to walk it and re-raise the
permissions.
"""


def clean_recursive(root_p):
    if not os.path.islink(root_p):
        os.chmod(root_p, 0o777)
    for ent in os.listdir(root_p):
        p = os.path.join(root_p, ent)
        if os.path.islink(p) or not os.path.isdir(p):
            os.remove(p)
        else:
            assert os.path.isdir(p)
            clean_recursive(p)
            os.rmdir(p)


def init_test_directory(root_p):
    root_p = sanitize(root_p)
    assert not os.path.exists(root_p)
    os.makedirs(root_p)


def destroy_test_directory(root_p):
    root_p = sanitize(root_p)
    clean_recursive(root_p)
    os.rmdir(root_p)


def create_file(fname, size):
    with open(sanitize(fname), 'w') as f:
        f.write('c' * size)


def create_dir(dname):
    os.mkdir(sanitize(dname))


def create_symlink(source, link):
    os.symlink(sanitize(source), sanitize(link))


def create_hardlink(source, link):
    os.link(sanitize(source), sanitize(link))


def create_fifo(source):
    os.mkfifo(sanitize(source))


def create_socket(source):
    sock = socket.socket(socket.AF_UNIX)
    sanitized_source = sanitize(source)
    # AF_UNIX sockets may have very limited path length, so split it
    # into chdir call (with technically unlimited length) followed
    # by bind() relative to the directory
    os.chdir(os.path.dirname(sanitized_source))
    sock.bind(os.path.basename(sanitized_source))


if __name__ == '__main__':
    command = " ".join(sys.argv[1:])
    eval(command)
    sys.exit(0)
cccl-main
libcudacxx/.upstream-tests/test/support/filesystem_dynamic_test_helper.py
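The helper is driven by eval()ing its joined command-line arguments, so a test can invoke any of the functions above by name. A hypothetical invocation (paths are placeholders; the environment variable is the one the script asserts on):

import os
import subprocess

root = '/tmp/fs_test_root'  # placeholder test root
os.makedirs(root, exist_ok=True)
env = dict(os.environ, LIBCXX_FILESYSTEM_DYNAMIC_TEST_ROOT=root)

# Each invocation passes one Python call for the helper to eval().
subprocess.check_call(
    ['python', 'filesystem_dynamic_test_helper.py',
     "create_file('%s/f1', 42)" % root],
    env=env)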
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

from argparse import ArgumentParser
import sys


def print_and_exit(msg):
    sys.stderr.write(msg + '\n')
    sys.exit(1)


def main():
    parser = ArgumentParser(
        description="Concatenate two files into a single file")
    parser.add_argument(
        '-o', '--output', dest='output', required=True,
        help='The output file.', type=str, action='store')
    parser.add_argument(
        'files', metavar='files', nargs='+',
        help='The files to concatenate')
    args = parser.parse_args()
    if len(args.files) < 2:
        print_and_exit('fewer than 2 inputs provided')
    data = ''
    for filename in args.files:
        with open(filename, 'r') as f:
            data += f.read()
        if len(data) != 0 and data[-1] != '\n':
            data += '\n'
    assert len(data) > 0, "cannot cat empty files"
    with open(args.output, 'w') as f:
        f.write(data)


if __name__ == '__main__':
    main()
    sys.exit(0)
cccl-main
libcudacxx/.upstream-tests/utils/cat_files.py
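Typical invocation (file names are placeholders): the script refuses to run with fewer than two inputs and inserts a newline after any part that does not already end with one.

import subprocess

subprocess.check_call(
    ['python', 'cat_files.py', '-o', 'combined.txt',
     'part1.txt', 'part2.txt'])  # placeholder file names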
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

"""
Generate a linker script that links libc++ to the proper ABI library.
An example script for c++abi would look like "INPUT(libc++.so.1 -lc++abi)".
"""

import argparse
import os
import sys


def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--dryrun", help="Don't write any output",
                        action="store_true", default=False)
    parser.add_argument("--rename", action="store_true", default=False,
                        help="Rename the output as input so we can replace it")
    parser.add_argument("--input", help="Path to libc++ library",
                        required=True)
    parser.add_argument("--output", help="Path to libc++ linker script",
                        required=True)
    parser.add_argument("libraries", nargs="+",
                        help="List of libraries libc++ depends on")
    args = parser.parse_args()

    # Use the relative path for the libc++ library.
    libcxx = os.path.relpath(args.input, os.path.dirname(args.output))

    # Prepare the list of public libraries to link.
    public_libs = ['-l%s' % l for l in args.libraries]

    # Generate the linker script contents.
    contents = "INPUT(%s)" % ' '.join([libcxx] + public_libs)

    if args.dryrun:
        print("GENERATING SCRIPT: '%s' as file %s" % (contents, args.output))
        return 0

    # Remove the existing libc++ symlink if it exists.
    if os.path.islink(args.output):
        os.unlink(args.output)

    # Replace it with the linker script.
    with open(args.output, 'w') as f:
        f.write(contents + "\n")

    return 0


if __name__ == '__main__':
    sys.exit(main())
cccl-main
libcudacxx/.upstream-tests/utils/gen_link_script.py
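For intuition, the generated artifact is just a one-line GNU ld input script. With --input libc++.so.1 and libraries c++abi and pthread (illustrative values), the contents computation above reduces to:

# Mirrors the 'contents' computation with illustrative inputs.
libcxx = 'libc++.so.1'
public_libs = ['-l%s' % l for l in ['c++abi', 'pthread']]
print("INPUT(%s)" % ' '.join([libcxx] + public_libs))
# -> INPUT(libc++.so.1 -lc++abi -lpthread)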
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
sym_extract - Extract and output a list of symbols from a shared library.
"""

from argparse import ArgumentParser
from libcudacxx.sym_check import extract, util


def main():
    parser = ArgumentParser(
        description='Extract a list of symbols from a shared library.')
    parser.add_argument('library', metavar='shared-lib', type=str,
                        help='The library to extract symbols from')
    parser.add_argument('-o', '--output', dest='output',
                        help='The output file. stdout is used if not given',
                        type=str, action='store', default=None)
    parser.add_argument('--names-only', dest='names_only',
                        help='Output only the name of the symbol',
                        action='store_true', default=False)
    parser.add_argument('--only-stdlib-symbols', dest='only_stdlib',
                        help="Filter all symbols not related to the stdlib",
                        action='store_true', default=False)
    parser.add_argument('--defined-only', dest='defined_only',
                        help="Filter all symbols that are not defined",
                        action='store_true', default=False)
    parser.add_argument('--undefined-only', dest='undefined_only',
                        help="Filter all symbols that are defined",
                        action='store_true', default=False)

    args = parser.parse_args()
    assert not (args.undefined_only and args.defined_only)
    if args.output is not None:
        print('Extracting symbols from %s to %s.'
              % (args.library, args.output))
    syms = extract.extract_symbols(args.library)
    if args.only_stdlib:
        syms, other_syms = util.filter_stdlib_symbols(syms)

    filter = lambda x: x
    if args.defined_only:
        filter = lambda l: list([x for x in l if x['is_defined']])
    if args.undefined_only:
        filter = lambda l: list([x for x in l if not x['is_defined']])

    util.write_syms(syms, out=args.output, names_only=args.names_only,
                    filter=filter)


if __name__ == '__main__':
    main()
cccl-main
libcudacxx/.upstream-tests/utils/sym_extract.py
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
sym_diff - Compare two symbol lists and output the differences.
"""

from argparse import ArgumentParser
import sys
from libcudacxx.sym_check import diff, util


def main():
    parser = ArgumentParser(
        description='Compare two symbol lists and output the differences.')
    parser.add_argument(
        '--names-only', dest='names_only',
        help='Only print symbol names', action='store_true', default=False)
    parser.add_argument(
        '--removed-only', dest='removed_only',
        help='Only print removed symbols', action='store_true', default=False)
    parser.add_argument('--only-stdlib-symbols', dest='only_stdlib',
                        help="Filter all symbols not related to the stdlib",
                        action='store_true', default=False)
    parser.add_argument('--strict', dest='strict',
                        help="Exit with a non-zero status if any symbols "
                             "differ",
                        action='store_true', default=False)
    parser.add_argument(
        '-o', '--output', dest='output',
        help='The output file. stdout is used if not given',
        type=str, action='store', default=None)
    parser.add_argument(
        '--demangle', dest='demangle', action='store_true', default=False)
    parser.add_argument(
        'old_syms', metavar='old-syms', type=str,
        help='The file containing the old symbol list or a library')
    parser.add_argument(
        'new_syms', metavar='new-syms', type=str,
        help='The file containing the new symbol list or a library')
    args = parser.parse_args()

    old_syms_list = util.extract_or_load(args.old_syms)
    new_syms_list = util.extract_or_load(args.new_syms)

    if args.only_stdlib:
        old_syms_list, _ = util.filter_stdlib_symbols(old_syms_list)
        new_syms_list, _ = util.filter_stdlib_symbols(new_syms_list)

    added, removed, changed = diff.diff(old_syms_list, new_syms_list)
    if args.removed_only:
        added = {}
    report, is_break, is_different = diff.report_diff(
        added, removed, changed, names_only=args.names_only,
        demangle=args.demangle)
    if args.output is None:
        print(report)
    else:
        with open(args.output, 'w') as f:
            f.write(report + '\n')
    exit_code = 1 if is_break or (args.strict and is_different) else 0
    sys.exit(exit_code)


if __name__ == '__main__':
    main()
cccl-main
libcudacxx/.upstream-tests/utils/sym_diff.py
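A hypothetical CI-style invocation (paths are placeholders): with --strict, the exit status is non-zero whenever any symbols differ, not only on ABI breaks, since either input may be a saved symbol list or a library to extract from.

import subprocess

ret = subprocess.call(['python', 'sym_diff.py', '--strict',
                       'baseline_symbols.txt', 'libc++.so'])  # placeholders
print('symbol lists differ' if ret != 0 else 'symbol lists match')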
#!/usr/bin/env python import os import tempfile def get_libcxx_paths(): utils_path = os.path.dirname(os.path.abspath(__file__)) script_name = os.path.basename(__file__) assert os.path.exists(utils_path) src_root = os.path.dirname(utils_path) include_path = os.path.join(src_root, 'include') assert os.path.exists(include_path) docs_path = os.path.join(src_root, 'docs') assert os.path.exists(docs_path) macro_test_path = os.path.join(src_root, 'test', 'std', 'language.support', 'support.limits', 'support.limits.general') assert os.path.exists(macro_test_path) assert os.path.exists(os.path.join(macro_test_path, 'version.version.pass.cpp')) return script_name, src_root, include_path, docs_path, macro_test_path script_name, source_root, include_path, docs_path, macro_test_path = get_libcxx_paths() def has_header(h): h_path = os.path.join(include_path, h) return os.path.exists(h_path) def add_version_header(tc): tc["headers"].append("version") return tc feature_test_macros = sorted([ add_version_header(x) for x in [ # C++14 macros {"name": "__cccl_lib_integer_sequence", "values": { "c++14": 201304L }, "headers": ["utility"], }, {"name": "__cccl_lib_exchange_function", "values": { "c++14": 201304L }, "headers": ["utility"], }, {"name": "__cccl_lib_tuples_by_type", "values": { "c++14": 201304L }, "headers": ["utility", "tuple"], }, {"name": "__cccl_lib_tuple_element_t", "values": { "c++14": 201402L }, "headers": ["tuple"], }, {"name": "__cccl_lib_make_unique", "values": { "c++14": 201304L }, "headers": ["memory"], }, {"name": "__cccl_lib_transparent_operators", "values": { "c++14": 201210L, "c++17": 201510L, }, "headers": ["functional"], }, {"name": "__cccl_lib_integral_constant_callable", "values": { "c++14": 201304L }, "headers": ["type_traits"], }, {"name": "__cccl_lib_transformation_trait_aliases", "values": { "c++14": 201304L, }, "headers": ["type_traits"] }, {"name": "__cccl_lib_result_of_sfinae", "values": { "c++14": 201210L, }, "headers": ["functional", "type_traits"] }, {"name": "__cccl_lib_is_final", "values": { "c++14": 201402L, }, "headers": ["type_traits"] }, {"name": "__cccl_lib_is_null_pointer", "values": { "c++14": 201309L, }, "headers": ["type_traits"] }, {"name": "__cccl_lib_chrono_udls", "values": { "c++14": 201304L, }, "headers": ["chrono"] }, {"name": "__cccl_lib_string_udls", "values": { "c++14": 201304L, }, "headers": ["string"] }, {"name": "__cccl_lib_generic_associative_lookup", "values": { "c++14": 201304L, }, "headers": ["map", "set"] }, {"name": "__cccl_lib_null_iterators", "values": { "c++14": 201304L, }, "headers": ["iterator"] }, {"name": "__cccl_lib_make_reverse_iterator", "values": { "c++14": 201402L, }, "headers": ["iterator"] }, {"name": "__cccl_lib_robust_nonmodifying_seq_ops", "values": { "c++14": 201304L, }, "headers": ["algorithm"] }, {"name": "__cccl_lib_complex_udls", "values": { "c++14": 201309L, }, "headers": ["complex"] }, {"name": "__cccl_lib_constexpr_complex", "values": { "c++14": 201711L }, "headers": ["complex"], }, {"name": "__cccl_lib_quoted_string_io", "values": { "c++14": 201304L, }, "headers": ["iomanip"] }, {"name": "__cccl_lib_shared_timed_mutex", "values": { "c++14": 201402L, }, "headers": ["shared_mutex"], "depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)", "internal_depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)", }, # C++17 macros {"name": "__cccl_lib_atomic_is_always_lock_free", "values": { "c++17": 201603L, }, "headers": ["atomic"], "depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)", "internal_depends": 
"!defined(_LIBCUDACXX_HAS_NO_THREADS)", }, {"name": "__cccl_lib_filesystem", "values": { "c++17": 201703L, }, "headers": ["filesystem"] }, {"name": "__cccl_lib_invoke", "values": { "c++17": 201411L, }, "headers": ["functional"] }, {"name": "__cccl_lib_void_t", "values": { "c++17": 201411L, }, "headers": ["type_traits"] }, {"name": "__cccl_lib_node_extract", "values": { "c++17": 201606L, }, "headers": ["map", "set", "unordered_map", "unordered_set"] }, {"name": "__cccl_lib_byte", "values": { "c++17": 201603L, }, "headers": ["cstddef"], }, {"name": "__cccl_lib_hardware_interference_size", "values": { "c++17": 201703L, }, "headers": ["new"], }, {"name": "__cccl_lib_launder", "values": { "c++17": 201606L, }, "headers": ["new"], }, {"name": "__cccl_lib_uncaught_exceptions", "values": { "c++17": 201411L, }, "headers": ["exception"], }, {"name": "__cccl_lib_as_const", "values": { "c++17": 201510L, }, "headers": ["utility"], }, {"name": "__cccl_lib_make_from_tuple", "values": { "c++17": 201606L, }, "headers": ["tuple"], }, {"name": "__cccl_lib_apply", "values": { "c++17": 201603L, }, "headers": ["tuple"], }, {"name": "__cccl_lib_optional", "values": { "c++17": 201606L, }, "headers": ["optional"], }, {"name": "__cccl_lib_variant", "values": { "c++17": 201606L, }, "headers": ["variant"], }, {"name": "__cccl_lib_any", "values": { "c++17": 201606L, }, "headers": ["any"], }, {"name": "__cccl_lib_addressof_constexpr", "values": { "c++17": 201603L, }, "headers": ["memory"], "depends": "TEST_HAS_BUILTIN(__builtin_addressof) || TEST_GCC_VER >= 700", "internal_depends": "defined(_LIBCUDACXX_ADDRESSOF)", }, {"name": "__cccl_lib_raw_memory_algorithms", "values": { "c++17": 201606L, }, "headers": ["memory"], }, {"name": "__cccl_lib_enable_shared_from_this", "values": { "c++17": 201603L, }, "headers": ["memory"], }, {"name": "__cccl_lib_shared_ptr_weak_type", "values": { "c++17": 201606L, }, "headers": ["memory"], }, {"name": "__cccl_lib_shared_ptr_arrays", "values": { "c++17": 201611L, }, "headers": ["memory"], "unimplemented": True, }, {"name": "__cccl_lib_memory_resource", "values": { "c++17": 201603L, }, "headers": ["memory_resource"], "unimplemented": True, }, {"name": "__cccl_lib_boyer_moore_searcher", "values": { "c++17": 201603L, }, "headers": ["functional"], "unimplemented": True, }, {"name": "__cccl_lib_not_fn", "values": { "c++17": 201603L, }, "headers": ["functional"], }, {"name": "__cccl_lib_bool_constant", "values": { "c++17": 201505L, }, "headers": ["type_traits"], }, {"name": "__cccl_lib_type_trait_variable_templates", "values": { "c++17": 201510L, }, "headers": ["type_traits"], }, {"name": "__cccl_lib_logical_traits", "values": { "c++17": 201510L, }, "headers": ["type_traits"], }, {"name": "__cccl_lib_is_swappable", "values": { "c++17": 201603L, }, "headers": ["type_traits"], }, {"name": "__cccl_lib_is_invocable", "values": { "c++17": 201703L, }, "headers": ["type_traits"], }, {"name": "__cccl_lib_has_unique_object_representations", "values": { "c++17": 201606L, }, "headers": ["type_traits"], "depends": "TEST_HAS_BUILTIN_IDENTIFIER(__has_unique_object_representations) || TEST_GCC_VER >= 700", "internal_depends": "defined(_LIBCUDACXX_HAS_UNIQUE_OBJECT_REPRESENTATIONS)", }, {"name": "__cccl_lib_is_aggregate", "values": { "c++17": 201703L, }, "headers": ["type_traits"], "depends": "TEST_HAS_BUILTIN_IDENTIFIER(__is_aggregate) || TEST_GCC_VER_NEW >= 7001", "internal_depends": "!defined(_LIBCUDACXX_HAS_NO_IS_AGGREGATE)", }, {"name": "__cccl_lib_chrono", "values": { "c++17": 201611L, }, "headers": 
["chrono"], }, {"name": "__cccl_lib_execution", "values": { "c++17": 201603L, }, "headers": ["execution"], "unimplemented": True }, {"name": "__cccl_lib_parallel_algorithm", "values": { "c++17": 201603L, }, "headers": ["algorithm", "numeric"], "unimplemented": True, }, {"name": "__cccl_lib_to_chars", "values": { "c++17": 201611L, }, "headers": ["utility"], "unimplemented": True, }, {"name": "__cccl_lib_string_view", "values": { "c++17": 201606L, }, "headers": ["string", "string_view"], }, {"name": "__cccl_lib_allocator_traits_is_always_equal", "values": { "c++17": 201411L, }, "headers": ["memory", "scoped_allocator", "string", "deque", "forward_list", "list", "vector", "map", "set", "unordered_map", "unordered_set"], }, {"name": "__cccl_lib_incomplete_container_elements", "values": { "c++17": 201505L, }, "headers": ["forward_list", "list", "vector"], }, {"name": "__cccl_lib_map_try_emplace", "values": { "c++17": 201411L, }, "headers": ["map"], }, {"name": "__cccl_lib_unordered_map_try_emplace", "values": { "c++17": 201411L, }, "headers": ["unordered_map"], }, {"name": "__cccl_lib_array_constexpr", "values": { "c++17": 201603L, }, "headers": ["iterator", "array"], }, {"name": "__cccl_lib_nonmember_container_access", "values": { "c++17": 201411L, }, "headers": ["iterator", "array", "deque", "forward_list", "list", "map", "regex", "set", "string", "unordered_map", "unordered_set", "vector"], }, {"name": "__cccl_lib_sample", "values": { "c++17": 201603L, }, "headers": ["algorithm"], }, {"name": "__cccl_lib_clamp", "values": { "c++17": 201603L, }, "headers": ["algorithm"], }, {"name": "__cccl_lib_gcd_lcm", "values": { "c++17": 201606L, }, "headers": ["numeric"], }, {"name": "__cccl_lib_hypot", "values": { "c++17": 201603L, }, "headers": ["cmath"], }, {"name": "__cccl_lib_math_special_functions", "values": { "c++17": 201603L, }, "headers": ["cmath"], "unimplemented": True, }, {"name": "__cccl_lib_shared_mutex", "values": { "c++17": 201505L, }, "headers": ["shared_mutex"], "depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)", "internal_depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)", }, {"name": "__cccl_lib_scoped_lock", "values": { "c++17": 201703L, }, "headers": ["mutex"], }, # C++2a {"name": "__cccl_lib_char8_t", "values": { "c++2a": 201811L, }, "headers": ["atomic", "filesystem", "istream", "limits", "locale", "ostream", "string", "string_view"], "depends": "defined(__cpp_char8_t)", "internal_depends": "!defined(_LIBCUDACXX_NO_HAS_CHAR8_T)", }, {"name": "__cccl_lib_erase_if", "values": { "c++2a": 201811L, }, "headers": ["string", "deque", "forward_list", "list", "vector", "map", "set", "unordered_map", "unordered_set"] }, {"name": "__cccl_lib_destroying_delete", "values": { "c++2a": 201806L, }, "headers": ["new"], "depends": "TEST_STD_VER > 17" " && defined(__cpp_impl_destroying_delete)" " && __cpp_impl_destroying_delete >= 201806L", "internal_depends": "_LIBCUDACXX_STD_VER > 17" " && defined(__cpp_impl_destroying_delete)" " && __cpp_impl_destroying_delete >= 201806L", }, {"name": "__cccl_lib_three_way_comparison", "values": { "c++2a": 201711L, }, "headers": ["compare"], "unimplemented": True, }, {"name": "__cccl_lib_concepts", "values": { "c++14": 202002L, }, "headers": ["concepts"], }, {"name": "__cccl_lib_constexpr_swap_algorithms", "values": { "c++2a": 201806L, }, "headers": ["algorithm"], "unimplemented": True, }, {"name": "__cccl_lib_constexpr_misc", "values": { "c++2a": 201811L, }, "headers": ["array", "functional", "iterator", "string_view", "tuple", "utility"], "unimplemented": True, 
}, {"name": "__cccl_lib_bind_front", "values": { "c++17": 201907L, }, "headers": ["functional"], }, {"name": "__cccl_lib_is_constant_evaluated", "values": { "c++2a": 201811L, }, "headers": ["type_traits"], "depends": "TEST_HAS_BUILTIN(__builtin_is_constant_evaluated) || TEST_GCC_VER >= 900", "internal_depends": "defined(_LIBCUDACXX_IS_CONSTANT_EVALUATED)", }, {"name": "__cccl_lib_list_remove_return_type", "values": { "c++2a": 201806L, }, "headers": ["forward_list", "list"], "unimplemented": True, }, {"name": "__cccl_lib_generic_unordered_lookup", "values": { "c++2a": 201811L, }, "headers": ["unordered_map", "unordered_set"], "unimplemented": True, }, {"name": "__cccl_lib_ranges", "values": { "c++2a": 201811L, }, "headers": ["algorithm", "functional", "iterator", "memory", "ranges"], "unimplemented": True, }, {"name": "__cccl_lib_bit_cast", "values": { "c++2a": 201806L, }, "headers": ["bit"], "unimplemented": True, }, {"name": "__cccl_lib_atomic_ref", "values": { "c++2a": 201806L, }, "headers": ["atomic"], "unimplemented": True, "depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)", "internal_depends": "!defined(_LIBCUDACXX_HAS_NO_THREADS)", }, {"name": "__cccl_lib_interpolate", "values": { "c++2a": 201902L, }, "headers": ["numeric"], }, ]], key=lambda tc: tc["name"]) def get_std_dialects(): std_dialects = ['c++14', 'c++17', 'c++2a'] return list(std_dialects) def get_first_std(d): for s in get_std_dialects(): if s in d.keys(): return s return None def get_last_std(d): rev_dialects = get_std_dialects() rev_dialects.reverse() for s in rev_dialects: if s in d.keys(): return s return None def get_std_before(d, std): std_dialects = get_std_dialects() candidates = std_dialects[0:std_dialects.index(std)] candidates.reverse() for cand in candidates: if cand in d.keys(): return cand return None def get_value_before(d, std): new_std = get_std_before(d, std) if new_std is None: return None return d[new_std] def get_for_std(d, std): # This catches the C++11 case for which there should be no defined feature # test macros. 
std_dialects = get_std_dialects() if std not in std_dialects: return None # Find the value for the newest C++ dialect between C++14 and std std_list = list(std_dialects[0:std_dialects.index(std)+1]) std_list.reverse() for s in std_list: if s in d.keys(): return d[s] return None """ Functions to produce the <version> header """ def produce_macros_definition_for_std(std): result = "" indent = 56 for tc in feature_test_macros: if std not in tc["values"]: continue inner_indent = 1 if 'depends' in tc.keys(): assert 'internal_depends' in tc.keys() result += "# if %s\n" % tc["internal_depends"] inner_indent += 2 if get_value_before(tc["values"], std) is not None: assert 'depends' not in tc.keys() result += "# undef %s\n" % tc["name"] line = "#%sdefine %s" % ((" " * inner_indent), tc["name"]) line += " " * (indent - len(line)) line += "%sL" % tc["values"][std] if 'unimplemented' in tc.keys(): line = "// " + line result += line result += "\n" if 'depends' in tc.keys(): result += "# endif\n" return result def chunks(l, n): """Yield successive n-sized chunks from l.""" for i in range(0, len(l), n): yield l[i:i + n] def produce_version_synopsis(): indent = 56 header_indent = 56 + len("20XXYYL ") result = "" def indent_to(s, val): if len(s) >= val: return s s += " " * (val - len(s)) return s line = indent_to("Macro name", indent) + "Value" line = indent_to(line, header_indent) + "Headers" result += line + "\n" for tc in feature_test_macros: prev_defined_std = get_last_std(tc["values"]) line = "{name: <{indent}}{value}L ".format(name=tc['name'], indent=indent, value=tc["values"][prev_defined_std]) headers = list(tc["headers"]) headers.remove("version") for chunk in chunks(headers, 3): line = indent_to(line, header_indent) chunk = ['<%s>' % header for header in chunk] line += ' '.join(chunk) result += line result += "\n" line = "" while True: prev_defined_std = get_std_before(tc["values"], prev_defined_std) if prev_defined_std is None: break result += "%s%sL // %s\n" % (indent_to("", indent), tc["values"][prev_defined_std], prev_defined_std.replace("c++", "C++")) return result def produce_version_header(): template="""// -*- C++ -*- //===--------------------------- version ----------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. 
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef _LIBCUDACXX_VERSIONH
#define _LIBCUDACXX_VERSIONH

/*
version synopsis
{synopsis}
*/

#include <__config>

#if defined(_LIBCUDACXX_USE_PRAGMA_GCC_SYSTEM_HEADER)
#pragma GCC system_header
#endif

#if _LIBCUDACXX_STD_VER > 11
{cxx14_macros}
#endif

#if _LIBCUDACXX_STD_VER > 14
{cxx17_macros}
#endif

#if _LIBCUDACXX_STD_VER > 17
{cxx2a_macros}
#endif

#endif // _LIBCUDACXX_VERSIONH
"""
    return template.format(
        synopsis=produce_version_synopsis().strip(),
        cxx14_macros=produce_macros_definition_for_std('c++14').strip(),
        cxx17_macros=produce_macros_definition_for_std('c++17').strip(),
        cxx2a_macros=produce_macros_definition_for_std('c++2a').strip())


""" Functions to produce test files """

test_types = {
    "undefined": """
# ifdef {name}
# error "{name} should not be defined before {std_first}"
# endif
""",

    "depends": """
# if {depends}
# ifndef {name}
# error "{name} should be defined in {std}"
# endif
# if {name} != {value}
# error "{name} should have the value {value} in {std}"
# endif
# else
# ifdef {name}
# error "{name} should not be defined when {depends} is not defined!"
# endif
# endif
""",

    "unimplemented": """
# if !defined(_LIBCUDACXX_VERSION)
# ifndef {name}
# error "{name} should be defined in {std}"
# endif
# if {name} != {value}
# error "{name} should have the value {value} in {std}"
# endif
# else // _LIBCUDACXX_VERSION
# ifdef {name}
# error "{name} should not be defined because it is unimplemented in libc++!"
# endif
# endif
""",

    "defined": """
# ifndef {name}
# error "{name} should be defined in {std}"
# endif
# if {name} != {value}
# error "{name} should have the value {value} in {std}"
# endif
"""
}


def generate_std_test(test_list, std):
    result = ""
    for tc in test_list:
        val = get_for_std(tc["values"], std)
        if val is not None:
            val = "%sL" % val
        if val is None:
            result += test_types["undefined"].format(
                name=tc["name"], std_first=get_first_std(tc["values"]))
        elif 'unimplemented' in tc.keys():
            result += test_types["unimplemented"].format(
                name=tc["name"], value=val, std=std)
        elif "depends" in tc.keys():
            result += test_types["depends"].format(
                name=tc["name"], value=val, std=std, depends=tc["depends"])
        else:
            result += test_types["defined"].format(
                name=tc["name"], value=val, std=std)
    return result


def generate_synopsis(test_list):
    max_name_len = max([len(tc["name"]) for tc in test_list])
    indent = max_name_len + 8

    def mk_line(prefix, suffix):
        # Pad the first column out to 'indent' characters.
        return "{prefix: <{max_len}}{suffix}\n".format(
            prefix=prefix, suffix=suffix, max_len=indent)

    result = ""
    result += mk_line("/* Constant", "Value")
    for tc in test_list:
        prefix = " %s" % tc["name"]
        for std in [s for s in get_std_dialects() if s in tc["values"].keys()]:
            result += mk_line(prefix, "%sL [%s]" % (
                tc["values"][std], std.replace("c++", "C++")))
            prefix = ""
    result += "*/"
    return result


def is_threading_header_unsafe_to_include(h):
    # NOTE: "<mutex>" does not blow up when included without threads.
return h in ['atomic', 'shared_mutex'] def produce_tests(): headers = set([h for tc in feature_test_macros for h in tc["headers"]]) for h in headers: test_list = [tc for tc in feature_test_macros if h in tc["headers"]] if not has_header(h): for tc in test_list: assert 'unimplemented' in tc.keys() continue test_tags = "" if is_threading_header_unsafe_to_include(h): test_tags += '\n// UNSUPPORTED: libcpp-has-no-threads\n' test_body = \ """//===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // WARNING: This test was generated by {script_name} // and should not be edited manually. {test_tags} // <{header}> // Test the feature test macros defined by <{header}> {synopsis} #include <{header}> #include "test_macros.h" #if TEST_STD_VER < 14 {cxx11_tests} #elif TEST_STD_VER == 14 {cxx14_tests} #elif TEST_STD_VER == 17 {cxx17_tests} #elif TEST_STD_VER > 17 {cxx2a_tests} #endif // TEST_STD_VER > 17 int main(int, char**) {{ return 0; }} """.format(script_name=script_name, header=h, test_tags=test_tags, synopsis=generate_synopsis(test_list), cxx11_tests=generate_std_test(test_list, 'c++11').strip(), cxx14_tests=generate_std_test(test_list, 'c++14').strip(), cxx17_tests=generate_std_test(test_list, 'c++17').strip(), cxx2a_tests=generate_std_test(test_list, 'c++2a').strip()) test_name = "{header}.version.pass.cpp".format(header=h) out_path = os.path.join(macro_test_path, test_name) with open(out_path, 'w') as f: f.write(test_body) """ Produce documentation for the feature test macros """ def make_widths(grid): widths = [] for i in range(0, len(grid[0])): cell_width = 2 + max(reduce(lambda x,y: x+y, [[len(row[i])] for row in grid], [])) widths += [cell_width] return widths def create_table(grid, indent): indent_str = ' '*indent col_widths = make_widths(grid) num_cols = len(grid[0]) result = indent_str + add_divider(col_widths, 2) header_flag = 2 for row_i in xrange(0, len(grid)): row = grid[row_i] result = result + indent_str + ' '.join([pad_cell(row[i], col_widths[i]) for i in range(0, len(row))]) + '\n' is_cxx_header = row[0].startswith('**') if row_i == len(grid) - 1: header_flag = 2 result = result + indent_str + add_divider(col_widths, 1 if is_cxx_header else header_flag) header_flag = 0 return result def add_divider(widths, header_flag): if header_flag == 2: return ' '.join(['='*w for w in widths]) + '\n' if header_flag == 1: return '-'.join(['-'*w for w in widths]) + '\n' else: return ' '.join(['-'*w for w in widths]) + '\n' def pad_cell(s, length, left_align=True): padding = ((length - len(s)) * ' ') return s + padding def get_status_table(): table = [["Macro Name", "Value"]] for std in get_std_dialects(): table += [["**" + std.replace("c++", "C++ ") + "**", ""]] for tc in feature_test_macros: if std not in tc["values"].keys(): continue value = "``%sL``" % tc["values"][std] if 'unimplemented' in tc.keys(): value = '*unimplemented*' table += [["``%s``" % tc["name"], value]] return table def produce_docs(): doc_str = """.. _FeatureTestMacroTable: ========================== Feature Test Macro Support ========================== .. contents:: :local: Overview ======== This file documents the feature test macros currently supported by libc++. .. _feature-status: Status ====== .. 
table:: Current Status :name: feature-status-table :widths: auto {status_tables} """.format(status_tables=create_table(get_status_table(), 4)) table_doc_path = os.path.join(docs_path, 'FeatureTestMacroTable.rst') with open(table_doc_path, 'w') as f: f.write(doc_str) def main(): with tempfile.NamedTemporaryFile(mode='w', prefix='version.', delete=False) as tmp_file: print("producing new <version> header as %s" % tmp_file.name) tmp_file.write(produce_version_header()) produce_tests() produce_docs() if __name__ == '__main__': main()
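# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream generator): how one entry of
# the `feature_test_macros` table drives an emitted `#define` line. The macro
# name and value below are hypothetical stand-ins for the real table defined
# earlier in this file; only the simple case (no 'depends'/'unimplemented'
# flags) is shown.
def _demo_define_line(name, values, std, indent=56):
    """Render one `# define NAME VALUEL` line, column-aligned the same way
    produce_macros_definition_for_std aligns it."""
    line = "# define %s" % name
    line += " " * (indent - len(line))
    return line + "%sL" % values[std]

# Expected behavior (hypothetical entry):
#   _demo_define_line("__cpp_lib_demo", {"c++17": "201703"}, "c++17")
#   -> "# define __cpp_lib_demo" padded to column 56, followed by "201703L"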
cccl-main
libcudacxx/.upstream-tests/utils/generate_feature_test_macro_components.py
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
"""
sym_match - Match all symbols in a list against a list of regexes.
"""
from argparse import ArgumentParser
import sys
from libcudacxx.sym_check import util, match, extract


def main():
    parser = ArgumentParser(
        description='Match all symbols in a list against a list of regexes.')
    parser.add_argument(
        '--blacklist', dest='blacklist',
        type=str, action='store', default=None)
    parser.add_argument(
        'symbol_list', metavar='symbol_list', type=str,
        help='The file containing the symbol list, or a library to extract one from')
    parser.add_argument(
        'regexes', metavar='regexes', default=[], nargs='*',
        help='The regexes to match against the symbol list')
    args = parser.parse_args()

    if not args.regexes and args.blacklist is None:
        sys.stderr.write('Either a regex or a blacklist must be specified.\n')
        sys.exit(1)
    if args.blacklist:
        search_list = util.read_blacklist(args.blacklist)
    else:
        search_list = args.regexes

    symbol_list = util.extract_or_load(args.symbol_list)

    matching_count, report = match.find_and_report_matching(
        symbol_list, search_list)
    sys.stdout.write(report)
    if matching_count != 0:
        print('%d matching symbols found...' % matching_count)


if __name__ == '__main__':
    main()
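# ---------------------------------------------------------------------------
# Usage sketch (hypothetical file names); the first form matches against a
# blacklist file, the second against regexes given on the command line:
#
#   python sym_match.py --blacklist blacklist.txt libcxx.symbols
#   python sym_match.py libcxx.symbols '_ZNSt3__1.*'
#
# The core of find_and_report_matching can be approximated with plain `re`,
# assuming each symbol is a dict with a 'name' key as produced by
# libcudacxx.sym_check.util:
#
#   import re
#   symbols = [{'name': '_ZNSt3__16vectorIiED2Ev'}, {'name': 'main'}]
#   matched = [s for s in symbols
#              if any(re.match(p, s['name']) for p in ['_ZNSt3__1.*'])]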
cccl-main
libcudacxx/.upstream-tests/utils/sym_match.py
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

from argparse import ArgumentParser
from ctypes.util import find_library
import distutils.spawn
import glob
import tempfile
import os
import shutil
import subprocess
import signal
import sys

temp_directory_root = None

def exit_with_cleanups(status):
    if temp_directory_root is not None:
        shutil.rmtree(temp_directory_root)
    sys.exit(status)

def print_and_exit(msg):
    sys.stderr.write(msg + '\n')
    exit_with_cleanups(1)

def find_and_diagnose_missing(lib, search_paths):
    if os.path.exists(lib):
        return os.path.abspath(lib)
    if not lib.startswith('lib') or not lib.endswith('.a'):
        print_and_exit(("input file '%s' does not name a static library. "
                        "It should start with 'lib' and end with '.a'") % lib)
    for sp in search_paths:
        assert type(sp) is list and len(sp) == 1
        path = os.path.join(sp[0], lib)
        if os.path.exists(path):
            return os.path.abspath(path)
    print_and_exit("input '%s' does not exist" % lib)


def execute_command(cmd, cwd=None):
    """
    Execute a command, capture and return its output.
    """
    kwargs = {
        'stdin': subprocess.PIPE,
        'stdout': subprocess.PIPE,
        'stderr': subprocess.PIPE,
        'cwd': cwd,
        'universal_newlines': True
    }
    p = subprocess.Popen(cmd, **kwargs)
    out, err = p.communicate()
    exitCode = p.wait()
    if exitCode == -signal.SIGINT:
        raise KeyboardInterrupt
    return out, err, exitCode


def execute_command_verbose(cmd, cwd=None, verbose=False):
    """
    Execute a command and print its output on failure.
    """
    out, err, exitCode = execute_command(cmd, cwd=cwd)
    if exitCode != 0 or verbose:
        report = "Command: %s\n" % ' '.join(["'%s'" % a for a in cmd])
        if exitCode != 0:
            report += "Exit Code: %d\n" % exitCode
        if out:
            report += "Standard Output:\n--\n%s--" % out
        if err:
            report += "Standard Error:\n--\n%s--" % err
        if exitCode != 0:
            report += "\n\nFailed!"
        sys.stderr.write('%s\n' % report)
        if exitCode != 0:
            exit_with_cleanups(exitCode)
    return out


def main():
    parser = ArgumentParser(
        description="Merge multiple archives into a single library")
    parser.add_argument(
        '-v', '--verbose', dest='verbose', action='store_true', default=False)
    parser.add_argument(
        '-o', '--output', dest='output', required=True,
        help='The output file.
stdout is used if not given', type=str, action='store') parser.add_argument( '-L', dest='search_paths', help='Paths to search for the libraries along', action='append', nargs=1) parser.add_argument( '--ar', dest='ar_exe', required=False, help='The ar executable to use, finds \'ar\' in the path if not given', type=str, action='store') parser.add_argument( '--use-libtool', dest='use_libtool', action='store_true', default=False) parser.add_argument( '--libtool', dest='libtool_exe', required=False, help='The libtool executable to use, finds \'libtool\' in the path if not given', type=str, action='store') parser.add_argument( 'archives', metavar='archives', nargs='+', help='The archives to merge') args = parser.parse_args() ar_exe = args.ar_exe if not ar_exe: ar_exe = distutils.spawn.find_executable('ar') if not ar_exe: print_and_exit("failed to find 'ar' executable") if args.use_libtool: libtool_exe = args.libtool_exe if not libtool_exe: libtool_exe = distutils.spawn.find_executable('libtool') if not libtool_exe: print_and_exit("failed to find 'libtool' executable") if len(args.archives) < 2: print_and_exit('fewer than 2 inputs provided') archives = [find_and_diagnose_missing(ar, args.search_paths) for ar in args.archives] print ('Merging archives: %s' % archives) if not os.path.exists(os.path.dirname(args.output)): print_and_exit("output path doesn't exist: '%s'" % args.output) global temp_directory_root temp_directory_root = tempfile.mkdtemp('.libcxx.merge.archives') files = [] for arc in archives: execute_command_verbose([ar_exe, 'x', arc], cwd=temp_directory_root, verbose=args.verbose) out = execute_command_verbose([ar_exe, 't', arc]) files.extend(out.splitlines()) if args.use_libtool: files = [f for f in files if not f.startswith('__.SYMDEF')] execute_command_verbose([libtool_exe, '-static', '-o', args.output] + files, cwd=temp_directory_root, verbose=args.verbose) else: execute_command_verbose([ar_exe, 'rcs', args.output] + files, cwd=temp_directory_root, verbose=args.verbose) if __name__ == '__main__': main() exit_with_cleanups(0)
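# ---------------------------------------------------------------------------
# Usage sketch (hypothetical paths): merge two static libraries, searching
# ./build/lib for inputs that are given by name only:
#
#   python merge_archives.py -o libmerged.a libfoo.a libbar.a -L ./build/lib
#
# The underlying recipe, reduced to its essentials: extract every member of
# each input archive into one scratch directory, then repack the union,
# which is exactly what the `ar x` / `ar rcs` calls above do:
#
#   ar x libfoo.a        # run inside the scratch directory
#   ar x libbar.a
#   ar rcs libmerged.a *.o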
cccl-main
libcudacxx/.upstream-tests/utils/merge_archives.py
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

"""not.py is a utility for inverting the return code of commands.
It acts similar to llvm/utils/not.

ex: python /path/to/not.py echo hello
    echo $?  # prints 1
"""

import subprocess
import sys


def which_cannot_find_program(prog):
    # Allow for import errors on distutils.spawn
    try:
        import distutils.spawn
        prog = distutils.spawn.find_executable(prog[0])
        if prog is None:
            sys.stderr.write('Failed to find program %s' % prog[0])
            return True
        return False
    except:
        return False


def main():
    argv = list(sys.argv)
    del argv[0]
    if len(argv) > 0 and argv[0] == '--crash':
        del argv[0]
        expectCrash = True
    else:
        expectCrash = False
    if len(argv) == 0:
        return 1
    if which_cannot_find_program(argv[0]):
        return 1

    rc = subprocess.call(argv)

    if rc < 0:
        return 0 if expectCrash else 1

    if expectCrash:
        return 1

    return rc == 0


if __name__ == '__main__':
    exit(main())
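# ---------------------------------------------------------------------------
# Behavior sketch: `not.py` flips the success/failure of the wrapped command.
#
#   python not.py false   # exits 0, because `false` failed
#   python not.py true    # exits 1, because `true` succeeded
#
# With --crash, only death by signal counts as success (rc < 0 from
# subprocess.call means the child was killed by a signal):
#
#   python not.py --crash some_crashing_test   # exits 0 iff killed by signal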
cccl-main
libcudacxx/.upstream-tests/utils/not.py
#===----------------------------------------------------------------------===## # # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. # See https://llvm.org/LICENSE.txt for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # #===----------------------------------------------------------------------===## """GDB pretty-printers for libc++. These should work for objects compiled when _LIBCUDACXX_ABI_UNSTABLE is defined and when it is undefined. """ from __future__ import print_function import re import gdb # One under-documented feature of the gdb pretty-printer API # is that clients can call any other member of the API # before they call to_string. # Therefore all self.FIELDs must be set in the pretty-printer's # __init__ function. _void_pointer_type = gdb.lookup_type("void").pointer() _long_int_type = gdb.lookup_type("unsigned long long") _libcpp_big_endian = False def addr_as_long(addr): return int(addr.cast(_long_int_type)) # The size of a pointer in bytes. _pointer_size = _void_pointer_type.sizeof def _remove_cxx_namespace(typename): """Removed libc++ specific namespace from the type. Arguments: typename(string): A type, such as std::__u::something. Returns: A string without the libc++ specific part, such as std::something. """ return re.sub("std::__.*?::", "std::", typename) def _remove_generics(typename): """Remove generics part of the type. Assumes typename is not empty. Arguments: typename(string): A type such as std::my_collection<element>. Returns: The prefix up to the generic part, such as std::my_collection. """ match = re.match("^([^<]+)", typename) return match.group(1) # Some common substitutions on the types to reduce visual clutter (A user who # wants to see the actual details can always use print/r). _common_substitutions = [ ("std::basic_string<char, std::char_traits<char>, std::allocator<char> >", "std::string"), ] def _prettify_typename(gdb_type): """Returns a pretty name for the type, or None if no name can be found. Arguments: gdb_type(gdb.Type): A type object. Returns: A string, without type_defs, libc++ namespaces, and common substitutions applied. """ type_without_typedefs = gdb_type.strip_typedefs() typename = type_without_typedefs.name or type_without_typedefs.tag or \ str(type_without_typedefs) result = _remove_cxx_namespace(typename) for find_str, subst_str in _common_substitutions: result = re.sub(find_str, subst_str, result) return result def _typename_for_nth_generic_argument(gdb_type, n): """Returns a pretty string for the nth argument of the given type. Arguments: gdb_type(gdb.Type): A type object, such as the one for std::map<int, int> n: The (zero indexed) index of the argument to return. Returns: A string for the nth argument, such a "std::string" """ element_type = gdb_type.template_argument(n) return _prettify_typename(element_type) def _typename_with_n_generic_arguments(gdb_type, n): """Return a string for the type with the first n (1, ...) 
generic args.""" base_type = _remove_generics(_prettify_typename(gdb_type)) arg_list = [base_type] template = "%s<" for i in range(n): arg_list.append(_typename_for_nth_generic_argument(gdb_type, i)) template += "%s, " result = (template[:-2] + ">") % tuple(arg_list) return result def _typename_with_first_generic_argument(gdb_type): return _typename_with_n_generic_arguments(gdb_type, 1) class StdTuplePrinter(object): """Print a std::tuple.""" class _Children(object): """Class to iterate over the tuple's children.""" def __init__(self, val): self.val = val self.child_iter = iter(self.val["__base_"].type.fields()) self.count = 0 def __iter__(self): return self def next(self): # child_iter raises StopIteration when appropriate. field_name = self.child_iter.next() child = self.val["__base_"][field_name]["__value_"] self.count += 1 return ("[%d]" % self.count, child) def __init__(self, val): self.val = val def to_string(self): typename = _remove_generics(_prettify_typename(self.val.type)) if not self.val.type.fields(): return "empty %s" % typename return "%s containing" % typename def children(self): if not self.val.type.fields(): return iter(()) return self._Children(self.val) def _get_base_subobject(child_class_value, index=0): """Returns the object's value in the form of the parent class at index. This function effectively casts the child_class_value to the base_class's type, but the type-to-cast to is stored in the field at index, and once we know the field, we can just return the data. Args: child_class_value: the value to cast index: the parent class index Raises: Exception: field at index was not a base-class field. """ field = child_class_value.type.fields()[index] if not field.is_base_class: raise Exception("Not a base-class field.") return child_class_value[field] def _value_of_pair_first(value): """Convenience for _get_base_subobject, for the common case.""" return _get_base_subobject(value, 0)["__value_"] class StdStringPrinter(object): """Print a std::string.""" def _get_short_size(self, short_field, short_size): """Short size depends on both endianness and a compile-time define.""" # If the padding field is present after all this indirection, then string # was compiled with _LIBCUDACXX_ABI_ALTERNATE_STRING_LAYOUT defined. field = short_field.type.fields()[1].type.fields()[0] libcpp_abi_alternate_string_layout = field.name and "__padding" in field.name # This logical structure closely follows the original code (which is clearer # in C++). Keep them parallel to make them easier to compare. if libcpp_abi_alternate_string_layout: if _libcpp_big_endian: return short_size >> 1 else: return short_size elif _libcpp_big_endian: return short_size else: return short_size >> 1 def __init__(self, val): self.val = val def to_string(self): """Build a python string from the data whether stored inline or separately.""" value_field = _value_of_pair_first(self.val["__r_"]) short_field = value_field["__s"] short_size = short_field["__size_"] if short_size == 0: return "" short_mask = self.val["__short_mask"] # Counter intuitive to compare the size and short_mask to see if the string # is long, but that's the way the implementation does it. Note that # __is_long() doesn't use get_short_size in C++. 
is_long = short_size & short_mask if is_long: long_field = value_field["__l"] data = long_field["__data_"] size = long_field["__size_"] else: data = short_field["__data_"] size = self._get_short_size(short_field, short_size) if hasattr(data, "lazy_string"): return data.lazy_string(length=size) return data.string(length=size) def display_hint(self): return "string" class StdUniquePtrPrinter(object): """Print a std::unique_ptr.""" def __init__(self, val): self.val = val self.addr = _value_of_pair_first(self.val["__ptr_"]) self.pointee_type = self.val.type.template_argument(0) def to_string(self): typename = _remove_generics(_prettify_typename(self.val.type)) if not self.addr: return "%s is nullptr" % typename return ("%s<%s> containing" % (typename, _remove_generics(_prettify_typename(self.pointee_type)))) def __iter__(self): if self.addr: yield "__ptr_", self.addr.cast(self.pointee_type.pointer()) def children(self): return self class StdSharedPointerPrinter(object): """Print a std::shared_ptr.""" def __init__(self, val): self.val = val self.addr = self.val["__ptr_"] def to_string(self): """Returns self as a string.""" typename = _remove_generics(_prettify_typename(self.val.type)) pointee_type = _remove_generics( _prettify_typename(self.val.type.template_argument(0))) if not self.addr: return "%s is nullptr" % typename refcount = self.val["__cntrl_"] if refcount != 0: usecount = refcount["__shared_owners_"] + 1 weakcount = refcount["__shared_weak_owners_"] if usecount == 0: state = "expired, weak %d" % weakcount else: state = "count %d, weak %d" % (usecount, weakcount) return "%s<%s> %s containing" % (typename, pointee_type, state) def __iter__(self): if self.addr: yield "__ptr_", self.addr def children(self): return self class StdVectorPrinter(object): """Print a std::vector.""" class _VectorBoolIterator(object): """Class to iterate over the bool vector's children.""" def __init__(self, begin, size, bits_per_word): self.item = begin self.size = size self.bits_per_word = bits_per_word self.count = 0 self.offset = 0 def __iter__(self): return self def next(self): """Retrieve the next element.""" self.count += 1 if self.count > self.size: raise StopIteration entry = self.item.dereference() if entry & (1 << self.offset): outbit = 1 else: outbit = 0 self.offset += 1 if self.offset >= self.bits_per_word: self.item += 1 self.offset = 0 return ("[%d]" % self.count, outbit) class _VectorIterator(object): """Class to iterate over the non-bool vector's children.""" def __init__(self, begin, end): self.item = begin self.end = end self.count = 0 def __iter__(self): return self def next(self): self.count += 1 if self.item == self.end: raise StopIteration entry = self.item.dereference() self.item += 1 return ("[%d]" % self.count, entry) def __init__(self, val): """Set val, length, capacity, and iterator for bool and normal vectors.""" self.val = val self.typename = _remove_generics(_prettify_typename(val.type)) begin = self.val["__begin_"] if self.val.type.template_argument(0).code == gdb.TYPE_CODE_BOOL: self.typename += "<bool>" self.length = self.val["__size_"] bits_per_word = self.val["__bits_per_word"] self.capacity = _value_of_pair_first( self.val["__cap_alloc_"]) * bits_per_word self.iterator = self._VectorBoolIterator( begin, self.length, bits_per_word) else: end = self.val["__end_"] self.length = end - begin self.capacity = _get_base_subobject( self.val["__end_cap_"])["__value_"] - begin self.iterator = self._VectorIterator(begin, end) def to_string(self): return ("%s of length %d, capacity %d" % 
(self.typename, self.length, self.capacity)) def children(self): return self.iterator def display_hint(self): return "array" class StdBitsetPrinter(object): """Print a std::bitset.""" def __init__(self, val): self.val = val self.n_words = int(self.val["__n_words"]) self.bits_per_word = int(self.val["__bits_per_word"]) if self.n_words == 1: self.values = [int(self.val["__first_"])] else: self.values = [int(self.val["__first_"][index]) for index in range(self.n_words)] def to_string(self): typename = _prettify_typename(self.val.type) return "%s" % typename def _byte_it(self, value): index = -1 while value: index += 1 will_yield = value % 2 value /= 2 if will_yield: yield index def _list_it(self): for word_index in range(self.n_words): current = self.values[word_index] if current: for n in self._byte_it(current): yield ("[%d]" % (word_index * self.bits_per_word + n), 1) def __iter__(self): return self._list_it() def children(self): return self class StdDequePrinter(object): """Print a std::deque.""" def __init__(self, val): self.val = val self.size = int(_value_of_pair_first(val["__size_"])) self.start_ptr = self.val["__map_"]["__begin_"] self.first_block_start_index = int(self.val["__start_"]) self.node_type = self.start_ptr.type self.block_size = self._calculate_block_size( val.type.template_argument(0)) def _calculate_block_size(self, element_type): """Calculates the number of elements in a full block.""" size = element_type.sizeof # Copied from struct __deque_block_size implementation of libcxx. return 4096 / size if size < 256 else 16 def _bucket_it(self, start_addr, start_index, end_index): for i in range(start_index, end_index): yield i, (start_addr.dereference() + i).dereference() def _list_it(self): """Primary iteration worker.""" num_emitted = 0 current_addr = self.start_ptr start_index = self.first_block_start_index while num_emitted < self.size: end_index = min(start_index + self.size - num_emitted, self.block_size) for _, elem in self._bucket_it(current_addr, start_index, end_index): yield "", elem num_emitted += end_index - start_index current_addr = gdb.Value(addr_as_long(current_addr) + _pointer_size) \ .cast(self.node_type) start_index = 0 def to_string(self): typename = _remove_generics(_prettify_typename(self.val.type)) if self.size: return "%s with %d elements" % (typename, self.size) return "%s is empty" % typename def __iter__(self): return self._list_it() def children(self): return self def display_hint(self): return "array" class StdListPrinter(object): """Print a std::list.""" def __init__(self, val): self.val = val size_alloc_field = self.val["__size_alloc_"] self.size = int(_value_of_pair_first(size_alloc_field)) dummy_node = self.val["__end_"] self.nodetype = gdb.lookup_type( re.sub("__list_node_base", "__list_node", str(dummy_node.type.strip_typedefs()))).pointer() self.first_node = dummy_node["__next_"] def to_string(self): typename = _remove_generics(_prettify_typename(self.val.type)) if self.size: return "%s with %d elements" % (typename, self.size) return "%s is empty" % typename def _list_iter(self): current_node = self.first_node for _ in range(self.size): yield "", current_node.cast(self.nodetype).dereference()["__value_"] current_node = current_node.dereference()["__next_"] def __iter__(self): return self._list_iter() def children(self): return self if self.nodetype else iter(()) def display_hint(self): return "array" class StdQueueOrStackPrinter(object): """Print a std::queue or std::stack.""" def __init__(self, val): self.val = val self.underlying = 
val["c"] def to_string(self): typename = _remove_generics(_prettify_typename(self.val.type)) return "%s wrapping" % typename def children(self): return iter([("", self.underlying)]) def display_hint(self): return "array" class StdPriorityQueuePrinter(object): """Print a std::priority_queue.""" def __init__(self, val): self.val = val self.underlying = val["c"] def to_string(self): # TODO(tamur): It would be nice to print the top element. The technical # difficulty is that, the implementation refers to the underlying # container, which is a generic class. libstdcxx pretty printers do not # print the top element. typename = _remove_generics(_prettify_typename(self.val.type)) return "%s wrapping" % typename def children(self): return iter([("", self.underlying)]) def display_hint(self): return "array" class RBTreeUtils(object): """Utility class for std::(multi)map, and std::(multi)set and iterators.""" def __init__(self, cast_type, root): self.cast_type = cast_type self.root = root def left_child(self, node): result = node.cast(self.cast_type).dereference()["__left_"] return result def right_child(self, node): result = node.cast(self.cast_type).dereference()["__right_"] return result def parent(self, node): """Return the parent of node, if it exists.""" # If this is the root, then from the algorithm's point of view, it has no # parent. if node == self.root: return None # We don't have enough information to tell if this is the end_node (which # doesn't have a __parent_ field), or the root (which doesn't have a parent # from the algorithm's point of view), so cast_type may not be correct for # this particular node. Use heuristics. # The end_node's left child is the root. Note that when printing interators # in isolation, the root is unknown. if self.left_child(node) == self.root: return None parent = node.cast(self.cast_type).dereference()["__parent_"] # If the value at the offset of __parent_ doesn't look like a valid pointer, # then assume that node is the end_node (and therefore has no parent). # End_node type has a pointer embedded, so should have pointer alignment. if addr_as_long(parent) % _void_pointer_type.alignof: return None # This is ugly, but the only other option is to dereference an invalid # pointer. 0x8000 is fairly arbitrary, but has had good results in # practice. If there was a way to tell if a pointer is invalid without # actually dereferencing it and spewing error messages, that would be ideal. 
if parent < 0x8000: return None return parent def is_left_child(self, node): parent = self.parent(node) return parent is not None and self.left_child(parent) == node def is_right_child(self, node): parent = self.parent(node) return parent is not None and self.right_child(parent) == node class AbstractRBTreePrinter(object): """Abstract super class for std::(multi)map, and std::(multi)set.""" def __init__(self, val): self.val = val tree = self.val["__tree_"] self.size = int(_value_of_pair_first(tree["__pair3_"])) dummy_root = tree["__pair1_"] root = _value_of_pair_first(dummy_root)["__left_"] cast_type = self._init_cast_type(val.type) self.util = RBTreeUtils(cast_type, root) def _get_key_value(self, node): """Subclasses should override to return a list of values to yield.""" raise NotImplementedError def _traverse(self): """Traverses the binary search tree in order.""" current = self.util.root skip_left_child = False while True: if not skip_left_child and self.util.left_child(current): current = self.util.left_child(current) continue skip_left_child = False for key_value in self._get_key_value(current): yield "", key_value right_child = self.util.right_child(current) if right_child: current = right_child continue while self.util.is_right_child(current): current = self.util.parent(current) if self.util.is_left_child(current): current = self.util.parent(current) skip_left_child = True continue break def __iter__(self): return self._traverse() def children(self): return self if self.util.cast_type and self.size > 0 else iter(()) def to_string(self): typename = _remove_generics(_prettify_typename(self.val.type)) if self.size: return "%s with %d elements" % (typename, self.size) return "%s is empty" % typename class StdMapPrinter(AbstractRBTreePrinter): """Print a std::map or std::multimap.""" def _init_cast_type(self, val_type): map_it_type = gdb.lookup_type( str(val_type) + "::iterator").strip_typedefs() tree_it_type = map_it_type.template_argument(0) node_ptr_type = tree_it_type.template_argument(1) return node_ptr_type def display_hint(self): return "map" def _get_key_value(self, node): key_value = node.cast(self.util.cast_type).dereference()[ "__value_"]["__cc"] return [key_value["first"], key_value["second"]] class StdSetPrinter(AbstractRBTreePrinter): """Print a std::set.""" def _init_cast_type(self, val_type): set_it_type = gdb.lookup_type( str(val_type) + "::iterator").strip_typedefs() node_ptr_type = set_it_type.template_argument(1) return node_ptr_type def display_hint(self): return "array" def _get_key_value(self, node): key_value = node.cast(self.util.cast_type).dereference()["__value_"] return [key_value] class AbstractRBTreeIteratorPrinter(object): """Abstract super class for std::(multi)map, and std::(multi)set iterator.""" def _initialize(self, val, typename): self.typename = typename self.val = val self.addr = self.val["__ptr_"] cast_type = self.val.type.template_argument(1) self.util = RBTreeUtils(cast_type, None) if self.addr: self.node = self.addr.cast(cast_type).dereference() def _is_valid_node(self): if not self.util.parent(self.addr): return False return self.util.is_left_child(self.addr) or \ self.util.is_right_child(self.addr) def to_string(self): if not self.addr: return "%s is nullptr" % self.typename return "%s " % self.typename def _get_node_value(self, node): raise NotImplementedError def __iter__(self): addr_str = "[%s]" % str(self.addr) if not self._is_valid_node(): yield addr_str, " end()" else: yield addr_str, self._get_node_value(self.node) def children(self): 
return self if self.addr else iter(()) class MapIteratorPrinter(AbstractRBTreeIteratorPrinter): """Print a std::(multi)map iterator.""" def __init__(self, val): self._initialize(val["__i_"], _remove_generics(_prettify_typename(val.type))) def _get_node_value(self, node): return node["__value_"]["__cc"] class SetIteratorPrinter(AbstractRBTreeIteratorPrinter): """Print a std::(multi)set iterator.""" def __init__(self, val): self._initialize(val, _remove_generics(_prettify_typename(val.type))) def _get_node_value(self, node): return node["__value_"] class StdFposPrinter(object): """Print a std::fpos or std::streampos.""" def __init__(self, val): self.val = val def to_string(self): typename = _remove_generics(_prettify_typename(self.val.type)) offset = self.val["__off_"] state = self.val["__st_"] count = state["__count"] value = state["__value"]["__wch"] return "%s with stream offset:%s with state: {count:%s value:%s}" % ( typename, offset, count, value) class AbstractUnorderedCollectionPrinter(object): """Abstract super class for std::unordered_(multi)[set|map].""" def __init__(self, val): self.val = val self.table = val["__table_"] self.sentinel = self.table["__p1_"] self.size = int(_value_of_pair_first(self.table["__p2_"])) node_base_type = self.sentinel.type.template_argument(0) self.cast_type = node_base_type.template_argument(0) def _list_it(self, sentinel_ptr): next_ptr = _value_of_pair_first(sentinel_ptr)["__next_"] while str(next_ptr.cast(_void_pointer_type)) != "0x0": next_val = next_ptr.cast(self.cast_type).dereference() for key_value in self._get_key_value(next_val): yield "", key_value next_ptr = next_val["__next_"] def to_string(self): typename = _remove_generics(_prettify_typename(self.val.type)) if self.size: return "%s with %d elements" % (typename, self.size) return "%s is empty" % typename def _get_key_value(self, node): """Subclasses should override to return a list of values to yield.""" raise NotImplementedError def children(self): return self if self.cast_type and self.size > 0 else iter(()) def __iter__(self): return self._list_it(self.sentinel) class StdUnorderedSetPrinter(AbstractUnorderedCollectionPrinter): """Print a std::unordered_(multi)set.""" def _get_key_value(self, node): return [node["__value_"]] def display_hint(self): return "array" class StdUnorderedMapPrinter(AbstractUnorderedCollectionPrinter): """Print a std::unordered_(multi)map.""" def _get_key_value(self, node): key_value = node["__value_"]["__cc"] return [key_value["first"], key_value["second"]] def display_hint(self): return "map" class AbstractHashMapIteratorPrinter(object): """Abstract class for unordered collection iterators.""" def _initialize(self, val, addr): self.val = val self.typename = _remove_generics(_prettify_typename(self.val.type)) self.addr = addr if self.addr: self.node = self.addr.cast(self.cast_type).dereference() def _get_key_value(self): """Subclasses should override to return a list of values to yield.""" raise NotImplementedError def to_string(self): if not self.addr: return "%s = end()" % self.typename return "%s " % self.typename def children(self): return self if self.addr else iter(()) def __iter__(self): for key_value in self._get_key_value(): yield "", key_value class StdUnorderedSetIteratorPrinter(AbstractHashMapIteratorPrinter): """Print a std::(multi)set iterator.""" def __init__(self, val): self.cast_type = val.type.template_argument(0) self._initialize(val, val["__node_"]) def _get_key_value(self): return [self.node["__value_"]] def display_hint(self): return 
"array" class StdUnorderedMapIteratorPrinter(AbstractHashMapIteratorPrinter): """Print a std::(multi)map iterator.""" def __init__(self, val): self.cast_type = val.type.template_argument(0).template_argument(0) self._initialize(val, val["__i_"]["__node_"]) def _get_key_value(self): key_value = self.node["__value_"]["__cc"] return [key_value["first"], key_value["second"]] def display_hint(self): return "map" def _remove_std_prefix(typename): match = re.match("^std::(.+)", typename) return match.group(1) if match is not None else "" class LibcxxPrettyPrinter(object): """PrettyPrinter object so gdb-commands like 'info pretty-printers' work.""" def __init__(self, name): super(LibcxxPrettyPrinter, self).__init__() self.name = name self.enabled = True self.lookup = { "basic_string": StdStringPrinter, "string": StdStringPrinter, "tuple": StdTuplePrinter, "unique_ptr": StdUniquePtrPrinter, "shared_ptr": StdSharedPointerPrinter, "weak_ptr": StdSharedPointerPrinter, "bitset": StdBitsetPrinter, "deque": StdDequePrinter, "list": StdListPrinter, "queue": StdQueueOrStackPrinter, "stack": StdQueueOrStackPrinter, "priority_queue": StdPriorityQueuePrinter, "map": StdMapPrinter, "multimap": StdMapPrinter, "set": StdSetPrinter, "multiset": StdSetPrinter, "vector": StdVectorPrinter, "__map_iterator": MapIteratorPrinter, "__map_const_iterator": MapIteratorPrinter, "__tree_iterator": SetIteratorPrinter, "__tree_const_iterator": SetIteratorPrinter, "fpos": StdFposPrinter, "unordered_set": StdUnorderedSetPrinter, "unordered_multiset": StdUnorderedSetPrinter, "unordered_map": StdUnorderedMapPrinter, "unordered_multimap": StdUnorderedMapPrinter, "__hash_map_iterator": StdUnorderedMapIteratorPrinter, "__hash_map_const_iterator": StdUnorderedMapIteratorPrinter, "__hash_iterator": StdUnorderedSetIteratorPrinter, "__hash_const_iterator": StdUnorderedSetIteratorPrinter, } self.subprinters = [] for name, subprinter in self.lookup.items(): # Subprinters and names are used only for the rarely used command "info # pretty" (and related), so the name of the first data structure it prints # is a reasonable choice. if subprinter not in self.subprinters: subprinter.name = name self.subprinters.append(subprinter) def __call__(self, val): """Return the pretty printer for a val, if the type is supported.""" # Do not handle any type that is not a struct/class. if val.type.strip_typedefs().code != gdb.TYPE_CODE_STRUCT: return None # Don't attempt types known to be inside libstdcxx. typename = val.type.name or val.type.tag or str(val.type) match = re.match("^std::(__.*?)::", typename) if match is None or match.group(1) in ["__cxx1998", "__debug", "__7", "__g"]: return None # Handle any using declarations or other typedefs. typename = _prettify_typename(val.type) if not typename: return None without_generics = _remove_generics(typename) lookup_name = _remove_std_prefix(without_generics) if lookup_name in self.lookup: return self.lookup[lookup_name](val) return None _libcxx_printer_name = "libcxx_pretty_printer" # These are called for every binary object file, which could be thousands in # certain pathological cases. Limit our pretty printers to the progspace. def _register_libcxx_printers(event): progspace = event.new_objfile.progspace # It would be ideal to get the endianness at print time, but # gdb.execute clears gdb's internal wrap buffer, removing any values # already generated as part of a larger data structure, and there is # no python api to get the endianness. 
Mixed-endianness debugging is
    # rare enough that this workaround should be adequate.
    # The explicit `global` is required; without it this assignment would
    # only bind a function-local name and the module-level flag would stay
    # False.
    global _libcpp_big_endian
    _libcpp_big_endian = "big endian" in gdb.execute("show endian",
                                                     to_string=True)

    if not getattr(progspace, _libcxx_printer_name, False):
        print("Loading libc++ pretty-printers.")
        gdb.printing.register_pretty_printer(
            progspace, LibcxxPrettyPrinter(_libcxx_printer_name))
        setattr(progspace, _libcxx_printer_name, True)


def _unregister_libcxx_printers(event):
    progspace = event.progspace
    if getattr(progspace, _libcxx_printer_name, False):
        for printer in progspace.pretty_printers:
            if getattr(printer, "name", "none") == _libcxx_printer_name:
                progspace.pretty_printers.remove(printer)
                setattr(progspace, _libcxx_printer_name, False)
                break


def register_libcxx_printer_loader():
    """Register event handlers to load libc++ pretty-printers."""
    gdb.events.new_objfile.connect(_register_libcxx_printers)
    gdb.events.clear_objfiles.connect(_unregister_libcxx_printers)
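# ---------------------------------------------------------------------------
# Loading sketch: one way to activate these printers from a .gdbinit or an
# interactive session (the path below is hypothetical):
#
#   (gdb) python
#   >import sys
#   >sys.path.insert(0, '/path/to/libcudacxx/utils/gdb/libcxx')
#   >import printers
#   >printers.register_libcxx_printer_loader()
#   >end
#
# Once registered, `print my_vector` renders through StdVectorPrinter instead
# of the raw struct layout.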
cccl-main
libcudacxx/.upstream-tests/utils/gdb/libcxx/printers.py
#! /usr/bin/env python
# encoding: utf-8

import argparse
import errno
import logging
import os
import platform
import re
import sys
import subprocess
import tempfile

try:
    import winreg
except ImportError:
    import _winreg as winreg
try:
    import urllib.request as request
except ImportError:
    import urllib as request
try:
    import urllib.parse as parse
except ImportError:
    import urlparse as parse

class EmptyLogger(object):
    '''
    Provides an implementation that performs no logging
    '''
    def debug(self, *k, **kw):
        pass
    def info(self, *k, **kw):
        pass
    def warn(self, *k, **kw):
        pass
    def error(self, *k, **kw):
        pass
    def critical(self, *k, **kw):
        pass
    def setLevel(self, *k, **kw):
        pass

urls = (
    'http://downloads.sourceforge.net/project/mingw-w64/Toolchains%20'
        'targetting%20Win32/Personal%20Builds/mingw-builds/installer/'
        'repository.txt',
    'http://downloads.sourceforge.net/project/mingwbuilds/host-windows/'
        'repository.txt'
)
'''
A list of mingw-build repositories
'''

def repository(urls = urls, log = EmptyLogger()):
    '''
    Downloads and parses the mingw-build repository files
    '''
    log.info('getting mingw-builds repository')
    versions = {}
    re_sourceforge = re.compile(r'http://sourceforge.net/projects/([^/]+)/files')
    re_sub = r'http://downloads.sourceforge.net/project/\1'
    for url in urls:
        log.debug(' - requesting: %s', url)
        socket = request.urlopen(url)
        repo = socket.read()
        if not isinstance(repo, str):
            repo = repo.decode()
        socket.close()
        for entry in repo.split('\n')[:-1]:
            value = entry.split('|')
            version = tuple([int(n) for n in value[0].strip().split('.')])
            version = versions.setdefault(version, {})
            arch = value[1].strip()
            if arch == 'x32':
                arch = 'i686'
            elif arch == 'x64':
                arch = 'x86_64'
            arch = version.setdefault(arch, {})
            threading = arch.setdefault(value[2].strip(), {})
            exceptions = threading.setdefault(value[3].strip(), {})
            revision = exceptions.setdefault(int(value[4].strip()[3:]),
                re_sourceforge.sub(re_sub, value[5].strip()))
    return versions

def find_in_path(file, path=None):
    '''
    Attempts to find an executable in the path
    '''
    if platform.system() == 'Windows':
        file += '.exe'
    if path is None:
        path = os.environ.get('PATH', '')
    if type(path) is type(''):
        path = path.split(os.pathsep)
    return list(filter(os.path.exists,
        map(lambda dir, file=file: os.path.join(dir, file), path)))

def find_7zip(log = EmptyLogger()):
    '''
    Attempts to find 7zip for unpacking the mingw-build archives
    '''
    log.info('finding 7zip')
    path = find_in_path('7z')
    if not path:
        key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\7-Zip')
        path, _ = winreg.QueryValueEx(key, 'Path')
        path = [os.path.join(path, '7z.exe')]
    log.debug('found \'%s\'', path[0])
    return path[0]

def unpack(archive, location, log = EmptyLogger()):
    '''
    Unpacks a mingw-builds archive
    '''
    sevenzip = find_7zip(log)
    log.info('unpacking %s', os.path.basename(archive))
    cmd = [sevenzip, 'x', archive, '-o' + location, '-y']
    log.debug(' - %r', cmd)
    with open(os.devnull, 'w') as devnull:
        subprocess.check_call(cmd, stdout = devnull)

def download(url, location, log = EmptyLogger()):
    '''
    Downloads and unpacks a mingw-builds archive
    '''
    log.info('downloading MinGW')
    log.debug(' - url: %s', url)
    log.debug(' - location: %s', location)
    re_content = re.compile(r'attachment;[ \t]*filename=(")?([^"]*)(")?[\r\n]*')
    stream = request.urlopen(url)
    try:
        content = stream.getheader('Content-Disposition') or ''
    except AttributeError:
        content = stream.headers.getheader('Content-Disposition') or ''
    matches = re_content.match(content)
    if matches:
        filename = matches.group(2)
    else:
        parsed = parse.urlparse(stream.geturl())
        filename = os.path.basename(parsed.path)
    try:
        os.makedirs(location)
    except OSError as e:
        if e.errno == errno.EEXIST and os.path.isdir(location):
            pass
        else:
            raise
    archive = os.path.join(location, filename)
    with open(archive, 'wb') as out:
        while True:
            buf = stream.read(1024)
            if not buf:
                break
            out.write(buf)
    unpack(archive, location, log = log)
    os.remove(archive)

    possible = os.path.join(location, 'mingw64')
    if not os.path.exists(possible):
        possible = os.path.join(location, 'mingw32')
        if not os.path.exists(possible):
            raise ValueError('Failed to find unpacked MinGW: ' + possible)
    return possible

def root(location = None, arch = None, version = None, threading = None,
        exceptions = None, revision = None, log = EmptyLogger()):
    '''
    Returns the root folder of a specific version of the mingw-builds variant
    of gcc. Will download the compiler if needed
    '''

    # Get the repository if we don't have all the information
    if not (arch and version and threading and exceptions and revision):
        versions = repository(log = log)

    # Determine some defaults
    version = version or max(versions.keys())
    if not arch:
        arch = platform.machine().lower()
        if arch == 'x86':
            arch = 'i686'
        elif arch == 'amd64':
            arch = 'x86_64'
    if not threading:
        keys = versions[version][arch].keys()
        if 'posix' in keys:
            threading = 'posix'
        elif 'win32' in keys:
            threading = 'win32'
        else:
            threading = keys[0]
    if not exceptions:
        keys = versions[version][arch][threading].keys()
        if 'seh' in keys:
            exceptions = 'seh'
        elif 'sjlj' in keys:
            exceptions = 'sjlj'
        else:
            exceptions = keys[0]
    if revision is None:
        revision = max(versions[version][arch][threading][exceptions].keys())
    if not location:
        location = os.path.join(tempfile.gettempdir(), 'mingw-builds')

    # Get the download url
    url = versions[version][arch][threading][exceptions][revision]

    # Tell the user whatzzup
    log.info('finding MinGW %s', '.'.join(str(v) for v in version))
    log.debug(' - arch: %s', arch)
    log.debug(' - threading: %s', threading)
    log.debug(' - exceptions: %s', exceptions)
    log.debug(' - revision: %s', revision)
    log.debug(' - url: %s', url)

    # Store each specific revision differently
    slug = '{version}-{arch}-{threading}-{exceptions}-rev{revision}'
    slug = slug.format(
        version = '.'.join(str(v) for v in version),
        arch = arch,
        threading = threading,
        exceptions = exceptions,
        revision = revision
    )
    if arch == 'x86_64':
        root_dir = os.path.join(location, slug, 'mingw64')
    elif arch == 'i686':
        root_dir = os.path.join(location, slug, 'mingw32')
    else:
        raise ValueError('Unknown MinGW arch: ' + arch)

    # Download if needed
    if not os.path.exists(root_dir):
        downloaded = download(url, os.path.join(location, slug), log = log)
        if downloaded != root_dir:
            raise ValueError('The location of mingw did not match\n%s\n%s'
                % (downloaded, root_dir))

    return root_dir

def str2ver(string):
    '''
    Converts a version string into a tuple
    '''
    try:
        version = tuple(int(v) for v in string.split('.'))
        if len(version) != 3:
            raise ValueError()
    except ValueError:
        raise argparse.ArgumentTypeError(
            'please provide a three digit version string')
    return version

def main():
    '''
    Invoked when the script is run directly by the python interpreter
    '''
    parser = argparse.ArgumentParser(
        description = 'Downloads a specific version of MinGW',
        formatter_class = argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('--location',
        help = 'the location to download the compiler to',
        default = os.path.join(tempfile.gettempdir(), 'mingw-builds'))
    parser.add_argument('--arch', required = True, choices = ['i686',
'x86_64'], help = 'the target MinGW architecture string') parser.add_argument('--version', type = str2ver, help = 'the version of GCC to download') parser.add_argument('--threading', choices = ['posix', 'win32'], help = 'the threading type of the compiler') parser.add_argument('--exceptions', choices = ['sjlj', 'seh', 'dwarf'], help = 'the method to throw exceptions') parser.add_argument('--revision', type=int, help = 'the revision of the MinGW release') group = parser.add_mutually_exclusive_group() group.add_argument('-v', '--verbose', action='store_true', help='increase the script output verbosity') group.add_argument('-q', '--quiet', action='store_true', help='only print errors and warning') args = parser.parse_args() # Create the logger logger = logging.getLogger('mingw') handler = logging.StreamHandler() formatter = logging.Formatter('%(message)s') handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging.INFO) if args.quiet: logger.setLevel(logging.WARN) if args.verbose: logger.setLevel(logging.DEBUG) # Get MinGW root_dir = root(location = args.location, arch = args.arch, version = args.version, threading = args.threading, exceptions = args.exceptions, revision = args.revision, log = logger) sys.stdout.write('%s\n' % os.path.join(root_dir, 'bin')) if __name__ == '__main__': try: main() except IOError as e: sys.stderr.write('IO error: %s\n' % e) sys.exit(1) except OSError as e: sys.stderr.write('OS error: %s\n' % e) sys.exit(1) except KeyboardInterrupt as e: sys.stderr.write('Killed\n') sys.exit(1)
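# ---------------------------------------------------------------------------
# Usage sketch: download a 64-bit posix-threads/SEH toolchain into the
# default temp location and print its bin/ directory:
#
#   python mingw.py --arch x86_64 --threading posix --exceptions seh
#
# The same thing programmatically (network access assumed; keyword choices
# mirror the command-line flags above):
#
#   bin_dir = os.path.join(root(arch='x86_64', threading='posix',
#                               exceptions='seh'), 'bin')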
cccl-main
libcudacxx/.upstream-tests/utils/google-benchmark/mingw.py
import os import ycm_core # These are the compilation flags that will be used in case there's no # compilation database set (by default, one is not set). # CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR. flags = [ '-Wall', '-Werror', '-pedantic-errors', '-std=c++0x', '-fno-strict-aliasing', '-O3', '-DNDEBUG', # ...and the same thing goes for the magic -x option which specifies the # language that the files to be compiled are written in. This is mostly # relevant for c++ headers. # For a C project, you would set this to 'c' instead of 'c++'. '-x', 'c++', '-I', 'include', '-isystem', '/usr/include', '-isystem', '/usr/local/include', ] # Set this to the absolute path to the folder (NOT the file!) containing the # compile_commands.json file to use that instead of 'flags'. See here for # more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html # # Most projects will NOT need to set this to anything; you can just change the # 'flags' list of compilation flags. Notice that YCM itself uses that approach. compilation_database_folder = '' if os.path.exists( compilation_database_folder ): database = ycm_core.CompilationDatabase( compilation_database_folder ) else: database = None SOURCE_EXTENSIONS = [ '.cc' ] def DirectoryOfThisScript(): return os.path.dirname( os.path.abspath( __file__ ) ) def MakeRelativePathsInFlagsAbsolute( flags, working_directory ): if not working_directory: return list( flags ) new_flags = [] make_next_absolute = False path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ] for flag in flags: new_flag = flag if make_next_absolute: make_next_absolute = False if not flag.startswith( '/' ): new_flag = os.path.join( working_directory, flag ) for path_flag in path_flags: if flag == path_flag: make_next_absolute = True break if flag.startswith( path_flag ): path = flag[ len( path_flag ): ] new_flag = path_flag + os.path.join( working_directory, path ) break if new_flag: new_flags.append( new_flag ) return new_flags def IsHeaderFile( filename ): extension = os.path.splitext( filename )[ 1 ] return extension in [ '.h', '.hxx', '.hpp', '.hh' ] def GetCompilationInfoForFile( filename ): # The compilation_commands.json file generated by CMake does not have entries # for header files. So we do our best by asking the db for flags for a # corresponding source file, if any. If one exists, the flags for that file # should be good enough. if IsHeaderFile( filename ): basename = os.path.splitext( filename )[ 0 ] for extension in SOURCE_EXTENSIONS: replacement_file = basename + extension if os.path.exists( replacement_file ): compilation_info = database.GetCompilationInfoForFile( replacement_file ) if compilation_info.compiler_flags_: return compilation_info return None return database.GetCompilationInfoForFile( filename ) def FlagsForFile( filename, **kwargs ): if database: # Bear in mind that compilation_info.compiler_flags_ does NOT return a # python list, but a "list-like" StringVec object compilation_info = GetCompilationInfoForFile( filename ) if not compilation_info: return None final_flags = MakeRelativePathsInFlagsAbsolute( compilation_info.compiler_flags_, compilation_info.compiler_working_dir_ ) else: relative_to = DirectoryOfThisScript() final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to ) return { 'flags': final_flags, 'do_cache': True }
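# ---------------------------------------------------------------------------
# Sanity-check sketch for the path-absolutizing helper above. It cannot be
# run standalone because of the `ycm_core` import, so it is shown as a
# comment; the project path is hypothetical:
#
#   MakeRelativePathsInFlagsAbsolute(['-I', 'include', '-DFOO'], '/tmp/proj')
#   -> ['-I', '/tmp/proj/include', '-DFOO']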
cccl-main
libcudacxx/.upstream-tests/utils/google-benchmark/.ycm_extra_conf.py
#!/usr/bin/env python

"""
strip_asm.py - Cleanup ASM output for the specified file
"""

from argparse import ArgumentParser
import sys
import os
import re

def find_used_labels(asm):
    found = set()
    label_re = re.compile("\s*j[a-z]+\s+\.L([a-zA-Z0-9][a-zA-Z0-9_]*)")
    for l in asm.splitlines():
        m = label_re.match(l)
        if m:
            found.add('.L%s' % m.group(1))
    return found


def normalize_labels(asm):
    decls = set()
    label_decl = re.compile("^[.]{0,1}L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)")
    for l in asm.splitlines():
        m = label_decl.match(l)
        if m:
            decls.add(m.group(0))
    if len(decls) == 0:
        return asm
    needs_dot = next(iter(decls))[0] != '.'
    if not needs_dot:
        return asm
    for ld in decls:
        asm = re.sub("(^|\s+)" + ld + "(?=:|\s)", '\\1.' + ld, asm)
    return asm


def transform_labels(asm):
    asm = normalize_labels(asm)
    used_decls = find_used_labels(asm)
    new_asm = ''
    label_decl = re.compile("^\.L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)")
    for l in asm.splitlines():
        m = label_decl.match(l)
        if not m or m.group(0) in used_decls:
            new_asm += l
            new_asm += '\n'
    return new_asm


def is_identifier(tk):
    if len(tk) == 0:
        return False
    first = tk[0]
    if not first.isalpha() and first != '_':
        return False
    for i in range(1, len(tk)):
        c = tk[i]
        if not c.isalnum() and c != '_':
            return False
    return True

def process_identifiers(l):
    """
    process_identifiers - process all identifiers and modify them to have
    consistent names across all platforms; specifically across ELF and MachO.
    For example, MachO inserts an additional underscore at the beginning of
    names. This function removes that.
    """
    parts = re.split(r'([a-zA-Z0-9_]+)', l)
    new_line = ''
    for tk in parts:
        if is_identifier(tk):
            if tk.startswith('__Z'):
                tk = tk[1:]
            elif tk.startswith('_') and len(tk) > 1 and \
                    tk[1].isalpha() and tk[1] != 'Z':
                tk = tk[1:]
        new_line += tk
    return new_line


def process_asm(asm):
    """
    Strip the ASM of unwanted directives and lines
    """
    new_contents = ''
    asm = transform_labels(asm)

    # TODO: Add more things we want to remove
    discard_regexes = [
        re.compile("\s+\..*$"), # directive
        re.compile("\s*#(NO_APP|APP)$"), #inline ASM
        re.compile("\s*#.*$"), # comment line
        re.compile("\s*\.globa?l\s*([.a-zA-Z_][a-zA-Z0-9$_.]*)"), #global directive
        re.compile("\s*\.(string|asciz|ascii|[1248]?byte|short|word|long|quad|value|zero)"),
    ]
    keep_regexes = [
    ]
    fn_label_def = re.compile("^[a-zA-Z_][a-zA-Z0-9_.]*:")
    for l in asm.splitlines():
        # Remove Mach-O attribute
        l = l.replace('@GOTPCREL', '')
        add_line = True
        for reg in discard_regexes:
            if reg.match(l) is not None:
                add_line = False
                break
        for reg in keep_regexes:
            if reg.match(l) is not None:
                add_line = True
                break
        if add_line:
            if fn_label_def.match(l) and len(new_contents) != 0:
                new_contents += '\n'
            l = process_identifiers(l)
            new_contents += l
            new_contents += '\n'
    return new_contents

def main():
    parser = ArgumentParser(
        description='generate a stripped assembly file')
    parser.add_argument(
        'input', metavar='input', type=str, nargs=1,
        help='An input assembly file')
    parser.add_argument(
        'out', metavar='output', type=str, nargs=1,
        help='The output file')
    args, unknown_args = parser.parse_known_args()
    input = args.input[0]
    output = args.out[0]
    if not os.path.isfile(input):
        print(("ERROR: input file '%s' does not exist") % input)
        sys.exit(1)
    contents = None
    with open(input, 'r') as f:
        contents = f.read()
    new_contents = process_asm(contents)
    with open(output, 'w') as f:
        f.write(new_contents)

if __name__ == '__main__':
    main()

# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate:
indent-mode python; remove-trailing-spaces modified;
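# ---------------------------------------------------------------------------
# Usage sketch (hypothetical file names):
#
#   python strip_asm.py benchmark.s benchmark.stripped.s
#
# The label pass alone, on a string: `.L0` survives because a jump targets
# it, while the unreferenced `.L1` declaration is dropped:
#
#   transform_labels(".L0:\n jmp .L0\n.L1:\n ret\n")
#   -> ".L0:\n jmp .L0\n ret\n"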
cccl-main
libcudacxx/.upstream-tests/utils/google-benchmark/tools/strip_asm.py
#!/usr/bin/env python

"""
compare.py - versatile benchmark output compare tool
"""

import argparse
from argparse import ArgumentParser
import os
import sys
import unittest

import gbench
from gbench import util, report
from gbench.util import *


def check_inputs(in1, in2, flags):
    """
    Perform checking on the user provided inputs and diagnose any abnormalities
    """
    in1_kind, in1_err = classify_input_file(in1)
    in2_kind, in2_err = classify_input_file(in2)
    output_file = find_benchmark_flag('--benchmark_out=', flags)
    output_type = find_benchmark_flag('--benchmark_out_format=', flags)
    if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file:
        print(("WARNING: '--benchmark_out=%s' will be passed to both "
               "benchmarks causing it to be overwritten") % output_file)
    if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0:
        print("WARNING: passing optional flags has no effect since both "
              "inputs are JSON")
    if output_type is not None and output_type != 'json':
        print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py'"
               " is not supported.") % output_type)
        sys.exit(1)


def create_parser():
    parser = ArgumentParser(
        description='versatile benchmark output compare tool')

    parser.add_argument(
        '-a',
        '--display_aggregates_only',
        dest='display_aggregates_only',
        action="store_true",
        help="If there are repetitions, by default, we display everything - the"
             " actual runs, and the aggregates computed. Sometimes, it is "
             "desirable to only view the aggregates. E.g. when there are a lot "
             "of repetitions. Do note that only the display is affected. "
             "Internally, all the actual runs are still used, e.g. for U test.")

    utest = parser.add_argument_group()
    utest.add_argument(
        '--no-utest',
        dest='utest',
        default=True,
        action="store_false",
        help="The tool can do a two-tailed Mann-Whitney U test with the null "
             "hypothesis that it is equally likely that a randomly selected "
             "value from one sample will be less than or greater than a "
             "randomly selected value from a second sample.\n"
             "WARNING: requires **LARGE** (no less than {}) number of "
             "repetitions to be meaningful!\n"
             "The test is being done by default, if at least {} repetitions "
             "were done.\n"
             "This option can disable the U Test.".format(
                 report.UTEST_OPTIMAL_REPETITIONS,
                 report.UTEST_MIN_REPETITIONS))
    alpha_default = 0.05
    utest.add_argument(
        "--alpha",
        dest='utest_alpha',
        default=alpha_default,
        type=float,
        help=("significance level alpha. if the calculated p-value is below "
              "this value, then the result is said to be statistically "
              "significant and the null hypothesis is rejected.\n"
              "(default: %0.4f)") % alpha_default)

    subparsers = parser.add_subparsers(
        help='This tool has multiple modes of operation:',
        dest='mode')

    parser_a = subparsers.add_parser(
        'benchmarks',
        help='The most simple use-case, compare all the output of these two benchmarks')
    baseline = parser_a.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test_baseline',
        metavar='test_baseline',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    contender = parser_a.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'test_contender',
        metavar='test_contender',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    parser_a.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    parser_b = subparsers.add_parser(
        'filters', help='Compare filter one with the filter two of benchmark')
    baseline = parser_b.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test',
        metavar='test',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    baseline.add_argument(
        'filter_baseline',
        metavar='filter_baseline',
        type=str,
        nargs=1,
        help='The first filter, that will be used as baseline')
    contender = parser_b.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'filter_contender',
        metavar='filter_contender',
        type=str,
        nargs=1,
        help='The second filter, that will be compared against the baseline')
    parser_b.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    parser_c = subparsers.add_parser(
        'benchmarksfiltered',
        help='Compare filter one of first benchmark with filter two of the second benchmark')
    baseline = parser_c.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test_baseline',
        metavar='test_baseline',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    baseline.add_argument(
        'filter_baseline',
        metavar='filter_baseline',
        type=str,
        nargs=1,
        help='The first filter, that will be used as baseline')
    contender = parser_c.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'test_contender',
        metavar='test_contender',
        type=argparse.FileType('r'),
        nargs=1,
        help='The second benchmark executable or JSON output file, that will be '
             'compared against the baseline')
    contender.add_argument(
        'filter_contender',
        metavar='filter_contender',
        type=str,
        nargs=1,
        help='The second filter, that will be compared against the baseline')
    parser_c.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    return parser


def main():
    # Parse the command line flags
    parser = create_parser()
    args, unknown_args = parser.parse_known_args()
    if args.mode is None:
        parser.print_help()
        exit(1)
    assert not unknown_args
    benchmark_options = args.benchmark_options

    if args.mode == 'benchmarks':
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = ''
        filter_contender = ''

        # NOTE: if test_baseline == test_contender, you are analyzing the stdev

        description = 'Comparing %s to %s' % (test_baseline, test_contender)
    elif args.mode == 'filters':
        test_baseline = args.test[0].name
        test_contender = args.test[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]

        # NOTE: if filter_baseline == filter_contender, you are analyzing the
        # stdev

        description = 'Comparing %s to %s (from %s)' % (
            filter_baseline, filter_contender, args.test[0].name)
    elif args.mode == 'benchmarksfiltered':
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]

        # NOTE: if test_baseline == test_contender and
        # filter_baseline == filter_contender, you are analyzing the stdev

        description = 'Comparing %s (from %s) to %s (from %s)' % (
            filter_baseline, test_baseline, filter_contender, test_contender)
    else:
        # should never happen
        print("Unrecognized mode of operation: '%s'" % args.mode)
        parser.print_help()
        exit(1)

    check_inputs(test_baseline, test_contender, benchmark_options)

    if args.display_aggregates_only:
        benchmark_options += ['--benchmark_display_aggregates_only=true']

    options_baseline = []
    options_contender = []

    if filter_baseline and filter_contender:
        options_baseline = ['--benchmark_filter=%s' % filter_baseline]
        options_contender = ['--benchmark_filter=%s' % filter_contender]

    # Run the benchmarks and report the results
    json1 = json1_orig = gbench.util.run_or_load_benchmark(
        test_baseline, benchmark_options + options_baseline)
    json2 = json2_orig = gbench.util.run_or_load_benchmark(
        test_contender, benchmark_options + options_contender)

    # Now, filter the benchmarks so that the difference report can work
    if filter_baseline and filter_contender:
        replacement = '[%s vs. %s]' % (filter_baseline, filter_contender)
        json1 = gbench.report.filter_benchmark(
            json1_orig, filter_baseline, replacement)
        json2 = gbench.report.filter_benchmark(
            json2_orig, filter_contender, replacement)

    # Diff and output
    output_lines = gbench.report.generate_difference_report(
        json1, json2, args.display_aggregates_only,
        args.utest, args.utest_alpha)
    print(description)
    for ln in output_lines:
        print(ln)


class TestParser(unittest.TestCase):
    def setUp(self):
        self.parser = create_parser()
        testInputs = os.path.join(
            os.path.dirname(
                os.path.realpath(__file__)),
            'gbench',
            'Inputs')
        self.testInput0 = os.path.join(testInputs, 'test1_run1.json')
        self.testInput1 = os.path.join(testInputs, 'test1_run2.json')

    def test_benchmarks_basic(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_without_utest(self):
        parsed = self.parser.parse_args(
            ['--no-utest', 'benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertFalse(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.05)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_display_aggregates_only(self):
        parsed = self.parser.parse_args(
            ['-a', 'benchmarks', self.testInput0, self.testInput1])
        self.assertTrue(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_with_utest_alpha(self):
        parsed = self.parser.parse_args(
            ['--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.314)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_without_utest_with_utest_alpha(self):
        parsed = self.parser.parse_args(
            ['--no-utest', '--alpha=0.314', 'benchmarks',
             self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertFalse(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.314)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_with_remainder(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1, 'd'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.benchmark_options, ['d'])

    def test_benchmarks_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1, '--', 'e'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.benchmark_options, ['e'])

    def test_filters_basic(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertFalse(parsed.benchmark_options)

    def test_filters_with_remainder(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd', 'e'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertEqual(parsed.benchmark_options, ['e'])

    def test_filters_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd', '--', 'f'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertEqual(parsed.benchmark_options, ['f'])

    def test_benchmarksfiltered_basic(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarksfiltered_with_remainder(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c',
             self.testInput1, 'e', 'f'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertEqual(parsed.benchmark_options[0], 'f')

    def test_benchmarksfiltered_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c',
             self.testInput1, 'e', '--', 'g'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertEqual(parsed.benchmark_options[0], 'g')


if __name__ == '__main__':
    # unittest.main()
    main()

# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
cccl-main
libcudacxx/.upstream-tests/utils/google-benchmark/tools/compare.py
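A hedged sketch of how the three modes above are invoked; the paths are hypothetical, the gbench package (and scipy) must be importable, and since the positional arguments are argparse.FileType('r'), the named files must already exist:

# Illustrative only:
#   compare.py benchmarks base.json contender.json
#   compare.py filters bench BM_Old BM_New
#   compare.py benchmarksfiltered base.json BM_A contender.json BM_B
from compare import create_parser

args, _ = create_parser().parse_known_args(
    ['benchmarks', 'base.json', 'contender.json'])
print(args.mode, args.utest, args.utest_alpha)  # benchmarks True 0.05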
"""util.py - General utilities for running, loading, and processing benchmarks """ import json import os import tempfile import subprocess import sys # Input file type enumeration IT_Invalid = 0 IT_JSON = 1 IT_Executable = 2 _num_magic_bytes = 2 if sys.platform.startswith('win') else 4 def is_executable_file(filename): """ Return 'True' if 'filename' names a valid file which is likely an executable. A file is considered an executable if it starts with the magic bytes for a EXE, Mach O, or ELF file. """ if not os.path.isfile(filename): return False with open(filename, mode='rb') as f: magic_bytes = f.read(_num_magic_bytes) if sys.platform == 'darwin': return magic_bytes in [ b'\xfe\xed\xfa\xce', # MH_MAGIC b'\xce\xfa\xed\xfe', # MH_CIGAM b'\xfe\xed\xfa\xcf', # MH_MAGIC_64 b'\xcf\xfa\xed\xfe', # MH_CIGAM_64 b'\xca\xfe\xba\xbe', # FAT_MAGIC b'\xbe\xba\xfe\xca' # FAT_CIGAM ] elif sys.platform.startswith('win'): return magic_bytes == b'MZ' else: return magic_bytes == b'\x7FELF' def is_json_file(filename): """ Returns 'True' if 'filename' names a valid JSON output file. 'False' otherwise. """ try: with open(filename, 'r') as f: json.load(f) return True except BaseException: pass return False def classify_input_file(filename): """ Return a tuple (type, msg) where 'type' specifies the classified type of 'filename'. If 'type' is 'IT_Invalid' then 'msg' is a human readable string represeting the error. """ ftype = IT_Invalid err_msg = None if not os.path.exists(filename): err_msg = "'%s' does not exist" % filename elif not os.path.isfile(filename): err_msg = "'%s' does not name a file" % filename elif is_executable_file(filename): ftype = IT_Executable elif is_json_file(filename): ftype = IT_JSON else: err_msg = "'%s' does not name a valid benchmark executable or JSON file" % filename return ftype, err_msg def check_input_file(filename): """ Classify the file named by 'filename' and return the classification. If the file is classified as 'IT_Invalid' print an error message and exit the program. """ ftype, msg = classify_input_file(filename) if ftype == IT_Invalid: print("Invalid input file: %s" % msg) sys.exit(1) return ftype def find_benchmark_flag(prefix, benchmark_flags): """ Search the specified list of flags for a flag matching `<prefix><arg>` and if it is found return the arg it specifies. If specified more than once the last value is returned. If the flag is not found None is returned. """ assert prefix.startswith('--') and prefix.endswith('=') result = None for f in benchmark_flags: if f.startswith(prefix): result = f[len(prefix):] return result def remove_benchmark_flags(prefix, benchmark_flags): """ Return a new list containing the specified benchmark_flags except those with the specified prefix. """ assert prefix.startswith('--') and prefix.endswith('=') return [f for f in benchmark_flags if not f.startswith(prefix)] def load_benchmark_results(fname): """ Read benchmark output from a file and return the JSON object. REQUIRES: 'fname' names a file containing JSON benchmark output. """ with open(fname, 'r') as f: return json.load(f) def run_benchmark(exe_name, benchmark_flags): """ Run a benchmark specified by 'exe_name' with the specified 'benchmark_flags'. The benchmark is run directly as a subprocess to preserve real time console output. 
RETURNS: A JSON object representing the benchmark output """ output_name = find_benchmark_flag('--benchmark_out=', benchmark_flags) is_temp_output = False if output_name is None: is_temp_output = True thandle, output_name = tempfile.mkstemp() os.close(thandle) benchmark_flags = list(benchmark_flags) + \ ['--benchmark_out=%s' % output_name] cmd = [exe_name] + benchmark_flags print("RUNNING: %s" % ' '.join(cmd)) exitCode = subprocess.call(cmd) if exitCode != 0: print('TEST FAILED...') sys.exit(exitCode) json_res = load_benchmark_results(output_name) if is_temp_output: os.unlink(output_name) return json_res def run_or_load_benchmark(filename, benchmark_flags): """ Get the results for a specified benchmark. If 'filename' specifies an executable benchmark then the results are generated by running the benchmark. Otherwise 'filename' must name a valid JSON output file, which is loaded and the result returned. """ ftype = check_input_file(filename) if ftype == IT_JSON: return load_benchmark_results(filename) elif ftype == IT_Executable: return run_benchmark(filename, benchmark_flags) else: assert False # This branch is unreachable
cccl-main
libcudacxx/.upstream-tests/utils/google-benchmark/tools/gbench/util.py
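A small sketch of the intended flow through these helpers, assuming a hypothetical 'bench' path that names either a benchmark binary or a JSON dump (--benchmark_repetitions is a standard Google Benchmark flag):

# Illustrative only: classify, then run or load as appropriate.
from gbench.util import classify_input_file, run_or_load_benchmark, IT_Invalid

kind, err = classify_input_file('bench')
if kind == IT_Invalid:
    raise SystemExit(err)
results = run_or_load_benchmark('bench', ['--benchmark_repetitions=9'])
print(len(results['benchmarks']), 'benchmark entries loaded')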
"""Google Benchmark tooling""" __author__ = 'Eric Fiselier' __email__ = '[email protected]' __versioninfo__ = (0, 5, 0) __version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev' __all__ = []
cccl-main
libcudacxx/.upstream-tests/utils/google-benchmark/tools/gbench/__init__.py
"""report.py - Utilities for reporting statistics about benchmark results
"""

import unittest
import os
import re
import copy

from scipy.stats import mannwhitneyu


class BenchmarkColor(object):
    def __init__(self, name, code):
        self.name = name
        self.code = code

    def __repr__(self):
        return '%s%r' % (self.__class__.__name__,
                         (self.name, self.code))

    def __format__(self, format):
        return self.code


# Benchmark Colors Enumeration
BC_NONE = BenchmarkColor('NONE', '')
BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m')
BC_CYAN = BenchmarkColor('CYAN', '\033[96m')
BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m')
BC_OKGREEN = BenchmarkColor('OKGREEN', '\033[32m')
BC_HEADER = BenchmarkColor('HEADER', '\033[92m')
BC_WARNING = BenchmarkColor('WARNING', '\033[93m')
BC_WHITE = BenchmarkColor('WHITE', '\033[97m')
BC_FAIL = BenchmarkColor('FAIL', '\033[91m')
BC_ENDC = BenchmarkColor('ENDC', '\033[0m')
BC_BOLD = BenchmarkColor('BOLD', '\033[1m')
BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m')

UTEST_MIN_REPETITIONS = 2
UTEST_OPTIMAL_REPETITIONS = 9  # Lowest reasonable number, More is better.
UTEST_COL_NAME = "_pvalue"


def color_format(use_color, fmt_str, *args, **kwargs):
    """
    Return the result of 'fmt_str.format(*args, **kwargs)' after transforming
    'args' and 'kwargs' according to the value of 'use_color'. If 'use_color'
    is False then all color codes in 'args' and 'kwargs' are replaced with
    the empty string.
    """
    assert use_color is True or use_color is False
    if not use_color:
        args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE
                for arg in args]
        kwargs = {key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE
                  for key, arg in kwargs.items()}
    return fmt_str.format(*args, **kwargs)


def find_longest_name(benchmark_list):
    """
    Return the length of the longest benchmark name in a given list of
    benchmark JSON objects
    """
    longest_name = 1
    for bc in benchmark_list:
        if len(bc['name']) > longest_name:
            longest_name = len(bc['name'])
    return longest_name


def calculate_change(old_val, new_val):
    """
    Return a float representing the decimal change between old_val and new_val.
    """
    if old_val == 0 and new_val == 0:
        return 0.0
    if old_val == 0:
        return float(new_val - old_val) / (float(old_val + new_val) / 2)
    return float(new_val - old_val) / abs(old_val)


def filter_benchmark(json_orig, family, replacement=""):
    """
    Apply a filter to the json, and only leave the 'family' of benchmarks.
    """
    regex = re.compile(family)
    filtered = {}
    filtered['benchmarks'] = []
    for be in json_orig['benchmarks']:
        if not regex.search(be['name']):
            continue
        filteredbench = copy.deepcopy(be)  # Do NOT modify the old name!
        filteredbench['name'] = regex.sub(replacement, filteredbench['name'])
        filtered['benchmarks'].append(filteredbench)
    return filtered


def get_unique_benchmark_names(json):
    """
    While *keeping* the order, give all the unique 'names' used for benchmarks.
    """
    seen = set()
    uniqued = [x['name'] for x in json['benchmarks']
               if x['name'] not in seen and
               (seen.add(x['name']) or True)]
    return uniqued


def intersect(list1, list2):
    """
    Given two lists, get a new list consisting of the elements only contained
    in *both of the input lists*, while preserving the ordering.
    """
    return [x for x in list1 if x in list2]


def partition_benchmarks(json1, json2):
    """
    While preserving the ordering, find benchmarks with the same names in
    both of the inputs, and group them.
    (i.e. partition/filter into groups with common name)
    """
    json1_unique_names = get_unique_benchmark_names(json1)
    json2_unique_names = get_unique_benchmark_names(json2)
    names = intersect(json1_unique_names, json2_unique_names)
    partitions = []
    for name in names:
        # Pick the time unit from the first entry of the lhs benchmark.
        time_unit = next(x['time_unit']
                         for x in json1['benchmarks'] if x['name'] == name)
        # Filter by name and time unit.
        lhs = [x for x in json1['benchmarks'] if x['name'] == name and
               x['time_unit'] == time_unit]
        rhs = [x for x in json2['benchmarks'] if x['name'] == name and
               x['time_unit'] == time_unit]
        partitions.append([lhs, rhs])
    return partitions


def extract_field(partition, field_name):
    # The count of elements may be different. We want *all* of them.
    lhs = [x[field_name] for x in partition[0]]
    rhs = [x[field_name] for x in partition[1]]
    return [lhs, rhs]


def print_utest(partition, utest_alpha, first_col_width, use_color=True):
    timings_time = extract_field(partition, 'real_time')
    timings_cpu = extract_field(partition, 'cpu_time')
    min_rep_cnt = min(len(timings_time[0]),
                      len(timings_time[1]),
                      len(timings_cpu[0]),
                      len(timings_cpu[1]))

    # Does *everything* have at least UTEST_MIN_REPETITIONS repetitions?
    if min_rep_cnt < UTEST_MIN_REPETITIONS:
        return []

    def get_utest_color(pval):
        return BC_FAIL if pval >= utest_alpha else BC_OKGREEN

    time_pvalue = mannwhitneyu(
        timings_time[0], timings_time[1], alternative='two-sided').pvalue
    cpu_pvalue = mannwhitneyu(
        timings_cpu[0], timings_cpu[1], alternative='two-sided').pvalue

    dsc = "U Test, Repetitions: {} vs {}".format(
        len(timings_cpu[0]), len(timings_cpu[1]))
    dsc_color = BC_OKGREEN

    if min_rep_cnt < UTEST_OPTIMAL_REPETITIONS:
        dsc_color = BC_WARNING
        dsc += ". WARNING: Results unreliable! {}+ repetitions recommended.".format(
            UTEST_OPTIMAL_REPETITIONS)

    special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{} {}"

    last_name = partition[0][0]['name']
    return [color_format(use_color,
                         special_str,
                         BC_HEADER,
                         "{}{}".format(last_name, UTEST_COL_NAME),
                         first_col_width,
                         get_utest_color(time_pvalue), time_pvalue,
                         get_utest_color(cpu_pvalue), cpu_pvalue,
                         dsc_color, dsc,
                         endc=BC_ENDC)]


def generate_difference_report(
        json1,
        json2,
        display_aggregates_only=False,
        utest=False,
        utest_alpha=0.05,
        use_color=True):
    """
    Calculate and report the difference between each test of two benchmarks
    runs specified as 'json1' and 'json2'.
    """
    assert utest is True or utest is False
    first_col_width = find_longest_name(json1['benchmarks'])

    def find_test(name):
        for b in json2['benchmarks']:
            if b['name'] == name:
                return b
        return None

    first_col_width = max(
        first_col_width,
        len('Benchmark'))
    first_col_width += len(UTEST_COL_NAME)
    first_line = "{:<{}s}Time             CPU      Time Old      Time New       CPU Old       CPU New".format(
        'Benchmark', 12 + first_col_width)
    output_strs = [first_line, '-' * len(first_line)]

    partitions = partition_benchmarks(json1, json2)
    for partition in partitions:
        # Careful, we may have different repetition count.
        for i in range(min(len(partition[0]), len(partition[1]))):
            bn = partition[0][i]
            other_bench = partition[1][i]

            # *If* we were asked to only display aggregates,
            # and if it is non-aggregate, then skip it.
            if display_aggregates_only and 'run_type' in bn and 'run_type' in other_bench:
                assert bn['run_type'] == other_bench['run_type']
                if bn['run_type'] != 'aggregate':
                    continue

            fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"

            def get_color(res):
                if res > 0.05:
                    return BC_FAIL
                elif res > -0.07:
                    return BC_WHITE
                else:
                    return BC_CYAN

            tres = calculate_change(bn['real_time'], other_bench['real_time'])
            cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time'])
            output_strs += [color_format(use_color,
                                         fmt_str,
                                         BC_HEADER,
                                         bn['name'],
                                         first_col_width,
                                         get_color(tres),
                                         tres,
                                         get_color(cpures),
                                         cpures,
                                         bn['real_time'],
                                         other_bench['real_time'],
                                         bn['cpu_time'],
                                         other_bench['cpu_time'],
                                         endc=BC_ENDC)]

        # After processing the whole partition, if requested, do the U test.
        if utest:
            output_strs += print_utest(partition,
                                       utest_alpha=utest_alpha,
                                       first_col_width=first_col_width,
                                       use_color=use_color)

    return output_strs


###############################################################################
# Unit tests


class TestGetUniqueBenchmarkNames(unittest.TestCase):
    def load_results(self):
        import json
        testInputs = os.path.join(
            os.path.dirname(
                os.path.realpath(__file__)),
            'Inputs')
        testOutput = os.path.join(testInputs, 'test3_run0.json')
        with open(testOutput, 'r') as f:
            json = json.load(f)
        return json

    def test_basic(self):
        expect_lines = [
            'BM_One',
            'BM_Two',
            'short',  # These two are not sorted
            'medium',  # These two are not sorted
        ]
        json = self.load_results()
        output_lines = get_unique_benchmark_names(json)
        print("\n")
        print("\n".join(output_lines))
        self.assertEqual(len(output_lines), len(expect_lines))
        for i in range(0, len(output_lines)):
            self.assertEqual(expect_lines[i], output_lines[i])


class TestReportDifference(unittest.TestCase):
    def load_results(self):
        import json
        testInputs = os.path.join(
            os.path.dirname(
                os.path.realpath(__file__)),
            'Inputs')
        testOutput1 = os.path.join(testInputs, 'test1_run1.json')
        testOutput2 = os.path.join(testInputs, 'test1_run2.json')
        with open(testOutput1, 'r') as f:
            json1 = json.load(f)
        with open(testOutput2, 'r') as f:
            json2 = json.load(f)
        return json1, json2

    def test_basic(self):
        expect_lines = [
            ['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'],
            ['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'],
            ['BM_2xSlower', '+1.0000', '+1.0000', '50', '100', '50', '100'],
            ['BM_1PercentFaster', '-0.0100', '-0.0100', '100', '99', '100', '99'],
            ['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'],
            ['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'],
            ['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'],
            ['BM_100xSlower', '+99.0000', '+99.0000', '100', '10000', '100', '10000'],
            ['BM_100xFaster', '-0.9900', '-0.9900', '10000', '100', '10000', '100'],
            ['BM_10PercentCPUToTime', '+0.1000', '-0.1000', '100', '110', '100', '90'],
            ['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'],
            ['BM_BadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'],
        ]
        json1, json2 = self.load_results()
        output_lines_with_header = generate_difference_report(
            json1, json2, use_color=False)
        output_lines = output_lines_with_header[2:]
        print("\n")
        print("\n".join(output_lines_with_header))
        self.assertEqual(len(output_lines), len(expect_lines))
        for i in range(0, len(output_lines)):
            parts = [x for x in output_lines[i].split(' ') if x]
            self.assertEqual(len(parts), 7)
            self.assertEqual(expect_lines[i], parts)


class TestReportDifferenceBetweenFamilies(unittest.TestCase):
    def load_result(self):
        import json
        testInputs = os.path.join(
            os.path.dirname(
                os.path.realpath(__file__)),
            'Inputs')
        testOutput = os.path.join(testInputs, 'test2_run.json')
        with open(testOutput, 'r') as f:
            json = json.load(f)
        return json

    def test_basic(self):
        expect_lines = [
            ['.', '-0.5000', '-0.5000', '10', '5', '10', '5'],
            ['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'],
            ['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'],
            ['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'],
        ]
        json = self.load_result()
        json1 = filter_benchmark(json, "BM_Z.ro", ".")
        json2 = filter_benchmark(json, "BM_O.e", ".")
        output_lines_with_header = generate_difference_report(
            json1, json2, use_color=False)
        output_lines = output_lines_with_header[2:]
        print("\n")
        print("\n".join(output_lines_with_header))
        self.assertEqual(len(output_lines), len(expect_lines))
        for i in range(0, len(output_lines)):
            parts = [x for x in output_lines[i].split(' ') if x]
            self.assertEqual(len(parts), 7)
            self.assertEqual(expect_lines[i], parts)


class TestReportDifferenceWithUTest(unittest.TestCase):
    def load_results(self):
        import json
        testInputs = os.path.join(
            os.path.dirname(
                os.path.realpath(__file__)),
            'Inputs')
        testOutput1 = os.path.join(testInputs, 'test3_run0.json')
        testOutput2 = os.path.join(testInputs, 'test3_run1.json')
        with open(testOutput1, 'r') as f:
            json1 = json.load(f)
        with open(testOutput2, 'r') as f:
            json2 = json.load(f)
        return json1, json2

    def test_utest(self):
        expect_lines = []
        expect_lines = [
            ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
            ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
            ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'],
            ['BM_Two_pvalue',
             '0.6985',
             '0.6985',
             'U',
             'Test,',
             'Repetitions:',
             '2',
             'vs',
             '2.',
             'WARNING:',
             'Results',
             'unreliable!',
             '9+',
             'repetitions',
             'recommended.'],
            ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'],
            ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
            ['short_pvalue',
             '0.7671',
             '0.1489',
             'U',
             'Test,',
             'Repetitions:',
             '2',
             'vs',
             '3.',
             'WARNING:',
             'Results',
             'unreliable!',
             '9+',
             'repetitions',
             'recommended.'],
            ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'],
        ]
        json1, json2 = self.load_results()
        output_lines_with_header = generate_difference_report(
            json1, json2, utest=True, utest_alpha=0.05, use_color=False)
        output_lines = output_lines_with_header[2:]
        print("\n")
        print("\n".join(output_lines_with_header))
        self.assertEqual(len(output_lines), len(expect_lines))
        for i in range(0, len(output_lines)):
            parts = [x for x in output_lines[i].split(' ') if x]
            self.assertEqual(expect_lines[i], parts)


class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
        unittest.TestCase):
    def load_results(self):
        import json
        testInputs = os.path.join(
            os.path.dirname(
                os.path.realpath(__file__)),
            'Inputs')
        testOutput1 = os.path.join(testInputs, 'test3_run0.json')
        testOutput2 = os.path.join(testInputs, 'test3_run1.json')
        with open(testOutput1, 'r') as f:
            json1 = json.load(f)
        with open(testOutput2, 'r') as f:
            json2 = json.load(f)
        return json1, json2

    def test_utest(self):
        expect_lines = []
        expect_lines = [
            ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
            ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
            ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'],
            ['BM_Two_pvalue',
             '0.6985',
             '0.6985',
             'U',
             'Test,',
             'Repetitions:',
             '2',
             'vs',
             '2.',
             'WARNING:',
             'Results',
             'unreliable!',
             '9+',
             'repetitions',
             'recommended.'],
            ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'],
            ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
            ['short_pvalue',
             '0.7671',
             '0.1489',
             'U',
             'Test,',
             'Repetitions:',
             '2',
             'vs',
             '3.',
             'WARNING:',
             'Results',
             'unreliable!',
             '9+',
             'repetitions',
             'recommended.'],
        ]
        json1, json2 = self.load_results()
        output_lines_with_header = generate_difference_report(
            json1, json2, display_aggregates_only=True,
            utest=True, utest_alpha=0.05, use_color=False)
        output_lines = output_lines_with_header[2:]
        print("\n")
        print("\n".join(output_lines_with_header))
        self.assertEqual(len(output_lines), len(expect_lines))
        for i in range(0, len(output_lines)):
            parts = [x for x in output_lines[i].split(' ') if x]
            self.assertEqual(expect_lines[i], parts)


if __name__ == '__main__':
    unittest.main()

# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
cccl-main
libcudacxx/.upstream-tests/utils/google-benchmark/tools/gbench/report.py
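A sketch of driving the report generator directly, assuming two hypothetical Google Benchmark JSON files on disk:

# Illustrative only: diff two saved runs without going through compare.py.
import json

from gbench.report import generate_difference_report

with open('run1.json') as f:
    json1 = json.load(f)
with open('run2.json') as f:
    json2 = json.load(f)
for ln in generate_difference_report(json1, json2, use_color=False):
    print(ln)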
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

import platform
import os

import libcudacxx.util


class CXXCompiler(object):
    CM_Default = 0
    CM_PreProcess = 1
    CM_Compile = 2
    CM_Link = 3

    def __init__(self, path, first_arg,
                 flags=None, compile_flags=None, link_flags=None,
                 warning_flags=None, verify_supported=None,
                 verify_flags=None, use_verify=False,
                 modules_flags=None, use_modules=False,
                 use_ccache=False, use_warnings=False,
                 compile_env=None, cxx_type=None, cxx_version=None):
        self.source_lang = 'c++'
        self.path = path
        self.first_arg = first_arg or ''
        self.flags = list(flags or [])
        self.compile_flags = list(compile_flags or [])
        self.link_flags = list(link_flags or [])
        self.warning_flags = list(warning_flags or [])
        self.verify_supported = verify_supported
        self.use_verify = use_verify
        self.verify_flags = list(verify_flags or [])
        assert not use_verify or verify_supported
        assert not use_verify or verify_flags is not None
        self.modules_flags = list(modules_flags or [])
        self.use_modules = use_modules
        assert not use_modules or modules_flags is not None
        self.use_ccache = use_ccache
        self.use_warnings = use_warnings
        if compile_env is not None:
            self.compile_env = dict(compile_env)
        else:
            self.compile_env = None
        self.type = cxx_type
        self.version = cxx_version
        if self.type is None or self.version is None:
            self._initTypeAndVersion()

    def isVerifySupported(self):
        if self.verify_supported is None:
            self.verify_supported = self.hasCompileFlag(
                ['-Xclang', '-verify-ignore-unexpected'])
            if self.verify_supported:
                self.verify_flags = [
                    '-Xclang', '-verify',
                    '-Xclang', '-verify-ignore-unexpected=note',
                    '-ferror-limit=1024'
                ]
        return self.verify_supported

    def useVerify(self, value=True):
        self.use_verify = value
        assert not self.use_verify or self.verify_flags is not None

    def useModules(self, value=True):
        self.use_modules = value
        assert not self.use_modules or self.modules_flags is not None

    def useCCache(self, value=True):
        self.use_ccache = value

    def useWarnings(self, value=True):
        self.use_warnings = value

    def _initTypeAndVersion(self):
        # Get compiler type and version
        try:
            macros = self.dumpMacros()
            compiler_type = None
            major_ver = minor_ver = patchlevel = None
            self.is_nvrtc = False

            if '__NVCC__' in macros.keys():
                compiler_type = 'nvcc'
                major_ver = macros['__CUDACC_VER_MAJOR__']
                minor_ver = macros['__CUDACC_VER_MINOR__']
                patchlevel = macros['__CUDACC_VER_BUILD__']
                if '__LIBCUDACXX_NVRTC_TEST__' in macros.keys():
                    self.is_nvrtc = True
            elif '__NVCOMPILER' in macros.keys():
                compiler_type = 'nvhpc'
                # NVHPC, unfortunately, adds an extra space between the macro
                # name and macro value in their macro dump mode.
                major_ver = macros['__NVCOMPILER_MAJOR__'].strip()
                minor_ver = macros['__NVCOMPILER_MINOR__'].strip()
                patchlevel = macros['__NVCOMPILER_PATCHLEVEL__'].strip()
            elif '__INTEL_COMPILER' in macros.keys():
                compiler_type = 'icc'
                major_ver = int(macros['__INTEL_COMPILER']) // 100
                minor_ver = (int(macros['__INTEL_COMPILER']) % 100) // 10
                patchlevel = int(macros['__INTEL_COMPILER']) % 10
            elif '__clang__' in macros.keys():
                compiler_type = 'clang'
                # Treat Apple's LLVM fork differently.
                if '__apple_build_version__' in macros.keys():
                    compiler_type = 'apple-clang'
                major_ver = macros['__clang_major__']
                minor_ver = macros['__clang_minor__']
                patchlevel = macros['__clang_patchlevel__']
            elif '__GNUC__' in macros.keys():
                compiler_type = 'gcc'
                major_ver = macros['__GNUC__']
                minor_ver = macros['__GNUC_MINOR__']
                patchlevel = macros['__GNUC_PATCHLEVEL__']

            if '__cplusplus' in macros.keys():
                cplusplus = macros['__cplusplus']
                if cplusplus[-1] == 'L':
                    cplusplus = cplusplus[:-1]
                cpp_standard = int(cplusplus)

                if cpp_standard <= 199711:
                    default_dialect = "c++03"
                elif cpp_standard <= 201103:
                    default_dialect = "c++11"
                elif cpp_standard <= 201402:
                    default_dialect = "c++14"
                elif cpp_standard <= 201703:
                    default_dialect = "c++17"
                else:
                    default_dialect = "c++20"
            else:
                default_dialect = "c++03"

            self.type = compiler_type
            self.version = (major_ver, minor_ver, patchlevel)
            self.default_dialect = default_dialect
        except:
            (self.type, self.version, self.default_dialect, self.is_nvrtc) = \
                self.dumpVersion()

        if self.type == 'nvcc':
            # Treat C++ as CUDA when the compiler is NVCC.
            self.source_lang = 'cu'

    def _basicCmd(self, source_files, out, mode=CM_Default, flags=[],
                  input_is_cxx=False):
        cmd = []
        if self.use_ccache \
                and not mode == self.CM_Link \
                and not mode == self.CM_PreProcess:
            cmd += [os.environ.get('CMAKE_CUDA_COMPILER_LAUNCHER')]
        cmd += [self.path] + ([self.first_arg] if self.first_arg != '' else [])
        if out is not None:
            cmd += ['-o', out]
        if input_is_cxx:
            cmd += ['-x', self.source_lang]
        if isinstance(source_files, list):
            cmd += source_files
        elif isinstance(source_files, str):
            cmd += [source_files]
        else:
            raise TypeError('source_files must be a string or list')
        if mode == self.CM_PreProcess:
            cmd += ['-E']
        elif mode == self.CM_Compile:
            cmd += ['-c']
        cmd += self.flags
        if self.use_verify:
            cmd += self.verify_flags
            assert mode in [self.CM_Default, self.CM_Compile]
        if self.use_modules:
            cmd += self.modules_flags
        if mode != self.CM_Link:
            cmd += self.compile_flags
            if self.use_warnings:
                cmd += self.warning_flags
        if mode != self.CM_PreProcess and mode != self.CM_Compile:
            cmd += self.link_flags
        cmd += flags
        return cmd

    def preprocessCmd(self, source_files, out=None, flags=[]):
        return self._basicCmd(source_files, out, flags=flags,
                              mode=self.CM_PreProcess,
                              input_is_cxx=True)

    def compileCmd(self, source_files, out=None, flags=[]):
        return self._basicCmd(source_files, out, flags=flags,
                              mode=self.CM_Compile,
                              input_is_cxx=True) + ['-c']

    def linkCmd(self, source_files, out=None, flags=[]):
        return self._basicCmd(source_files, out, flags=flags,
                              mode=self.CM_Link)

    def compileLinkCmd(self, source_files, out=None, flags=[]):
        return self._basicCmd(source_files, out, flags=flags)

    def preprocess(self, source_files, out=None, flags=[], cwd=None):
        cmd = self.preprocessCmd(source_files, out, flags)
        out, err, rc = libcudacxx.util.executeCommand(
            cmd, env=self.compile_env, cwd=cwd)
        return cmd, out, err, rc

    def compile(self, source_files, out=None, flags=[], cwd=None):
        cmd = self.compileCmd(source_files, out, flags)
        out, err, rc = libcudacxx.util.executeCommand(
            cmd, env=self.compile_env, cwd=cwd)
        return cmd, out, err, rc

    def link(self, source_files, out=None, flags=[], cwd=None):
        cmd = self.linkCmd(source_files, out, flags)
        out, err, rc = libcudacxx.util.executeCommand(
            cmd, env=self.compile_env, cwd=cwd)
        return cmd, out, err, rc

    def compileLink(self, source_files, out=None, flags=[], cwd=None):
        cmd = self.compileLinkCmd(source_files, out, flags)
        out, err, rc = libcudacxx.util.executeCommand(
            cmd, env=self.compile_env, cwd=cwd)
        return cmd, out, err, rc

    def compileLinkTwoSteps(self, source_file, out=None, object_file=None,
                            flags=[], cwd=None):
        if not isinstance(source_file, str):
            raise TypeError('This function only accepts a single input file')
        if object_file is None:
            # Create, use and delete a temporary object file if none is given.
            with_fn = lambda: libcudacxx.util.guardedTempFilename(suffix='.o')
        else:
            # Otherwise wrap the filename in a context manager function.
            with_fn = lambda: libcudacxx.util.nullContext(object_file)
        with with_fn() as object_file:
            cc_cmd, cc_stdout, cc_stderr, rc = self.compile(
                source_file, object_file, flags=flags, cwd=cwd)
            if rc != 0:
                return cc_cmd, cc_stdout, cc_stderr, rc
            link_cmd, link_stdout, link_stderr, rc = self.link(
                object_file, out=out, flags=flags, cwd=cwd)
            return (cc_cmd + ['&&'] + link_cmd, cc_stdout + link_stdout,
                    cc_stderr + link_stderr, rc)

    def dumpVersion(self, flags=[], cwd=None):
        dumpversion_cpp = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "dumpversion.cpp")
        with_fn = lambda: libcudacxx.util.guardedTempFilename(suffix=".exe")
        with with_fn() as exe:
            cmd, out, err, rc = self.compileLink([dumpversion_cpp], out=exe,
                                                 flags=flags, cwd=cwd)
            if rc != 0:
                return ("unknown", (0, 0, 0), "c++03", False)
            out, err, rc = libcudacxx.util.executeCommand(
                exe, env=self.compile_env, cwd=cwd)
        version = None
        try:
            version = eval(out)
        except:
            pass
        if not (isinstance(version, tuple) and 4 == len(version)):
            version = ("unknown", (0, 0, 0), "c++03", False)
        return version

    def dumpMacros(self, source_files=None, flags=[], cwd=None):
        if source_files is None:
            source_files = os.devnull
        flags = ['-dM'] + flags
        cmd, out, err, rc = self.preprocess(source_files, flags=flags, cwd=cwd)
        if rc != 0:
            flags = ['-Xcompiler'] + flags
            cmd, out, err, rc = self.preprocess(source_files, flags=flags,
                                                cwd=cwd)
            if rc != 0:
                return cmd, out, err, rc
        parsed_macros = {}
        lines = [l.strip() for l in out.split('\n') if l.strip()]
        for l in lines:
            # NVHPC also outputs the file contents from -E -dM for some
            # reason; handle that
            if not l.startswith('#define '):
                if '__NVCOMPILER' not in parsed_macros.keys():
                    assert False, "a line not starting with '#define' encountered in predefined macro dump"
                else:
                    continue
            l = l[len('#define '):]
            macro, _, value = l.partition(' ')
            parsed_macros[macro] = value
        return parsed_macros

    def getTriple(self):
        if self.type == "msvc":
            return "x86_64-pc-windows-msvc"
        cmd = [self.path] + self.flags + ['-dumpmachine']
        return libcudacxx.util.capture(cmd).strip()

    def hasCompileFlag(self, flag):
        if isinstance(flag, list):
            flags = list(flag)
        else:
            flags = [flag]
        # Add -Werror to ensure that an unrecognized flag causes a non-zero
        # exit code. -Werror is supported on all known non-nvcc compiler types.
        if self.type is not None and self.type != 'nvcc' and self.type != 'msvc':
            flags += ['-Werror', '-fsyntax-only']
        empty_cpp = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "empty.cpp")
        cmd, out, err, rc = self.compile(empty_cpp, out=os.devnull, flags=flags)
        if out.find('flag is not supported with the configured host compiler') != -1:
            return False
        if err.find('flag is not supported with the configured host compiler') != -1:
            return False
        return rc == 0

    def addFlagIfSupported(self, flag):
        if isinstance(flag, list):
            flags = list(flag)
        else:
            flags = [flag]
        if self.hasCompileFlag(flags):
            self.flags += flags
            return True
        else:
            return False

    def addCompileFlagIfSupported(self, flag):
        if isinstance(flag, list):
            flags = list(flag)
        else:
            flags = [flag]
        if self.hasCompileFlag(flags):
            self.compile_flags += flags
            return True
        else:
            return False

    def hasWarningFlag(self, flag):
        """
        hasWarningFlag - Test if the compiler supports a given warning flag.
        Unlike addCompileFlagIfSupported, this function detects when
        "-Wno-<warning>" flags are unsupported. If flag is a
        "-Wno-<warning>" GCC will not emit an unknown option diagnostic unless
        another error is triggered during compilation.
        """
        assert isinstance(flag, str)
        assert flag.startswith('-W')
        if not flag.startswith('-Wno-'):
            return self.hasCompileFlag(flag)
        flags = ['-Werror', flag]
        old_use_warnings = self.use_warnings
        self.useWarnings(False)
        cmd = self.compileCmd('-', os.devnull, flags)
        self.useWarnings(old_use_warnings)
        # Remove '-v' because it will cause the command line invocation
        # to be printed as part of the error output.
        # TODO(EricWF): Are there other flags we need to worry about?
        if '-v' in cmd:
            cmd.remove('-v')
        out, err, rc = libcudacxx.util.executeCommand(
            cmd, input=libcudacxx.util.to_bytes('#error\n'))
        assert rc != 0
        if flag in err:
            return False
        return True

    def addWarningFlagIfSupported(self, flag):
        if self.hasWarningFlag(flag):
            if flag not in self.warning_flags:
                self.warning_flags += [flag]
            return True
        return False
cccl-main
libcudacxx/.upstream-tests/utils/libcudacxx/compiler.py
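A sketch of probing a toolchain the way the harness does; this assumes clang++ is on PATH and the libcudacxx package above is importable (detection runs the compiler via dumpMacros at construction time):

# Illustrative only: detect the compiler and feature-test a flag.
from libcudacxx.compiler import CXXCompiler

cxx = CXXCompiler('clang++', None)
print(cxx.type, cxx.version, cxx.default_dialect)
if cxx.addCompileFlagIfSupported('-fno-exceptions'):
    print('compiling tests without exceptions')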
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

from contextlib import contextmanager
import errno
import os
import platform
import signal
import subprocess
import sys
import tempfile
import threading


# FIXME: Most of these functions are cribbed from LIT
def to_bytes(str):
    # Encode to UTF-8 to get binary data.
    if isinstance(str, bytes):
        return str
    return str.encode('utf-8')


def to_string(bytes):
    if isinstance(bytes, str):
        return bytes
    return to_bytes(bytes)


def convert_string(bytes):
    try:
        return to_string(bytes.decode('utf-8'))
    except AttributeError:  # 'str' object has no attribute 'decode'.
        return str(bytes)
    except UnicodeError:
        return str(bytes)


def cleanFile(filename):
    try:
        os.remove(filename)
    except OSError:
        pass


@contextmanager
def guardedTempFilename(suffix='', prefix='', dir=None):
    # Creates and yields a temporary filename within a with statement. The
    # file is removed upon scope exit.
    handle, name = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir)
    os.close(handle)
    yield name
    cleanFile(name)


@contextmanager
def guardedFilename(name):
    # yields a filename within a with statement. The file is removed upon
    # scope exit.
    yield name
    cleanFile(name)


@contextmanager
def nullContext(value):
    # yields a variable within a with statement. No action is taken upon
    # scope exit.
    yield value


def makeReport(cmd, out, err, rc):
    report = "Command: %s\n" % cmd
    report += "Exit Code: %d\n" % rc
    if out:
        report += "Standard Output:\n--\n%s--\n" % out
    if err:
        report += "Standard Error:\n--\n%s--\n" % err
    report += '\n'
    return report


def capture(args, env=None):
    """capture(command) - Run the given command (or argv list) in a shell and
    return the standard output. Raises a CalledProcessError if the command
    exits with a non-zero status."""
    p = subprocess.Popen(args, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, env=env)
    out, err = p.communicate()
    out = convert_string(out)
    err = convert_string(err)
    if p.returncode != 0:
        raise subprocess.CalledProcessError(cmd=args,
                                            returncode=p.returncode,
                                            output="{}\n{}".format(out, err))
    return out


def which(command, paths=None):
    """which(command, [paths]) - Look up the given command in the paths string
    (or the PATH environment variable, if unspecified)."""
    if paths is None:
        paths = os.environ.get('PATH', '')

    # Check for absolute match first.
    if os.path.isfile(command):
        return command

    # Would be nice if Python had a lib function for this.
    if not paths:
        paths = os.defpath

    # Get suffixes to search.
    # On Cygwin, 'PATHEXT' may exist but it should not be used.
    if os.pathsep == ';':
        pathext = os.environ.get('PATHEXT', '').split(';')
    else:
        pathext = ['']

    # Search the paths...
    for path in paths.split(os.pathsep):
        for ext in pathext:
            p = os.path.join(path, command + ext)
            if os.path.exists(p) and not os.path.isdir(p):
                return p

    return None


def checkToolsPath(dir, tools):
    for tool in tools:
        if not os.path.exists(os.path.join(dir, tool)):
            return False
    return True


def whichTools(tools, paths):
    for path in paths.split(os.pathsep):
        if checkToolsPath(path, tools):
            return path
    return None


def mkdir_p(path):
    """mkdir_p(path) - Make the "path" directory, if it does not exist; this
    will also make directories for any missing parent directories."""
    if not path or os.path.exists(path):
        return

    parent = os.path.dirname(path)
    if parent != path:
        mkdir_p(parent)

    try:
        os.mkdir(path)
    except OSError:
        e = sys.exc_info()[1]
        # Ignore EEXIST, which may occur during a race condition.
        if e.errno != errno.EEXIST:
            raise


class ExecuteCommandTimeoutException(Exception):
    def __init__(self, msg, out, err, exitCode):
        assert isinstance(msg, str)
        assert isinstance(out, str)
        assert isinstance(err, str)
        assert isinstance(exitCode, int)
        self.msg = msg
        self.out = out
        self.err = err
        self.exitCode = exitCode


# Close extra file handles on UNIX (on Windows this cannot be done while
# also redirecting input).
kUseCloseFDs = not (platform.system() == 'Windows')


def executeCommand(command, cwd=None, env=None, input=None, timeout=0):
    """
        Execute command ``command`` (list of arguments or string)
        with
        * working directory ``cwd`` (str), use None to use the current
          working directory
        * environment ``env`` (dict), use None for none
        * Input to the command ``input`` (str), use None to pass no input.
        * Max execution time ``timeout`` (int) seconds. Use 0 for no timeout.

        Returns a tuple (out, err, exitCode) where
        * ``out`` (str) is the standard output of running the command
        * ``err`` (str) is the standard error of running the command
        * ``exitCode`` (int) is the exitCode of running the command

        If the timeout is hit an ``ExecuteCommandTimeoutException``
        is raised.
    """
    if input is not None:
        input = to_bytes(input)
    p = subprocess.Popen(command, cwd=cwd,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         env=env, close_fds=kUseCloseFDs)
    timerObject = None
    # FIXME: Because of the way nested function scopes work in Python 2.x we
    # need to use a reference to a mutable object rather than a plain
    # bool. In Python 3 we could use the "nonlocal" keyword but we need
    # to support Python 2 as well.
    hitTimeOut = [False]
    try:
        if timeout > 0:
            def killProcess():
                # We may be invoking a shell so we need to kill the
                # process and all its children.
                hitTimeOut[0] = True
                killProcessAndChildren(p.pid)

            timerObject = threading.Timer(timeout, killProcess)
            timerObject.start()

        out, err = p.communicate(input=input)
        exitCode = p.wait()
    finally:
        if timerObject != None:
            timerObject.cancel()

    # Ensure the resulting output is always of string type.
    out = convert_string(out)
    err = convert_string(err)

    if hitTimeOut[0]:
        raise ExecuteCommandTimeoutException(
            msg='Reached timeout of {} seconds'.format(timeout),
            out=out,
            err=err,
            exitCode=exitCode
        )

    # Detect Ctrl-C in subprocess.
    if exitCode == -signal.SIGINT:
        raise KeyboardInterrupt

    return out, err, exitCode


def killProcessAndChildren(pid):
    """
    This function kills a process with ``pid`` and all its running children
    (recursively). It is currently implemented using the psutil module which
    provides a simple platform neutral implementation.

    TODO: Reimplement this without using psutil so we can remove
    our dependency on it.
    """
    if platform.system() == 'AIX':
        subprocess.call('kill -kill $(ps -o pid= -L{})'.format(pid), shell=True)
    else:
        import psutil
        try:
            psutilProc = psutil.Process(pid)
            # Handle the different psutil API versions
            try:
                # psutil >= 2.x
                children_iterator = psutilProc.children(recursive=True)
            except AttributeError:
                # psutil 1.x
                children_iterator = psutilProc.get_children(recursive=True)
            for child in children_iterator:
                try:
                    child.kill()
                except psutil.NoSuchProcess:
                    pass
            psutilProc.kill()
        except psutil.NoSuchProcess:
            pass


def executeCommandVerbose(cmd, *args, **kwargs):
    """
    Execute a command and print its output on failure.
    """
    out, err, exitCode = executeCommand(cmd, *args, **kwargs)
    if exitCode != 0:
        report = makeReport(cmd, out, err, exitCode)
        report += "\n\nFailed!"
        sys.stderr.write('%s\n' % report)
    return out, err, exitCode
cccl-main
libcudacxx/.upstream-tests/utils/libcudacxx/util.py
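A short sketch of the subprocess helper above; 'echo' is assumed to exist (POSIX):

# Illustrative only: run a command with a 10-second timeout.
from libcudacxx.util import executeCommand, ExecuteCommandTimeoutException

try:
    out, err, rc = executeCommand(['echo', 'hello'], timeout=10)
    print(rc, out.strip())
except ExecuteCommandTimeoutException as ex:
    print('timed out:', ex.msg)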
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

"""libcxx python utilities"""

__author__ = 'Eric Fiselier'
__email__ = '[email protected]'
__versioninfo__ = (0, 1, 0)
__version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev'

__all__ = []
cccl-main
libcudacxx/.upstream-tests/utils/libcudacxx/__init__.py
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

import os
import inspect


def trace_function(function, log_calls, log_results, label=''):
    def wrapper(*args, **kwargs):
        kwarg_strs = ['{}={}'.format(k, v) for (k, v) in kwargs.items()]
        arg_str = ', '.join([str(a) for a in args] + kwarg_strs)
        call_str = '{}({})'.format(function.__name__, arg_str)

        # Perform the call itself, logging before, after, and anything thrown.
        try:
            if log_calls:
                print('{}: Calling {}'.format(label, call_str))
            res = function(*args, **kwargs)
            if log_results:
                print('{}: {} -> {}'.format(label, call_str, res))
            return res
        except Exception as ex:
            if log_results:
                print('{}: {} raised {}'.format(label, call_str, type(ex)))
            raise ex

    return wrapper


def trace_object(obj, log_calls, log_results, label=''):
    for name, member in inspect.getmembers(obj):
        if inspect.ismethod(member):
            # Skip meta-functions, decorate everything else
            if not member.__name__.startswith('__'):
                setattr(obj, name,
                        trace_function(member, log_calls, log_results, label))
    return obj
cccl-main
libcudacxx/.upstream-tests/utils/libcudacxx/test/tracing.py
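A minimal sketch of the tracer, assuming the module is importable at the path shown above:

# Illustrative only: wrap an instance's methods with call/result logging.
from libcudacxx.test.tracing import trace_object

class Greeter(object):
    def greet(self, name):
        return 'hello ' + name

g = trace_object(Greeter(), log_calls=True, log_results=True, label='demo')
g.greet('world')
# demo: Calling greet(world)
# demo: greet(world) -> hello world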
#===----------------------------------------------------------------------===## # # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. # See https://llvm.org/LICENSE.txt for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # #===----------------------------------------------------------------------===## import locale import os import platform import pkgutil import pipes import re import shlex import shutil import sys from libcudacxx.compiler import CXXCompiler from libcudacxx.test.target_info import make_target_info from libcudacxx.test.executor import * from libcudacxx.test.tracing import * import libcudacxx.util def loadSiteConfig(lit_config, config, param_name, env_name): # We haven't loaded the site specific configuration (the user is # probably trying to run on a test file directly, and either the site # configuration hasn't been created by the build system, or we are in an # out-of-tree build situation). site_cfg = lit_config.params.get(param_name, os.environ.get(env_name)) if not site_cfg: lit_config.warning('No site specific configuration file found!' ' Running the tests in the default configuration.') elif not os.path.isfile(site_cfg): lit_config.fatal( "Specified site configuration file does not exist: '%s'" % site_cfg) else: lit_config.note('using site specific configuration at %s' % site_cfg) ld_fn = lit_config.load_config # Null out the load_config function so that lit.site.cfg doesn't # recursively load a config even if it tries. # TODO: This is one hell of a hack. Fix it. def prevent_reload_fn(*args, **kwargs): pass lit_config.load_config = prevent_reload_fn ld_fn(config, site_cfg) lit_config.load_config = ld_fn # Extract the value of a numeric macro such as __cplusplus or a feature-test # macro. 
def intMacroValue(token): return int(token.rstrip('LlUu')) class Configuration(object): # pylint: disable=redefined-outer-name def __init__(self, lit_config, config): self.lit_config = lit_config self.config = config self.is_windows = platform.system() == 'Windows' self.cxx = None self.cxx_is_clang_cl = None self.cxx_stdlib_under_test = None self.project_obj_root = None self.libcudacxx_src_root = None self.libcudacxx_obj_root = None self.cxx_library_root = None self.cxx_runtime_root = None self.abi_library_root = None self.link_shared = self.get_lit_bool('enable_shared', default=True) self.debug_build = self.get_lit_bool('debug_build', default=False) self.exec_env = dict(os.environ) self.use_target = False self.use_system_cxx_lib = False self.use_clang_verify = False self.long_tests = None self.execute_external = False def get_lit_conf(self, name, default=None): val = self.lit_config.params.get(name, None) if val is None: val = getattr(self.config, name, None) if val is None: val = default return val def get_lit_bool(self, name, default=None, env_var=None): def check_value(value, var_name): if value is None: return default if isinstance(value, bool): return value if not isinstance(value, str): raise TypeError('expected bool or string') if value.lower() in ('1', 'true'): return True if value.lower() in ('', '0', 'false'): return False self.lit_config.fatal( "parameter '{}' should be true or false".format(var_name)) conf_val = self.get_lit_conf(name) if env_var is not None and env_var in os.environ and \ os.environ[env_var] is not None: val = os.environ[env_var] if conf_val is not None: self.lit_config.warning( 'Environment variable %s=%s is overriding explicit ' '--param=%s=%s' % (env_var, val, name, conf_val)) return check_value(val, env_var) return check_value(conf_val, name) def get_modules_enabled(self): return self.get_lit_bool('enable_modules', default=False, env_var='LIBCUDACXX_ENABLE_MODULES') def make_static_lib_name(self, name): """Return the full filename for the specified library name""" if self.is_windows: assert name == 'c++' # Only allow libc++ to use this function for now. return 'lib' + name + '.lib' else: return 'lib' + name + '.a' def configure(self): self.configure_executor() self.configure_use_system_cxx_lib() self.configure_target_info() self.configure_cxx() self.configure_triple() self.configure_deployment() self.configure_src_root() self.configure_obj_root() self.configure_cxx_stdlib_under_test() self.configure_cxx_library_root() self.configure_use_clang_verify() self.configure_use_thread_safety() self.configure_no_execute() self.configure_execute_external() self.configure_ccache() self.configure_compile_flags() self.configure_filesystem_compile_flags() self.configure_link_flags() self.configure_env() self.configure_color_diagnostics() self.configure_debug_mode() self.configure_warnings() self.configure_sanitizer() self.configure_coverage() self.configure_modules() self.configure_coroutines() self.configure_substitutions() self.configure_features() def print_config_info(self): # Print the final compile and link flags. 
        self.lit_config.note('Using compiler: %s %s' % (self.cxx.path, self.cxx.first_arg))
        self.lit_config.note('Using flags: %s' % self.cxx.flags)
        if self.cxx.use_modules:
            self.lit_config.note('Using modules flags: %s' %
                                 self.cxx.modules_flags)
        self.lit_config.note('Using compile flags: %s'
                             % self.cxx.compile_flags)
        if len(self.cxx.warning_flags):
            self.lit_config.note('Using warnings: %s' % self.cxx.warning_flags)
        self.lit_config.note('Using link flags: %s' % self.cxx.link_flags)
        # Print as list to prevent "set([...])" from being printed.
        self.lit_config.note('Using available_features: %s' %
                             list(self.config.available_features))
        show_env_vars = {}
        for k, v in self.exec_env.items():
            if k not in os.environ or os.environ[k] != v:
                show_env_vars[k] = v
        self.lit_config.note('Adding environment variables: %r' % show_env_vars)
        sys.stderr.flush()  # Force flushing to avoid broken output on Windows

    def get_test_format(self):
        from libcudacxx.test.format import LibcxxTestFormat
        return LibcxxTestFormat(
            self.cxx,
            self.use_clang_verify,
            self.execute_external,
            self.executor,
            exec_env=self.exec_env)

    def configure_executor(self):
        exec_str = self.get_lit_conf('executor', "None")
        exec_timeout = self.get_lit_conf('maxIndividualTestTime', "None")
        te = eval(exec_str)
        if te:
            self.lit_config.note("Using executor: %r" % exec_str)
            if self.lit_config.useValgrind:
                # We have no way of knowing where in the chain the
                # ValgrindExecutor is supposed to go. It is likely
                # that the user wants it at the end, but we have no
                # way of getting at that easily.
                self.lit_config.fatal("Cannot infer how to create a Valgrind "
                                      "executor.")
        else:
            te = LocalExecutor()

        te.timeout = 0
        if exec_timeout:
            te.timeout = exec_timeout

        if self.lit_config.useValgrind:
            te = ValgrindExecutor(self.lit_config.valgrindArgs, te)

        self.executor = te

    def configure_target_info(self):
        self.target_info = make_target_info(self)

    def configure_cxx(self):
        # Gather various compiler parameters.
        cxx = self.get_lit_conf('cxx_under_test')
        cxx_first_arg = self.get_lit_conf('cxx_first_arg')
        self.cxx_is_clang_cl = cxx is not None and \
                               os.path.basename(cxx) == 'clang-cl.exe'
        # If no specific cxx_under_test was given, attempt to infer it as
        # clang++.
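        # (Illustrative example, not exhaustive: with cxx_under_test unset and
        # a clang++ binary on PATH, the block below picks it up; when
        # clang-cl.exe was given, the clang++ next to it is preferred.)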
if cxx is None or self.cxx_is_clang_cl: search_paths = self.config.environment['PATH'] if cxx is not None and os.path.isabs(cxx): search_paths = os.path.dirname(cxx) clangxx = libcudacxx.util.which('clang++', search_paths) if clangxx: cxx = clangxx self.lit_config.note( "inferred cxx_under_test as: %r" % cxx) elif self.cxx_is_clang_cl: self.lit_config.fatal('Failed to find clang++ substitution for' ' clang-cl') if not cxx: self.lit_config.fatal('must specify user parameter cxx_under_test ' '(e.g., --param=cxx_under_test=clang++)') self.cxx = CXXCompiler(cxx, cxx_first_arg) if not self.cxx_is_clang_cl else \ self._configure_clang_cl(cxx) cxx_type = self.cxx.type if cxx_type is not None: assert self.cxx.version is not None maj_v, min_v, patch_v = self.cxx.version self.config.available_features.add(cxx_type) self.config.available_features.add('%s-%s' % (cxx_type, maj_v)) self.config.available_features.add('%s-%s.%s' % ( cxx_type, maj_v, min_v)) self.config.available_features.add('%s-%s.%s.%s' % ( cxx_type, maj_v, min_v, patch_v)) self.lit_config.note("detected cxx.type as: {}".format( self.cxx.type)) self.lit_config.note("detected cxx.version as: {}".format( self.cxx.version)) self.lit_config.note("detected cxx.default_dialect as: {}".format( self.cxx.default_dialect)) self.lit_config.note("detected cxx.is_nvrtc as: {}".format( self.cxx.is_nvrtc)) self.cxx.compile_env = dict(os.environ) # 'CCACHE_CPP2' prevents ccache from stripping comments while # preprocessing. This is required to prevent stripping of '-verify' # comments. self.cxx.compile_env['CCACHE_CPP2'] = '1' if self.cxx.type == 'nvcc': nvcc_host_compiler = self.get_lit_conf('nvcc_host_compiler') if len(nvcc_host_compiler.strip()) == 0: if platform.system() == 'Darwin': nvcc_host_compiler = 'clang' elif platform.system() == 'Windows': nvcc_host_compiler = 'cl.exe' else: nvcc_host_compiler = 'gcc' self.host_cxx = CXXCompiler(nvcc_host_compiler, None) self.host_cxx_type = self.host_cxx.type if self.host_cxx_type is not None: assert self.host_cxx.version is not None maj_v, min_v, _ = self.host_cxx.version self.config.available_features.add(self.host_cxx_type) self.config.available_features.add('%s-%s' % ( self.host_cxx_type, maj_v)) self.config.available_features.add('%s-%s.%s' % ( self.host_cxx_type, maj_v, min_v)) self.lit_config.note("detected host_cxx.type as: {}".format( self.host_cxx.type)) self.lit_config.note("detected host_cxx.version as: {}".format( self.host_cxx.version)) self.lit_config.note("detected host_cxx.default_dialect as: {}".format( self.host_cxx.default_dialect)) self.lit_config.note("detected host_cxx.is_nvrtc as: {}".format( self.host_cxx.is_nvrtc)) if 'icc' in self.config.available_features: self.cxx.link_flags += ['-lirc'] def _configure_clang_cl(self, clang_path): def _split_env_var(var): return [p.strip() for p in os.environ.get(var, '').split(';') if p.strip()] def _prefixed_env_list(var, prefix): from itertools import chain return list(chain.from_iterable((prefix, path) for path in _split_env_var(var))) assert self.cxx_is_clang_cl flags = [] compile_flags = _prefixed_env_list('INCLUDE', '-isystem') link_flags = _prefixed_env_list('LIB', '-L') for path in _split_env_var('LIB'): self.add_path(self.exec_env, path) return CXXCompiler(clang_path, flags=flags, compile_flags=compile_flags, link_flags=link_flags) def _dump_macros_verbose(self, *args, **kwargs): macros_or_error = self.cxx.dumpMacros(*args, **kwargs) if isinstance(macros_or_error, tuple): cmd, out, err, rc = macros_or_error report = 
libcudacxx.util.makeReport(cmd, out, err, rc) report += "Compiler failed unexpectedly when dumping macros!" self.lit_config.fatal(report) return None assert isinstance(macros_or_error, dict) return macros_or_error def configure_src_root(self): self.libcudacxx_src_root = self.get_lit_conf( 'libcudacxx_src_root', os.path.dirname(self.config.test_source_root)) def configure_obj_root(self): self.project_obj_root = self.get_lit_conf('project_obj_root') self.libcudacxx_obj_root = self.get_lit_conf('libcudacxx_obj_root') if not self.libcudacxx_obj_root and self.project_obj_root is not None: possible_roots = [ os.path.join(self.project_obj_root, 'libcudacxx'), os.path.join(self.project_obj_root, 'projects', 'libcudacxx'), os.path.join(self.project_obj_root, 'runtimes', 'libcudacxx'), ] for possible_root in possible_roots: if os.path.isdir(possible_root): self.libcudacxx_obj_root = possible_root break else: self.libcudacxx_obj_root = self.project_obj_root def configure_cxx_library_root(self): self.cxx_library_root = self.get_lit_conf('cxx_library_root', self.libcudacxx_obj_root) self.cxx_runtime_root = self.get_lit_conf('cxx_runtime_root', self.cxx_library_root) def configure_use_system_cxx_lib(self): # This test suite supports testing against either the system library or # the locally built one; the former mode is useful for testing ABI # compatibility between the current headers and a shipping dynamic # library. # Default to testing against the locally built libc++ library. self.use_system_cxx_lib = self.get_lit_conf('use_system_cxx_lib') if self.use_system_cxx_lib == 'true': self.use_system_cxx_lib = True elif self.use_system_cxx_lib == 'false': self.use_system_cxx_lib = False elif self.use_system_cxx_lib: assert os.path.isdir(self.use_system_cxx_lib), "the specified use_system_cxx_lib parameter (%s) is not a valid directory" % self.use_system_cxx_lib self.use_system_cxx_lib = os.path.abspath(self.use_system_cxx_lib) self.lit_config.note( "inferred use_system_cxx_lib as: %r" % self.use_system_cxx_lib) def configure_cxx_stdlib_under_test(self): self.cxx_stdlib_under_test = self.get_lit_conf( 'cxx_stdlib_under_test', 'libc++') if self.cxx_stdlib_under_test not in \ ['libc++', 'libstdc++', 'msvc', 'cxx_default']: self.lit_config.fatal( 'unsupported value for "cxx_stdlib_under_test": %s' % self.cxx_stdlib_under_test) self.config.available_features.add(self.cxx_stdlib_under_test) if self.cxx_stdlib_under_test == 'libstdc++': self.config.available_features.add('libstdc++') # Manually enable the experimental and filesystem tests for libstdc++ # if the options aren't present. # FIXME this is a hack. if self.get_lit_conf('enable_experimental') is None: self.config.enable_experimental = 'true' def configure_use_clang_verify(self): '''If set, run clang with -verify on failing tests.''' self.use_clang_verify = self.get_lit_bool('use_clang_verify') if self.use_clang_verify is None: # NOTE: We do not test for the -verify flag directly because # -verify will always exit with non-zero on an empty file. 
            self.use_clang_verify = self.cxx.isVerifySupported()
            self.lit_config.note(
                "inferred use_clang_verify as: %r" % self.use_clang_verify)
        if self.use_clang_verify:
            self.config.available_features.add('verify-support')

    def configure_use_thread_safety(self):
        '''Enable Clang thread-safety analysis (-Werror=thread-safety) when
        the compiler supports it.'''
        has_thread_safety = self.cxx.hasCompileFlag('-Werror=thread-safety')
        if has_thread_safety:
            self.cxx.compile_flags += ['-Werror=thread-safety']
            self.config.available_features.add('thread-safety')
            self.lit_config.note("enabling thread-safety annotations")

    def configure_execute_external(self):
        # Choose between lit's internal shell pipeline runner and a real
        # shell. If LIT_USE_INTERNAL_SHELL is in the environment, we use that
        # as the default value. Otherwise we ask the target_info.
        use_lit_shell_default = os.environ.get('LIT_USE_INTERNAL_SHELL')
        if use_lit_shell_default is not None:
            use_lit_shell_default = use_lit_shell_default != '0'
        else:
            use_lit_shell_default = self.target_info.use_lit_shell_default()
        # Check for the command line parameter using the default value if it
        # is not present.
        use_lit_shell = self.get_lit_bool('use_lit_shell',
                                          use_lit_shell_default)
        self.execute_external = not use_lit_shell

    def configure_no_execute(self):
        if type(self.executor) == NoopExecutor:
            self.config.available_features.add('no_execute')

    def configure_ccache(self):
        use_ccache_default = os.environ.get('CMAKE_CUDA_COMPILER_LAUNCHER') is not None
        use_ccache = self.get_lit_bool('use_ccache', use_ccache_default)
        if use_ccache and not self.cxx.is_nvrtc:
            self.cxx.use_ccache = True
            self.lit_config.note('enabling ccache')

    def add_deployment_feature(self, feature):
        (arch, name, version) = self.config.deployment
        self.config.available_features.add('%s=%s-%s' % (feature, arch, name))
        self.config.available_features.add('%s=%s' % (feature, name))
        self.config.available_features.add('%s=%s%s' % (feature, name, version))

    def configure_features(self):
        additional_features = self.get_lit_conf('additional_features')
        if additional_features:
            for f in additional_features.split(','):
                self.config.available_features.add(f.strip())
        self.target_info.add_locale_features(self.config.available_features)

        target_platform = self.target_info.platform()

        # Write an "available feature" that combines the triple when
        # use_system_cxx_lib is enabled. This is so that we can easily write
        # XFAIL markers for tests that are known to fail with versions of
        # libc++ as were shipped with a particular triple.
        if self.use_system_cxx_lib:
            self.config.available_features.add('with_system_cxx_lib')
            self.config.available_features.add(
                'with_system_cxx_lib=%s' % self.config.target_triple)

            # Add subcomponents individually.
            target_components = self.config.target_triple.split('-')
            for component in target_components:
                self.config.available_features.add(
                    'with_system_cxx_lib=%s' % component)

            # Add available features for more generic versions of the target
            # triple attached to with_system_cxx_lib.
            if self.use_deployment:
                self.add_deployment_feature('with_system_cxx_lib')

        # Configure the availability feature. Availability is only enabled
        # with libc++, because other standard libraries do not provide
        # availability markup.
        if self.use_deployment and self.cxx_stdlib_under_test == 'libc++':
            self.config.available_features.add('availability')
            self.add_deployment_feature('availability')

        if platform.system() == 'Darwin':
            self.config.available_features.add('apple-darwin')

        # Insert the platform name into the available features as a lower case.
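        # (For example, target_info.platform() commonly yields values such as
        # 'linux' or 'darwin'; the exact set depends on the target_info
        # implementation.)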
self.config.available_features.add(target_platform) # Simulator testing can take a really long time for some of these tests # so add a feature check so we can REQUIRES: long_tests in them self.long_tests = self.get_lit_bool('long_tests') if self.long_tests is None: # Default to running long tests. self.long_tests = True self.lit_config.note( "inferred long_tests as: %r" % self.long_tests) if self.long_tests: self.config.available_features.add('long_tests') if not self.get_lit_bool('enable_filesystem', default=True): self.config.available_features.add('c++filesystem-disabled') self.config.available_features.add('dylib-has-no-filesystem') # Run a compile test for the -fsized-deallocation flag. This is needed # in test/std/language.support/support.dynamic/new.delete if self.cxx.hasCompileFlag('-fsized-deallocation'): self.config.available_features.add('-fsized-deallocation') if self.cxx.hasCompileFlag('-faligned-allocation'): self.config.available_features.add('-faligned-allocation') else: # FIXME remove this once more than just clang-4.0 support # C++17 aligned allocation. self.config.available_features.add('no-aligned-allocation') if self.cxx.hasCompileFlag('-fdelayed-template-parsing'): self.config.available_features.add('fdelayed-template-parsing') if self.get_lit_bool('has_libatomic', False): self.config.available_features.add('libatomic') if 'msvc' not in self.config.available_features: macros = self._dump_macros_verbose() if '__cpp_if_constexpr' not in macros: self.config.available_features.add('libcpp-no-if-constexpr') if '__cpp_structured_bindings' not in macros: self.config.available_features.add('libcpp-no-structured-bindings') if '__cpp_deduction_guides' not in macros or \ intMacroValue(macros['__cpp_deduction_guides']) < 201611: self.config.available_features.add('libcpp-no-deduction-guides') if self.is_windows: self.config.available_features.add('windows') if self.cxx_stdlib_under_test == 'libc++': # LIBCXX-WINDOWS-FIXME is the feature name used to XFAIL the # initial Windows failures until they can be properly diagnosed # and fixed. This allows easier detection of new test failures # and regressions. Note: New failures should not be suppressed # using this feature. (Also see llvm.org/PR32730) self.config.available_features.add('LIBCUDACXX-WINDOWS-FIXME') if 'msvc' not in self.config.available_features: # Attempt to detect the glibc version by querying for __GLIBC__ # in 'features.h'. macros = self.cxx.dumpMacros(flags=['-include', 'features.h']) if isinstance(macros, dict) and '__GLIBC__' in macros: maj_v, min_v = (macros['__GLIBC__'], macros['__GLIBC_MINOR__']) self.config.available_features.add('glibc') self.config.available_features.add('glibc-%s' % maj_v) self.config.available_features.add('glibc-%s.%s' % (maj_v, min_v)) libcudacxx_gdb = self.get_lit_conf('libcudacxx_gdb') if libcudacxx_gdb and 'NOTFOUND' not in libcudacxx_gdb: self.config.available_features.add('libcudacxx_gdb') self.cxx.libcudacxx_gdb = libcudacxx_gdb # Support Objective-C++ only on MacOS and if the compiler supports it. if self.target_info.platform() == "darwin" and \ self.target_info.is_host_macosx() and \ self.cxx.hasCompileFlag(["-x", "objective-c++", "-fobjc-arc"]): self.config.available_features.add("objective-c++") def configure_compile_flags(self): self.configure_default_compile_flags() # Configure extra flags compile_flags_str = self.get_lit_conf('compile_flags', '') self.cxx.compile_flags += shlex.split(compile_flags_str) if self.is_windows: # FIXME: Can we remove this? 
            self.cxx.compile_flags += ['-D_CRT_SECURE_NO_WARNINGS']
            # Required so that tests using min/max don't fail on Windows,
            # and so that those tests don't have to be changed to tolerate
            # this insanity.
            self.cxx.compile_flags += ['-DNOMINMAX']
            if 'msvc' in self.config.available_features:
                if self.cxx.type == 'nvcc':
                    self.cxx.compile_flags += ['-Xcompiler']
                self.cxx.compile_flags += ['/bigobj']
        additional_flags = self.get_lit_conf('test_compiler_flags')
        if additional_flags:
            self.cxx.compile_flags += shlex.split(additional_flags)
        compute_archs = self.get_lit_conf('compute_archs')
        if self.cxx.is_nvrtc is True:
            self.config.available_features.add("nvrtc")
        if self.cxx.type == 'nvcc':
            self.cxx.compile_flags += ['--extended-lambda']
        pre_sm_32 = True
        pre_sm_60 = True
        pre_sm_70 = True
        pre_sm_80 = True
        pre_sm_90 = True
        if compute_archs and self.cxx.type == 'nvcc':
            pre_sm_32 = False
            pre_sm_60 = False
            pre_sm_70 = False
            pre_sm_80 = False
            pre_sm_90 = False
            compute_archs = set(sorted(re.split(r'\s|;|,', compute_archs)))
            for s in compute_archs:
                # Split arch and mode, i.e. 80-virtual -> 80, virtual
                arch, *mode = re.split('-', s)
                arch = int(arch)
                if arch < 32:
                    pre_sm_32 = True
                if arch < 60:
                    pre_sm_60 = True
                if arch < 70:
                    pre_sm_70 = True
                if arch < 80:
                    pre_sm_80 = True
                if arch < 90:
                    pre_sm_90 = True
                if mode.count("virtual"):
                    arch_flag = '-gencode=arch=compute_{0},code=compute_{0}'.format(arch)
                else:
                    arch_flag = '-gencode=arch=compute_{0},code=sm_{0}'.format(arch)
                self.cxx.compile_flags += [arch_flag]
        if pre_sm_32:
            self.config.available_features.add("pre-sm-32")
        if pre_sm_60:
            self.config.available_features.add("pre-sm-60")
        if pre_sm_70:
            self.config.available_features.add("pre-sm-70")
        if pre_sm_80:
            self.config.available_features.add("pre-sm-80")
        if pre_sm_90:
            self.config.available_features.add("pre-sm-90")

    def configure_default_compile_flags(self):
        nvcc_host_compiler = self.get_lit_conf('nvcc_host_compiler')
        if nvcc_host_compiler and self.cxx.type == 'nvcc':
            self.cxx.compile_flags += ['-ccbin={0}'.format(nvcc_host_compiler)]

        # Try to get the std version from the command line. Fall back to the
        # default given in lit.site.cfg if none is present. If the default is
        # not present either, then force c++11.
        std = self.get_lit_conf('std')
        if not std:
            # Choose the newest possible language dialect if none is given.
            possible_stds = ['c++20', 'c++2a', 'c++17', 'c++1z', 'c++14',
                             'c++11', 'c++03']
            if self.cxx.type == 'gcc':
                maj_v, _, _ = self.cxx.version
                maj_v = int(maj_v)
                if maj_v < 6:
                    possible_stds.remove('c++1z')
                    possible_stds.remove('c++17')
                # FIXME: How many C++14 tests actually fail under GCC 5 and 6?
                # Should we XFAIL them individually instead?
                if maj_v < 6:
                    possible_stds.remove('c++14')
            for s in possible_stds:
                cxx = self.cxx
                success = True

                if self.cxx.type == 'nvcc':
                    # NVCC warns, but doesn't error, if the host compiler
                    # doesn't support the dialect. It's also possible that the
                    # host compiler supports the dialect, but NVCC doesn't.
                    # So, first we need to check if NVCC supports the
                    # dialect...
                    if not self.cxx.hasCompileFlag('-std=%s' % s):
                        # If it doesn't, give up on this dialect.
                        success = False

                    # ... then we need to check if the host compiler supports
                    # the dialect.
                    cxx = self.host_cxx

                if cxx.type == 'msvc':
                    if not cxx.hasCompileFlag('/std:%s' % s):
                        success = False
                else:
                    if not cxx.hasCompileFlag('-std=%s' % s):
                        success = False

                if success:
                    std = s
                    self.lit_config.note('inferred language dialect as: %s' % std)
                    break

        if std:
            # We found a dialect flag.
            if self.cxx.type == 'msvc':
                self.cxx.compile_flags += ['/std:{0}'.format(std)]
            else:
                self.cxx.compile_flags += ['-std={0}'.format(std)]
        if not std:
            # There is no dialect flag. This happens with older MSVC.
            if self.cxx.type == 'nvcc':
                std = self.host_cxx.default_dialect
            else:
                std = self.cxx.default_dialect
            self.lit_config.note('using default language dialect: %s' % std)

        std_feature = std.replace('gnu++', 'c++')
        std_feature = std_feature.replace('1z', '17')
        std_feature = std_feature.replace('2a', '20')
        self.config.available_features.add(std_feature)
        # Configure include paths
        self.configure_compile_flags_header_includes()
        self.target_info.add_cxx_compile_flags(self.cxx.compile_flags)
        # Configure feature flags.
        self.configure_compile_flags_exceptions()
        self.configure_compile_flags_rtti()
        self.configure_compile_flags_abi_version()
        enable_32bit = self.get_lit_bool('enable_32bit', False)
        if enable_32bit:
            self.cxx.flags += ['-m32']
        # Use verbose output for better errors
        self.cxx.flags += ['-v']
        sysroot = self.get_lit_conf('sysroot')
        if sysroot:
            self.cxx.flags += ['--sysroot=' + sysroot]
        gcc_toolchain = self.get_lit_conf('gcc_toolchain')
        if gcc_toolchain:
            self.cxx.flags += ['--gcc-toolchain=' + gcc_toolchain]
        # NOTE: the _DEBUG definition must precede the triple check because for
        # the Windows build of libc++, the forced inclusion of a header requires
        # that _DEBUG is defined. Incorrect ordering will result in -target
        # being elided.
        if self.is_windows and self.debug_build:
            self.cxx.compile_flags += ['-D_DEBUG']
        if self.use_target:
            if not self.cxx.addFlagIfSupported(
                    ['--target=' + self.config.target_triple]):
                self.lit_config.warning('use_target is true but --target is '
                                        'not supported by the compiler')
        if self.use_deployment:
            arch, name, version = self.config.deployment
            self.cxx.flags += ['-arch', arch]
            self.cxx.flags += ['-m' + name + '-version-min=' + version]

        # Add includes for support headers used in the tests.
        support_path = os.path.join(self.libcudacxx_src_root, 'test/support')
        self.cxx.compile_flags += ['-I' + support_path]

        # Add includes for the PSTL headers
        pstl_src_root = self.get_lit_conf('pstl_src_root')
        pstl_obj_root = self.get_lit_conf('pstl_obj_root')
        if pstl_src_root is not None and pstl_obj_root is not None:
            self.cxx.compile_flags += ['-I' + os.path.join(pstl_src_root, 'include')]
            self.cxx.compile_flags += ['-I' + os.path.join(pstl_obj_root, 'generated_headers')]
            self.cxx.compile_flags += ['-I' + os.path.join(pstl_src_root, 'test')]
            self.config.available_features.add('parallel-algorithms')

        # FIXME(EricWF): variant_size.pass.cpp requires a slightly larger
        # template depth with older Clang versions.
        self.cxx.addFlagIfSupported('-ftemplate-depth=270')

        # If running without execution we need to mark tests that only fail at
        # runtime as unsupported.
        if self.lit_config.noExecute:
            self.config.available_features.add('no_execute')

    def configure_compile_flags_header_includes(self):
        support_path = os.path.join(self.libcudacxx_src_root, 'test', 'support')
        self.configure_config_site_header()
        if self.cxx_stdlib_under_test != 'libstdc++' and \
           not self.is_windows:
            self.cxx.compile_flags += [
                '-include', os.path.join(support_path, 'nasty_macros.h')]
        if self.cxx_stdlib_under_test == 'msvc':
            self.cxx.compile_flags += [
                '-include', os.path.join(support_path,
                                         'msvc_stdlib_force_include.h')]
        if self.is_windows and self.debug_build and \
                self.cxx_stdlib_under_test != 'msvc':
            self.cxx.compile_flags += [
                '-include', os.path.join(support_path,
                                         'set_windows_crt_report_mode.h')
            ]
        cxx_headers = self.get_lit_conf('cxx_headers')
        if cxx_headers == '' or (cxx_headers is None
                                 and self.cxx_stdlib_under_test != 'libc++'):
            self.lit_config.note('using the system cxx headers')
            return
        if self.cxx.type != 'nvcc' and self.cxx.type != 'nvhpc':
            self.cxx.compile_flags += ['-nostdinc++']
        if cxx_headers is None:
            cxx_headers = os.path.join(self.libcudacxx_src_root, 'include')
        if not os.path.isdir(cxx_headers):
            self.lit_config.fatal("cxx_headers='%s' is not a directory."
                                  % cxx_headers)
        self.cxx.compile_flags += ['-I' + cxx_headers]
        if self.libcudacxx_obj_root is not None:
            cxxabi_headers = os.path.join(self.libcudacxx_obj_root, 'include',
                                          'c++build')
            if os.path.isdir(cxxabi_headers):
                self.cxx.compile_flags += ['-I' + cxxabi_headers]

    def configure_config_site_header(self):
        # Check for a possible __config_site in the build directory. We
        # use this if it exists.
        if self.libcudacxx_obj_root is None:
            return
        config_site_header = os.path.join(self.libcudacxx_obj_root, '__config_site')
        if not os.path.isfile(config_site_header):
            return
        contained_macros = self.parse_config_site_and_add_features(
            config_site_header)
        self.lit_config.note('Using __config_site header %s with macros: %r'
                             % (config_site_header, contained_macros))
        # FIXME: This must come after the call to
        # 'parse_config_site_and_add_features(...)' in order for it to work.
        self.cxx.compile_flags += ['-include', config_site_header]

    def parse_config_site_and_add_features(self, header):
        """ parse_config_site_and_add_features - Deduce and add the test
            features that are implied by the #define's in the __config_site
            header. Return a dictionary containing the macros found in the
            '__config_site' header.
        """
        # MSVC can't dump macros, so we just give up.
        if 'msvc' in self.config.available_features:
            return {}
        # Parse the macro contents of __config_site by dumping the macros
        # using 'c++ -dM -E' and filtering the predefines.
        predefines = self._dump_macros_verbose()
        macros = self._dump_macros_verbose(header)
        feature_macros_keys = set(macros.keys()) - set(predefines.keys())
        feature_macros = {}
        for k in feature_macros_keys:
            feature_macros[k] = macros[k]
        # We expect the header guard to be one of the definitions
        assert '_LIBCUDACXX_CONFIG_SITE' in feature_macros
        del feature_macros['_LIBCUDACXX_CONFIG_SITE']
        # The __config_site header should be non-empty. Otherwise it should
        # have never been emitted by CMake.
        assert len(feature_macros) > 0
        # FIXME: This is a hack that should be fixed using module maps.
        # If modules are enabled then we have to lift all of the definitions
        # in __config_site onto the command line.
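        # For example, a __config_site containing the (hypothetical) line
        #   #define _LIBCUDACXX_HAS_NO_THREADS
        # is re-expressed by the loop below as the command-line define
        #   -D_LIBCUDACXX_HAS_NO_THREADS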
for m in feature_macros: define = '-D%s' % m if feature_macros[m]: define += '=%s' % (feature_macros[m]) self.cxx.modules_flags += [define] if self.cxx.hasCompileFlag('-Wno-macro-redefined'): self.cxx.compile_flags += ['-Wno-macro-redefined'] # Transform each macro name into the feature name used in the tests. # Ex. _LIBCUDACXX_HAS_NO_THREADS -> libcpp-has-no-threads for m in feature_macros: if m == '_LIBCUDACXX_DISABLE_VISIBILITY_ANNOTATIONS' or \ m == '_LIBCUDACXX_HIDE_FROM_ABI_PER_TU_BY_DEFAULT': continue if m == '_LIBCUDACXX_ABI_VERSION': self.config.available_features.add('libcpp-abi-version-v%s' % feature_macros[m]) continue if m == '_LIBCUDACXX_NO_VCRUNTIME': self.config.available_features.add('libcpp-no-vcruntime') continue assert m.startswith('_LIBCUDACXX_HAS_') or m.startswith('_LIBCUDACXX_ABI_') m = m.lower()[1:].replace('_', '-') self.config.available_features.add(m) return feature_macros def configure_compile_flags_exceptions(self): enable_exceptions = self.get_lit_bool('enable_exceptions', True) if not enable_exceptions: self.config.available_features.add('libcpp-no-exceptions') if 'nvhpc' in self.config.available_features: # NVHPC reports all expressions as `noexcept(true)` with its # "no exceptions" mode. Override the setting from CMake as # a temporary workaround for that. pass # TODO: I don't know how to shut off exceptions with MSVC. elif 'msvc' not in self.config.available_features: if self.cxx.type == 'nvcc': self.cxx.compile_flags += ['-Xcompiler'] self.cxx.compile_flags += ['-fno-exceptions'] def configure_compile_flags_rtti(self): enable_rtti = self.get_lit_bool('enable_rtti', True) if not enable_rtti: self.config.available_features.add('libcpp-no-rtti') if self.cxx.type == 'nvcc': self.cxx.compile_flags += ['-Xcompiler'] if 'nvhpc' in self.config.available_features: self.cxx.compile_flags += ['--no_rtti'] elif 'msvc' in self.config.available_features: self.cxx.compile_flags += ['/GR-'] self.cxx.compile_flags += ['-D_SILENCE_CXX20_CISO646_REMOVED_WARNING'] else: self.cxx.compile_flags += ['-fno-rtti'] self.cxx.compile_flags += ['-D_LIBCUDACXX_NO_RTTI'] def configure_compile_flags_abi_version(self): abi_version = self.get_lit_conf('abi_version', '').strip() abi_unstable = self.get_lit_bool('abi_unstable') # Only add the ABI version when it is non-default. # FIXME(EricWF): Get the ABI version from the "__config_site". 
if abi_version and abi_version != '1': self.cxx.compile_flags += ['-D_LIBCUDACXX_ABI_VERSION=' + abi_version] if abi_unstable: self.config.available_features.add('libcpp-abi-unstable') self.cxx.compile_flags += ['-D_LIBCUDACXX_ABI_UNSTABLE'] def configure_filesystem_compile_flags(self): if not self.get_lit_bool('enable_filesystem', default=True): return static_env = os.path.join(self.libcudacxx_src_root, 'test', 'std', 'input.output', 'filesystems', 'Inputs', 'static_test_env') static_env = os.path.realpath(static_env) assert os.path.isdir(static_env) self.cxx.compile_flags += ['-DLIBCXX_FILESYSTEM_STATIC_TEST_ROOT="%s"' % static_env] dynamic_env = os.path.join(self.config.test_exec_root, 'filesystem', 'Output', 'dynamic_env') dynamic_env = os.path.realpath(dynamic_env) if not os.path.isdir(dynamic_env): os.makedirs(dynamic_env) self.cxx.compile_flags += ['-DLIBCXX_FILESYSTEM_DYNAMIC_TEST_ROOT="%s"' % dynamic_env] self.exec_env['LIBCXX_FILESYSTEM_DYNAMIC_TEST_ROOT'] = ("%s" % dynamic_env) dynamic_helper = os.path.join(self.libcudacxx_src_root, 'test', 'support', 'filesystem_dynamic_test_helper.py') assert os.path.isfile(dynamic_helper) self.cxx.compile_flags += ['-DLIBCXX_FILESYSTEM_DYNAMIC_TEST_HELPER="%s %s"' % (sys.executable, dynamic_helper)] def configure_link_flags(self): nvcc_host_compiler = self.get_lit_conf('nvcc_host_compiler') if nvcc_host_compiler and self.cxx.type == 'nvcc': self.cxx.link_flags += ['-ccbin={0}'.format(nvcc_host_compiler)] # Configure library path self.configure_link_flags_cxx_library_path() self.configure_link_flags_abi_library_path() # Configure libraries if self.cxx_stdlib_under_test == 'libc++': if self.get_lit_conf('name') != 'libcu++': if 'nvhpc' not in self.config.available_features or not self.cxx.is_nvrtc: if self.cxx.type == 'nvcc': self.cxx.link_flags += ['-Xcompiler'] self.cxx.link_flags += ['-nodefaultlibs'] # FIXME: Handle MSVCRT as part of the ABI library handling. if self.is_windows and 'msvc' not in self.config.available_features: if self.cxx.type == 'nvcc': self.cxx.link_flags += ['-Xcompiler'] self.cxx.link_flags += ['-nostdlib'] self.configure_link_flags_cxx_library() self.configure_link_flags_abi_library() self.configure_extra_library_flags() elif self.cxx_stdlib_under_test == 'libstdc++': self.config.available_features.add('c++experimental') self.cxx.link_flags += ['-lstdc++fs', '-lm', '-pthread'] elif self.cxx_stdlib_under_test == 'msvc': # FIXME: Correctly setup debug/release flags here. 
            pass
        elif self.cxx_stdlib_under_test == 'cxx_default':
            self.cxx.link_flags += ['-pthread']
        else:
            self.lit_config.fatal('invalid stdlib under test')

        link_flags_str = self.get_lit_conf('link_flags', '')
        self.cxx.link_flags += shlex.split(link_flags_str)

    def configure_link_flags_cxx_library_path(self):
        if not self.use_system_cxx_lib:
            if self.cxx_library_root:
                self.cxx.link_flags += ['-L' + self.cxx_library_root]
                if self.is_windows and self.link_shared:
                    self.add_path(self.cxx.compile_env, self.cxx_library_root)
            if self.cxx_runtime_root:
                if not self.is_windows:
                    if self.cxx.type == 'nvcc':
                        self.cxx.link_flags += ['-Xcompiler',
                            '"-Wl,-rpath,' + self.cxx_runtime_root + '"']
                    else:
                        self.cxx.link_flags += ['-Wl,-rpath,' +
                                                self.cxx_runtime_root]
                elif self.is_windows and self.link_shared:
                    self.add_path(self.exec_env, self.cxx_runtime_root)
        elif os.path.isdir(str(self.use_system_cxx_lib)):
            self.cxx.link_flags += ['-L' + self.use_system_cxx_lib]
            if not self.is_windows:
                if self.cxx.type == 'nvcc':
                    self.cxx.link_flags += ['-Xcompiler',
                        '"-Wl,-rpath,' + self.cxx_runtime_root + '"']
                else:
                    self.cxx.link_flags += ['-Wl,-rpath,' +
                                            self.use_system_cxx_lib]
            if self.is_windows and self.link_shared:
                self.add_path(self.cxx.compile_env, self.use_system_cxx_lib)
        additional_flags = self.get_lit_conf('test_linker_flags')
        if additional_flags:
            self.cxx.link_flags += shlex.split(additional_flags)

    def configure_link_flags_abi_library_path(self):
        # Configure ABI library paths.
        self.abi_library_root = self.get_lit_conf('abi_library_path')
        if self.abi_library_root:
            self.cxx.link_flags += ['-L' + self.abi_library_root]
            if not self.is_windows:
                if self.cxx.type == 'nvcc':
                    self.cxx.link_flags += ['-Xcompiler',
                        '"-Wl,-rpath,' + self.cxx_runtime_root + '"']
                else:
                    self.cxx.link_flags += ['-Wl,-rpath,' +
                                            self.abi_library_root]
            else:
                self.add_path(self.exec_env, self.abi_library_root)

    def configure_link_flags_cxx_library(self):
        libcxx_experimental = self.get_lit_bool('enable_experimental', default=False)
        if libcxx_experimental:
            self.config.available_features.add('c++experimental')
            self.cxx.link_flags += ['-lc++experimental']
        if self.link_shared:
            self.cxx.link_flags += ['-lc++']
        elif self.cxx.type != 'nvcc' and self.cxx.type != 'nvhpc':
            cxx_library_root = self.get_lit_conf('cxx_library_root')
            if cxx_library_root:
                libname = self.make_static_lib_name('c++')
                abs_path = os.path.join(cxx_library_root, libname)
                assert os.path.exists(abs_path), \
                       "static libc++ library does not exist"
                self.cxx.link_flags += [abs_path]
            else:
                self.cxx.link_flags += ['-lc++']

    def configure_link_flags_abi_library(self):
        cxx_abi = self.get_lit_conf('cxx_abi', 'libcxxabi')
        if cxx_abi == 'libstdc++':
            self.cxx.link_flags += ['-lstdc++']
        elif cxx_abi == 'libsupc++':
            self.cxx.link_flags += ['-lsupc++']
        elif cxx_abi == 'libcxxabi':
            # If the C++ library requires explicitly linking to libc++abi, or
            # if we're testing libc++abi itself (the test configs are shared),
            # then link it.
testing_libcxxabi = self.get_lit_conf('name', '') == 'libc++abi' if self.target_info.allow_cxxabi_link() or testing_libcxxabi: libcxxabi_shared = self.get_lit_bool('libcxxabi_shared', default=True) if libcxxabi_shared: self.cxx.link_flags += ['-lc++abi'] else: cxxabi_library_root = self.get_lit_conf('abi_library_path') if cxxabi_library_root: libname = self.make_static_lib_name('c++abi') abs_path = os.path.join(cxxabi_library_root, libname) self.cxx.link_flags += [abs_path] else: self.cxx.link_flags += ['-lc++abi'] elif cxx_abi == 'libcxxrt': self.cxx.link_flags += ['-lcxxrt'] elif cxx_abi == 'vcruntime': debug_suffix = 'd' if self.debug_build else '' self.cxx.link_flags += ['-l%s%s' % (lib, debug_suffix) for lib in ['vcruntime', 'ucrt', 'msvcrt']] elif cxx_abi == 'none' or cxx_abi == 'default': if self.is_windows: debug_suffix = 'd' if self.debug_build else '' self.cxx.link_flags += ['-lmsvcrt%s' % debug_suffix] else: self.lit_config.fatal( 'C++ ABI setting %s unsupported for tests' % cxx_abi) def configure_extra_library_flags(self): if self.get_lit_bool('cxx_ext_threads', default=False): self.cxx.link_flags += ['-lc++external_threads'] self.target_info.add_cxx_link_flags(self.cxx.link_flags) def configure_color_diagnostics(self): use_color = self.get_lit_conf('color_diagnostics') if use_color is None: use_color = os.environ.get('LIBCXX_COLOR_DIAGNOSTICS') if use_color is None: return if use_color != '': self.lit_config.fatal('Invalid value for color_diagnostics "%s".' % use_color) color_flag = '-fdiagnostics-color=always' # Check if the compiler supports the color diagnostics flag. Issue a # warning if it does not since color diagnostics have been requested. if not self.cxx.hasCompileFlag(color_flag): self.lit_config.warning( 'color diagnostics have been requested but are not supported ' 'by the compiler') else: self.cxx.flags += [color_flag] def configure_debug_mode(self): debug_level = self.get_lit_conf('debug_level', None) if not debug_level: return if debug_level not in ['0', '1']: self.lit_config.fatal('Invalid value for debug_level "%s".' % debug_level) self.cxx.compile_flags += ['-D_LIBCUDACXX_DEBUG=%s' % debug_level] def configure_warnings(self): default_enable_warnings = 'clang' in self.config.available_features or \ 'msvc' in self.config.available_features or \ 'nvcc' in self.config.available_features enable_warnings = self.get_lit_bool('enable_warnings', default_enable_warnings) self.cxx.useWarnings(enable_warnings) if 'nvcc' in self.config.available_features: self.cxx.warning_flags += [ '-Xcudafe', '--display_error_number' ] self.cxx.warning_flags += [ '-Werror=all-warnings' ] if 'msvc' in self.config.available_features: self.cxx.warning_flags += [ '-Xcompiler', '/W4', '-Xcompiler', '/WX' ] # warning C4100: 'quack': unreferenced formal parameter self.cxx.warning_flags += [ '-Xcompiler', '-wd4100' ] # warning C4127: conditional expression is constant self.cxx.warning_flags += [ '-Xcompiler', '-wd4127' ] # warning C4180: qualifier applied to function type has no meaning; ignored self.cxx.warning_flags += [ '-Xcompiler', '-wd4180' ] # warning C4309: 'moo': truncation of constant value self.cxx.warning_flags += [ '-Xcompiler', '-wd4309' ] # warning C4996: deprecation warnings self.cxx.warning_flags += [ '-Xcompiler', '-wd4996' ] else: # TODO: Re-enable soon. 
                def addIfHostSupports(flag):
                    if hasattr(self, 'host_cxx') and self.host_cxx.hasWarningFlag(flag):
                        self.cxx.warning_flags += [ '-Xcompiler', flag ]

                addIfHostSupports('-Wall')
                addIfHostSupports('-Wextra')
                addIfHostSupports('-Werror')
                addIfHostSupports('-Wno-literal-suffix')  # GCC warning about reserved UDLs
                addIfHostSupports('-Wno-user-defined-literals')  # Clang warning about reserved UDLs
                addIfHostSupports('-Wno-unused-parameter')
                addIfHostSupports('-Wno-unused-local-typedefs')  # GCC warning about local typedefs
                addIfHostSupports('-Wno-deprecated-declarations')
                addIfHostSupports('-Wno-noexcept-type')
                addIfHostSupports('-Wno-unused-function')

                if 'gcc-4.8' in self.config.available_features:
                    # GCC pre-GCC5 spuriously generates these on reasonable aggregate initialization.
                    addIfHostSupports('-Wno-missing-field-initializers')

                # TODO: port the warning disables from the non-NVCC path?

                self.cxx.warning_flags += [ '-D_LIBCUDACXX_DISABLE_PRAGMA_GCC_SYSTEM_HEADER' ]
        else:
            self.cxx.warning_flags += [
                '-D_LIBCUDACXX_DISABLE_PRAGMA_GCC_SYSTEM_HEADER',
                '-Wall', '-Wextra', '-Werror'
            ]
            if self.cxx.hasWarningFlag('-Wuser-defined-warnings'):
                self.cxx.warning_flags += ['-Wuser-defined-warnings']
                self.config.available_features.add('diagnose-if-support')
            self.cxx.addWarningFlagIfSupported('-Wshadow')
            self.cxx.addWarningFlagIfSupported('-Wno-unused-command-line-argument')
            self.cxx.addWarningFlagIfSupported('-Wno-attributes')
            self.cxx.addWarningFlagIfSupported('-Wno-pessimizing-move')
            self.cxx.addWarningFlagIfSupported('-Wno-c++11-extensions')
            self.cxx.addWarningFlagIfSupported('-Wno-user-defined-literals')
            self.cxx.addWarningFlagIfSupported('-Wno-noexcept-type')
            self.cxx.addWarningFlagIfSupported('-Wno-aligned-allocation-unavailable')
            # These warnings should be enabled in order to support the MSVC
            # team using the test suite; they enable the warnings below and
            # expect the test suite to be clean.
            self.cxx.addWarningFlagIfSupported('-Wsign-compare')
            self.cxx.addWarningFlagIfSupported('-Wunused-variable')
            self.cxx.addWarningFlagIfSupported('-Wunused-parameter')
            self.cxx.addWarningFlagIfSupported('-Wunreachable-code')
        std = self.get_lit_conf('std', None)
        if std in ['c++98', 'c++03']:
            if 'nvcc' not in self.config.available_features:
                # The '#define static_assert' provided by libc++ in C++03 mode
                # causes an unused local typedef whenever it is used.
                self.cxx.addWarningFlagIfSupported('-Wno-unused-local-typedef')

    def configure_sanitizer(self):
        san = self.get_lit_conf('use_sanitizer', '').strip()
        if san:
            self.target_info.add_sanitizer_features(san,
                self.config.available_features)
            # Search for llvm-symbolizer along the compiler path first
            # and then along the PATH env variable.
symbolizer_search_paths = os.environ.get('PATH', '') cxx_path = libcudacxx.util.which(self.cxx.path) if cxx_path is not None: symbolizer_search_paths = ( os.path.dirname(cxx_path) + os.pathsep + symbolizer_search_paths) llvm_symbolizer = libcudacxx.util.which('llvm-symbolizer', symbolizer_search_paths) def add_ubsan(): self.cxx.flags += ['-fsanitize=undefined', '-fno-sanitize=float-divide-by-zero', '-fno-sanitize-recover=all'] self.exec_env['UBSAN_OPTIONS'] = 'print_stacktrace=1' self.config.available_features.add('ubsan') # Setup the sanitizer compile flags self.cxx.flags += ['-g', '-fno-omit-frame-pointer'] if san == 'Address' or san == 'Address;Undefined' or san == 'Undefined;Address': self.cxx.flags += ['-fsanitize=address'] if llvm_symbolizer is not None: self.exec_env['ASAN_SYMBOLIZER_PATH'] = llvm_symbolizer # FIXME: Turn ODR violation back on after PR28391 is resolved # https://bugs.llvm.org/show_bug.cgi?id=28391 self.exec_env['ASAN_OPTIONS'] = 'detect_odr_violation=0' self.config.available_features.add('asan') self.config.available_features.add('sanitizer-new-delete') self.cxx.compile_flags += ['-O1'] if san == 'Address;Undefined' or san == 'Undefined;Address': add_ubsan() elif san == 'Memory' or san == 'MemoryWithOrigins': self.cxx.flags += ['-fsanitize=memory'] if san == 'MemoryWithOrigins': self.cxx.compile_flags += [ '-fsanitize-memory-track-origins'] if llvm_symbolizer is not None: self.exec_env['MSAN_SYMBOLIZER_PATH'] = llvm_symbolizer self.config.available_features.add('msan') self.config.available_features.add('sanitizer-new-delete') self.cxx.compile_flags += ['-O1'] elif san == 'Undefined': add_ubsan() self.cxx.compile_flags += ['-O2'] elif san == 'Thread': self.cxx.flags += ['-fsanitize=thread'] self.config.available_features.add('tsan') self.config.available_features.add('sanitizer-new-delete') else: self.lit_config.fatal('unsupported value for ' 'use_sanitizer: {0}'.format(san)) san_lib = self.get_lit_conf('sanitizer_library') if san_lib: if self.cxx.type == 'nvcc': self.cxx.link_flags += ['-Xcompiler', '"-Wl,-rpath,' + os.path.dirname(san_lib) + '"'] else: self.cxx.link_flags += ['-Wl,-rpath,' + os.path.dirname(san_lib)] def configure_coverage(self): self.generate_coverage = self.get_lit_bool('generate_coverage', False) if self.generate_coverage: self.cxx.flags += ['-g', '--coverage'] self.cxx.compile_flags += ['-O0'] def configure_coroutines(self): if self.cxx.hasCompileFlag('-fcoroutines-ts'): macros = self._dump_macros_verbose(flags=['-fcoroutines-ts']) if '__cpp_coroutines' not in macros: self.lit_config.warning('-fcoroutines-ts is supported but ' '__cpp_coroutines is not defined') # Consider coroutines supported only when the feature test macro # reflects a recent value. 
if intMacroValue(macros['__cpp_coroutines']) >= 201703: self.config.available_features.add('fcoroutines-ts') def configure_modules(self): modules_flags = ['-fmodules'] if platform.system() != 'Darwin': modules_flags += ['-Xclang', '-fmodules-local-submodule-visibility'] supports_modules = self.cxx.hasCompileFlag(modules_flags) enable_modules = self.get_modules_enabled() if enable_modules and not supports_modules: self.lit_config.fatal( '-fmodules is enabled but not supported by the compiler') if not supports_modules: return self.config.available_features.add('modules-support') module_cache = os.path.join(self.config.test_exec_root, 'modules.cache') module_cache = os.path.realpath(module_cache) if os.path.isdir(module_cache): shutil.rmtree(module_cache) os.makedirs(module_cache) self.cxx.modules_flags += modules_flags + \ ['-fmodules-cache-path=' + module_cache] if enable_modules: self.config.available_features.add('-fmodules') self.cxx.useModules() def configure_substitutions(self): sub = self.config.substitutions cxx_path = pipes.quote(self.cxx.path) # Configure compiler substitutions sub.append(('%cxx', cxx_path)) sub.append(('%libcxx_src_root', self.libcudacxx_src_root)) # Configure flags substitutions flags_str = ' '.join([pipes.quote(f) for f in self.cxx.flags]) compile_flags_str = ' '.join([pipes.quote(f) for f in self.cxx.compile_flags]) link_flags_str = ' '.join([pipes.quote(f) for f in self.cxx.link_flags]) all_flags = '%s %s %s' % (flags_str, compile_flags_str, link_flags_str) sub.append(('%flags', flags_str)) sub.append(('%compile_flags', compile_flags_str)) sub.append(('%link_flags', link_flags_str)) sub.append(('%all_flags', all_flags)) if self.cxx.isVerifySupported(): verify_str = ' ' + ' '.join(self.cxx.verify_flags) + ' ' sub.append(('%verify', verify_str)) # Add compile and link shortcuts compile_str = (cxx_path + ' -o %t.o %s -c ' + flags_str + ' ' + compile_flags_str) link_str = (cxx_path + ' -o %t.exe %t.o ' + flags_str + ' ' + link_flags_str) assert type(link_str) is str build_str = cxx_path + ' -o %t.exe %s ' + all_flags if self.cxx.use_modules: sub.append(('%compile_module', compile_str)) sub.append(('%build_module', build_str)) elif self.cxx.modules_flags is not None: modules_str = ' '.join(self.cxx.modules_flags) + ' ' sub.append(('%compile_module', compile_str + ' ' + modules_str)) sub.append(('%build_module', build_str + ' ' + modules_str)) sub.append(('%compile', compile_str)) sub.append(('%link', link_str)) sub.append(('%build', build_str)) # Configure exec prefix substitutions. # Configure run env substitution. sub.append(('%run', '%t.exe')) # Configure not program substitutions not_py = os.path.join(self.libcudacxx_src_root, 'utils', 'not.py') not_str = '%s %s ' % (pipes.quote(sys.executable), pipes.quote(not_py)) sub.append(('not ', not_str)) if self.get_lit_conf('libcudacxx_gdb'): sub.append(('%libcxx_gdb', self.get_lit_conf('libcudacxx_gdb'))) def can_use_deployment(self): # Check if the host is on an Apple platform using clang. if not self.target_info.platform() == "darwin": return False if not self.target_info.is_host_macosx(): return False if not self.cxx.type.endswith('clang'): return False return True def configure_triple(self): # Get or infer the target triple. target_triple = self.get_lit_conf('target_triple') self.use_target = self.get_lit_bool('use_target', False) if self.use_target and target_triple: self.lit_config.warning('use_target is true but no triple is specified') # Use deployment if possible. 
self.use_deployment = not self.use_target and self.can_use_deployment() if self.use_deployment: return # Save the triple (and warn on Apple platforms). self.config.target_triple = target_triple if self.use_target and 'apple' in target_triple: self.lit_config.warning('consider using arch and platform instead' ' of target_triple on Apple platforms') # If no target triple was given, try to infer it from the compiler # under test. if not self.config.target_triple: target_triple = (self.cxx if self.cxx.type != 'nvcc' else self.host_cxx).getTriple() # Drop sub-major version components from the triple, because the # current XFAIL handling expects exact matches for feature checks. # Example: x86_64-apple-darwin14.0.0 -> x86_64-apple-darwin14 # The 5th group handles triples greater than 3 parts # (ex x86_64-pc-linux-gnu). target_triple = re.sub(r'([^-]+)-([^-]+)-([^.]+)([^-]*)(.*)', r'\1-\2-\3\5', target_triple) # linux-gnu is needed in the triple to properly identify linuxes # that use GLIBC. Handle redhat and opensuse triples as special # cases and append the missing `-gnu` portion. if (target_triple.endswith('redhat-linux') or target_triple.endswith('suse-linux')): target_triple += '-gnu' self.config.target_triple = target_triple self.lit_config.note( "inferred target_triple as: %r" % self.config.target_triple) def configure_deployment(self): assert not self.use_deployment is None assert not self.use_target is None if not self.use_deployment: # Warn about ignored parameters. if self.get_lit_conf('arch'): self.lit_config.warning('ignoring arch, using target_triple') if self.get_lit_conf('platform'): self.lit_config.warning('ignoring platform, using target_triple') return assert not self.use_target assert self.target_info.is_host_macosx() # Always specify deployment explicitly on Apple platforms, since # otherwise a platform is picked up from the SDK. If the SDK version # doesn't match the system version, tests that use the system library # may fail spuriously. arch = self.get_lit_conf('arch') if not arch: arch = (self.cxx if self.cxx.type != 'nvcc' else self.host_cxx).getTriple().split('-', 1)[0] self.lit_config.note("inferred arch as: %r" % arch) inferred_platform, name, version = self.target_info.get_platform() if inferred_platform: self.lit_config.note("inferred platform as: %r" % (name + version)) self.config.deployment = (arch, name, version) # Set the target triple for use by lit. self.config.target_triple = arch + '-apple-' + name + version self.lit_config.note( "computed target_triple as: %r" % self.config.target_triple) # If we're testing a system libc++ as opposed to the upstream LLVM one, # take the version of the system libc++ into account to compute which # features are enabled/disabled. Otherwise, disable availability markup, # which is not relevant for non-shipped flavors of libc++. if self.use_system_cxx_lib: # Dylib support for shared_mutex was added in macosx10.12. if name == 'macosx' and version in ('10.%s' % v for v in range(7, 12)): self.config.available_features.add('dylib-has-no-shared_mutex') self.lit_config.note("shared_mutex is not supported by the deployment target") # Throwing bad_optional_access, bad_variant_access and bad_any_cast is # supported starting in macosx10.14. 
            if name == 'macosx' and version in ('10.%s' % v for v in range(7, 14)):
                self.config.available_features.add('dylib-has-no-bad_optional_access')
                self.lit_config.note("throwing bad_optional_access is not supported by the deployment target")

                self.config.available_features.add('dylib-has-no-bad_variant_access')
                self.lit_config.note("throwing bad_variant_access is not supported by the deployment target")

                self.config.available_features.add('dylib-has-no-bad_any_cast')
                self.lit_config.note("throwing bad_any_cast is not supported by the deployment target")

            # Filesystem is supported on Apple platforms starting with macosx10.15.
            if name == 'macosx' and version in ('10.%s' % v for v in range(7, 15)):
                self.config.available_features.add('dylib-has-no-filesystem')
                self.lit_config.note("the deployment target does not support <filesystem>")
        else:
            self.cxx.flags += ['-D_LIBCUDACXX_DISABLE_AVAILABILITY']

    def configure_env(self):
        self.target_info.configure_env(self.exec_env)

    def add_path(self, dest_env, new_path):
        if 'PATH' not in dest_env:
            dest_env['PATH'] = new_path
        else:
            split_char = ';' if self.is_windows else ':'
            dest_env['PATH'] = '%s%s%s' % (new_path, split_char,
                                           dest_env['PATH'])
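
# A minimal sketch of how a lit config typically drives this class; the exact
# lit.cfg contents in a given checkout may differ:
#
#   import libcudacxx.test.config
#   configuration = libcudacxx.test.config.Configuration(lit_config, config)
#   configuration.configure()
#   configuration.print_config_info()
#   config.test_format = configuration.get_test_format()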
cccl-main
libcudacxx/.upstream-tests/utils/libcudacxx/test/config.py
cccl-main
libcudacxx/.upstream-tests/utils/libcudacxx/test/__init__.py
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

import copy
import errno
import os
import time
import random

import lit.Test        # pylint: disable=import-error
import lit.TestRunner  # pylint: disable=import-error
from lit.TestRunner import ParserKind, IntegratedTestKeywordParser  # pylint: disable=import-error

from libcudacxx.test.executor import LocalExecutor
import libcudacxx.util


class LibcxxTestFormat(object):
    """
    Custom test format handler for use with the test format used by libc++.

    Tests fall into the following categories:
      FOO.pass.cpp - Executable test which should compile, run, and exit with
                     code 0.
      FOO.fail.cpp - Negative test case which is expected to fail compilation.
      FOO.runfail.cpp - Negative test case which is expected to compile, run,
                        and exit with a non-zero exit code.
      FOO.sh.cpp   - A test that uses LIT's ShTest format.
    """

    def __init__(self, cxx, use_verify_for_fail, execute_external,
                 executor, exec_env):
        self.cxx = copy.deepcopy(cxx)
        self.use_verify_for_fail = use_verify_for_fail
        self.execute_external = execute_external
        self.executor = executor
        self.exec_env = dict(exec_env)

    @staticmethod
    def _make_custom_parsers():
        return [
            IntegratedTestKeywordParser('FLAKY_TEST.', ParserKind.TAG,
                                        initial_value=False),
            IntegratedTestKeywordParser('MODULES_DEFINES:', ParserKind.LIST,
                                        initial_value=[])
        ]

    @staticmethod
    def _get_parser(key, parsers):
        for p in parsers:
            if p.keyword == key:
                return p
        assert False, "parser not found"

    # TODO: Move this into lit's FileBasedTest
    def getTestsInDirectory(self, testSuite, path_in_suite,
                            litConfig, localConfig):
        source_path = testSuite.getSourcePath(path_in_suite)
        for filename in os.listdir(source_path):
            # Ignore dot files and excluded tests.
            if filename.startswith('.') or filename in localConfig.excludes:
                continue

            filepath = os.path.join(source_path, filename)
            if not os.path.isdir(filepath):
                if any([filename.endswith(ext)
                        for ext in localConfig.suffixes]):
                    yield lit.Test.Test(testSuite, path_in_suite + (filename,),
                                        localConfig)

    def execute(self, test, lit_config):
        while True:
            try:
                return self._execute(test, lit_config)
            except OSError as oe:
                if oe.errno != errno.ETXTBSY:
                    raise
                time.sleep(0.1)

    def _execute(self, test, lit_config):
        name = test.path_in_suite[-1]
        name_root, name_ext = os.path.splitext(name)
        is_libcxx_test = test.path_in_suite[0] == 'libcxx'
        is_sh_test = name_root.endswith('.sh')
        is_pass_test = name.endswith('.pass.cpp') or name.endswith('.pass.mm')
        is_fail_test = name.endswith('.fail.cpp') or name.endswith('.fail.mm')
        is_runfail_test = name.endswith('.runfail.cpp') or name.endswith('.runfail.mm')
        is_objcxx_test = name.endswith('.mm')
        is_objcxx_arc_test = name.endswith('.arc.pass.mm') or \
                             name.endswith('.arc.fail.mm')
        assert is_sh_test or name_ext == '.cpp' or name_ext == '.mm', \
            'non-cpp file must be sh test'

        if test.config.unsupported:
            return (lit.Test.UNSUPPORTED,
                    "A lit.local.cfg marked this unsupported")

        if is_objcxx_test and \
           'objective-c++' not in test.config.available_features:
            return (lit.Test.UNSUPPORTED, "Objective-C++ is not supported")

        parsers = self._make_custom_parsers()
        script = lit.TestRunner.parseIntegratedTestScript(
            test, additional_parsers=parsers, require_script=is_sh_test)
        # Check if a result for the test was returned. If so return that
        # result.
        if isinstance(script, lit.Test.Result):
            return script
        if lit_config.noExecute:
            # If we expect the test to fail at runtime, XFAIL is the proper
            # return value when we never run the test.
            if test.xfails:
                return lit.Test.Result(lit.Test.XFAIL)
            return lit.Test.Result(lit.Test.PASS)

        # Check that we don't have run lines on tests that don't support them.
        if not is_sh_test and len(script) != 0:
            lit_config.fatal('Unsupported RUN line found in test %s' % name)

        tmpDir, tmpBase = lit.TestRunner.getTempPaths(test)
        substitutions = lit.TestRunner.getDefaultSubstitutions(test, tmpDir,
                                                               tmpBase)
        script = lit.TestRunner.applySubstitutions(script, substitutions)

        test_cxx = copy.deepcopy(self.cxx)
        if is_fail_test:
            test_cxx.useCCache(False)
            test_cxx.useWarnings(False)
        extra_modules_defines = self._get_parser('MODULES_DEFINES:',
                                                 parsers).getValue()
        if '-fmodules' in test.config.available_features:
            test_cxx.compile_flags += [('-D%s' % mdef.strip()) for
                                       mdef in extra_modules_defines]
            test_cxx.addWarningFlagIfSupported('-Wno-macro-redefined')
            # FIXME: libc++ debug tests #define _LIBCUDACXX_ASSERT to override
            # it. If we see this we need to build the test against uniquely
            # built modules.
            if is_libcxx_test:
                with open(test.getSourcePath(), 'rb') as f:
                    contents = f.read()
                if b'#define _LIBCUDACXX_ASSERT' in contents:
                    test_cxx.useModules(False)

        if is_objcxx_test:
            test_cxx.source_lang = 'objective-c++'
            if is_objcxx_arc_test:
                test_cxx.compile_flags += ['-fobjc-arc']
            else:
                test_cxx.compile_flags += ['-fno-objc-arc']
            test_cxx.link_flags += ['-framework', 'Foundation']

        # Dispatch the test based on its suffix.
        if is_sh_test:
            if not isinstance(self.executor, LocalExecutor):
                # We can't run ShTest tests with an executor yet.
                # For now, bail on trying to run them.
                return lit.Test.UNSUPPORTED, 'ShTest format not yet supported'
            test.config.environment = dict(self.exec_env)
            return lit.TestRunner._runShTest(test, lit_config,
                                             self.execute_external, script,
                                             tmpBase)
        elif is_fail_test:
            return self._evaluate_fail_test(test, test_cxx, parsers)
        elif is_pass_test:
            return self._evaluate_pass_test(test, tmpBase, lit_config,
                                            test_cxx, parsers)
        elif is_runfail_test:
            return self._evaluate_pass_test(test, tmpBase, lit_config,
                                            test_cxx, parsers,
                                            run_should_pass=False)
        else:
            # No other test type is supported
            assert False

    def _clean(self, exec_path):  # pylint: disable=no-self-use
        libcudacxx.util.cleanFile(exec_path)

    def _evaluate_pass_test(self, test, tmpBase, lit_config,
                            test_cxx, parsers, run_should_pass=True):
        execDir = os.path.dirname(test.getExecPath())
        source_path = test.getSourcePath()
        exec_path = tmpBase + '.exe'
        object_path = tmpBase + '.o'
        # Create the output directory if it does not already exist.
        libcudacxx.util.mkdir_p(os.path.dirname(tmpBase))
        try:
            # Compile the test
            cmd, out, err, rc = test_cxx.compileLinkTwoSteps(
                source_path, out=exec_path, object_file=object_path,
                cwd=execDir)
            compile_cmd = cmd
            if rc != 0:
                report = libcudacxx.util.makeReport(cmd, out, err, rc)
                report += "Compilation failed unexpectedly!"
                return lit.Test.Result(lit.Test.FAIL, report)
            # Run the test
            local_cwd = os.path.dirname(source_path)
            env = None
            if self.exec_env:
                env = self.exec_env
            # TODO: Only list actually needed files in file_deps.
            # Right now we just mark all of the .dat files in the same
            # directory as dependencies, but it's likely less than that. We
            # should add a `// FILE-DEP: foo.dat` to each test to track this.
            data_files = [os.path.join(local_cwd, f)
                          for f in os.listdir(local_cwd) if f.endswith('.dat')]
            is_flaky = self._get_parser('FLAKY_TEST.', parsers).getValue()
            max_retry = 3 if is_flaky else 1
            # NOTE: a 'for retry_count in range(max_retry)' loop cannot grow
            # the retry budget below, so use an explicit counter instead.
            retry_count = 0
            while True:
                cmd, out, err, rc = self.executor.run(exec_path, [exec_path],
                                                      local_cwd, data_files,
                                                      env)
                report = "Compiled With: '%s'\n" % ' '.join(compile_cmd)
                report += libcudacxx.util.makeReport(cmd, out, err, rc)
                result_expected = (rc == 0) == run_should_pass
                if result_expected:
                    res = lit.Test.PASS if retry_count == 0 else lit.Test.FLAKYPASS
                    return lit.Test.Result(res, report)
                # Rarely devices are unavailable, so just restart the test to
                # avoid false negatives.
                elif rc != 0 and "cudaErrorDevicesUnavailable" in out and max_retry <= 5:
                    max_retry += 1
                retry_count += 1
                if retry_count >= max_retry:
                    if run_should_pass:
                        report += "Compiled test failed unexpectedly!"
                    else:
                        report += "Compiled test succeeded unexpectedly!"
                    return lit.Test.Result(lit.Test.FAIL, report)
        finally:
            # Note that cleanup of exec_file happens in `_clean()`. If you
            # override this, cleanup is your responsibility.
            libcudacxx.util.cleanFile(object_path)
            self._clean(exec_path)

    def _evaluate_fail_test(self, test, test_cxx, parsers):
        source_path = test.getSourcePath()
        # FIXME: lift this detection into LLVM/LIT.
        with open(source_path, 'rb') as f:
            contents = f.read()
        verify_tags = [b'expected-note', b'expected-remark',
                       b'expected-warning', b'expected-error',
                       b'expected-no-diagnostics']
        use_verify = self.use_verify_for_fail and \
            any([tag in contents for tag in verify_tags])
        # FIXME(EricWF): GCC 5 does not evaluate static assertions that
        # are dependent on a template parameter when '-fsyntax-only' is passed.
        # This is fixed in GCC 6. However for now we only pass "-fsyntax-only"
        # when using Clang.
        if test_cxx.type != 'gcc' and test_cxx.type != 'nvcc':
            test_cxx.flags += ['-fsyntax-only']
        if use_verify:
            test_cxx.useVerify()
            test_cxx.useWarnings()
            if '-Wuser-defined-warnings' in test_cxx.warning_flags:
                test_cxx.warning_flags += ['-Wno-error=user-defined-warnings']
        else:
            # We still need to enable certain warnings on .fail.cpp tests when
            # -verify isn't enabled, such as -Werror=unused-result. However,
            # we don't want them enabled too liberally, which might
            # incorrectly allow unrelated failure tests to 'pass'.
            #
            # Therefore, we check whether the test was expected to fail
            # because of nodiscard before enabling the warning.
            test_str_list = [b'ignoring return value', b'nodiscard',
                             b'NODISCARD']
            if any(test_str in contents for test_str in test_str_list):
                test_cxx.flags += ['-Werror=unused-result']
        cmd, out, err, rc = test_cxx.compile(source_path, out=os.devnull)
        check_rc = lambda rc: rc == 0 if use_verify else rc != 0
        report = libcudacxx.util.makeReport(cmd, out, err, rc)
        if check_rc(rc):
            return lit.Test.Result(lit.Test.PASS, report)
        else:
            report += ('Expected compilation to fail!\n' if not use_verify
                       else 'Expected compilation using verify to pass!\n')
            return lit.Test.Result(lit.Test.FAIL, report)
cccl-main
libcudacxx/.upstream-tests/utils/libcudacxx/test/format.py
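A quick standalone sketch of the filename-suffix convention that _execute dispatches on; classify_test is a hypothetical illustration of those rules, not part of the original module:

import os.path

def classify_test(name):
    """Map a libc++-style test filename to its dispatch category."""
    root, _ = os.path.splitext(name)
    if root.endswith('.sh'):
        return 'sh'
    if name.endswith(('.pass.cpp', '.pass.mm')):
        return 'pass'
    if name.endswith(('.fail.cpp', '.fail.mm')):
        return 'fail'
    if name.endswith(('.runfail.cpp', '.runfail.mm')):
        return 'runfail'
    raise ValueError('unrecognized test name: %s' % name)

print(classify_test('span.pass.cpp'))   # pass
print(classify_test('build.sh.cpp'))    # sh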
from __future__ import absolute_import
import os
import subprocess
import sys

import lit.Test
import lit.TestRunner
import lit.util
from lit.formats.base import TestFormat

kIsWindows = sys.platform in ['win32', 'cygwin']

class GoogleBenchmark(TestFormat):
    def __init__(self, test_sub_dirs, test_suffix, benchmark_args=[]):
        self.benchmark_args = list(benchmark_args)
        self.test_sub_dirs = os.path.normcase(str(test_sub_dirs)).split(';')

        # On Windows, assume tests will also end in '.exe'.
        exe_suffix = str(test_suffix)
        if kIsWindows:
            exe_suffix += '.exe'

        # Also check for .py files for testing purposes.
        self.test_suffixes = {exe_suffix, test_suffix + '.py'}

    def getBenchmarkTests(self, path, litConfig, localConfig):
        """getBenchmarkTests(path) - [name]

        Return the tests available in a gtest executable.

        Args:
          path: String path to a gtest executable
          litConfig: LitConfig instance
          localConfig: TestingConfig instance"""

        # TODO: allow splitting tests according to the "benchmark family" so
        # the output for a single family of tests all belongs to the same test
        # target.
        list_test_cmd = [path, '--benchmark_list_tests']
        try:
            output = subprocess.check_output(list_test_cmd,
                                             env=localConfig.environment)
        except subprocess.CalledProcessError as exc:
            litConfig.warning(
                "unable to discover google-benchmarks in %r: %s. Process output: %s"
                % (path, sys.exc_info()[1], exc.output))
            # Raising StopIteration inside a generator is an error under
            # PEP 479; end the generator with a plain return instead.
            return

        nested_tests = []
        for ln in output.splitlines(False):  # Don't keep newlines.
            ln = lit.util.to_string(ln)
            if not ln.strip():
                continue

            index = 0
            while ln[index * 2:index * 2 + 2] == '  ':
                index += 1
            while len(nested_tests) > index:
                nested_tests.pop()

            ln = ln[index * 2:]
            if ln.endswith('.'):
                nested_tests.append(ln)
            elif any([name.startswith('DISABLED_')
                      for name in nested_tests + [ln]]):
                # Gtest will internally skip these tests. No need to launch a
                # child process for it.
                continue
            else:
                yield ''.join(nested_tests) + ln

    def getTestsInDirectory(self, testSuite, path_in_suite,
                            litConfig, localConfig):
        source_path = testSuite.getSourcePath(path_in_suite)
        for subdir in self.test_sub_dirs:
            dir_path = os.path.join(source_path, subdir)
            if not os.path.isdir(dir_path):
                continue
            for fn in lit.util.listdir_files(dir_path,
                                             suffixes=self.test_suffixes):
                # Discover the tests in this executable.
                execpath = os.path.join(source_path, subdir, fn)
                testnames = self.getBenchmarkTests(execpath, litConfig,
                                                   localConfig)
                for testname in testnames:
                    testPath = path_in_suite + (subdir, fn, testname)
                    yield lit.Test.Test(testSuite, testPath, localConfig,
                                        file_path=execpath)

    def execute(self, test, litConfig):
        testPath, testName = os.path.split(test.getSourcePath())
        while not os.path.exists(testPath):
            # Handle GTest parametrized and typed tests, whose name includes
            # some '/'s.
            testPath, namePrefix = os.path.split(testPath)
            testName = namePrefix + '/' + testName

        cmd = [testPath, '--benchmark_filter=%s$' % testName
               ] + self.benchmark_args

        if litConfig.noExecute:
            return lit.Test.PASS, ''

        try:
            out, err, exitCode = lit.util.executeCommand(
                cmd, env=test.config.environment,
                timeout=litConfig.maxIndividualTestTime)
        except lit.util.ExecuteCommandTimeoutException:
            return (lit.Test.TIMEOUT,
                    'Reached timeout of {} seconds'.format(
                        litConfig.maxIndividualTestTime))

        if exitCode:
            return lit.Test.FAIL, ('exit code: %d\n' % exitCode) + out + err

        passing_test_line = testName
        if passing_test_line not in out:
            msg = ('Unable to find %r in google benchmark output:\n\n%s%s' %
                   (passing_test_line, out, err))
            return lit.Test.UNRESOLVED, msg
        return lit.Test.PASS, err + out
cccl-main
libcudacxx/.upstream-tests/utils/libcudacxx/test/googlebenchmark.py
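An illustrative sketch of the two-space indentation handling in getBenchmarkTests: group names end with '.', and each indent level nests one group deeper. The sample listing is made up; this standalone helper only mirrors the flattening logic above:

def flatten_listing(lines):
    """Yield fully qualified names from a two-space-indented listing."""
    stack = []
    for ln in lines:
        depth = 0
        while ln[depth * 2:depth * 2 + 2] == '  ':
            depth += 1
        del stack[depth:]          # drop groups deeper than this entry
        ln = ln[depth * 2:]
        if ln.endswith('.'):       # group names end with '.'
            stack.append(ln)
        else:
            yield ''.join(stack) + ln

print(list(flatten_listing(['BM_Sort.', '  Random', '  Sorted'])))
# -> ['BM_Sort.Random', 'BM_Sort.Sorted']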
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

import platform
import os

from libcudacxx.test import tracing
from libcudacxx.util import executeCommand

class Executor(object):
    def run(self, exe_path, cmd, local_cwd, file_deps=None, env=None):
        """Execute a command.
            Be very careful not to change shared state in this function.
            Executor objects are shared between python processes in `lit -jN`.
        Args:
            exe_path: str:    Local path to the executable to be run
            cmd: [str]:       subprocess.call style command
            local_cwd: str:   Local path to the working directory
            file_deps: [str]: Files required by the test
            env: {str: str}:  Environment variables to execute under
        Returns:
            cmd, out, err, exitCode
        """
        raise NotImplementedError

class LocalExecutor(Executor):
    def __init__(self):
        super(LocalExecutor, self).__init__()
        self.is_windows = platform.system() == 'Windows'
        # Default to "no timeout"; callers may overwrite this attribute.
        # (Previously `run` read `self.timeout` without it ever being set.)
        self.timeout = 0

    def run(self, exe_path, cmd=None, work_dir='.', file_deps=None, env=None):
        cmd = cmd or [exe_path]
        if work_dir == '.':
            work_dir = os.getcwd()
        out, err, rc = executeCommand(cmd, cwd=work_dir, env=env,
                                      timeout=self.timeout)
        return (cmd, out, err, rc)

class NoopExecutor(Executor):
    def __init__(self):
        super(NoopExecutor, self).__init__()

    def run(self, exe_path, cmd=None, work_dir='.', file_deps=None, env=None):
        return (cmd, '', '', 0)

class PrefixExecutor(Executor):
    """Prefix an executor with some other command wrapper.

    Most useful for setting ulimits on commands, or running an emulator like
    qemu and valgrind.
    """
    def __init__(self, commandPrefix, chain):
        super(PrefixExecutor, self).__init__()

        self.commandPrefix = commandPrefix
        self.chain = chain

    def run(self, exe_path, cmd=None, work_dir='.', file_deps=None, env=None):
        cmd = cmd or [exe_path]
        return self.chain.run(exe_path, self.commandPrefix + cmd, work_dir,
                              file_deps, env=env)

class PostfixExecutor(Executor):
    """Postfix an executor with some args."""
    def __init__(self, commandPostfix, chain):
        super(PostfixExecutor, self).__init__()

        self.commandPostfix = commandPostfix
        self.chain = chain

    def run(self, exe_path, cmd=None, work_dir='.', file_deps=None, env=None):
        cmd = cmd or [exe_path]
        # Forward exe_path to the chained executor; it was previously being
        # dropped, which shifted every following argument by one position.
        return self.chain.run(exe_path, cmd + self.commandPostfix, work_dir,
                              file_deps, env=env)

class TimeoutExecutor(PrefixExecutor):
    """Execute another action under a timeout.

    Deprecated. http://reviews.llvm.org/D6584 adds timeouts to LIT.
    """
    def __init__(self, duration, chain):
        super(TimeoutExecutor, self).__init__(
            ['timeout', duration], chain)

class RemoteExecutor(Executor):
    def __init__(self):
        self.local_run = executeCommand

    def remote_temp_dir(self):
        return self._remote_temp(True)

    def remote_temp_file(self):
        return self._remote_temp(False)

    def _remote_temp(self, is_dir):
        raise NotImplementedError()

    def copy_in(self, local_srcs, remote_dsts):
        # This could be wrapped up in a tar->scp->untar for performance
        # if there are lots of files to be copied/moved
        for src, dst in zip(local_srcs, remote_dsts):
            self._copy_in_file(src, dst)

    def _copy_in_file(self, src, dst):
        raise NotImplementedError()

    def delete_remote(self, remote):
        try:
            self._execute_command_remote(['rm', '-rf', remote])
        except OSError:
            # TODO: Log failure to delete?
pass def run(self, exe_path, cmd=None, work_dir='.', file_deps=None, env=None): target_exe_path = None target_cwd = None try: target_cwd = self.remote_temp_dir() target_exe_path = os.path.join(target_cwd, 'libcxx_test.exe') if cmd: # Replace exe_path with target_exe_path. cmd = [c if c != exe_path else target_exe_path for c in cmd] else: cmd = [target_exe_path] srcs = [exe_path] dsts = [target_exe_path] if file_deps is not None: dev_paths = [os.path.join(target_cwd, os.path.basename(f)) for f in file_deps] srcs.extend(file_deps) dsts.extend(dev_paths) self.copy_in(srcs, dsts) # TODO(jroelofs): capture the copy_in and delete_remote commands, # and conjugate them with '&&'s around the first tuple element # returned here: return self._execute_command_remote(cmd, target_cwd, env) finally: if target_cwd: self.delete_remote(target_cwd) def _execute_command_remote(self, cmd, remote_work_dir='.', env=None): raise NotImplementedError() class SSHExecutor(RemoteExecutor): def __init__(self, host, username=None): super(SSHExecutor, self).__init__() self.user_prefix = username + '@' if username else '' self.host = host self.scp_command = 'scp' self.ssh_command = 'ssh' # TODO(jroelofs): switch this on some -super-verbose-debug config flag if False: self.local_run = tracing.trace_function( self.local_run, log_calls=True, log_results=True, label='ssh_local') def _remote_temp(self, is_dir): # TODO: detect what the target system is, and use the correct # mktemp command for it. (linux and darwin differ here, and I'm # sure windows has another way to do it) # Not sure how to do suffix on osx yet dir_arg = '-d' if is_dir else '' cmd = 'mktemp -q {} /tmp/libcxx.XXXXXXXXXX'.format(dir_arg) _, temp_path, err, exitCode = self._execute_command_remote([cmd]) temp_path = temp_path.strip() if exitCode != 0: raise RuntimeError(err) return temp_path def _copy_in_file(self, src, dst): scp = self.scp_command remote = self.host remote = self.user_prefix + remote cmd = [scp, '-p', src, remote + ':' + dst] self.local_run(cmd) def _execute_command_remote(self, cmd, remote_work_dir='.', env=None): remote = self.user_prefix + self.host ssh_cmd = [self.ssh_command, '-oBatchMode=yes', remote] if env: env_cmd = ['env'] + ['%s="%s"' % (k, v) for k, v in env.items()] else: env_cmd = [] remote_cmd = ' '.join(env_cmd + cmd) if remote_work_dir != '.': remote_cmd = 'cd ' + remote_work_dir + ' && ' + remote_cmd out, err, rc = self.local_run(ssh_cmd + [remote_cmd]) return (remote_cmd, out, err, rc)
cccl-main
libcudacxx/.upstream-tests/utils/libcudacxx/test/executor.py
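The executor classes above compose by chaining. A minimal sketch, assuming the module is importable as libcudacxx.test.executor and that the `timeout` and `valgrind` tools are on PATH (both assumptions, not requirements of the original file):

from libcudacxx.test.executor import (LocalExecutor, PrefixExecutor,
                                      TimeoutExecutor)

# The outer wrapper adds its prefix first and hands the result to the chain,
# so the innermost LocalExecutor ultimately runs:
#   ['valgrind', '-q', 'timeout', '60s', '/path/to/test.exe']
executor = TimeoutExecutor('60s',
                           PrefixExecutor(['valgrind', '-q'],
                                          LocalExecutor()))
# Hypothetical invocation (the binary path is made up):
# cmd, out, err, rc = executor.run('/path/to/test.exe')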
#===----------------------------------------------------------------------===//
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===//

import importlib
import locale
import os
import platform
import re
import subprocess
import sys

from libcudacxx.util import executeCommand

class DefaultTargetInfo(object):
    def __init__(self, full_config):
        self.full_config = full_config

    def platform(self):
        return sys.platform.lower().strip()

    def add_locale_features(self, features):
        self.full_config.lit_config.warning(
            "No locales entry for target_system: %s" % self.platform())

    def add_cxx_compile_flags(self, flags): pass
    def add_cxx_link_flags(self, flags): pass
    def configure_env(self, env): pass
    def allow_cxxabi_link(self): return True
    def add_sanitizer_features(self, sanitizer_type, features): pass
    def use_lit_shell_default(self): return False

def test_locale(loc):
    assert loc is not None
    default_locale = locale.setlocale(locale.LC_ALL)
    try:
        locale.setlocale(locale.LC_ALL, loc)
        return True
    except locale.Error:
        return False
    finally:
        locale.setlocale(locale.LC_ALL, default_locale)

def add_common_locales(features, lit_config, is_windows=False):
    # A list of locales needed by the test-suite.
    # The list uses the canonical name for the locale used in the test-suite.
    # TODO: On Linux, ISO8859 *may* need to be hyphenated.
    locales = [
        ('en_US.UTF-8', 'English_United States.1252'),
        ('fr_FR.UTF-8', 'French_France.1252'),
        ('ru_RU.UTF-8', 'Russian_Russia.1251'),
        ('zh_CN.UTF-8', 'Chinese_China.936'),
        ('fr_CA.ISO8859-1', 'French_Canada.1252'),
        ('cs_CZ.ISO8859-2', 'Czech_Czech Republic.1250')
    ]
    for loc_id, windows_loc_name in locales:
        loc_name = windows_loc_name if is_windows else loc_id
        if test_locale(loc_name):
            features.add('locale.{0}'.format(loc_id))
        else:
            lit_config.warning('The locale {0} is not supported by '
                               'your platform. Some tests will be '
                               'unsupported.'.format(loc_name))

class DarwinLocalTI(DefaultTargetInfo):
    def __init__(self, full_config):
        super(DarwinLocalTI, self).__init__(full_config)

    def is_host_macosx(self):
        # universal_newlines=True decodes the output to str under Python 3,
        # so the comparison below does not silently compare bytes to str.
        name = subprocess.check_output(['sw_vers', '-productName'],
                                       universal_newlines=True).strip()
        return name == "Mac OS X"

    def get_macosx_version(self):
        assert self.is_host_macosx()
        version = subprocess.check_output(
            ['sw_vers', '-productVersion'], universal_newlines=True).strip()
        version = re.sub(r'([0-9]+\.[0-9]+)(\..*)?', r'\1', version)
        return version

    def get_sdk_version(self, name):
        assert self.is_host_macosx()
        cmd = ['xcrun', '--sdk', name, '--show-sdk-path']
        try:
            out = subprocess.check_output(cmd,
                                          universal_newlines=True).strip()
        except OSError:
            # Leave `out` defined so the check below reports the failure
            # instead of raising a NameError.
            out = None
        if not out:
            self.full_config.lit_config.fatal(
                "cannot infer sdk version with: %r" % cmd)
        return re.sub(r'.*/[^0-9]+([0-9.]+)\.sdk', r'\1', out)

    def get_platform(self):
        platform = self.full_config.get_lit_conf('platform')
        if platform:
            platform = re.sub(r'([^0-9]+)([0-9\.]*)', r'\1-\2', platform)
            name, version = tuple(platform.split('-', 1))
        else:
            name = 'macosx'
            version = None

        if version:
            return (False, name, version)

        # Infer the version, either from the SDK or the system itself. For
        # macosx, ignore the SDK version; what matters is what's at
        # /usr/lib/libc++.dylib.
if name == 'macosx': version = self.get_macosx_version() else: version = self.get_sdk_version(name) return (True, name, version) def add_locale_features(self, features): add_common_locales(features, self.full_config.lit_config) def add_cxx_compile_flags(self, flags): if self.full_config.use_deployment: _, name, _ = self.full_config.config.deployment cmd = ['xcrun', '--sdk', name, '--show-sdk-path'] else: cmd = ['xcrun', '--show-sdk-path'] out, err, exit_code = executeCommand(cmd) if exit_code != 0: self.full_config.lit_config.warning("Could not determine macOS SDK path! stderr was " + err) if exit_code == 0 and out: sdk_path = out.strip() self.full_config.lit_config.note('using SDKROOT: %r' % sdk_path) assert isinstance(sdk_path, str) flags += ["-isysroot", sdk_path] def add_cxx_link_flags(self, flags): flags += ['-lSystem'] def configure_env(self, env): library_paths = [] # Configure the library path for libc++ if self.full_config.cxx_runtime_root: library_paths += [self.full_config.cxx_runtime_root] elif self.full_config.use_system_cxx_lib: if (os.path.isdir(str(self.full_config.use_system_cxx_lib))): library_paths += [self.full_config.use_system_cxx_lib] # Configure the abi library path if self.full_config.abi_library_root: library_paths += [self.full_config.abi_library_root] if library_paths: env['DYLD_LIBRARY_PATH'] = ':'.join(library_paths) def allow_cxxabi_link(self): # Don't link libc++abi explicitly on OS X because the symbols # should be available in libc++ directly. return False class FreeBSDLocalTI(DefaultTargetInfo): def __init__(self, full_config): super(FreeBSDLocalTI, self).__init__(full_config) def add_locale_features(self, features): add_common_locales(features, self.full_config.lit_config) def add_cxx_link_flags(self, flags): flags += ['-lc', '-lm', '-lpthread', '-lgcc_s', '-lcxxrt'] class NetBSDLocalTI(DefaultTargetInfo): def __init__(self, full_config): super(NetBSDLocalTI, self).__init__(full_config) def add_locale_features(self, features): add_common_locales(features, self.full_config.lit_config) def add_cxx_link_flags(self, flags): flags += ['-lc', '-lm', '-lpthread', '-lgcc_s', '-lc++abi', '-lunwind'] class LinuxLocalTI(DefaultTargetInfo): def __init__(self, full_config): super(LinuxLocalTI, self).__init__(full_config) def platform(self): return 'linux' def add_locale_features(self, features): add_common_locales(features, self.full_config.lit_config) def add_cxx_compile_flags(self, flags): flags += ['-D__STDC_FORMAT_MACROS', '-D__STDC_LIMIT_MACROS', '-D__STDC_CONSTANT_MACROS'] def add_cxx_link_flags(self, flags): enable_threads = ('libcpp-has-no-threads' not in self.full_config.config.available_features) llvm_unwinder = self.full_config.get_lit_bool('llvm_unwinder', False) shared_libcxx = self.full_config.get_lit_bool('enable_shared', True) flags += ['-lm'] if not llvm_unwinder: flags += ['-lgcc_s', '-lgcc'] if enable_threads: flags += ['-lpthread'] if not shared_libcxx: flags += ['-lrt'] flags += ['-lc'] if llvm_unwinder: flags += ['-lunwind', '-ldl'] else: flags += ['-lgcc_s'] builtins_lib = self.full_config.get_lit_conf('builtins_library') if builtins_lib: flags += [builtins_lib] else: flags += ['-lgcc'] use_libatomic = self.full_config.get_lit_bool('use_libatomic', False) if use_libatomic: flags += ['-latomic'] san = self.full_config.get_lit_conf('use_sanitizer', '').strip() if san: # The libraries and their order are taken from the # linkSanitizerRuntimeDeps function in # clang/lib/Driver/Tools.cpp flags += ['-lpthread', '-lrt', '-lm', '-ldl'] class 
WindowsLocalTI(DefaultTargetInfo): def __init__(self, full_config): super(WindowsLocalTI, self).__init__(full_config) def add_locale_features(self, features): add_common_locales(features, self.full_config.lit_config, is_windows=True) def use_lit_shell_default(self): # Default to the internal shell on Windows, as bash on Windows is # usually very slow. return True def make_target_info(full_config): default = "libcudacxx.test.target_info.LocalTI" info_str = full_config.get_lit_conf('target_info', default) if info_str != default: mod_path, _, info = info_str.rpartition('.') mod = importlib.import_module(mod_path) target_info = getattr(mod, info)(full_config) full_config.lit_config.note("inferred target_info as: %r" % info_str) return target_info target_system = platform.system() if target_system == 'Darwin': return DarwinLocalTI(full_config) if target_system == 'FreeBSD': return FreeBSDLocalTI(full_config) if target_system == 'NetBSD': return NetBSDLocalTI(full_config) if target_system == 'Linux': return LinuxLocalTI(full_config) if target_system == 'Windows': return WindowsLocalTI(full_config) return DefaultTargetInfo(full_config)
cccl-main
libcudacxx/.upstream-tests/utils/libcudacxx/test/target_info.py
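make_target_info above resolves a dotted "module.Class" string at runtime via importlib. A self-contained sketch of that pattern, using a standard-library class as the example target:

import importlib

def load_class(dotted_name):
    """Resolve 'pkg.module.Class' to the class object at runtime."""
    mod_path, _, cls_name = dotted_name.rpartition('.')
    return getattr(importlib.import_module(mod_path), cls_name)

print(load_class('collections.OrderedDict'))  # <class 'collections.OrderedDict'>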
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

import ast
import distutils.spawn
import sys
import re
import libcudacxx.util

from pprint import pformat

def read_syms_from_list(slist):
    """
    Read a list of symbols from a list of strings.
    Each string is one symbol.
    """
    return [ast.literal_eval(l) for l in slist]

def read_syms_from_file(filename):
    """
    Read a list of symbols in from a file.
    """
    with open(filename, 'r') as f:
        data = f.read()
    return read_syms_from_list(data.splitlines())

def read_blacklist(filename):
    with open(filename, 'r') as f:
        data = f.read()
    lines = [l.strip() for l in data.splitlines() if l.strip()]
    lines = [l for l in lines if not l.startswith('#')]
    return lines

def write_syms(sym_list, out=None, names_only=False, filter=None):
    """
    Write a list of symbols to the file named by out.
    """
    out_str = ''
    out_list = sym_list
    out_list.sort(key=lambda x: x['name'])
    if filter is not None:
        out_list = filter(out_list)
    if names_only:
        out_list = [sym['name'] for sym in out_list]
    for sym in out_list:
        # Use pformat for consistent ordering of keys.
        out_str += pformat(sym, width=100000) + '\n'
    if out is None:
        sys.stdout.write(out_str)
    else:
        with open(out, 'w') as f:
            f.write(out_str)

_cppfilt_exe = distutils.spawn.find_executable('c++filt')

def demangle_symbol(symbol):
    if _cppfilt_exe is None:
        return symbol
    out, _, exit_code = libcudacxx.util.executeCommandVerbose(
        [_cppfilt_exe], input=symbol)
    if exit_code != 0:
        return symbol
    return out

def is_elf(filename):
    with open(filename, 'rb') as f:
        magic_bytes = f.read(4)
    return magic_bytes == b'\x7fELF'

def is_mach_o(filename):
    with open(filename, 'rb') as f:
        magic_bytes = f.read(4)
    # The file is opened in binary mode, so compare against bytes literals;
    # comparing bytes against str would never match under Python 3.
    return magic_bytes in [
        b'\xfe\xed\xfa\xce',  # MH_MAGIC
        b'\xce\xfa\xed\xfe',  # MH_CIGAM
        b'\xfe\xed\xfa\xcf',  # MH_MAGIC_64
        b'\xcf\xfa\xed\xfe',  # MH_CIGAM_64
        b'\xca\xfe\xba\xbe',  # FAT_MAGIC
        b'\xbe\xba\xfe\xca'   # FAT_CIGAM
    ]

def is_library_file(filename):
    if sys.platform == 'darwin':
        return is_mach_o(filename)
    else:
        return is_elf(filename)

def extract_or_load(filename):
    import libcudacxx.sym_check.extract
    if is_library_file(filename):
        return libcudacxx.sym_check.extract.extract_symbols(filename)
    return read_syms_from_file(filename)

def adjust_mangled_name(name):
    if not name.startswith('__Z'):
        return name
    return name[1:]

new_delete_std_symbols = [
    '_Znam', '_Znwm', '_ZdaPv', '_ZdaPvm', '_ZdlPv', '_ZdlPvm'
]

cxxabi_symbols = [
    '___dynamic_cast', '___gxx_personality_v0',
    '_ZTIDi', '_ZTIDn', '_ZTIDs',
    '_ZTIPDi', '_ZTIPDn', '_ZTIPDs',
    '_ZTIPKDi', '_ZTIPKDn', '_ZTIPKDs',
    '_ZTIPKa', '_ZTIPKb', '_ZTIPKc', '_ZTIPKd', '_ZTIPKe', '_ZTIPKf',
    '_ZTIPKh', '_ZTIPKi', '_ZTIPKj', '_ZTIPKl', '_ZTIPKm', '_ZTIPKs',
    '_ZTIPKt', '_ZTIPKv', '_ZTIPKw', '_ZTIPKx', '_ZTIPKy',
    '_ZTIPa', '_ZTIPb', '_ZTIPc', '_ZTIPd', '_ZTIPe', '_ZTIPf',
    '_ZTIPh', '_ZTIPi', '_ZTIPj', '_ZTIPl', '_ZTIPm', '_ZTIPs',
    '_ZTIPt', '_ZTIPv', '_ZTIPw', '_ZTIPx', '_ZTIPy',
    '_ZTIa', '_ZTIb', '_ZTIc', '_ZTId', '_ZTIe', '_ZTIf',
    '_ZTIh', '_ZTIi', '_ZTIj', '_ZTIl', '_ZTIm', '_ZTIs',
    '_ZTIt', '_ZTIv', '_ZTIw', '_ZTIx', '_ZTIy',
    '_ZTSDi', '_ZTSDn', '_ZTSDs',
    '_ZTSPDi', '_ZTSPDn', '_ZTSPDs',
    '_ZTSPKDi', '_ZTSPKDn', '_ZTSPKDs',
    '_ZTSPKa', '_ZTSPKb', '_ZTSPKc', '_ZTSPKd', '_ZTSPKe', '_ZTSPKf',
    '_ZTSPKh', '_ZTSPKi', '_ZTSPKj',
'_ZTSPKl', '_ZTSPKm', '_ZTSPKs', '_ZTSPKt', '_ZTSPKv', '_ZTSPKw', '_ZTSPKx', '_ZTSPKy', '_ZTSPa', '_ZTSPb', '_ZTSPc', '_ZTSPd', '_ZTSPe', '_ZTSPf', '_ZTSPh', '_ZTSPi', '_ZTSPj', '_ZTSPl', '_ZTSPm', '_ZTSPs', '_ZTSPt', '_ZTSPv', '_ZTSPw', '_ZTSPx', '_ZTSPy', '_ZTSa', '_ZTSb', '_ZTSc', '_ZTSd', '_ZTSe', '_ZTSf', '_ZTSh', '_ZTSi', '_ZTSj', '_ZTSl', '_ZTSm', '_ZTSs', '_ZTSt', '_ZTSv', '_ZTSw', '_ZTSx', '_ZTSy' ] def is_stdlib_symbol_name(name, sym): name = adjust_mangled_name(name) if re.search("@GLIBC|@GCC", name): # Only when symbol is defined do we consider it ours return sym['is_defined'] if re.search('(St[0-9])|(__cxa)|(__cxxabi)', name): return True if name in new_delete_std_symbols: return True if name in cxxabi_symbols: return True if name.startswith('_Z'): return True return False def filter_stdlib_symbols(syms): stdlib_symbols = [] other_symbols = [] for s in syms: canon_name = adjust_mangled_name(s['name']) if not is_stdlib_symbol_name(canon_name, s): other_symbols += [s] else: stdlib_symbols += [s] return stdlib_symbols, other_symbols
cccl-main
libcudacxx/.upstream-tests/utils/libcudacxx/sym_check/util.py
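An illustrative use of the magic-byte helpers above, assuming the module is importable as libcudacxx.sym_check.util (per the path shown). A fake 16-byte file carrying the ELF magic is enough to exercise the check:

import os
import tempfile

from libcudacxx.sym_check.util import is_elf

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b'\x7fELF' + b'\x00' * 12)   # ELF magic plus padding
    path = f.name
try:
    print(is_elf(path))  # True
finally:
    os.unlink(path)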
#===----------------------------------------------------------------------===## # # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. # See https://llvm.org/LICENSE.txt for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # #===----------------------------------------------------------------------===## """libcxx abi symbol checker""" __author__ = 'Eric Fiselier' __email__ = '[email protected]' __versioninfo__ = (0, 1, 0) __version__ = ' '.join(str(v) for v in __versioninfo__) + 'dev' __all__ = ['diff', 'extract', 'util']
cccl-main
libcudacxx/.upstream-tests/utils/libcudacxx/sym_check/__init__.py
# -*- Python -*- vim: set syntax=python tabstop=4 expandtab cc=80: #===----------------------------------------------------------------------===## # # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. # See https://llvm.org/LICENSE.txt for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # #===----------------------------------------------------------------------===## """ diff - A set of functions for diff-ing two symbol lists. """ from libcudacxx.sym_check import util def _symbol_difference(lhs, rhs): lhs_names = set(((n['name'], n['type']) for n in lhs)) rhs_names = set(((n['name'], n['type']) for n in rhs)) diff_names = lhs_names - rhs_names return [n for n in lhs if (n['name'], n['type']) in diff_names] def _find_by_key(sym_list, k): for sym in sym_list: if sym['name'] == k: return sym return None def added_symbols(old, new): return _symbol_difference(new, old) def removed_symbols(old, new): return _symbol_difference(old, new) def changed_symbols(old, new): changed = [] for old_sym in old: if old_sym in new: continue new_sym = _find_by_key(new, old_sym['name']) if (new_sym is not None and not new_sym in old and old_sym != new_sym): changed += [(old_sym, new_sym)] return changed def diff(old, new): added = added_symbols(old, new) removed = removed_symbols(old, new) changed = changed_symbols(old, new) return added, removed, changed def report_diff(added_syms, removed_syms, changed_syms, names_only=False, demangle=True): def maybe_demangle(name): return util.demangle_symbol(name) if demangle else name report = '' for sym in added_syms: report += 'Symbol added: %s\n' % maybe_demangle(sym['name']) if not names_only: report += ' %s\n\n' % sym if added_syms and names_only: report += '\n' for sym in removed_syms: report += 'SYMBOL REMOVED: %s\n' % maybe_demangle(sym['name']) if not names_only: report += ' %s\n\n' % sym if removed_syms and names_only: report += '\n' if not names_only: for sym_pair in changed_syms: old_sym, new_sym = sym_pair old_str = '\n OLD SYMBOL: %s' % old_sym new_str = '\n NEW SYMBOL: %s' % new_sym report += ('SYMBOL CHANGED: %s%s%s\n\n' % (maybe_demangle(old_sym['name']), old_str, new_str)) added = bool(len(added_syms) != 0) abi_break = bool(len(removed_syms)) if not names_only: abi_break = abi_break or len(changed_syms) if added or abi_break: report += 'Summary\n' report += ' Added: %d\n' % len(added_syms) report += ' Removed: %d\n' % len(removed_syms) if not names_only: report += ' Changed: %d\n' % len(changed_syms) if not abi_break: report += 'Symbols added.' else: report += 'ABI BREAKAGE: SYMBOLS ADDED OR REMOVED!' else: report += 'Symbols match.' is_different = abi_break or bool(len(added_syms)) \ or bool(len(changed_syms)) return report, abi_break, is_different
cccl-main
libcudacxx/.upstream-tests/utils/libcudacxx/sym_check/diff.py
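A small usage sketch of the diff entry point above: it consumes lists of symbol dicts carrying at least 'name' and 'type' keys, the same shape that sym_check.extract produces. The symbols here are made up:

from libcudacxx.sym_check.diff import diff

old = [{'name': '_Zold', 'type': 'FUNC'}, {'name': '_Zboth', 'type': 'FUNC'}]
new = [{'name': '_Znew', 'type': 'FUNC'}, {'name': '_Zboth', 'type': 'FUNC'}]
added, removed, changed = diff(old, new)
# added   == [{'name': '_Znew', 'type': 'FUNC'}]
# removed == [{'name': '_Zold', 'type': 'FUNC'}]
# changed == []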
# -*- Python -*- vim: set syntax=python tabstop=4 expandtab cc=80: #===----------------------------------------------------------------------===## # # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. # See https://llvm.org/LICENSE.txt for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # #===----------------------------------------------------------------------===## """ extract - A set of function that extract symbol lists from shared libraries. """ import distutils.spawn import os.path import sys import re import libcudacxx.util from libcudacxx.sym_check import util extract_ignore_names = ['_init', '_fini'] class NMExtractor(object): """ NMExtractor - Extract symbol lists from libraries using nm. """ @staticmethod def find_tool(): """ Search for the nm executable and return the path. """ return distutils.spawn.find_executable('nm') def __init__(self, static_lib): """ Initialize the nm executable and flags that will be used to extract symbols from shared libraries. """ self.nm_exe = self.find_tool() if self.nm_exe is None: # ERROR no NM found print("ERROR: Could not find nm") sys.exit(1) self.static_lib = static_lib self.flags = ['-P', '-g'] def extract(self, lib): """ Extract symbols from a library and return the results as a dict of parsed symbols. """ cmd = [self.nm_exe] + self.flags + [lib] out, _, exit_code = libcudacxx.util.executeCommandVerbose(cmd) if exit_code != 0: raise RuntimeError('Failed to run %s on %s' % (self.nm_exe, lib)) fmt_syms = (self._extract_sym(l) for l in out.splitlines() if l.strip()) # Cast symbol to string. final_syms = (repr(s) for s in fmt_syms if self._want_sym(s)) # Make unique and sort strings. tmp_list = list(sorted(set(final_syms))) # Cast string back to symbol. return util.read_syms_from_list(tmp_list) def _extract_sym(self, sym_str): bits = sym_str.split() # Everything we want has at least two columns. if len(bits) < 2: return None new_sym = { 'name': bits[0], 'type': bits[1], 'is_defined': (bits[1].lower() != 'u') } new_sym['name'] = new_sym['name'].replace('@@', '@') new_sym = self._transform_sym_type(new_sym) # NM types which we want to save the size for. if new_sym['type'] == 'OBJECT' and len(bits) > 3: new_sym['size'] = int(bits[3], 16) return new_sym @staticmethod def _want_sym(sym): """ Check that s is a valid symbol that we want to keep. """ if sym is None or len(sym) < 2: return False if sym['name'] in extract_ignore_names: return False bad_types = ['t', 'b', 'r', 'd', 'w'] return (sym['type'] not in bad_types and sym['name'] not in ['__bss_start', '_end', '_edata']) @staticmethod def _transform_sym_type(sym): """ Map the nm single letter output for type to either FUNC or OBJECT. If the type is not recognized it is left unchanged. """ func_types = ['T', 'W'] obj_types = ['B', 'D', 'R', 'V', 'S'] if sym['type'] in func_types: sym['type'] = 'FUNC' elif sym['type'] in obj_types: sym['type'] = 'OBJECT' return sym class ReadElfExtractor(object): """ ReadElfExtractor - Extract symbol lists from libraries using readelf. """ @staticmethod def find_tool(): """ Search for the readelf executable and return the path. """ return distutils.spawn.find_executable('readelf') def __init__(self, static_lib): """ Initialize the readelf executable and flags that will be used to extract symbols from shared libraries. 
""" self.tool = self.find_tool() if self.tool is None: # ERROR no NM found print("ERROR: Could not find readelf") sys.exit(1) # TODO: Support readelf for reading symbols from archives assert not static_lib and "RealElf does not yet support static libs" self.flags = ['--wide', '--symbols'] def extract(self, lib): """ Extract symbols from a library and return the results as a dict of parsed symbols. """ cmd = [self.tool] + self.flags + [lib] out, _, exit_code = libcudacxx.util.executeCommandVerbose(cmd) if exit_code != 0: raise RuntimeError('Failed to run %s on %s' % (self.nm_exe, lib)) dyn_syms = self.get_dynsym_table(out) return self.process_syms(dyn_syms) def process_syms(self, sym_list): new_syms = [] for s in sym_list: parts = s.split() if not parts: continue assert len(parts) == 7 or len(parts) == 8 or len(parts) == 9 if len(parts) == 7: continue new_sym = { 'name': parts[7], 'size': int(parts[2]), 'type': parts[3], 'is_defined': (parts[6] != 'UND') } assert new_sym['type'] in ['OBJECT', 'FUNC', 'NOTYPE', 'TLS'] if new_sym['name'] in extract_ignore_names: continue if new_sym['type'] == 'NOTYPE': continue if new_sym['type'] == 'FUNC': del new_sym['size'] new_syms += [new_sym] return new_syms def get_dynsym_table(self, out): lines = out.splitlines() start = -1 end = -1 for i in range(len(lines)): if lines[i].startswith("Symbol table '.dynsym'"): start = i + 2 if start != -1 and end == -1 and not lines[i].strip(): end = i + 1 assert start != -1 if end == -1: end = len(lines) return lines[start:end] def extract_symbols(lib_file, static_lib=None): """ Extract and return a list of symbols extracted from a static or dynamic library. The symbols are extracted using NM or readelf. They are then filtered and formated. Finally they symbols are made unique. """ if static_lib is None: _, ext = os.path.splitext(lib_file) static_lib = True if ext in ['.a'] else False if ReadElfExtractor.find_tool() and not static_lib: extractor = ReadElfExtractor(static_lib=static_lib) else: extractor = NMExtractor(static_lib=static_lib) return extractor.extract(lib_file)
cccl-main
libcudacxx/.upstream-tests/utils/libcudacxx/sym_check/extract.py
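A hedged sketch of the 'nm -P -g' line format that NMExtractor._extract_sym parses. The sample line is made up, and __new__ is used purely to sidestep __init__'s nm lookup for illustration:

from libcudacxx.sym_check.extract import NMExtractor

ext = NMExtractor.__new__(NMExtractor)  # skip __init__ (no nm binary needed)
sym = ext._extract_sym('_ZSt4cout D 0000000000001000 0000000000000110')
print(sym)
# -> {'name': '_ZSt4cout', 'type': 'OBJECT', 'is_defined': True, 'size': 272}
# 'D' maps to OBJECT via _transform_sym_type, and the hex size column
# (0x110) is decoded because the symbol is an OBJECT with a size field.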
# -*- Python -*- vim: set syntax=python tabstop=4 expandtab cc=80: #===----------------------------------------------------------------------===## # # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. # See https://llvm.org/LICENSE.txt for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # #===----------------------------------------------------------------------===## """ match - A set of functions for matching symbols in a list to a list of regexs """ import re def find_and_report_matching(symbol_list, regex_list): report = '' found_count = 0 for regex_str in regex_list: report += 'Matching regex "%s":\n' % regex_str matching_list = find_matching_symbols(symbol_list, regex_str) if not matching_list: report += ' No matches found\n\n' continue # else found_count += len(matching_list) for m in matching_list: report += ' MATCHES: %s\n' % m['name'] report += '\n' return found_count, report def find_matching_symbols(symbol_list, regex_str): regex = re.compile(regex_str) matching_list = [] for s in symbol_list: if regex.match(s['name']): matching_list += [s] return matching_list
cccl-main
libcudacxx/.upstream-tests/utils/libcudacxx/sym_check/match.py
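A quick usage sketch with made-up symbols: report which names match a list of regexes, as a blacklist check would:

from libcudacxx.sym_check.match import find_and_report_matching

syms = [{'name': '_ZNSt3__16vectorIiEC1Ev'}, {'name': 'internal_helper'}]
count, report = find_and_report_matching(syms, ['^internal_'])
# count == 1; the report lists 'internal_helper' under the '^internal_' regex.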
# All the Lit configuration is handled in the site configs -- this file is only # left as a canary to catch invocations of Lit that do not go through llvm-lit. # # Invocations that go through llvm-lit will automatically use the right Lit # site configuration inside the build directory. lit_config.fatal( "You seem to be running Lit directly -- you should be running Lit through " "<build>/bin/llvm-lit, which will ensure that the right Lit configuration " "file is used.")
cccl-main
libcudacxx/libcxxabi/test/lit.cfg.py
cccl-main
libcudacxx/libcxxabi/test/libcxxabi/__init__.py
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

import os
import sys

from libcxx.test.config import Configuration as LibcxxConfiguration
from libcxx.test.config import intMacroValue

class Configuration(LibcxxConfiguration):
    # pylint: disable=redefined-outer-name
    def __init__(self, lit_config, config):
        super(Configuration, self).__init__(lit_config, config)
        self.libcxxabi_src_root = None
        self.libcxxabi_obj_root = None
        self.abi_library_path = None
        self.libcxx_src_root = None

    def configure_src_root(self):
        self.libcxxabi_src_root = self.get_lit_conf(
            'libcxxabi_src_root',
            os.path.dirname(self.config.test_source_root))
        # Join with relative components: a second argument starting with '/'
        # would make os.path.join discard libcxxabi_src_root entirely.
        self.libcxx_src_root = self.get_lit_conf(
            'libcxx_src_root',
            os.path.join(self.libcxxabi_src_root, '..', 'libcxx'))

    def configure_obj_root(self):
        self.libcxxabi_obj_root = self.get_lit_conf('libcxxabi_obj_root')
        super(Configuration, self).configure_obj_root()

    def has_cpp_feature(self, feature, required_value):
        return intMacroValue(self.cxx.dumpMacros().get('__cpp_' + feature, '0')) >= required_value

    def configure_features(self):
        super(Configuration, self).configure_features()
        if not self.has_cpp_feature('noexcept_function_type', 201510):
            self.config.available_features.add('libcxxabi-no-noexcept-function-type')
        if not self.get_lit_bool('llvm_unwinder', False):
            self.config.available_features.add('libcxxabi-has-system-unwinder')

    def configure_compile_flags(self):
        self.cxx.compile_flags += [
            '-DLIBCXXABI_NO_TIMER',
            '-D_LIBCUDACXX_ENABLE_CXX17_REMOVED_UNEXPECTED_FUNCTIONS',
        ]
        if self.get_lit_bool('enable_exceptions', True):
            self.cxx.compile_flags += ['-funwind-tables']
        if not self.get_lit_bool('enable_threads', True):
            self.cxx.compile_flags += ['-D_LIBCXXABI_HAS_NO_THREADS']
            self.config.available_features.add('libcxxabi-no-threads')
        super(Configuration, self).configure_compile_flags()

    def configure_compile_flags_header_includes(self):
        self.configure_config_site_header()

        cxx_headers = self.get_lit_conf(
            'cxx_headers',
            os.path.join(self.libcxx_src_root, 'include'))
        if cxx_headers == '':
            self.lit_config.note('using the system\'s c++ headers')
        else:
            self.cxx.compile_flags += ['-nostdinc++']
            if not os.path.isdir(cxx_headers):
                self.lit_config.fatal("cxx_headers='%s' is not a directory."
                                      % cxx_headers)
            self.cxx.compile_flags += ['-I' + cxx_headers]

        libcxxabi_headers = self.get_lit_conf(
            'libcxxabi_headers',
            os.path.join(self.libcxxabi_src_root, 'include'))
        if not os.path.isdir(libcxxabi_headers):
            self.lit_config.fatal("libcxxabi_headers='%s' is not a directory."
                                  % libcxxabi_headers)
        self.cxx.compile_flags += ['-I' + libcxxabi_headers]

        libunwind_headers = self.get_lit_conf('libunwind_headers', None)
        if self.get_lit_bool('llvm_unwinder', False) and libunwind_headers:
            if not os.path.isdir(libunwind_headers):
                self.lit_config.fatal("libunwind_headers='%s' is not a directory."
                                      % libunwind_headers)
            self.cxx.compile_flags += ['-I' + libunwind_headers]
cccl-main
libcudacxx/libcxxabi/test/libcxxabi/test/config.py
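has_cpp_feature above compares a compiler's __cpp_* macro value against the minimum mandated by the standard, via the real intMacroValue helper. A standalone analogue of that comparison, written here only for illustration (the 'L' suffix handling mirrors how feature-test macros are reported):

def has_feature(macros, feature, required):
    """Return True if __cpp_<feature> in 'macros' meets 'required'."""
    value = macros.get('__cpp_' + feature, '0').rstrip('L')
    return int(value) >= required

print(has_feature({'__cpp_noexcept_function_type': '201510L'},
                  'noexcept_function_type', 201510))      # True
print(has_feature({}, 'noexcept_function_type', 201510))  # False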
cccl-main
libcudacxx/libcxxabi/test/libcxxabi/test/__init__.py
# All the Lit configuration is handled in the site configs -- this file is only # left as a canary to catch invocations of Lit that do not go through llvm-lit. # # Invocations that go through llvm-lit will automatically use the right Lit # site configuration inside the build directory. lit_config.fatal( "You seem to be running Lit directly -- you should be running Lit through " "<build>/bin/llvm-lit, which will ensure that the right Lit configuration " "file is used.")
cccl-main
libcudacxx/libunwind/test/lit.cfg.py
cccl-main
libcudacxx/libunwind/test/libunwind/__init__.py
#===----------------------------------------------------------------------===## # # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. # See https://llvm.org/LICENSE.txt for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # #===----------------------------------------------------------------------===## import os import sys from libcxx.test.config import Configuration as LibcxxConfiguration class Configuration(LibcxxConfiguration): # pylint: disable=redefined-outer-name def __init__(self, lit_config, config): super(Configuration, self).__init__(lit_config, config) self.libunwind_src_root = None self.libunwind_obj_root = None self.abi_library_path = None self.libcxx_src_root = None def configure_src_root(self): self.libunwind_src_root = (self.get_lit_conf('libunwind_src_root') or os.path.dirname(self.config.test_source_root)) self.libcxx_src_root = (self.get_lit_conf('libcxx_src_root') or os.path.join(self.libunwind_src_root, '..', 'libcxx')) def configure_obj_root(self): self.libunwind_obj_root = self.get_lit_conf('libunwind_obj_root') super(Configuration, self).configure_obj_root() def has_cpp_feature(self, feature, required_value): return int(self.cxx.dumpMacros().get('__cpp_' + feature, 0)) >= required_value def configure_features(self): super(Configuration, self).configure_features() if self.get_lit_bool('arm_ehabi', False): self.config.available_features.add('libunwind-arm-ehabi') def configure_compile_flags(self): self.cxx.compile_flags += ['-DLIBUNWIND_NO_TIMER'] # Stack unwinding tests need unwinding tables and these are not # generated by default on all Targets. self.cxx.compile_flags += ['-funwind-tables'] if not self.get_lit_bool('enable_threads', True): self.cxx.compile_flags += ['-D_LIBUNWIND_HAS_NO_THREADS'] self.config.available_features.add('libunwind-no-threads') super(Configuration, self).configure_compile_flags() def configure_compile_flags_header_includes(self): self.configure_config_site_header() libunwind_headers = self.get_lit_conf( 'libunwind_headers', os.path.join(self.libunwind_src_root, 'include')) if not os.path.isdir(libunwind_headers): self.lit_config.fatal("libunwind_headers='%s' is not a directory." % libunwind_headers) self.cxx.compile_flags += ['-I' + libunwind_headers] def configure_link_flags_cxx_library(self): # libunwind tests should not link with libc++ pass def configure_link_flags_abi_library(self): # libunwind tests should not link with libc++abi pass
cccl-main
libcudacxx/libunwind/test/libunwind/test/config.py
cccl-main
libcudacxx/libunwind/test/libunwind/test/__init__.py
# -*- coding: utf-8 -*- # # libunwind documentation build configuration file. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os from datetime import date # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'libunwind' copyright = u'2011-%d, LLVM Project' % date.today().year # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '12.0' # The full version, including alpha/beta/rc tags. release = '12.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. today_fmt = '%Y-%m-%d' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'friendly' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'haiku' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. 
Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'libunwinddoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('contents', 'libunwind.tex', u'libunwind Documentation', u'LLVM project', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('contents', 'libunwind', u'libunwind Documentation', [u'LLVM project'], 1) ] # If true, show URL addresses after external links. 
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('contents', 'libunwind', u'libunwind Documentation',
   u'LLVM project', 'libunwind', 'LLVM Unwinder',
   'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# FIXME: Define intersphinx configuration.
intersphinx_mapping = {}

# -- Options for extensions ----------------------------------------------------

# Enable this if you want TODOs to show up in the generated documentation.
todo_include_todos = True
cccl-main
libcudacxx/libunwind/docs/conf.py
#!/usr/bin/python3

# Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

# This parses a CSV version of Logan_customer_pinmux_release.xlsm

import argparse
import csv
import os
import os.path
import sys
import tegra_pmx_soc_parser
from tegra_pmx_utils import *

dbg = False

parser = argparse.ArgumentParser(description='Create a board config ' +
    'from a CSV version of the Venice2 pinmux spreadsheet')
parser.add_argument('--debug', action='store_true', help='Turn on debugging prints')
parser.add_argument('--csv', default=argparse.SUPPRESS, help='CSV file to parse')
parser.add_argument('--csv-rsvd-0based', action='store_true', dest='csv_rsvd_0based', default=argparse.SUPPRESS, help='Assume 0-based RSVD numbering')
parser.add_argument('--csv-rsvd-1based', action='store_false', dest='csv_rsvd_0based', default=argparse.SUPPRESS, help='Assume 1-based RSVD numbering')
parser.add_argument('board', help='Board name')
args = parser.parse_args()
if args.debug:
    dbg = True
if dbg:
    print(args)

# Boards in alphabetical order in this dictionary:
supported_boards = {
    'cei-tk1-som': {
        # tk1-som_pinmux_V2.4.xlsm Colorado TK1-SOM Configuration (1-based rsvd)
        # updated to version 11 by Peter Chubb
        'filename': 'csv/cei-tk1-som.csv',
        'rsvd_base': 1,
        'soc': 'tegra124',
    },
    'e2220-1170': {
        # T210_customer_pinmux.xlsm worksheet [elided] (0-based rsvd)
        'filename': 'csv/e2220-1170.csv',
        'rsvd_base': 0,
        'soc': 'tegra210',
    },
    'jetson-tk1': {
        # Jetson_TK1_customer_pinmux.xlsm worksheet Jetson TK1 Configuration (1-based rsvd) from:
        # https://developer.nvidia.com/hardware-design-and-development
        'filename': 'csv/jetson-tk1.csv',
        'rsvd_base': 1,
        'soc': 'tegra124',
    },
    'norrin': {
        # PM370_T124_customer_pinmux_1.1.xlsm worksheet Customer_Configuration (0-based rsvd)
        'filename': 'nv-internal-data/PM370_T124_customer_pinmux_1.1.csv',
        'rsvd_base': 0,
        'soc': 'tegra124',
    },
    'p2371-0000': {
        # T210_customer_pinmux.xlsm worksheet [elided] Configuration (0-based rsvd)
        'filename': 'csv/p2371-0000.csv',
        'rsvd_base': 0,
        'soc': 'tegra210',
    },
    'p2371-2180': {
        # T210_customer_pinmux.xlsm worksheet [elided] Configuration (0-based rsvd)
        'filename': 'csv/p2371-2180.csv',
        'rsvd_base': 0,
        'soc': 'tegra210',
    },
    'p3450-porg': {
        # Jetson_Nano_DeveloperKit_Users_Pinmux_Configuration.xlsm (0-based rsvd)
        'filename': 'csv/p3450-porg.csv',
        'rsvd_base': 0,
        'soc': 'tegra210',
    },
    'p2571': {
        # T210_customer_pinmux.xlsm worksheet [elided] Configuration (0-based rsvd)
        'filename': 'csv/p2571.csv',
        'rsvd_base': 0,
        'soc':
'tegra210',
    },
    'tegra210-smaug': {
        # erista_customer_pinmux_v04_0420.xlsm
        'filename': 'csv/tegra210-smaug-v04_0420.csv',
        'rsvd_base': 0,
        'soc': 'tegra210',
    },
    'venice2': {
        # Venice2_T124_customer_pinmux_based_on_P4_rev47_2013-07-12.xlsm worksheet Customer_Configuration (0-based rsvd)
        'filename': 'nv-internal-data/Venice2_T124_customer_pinmux_based_on_P4_rev47_2013-07-12.csv',
        'rsvd_base': 0,
        'soc': 'tegra124',
    },
}

if not args.board in supported_boards:
    print('ERROR: Unsupported board %s' % args.board, file=sys.stderr)
    sys.exit(1)
board_conf = supported_boards[args.board]
if 'csv' in args:
    board_conf['filename'] = args.csv
if 'csv_rsvd_0based' in args:
    board_conf['rsvd_base'] = {True: 0, False: 1}[args.csv_rsvd_0based]
if dbg:
    print(board_conf)

soc = tegra_pmx_soc_parser.load_soc(board_conf['soc'])

COL_BALL_NAME = 0
COL_BALL_MID = 1
COL_BALL_DSC = 2
COL_GPIO = 3
COL_F0 = 4
COL_F1 = 5
COL_F2 = 6
COL_F3 = 7
COL_FS = 8
COL_MUX = 9
COL_PUPD = 10
COL_TRI = 11
COL_E_INPUT = 12
COL_GPIO_INIT_VAL = 13
COL_DIRECTION = 14
COL_RCV_SEL = 15

col_names = {
    COL_BALL_NAME: 'Ball Name',
    COL_BALL_MID: 'MID',
    COL_BALL_DSC: 'DSC',
    COL_GPIO: 'GPIO',
    COL_F0: 'F0',
    COL_F1: 'F1',
    COL_F2: 'F2',
    COL_F3: 'F3',
    COL_FS: 'FS',
    COL_MUX: 'Pin Group',
    COL_PUPD: 'PUPD',
    COL_TRI: 'Tristate',
    COL_E_INPUT: 'E_Input',
    COL_GPIO_INIT_VAL: 'GPIO Init Value',
    COL_DIRECTION: 'Pin Direction',
}
if soc.soc_pins_have_rcv_sel:
    col_names[COL_RCV_SEL] = 'High or Normal VIL/VIH'
if soc.soc_pins_have_e_io_hv:
    col_names[COL_RCV_SEL] = '3.3V Tolerance Enable'

cols = {}

def func_munge(f):
    if board_conf['soc'] == 'tegra124':
        if f in ('sdmmc2a', 'sdmmc2b'):
            return 'sdmmc2'
        if f in ('ir3_rxd', 'ir3_txd'):
            return 'irda'
    if soc.soc_rsvd_base != board_conf['rsvd_base']:
        if soc.soc_rsvd_base:
            return rsvd_0base_to_1base(f)
        else:
            raise Exception('CSV 1-based to SoC 0-based not supported')
    return f

def pupd_munge(d):
    return {
        'NORMAL': 'none',
        'PULL_UP': 'up',
        'PULL_DOWN': 'down',
    }[d]

def tri_munge(d):
    return {
        'NORMAL': False,
        'TRISTATE': True,
    }[d]

def e_input_munge(d):
    return {
        'DISABLE': False,
        'ENABLE': True,
    }[d]

warn_empty_gpio_init_val = False
def gpio_init_val_munge(d):
    global warn_empty_gpio_init_val
    if d == '':
        warn_empty_gpio_init_val = True
    return {
        '': 'out?',
        '0': 'out0',
        '1': 'out1',
    }[d]

def od_from_direction(d):
    return d == 'Open-Drain'

def rcv_sel_munge(d):
    return {
        '': False,
        'NORMAL': False,
        'HIGH': True,
        'Disable': False,
        'Enable': True,
    }[d]

found_header = False
pin_table = []
mipi_table = []
with open(board_conf['filename'], newline='') as fh:
    csv = csv.reader(fh)
    lnum = 0
    for row in csv:
        lnum += 1

        # Header rows
        if not found_header:
            if 'Ball Name' not in row:
                if lnum > 25:
                    print('ERROR: Header row not found', file=sys.stderr)
                    sys.exit(1)
                continue
            for colid, coltext in col_names.items():
                try:
                    cols[colid] = row.index(coltext)
                except:
                    if colid in (COL_BALL_MID, COL_BALL_DSC):
                        pass
                    else:
                        if board_conf['soc'] != 'tegra124':
                            raise
                        if colid != COL_RCV_SEL:
                            print('ERROR: Header column "%s" not found' % coltext, file=sys.stderr)
                            sys.exit(1)
                    cols[colid] = None
            found_header = True
            continue

        ball_name = row[cols[COL_BALL_NAME]].lower()
        if ball_name.startswith('mipi_pad_ctrl_'):
            ball_name = ball_name[14:]
            mipi = soc.mipi_pad_ctrl_group_by_name(ball_name)
        else:
            mipi = None
        if cols[COL_BALL_MID]:
            ball_mid = row[cols[COL_BALL_MID]]
        else:
            ball_mid = None
        if cols[COL_BALL_DSC]:
            ball_dsc = row[cols[COL_BALL_DSC]]
        else:
            ball_dsc = None

        # Section title row
        if not ball_mid and not ball_dsc and not mipi:
            continue

        mux = func_munge(row[cols[COL_MUX]].lower())

        if mipi:
            mipi_table.append((repr(mipi.name), repr(mux)))
            continue

        # Pin not affected by pinmux
        if mux in ('', '0', '#n/a'):
            continue

        if dbg:
            print(ball_name)

        gpio = row[cols[COL_GPIO]].lower()
        f0 = func_munge(row[cols[COL_F0]].lower())
        f1 = func_munge(row[cols[COL_F1]].lower())
        f2 = func_munge(row[cols[COL_F2]].lower())
        f3 = func_munge(row[cols[COL_F3]].lower())
        fs = func_munge(row[cols[COL_FS]].lower())
        pupd = pupd_munge(row[cols[COL_PUPD]])
        tri = tri_munge(row[cols[COL_TRI]])
        e_input = e_input_munge(row[cols[COL_E_INPUT]])
        od = od_from_direction(row[cols[COL_DIRECTION]])
        if cols[COL_RCV_SEL]:
            rcv_sel = rcv_sel_munge(row[cols[COL_RCV_SEL]])
        else:
            rcv_sel = False

        mux_gpio = mux.startswith('gpio_p') or (mux == gpio)
        if mux_gpio:
            mux = None
            if e_input:
                gpio_init = 'in'
            else:
                gpio_init = gpio_init_val_munge(row[cols[COL_GPIO_INIT_VAL]])
        else:
            gpio_init = None

        gpio_pin = soc.gpio_or_pin_by_name(ball_name)
        for i, func in enumerate((f0, f1, f2, f3)):
            alt_rsvd = 'rsvd' + str(soc.soc_rsvd_base + i)
            if func != gpio_pin.funcs[i] and func != alt_rsvd:
                print('WARNING: %s: F%d mismatch CSV %s vs SOC %s' % (ball_name, i, repr(func), repr(gpio_pin.funcs[i])), file=sys.stderr)
        for i, func in enumerate((f0, f1, f2, f3)):
            alt_rsvd = 'rsvd' + str(soc.soc_rsvd_base + i)
            if func not in gpio_pin.funcs and func != alt_rsvd:
                print('ERROR: %s: F%d CSV %s not in SOC list %s' % (ball_name, i, repr(func), repr(gpio_pin.funcs)), file=sys.stderr)
                sys.exit(1)
        if fs not in (f0, f1, f2, f3):
            print('ERROR: %s: FSAFE CSV %s not in CSV F0..3 %s' % (ball_name, fs, repr((f0, f1, f2, f3))), file=sys.stderr)
            sys.exit(1)
        if mux and mux not in (f0, f1, f2, f3):
            print('ERROR: %s: MUX CSV %s not in CSV F0..3 %s' % (ball_name, mux, repr((f0, f1, f2, f3))), file=sys.stderr)
            sys.exit(1)
        if mux and mux not in gpio_pin.funcs:
            print('ERROR: %s: MUX CSV %s not in SOC F0..3 %s' % (ball_name, mux, repr(gpio_pin.funcs)), file=sys.stderr)
            sys.exit(1)

        if (board_conf['soc'] == 'tegra124') and (ball_name in ('reset_out_n', 'owr', 'hdmi_int', 'ddc_scl', 'ddc_sda')):
            # These balls' pad type is always OD, so we don't need to set it
            # FIXME: The SoC data structure should tell us the pad type instead of hard-coding it
            od = False
        if od and not gpio_pin.od:
            print('WARNING: %s: OD in board file, but pin has no OD' % ball_name, file=sys.stderr)
            od = False

        pin_has_rcv_sel = False
        if soc.soc_pins_have_rcv_sel:
            pin_has_rcv_sel = gpio_pin.rcv_sel
        if soc.soc_pins_have_e_io_hv:
            pin_has_rcv_sel = gpio_pin.e_io_hv
        if rcv_sel and not pin_has_rcv_sel:
            print('WARNING: %s: RCV_SEL/E_IO_HV in board file, but pin does not support it' % ball_name, file=sys.stderr)
            rcv_sel = False

        pin_table.append((repr(gpio_pin.fullname), repr(mux), repr(gpio_init), repr(pupd), repr(tri), repr(e_input), repr(od), repr(rcv_sel)))

pin_headings = ('pin', 'mux', 'gpio_init', 'pull', 'tri', 'e_inp', 'od')
if soc.soc_pins_have_e_io_hv:
    pin_headings += ('e_io_hv',)
if soc.soc_pins_have_rcv_sel:
    pin_headings += ('rcv_sel',)
mipi_headings = ('pin', 'mux')

cfgfile = os.path.join('configs', args.board + '.board')
with open(cfgfile, 'wt') as fh:
    print('soc = \'%s\'' % board_conf['soc'], file=fh)
    print(file=fh)
    print('pins = (', file=fh)
    dump_py_table(pin_headings, pin_table, file=fh)
    print(')', file=fh)
    print('', file=fh)
    print('drive_groups = (', file=fh)
    print(')', file=fh)
    print('', file=fh)
    print('mipi_pad_ctrl_groups = (', file=fh)
    dump_py_table(mipi_headings, mipi_table, file=fh)
    print(')', file=fh)

if warn_empty_gpio_init_val:
    print('WARNING: Missing gpio_init_vals detected. Manual fixup required', file=sys.stderr)
tegra-pinmux-scripts-master
csv-to-board.py
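# Usage sketch for csv-to-board.py above (an illustrative addition, not part
# of the repository): when the CSV numbers RSVD functions from 0 but the SoC
# numbers them from 1, func_munge() rebases them via rsvd_0base_to_1base()
# from tegra_pmx_utils. Non-rsvd function names pass through unchanged.

from tegra_pmx_utils import rsvd_0base_to_1base

if __name__ == '__main__':
    print(rsvd_0base_to_1base('rsvd0'))   # -> 'rsvd1'
    print(rsvd_0base_to_1base('rsvd3'))   # -> 'rsvd4'
    print(rsvd_0base_to_1base('sdmmc1'))  # -> 'sdmmc1'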
# Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

class ReprDictObj(object):
    def __repr__(self):
        return self.__class__.__name__ + '(' + repr(self.__dict__) + ')'

    def __str__(self):
        return self.__class__.__name__ + '(' + str(self.__dict__) + ')'

class TopLevelParsedObj(ReprDictObj):
    def __init__(self, name, copy_attrs, data):
        self.name = name
        self.titlename = name.title()
        for attr, default in copy_attrs:
            if attr in data:
                val = data[attr]
            else:
                if default is None:
                    raise Exception('Missing variable ' + attr)
                val = default
            self.__setattr__(attr, val)
tegra-pinmux-scripts-master
tegra_pmx_parser_utils.py
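# Minimal usage sketch for tegra_pmx_parser_utils.py above (an illustrative
# addition, not from the repository): copy_attrs entries passed to
# TopLevelParsedObj are (attribute, default) pairs, and a default of None
# marks the attribute as mandatory in the parsed config dict. _DemoObj and
# its attribute names are hypothetical.

from tegra_pmx_parser_utils import TopLevelParsedObj

class _DemoObj(TopLevelParsedObj):
    def __init__(self, data):
        copy_attrs = (
            ('soc_rsvd_base', None),          # required: default None
            ('soc_mipipadctrl_reg_base', 0),  # optional: defaults to 0
        )
        TopLevelParsedObj.__init__(self, 'demo', copy_attrs, data)

if __name__ == '__main__':
    print(_DemoObj({'soc_rsvd_base': 1}))
    # -> _DemoObj({'name': 'demo', 'titlename': 'Demo',
    #              'soc_rsvd_base': 1, 'soc_mipipadctrl_reg_base': 0})
    try:
        _DemoObj({})
    except Exception as e:
        print(e)  # -> Missing variable soc_rsvd_base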
#!/usr/bin/python3

# Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

import argparse
import datetime
import os.path
import tegra_pmx_board_parser
from tegra_pmx_utils import *

dbg = False

parser = argparse.ArgumentParser(description='Create a U-Boot board pinmux ' +
    'config table from a board config file')
parser.add_argument('--debug', action='store_true', help='Turn on debugging prints')
parser.add_argument('board', help='Board to process')
args = parser.parse_args()
if args.debug:
    dbg = True
if dbg:
    print(args)

board = tegra_pmx_board_parser.load_board(args.board)

copyright_year = datetime.date.today().year

print('''\
/*
 * Copyright (c) %(copyright_year)d, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

/*
 * THIS FILE IS AUTO-GENERATED - DO NOT EDIT!
 *
 * To generate this file, use the tegra-pinmux-scripts tool available from
 * https://github.com/NVIDIA/tegra-pinmux-scripts
 * Run "board-to-uboot.py %(board_name)s".
 */

#ifndef _PINMUX_CONFIG_%(board_define)s_H_
#define _PINMUX_CONFIG_%(board_define)s_H_

#define GPIO_INIT(_port, _gpio, _init) \\
\t{ \\
\t\t.gpio = TEGRA_GPIO(_port, _gpio), \\
\t\t.init = TEGRA_GPIO_INIT_##_init, \\
\t}

static const struct tegra_gpio_config %(board_varname)s_gpio_inits[] = {
''' % {
    'copyright_year': copyright_year,
    'board_name': args.board,
    'board_define': board.definename,
    'board_varname': board.varname,
}, end='')

gpio_table = []
for pincfg in board.pincfgs_by_num():
    if not pincfg.gpio_init:
        continue
    gpio = pincfg.gpio_pin.gpio.upper()
    port = gpio[:-1]
    assert port.isalpha()
    pin = gpio[-1]
    assert pin.isdigit()
    row = (
        port,
        pin,
        pincfg.gpio_init.upper(),
    )
    gpio_table.append(row)
headings = ('port', 'pin', 'init_val')
dump_c_table(headings, 'GPIO_INIT', gpio_table)

print('''\
};
''', end='')

params = ['_pingrp', '_mux', '_pull', '_tri', '_io', '_od']
if board.soc.soc_pins_have_rcv_sel:
    params += ['_rcv_sel',]
if board.soc.soc_pins_have_e_io_hv:
    params += ['_e_io_hv',]
s = gen_wrapped_c_macro_header('PINCFG', params)
s += '''\
{
 .pingrp = PMUX_PINGRP_##_pingrp,
 .func = PMUX_FUNC_##_mux,
 .pull = PMUX_PULL_##_pull,
 .tristate = PMUX_TRI_##_tri,
 .io = PMUX_PIN_##_io,
 .od = PMUX_PIN_OD_##_od,
'''
if board.soc.soc_pins_have_rcv_sel:
    s += '''\
 .rcv_sel = PMUX_PIN_RCV_SEL_##_rcv_sel,
'''
if board.soc.soc_pins_have_e_io_hv:
    s += '''\
 .e_io_hv = PMUX_PIN_E_IO_HV_##_e_io_hv,
'''
s += '''\
 .lock = PMUX_PIN_LOCK_DEFAULT,
'''
if board.soc.soc_pins_have_ior:
    s += '''\
 .ioreset = PMUX_PIN_IO_RESET_DEFAULT,
'''
s = append_aligned_tabs_indent_with_tabs(s, 0)
print(s)
print('''\
}

static const struct pmux_pingrp_config %(board_varname)s_pingrps[] = {
''' % {
    'board_varname': board.varname,
}, end='')

def mapper_mux(val):
    if val:
        return val.upper()
    else:
        return 'DEFAULT'

def mapper_pull(val):
    if val == 'NONE':
        return 'NORMAL'
    return val

def mapper_tristate(val):
    return {False: 'NORMAL', True: 'TRISTATE'}[val]

def mapper_e_input(val):
    return {False: 'OUTPUT', True: 'INPUT'}[val]

def mapper_od(gpio_pin, val):
    if not gpio_pin.od:
        return 'DEFAULT'
    return {False: 'DISABLE', True: 'ENABLE'}[val]

def mapper_rcv_sel(gpio_pin, val):
    if not gpio_pin.rcv_sel:
        return 'DEFAULT'
    return {False: 'NORMAL', True: 'HIGH'}[val]

def mapper_e_io_hv(gpio_pin, val):
    if not gpio_pin.e_io_hv:
        return 'DEFAULT'
    return {False: 'NORMAL', True: 'HIGH'}[val]

pincfg_table = []
for pincfg in board.pincfgs_by_num():
    row = (
        pincfg.fullname.upper(),
        mapper_mux(pincfg.mux),
        mapper_pull(pincfg.pull.upper()),
        mapper_tristate(pincfg.tri),
        mapper_e_input(pincfg.e_inp),
        mapper_od(pincfg.gpio_pin, pincfg.od),
    )
    if board.soc.soc_pins_have_rcv_sel:
        row += (mapper_rcv_sel(pincfg.gpio_pin, pincfg.rcv_sel),)
    if board.soc.soc_pins_have_e_io_hv:
        row += (mapper_e_io_hv(pincfg.gpio_pin, pincfg.e_io_hv),)
    pincfg_table.append(row)
headings = ('pingrp', 'mux', 'pull', 'tri', 'e_input', 'od')
if board.soc.soc_pins_have_rcv_sel:
    headings += ('rcv_sel',)
if board.soc.soc_pins_have_e_io_hv:
    headings += ('e_io_hv',)
dump_c_table(headings, 'PINCFG', pincfg_table)

print('''\
};

#define DRVCFG(_drvgrp, _slwf, _slwr, _drvup, _drvdn, _lpmd, _schmt, _hsm) \\
\t{ \\
\t\t.drvgrp = PMUX_DRVGRP_##_drvgrp, \\
\t\t.slwf = _slwf, \\
\t\t.slwr = _slwr, \\
\t\t.drvup = _drvup, \\
\t\t.drvdn = _drvdn, \\
\t\t.lpmd = PMUX_LPMD_##_lpmd, \\
\t\t.schmt = PMUX_SCHMT_##_schmt, \\
\t\t.hsm = PMUX_HSM_##_hsm, \\
\t}

static const struct pmux_drvgrp_config %s_drvgrps[] = {
''' % board.varname, end='')

# FIXME: Handle drive groups

print('''\
};
''', end='')

if len(board.mipipadctrlcfgs_by_num()):
    print('''\
#define MIPIPADCTRLCFG(_grp, _mux) \\
\t{ \\
\t\t.grp = PMUX_MIPIPADCTRLGRP_##_grp, \\
\t\t.func = PMUX_FUNC_##_mux, \\
\t}

static const struct pmux_mipipadctrlgrp_config %s_mipipadctrlgrps[] = {
''' % board.varname, end='')

    mipipadctrl_table = []
    for cfg in board.mipipadctrlcfgs_by_num():
        row = (
            cfg.name.upper(),
            mapper_mux(cfg.mux),
        )
        mipipadctrl_table.append(row)
    headings = ('grp', 'mux')
    dump_c_table(headings, 'MIPIPADCTRLCFG', mipipadctrl_table)

    print('''\
};
''', end='')

print('''\
#endif /* PINMUX_CONFIG_%s_H */
''' % board.definename, end='')

board.warn_about_unconfigured_pins()
tegra-pinmux-scripts-master
board-to-uboot.py
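# Illustrative sketch for board-to-uboot.py above (an addition, not part of
# the repository): the GPIO_INIT table splits a GPIO name such as 'i6' into
# its port letters and pin digit with simple slicing plus asserts. The same
# logic in isolation:

def demo_split(gpio_name):
    gpio = gpio_name.upper()
    port, pin = gpio[:-1], gpio[-1]
    assert port.isalpha() and pin.isdigit()
    return port, pin

if __name__ == '__main__':
    print(demo_split('i6'))   # -> ('I', '6')
    print(demo_split('bb3'))  # -> ('BB', '3')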
# Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

import os.path
import sys
import tegra_pmx_soc_parser
from tegra_pmx_parser_utils import *

script_dir = os.path.dirname(os.path.abspath(__file__))
configs_dir = os.path.join(script_dir, 'configs')

class PinConfig(ReprDictObj):
    def __init__(self, soc, data):
        fields = ('fullname', 'mux', 'gpio_init', 'pull', 'tri', 'e_inp', 'od')
        if soc.soc_pins_have_rcv_sel:
            fields += ('rcv_sel', )
        if soc.soc_pins_have_e_io_hv:
            fields += ('e_io_hv', )
        for i, field in enumerate(fields):
            self.__setattr__(field, data[i])
        self.gpio_pin = soc.gpio_or_pin_by_fullname(self.fullname)

class MipiPadCtrlConfig(ReprDictObj):
    def __init__(self, soc, data):
        fields = ('name', 'mux')
        for i, field in enumerate(fields):
            self.__setattr__(field, data[i])
        self.mipi_pad_ctrl_group = soc.mipi_pad_ctrl_group_by_name(self.name)

class Board(TopLevelParsedObj):
    def __init__(self, name, data):
        TopLevelParsedObj.__init__(self, name, (), data)

        self.varname = name.lower().replace('-', '_')
        self.definename = name.upper().replace('-', '_')
        self.soc = tegra_pmx_soc_parser.load_soc(data['soc'])

        self._pincfgs = []
        for num, pindata in enumerate(data['pins']):
            pincfg = PinConfig(self.soc, pindata)
            self._pincfgs.append(pincfg)

        # FIXME: fill this in...
        self.drvcfg = []

        self._mipipadctrlcfgs = []
        if 'mipi_pad_ctrl_groups' in data:
            for num, pindata in enumerate(data['mipi_pad_ctrl_groups']):
                mipipadctrlcfg = MipiPadCtrlConfig(self.soc, pindata)
                self._mipipadctrlcfgs.append(mipipadctrlcfg)

        self._generate_derived_data()

    def _generate_derived_data(self):
        self._pincfgs_by_num = sorted(self._pincfgs, key=lambda pincfg: pincfg.gpio_pin.sort_by_num_key())
        self._mipipadctrlcfgs_by_num = sorted(self._mipipadctrlcfgs, key=lambda cfg: cfg.mipi_pad_ctrl_group.reg)

    def pincfgs_by_conf_order(self):
        return self._pincfgs

    def pincfgs_by_num(self):
        return self._pincfgs_by_num

    def mipipadctrlcfgs_by_conf_order(self):
        return self._mipipadctrlcfgs

    def mipipadctrlcfgs_by_num(self):
        return self._mipipadctrlcfgs_by_num

    def warn_about_unconfigured_pins(self):
        unconfigured_gpio_pins = [gpio_pin.fullname for gpio_pin in self.soc.gpios_pins_by_num() if gpio_pin.reg]
        for gpio_pin in self.pincfgs_by_num():
            unconfigured_gpio_pins.remove(gpio_pin.gpio_pin.fullname)
        for gpio_pin in unconfigured_gpio_pins:
            print('WARNING: Unconfigured pin ' + gpio_pin, file=sys.stderr)

def load_board(boardname):
    fn = os.path.join(configs_dir, boardname + '.board')
    d = {}
    with open(fn) as f:
        code = compile(f.read(), fn, 'exec')
        exec(code, globals(), d)
    return Board(boardname, d)
tegra-pinmux-scripts-master
tegra_pmx_board_parser.py
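# Usage sketch for tegra_pmx_board_parser.py above (an illustrative addition,
# not from the repository): load_board() compiles and exec()s
# configs/<name>.board, then wraps the resulting dict in a Board object. The
# board name below is only an example; any configs/*.board file name works.

import tegra_pmx_board_parser

if __name__ == '__main__':
    board = tegra_pmx_board_parser.load_board('jetson-tk1')
    print(board.soc.name)
    for pincfg in board.pincfgs_by_num():
        print(pincfg.fullname, pincfg.mux, pincfg.pull)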
#!/usr/bin/python3

# Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

import argparse
import os.path
import tegra_pmx_board_parser
from tegra_pmx_utils import *

dbg = False

parser = argparse.ArgumentParser(description='Create a kernel device tree ' +
    'pinmux fragment from a board config file')
parser.add_argument('--debug', action='store_true', help='Turn on debugging prints')
parser.add_argument('board', help='Board to process')
args = parser.parse_args()
if args.debug:
    dbg = True
if dbg:
    print(args)

board = tegra_pmx_board_parser.load_board(args.board)

def mapper_pull(val):
    return 'TEGRA_PIN_PULL_' + val.upper()

def mapper_bool(val):
    return 'TEGRA_PIN_' + {False: 'DISABLE', True: 'ENABLE'}[val]

for pincfg in board.pincfgs_by_num():
    print(' ' + pincfg.fullname + ' {')
    print(' nvidia,pins = "' + pincfg.fullname + '";')
    if pincfg.mux:
        print(' nvidia,function = "' + pincfg.mux + '";')
    print(' nvidia,pull = <' + mapper_pull(pincfg.pull) + '>;')
    print(' nvidia,tristate = <' + mapper_bool(pincfg.tri) + '>;')
    print(' nvidia,enable-input = <' + mapper_bool(pincfg.e_inp) + '>;')
    if pincfg.gpio_pin.od:
        print(' nvidia,open-drain = <' + mapper_bool(pincfg.od) + '>;')
    if board.soc.soc_pins_have_rcv_sel and pincfg.gpio_pin.rcv_sel and hasattr(pincfg.gpio_pin, 'rcv_sel'):
        print(' nvidia,rcv-sel = <' + mapper_bool(pincfg.rcv_sel) + '>;')
    if board.soc.soc_pins_have_e_io_hv and pincfg.gpio_pin.e_io_hv and hasattr(pincfg.gpio_pin, 'e_io_hv'):
        print(' nvidia,io-hv = <' + mapper_bool(pincfg.e_io_hv) + '>;')
    print(' };')

# FIXME: Handle drive groups

for cfg in board.mipipadctrlcfgs_by_num():
    print(' ' + cfg.name + ' {')
    print(' nvidia,pins = "mipi_pad_ctrl_' + cfg.name + '";')
    print(' nvidia,function = "' + cfg.mux + '";')
    print(' };')

board.warn_about_unconfigured_pins()
tegra-pinmux-scripts-master
board-to-kernel-dt.py
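# Illustrative sketch for board-to-kernel-dt.py above (an addition, not from
# the repository): each pincfg becomes one device-tree node. The pin name and
# property values below are hypothetical; the node shape mirrors what the
# loop above prints.

def demo_node(name, func, pull, tri, e_inp):
    yn = {False: 'TEGRA_PIN_DISABLE', True: 'TEGRA_PIN_ENABLE'}
    print(name + ' {')
    print(' nvidia,pins = "' + name + '";')
    print(' nvidia,function = "' + func + '";')
    print(' nvidia,pull = <TEGRA_PIN_PULL_' + pull.upper() + '>;')
    print(' nvidia,tristate = <' + yn[tri] + '>;')
    print(' nvidia,enable-input = <' + yn[e_inp] + '>;')
    print('};')

if __name__ == '__main__':
    demo_node('sdmmc1_clk_pz0', 'sdmmc1', 'none', False, True)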
# Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

import collections
import os.path
from tegra_pmx_parser_utils import *

script_dir = os.path.dirname(os.path.abspath(__file__))
configs_dir = os.path.join(script_dir, 'configs')

class PinBase(ReprDictObj):
    def __init__(self, soc, signal, gpio, num, data):
        self.signal = signal
        self.gpio = gpio
        self.num = num
        fields = []
        if self.signal:
            fields += (self.signal,)
        if self.gpio:
            fields += ('p' + self.gpio,)
        self.fullname = '_'.join((fields))
        if self.signal:
            self.shortname = self.signal
        else:
            self.shortname = self.gpio
        self.define = 'TEGRA_PIN_' + '_'.join(fields).upper()
        self.desc = ' '.join(fields).upper()
        if not data:
            self.reg = None
            return
        fields = ('reg', 'f0', 'f1', 'f2', 'f3',)
        if soc.soc_pins_all_have_od:
            self.od = True
        elif soc.soc_pins_have_od:
            fields += ('od',)
        if soc.soc_pins_have_ior:
            fields += ('ior',)
        if soc.soc_pins_have_rcv_sel:
            fields += ('rcv_sel', )
        if soc.soc_pins_have_hsm:
            fields += ('hsm', )
        if soc.soc_pins_all_have_schmitt:
            self.schmitt = True
        elif soc.soc_pins_have_schmitt:
            fields += ('schmitt', )
        if soc.soc_pins_have_drvtype:
            fields += ('drvtype', )
        if soc.soc_pins_have_e_io_hv:
            fields += ('e_io_hv', )
        for i, field in enumerate(fields):
            self.__setattr__(field, data[i])
        self.funcs = (self.f0, self.f1, self.f2, self.f3)
        self.per_pin_drive_group = None

    def set_per_pin_drive_group(self, g):
        self.per_pin_drive_group = g

    def sort_by_num_key(self):
        return (self.__class__ == Pin, self.num)

def _gpio_number(gpion):
    if len(gpion) == 2:
        bank = ord(gpion[0]) - ord('a')
        index = ord(gpion[1]) - ord('0')
    else:
        bank = ord(gpion[0]) - ord('a') + 26
        index = ord(gpion[2]) - ord('0')
    return (bank * 8) + index

class Gpio(PinBase):
    def __init__(self, soc, data):
        num = _gpio_number(data[1])
        PinBase.__init__(self, soc, data[0], data[1], num, data[2:])

class Pin(PinBase):
    def __init__(self, soc, num, data):
        PinBase.__init__(self, soc, data[0], '', num, data[1:])

class DriveGroup(ReprDictObj):
    def __init__(self, soc, data, gpios_pins):
        fields = ('name', 'reg', )
        if soc.soc_drvgroups_have_hsm:
            fields += ('hsm_b',)
        if soc.soc_drvgroups_have_schmitt:
            fields += ('schmitt_b',)
        if soc.soc_drvgroups_have_lpmd:
            fields += ('lpmd_b',)
        if soc.soc_drvgroups_have_parked:
            fields += ('prk_mask',)
        fields += ('drvdn_b', 'drvdn_w', 'drvup_b', 'drvup_w', 'slwr_b', 'slwr_w', 'slwf_b', 'slwf_w')
        if soc.soc_drvgroups_have_drvtype:
            fields += ('drvtype', )
        for i, field in enumerate(fields):
            self.__setattr__(field, data[i])
        self.gpios_pins = gpios_pins
        self.fullname = 'drive_' + self.name
        self.has_matching_pin = (
            soc.soc_combine_pin_drvgroup and
            (len(gpios_pins) == 1) and
            (gpios_pins[0].shortname == self.name)
        )
        if self.has_matching_pin:
            gpios_pins[0].set_per_pin_drive_group(self)

class MipiPadCtrlGroup(ReprDictObj):
    def __init__(self, soc, data, gpios_pins):
        fields = ('name', 'reg', 'bit', 'f0', 'f1')
        for i, field in enumerate(fields):
            self.__setattr__(field, data[i])
        self.gpios_pins = gpios_pins
        self.fullname = 'mipi_pad_ctrl_' + self.name
        self.funcs = (self.f0, self.f1)

class Function(ReprDictObj):
    def __init__(self, name):
        self.name = name
        self.pins = []

    def _add_pin(self, pin):
        self.pins.append(pin)

class Soc(TopLevelParsedObj):
    def __init__(self, name, data):
        copy_attrs = (
            ('kernel_copyright_years', 2014),
            ('kernel_author', 'NVIDIA'),
            ('uboot_copyright_years', 2014),
            ('soc_has_io_clamping', None),
            ('soc_combine_pin_drvgroup', None),
            ('soc_rsvd_base', None),
            ('soc_drvgroups_have_drvtype', None),
            ('soc_drvgroups_have_hsm', None),
            ('soc_drvgroups_have_lpmd', None),
            ('soc_drvgroups_have_parked', None),
            ('soc_drvgroups_have_schmitt', None),
            ('soc_pins_all_have_od', None),
            ('soc_pins_all_have_parked', None),
            ('soc_pins_all_have_schmitt', None),
            ('soc_pins_have_drvtype', None),
            ('soc_pins_have_e_io_hv', None),
            ('soc_pins_have_hsm', None),
            ('soc_pins_have_ior', None),
            ('soc_pins_have_od', None),
            ('soc_pins_have_rcv_sel', None),
            ('soc_pins_have_schmitt', None),
            ('soc_drv_reg_base', None),
            ('soc_mipipadctrl_reg_base', 0),
            ('soc_einput_b', None),
            ('soc_odrain_b', None),
            ('soc_parked_bit', None),
        )
        TopLevelParsedObj.__init__(self, name, copy_attrs, data)

        gpios_pins_by_fullname = {}
        gpios_pins_by_shortname = {}

        self._gpios = []
        for gpiodata in data['gpios']:
            gpio = Gpio(self, gpiodata)
            gpios_pins_by_fullname[gpio.fullname] = gpio
            gpios_pins_by_shortname[gpio.shortname] = gpio
            self._gpios.append(gpio)

        self._pins = []
        for num, pindata in enumerate(data['pins']):
            pin = Pin(self, num, pindata)
            gpios_pins_by_fullname[pin.fullname] = pin
            gpios_pins_by_shortname[pin.shortname] = pin
            self._pins.append(pin)

        self._drive_groups = []
        for drive_group in data['drive_groups']:
            names = data['drive_group_pins'][drive_group[0]]
            gpios_pins = []
            for name in names:
                gpios_pins.append(gpios_pins_by_fullname[name])
            self._drive_groups.append(DriveGroup(self, drive_group, gpios_pins))

        self._mipi_pad_ctrl_groups = []
        for group in data.get('mipi_pad_ctrl_groups', []):
            names = data['mipi_pad_ctrl_group_pins'][group[0]]
            gpios_pins = []
            for name in names:
                gpios_pins.append(gpios_pins_by_fullname[name])
            self._mipi_pad_ctrl_groups.append(MipiPadCtrlGroup(self, group, gpios_pins))

        self._generate_derived_data()

    def _generate_derived_data(self):
        self._gpios_by_num = sorted(self._gpios, key=lambda gpio: gpio.num)
        self._pins_by_num = sorted(self._pins, key=lambda pin: pin.num)
        self._gpios_pins_by_num = sorted(self._gpios + self._pins, key=lambda gpio_pin: gpio_pin.sort_by_num_key())
        gpios_with_reg = [gpio for gpio in self._gpios if gpio.reg]
        pins_with_reg = [pin for pin in self._pins if pin.reg]
        self._gpios_by_reg = sorted(gpios_with_reg, key=lambda gpio: gpio.reg)
        self._pins_by_reg = sorted(pins_with_reg, key=lambda pin: pin.reg)
        self._gpios_pins_by_reg = sorted(gpios_with_reg + pins_with_reg, key=lambda gpio_pin: gpio_pin.reg)
        self._drive_groups_by_reg = sorted(self._drive_groups, key=lambda drive_group: drive_group.reg)
        self._drive_groups_by_alpha = sorted(self._drive_groups, key=lambda drive_group: drive_group.name)
        self._mipi_pad_ctrl_groups_by_reg = sorted(self._mipi_pad_ctrl_groups, key=lambda group: group.reg)
        self._mipi_pad_ctrl_groups_by_alpha = sorted(self._mipi_pad_ctrl_groups, key=lambda group: group.name)

        functions = collections.OrderedDict()
        for pin in self._gpios + self._pins:
            if not pin.reg:
                continue
            for func in pin.funcs:
                if func not in functions:
                    functions[func] = Function(func)
                functions[func]._add_pin(pin)
        for group in self._mipi_pad_ctrl_groups:
            for func in (group.f0, group.f1):
                if func not in functions:
                    functions[func] = Function(func)
        self._functions = functions.values()
        self._functions_by_alpha = sorted(self._functions, key=lambda f: f.name)

    def gpios_by_conf_order(self):
        return self._gpios

    def gpios_by_num(self):
        return self._gpios_by_num

    def gpios_by_reg(self):
        return self._gpios_by_reg

    def pins_by_conf_order(self):
        return self._pins

    def pins_by_num(self):
        return self._pins_by_num

    def pins_by_reg(self):
        return self._pins_by_reg

    def gpios_pins_by_num(self):
        return self._gpios_pins_by_num

    def gpios_pins_by_reg(self):
        return self._gpios_pins_by_reg

    def gpio_or_pin_by_name(self, name):
        for gpio_pin in self._gpios_pins_by_num:
            if name == gpio_pin.signal:
                return gpio_pin
            if name == 'gpio_p' + gpio_pin.gpio:
                return gpio_pin
        return None

    def gpio_or_pin_by_fullname(self, name):
        for gpio_pin in self._gpios_pins_by_num:
            if name == gpio_pin.fullname:
                return gpio_pin
        return None

    def drive_groups_by_conf_order(self):
        return self._drive_groups

    def drive_groups_by_reg(self):
        return self._drive_groups_by_reg

    def drive_groups_by_alpha(self):
        return self._drive_groups_by_alpha

    def mipi_pad_ctrl_groups_by_conf_order(self):
        return self._mipi_pad_ctrl_groups

    def mipi_pad_ctrl_groups_by_reg(self):
        return self._mipi_pad_ctrl_groups_by_reg

    def mipi_pad_ctrl_groups_by_alpha(self):
        return self._mipi_pad_ctrl_groups_by_alpha

    def mipi_pad_ctrl_group_by_name(self, name):
        for mipi_pad_ctrl in self._mipi_pad_ctrl_groups:
            if name == mipi_pad_ctrl.name:
                return mipi_pad_ctrl
        return None

    def functions(self):
        return self._functions

    def functions_by_alpha(self):
        return self._functions_by_alpha

def load_soc(socname):
    fn = os.path.join(configs_dir, socname + '.soc')
    d = {}
    with open(fn) as f:
        code = compile(f.read(), fn, 'exec')
        exec(code, globals(), d)
    return Soc(socname, d)
tegra-pinmux-scripts-master
tegra_pmx_soc_parser.py
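# Illustrative sketch for tegra_pmx_soc_parser.py above (an addition, not
# from the repository): Tegra GPIO names encode a bank (one or two letters)
# and an index 0-7, and _gpio_number() flattens that to bank * 8 + index,
# with two-letter banks offset past the 26 single-letter banks. A standalone
# equivalent:

def demo_gpio_number(gpion):
    if len(gpion) == 2:
        bank = ord(gpion[0]) - ord('a')
        index = ord(gpion[1]) - ord('0')
    else:
        bank = ord(gpion[0]) - ord('a') + 26
        index = ord(gpion[2]) - ord('0')
    return bank * 8 + index

if __name__ == '__main__':
    print(demo_gpio_number('a0'))   # -> 0
    print(demo_gpio_number('i6'))   # -> 70
    print(demo_gpio_number('bb3'))  # -> 219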
#!/usr/bin/env python3

# Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

import argparse
import os
import os.path
import sys
import tegra_pmx_soc_parser
from tegra_pmx_utils import *

dbg = False

parser = argparse.ArgumentParser(description='Create a U-Boot pinctrl ' +
    'driver from an SoC config file')
parser.add_argument('--debug', action='store_true', help='Turn on debugging prints')
parser.add_argument('soc', help='SoC to process')
parser.add_argument('header', help='Header file to generate')
parser.add_argument('cfile', help='C file to generate')
args = parser.parse_args()
if args.debug:
    dbg = True
if dbg:
    print(args)

soc = tegra_pmx_soc_parser.load_soc(args.soc)

f = open(args.header, 'wt')

print('''\
/*
 * Copyright (c) %s, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#ifndef _%s_PINMUX_H_
#define _%s_PINMUX_H_

enum pmux_pingrp {
''' % (soc.uboot_copyright_years, soc.name.upper(), soc.name.upper()), file=f, end='')

last_reg = 0x3000 - 4
for pin in soc.gpios_pins_by_reg():
    if pin.reg != last_reg + 4:
        eqs = ' = (0x%x / 4)' % (pin.reg - 0x3000)
    else:
        eqs = ''
    print('\tPMUX_PINGRP_%s%s,' % (pin.fullname.upper(), eqs), file=f)
    last_reg = pin.reg

print('''\
\tPMUX_PINGRP_COUNT,
};

enum pmux_drvgrp {
''', file=f, end='')

last_reg = soc.soc_drv_reg_base - 4
for group in soc.drive_groups_by_reg():
    if group.reg != last_reg + 4:
        eqs = ' = (0x%x / 4)' % (group.reg - soc.soc_drv_reg_base)
    else:
        eqs = ''
    print('\tPMUX_DRVGRP_%s%s,' % (group.fullname.upper()[6:], eqs), file=f)
    last_reg = group.reg

print('''\
\tPMUX_DRVGRP_COUNT,
};
''', file=f, end='')

if len(soc.mipi_pad_ctrl_groups_by_reg()):
    print('''\

enum pmux_mipipadctrlgrp {
''', file=f, end='')
    last_reg = soc.soc_mipipadctrl_reg_base - 4
    for group in soc.mipi_pad_ctrl_groups_by_reg():
        if group.reg != last_reg + 4:
            eqs = ' = (0x%x / 4)' % (group.reg - soc.soc_mipipadctrl_reg_base)
        else:
            eqs = ''
        print('\tPMUX_MIPIPADCTRLGRP_%s%s,' % (group.name.upper(), eqs), file=f)
    print('''\
\tPMUX_MIPIPADCTRLGRP_COUNT,
};
''', file=f, end='')

print('''\

enum pmux_func {
\tPMUX_FUNC_DEFAULT,
''', file=f, end='')
for func in soc.functions_by_alpha():
    if func.name.startswith('rsvd'):
        continue
    print('\tPMUX_FUNC_%s,' % func.name.upper(), file=f)
print('''\
\tPMUX_FUNC_RSVD%d,
\tPMUX_FUNC_RSVD%d,
\tPMUX_FUNC_RSVD%d,
\tPMUX_FUNC_RSVD%d,
\tPMUX_FUNC_COUNT,
};

''' % tuple(soc.soc_rsvd_base + i for i in range(4)), file=f, end='')

print('#define TEGRA_PMX_SOC_DRV_GROUP_BASE_REG 0x%x' % soc.soc_drv_reg_base, file=f)
if len(soc.mipi_pad_ctrl_groups_by_reg()):
    print('#define TEGRA_PMX_SOC_MIPIPADCTRL_BASE_REG 0x%x' % soc.soc_mipipadctrl_reg_base, file=f)
if soc.soc_has_io_clamping:
    print('#define TEGRA_PMX_SOC_HAS_IO_CLAMPING', file=f)
print('#define TEGRA_PMX_SOC_HAS_DRVGRPS', file=f)
if len(soc.mipi_pad_ctrl_groups_by_reg()):
    print('#define TEGRA_PMX_SOC_HAS_MIPI_PAD_CTRL_GRPS', file=f)
if soc.soc_drvgroups_have_lpmd:
    print('#define TEGRA_PMX_GRPS_HAVE_LPMD', file=f)
if soc.soc_drvgroups_have_schmitt:
    print('#define TEGRA_PMX_GRPS_HAVE_SCHMT', file=f)
if soc.soc_drvgroups_have_hsm:
    print('#define TEGRA_PMX_GRPS_HAVE_HSM', file=f)
print('#define TEGRA_PMX_PINS_HAVE_E_INPUT', file=f)
print('#define TEGRA_PMX_PINS_HAVE_LOCK', file=f)
if soc.soc_pins_have_od:
    print('#define TEGRA_PMX_PINS_HAVE_OD', file=f)
if soc.soc_pins_have_ior:
    print('#define TEGRA_PMX_PINS_HAVE_IO_RESET', file=f)
if soc.soc_pins_have_rcv_sel:
    print('#define TEGRA_PMX_PINS_HAVE_RCV_SEL', file=f)
if soc.soc_pins_have_e_io_hv:
    print('#define TEGRA_PMX_PINS_HAVE_E_IO_HV', file=f)

print('''\

#include <asm/arch-tegra/pinmux.h>

#endif /* _%s_PINMUX_H_ */
''' % soc.name.upper(), file=f, end='')

f.close()

f = open(args.cfile, 'wt')

print('''\
/*
 * Copyright (c) %s, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <asm/io.h>
#include <asm/arch/pinmux.h>

#define PIN(pin, f0, f1, f2, f3) \\
\t{ \\
\t\t.funcs = { \\
\t\t\tPMUX_FUNC_##f0, \\
\t\t\tPMUX_FUNC_##f1, \\
\t\t\tPMUX_FUNC_##f2, \\
\t\t\tPMUX_FUNC_##f3, \\
\t\t}, \\
\t}

#define PIN_RESERVED {}

static const struct pmux_pingrp_desc %s_pingroups[] = {
''' % (soc.uboot_copyright_years, soc.name), file=f, end='')

headings = ('pin', 'f0', 'f1', 'f2', 'f3')

rows = []
last_reg = 0
for pin in soc.gpios_pins_by_reg():
    if pin.reg != last_reg + 4:
        if last_reg:
            for i in range(((pin.reg - last_reg) // 4) - 1):
                rows.append('\tPIN_RESERVED,',)
        rows.append('\t/* Offset 0x%x */' % pin.reg,)
    last_reg = pin.reg
    row = (pin.fullname.upper(),)
    for i in range(4):
        row += (pin.funcs[i].upper(),)
    rows.append(row)
dump_c_table(headings, 'PIN', rows, file=f)

print('''\
};
const struct pmux_pingrp_desc *tegra_soc_pingroups = %s_pingroups;
''' % soc.name, file=f, end='')

if len(soc.mipi_pad_ctrl_groups_by_reg()):
    print('''\

#define MIPIPADCTRL_GRP(grp, f0, f1) \\
\t{ \\
\t\t.funcs = { \\
\t\t\tPMUX_FUNC_##f0, \\
\t\t\tPMUX_FUNC_##f1, \\
\t\t}, \\
\t}

#define MIPIPADCTRL_RESERVED {}

static const struct pmux_mipipadctrlgrp_desc %s_mipipadctrl_groups[] = {
''' % soc.name, file=f, end='')

    headings = ('pin', 'f0', 'f1')

    rows = []
    last_reg = 0
    for grp in soc.mipi_pad_ctrl_groups_by_reg():
        if grp.reg != last_reg + 4:
            if last_reg:
                for i in range(((grp.reg - last_reg) // 4) - 1):
                    rows.append('\tMIPIPADCTRL_RESERVED,',)
            rows.append('\t/* Offset 0x%x */' % grp.reg,)
        last_reg = grp.reg
        row = (grp.name.upper(),)
        for i in range(2):
            row += (grp.funcs[i].upper(),)
        rows.append(row)
    dump_c_table(headings, 'MIPIPADCTRL_GRP', rows, file=f)

    print('''\
};
const struct pmux_mipipadctrlgrp_desc *tegra_soc_mipipadctrl_groups = %s_mipipadctrl_groups;
''' % soc.name, file=f, end='')

f.close()
tegra-pinmux-scripts-master
soc-to-uboot-driver.py
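# Illustrative sketch for soc-to-uboot-driver.py above (an addition, not from
# the repository): the enum emitters only attach an explicit '= (offset / 4)'
# initializer when a register gap breaks the expected +4 sequence. The same
# logic over hypothetical names and register offsets:

if __name__ == '__main__':
    base = 0x3000
    last_reg = base - 4
    for name, reg in (('a', 0x3000), ('b', 0x3004), ('c', 0x3010)):
        eqs = '' if reg == last_reg + 4 else ' = (0x%x / 4)' % (reg - base)
        print('\tPMUX_PINGRP_%s%s,' % (name.upper(), eqs))
        last_reg = reg
    # 'c' gets '= (0x10 / 4)' because offsets 0x3008/0x300c are unused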
# Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

import sys

def gen_tab_padding_to(curpos, targetpos):
    curpos -= 1
    targetpos -= 1
    if (targetpos & 7):
        raise Exception(str(targetpos) + ' is not a TAB stop')
    left = targetpos - curpos
    tabs = (left + 7) // 8
    return '\t' * tabs

def emit_tab_padding_to(curpos, targetpos):
    print(gen_tab_padding_to(curpos, targetpos), end='')

def emit_padded_field(s, maxl, skip_comma=False, right_justify=False, file=sys.stdout):
    pad = (' ' * (maxl - len(s)))
    if right_justify:
        print(pad, file=file, end='')
    print(s, file=file, end='')
    if skip_comma:
        return
    print(', ', file=file, end='')
    if not right_justify:
        print(pad, file=file, end='')

def emit_define(define, value, valuecol):
    s = '#define ' + define
    print(s, end='')
    emit_tab_padding_to(len(s) + 1, valuecol)
    print(value)

def gen_wrapped_c_macro_header(macro, params):
    intro = '#define %s(' % macro
    intro_space = ' ' * len(intro)
    s = ''
    l = intro
    for i, param in enumerate(params):
        if i != 0:
            prefix = ' '
        else:
            prefix = ''
        if i == len(params) - 1:
            suffix = ')'
        else:
            suffix = ','
        if (len(l) + len(prefix) + len(param) + len(suffix)) < 71:
            l += prefix + param + suffix
        else:
            s += l + '\n'
            l = intro_space + param + suffix
    if l:
        s += l
    s += '\n'
    return s

def len_evaluating_tabs(s):
    l = 0
    for c in s:
        if c == '\t':
            l = (l + 8) & ~7
        else:
            l += 1
    return l

def append_aligned_tabs_indent_with_tabs(s, min_slashpos):
    lines = s.split('\n')
    if lines[-1].strip() == '':
        del lines[-1]

    # This is intended to translate leading spaces to TABs, so that callers
    # don't have to work out the right number of TABs to use. It also would
    # affect intra-line space, but there is none in practice so far.
    for i, l in enumerate(lines):
        lines[i] = l.replace(' ', '\t')

    max_len = 0
    for l in lines:
        max_len = max(max_len, len_evaluating_tabs(l))
    max_len = max(max_len, min_slashpos)
    tabpos = (max_len + 7) & ~7
    for i, l in enumerate(lines):
        lines[i] += gen_tab_padding_to(len_evaluating_tabs(l) + 1, tabpos + 1) + '\\'
    return '\n'.join(lines)

def yn_to_boolean(s):
    return {'N': False, 'Y': True}[s]

def boolean_to_yn(val):
    return {True: 'Y', False: 'N'}[val]

def boolean_to_c_bool(val):
    return {True: 'true', False: 'false'}[val]

def dump_table(heading_prefix, heading_suffix, headings, row_prefix, row_suffix, rows, col_widths, file, right_justifies):
    num_cols = 0
    if headings:
        num_cols = max(num_cols, len(headings))
    if col_widths:
        num_cols = max(num_cols, len(col_widths))
    for row in rows:
        if type(row) == str:
            continue
        num_cols = max(num_cols, len(row))

    widths = [0] * num_cols

    if col_widths:
        for col, val in enumerate(col_widths):
            if not val:
                continue
            widths[col] = val

    if headings:
        for col, val in enumerate(headings):
            if col_widths and col_widths[col]:
                continue
            widths[col] = len(val)

    for row in rows:
        if type(row) == str:
            continue
        for col, val in enumerate(row):
            if col_widths and col_widths[col]:
                continue
            widths[col] = max(widths[col], len(val))

    if headings:
        print(heading_prefix, end='', file=file)
        for col, heading in enumerate(headings):
            emit_padded_field(heading, widths[col], skip_comma = (col == len(headings) - 1), file=file)
        print(heading_suffix, file=file)

    for row in rows:
        if type(row) == str:
            print(row, file=file)
        else:
            print(row_prefix, end='', file=file)
            force_comma = len(row) == 1
            for col, val in enumerate(row):
                if right_justifies:
                    right_justify = right_justifies[col]
                else:
                    right_justify = False
                emit_padded_field(val, widths[col], skip_comma = (col == len(row) - 1) and not force_comma, file=file, right_justify=right_justify)
            print(row_suffix, file=file)

def dump_py_table(headings, rows, col_widths=None, file=sys.stdout, right_justifies=None):
    dump_table(' #', '', headings, ' (', '),', rows, col_widths, file, right_justifies)

def dump_c_table(headings, macro_name, rows, col_widths=None, file=sys.stdout, right_justifies=None, row_indent='\t'):
    dump_table(row_indent + '/* ' + ' ' * (len(macro_name) - 2), ' */', headings, row_indent + macro_name + '(', '),', rows, col_widths, file, right_justifies)

def spreadsheet_col_name_to_num(col):
    if len(col) == 2:
        return ((ord(col[0]) - ord('A') + 1) * 26) + (ord(col[1]) - ord('A'))
    elif len(col) == 1:
        return ord(col[0]) - ord('A')
    else:
        raise Exception('Bad column name ' + col)

def rsvd_0base_to_1base(f):
    if not f.startswith('rsvd'):
        return f
    n = int(f[4:])
    n += 1
    return 'rsvd' + str(n)
tegra-pinmux-scripts-master
tegra_pmx_utils.py
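# Usage sketch for tegra_pmx_utils.py above (an illustrative addition, not
# from the repository): dump_c_table() renders rows as aligned macro
# invocations preceded by a column-name comment.

from tegra_pmx_utils import dump_c_table

if __name__ == '__main__':
    dump_c_table(('port', 'pin', 'init_val'), 'GPIO_INIT',
                 [('I', '6', 'OUT1'), ('BB', '3', 'IN')])
    # Emits something like:
    #   /*        port, pin, init_val */
    #   GPIO_INIT(I,    6,   OUT1),
    #   GPIO_INIT(BB,   3,   IN),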
#!/usr/bin/python3

# Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

import argparse
import collections
import re
import sys
from tegra_pmx_utils import *

dbg = False

re_siggpio = re.compile('^(.*)_p([a-z]+[0-7])$')
re_copyright = re.compile(' \* Copyright \(c\) (.*), NVIDIA CORPORATION. All rights reserved.')
re_pin_gpio = re.compile('#define TEGRA_PIN_([A-Z0-9_]+)\s*_GPIO\((\d+)\)')
re_pin_pin = re.compile('#define TEGRA_PIN_([A-Z0-9_]+)\s*_PIN\((\d+)\)')
re_module_author = re.compile('MODULE_AUTHOR\("(.*)"\);')
re_close_brace = re.compile('};')
re_pins_array_start = re.compile('static const struct pinctrl_pin_desc tegra\d+_pins\[\] = \{')
re_pins_array_entry = re.compile('\s+PINCTRL_PIN\(TEGRA_PIN_([A-Z0-9_]+), "([A-Z0-9_ ]+)"\),')
re_group_pins_array_start = re.compile('static const unsigned ([a-z0-9_]+)_pins\[\] = \{')
re_group_pins_array_entry = re.compile('\s+TEGRA_PIN_([A-Z0-9_]+),?')
re_mux_array_start = re.compile('enum tegra_mux(_dt)? {')
re_mux_array_entry = re.compile('\s+TEGRA_MUX_([A-Z0-9_]+),')
re_groups_array_start = re.compile('static const struct tegra_pingroup (tegra\d+)_groups\[\] = \{')
re_groups_array_group_entry = re.compile('\s*PINGROUP\((.*)\),')
re_groups_array_drvgroup_entry = re.compile('\s*DRV_PINGROUP\((.*)\),')

num_pin_gpios = 0
pins = collections.OrderedDict()
groups = collections.OrderedDict()
functions = []
soc = None
module_author = None
copyright_years = None

soc_vars = {
    'soc_pins_have_od': ['tegra30', 'tegra114', 'tegra124', 'tegra210'],
    'soc_pins_all_have_od': ['tegra210'],
    'soc_pins_have_ior': ['tegra30', 'tegra114', 'tegra124'],
    'soc_pins_have_rcv_sel': ['tegra114', 'tegra124'],
    'soc_pins_have_schmitt': ['tegra210'],
    'soc_pins_all_have_schmitt': ['tegra210'],
    'soc_pins_have_hsm': ['tegra210',],
    'soc_pins_have_drvtype': ['tegra210',],
    'soc_pins_have_e_io_hv': ['tegra210',],
    'soc_drvgroups_have_hsm': ['tegra30', 'tegra114', 'tegra124'],
    'soc_drvgroups_have_schmitt': ['tegra30', 'tegra114', 'tegra124'],
    'soc_drvgroups_have_lpmd': ['tegra30', 'tegra114', 'tegra124'],
    'soc_drvgroups_have_drvtype': ['tegra114', 'tegra124'],
}

def set_soc(new_soc):
    global soc
    soc = new_soc
    for var, socs in soc_vars.items():
        globals()[var] = (soc in socs)

state = None
re_state_end = None
state_group = None

def set_state(s, e):
    if dbg:
        print("SET STATE: " + repr(s))
    global state
    state = s
    global re_state_end
    re_state_end = e

def state_pins_array(l):
    m = re_pins_array_entry.match(l)
    if not m:
        raise Exception('pins array entry cannot be parsed')
    if dbg:
        print('pin desc:', repr(m.group(1)), repr(m.group(2)))
    pin = m.group(1).lower()
    signal = pins[pin]['signal']
    if pins[pin]['is_gpio']:
        gpio = pins[pin]['gpio']
    else:
        gpio = ''
    pindesc = signal
    if signal and gpio:
        pindesc += ' '
    if gpio:
        pindesc += 'p'
        pindesc += gpio
    if m.group(2) != pindesc.upper():
        raise Exception('pin ' + pin + ' pindesc mismatch')

def state_group_pins_array(l):
    m = re_group_pins_array_entry.match(l)
    if not m:
        raise Exception('group pins array entry cannot be parsed')
    if dbg:
        print('pin entry:', repr(m.group(1)))
    groups[state_group]['pins'].append(m.group(1).lower())

def state_mux_array(l):
    m = re_mux_array_entry.match(l)
    if not m:
        raise Exception('mux array entry cannot be parsed')
    if dbg:
        print('function:', repr(m.group(1)))
    functions.append(m.group(1).lower())

def state_groups_array(l):
    m = re_groups_array_group_entry.match(l)
    if m:
        args = re.split('\s*,\s*', m.group(1))
        (group, f0, f1, f2, f3, reg) = args[0:6]
        argbase = 6
        if soc_pins_have_od and ((not soc_pins_all_have_od) or (soc == 'tegra210')):
            od = args[argbase]
            argbase += 1
            if soc == 'tegra210':
                if od != 'Y':
                    raise Exception('od not expected value for ' + group)
        if soc_pins_have_ior:
            ior = args[argbase]
            argbase += 1
        if soc_pins_have_rcv_sel:
            rcv_sel = args[argbase]
            argbase += 1
        if soc_pins_have_schmitt and ((not soc_pins_all_have_schmitt) or (soc == 'tegra210')):
            schmitt = args[argbase]
            argbase += 1
            if soc == 'tegra210':
                if schmitt != '12':
                    raise Exception('schmitt not expected value for ' + group)
        if soc_pins_have_hsm:
            hsm = args[argbase]
            argbase += 1
        if soc_pins_have_drvtype:
            drvtype = args[argbase]
            argbase += 1
        if soc_pins_have_e_io_hv:
            e_io_hv = args[argbase]
            argbase += 1
        group = group.lower()
        f0 = f0.lower()
        f1 = f1.lower()
        f2 = f2.lower()
        f3 = f3.lower()
        if not group in groups:
            raise Exception('invalid group', group)
        for f in (f0, f1, f2, f3):
            if not f.lower() in functions:
                raise Exception('invalid function', f)
        reg = int(reg, 0)
        entry = {
            'is_drive': False,
            'funcs': (f0, f1, f2, f3),
            'reg': reg,
        }
        if soc_pins_have_od and not soc_pins_all_have_od:
            od = yn_to_boolean(od)
            entry['od'] = od
        if soc_pins_have_ior:
            ior = yn_to_boolean(ior)
            entry['ior'] = ior
        if soc_pins_have_rcv_sel:
            rcv_sel = yn_to_boolean(rcv_sel)
            entry['rcv_sel'] = rcv_sel
        if soc_pins_have_schmitt and not soc_pins_all_have_schmitt:
            schmitt_b = int(schmitt_b)
            entry['schmitt_b'] = schmitt_b
        if soc_pins_have_hsm:
            hsm = (hsm != '-1')
            entry['hsm'] = hsm
        if soc_pins_have_drvtype:
            drvtype = yn_to_boolean(drvtype)
            entry['drvtype'] = drvtype
        if soc_pins_have_e_io_hv:
            e_io_hv = yn_to_boolean(e_io_hv)
            entry['e_io_hv'] = e_io_hv
        if dbg:
            print('group entry:', repr(entry))
        groups[group].update(entry)
        return

    m = re_groups_array_drvgroup_entry.match(l)
    if m:
        args = re.split('\s*,\s*', m.group(1))
        (group, reg) = args[0:2]
        argbase = 2
        if soc_drvgroups_have_hsm:
            hsm_b = args[argbase]
            argbase += 1
        if soc_drvgroups_have_schmitt:
            schmitt_b = args[argbase]
            argbase += 1
        if soc_drvgroups_have_lpmd:
            lpmd_b = args[argbase]
            argbase += 1
        (drvdn_b, drvdn_w, drvup_b, drvup_w, slwr_b, slwr_w, slwf_b, slwf_w) = args[argbase:(argbase + 8)]
        argbase += 8
        if soc_drvgroups_have_drvtype:
            drvtype = args[argbase]
            argbase += 1
        group = 'drive_' + group
        if not group in groups:
            raise Exception('invalid group', group)
        reg = int(reg, 0)
        drvdn_b = int(drvdn_b, 0)
        drvdn_w = int(drvdn_w, 0)
        drvup_b = int(drvup_b, 0)
        drvup_w = int(drvup_w, 0)
        slwr_b = int(slwr_b, 0)
        slwr_w = int(slwr_w, 0)
        slwf_b = int(slwf_b, 0)
        slwf_w = int(slwf_w, 0)
        entry = {
            'is_drive': True,
            'reg': reg,
            'drvdn_b': drvdn_b,
            'drvdn_w': drvdn_w,
            'drvup_b': drvup_b,
            'drvup_w': drvup_w,
            'slwr_b': slwr_b,
            'slwr_w': slwr_w,
            'slwf_b': slwf_b,
            'slwf_w': slwf_w,
        }
        if soc_drvgroups_have_hsm:
            hsm_b = int(hsm_b, 0)
            entry['hsm_b'] = hsm_b
        if soc_drvgroups_have_schmitt:
            schmitt_b = int(schmitt_b, 0)
            entry['schmitt_b'] = schmitt_b
        if soc_drvgroups_have_lpmd:
            lpmd_b = int(lpmd_b, 0)
            entry['lpmd_b'] = lpmd_b
        if soc_drvgroups_have_drvtype:
            drvtype = yn_to_boolean(drvtype)
            entry['drvtype'] = drvtype
        if dbg:
            print('group entry:', repr(entry))
        groups[group].update(entry)
        return

    raise Exception('groups array entry cannot be parsed')

def state_global(l):
    global num_pin_gpios
    global state_group
    global copyright_years
    global module_author

    m = re_pins_array_start.match(l)
    if m:
        set_state(state_pins_array, re_close_brace)
        return

    m = re_group_pins_array_start.match(l)
    if m:
        state_group = m.group(1)
        if dbg:
            print('group pins array:', repr(state_group))
        groups[state_group] = {'pins': []}
        set_state(state_group_pins_array, re_close_brace)
        return

    m = re_mux_array_start.match(l)
    if m:
        set_state(state_mux_array, re_close_brace)
        return

    m = re_groups_array_start.match(l)
    if m:
        set_soc(m.group(1))
        if dbg:
            print('groups array (soc %s):' % soc)
        set_state(state_groups_array, re_close_brace)
        return

    m = re_copyright.match(l)
    if m:
        copyright_years = m.group(1)
        return

    m = re_pin_gpio.match(l)
    if m:
        group = m.group(1).lower()
        gpioid = m.group(2)
        m = re_siggpio.match(group)
        if m:
            signal = m.group(1)
            gpio = m.group(2)
        else:
            signal = ''
            gpio = group[1:]
        entry = {
            'is_gpio': True,
            'signal': signal,
            'gpio': gpio,
            'id': int(gpioid),
        }
        if dbg:
            print('gpio:', repr(group), repr(entry))
        pins[group] = entry
        num_pin_gpios += 1
        return

    m = re_pin_pin.match(l)
    if m:
        entry = {
            'is_gpio': False,
            'signal': m.group(1).lower(),
            'id': int(m.group(2)),
        }
        if dbg:
            print('pin:', repr(m.group(1)), repr(entry))
        pins[m.group(1).lower()] = entry
        return

    m = re_module_author.match(l)
    if m:
        module_author = m.group(1)
        return

def set_global_state():
    set_state(state_global, None)
    global state_group
    state_group = None

def main():
    parser = argparse.ArgumentParser(description='Create a pinmux .soc file ' +
        'from kernel pinctrl source code')
    parser.add_argument('--debug', action='store_true', help='Turn on debugging prints')
    args = parser.parse_args()
    if args.debug:
        global dbg
        dbg = True
    if dbg:
        print(args)

    set_global_state()
    for l in sys.stdin.readlines():
        if dbg:
            print('<<<', repr(l))
        l = re.sub('/\*.*?\*/', '', l)
        if not l.strip():
            continue
        if re_state_end and re_state_end.match(l):
            set_global_state()
            continue
        state(l)

    if dbg:
        print('pins:')
        print(repr(pins))
        print()
        print('groups:')
        print(repr(groups))
        print()
        print('functions:')
        print(repr(functions))

    for group in groups:
        if not 'is_drive' in groups[group]:
            raise Exception('group ' + group + ' not parsed in group array')
        if groups[group]['is_drive']:
            continue
        if len(groups[group]['pins']) != 1:
            raise Exception('group ' + group + ' has more than 1 pin')
        if groups[group]['pins'][0] != group:
            raise Exception('group ' + group + ' pin list does not match')

    for pin in pins:
        if pin not in groups:
            groups[pin] = {'is_drive': False}
            continue
        for (i, function) in enumerate(groups[pin]['funcs']):
            if function.startswith('RSVD') and function != 'RSVD' + str(i + 1):
                raise Exception('pin ' + pin + ' RSVD func ' + str(i) + ' mismatch')

    print('kernel_copyright_years =', repr(copyright_years))
    print('kernel_author =', repr(module_author))
    print()
    for var in sorted(soc_vars.keys()):
        print('%s = %s' % (var, repr(globals()[var])))
    print()

    def dump_pins(dump_gpios):
        headings = ('name',)
        if dump_gpios:
            headings += ('gpio',)
        headings += ('reg', 'f0', 'f1', 'f2', 'f3')
        if soc_pins_have_od and not soc_pins_all_have_od:
            headings += ('od',)
        if soc_pins_have_ior:
            headings += ('ior',)
        if soc_pins_have_rcv_sel:
            headings += ('rcv_sel',)
        if soc_pins_have_schmitt and not soc_pins_all_have_schmitt:
            headings += ('schmitt_b',)
        if soc_pins_have_hsm:
            headings += ('hsm',)
        if soc_pins_have_drvtype:
            headings += ('drvtype',)
        if soc_pins_have_e_io_hv:
            headings += ('e_io_hv',)

        rows = []
        for pin in pins:
            p = pins[pin]
            if p['is_gpio'] != dump_gpios:
                continue
            if pin not in groups:
                continue
            g = groups[pin]
            if g['is_drive']:
                continue
            if dump_gpios:
                signal = p['signal']
                gpio = p['gpio']
            else:
                signal = pin
                gpio = None
            row = (repr(signal),)
            if dump_gpios:
                row += (repr(gpio),)
            if 'reg' in g:
                row += ('0x%x' % g['reg'],)
                for func in g['funcs']:
                    row += (repr(func),)
                if soc_pins_have_od and not soc_pins_all_have_od:
                    row += (repr(g['od']),)
                if soc_pins_have_ior:
                    row += (repr(g['ior']),)
                if soc_pins_have_rcv_sel:
                    row += (repr(g['rcv_sel']),)
                if soc_pins_have_schmitt and not soc_pins_all_have_schmitt:
                    row += (repr(g['schmitt_b']),)
                if soc_pins_have_hsm:
                    row += (repr(g['hsm']),)
                if soc_pins_have_drvtype:
                    row += (repr(g['drvtype']),)
                if soc_pins_have_e_io_hv:
                    row += (repr(g['e_io_hv']),)
            rows.append(row)
        dump_py_table(headings, rows)

    print('gpios = (')
    dump_pins(True)
    print(')')
    print()
    print('pins = (')
    dump_pins(False)
    print(')')
    print()
    print('drive_groups = (')
    heading = ' #name, r'
    if soc_drvgroups_have_hsm:
        heading += ', hsm_b'
    if soc_drvgroups_have_schmitt:
        heading += ', schmitt_b'
    if soc_drvgroups_have_lpmd:
        heading += ', lpmd_b'
    heading += ', drvdn_b, drvdn_w, drvup_b, drvup_w, slwr_b, slwr_w, slwf_b, slwf_w'
    if soc_drvgroups_have_drvtype:
        heading += ', drvtype'
    print(heading)

    rows = []
    for group in groups:
        g = groups[group]
        if not groups[group]['is_drive']:
            continue
        row = (
            repr(group[6:]),
            '0x%x' % g['reg'],
        )
        if soc_drvgroups_have_hsm:
            row += (repr(g['hsm_b']),)
        if soc_drvgroups_have_schmitt:
            row += (repr(g['schmitt_b']),)
        if soc_drvgroups_have_lpmd:
            row += (repr(g['lpmd_b']),)
        row += (
            repr(g['drvdn_b']),
            repr(g['drvdn_w']),
            repr(g['drvup_b']),
            repr(g['drvup_w']),
            repr(g['slwr_b']),
            repr(g['slwr_w']),
            repr(g['slwf_b']),
            repr(g['slwf_w']),
        )
        if soc_drvgroups_have_drvtype:
            row += (repr(g['drvtype']),)
        rows.append(row)
    dump_py_table(None, rows)
    print(')')
    print()

    print('drive_group_pins = {')
    for group in groups:
        g = groups[group]
        if not groups[group]['is_drive']:
            continue
        print(' \'%s\': (' % group[6:])
        for pin in g['pins']:
            print(' \'%s\',' % pin)
        print(' ),')
    print('}')

main()
tegra-pinmux-scripts-master
kernel-pinctrl-driver-to-soc.py
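# Usage sketch for kernel-pinctrl-driver-to-soc.py above (an illustrative
# addition, not from the repository): the script is a line-oriented state
# machine filtering an existing kernel pinctrl driver on stdin, e.g.
#
#     ./kernel-pinctrl-driver-to-soc.py < pinctrl-tegra124.c > configs/tegra124.soc
#
# A stripped-down model of its dispatch loop:

import re

def demo_state_machine(lines):
    state, end = 'global', None
    for l in lines:
        l = re.sub(r'/\*.*?\*/', '', l)  # drop C comments, as above
        if not l.strip():
            continue
        if end and end.match(l):
            state, end = 'global', None  # closing brace ends the section
            continue
        if state == 'global' and l.startswith('enum tegra_mux'):
            state, end = 'mux', re.compile('};')
        elif state == 'mux':
            print('function:', l.strip().rstrip(','))

if __name__ == '__main__':
    demo_state_machine(['enum tegra_mux {\n', '\tTEGRA_MUX_SDMMC1,\n', '};\n'])
    # -> function: TEGRA_MUX_SDMMC1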
#!/usr/bin/env python3

# Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

import argparse
import os
import os.path
import sys
import tegra_pmx_soc_parser
from tegra_pmx_utils import *

dbg = False

parser = argparse.ArgumentParser(description='Create a kernel pinctrl ' +
    'driver from an SoC config file')
parser.add_argument('--debug', action='store_true', help='Turn on debugging prints')
parser.add_argument('soc', help='SoC to process')
args = parser.parse_args()
if args.debug:
    dbg = True
if dbg:
    print(args)

soc = tegra_pmx_soc_parser.load_soc(args.soc)

print('''\
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Pinctrl data for the NVIDIA %s pinmux
''' % soc.titlename, end = '')
if soc.kernel_author != 'NVIDIA':
    print(' *')
    print(' * Author: %s' % soc.kernel_author)
print('''\
 *
 * Copyright (c) %s, NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>

#include "pinctrl-tegra.h"

/*
 * Most pins affected by the pinmux can also be GPIOs. Define these first.
 * These must match how the GPIO driver names/numbers its pins.
 */
''' % soc.kernel_copyright_years, end='')

# Do not add any more exceptions here; new SoCs should be formatted correctly
if soc.name == 'tegra30':
    define_column = 41
else:
    define_column = 49

emit_define('_GPIO(offset)', '(offset)', define_column)
print()

last_gpio_define = None
for gpio in soc.gpios_by_num():
    emit_define(gpio.define, '_GPIO(%d)' % gpio.num, define_column)
    last_gpio_define = gpio.define

print()
print('/* All non-GPIO pins follow */')
emit_define('NUM_GPIOS', '(%s + 1)' % last_gpio_define, define_column)
emit_define('_PIN(offset)', '(NUM_GPIOS + (offset))', define_column)
print()
print('/* Non-GPIO pins */')
for pin in soc.pins_by_num():
    emit_define(pin.define, '_PIN(%d)' % pin.num, define_column)

print()
print('static const struct pinctrl_pin_desc %s_pins[] = {' % soc.name)
for pin in soc.gpios_pins_by_num():
    print('\tPINCTRL_PIN(%s, "%s"),' % (pin.define, pin.desc))
print('};')

for pin in soc.gpios_pins_by_num():
    if not pin.reg:
        continue
    print('''\
static const unsigned %s_pins[] = {
\t%s,
};
''' % (pin.fullname, pin.define), end='')

# Do not add any more exceptions here; new SoCs should be formatted correctly
if soc.name == 'tegra30':
    f = soc.drive_groups_by_alpha
else:
    f = soc.drive_groups_by_reg
for group in f():
    if group.has_matching_pin:
        continue
    print('''\
static const unsigned %s_pins[] = {
''' % group.fullname, end='')
    for pin in group.gpios_pins:
        print('\t%s,' % pin.define)
    print('};')

for group in soc.mipi_pad_ctrl_groups_by_reg():
    print('''\
static const unsigned %s_pins[] = {
''' % group.fullname, end='')
    for pin in group.gpios_pins:
        print('\t%s,' % pin.define)
    print('};')

print('''\
enum tegra_mux {
''', end='')
for func in soc.functions_by_alpha():
    print('\tTEGRA_MUX_%s,' % func.name.upper())
print('''\
};

#define FUNCTION(fname) \\
\t{ \\
\t\t.name = #fname, \\
\t}

static struct tegra_function %s_functions[] = {
''' % soc.name, end='')
for func in soc.functions_by_alpha():
    print('\tFUNCTION(%s),' % func.name)

drv_pingroup_val = "0x%x" % soc.soc_drv_reg_base
print('''\
};

#define DRV_PINGROUP_REG_A %(drv_pingroup_val)s /* bank 0 */
#define PINGROUP_REG_A 0x3000 /* bank 1 */
''' % globals(), end='')
if len(soc.mipi_pad_ctrl_groups_by_reg()):
    print('#define MIPI_PAD_CTRL_PINGROUP_REG_A 0x820 /* bank 2 */')
print('''\
#define DRV_PINGROUP_REG(r) ((r) - DRV_PINGROUP_REG_A)
#define PINGROUP_REG(r) ((r) - PINGROUP_REG_A)
''', end='')
if len(soc.mipi_pad_ctrl_groups_by_reg()):
    print('''\
#define MIPI_PAD_CTRL_PINGROUP_REG_Y(r) ((r) - MIPI_PAD_CTRL_PINGROUP_REG_A)
''', end='')
print('''\
#define PINGROUP_BIT_Y(b) (b)
#define PINGROUP_BIT_N(b) (-1)
''', end='')

params = ['pg_name', 'f0', 'f1', 'f2', 'f3', 'r']
if soc.soc_pins_have_od and not soc.soc_pins_all_have_od:
    params += ['od',]
if soc.soc_pins_have_ior:
    params += ['ior',]
if soc.soc_pins_have_rcv_sel:
    params += ['rcv_sel',]
if soc.soc_pins_have_hsm:
    params += ['hsm',]
if soc.soc_pins_have_schmitt and not soc.soc_pins_all_have_schmitt:
    params += ['schmitt',]
if soc.soc_pins_have_drvtype:
    params += ['drvtype',]
if soc.soc_pins_have_e_io_hv:
    params += ['e_io_hv',]

drive_params = ['drvdn_b', 'drvdn_w', 'drvup_b', 'drvup_w', 'slwr_b', 'slwr_w', 'slwf_b', 'slwf_w']

if soc.soc_combine_pin_drvgroup:
    params += ['rdrv',]
    params += drive_params

s = gen_wrapped_c_macro_header('PINGROUP', params)

einput_val = str(soc.soc_einput_b)

if soc.soc_pins_have_od:
    if soc.soc_pins_all_have_od:
        odrain_val = str(soc.soc_odrain_b)
    else:
        odrain_val = 'PINGROUP_BIT_##od(%s)' % str(soc.soc_odrain_b)
else:
    odrain_val = '-1'

if soc.soc_pins_have_ior:
    ioreset_val = 'PINGROUP_BIT_##ior(8)'
else:
    ioreset_val = '-1'

# rcv_sel and e_io_hv are different names for essentially the same thing.
# Re-use the field to save space
if soc.soc_pins_have_rcv_sel:
    rcv_sel_val = 'PINGROUP_BIT_##rcv_sel(9),'
elif soc.soc_pins_have_e_io_hv:
    rcv_sel_val = 'PINGROUP_BIT_##e_io_hv(10),'
else:
    rcv_sel_val = '-1,'

s += '''\
{
 .name = #pg_name,
 .pins = pg_name##_pins,
 .npins = ARRAY_SIZE(pg_name##_pins),
 .funcs = {
  TEGRA_MUX_##f0,
  TEGRA_MUX_##f1,
  TEGRA_MUX_##f2,
  TEGRA_MUX_##f3,
 },
 .mux_reg = PINGROUP_REG(r),
 .mux_bank = 1,
 .mux_bit = 0,
 .pupd_reg = PINGROUP_REG(r),
 .pupd_bank = 1,
 .pupd_bit = 2,
 .tri_reg = PINGROUP_REG(r),
 .tri_bank = 1,
 .tri_bit = 4,
 .einput_bit = %(einput_val)s,
 .odrain_bit = %(odrain_val)s,
 .lock_bit = 7,
 .ioreset_bit = %(ioreset_val)s,
 .rcv_sel_bit = %(rcv_sel_val)s
''' % globals()
if soc.soc_pins_have_hsm:
    s += '''\
 .hsm_bit = PINGROUP_BIT_##hsm(9),
'''
if soc.soc_pins_have_schmitt:
    if soc.soc_pins_all_have_schmitt:
        s += '''\
 .schmitt_bit = 12,
'''
    else:
        s += '''\
 .schmitt_bit = PINGROUP_BIT_##schmitt(12),
'''
if soc.soc_pins_have_drvtype:
    s += '''\
 .drvtype_bit = PINGROUP_BIT_##drvtype(13),
'''
if soc.soc_combine_pin_drvgroup:
    # FIXME: if !soc.soc_pins_have_hsm, then we should include hsm_bit
    # here. Same for schmitt and drvtype. However, no SoCs have that
    # combination at present, so I don't feel like cluttering the code.
    # We should also handle !soc_drvgroups_have_lpmd.
    s += '''\
 .drv_reg = DRV_PINGROUP_REG(rdrv),
 .drv_bank = 0,
 .lpmd_bit = -1,
 .drvdn_bit = drvdn_b,
 .drvdn_width = drvdn_w,
 .drvup_bit = drvup_b,
 .drvup_width = drvup_w,
 .slwr_bit = slwr_b,
 .slwr_width = slwr_w,
 .slwf_bit = slwf_b,
 .slwf_width = slwf_w,
'''
else:
    s += '''\
 .drv_reg = -1,
'''
if soc.soc_pins_all_have_parked:
    s += '''\
 .parked_bitmask = BIT(%s),
''' % (soc.soc_parked_bit)
else:
    s += '''\
 .parked_bitmask = 0,
'''
s = append_aligned_tabs_indent_with_tabs(s, 72)
print(s)
print('''\
}
''', end='')

params = ['pg_name', 'r']
if soc.soc_drvgroups_have_hsm:
    params += ['hsm_b',]
if soc.soc_drvgroups_have_schmitt:
    params += ['schmitt_b',]
if soc.soc_drvgroups_have_lpmd:
    params += ['lpmd_b',]
if soc.soc_drvgroups_have_parked:
    params += ['prk_mask',]
params += drive_params
if soc.soc_drvgroups_have_drvtype:
    params += ['drvtype',]

s = gen_wrapped_c_macro_header('DRV_PINGROUP', params)

if soc.soc_drvgroups_have_hsm:
    hsm_bit_val = 'hsm_b'
else:
    hsm_bit_val = '-1'

if soc.soc_drvgroups_have_schmitt:
    schmitt_bit_val = 'schmitt_b'
else:
    schmitt_bit_val = '-1'

if soc.soc_drvgroups_have_lpmd:
    lpmd_bit_val = 'lpmd_b'
else:
    lpmd_bit_val = '-1'

if soc.soc_drvgroups_have_parked:
    parked_bit_mask = 'prk_mask'
else:
    parked_bit_mask = '0'

if soc.soc_drvgroups_have_drvtype:
    drvtype_bit_val = 'PINGROUP_BIT_##drvtype(6),'
else:
    drvtype_bit_val = '-1,'

s += '''\
{
 .name = "drive_" #pg_name,
 .pins = drive_##pg_name##_pins,
 .npins = ARRAY_SIZE(drive_##pg_name##_pins),
 .mux_reg = -1,
 .pupd_reg = -1,
 .tri_reg = -1,
 .einput_bit = -1,
 .odrain_bit = -1,
 .lock_bit = -1,
 .ioreset_bit = -1,
 .rcv_sel_bit = -1,
 .drv_reg = DRV_PINGROUP_REG(r),
 .drv_bank = 0,
 .hsm_bit = %(hsm_bit_val)s,
 .schmitt_bit = %(schmitt_bit_val)s,
 .lpmd_bit = %(lpmd_bit_val)s,
 .drvdn_bit = drvdn_b,
 .drvdn_width = drvdn_w,
 .drvup_bit = drvup_b,
 .drvup_width = drvup_w,
 .slwr_bit = slwr_b,
 .slwr_width = slwr_w,
 .slwf_bit = slwf_b,
 .slwf_width = slwf_w,
 .drvtype_bit = %(drvtype_bit_val)s
 .parked_bitmask = %(parked_bit_mask)s,
''' % globals()
s = append_aligned_tabs_indent_with_tabs(s, 72)
print(s)
print('''\
}
''', end='')

if len(soc.mipi_pad_ctrl_groups_by_reg()):
    print('''\
#define MIPI_PAD_CTRL_PINGROUP(pg_name, r, b, f0, f1) \\
\t{ \\
\t\t.name = "mipi_pad_ctrl_" #pg_name, \\
\t\t.pins = mipi_pad_ctrl_##pg_name##_pins, \\
\t\t.npins = ARRAY_SIZE(mipi_pad_ctrl_##pg_name##_pins), \\
\t\t.funcs = { \\
\t\t\tTEGRA_MUX_ ## f0, \\
\t\t\tTEGRA_MUX_ ## f1, \\
\t\t\tTEGRA_MUX_RSVD3, \\
\t\t\tTEGRA_MUX_RSVD4, \\
\t\t}, \\
\t\t.mux_reg = MIPI_PAD_CTRL_PINGROUP_REG_Y(r), \\
\t\t.mux_bank = 2, \\
\t\t.mux_bit = b, \\
\t\t.pupd_reg = -1, \\
\t\t.tri_reg = -1, \\
\t\t.einput_bit = -1, \\
\t\t.odrain_bit = -1, \\
\t\t.lock_bit = -1, \\
\t\t.ioreset_bit = -1, \\
\t\t.rcv_sel_bit = -1, \\
\t\t.drv_reg = -1, \\
\t}
''', end='')

print('''\
static const struct tegra_pingroup %s_groups[] = {
''' % soc.name, end='')

# Do not add any more exceptions here; new SoCs should be formatted correctly
if soc.name == 'tegra30':
    max_gpio_pin_len = max([len(pin.fullname) for pin in soc.gpios_pins_by_reg()])
    max_f0_len = 12
    max_f1_len = 12
    max_f2_len = 12
    max_f3_len = 12
    yn_width = 1
    col_widths = (max_gpio_pin_len, max_f0_len, max_f1_len, max_f2_len, max_f3_len, 6, yn_width, yn_width)
    if soc.soc_pins_have_rcv_sel:
        col_widths += (yn_width,)
    right_justifies = None
elif soc.name in ('tegra114', 'tegra124'):
    max_gpio_pin_len = max([len(pin.fullname) for pin in soc.gpios_pins_by_reg()])
    max_f0_len = 10
    max_f1_len = 10
    max_f2_len = 12
    max_f3_len = 11
    yn_width = 2
    col_widths = (max_gpio_pin_len, max_f0_len, max_f1_len, max_f2_len, max_f3_len, 6, yn_width, yn_width)
    if soc.soc_pins_have_rcv_sel:
        col_widths += (yn_width,)
    right_justifies = (False, False, False, False, False, False, False, True, True, True)
else:
    col_widths = None
    right_justifies = None

headings = ['pg_name', 'f0', 'f1', 'f2', 'f3', 'r']
if soc.soc_pins_have_od and not soc.soc_pins_all_have_od:
    headings += ['od',]
if soc.soc_pins_have_ior:
    headings += ['ior',]
if soc.soc_pins_have_rcv_sel:
    headings += ['rcv_sel',]
if soc.soc_pins_have_hsm:
    headings += ['hsm',]
if soc.soc_pins_have_schmitt and not soc.soc_pins_all_have_schmitt:
    headings += ['schmitt',]
if soc.soc_pins_have_drvtype:
    headings += ['drvtype',]
if soc.soc_pins_have_e_io_hv:
    headings += ['e_io_hv',]
if soc.soc_combine_pin_drvgroup:
    headings += ['rdrv',]
    headings += drive_params

rows = []
# Do not add any more exceptions here; new SoCs should be formatted correctly
if soc.name == 'tegra30':
    f = soc.gpios_pins_by_num
else:
    f = soc.gpios_pins_by_reg
for pin in f():
    if not pin.reg:
        continue
    row = (
        pin.fullname,
        pin.f0.upper(),
        pin.f1.upper(),
        pin.f2.upper(),
        pin.f3.upper(),
        '0x%x' % pin.reg,
    )
    if soc.soc_pins_have_od and not soc.soc_pins_all_have_od:
        row += (boolean_to_yn(pin.od),)
    if soc.soc_pins_have_ior:
        row += (boolean_to_yn(pin.ior),)
    if soc.soc_pins_have_rcv_sel:
        row += (boolean_to_yn(pin.rcv_sel),)
    if soc.soc_pins_have_hsm:
        row += (boolean_to_yn(pin.hsm),)
    if soc.soc_pins_have_schmitt and not soc.soc_pins_all_have_schmitt:
        row += (boolean_to_yn(pin.schmitt),)
    if soc.soc_pins_have_drvtype:
        row += (boolean_to_yn(pin.drvtype),)
    if soc.soc_pins_have_e_io_hv:
        row += (boolean_to_yn(pin.e_io_hv),)
    if soc.soc_combine_pin_drvgroup:
        if pin.per_pin_drive_group:
            row += (
                '0x%x' % pin.per_pin_drive_group.reg,
                repr(pin.per_pin_drive_group.drvdn_b),
                repr(pin.per_pin_drive_group.drvdn_w),
                repr(pin.per_pin_drive_group.drvup_b),
                repr(pin.per_pin_drive_group.drvup_w),
                repr(pin.per_pin_drive_group.slwr_b),
                repr(pin.per_pin_drive_group.slwr_w),
                repr(pin.per_pin_drive_group.slwf_b),
                repr(pin.per_pin_drive_group.slwf_w),
            )
        else:
            row += ('-1', '-1', '-1', '-1', '-1', '-1', '-1', '-1', '-1')
    rows.append(row)
dump_c_table(headings, 'PINGROUP', rows, col_widths=col_widths, right_justifies=right_justifies)

# Do not add any more exceptions here; new SoCs should be formatted correctly
if soc.name != 'tegra30':
    print()

max_drvgrp_len = max([len(drvgroup.name) for drvgroup in soc.drive_groups_by_reg()])

print('\t/* pg_name, r, ', end='')
if soc.soc_drvgroups_have_hsm:
    print('hsm_b, ', end='')
if soc.soc_drvgroups_have_schmitt:
    print('schmitt_b, ', end='')
if soc.soc_drvgroups_have_lpmd:
    print('lpmd_b, ', end='')
if soc.soc_drvgroups_have_parked:
    print('prk_mask, ', end='')
print('drvdn_b, drvdn_w, drvup_b, drvup_w, slwr_b, slwr_w, slwf_b, slwf_w', end='')
if soc.soc_drvgroups_have_drvtype:
    print(', drvtype', end='')
print(' */')

rows = []
# Do not add any more exceptions here; new SoCs should be formatted correctly
if soc.name == 'tegra30':
    f = soc.drive_groups_by_alpha
else:
    f = soc.drive_groups_by_reg
# Do not add any more exceptions here; new SoCs should be formatted correctly
if soc.name in ('tegra30', 'tegra114', 'tegra124'):
    col_widths = (0, 0, 2, 2, 2, 3, 2, 3, 2, 3, 2, 3, 2, 2)
    right_justifies = (False, False, True, True, True, True, True, True, True, True, True, True, True, True)
else:
    col_widths = None
    right_justifies = None
for drvgroup in f():
    if drvgroup.has_matching_pin:
        continue
    row = (
        drvgroup.name,
        '0x%x' % drvgroup.reg,
    )
    if soc.soc_drvgroups_have_hsm:
        row += (repr(drvgroup.hsm_b),)
    if soc.soc_drvgroups_have_schmitt:
        row += (repr(drvgroup.schmitt_b),)
    if soc.soc_drvgroups_have_lpmd:
        row += (repr(drvgroup.lpmd_b),)
    if soc.soc_drvgroups_have_parked:
        if (drvgroup.prk_mask != -1):
            row += (hex(drvgroup.prk_mask),)
        else:
            row += (repr(drvgroup.prk_mask),)
    row += (
        repr(drvgroup.drvdn_b),
        repr(drvgroup.drvdn_w),
        repr(drvgroup.drvup_b),
        repr(drvgroup.drvup_w),
        repr(drvgroup.slwr_b),
        repr(drvgroup.slwr_w),
        repr(drvgroup.slwf_b),
        repr(drvgroup.slwf_w),
    )
    if soc.soc_drvgroups_have_drvtype:
        row += (boolean_to_yn(drvgroup.drvtype),)
    rows.append(row)
dump_c_table(None, 'DRV_PINGROUP', rows, col_widths=col_widths, right_justifies=right_justifies)

if len(soc.mipi_pad_ctrl_groups_by_reg()):
    print()
    headings = ('pg_name', 'r', 'b', 'f0', 'f1')
    rows = []
    for group in soc.mipi_pad_ctrl_groups_by_reg():
        row = (
            group.name,
            '0x%x' % group.reg,
            repr(group.bit),
            group.f0.upper(),
            group.f1.upper(),
        )
        rows.append(row)
    dump_c_table(headings, 'MIPI_PAD_CTRL_PINGROUP', rows)

socvars = {
    'author': soc.kernel_author,
    'soc': soc.name,
    'usoc': soc.titlename,
    'hsm_in_mux': boolean_to_c_bool(soc.soc_pins_have_hsm),
    'schmitt_in_mux': boolean_to_c_bool(soc.soc_pins_have_schmitt),
    'drvtype_in_mux': boolean_to_c_bool(soc.soc_pins_have_drvtype),
}

print('''\
};

static const struct tegra_pinctrl_soc_data %(soc)s_pinctrl = {
\t.ngpios = NUM_GPIOS,
\t.gpio_compatible = "nvidia,%(soc)s-gpio",
\t.pins = %(soc)s_pins,
\t.npins = ARRAY_SIZE(%(soc)s_pins),
\t.functions = %(soc)s_functions,
\t.nfunctions = ARRAY_SIZE(%(soc)s_functions),
\t.groups = %(soc)s_groups,
\t.ngroups = ARRAY_SIZE(%(soc)s_groups),
\t.hsm_in_mux = %(hsm_in_mux)s,
\t.schmitt_in_mux = %(schmitt_in_mux)s,
\t.drvtype_in_mux = %(drvtype_in_mux)s,
};

static int %(soc)s_pinctrl_probe(struct platform_device *pdev)
{
\treturn tegra_pinctrl_probe(pdev, &%(soc)s_pinctrl);
}

static const struct of_device_id %(soc)s_pinctrl_of_match[] = {
\t{ .compatible = "nvidia,%(soc)s-pinmux", },
\t{ },
};

static struct platform_driver %(soc)s_pinctrl_driver = {
\t.driver = {
\t\t.name = "%(soc)s-pinctrl",
\t\t.of_match_table = %(soc)s_pinctrl_of_match,
\t},
\t.probe = %(soc)s_pinctrl_probe,
};

static int __init
%(soc)s_pinctrl_init(void) { return platform_driver_register(&%(soc)s_pinctrl_driver); } arch_initcall(%(soc)s_pinctrl_init); ''' % socvars, end='')
tegra-pinmux-scripts-master
soc-to-kernel-pinctrl-driver.py
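The generator above leans on helpers defined earlier in the script (emit_define, gen_wrapped_c_macro_header, append_aligned_tabs_indent_with_tabs, dump_c_table). As a rough illustration of the idea behind dump_c_table, here is a hypothetical, simplified stand-in, not the script's actual implementation, which additionally honors explicit col_widths and per-column right_justifies:

# Hypothetical, simplified sketch of a dump_c_table-style helper; the real
# helper lives earlier in this script and supports fixed column widths and
# right-justified columns.
def dump_c_table_sketch(headings, macro, rows):
    # Optional comment row naming the macro arguments
    if headings:
        print('\t/* %s */' % ', '.join(headings))
    # Pad every cell to its column's widest entry so the table aligns
    widths = [max(len(row[i]) for row in rows) for i in range(len(rows[0]))]
    for row in rows:
        cells = ', '.join(cell.ljust(w) for cell, w in zip(row, widths))
        print('\t%s(%s),' % (macro, cells.rstrip()))

# Example with made-up Tegra-style rows:
dump_c_table_sketch(
    ('pg_name', 'f0', 'f1', 'r'),
    'PINGROUP',
    [('sdmmc1_clk_pz0', 'SDMMC1', 'RSVD1', '0x3000'),
     ('sdmmc1_cmd_pz1', 'SDMMC1', 'RSVD1', '0x3004')],
)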
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from setuptools import setup

setup()
modulus-launch-main
setup.py
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__version__ = "0.3.0a0"
modulus-launch-main
modulus/launch/__init__.py
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
modulus-launch-main
modulus/launch/config/__init__.py
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import glob
import re
import torch
import modulus

from typing import Union, List, NewType, Dict
from pathlib import Path
from torch.optim.lr_scheduler import _LRScheduler
from torch.cuda.amp import GradScaler

from modulus.distributed import DistributedManager
from modulus.utils.capture import _StaticCapture
from modulus.launch.logging import PythonLogger

optimizer = NewType("optimizer", torch.optim)
scheduler = NewType("scheduler", _LRScheduler)
scaler = NewType("scaler", GradScaler)

checkpoint_logging = PythonLogger("checkpoint")


def _get_checkpoint_filename(
    path: str,
    base_name: str = "checkpoint",
    index: Union[int, None] = None,
    saving: bool = False,
    model_type: str = "mdlus",
) -> str:
    """Gets the file name / path of a checkpoint

    This function has three different ways of providing a checkpoint filename:
    - If supplied an index, this will return the checkpoint name using that index.
    - If index is None and saving is false, this will get the checkpoint with the
      largest index (latest save).
    - If index is None and saving is true, it will return the next valid index file
      name, which is calculated by incrementing the largest checkpoint index found
      by one.

    Parameters
    ----------
    path : str
        Path to checkpoints
    base_name: str, optional
        Base file name, by default checkpoint
    index : Union[int, None], optional
        Checkpoint index, by default None
    saving : bool, optional
        Get filename for saving a new checkpoint, by default False
    model_type : str
        Model type, by default "mdlus" for Modulus models and "pt" for PyTorch models

    Returns
    -------
    str
        Checkpoint file name
    """
    # Get model parallel rank so all processes in the first model parallel group
    # can save their checkpoint. In the case without model parallelism,
    # model_parallel_rank should be the same as the process rank itself and
    # only rank 0 saves
    manager = DistributedManager()
    model_parallel_rank = (
        manager.group_rank("model_parallel") if manager.distributed else 0
    )

    # Input file name
    checkpoint_filename = str(
        Path(path).resolve() / f"{base_name}.{model_parallel_rank}"
    )

    # File extension for Modulus models or PyTorch models
    file_extension = ".mdlus" if model_type == "mdlus" else ".pt"

    # If epoch is provided load that file
    if index is not None:
        checkpoint_filename = checkpoint_filename + f".{index}"
        checkpoint_filename += file_extension
    # Otherwise try loading the latest epoch or rolling checkpoint
    else:
        file_names = []
        for fname in glob.glob(
            checkpoint_filename + "*" + file_extension, recursive=False
        ):
            file_names.append(Path(fname).name)

        if len(file_names) > 0:
            # If checkpoint from a null index save exists load that
            # This is the most likely line to error since it will fail with
            # invalid checkpoint names
            file_idx = [
                int(
                    re.sub(
                        f"^{base_name}.{model_parallel_rank}.|" + file_extension,
                        "",
                        fname,
                    )
                )
                for fname in file_names
            ]
            file_idx.sort()
            # If we are saving, index by 1 to get the next free file name
            if saving:
                checkpoint_filename = checkpoint_filename + f".{file_idx[-1]+1}"
            else:
                checkpoint_filename = checkpoint_filename + f".{file_idx[-1]}"
            checkpoint_filename += file_extension
        else:
            checkpoint_filename += ".0" + file_extension

    return checkpoint_filename


def _unique_model_names(
    models: List[torch.nn.Module],
) -> Dict[str, torch.nn.Module]:
    """Util to clean model names and index if repeat names, will also strip DDP
    wrappers if they exist.

    Parameters
    ----------
    model : List[torch.nn.Module]
        List of models to generate names for

    Returns
    -------
    Dict[str, torch.nn.Module]
        Dictionary of model names and respective modules
    """
    # Loop through provided models and set up base names
    model_dict = {}
    for model0 in models:
        if hasattr(model0, "module"):
            # Strip out DDP layer
            model0 = model0.module
        # Base name of model is meta.name unless pytorch model
        base_name = model0.__class__.__name__
        if isinstance(model0, modulus.models.Module):
            base_name = model0.meta.name
        # If we have multiple models of the same name, introduce another index
        if base_name in model_dict:
            model_dict[base_name].append(model0)
        else:
            model_dict[base_name] = [model0]

    # Set up unique model names if needed
    output_dict = {}
    for key, model in model_dict.items():
        if len(model) > 1:
            for i, model0 in enumerate(model):
                output_dict[key + str(i)] = model0
        else:
            output_dict[key] = model[0]

    return output_dict


def save_checkpoint(
    path: str,
    models: Union[torch.nn.Module, List[torch.nn.Module], None] = None,
    optimizer: Union[optimizer, None] = None,
    scheduler: Union[scheduler, None] = None,
    scaler: Union[scaler, None] = None,
    epoch: Union[int, None] = None,
) -> None:
    """Training checkpoint saving utility

    This will save a training checkpoint in the provided path following the file
    naming convention "checkpoint.{model parallel id}.{epoch/index}.mdlus". The
    load checkpoint method in Modulus core can then be used to read this file.

    Parameters
    ----------
    path : str
        Path to save the training checkpoint
    models : Union[torch.nn.Module, List[torch.nn.Module], None], optional
        A single or list of PyTorch models, by default None
    optimizer : Union[optimizer, None], optional
        Optimizer, by default None
    scheduler : Union[scheduler, None], optional
        Learning rate scheduler, by default None
    scaler : Union[scaler, None], optional
        AMP grad scaler. Will attempt to save one in static capture if none
        provided, by default None
    epoch : Union[int, None], optional
        Epoch checkpoint to load. If none this will save the checkpoint in the
        next valid index, by default None
    """
    # Create checkpoint directory if it does not exist
    if not Path(path).is_dir():
        checkpoint_logging.warning(
            f"Output directory {path} does not exist, will " "attempt to create"
        )
        Path(path).mkdir(parents=True, exist_ok=True)

    # == Saving model checkpoint ==
    if models:
        if not isinstance(models, list):
            models = [models]
        models = _unique_model_names(models)
        for name, model in models.items():
            # Get model type
            model_type = "mdlus" if isinstance(model, modulus.models.Module) else "pt"
            # Get full file path / name
            file_name = _get_checkpoint_filename(
                path, name, index=epoch, saving=True, model_type=model_type
            )
            # Save state dictionary
            if isinstance(model, modulus.models.Module):
                model.save(file_name)
            else:
                torch.save(model.state_dict(), file_name)
            checkpoint_logging.success(f"Saved model state dictionary: {file_name}")

    # == Saving training checkpoint ==
    checkpoint_dict = {}
    # Optimizer state dict
    if optimizer:
        checkpoint_dict["optimizer_state_dict"] = optimizer.state_dict()

    # Scheduler state dict
    if scheduler:
        checkpoint_dict["scheduler_state_dict"] = scheduler.state_dict()

    # Scaler state dict
    if scaler:
        checkpoint_dict["scaler_state_dict"] = scaler.state_dict()
    # Static capture is being used, save its grad scaler
    if _StaticCapture._amp_scalers:
        checkpoint_dict["static_capture_state_dict"] = _StaticCapture.state_dict()

    # Output file name
    output_filename = _get_checkpoint_filename(
        path, index=epoch, saving=True, model_type="pt"
    )
    if epoch:
        checkpoint_dict["epoch"] = epoch

    # Save checkpoint to file
    if bool(checkpoint_dict):
        torch.save(
            checkpoint_dict,
            output_filename,
        )
        checkpoint_logging.success(f"Saved training checkpoint: {output_filename}")


def load_checkpoint(
    path: str,
    models: Union[torch.nn.Module, List[torch.nn.Module], None] = None,
    optimizer: Union[optimizer, None] = None,
    scheduler: Union[scheduler, None] = None,
    scaler: Union[scaler, None] = None,
    epoch: Union[int, None] = None,
    device: Union[str, torch.device] = "cpu",
) -> int:
    """Checkpoint loading utility

    This loader is designed to be used with the save checkpoint utility in
    Modulus Launch. Given a path, this method will try to find a checkpoint and
    load state dictionaries into the provided training objects.

    Parameters
    ----------
    path : str
        Path to training checkpoint
    models : Union[torch.nn.Module, List[torch.nn.Module], None], optional
        A single or list of PyTorch models, by default None
    optimizer : Union[optimizer, None], optional
        Optimizer, by default None
    scheduler : Union[scheduler, None], optional
        Learning rate scheduler, by default None
    scaler : Union[scaler, None], optional
        AMP grad scaler, by default None
    epoch : Union[int, None], optional
        Epoch checkpoint to load. If none is provided this will attempt to load
        the checkpoint with the largest index, by default None
    device : Union[str, torch.device], optional
        Target device, by default "cpu"

    Returns
    -------
    int
        Loaded epoch
    """
    # Check if checkpoint directory exists
    if not Path(path).is_dir():
        checkpoint_logging.warning(
            f"Provided checkpoint directory {path} does not exist, skipping load"
        )
        return 0

    # == Loading model checkpoint ==
    if models:
        if not isinstance(models, list):
            models = [models]
        models = _unique_model_names(models)
        for name, model in models.items():
            # Get model type
            model_type = "mdlus" if isinstance(model, modulus.models.Module) else "pt"
            # Get full file path / name
            file_name = _get_checkpoint_filename(
                path, name, index=epoch, model_type=model_type
            )
            if not Path(file_name).exists():
                checkpoint_logging.error(
                    f"Could not find valid model file {file_name}, skipping load"
                )
                continue
            # Load state dictionary
            if isinstance(model, modulus.models.Module):
                model.load(file_name)
            else:
                model.load_state_dict(torch.load(file_name, map_location=device))
            checkpoint_logging.success(
                f"Loaded model state dictionary {file_name} to device {device}"
            )

    # == Loading training checkpoint ==
    checkpoint_filename = _get_checkpoint_filename(path, index=epoch, model_type="pt")
    if not Path(checkpoint_filename).is_file():
        checkpoint_logging.warning(
            "Could not find valid checkpoint file, skipping load"
        )
        return 0

    checkpoint_dict = torch.load(checkpoint_filename, map_location=device)
    checkpoint_logging.success(
        f"Loaded checkpoint file {checkpoint_filename} to device {device}"
    )

    # Optimizer state dict
    if optimizer and "optimizer_state_dict" in checkpoint_dict:
        optimizer.load_state_dict(checkpoint_dict["optimizer_state_dict"])
        checkpoint_logging.success("Loaded optimizer state dictionary")

    # Scheduler state dict
    if scheduler and "scheduler_state_dict" in checkpoint_dict:
        scheduler.load_state_dict(checkpoint_dict["scheduler_state_dict"])
        checkpoint_logging.success("Loaded scheduler state dictionary")

    # Scaler state dict (guard against a None scaler, matching the branches above)
    if scaler and "scaler_state_dict" in checkpoint_dict:
        scaler.load_state_dict(checkpoint_dict["scaler_state_dict"])
        checkpoint_logging.success("Loaded grad scaler state dictionary")

    if "static_capture_state_dict" in checkpoint_dict:
        _StaticCapture.load_state_dict(checkpoint_dict["static_capture_state_dict"])
        checkpoint_logging.success("Loaded static capture state dictionary")

    epoch = 0
    if "epoch" in checkpoint_dict:
        epoch = checkpoint_dict["epoch"]

    return epoch
modulus-launch-main
modulus/launch/utils/checkpoint.py
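The two public utilities above pair up: save_checkpoint writes per-model state files plus a training-state file, and load_checkpoint restores them and returns the epoch to resume from. A minimal usage sketch follows; the ./checkpoints path, the toy Linear model, and the two-epoch loop are illustrative placeholders, not part of the API.

import torch

from modulus.distributed import DistributedManager
from modulus.launch.utils import save_checkpoint, load_checkpoint

# Single-process setup; _get_checkpoint_filename queries the manager for ranks
DistributedManager.initialize()

model = torch.nn.Linear(8, 8)  # stands in for a real network
optimizer = torch.optim.Adam(model.parameters())

# Returns 0 when no checkpoint exists yet, else the saved epoch
start_epoch = load_checkpoint("./checkpoints", models=model, optimizer=optimizer)

for epoch in range(start_epoch, start_epoch + 2):
    # ... training steps would go here ...
    # Writes Linear.0.<epoch>.pt (model state) and checkpoint.0.<epoch>.pt
    # (optimizer + epoch state) under ./checkpoints
    save_checkpoint("./checkpoints", models=model, optimizer=optimizer, epoch=epoch)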
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .checkpoint import save_checkpoint, load_checkpoint
modulus-launch-main
modulus/launch/utils/__init__.py
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os

from termcolor import colored


class PythonLogger:
    """Simple console logger for DL training

    This is a WIP
    """

    def __init__(self, name: str = "launch"):
        self.logger = logging.getLogger(name)
        self.logger.handlers.clear()

        # Store the formatter on the instance so file_logging can reuse it
        self.formatter = logging.Formatter(
            "[%(asctime)s - %(name)s - %(levelname)s] %(message)s", datefmt="%H:%M:%S"
        )

        streamhandler = logging.StreamHandler()
        streamhandler.setFormatter(self.formatter)
        streamhandler.setLevel(logging.INFO)
        self.logger.addHandler(streamhandler)
        # Not sure if this works
        self.logger.setLevel(logging.DEBUG)
        self.logger.propagate = False  # Prevent parent logging

    def file_logging(self, file_name: str = "launch.log"):
        """Log to file"""
        if os.path.exists(file_name):
            os.remove(file_name)
        filehandler = logging.FileHandler(file_name)
        filehandler.setFormatter(self.formatter)
        filehandler.setLevel(logging.DEBUG)
        self.logger.addHandler(filehandler)

    def log(self, message: str):
        """Log message"""
        self.logger.info(message)

    def info(self, message: str):
        """Log info"""
        self.logger.info(colored(message, "light_blue"))

    def success(self, message: str):
        """Log success"""
        self.logger.info(colored(message, "light_green"))

    def warning(self, message: str):
        """Log warning"""
        self.logger.warning(colored(message, "light_yellow"))

    def error(self, message: str):
        """Log error"""
        self.logger.error(colored(message, "light_red"))


class RankZeroLoggingWrapper:
    """Wrapper class to only log from rank 0 process in distributed training."""

    def __init__(self, obj, dist):
        self.obj = obj
        self.dist = dist

    def __getattr__(self, name):
        attr = getattr(self.obj, name)
        if callable(attr):

            def wrapper(*args, **kwargs):
                if self.dist.rank == 0:
                    return attr(*args, **kwargs)
                else:
                    return None

            return wrapper
        else:
            return attr
modulus-launch-main
modulus/launch/logging/console.py
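For reference, a brief usage sketch of the console helpers above; the logger name, log file name, and messages are illustrative, and the DistributedManager setup assumes a Modulus environment.

from modulus.distributed import DistributedManager
from modulus.launch.logging import PythonLogger, RankZeroLoggingWrapper

logger = PythonLogger("train")
logger.file_logging("train.log")  # mirror output to a file as well
logger.info("starting run")       # colored INFO line on the console
logger.warning("learning rate not set, using default")

# In DDP runs, wrap the logger so only rank 0 actually emits messages
DistributedManager.initialize()
rank_zero_logger = RankZeroLoggingWrapper(logger, DistributedManager())
rank_zero_logger.success("printed once, not once per rank")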
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import torch
import mlflow

from typing import Literal, Tuple
from pathlib import Path
from datetime import datetime
from mlflow.tracking import MlflowClient
from mlflow.entities.run import Run
from modulus.distributed import DistributedManager

from .utils import create_ddp_group_tag
from .launch import LaunchLogger
from .console import PythonLogger

logger = PythonLogger("mlflow")


def initialize_mlflow(
    experiment_name: str,
    experiment_desc: str = None,
    run_name: str = None,
    run_desc: str = None,
    user_name: str = None,
    mode: Literal["offline", "online", "ngc"] = "offline",
    tracking_location: str = None,
    artifact_location: str = None,
) -> Tuple[MlflowClient, Run]:
    """Initializes MLFlow logging client and run.

    Parameters
    ----------
    experiment_name : str
        Experiment name
    experiment_desc : str, optional
        Experiment description, by default None
    run_name : str, optional
        Run name, by default None
    run_desc : str, optional
        Run description, by default None
    user_name : str, optional
        User name, by default None
    mode : str, optional
        MLFlow mode. Supports "offline", "online" and "ngc". Offline mode records
        logs to the local file system. Online mode is for remote tracking servers.
        NGC is a specific standardized setup for NGC runs, by default "offline"
    tracking_location : str, optional
        Tracking location for MLFlow. For offline mode this would be an absolute
        folder directory. For online mode this would be a http URI or databricks.
        For NGC, this option is ignored, by default "/<run directory>/mlruns"
    artifact_location : str, optional
        Optional separate artifact location, by default None

    Note
    ----
    For NGC mode, one needs to mount a NGC workspace / folder system with a
    metric folder at `/mlflow/mlflow_metrics/` and an artifact folder at
    `/mlflow/mlflow_artifacts/`.

    Note
    ----
    This will set up the Modulus Launch logger for MLFlow logging. Only one
    MLFlow logging client is supported with the Modulus Launch logger.

    Returns
    -------
    Tuple[MlflowClient, Run]
        Returns MLFlow logging client and active run object
    """
    dist = DistributedManager()

    if DistributedManager.is_initialized() and dist.distributed:
        group_name = create_ddp_group_tag(run_name)
        run_name = f"{run_name}-Process_{dist.rank}"
    else:
        start_time = datetime.now().astimezone()
        time_string = start_time.strftime("%m/%d/%y_%H-%M-%S")
        group_name = f"{run_name}_{time_string}"

    # Set default value here for Hydra
    if tracking_location is None:
        tracking_location = str(Path("./mlruns").absolute())

    # Set up URI (remote or local)
    if mode == "online":
        tracking_uri = tracking_location
    elif mode == "offline":
        if not tracking_location.startswith("file://"):
            tracking_location = "file://" + tracking_location
        tracking_uri = tracking_location
    elif mode == "ngc":
        if not Path("/mlflow/mlflow_metrics").is_dir():
            raise IOError(
                "NGC MLFlow config selected but metrics folder '/mlflow/mlflow_metrics'"
                + " not found. Aborting MLFlow setup."
            )
        if not Path("/mlflow/mlflow_artifacts").is_dir():
            raise IOError(
                "NGC MLFlow config selected but artifact folder '/mlflow/mlflow_artifacts'"
                + " not found. Aborting MLFlow setup."
            )
        tracking_uri = "file:///mlflow/mlflow_metrics"
        artifact_location = "file:///mlflow/mlflow_artifacts"
    else:
        logger.warning(f"Unsupported MLFlow mode '{mode}' provided")
        tracking_uri = "file://" + str(Path("./mlruns").absolute())

    mlflow.set_tracking_uri(tracking_uri)
    client = MlflowClient()
    check_mlflow_logged_in(client)

    experiment = client.get_experiment_by_name(experiment_name)
    # If experiment does not exist create one
    if experiment is None:
        logger.info(f"No {experiment_name} experiment found, creating...")
        experiment_id = client.create_experiment(
            experiment_name, artifact_location=artifact_location
        )
        client.set_experiment_tag(experiment_id, "mlflow.note.content", experiment_desc)
    else:
        logger.success(f"Existing {experiment_name} experiment found")
        experiment_id = experiment.experiment_id

    # Create a run and set its tags
    run = client.create_run(
        experiment_id, tags={"mlflow.user": user_name}, run_name=run_name
    )
    client.set_tag(run.info.run_id, "mlflow.note.content", run_desc)
    start_time = datetime.now().astimezone()
    time_string = start_time.strftime("%m/%d/%y %H:%M:%S")
    client.set_tag(run.info.run_id, "date", time_string)
    client.set_tag(run.info.run_id, "host", os.uname()[1])
    if torch.cuda.is_available():
        client.set_tag(run.info.run_id, "gpu", torch.cuda.get_device_name(dist.device))
    client.set_tag(run.info.run_id, "group", group_name)
    run = client.get_run(run.info.run_id)

    # Set run instance in Modulus logger
    LaunchLogger.mlflow_run = run
    LaunchLogger.mlflow_client = client

    return client, run


def check_mlflow_logged_in(client: MlflowClient):
    """Checks to see if the MLFlow URI is functioning

    This isn't the best solution right now and overrides the http timeout. Can
    update if MLFlow use is increased.
    """
    logger.warning(
        "Checking MLFlow logging location is working (if this hangs it's not)"
    )
    t0 = os.environ.get("MLFLOW_HTTP_REQUEST_TIMEOUT", None)
    try:
        # Adjust http timeout to 5 seconds
        os.environ["MLFLOW_HTTP_REQUEST_TIMEOUT"] = str(max(int(t0), 5)) if t0 else "5"
        experiment = client.create_experiment("test")
        client.delete_experiment(experiment)
    except Exception as e:
        logger.error("Failed to validate MLFlow logging location works")
        raise e
    finally:
        # Restore the http request timeout
        if t0:
            os.environ["MLFLOW_HTTP_REQUEST_TIMEOUT"] = t0
        else:
            del os.environ["MLFLOW_HTTP_REQUEST_TIMEOUT"]
    logger.success("MLFlow logging location is working")
modulus-launch-main
modulus/launch/logging/mlflow.py
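A hedged sketch of wiring initialize_mlflow into a training script follows; the experiment and run names are placeholders, and offline mode keeps everything in ./mlruns so no tracking server is needed.

from modulus.distributed import DistributedManager
from modulus.launch.logging.mlflow import initialize_mlflow

DistributedManager.initialize()  # initialize_mlflow queries the manager

client, run = initialize_mlflow(
    experiment_name="example-experiment",      # placeholder name
    experiment_desc="sketch of MLFlow setup",  # placeholder description
    run_name="baseline",
    user_name="example-user",
    mode="offline",  # logs to ./mlruns on the local file system
)
# LaunchLogger.mlflow_run / mlflow_client are now set, so metrics logged
# through LaunchLogger land in this run.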
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Weights and Biases Routines and Utilities"""

import logging
import os
import wandb

from typing import Literal
from pathlib import Path
from datetime import datetime
from wandb import AlertLevel
from modulus.distributed import DistributedManager

from .utils import create_ddp_group_tag

DEFAULT_WANDB_CONFIG = "~/.netrc"
logger = logging.getLogger(__name__)

_WANDB_INITIALIZED = False


def initialize_wandb(
    project: str,
    entity: str,
    name: str = "train",
    group: str = None,
    sync_tensorboard: bool = False,
    save_code: bool = False,
    resume: str = None,
    config=None,
    mode: Literal["offline", "online", "disabled"] = "offline",
    results_dir: str = None,
):
    """Function to initialize the wandb client with the weights and biases server.

    Parameters
    ----------
    project : str
        Name of the project to sync data with
    entity : str
        Name of the wandb entity
    sync_tensorboard : bool, optional
        Sync tensorboard summary writer with wandb, by default False
    save_code : bool, optional
        Whether to push a copy of the code to the wandb dashboard, by default False
    name : str, optional
        Name of the task running, by default "train"
    group : str, optional
        Group name of the task running. Good to set for ddp runs, by default None
    resume : str, optional
        Sets the resuming behavior. Options: "allow", "must", "never", "auto" or
        None, by default None
    config : optional
        A dictionary-like object for saving inputs, like hyperparameters. If dict,
        argparse or absl.flags, it will load the key value pairs into the
        wandb.config object. If str, it will look for a yaml file by that name,
        by default None
    mode : str, optional
        Can be "offline", "online" or "disabled", by default "offline"
    results_dir : str, optional
        Output directory of the experiment, by default "/<run directory>/wandb"
    """
    # Set default value here for Hydra
    if results_dir is None:
        results_dir = str(Path("./wandb").absolute())

    wandb_dir = results_dir
    if DistributedManager.is_initialized() and DistributedManager().distributed:
        if group is None:
            group = create_ddp_group_tag()
        start_time = datetime.now().astimezone()
        time_string = start_time.strftime("%m/%d/%y_%H:%M:%S")
        wandb_name = f"{name}_Process_{DistributedManager().rank}_{time_string}"
    else:
        start_time = datetime.now().astimezone()
        time_string = start_time.strftime("%m/%d/%y_%H:%M:%S")
        wandb_name = f"{name}_{time_string}"

    if not os.path.exists(wandb_dir):
        os.makedirs(wandb_dir)

    wandb.init(
        project=project,
        entity=entity,
        sync_tensorboard=sync_tensorboard,
        name=wandb_name,
        resume=resume,
        config=config,
        mode=mode,
        dir=wandb_dir,
        group=group,
        save_code=save_code,
    )

    # Record that a run is active so is_wandb_initialized() and alert() work;
    # without this the module-level flag would never be set to True
    global _WANDB_INITIALIZED
    _WANDB_INITIALIZED = True


def alert(title, text, duration=300, level=0, is_master=True):
    """Send alert."""
    alert_levels = {0: AlertLevel.INFO, 1: AlertLevel.WARN, 2: AlertLevel.ERROR}
    if is_wandb_initialized() and is_master:
        wandb.alert(
            title=title, text=text, level=alert_levels[level], wait_duration=duration
        )


def is_wandb_initialized():
    """Check if wandb has been initialized."""
    global _WANDB_INITIALIZED
    return _WANDB_INITIALIZED
modulus-launch-main
modulus/launch/logging/wandb.py
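An illustrative call to the initializer above; the project and entity values are placeholders, and offline mode keeps all runs on the local file system under ./wandb.

from modulus.launch.logging import initialize_wandb

initialize_wandb(
    project="example-project",  # placeholder project
    entity="example-entity",    # placeholder entity
    name="train",
    mode="offline",             # no wandb server or login required
    config={"lr": 1e-3, "batch_size": 16},
)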
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .launch import LaunchLogger
from .console import PythonLogger, RankZeroLoggingWrapper
from .wandb import initialize_wandb
from .mlflow import initialize_mlflow
modulus-launch-main
modulus/launch/logging/__init__.py