repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
---|---|---|---|---|---|---|
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-build-py/libscanbuild/runner.py | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
""" This module is responsible to run the analyzer commands. """
import re
import os
import os.path
import tempfile
import functools
import subprocess
import logging
from libscanbuild.compilation import classify_source, compiler_language
from libscanbuild.clang import get_version, get_arguments
from libscanbuild.shell import decode
__all__ = ['run']
# To get good results from the static analyzer, certain compiler options
# must be omitted. The compiler flag filtering only affects the static
# analyzer run.
#
# Keys are the option names; values are the number of following arguments
# to skip.
IGNORED_FLAGS = {
'-c': 0, # compile option will be overwritten
'-fsyntax-only': 0, # static analyzer option will be overwritten
'-o': 1, # will set up own output file
# flags below are inherited from the perl implementation.
'-g': 0,
'-save-temps': 0,
'-install_name': 1,
'-exported_symbols_list': 1,
'-current_version': 1,
'-compatibility_version': 1,
'-init': 1,
'-e': 1,
'-seg1addr': 1,
'-bundle_loader': 1,
'-multiply_defined': 1,
'-sectorder': 3,
'--param': 1,
'--serialize-diagnostics': 1
}
def require(required):
""" Decorator for checking the required values in state.
    It checks the required attributes in the passed state and stops when
    any of those is missing. """
def decorator(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
for key in required:
if key not in args[0]:
raise KeyError('{0} not passed to {1}'.format(
key, function.__name__))
return function(*args, **kwargs)
return wrapper
return decorator
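# A minimal usage sketch (illustrative only, not part of the original
# module): 'require' raises KeyError when a key is missing from the state.
def _require_example():
    """ Hypothetical helper for illustration. """
    @require(['clang'])
    def probe(opts):
        return opts['clang']
    assert probe({'clang': 'clang'}) == 'clang'
    try:
        probe({})
        raise AssertionError('expected KeyError')
    except KeyError:
        pass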
@require(['command', # entry from compilation database
'directory', # entry from compilation database
'file', # entry from compilation database
'clang', # clang executable name (and path)
'direct_args', # arguments from command line
'force_debug', # kill non debug macros
'output_dir', # where generated report files shall go
'output_format', # it's 'plist' or 'html' or both
'output_failures']) # generate crash reports or not
def run(opts):
""" Entry point to run (or not) static analyzer against a single entry
of the compilation database.
    This complex task is decomposed into smaller methods which call each
    other in a chain. If the analysis is not possible, the given method
    just returns and breaks the chain.
    The passed parameter is a python dictionary. Each method first checks
    that the needed parameters were received. (This is done by the 'require'
    decorator. It's like an 'assert' to check the contract between the
    caller and the called method.) """
try:
command = opts.pop('command')
command = command if isinstance(command, list) else decode(command)
logging.debug("Run analyzer against '%s'", command)
opts.update(classify_parameters(command))
return arch_check(opts)
except Exception:
logging.error("Problem occured during analyzis.", exc_info=1)
return None
@require(['clang', 'directory', 'flags', 'file', 'output_dir', 'language',
'error_type', 'error_output', 'exit_code'])
def report_failure(opts):
""" Create report when analyzer failed.
    The major report is the preprocessor output. The output file name is
    generated randomly. The compiler output is also captured into a
    '.stderr.txt' file, and some more execution context is saved into an
    '.info.txt' file. """
def extension(opts):
""" Generate preprocessor file extension. """
mapping = {'objective-c++': '.mii', 'objective-c': '.mi', 'c++': '.ii'}
return mapping.get(opts['language'], '.i')
def destination(opts):
""" Creates failures directory if not exits yet. """
name = os.path.join(opts['output_dir'], 'failures')
if not os.path.isdir(name):
os.makedirs(name)
return name
error = opts['error_type']
(handle, name) = tempfile.mkstemp(suffix=extension(opts),
prefix='clang_' + error + '_',
dir=destination(opts))
os.close(handle)
cwd = opts['directory']
cmd = get_arguments([opts['clang'], '-fsyntax-only', '-E'] +
opts['flags'] + [opts['file'], '-o', name], cwd)
logging.debug('exec command in %s: %s', cwd, ' '.join(cmd))
subprocess.call(cmd, cwd=cwd)
# write general information about the crash
with open(name + '.info.txt', 'w') as handle:
handle.write(opts['file'] + os.linesep)
handle.write(error.title().replace('_', ' ') + os.linesep)
handle.write(' '.join(cmd) + os.linesep)
handle.write(' '.join(os.uname()) + os.linesep)
handle.write(get_version(opts['clang']))
# write the captured output too
with open(name + '.stderr.txt', 'w') as handle:
handle.writelines(opts['error_output'])
# return with the previous step exit code and output
return {
'error_output': opts['error_output'],
'exit_code': opts['exit_code']
}
@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'output_dir',
'output_format'])
def run_analyzer(opts, continuation=report_failure):
""" It assembles the analysis command line and executes it. Capture the
output of the analysis and returns with it. If failure reports are
requested, it calls the continuation to generate it. """
def output():
""" Creates output file name for reports. """
if opts['output_format'] in {'plist', 'plist-html'}:
(handle, name) = tempfile.mkstemp(prefix='report-',
suffix='.plist',
dir=opts['output_dir'])
os.close(handle)
return name
return opts['output_dir']
cwd = opts['directory']
cmd = get_arguments([opts['clang'], '--analyze'] + opts['direct_args'] +
opts['flags'] + [opts['file'], '-o', output()],
cwd)
logging.debug('exec command in %s: %s', cwd, ' '.join(cmd))
child = subprocess.Popen(cmd,
cwd=cwd,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output = child.stdout.readlines()
child.stdout.close()
    # generate failure report details if they were requested
child.wait()
if opts.get('output_failures', False) and child.returncode:
error_type = 'crash' if child.returncode & 127 else 'other_error'
opts.update({
'error_type': error_type,
'error_output': output,
'exit_code': child.returncode
})
return continuation(opts)
# return the output for logging and exit code for testing
return {'error_output': output, 'exit_code': child.returncode}
@require(['flags', 'force_debug'])
def filter_debug_flags(opts, continuation=run_analyzer):
""" Filter out nondebug macros when requested. """
if opts.pop('force_debug'):
        # lazy implementation: just append an undefine macro at the end
opts.update({'flags': opts['flags'] + ['-UNDEBUG']})
return continuation(opts)
@require(['language', 'compiler', 'file', 'flags'])
def language_check(opts, continuation=filter_debug_flags):
""" Find out the language from command line parameters or file name
extension. The decision also influenced by the compiler invocation. """
accepted = frozenset({
'c', 'c++', 'objective-c', 'objective-c++', 'c-cpp-output',
'c++-cpp-output', 'objective-c-cpp-output'
})
# language can be given as a parameter...
language = opts.pop('language')
compiler = opts.pop('compiler')
# ... or find out from source file extension
if language is None and compiler is not None:
language = classify_source(opts['file'], compiler == 'c')
if language is None:
logging.debug('skip analysis, language not known')
return None
elif language not in accepted:
logging.debug('skip analysis, language not supported')
return None
else:
logging.debug('analysis, language: %s', language)
opts.update({'language': language,
'flags': ['-x', language] + opts['flags']})
return continuation(opts)
@require(['arch_list', 'flags'])
def arch_check(opts, continuation=language_check):
""" Do run analyzer through one of the given architectures. """
disabled = frozenset({'ppc', 'ppc64'})
received_list = opts.pop('arch_list')
if received_list:
# filter out disabled architectures and -arch switches
filtered_list = [a for a in received_list if a not in disabled]
if filtered_list:
            # There should be only one arch given (or the same one multiple
            # times). If multiple, different archs are given, they should
            # not change the pre-processing step. (That's the only pass
            # we have before running the analyzer.)
current = filtered_list.pop()
logging.debug('analysis, on arch: %s', current)
opts.update({'flags': ['-arch', current] + opts['flags']})
return continuation(opts)
else:
            logging.debug('skip analysis, unsupported architecture found')
return None
else:
logging.debug('analysis, on default arch')
return continuation(opts)
def classify_parameters(command):
""" Prepare compiler flags (filters some and add others) and take out
language (-x) and architecture (-arch) flags for future processing. """
result = {
'flags': [], # the filtered compiler flags
'arch_list': [], # list of architecture flags
'language': None, # compilation language, None, if not specified
'compiler': compiler_language(command) # 'c' or 'c++'
}
# iterate on the compile options
args = iter(command[1:])
for arg in args:
# take arch flags into a separate basket
if arg == '-arch':
result['arch_list'].append(next(args))
# take language
elif arg == '-x':
result['language'] = next(args)
        # parameters which look like source files are not flags
elif re.match(r'^[^-].+', arg) and classify_source(arg):
pass
# ignore some flags
elif arg in IGNORED_FLAGS:
count = IGNORED_FLAGS[arg]
for _ in range(count):
next(args)
        # we don't care about warning-enabling flags, but we should keep the
        # '-Wno-' ones that suppress warnings we don't want to see.
elif re.match(r'^-W.+', arg) and not re.match(r'^-Wno-.+', arg):
pass
# and consider everything else as compilation flag.
else:
result['flags'].append(arg)
return result
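# A minimal usage sketch (illustrative only, not part of the original
# module): '-x' and '-arch' values are captured separately, '-o <file>' is
# skipped via IGNORED_FLAGS, and the source file name is not taken as a flag.
def _classify_parameters_example():
    """ Hypothetical helper for illustration. """
    result = classify_parameters(
        ['clang', '-x', 'c', '-arch', 'x86_64', '-o', 'out.o', '-Ifoo',
         'main.c'])
    assert result['language'] == 'c'
    assert result['arch_list'] == ['x86_64']
    assert result['flags'] == ['-Ifoo']
    assert result['compiler'] == 'c'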
| 11,337 | 36.419142 | 79 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-build-py/libscanbuild/report.py | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
""" This module is responsible to generate 'index.html' for the report.
The input for this step is the output directory, where individual reports
could be found. It parses those reports and generates 'index.html'. """
import re
import os
import os.path
import sys
import shutil
import time
import tempfile
import itertools
import plistlib
import glob
import json
import logging
import contextlib
import datetime
from libscanbuild import duplicate_check
from libscanbuild.clang import get_version
__all__ = ['report_directory', 'document']
@contextlib.contextmanager
def report_directory(hint, keep):
""" Responsible for the report directory.
hint -- could specify the parent directory of the output directory.
keep -- a boolean value to keep or delete the empty report directory. """
stamp_format = 'scan-build-%Y-%m-%d-%H-%M-%S-%f-'
stamp = datetime.datetime.now().strftime(stamp_format)
parentdir = os.path.abspath(hint)
if not os.path.exists(parentdir):
os.makedirs(parentdir)
name = tempfile.mkdtemp(prefix=stamp, dir=parentdir)
logging.info('Report directory created: %s', name)
try:
yield name
finally:
if os.listdir(name):
msg = "Run 'scan-view %s' to examine bug reports."
keep = True
else:
if keep:
msg = "Report directory '%s' contans no report, but kept."
else:
msg = "Removing directory '%s' because it contains no report."
logging.warning(msg, name)
if not keep:
os.rmdir(name)
def document(args, output_dir, use_cdb):
""" Generates cover report and returns the number of bugs/crashes. """
html_reports_available = args.output_format in {'html', 'plist-html'}
logging.debug('count crashes and bugs')
crash_count = sum(1 for _ in read_crashes(output_dir))
bug_counter = create_counters()
for bug in read_bugs(output_dir, html_reports_available):
bug_counter(bug)
result = crash_count + bug_counter.total
if html_reports_available and result:
logging.debug('generate index.html file')
        # common prefix for source files to have short file names
prefix = commonprefix_from(args.cdb) if use_cdb else os.getcwd()
# assemble the cover from multiple fragments
try:
fragments = []
if bug_counter.total:
fragments.append(bug_summary(output_dir, bug_counter))
fragments.append(bug_report(output_dir, prefix))
if crash_count:
fragments.append(crash_report(output_dir, prefix))
assemble_cover(output_dir, prefix, args, fragments)
            # copy additional files to the report
copy_resource_files(output_dir)
if use_cdb:
shutil.copy(args.cdb, output_dir)
finally:
for fragment in fragments:
os.remove(fragment)
return result
def assemble_cover(output_dir, prefix, args, fragments):
""" Put together the fragments into a final report. """
import getpass
import socket
import datetime
if args.html_title is None:
args.html_title = os.path.basename(prefix) + ' - analyzer results'
with open(os.path.join(output_dir, 'index.html'), 'w') as handle:
indent = 0
handle.write(reindent("""
|<!DOCTYPE html>
|<html>
| <head>
| <title>{html_title}</title>
| <link type="text/css" rel="stylesheet" href="scanview.css"/>
| <script type='text/javascript' src="sorttable.js"></script>
| <script type='text/javascript' src='selectable.js'></script>
| </head>""", indent).format(html_title=args.html_title))
handle.write(comment('SUMMARYENDHEAD'))
handle.write(reindent("""
| <body>
| <h1>{html_title}</h1>
| <table>
| <tr><th>User:</th><td>{user_name}@{host_name}</td></tr>
| <tr><th>Working Directory:</th><td>{current_dir}</td></tr>
| <tr><th>Command Line:</th><td>{cmd_args}</td></tr>
| <tr><th>Clang Version:</th><td>{clang_version}</td></tr>
| <tr><th>Date:</th><td>{date}</td></tr>
| </table>""", indent).format(html_title=args.html_title,
user_name=getpass.getuser(),
host_name=socket.gethostname(),
current_dir=prefix,
cmd_args=' '.join(sys.argv),
clang_version=get_version(args.clang),
date=datetime.datetime.today(
).strftime('%c')))
for fragment in fragments:
# copy the content of fragments
with open(fragment, 'r') as input_handle:
shutil.copyfileobj(input_handle, handle)
handle.write(reindent("""
| </body>
|</html>""", indent))
def bug_summary(output_dir, bug_counter):
""" Bug summary is a HTML table to give a better overview of the bugs. """
name = os.path.join(output_dir, 'summary.html.fragment')
with open(name, 'w') as handle:
indent = 4
handle.write(reindent("""
|<h2>Bug Summary</h2>
|<table>
| <thead>
| <tr>
| <td>Bug Type</td>
| <td>Quantity</td>
| <td class="sorttable_nosort">Display?</td>
| </tr>
| </thead>
| <tbody>""", indent))
handle.write(reindent("""
| <tr style="font-weight:bold">
| <td class="SUMM_DESC">All Bugs</td>
| <td class="Q">{0}</td>
| <td>
| <center>
| <input checked type="checkbox" id="AllBugsCheck"
| onClick="CopyCheckedStateToCheckButtons(this);"/>
| </center>
| </td>
| </tr>""", indent).format(bug_counter.total))
for category, types in bug_counter.categories.items():
handle.write(reindent("""
| <tr>
| <th>{0}</th><th colspan=2></th>
| </tr>""", indent).format(category))
for bug_type in types.values():
handle.write(reindent("""
| <tr>
| <td class="SUMM_DESC">{bug_type}</td>
| <td class="Q">{bug_count}</td>
| <td>
| <center>
| <input checked type="checkbox"
| onClick="ToggleDisplay(this,'{bug_type_class}');"/>
| </center>
| </td>
| </tr>""", indent).format(**bug_type))
handle.write(reindent("""
| </tbody>
|</table>""", indent))
handle.write(comment('SUMMARYBUGEND'))
return name
def bug_report(output_dir, prefix):
""" Creates a fragment from the analyzer reports. """
pretty = prettify_bug(prefix, output_dir)
bugs = (pretty(bug) for bug in read_bugs(output_dir, True))
name = os.path.join(output_dir, 'bugs.html.fragment')
with open(name, 'w') as handle:
indent = 4
handle.write(reindent("""
|<h2>Reports</h2>
|<table class="sortable" style="table-layout:automatic">
| <thead>
| <tr>
| <td>Bug Group</td>
| <td class="sorttable_sorted">
| Bug Type
| <span id="sorttable_sortfwdind"> ▾</span>
| </td>
| <td>File</td>
| <td>Function/Method</td>
| <td class="Q">Line</td>
| <td class="Q">Path Length</td>
| <td class="sorttable_nosort"></td>
| </tr>
| </thead>
| <tbody>""", indent))
handle.write(comment('REPORTBUGCOL'))
for current in bugs:
handle.write(reindent("""
| <tr class="{bug_type_class}">
| <td class="DESC">{bug_category}</td>
| <td class="DESC">{bug_type}</td>
| <td>{bug_file}</td>
| <td class="DESC">{bug_function}</td>
| <td class="Q">{bug_line}</td>
| <td class="Q">{bug_path_length}</td>
| <td><a href="{report_file}#EndPath">View Report</a></td>
| </tr>""", indent).format(**current))
handle.write(comment('REPORTBUG', {'id': current['report_file']}))
handle.write(reindent("""
| </tbody>
|</table>""", indent))
handle.write(comment('REPORTBUGEND'))
return name
def crash_report(output_dir, prefix):
""" Creates a fragment from the compiler crashes. """
pretty = prettify_crash(prefix, output_dir)
crashes = (pretty(crash) for crash in read_crashes(output_dir))
name = os.path.join(output_dir, 'crashes.html.fragment')
with open(name, 'w') as handle:
indent = 4
handle.write(reindent("""
|<h2>Analyzer Failures</h2>
|<p>The analyzer had problems processing the following files:</p>
|<table>
| <thead>
| <tr>
| <td>Problem</td>
| <td>Source File</td>
| <td>Preprocessed File</td>
| <td>STDERR Output</td>
| </tr>
| </thead>
| <tbody>""", indent))
for current in crashes:
handle.write(reindent("""
| <tr>
| <td>{problem}</td>
| <td>{source}</td>
| <td><a href="{file}">preprocessor output</a></td>
| <td><a href="{stderr}">analyzer std err</a></td>
| </tr>""", indent).format(**current))
handle.write(comment('REPORTPROBLEM', current))
handle.write(reindent("""
| </tbody>
|</table>""", indent))
handle.write(comment('REPORTCRASHES'))
return name
def read_crashes(output_dir):
""" Generate a unique sequence of crashes from given output directory. """
return (parse_crash(filename)
for filename in glob.iglob(os.path.join(output_dir, 'failures',
'*.info.txt')))
def read_bugs(output_dir, html):
""" Generate a unique sequence of bugs from given output directory.
Duplicates can be in a project if the same module was compiled multiple
    times with different compiler options. These are better shown in the
    final report (cover) only once. """
parser = parse_bug_html if html else parse_bug_plist
pattern = '*.html' if html else '*.plist'
duplicate = duplicate_check(
lambda bug: '{bug_line}.{bug_path_length}:{bug_file}'.format(**bug))
bugs = itertools.chain.from_iterable(
        # parser creates a bug generator, not the bugs themselves
parser(filename)
for filename in glob.iglob(os.path.join(output_dir, pattern)))
return (bug for bug in bugs if not duplicate(bug))
def parse_bug_plist(filename):
""" Returns the generator of bugs from a single .plist file. """
content = plistlib.readPlist(filename)
files = content.get('files')
for bug in content.get('diagnostics', []):
if len(files) <= int(bug['location']['file']):
logging.warning('Parsing bug from "%s" failed', filename)
continue
yield {
'result': filename,
'bug_type': bug['type'],
'bug_category': bug['category'],
'bug_line': int(bug['location']['line']),
'bug_path_length': int(bug['location']['col']),
'bug_file': files[int(bug['location']['file'])]
}
def parse_bug_html(filename):
""" Parse out the bug information from HTML output. """
patterns = [re.compile(r'<!-- BUGTYPE (?P<bug_type>.*) -->$'),
re.compile(r'<!-- BUGFILE (?P<bug_file>.*) -->$'),
re.compile(r'<!-- BUGPATHLENGTH (?P<bug_path_length>.*) -->$'),
re.compile(r'<!-- BUGLINE (?P<bug_line>.*) -->$'),
re.compile(r'<!-- BUGCATEGORY (?P<bug_category>.*) -->$'),
re.compile(r'<!-- BUGDESC (?P<bug_description>.*) -->$'),
re.compile(r'<!-- FUNCTIONNAME (?P<bug_function>.*) -->$')]
endsign = re.compile(r'<!-- BUGMETAEND -->')
bug = {
'report_file': filename,
'bug_function': 'n/a', # compatibility with < clang-3.5
'bug_category': 'Other',
'bug_line': 0,
'bug_path_length': 1
}
with open(filename) as handler:
for line in handler.readlines():
# do not read the file further
if endsign.match(line):
break
# search for the right lines
for regex in patterns:
match = regex.match(line.strip())
if match:
bug.update(match.groupdict())
break
encode_value(bug, 'bug_line', int)
encode_value(bug, 'bug_path_length', int)
yield bug
def parse_crash(filename):
""" Parse out the crash information from the report file. """
match = re.match(r'(.*)\.info\.txt', filename)
name = match.group(1) if match else None
with open(filename) as handler:
lines = handler.readlines()
return {
'source': lines[0].rstrip(),
'problem': lines[1].rstrip(),
'file': name,
'info': name + '.info.txt',
'stderr': name + '.stderr.txt'
}
def category_type_name(bug):
""" Create a new bug attribute from bug by category and type.
The result will be used as CSS class selector in the final report. """
def smash(key):
""" Make value ready to be HTML attribute value. """
return bug.get(key, '').lower().replace(' ', '_').replace("'", '')
return escape('bt_' + smash('bug_category') + '_' + smash('bug_type'))
def create_counters():
""" Create counters for bug statistics.
    Two entries are maintained: 'total' is an integer that represents the
    number of bugs. 'categories' is a two-level categorisation of bug
    counters. The first level is 'bug category', the second is 'bug type'.
Each entry in this classification is a dictionary of 'count', 'type'
and 'label'. """
def predicate(bug):
bug_category = bug['bug_category']
bug_type = bug['bug_type']
current_category = predicate.categories.get(bug_category, dict())
current_type = current_category.get(bug_type, {
'bug_type': bug_type,
'bug_type_class': category_type_name(bug),
'bug_count': 0
})
current_type.update({'bug_count': current_type['bug_count'] + 1})
current_category.update({bug_type: current_type})
predicate.categories.update({bug_category: current_category})
predicate.total += 1
predicate.total = 0
predicate.categories = dict()
return predicate
def prettify_bug(prefix, output_dir):
def predicate(bug):
""" Make safe this values to embed into HTML. """
bug['bug_type_class'] = category_type_name(bug)
encode_value(bug, 'bug_file', lambda x: escape(chop(prefix, x)))
encode_value(bug, 'bug_category', escape)
encode_value(bug, 'bug_type', escape)
encode_value(bug, 'report_file', lambda x: escape(chop(output_dir, x)))
return bug
return predicate
def prettify_crash(prefix, output_dir):
def predicate(crash):
""" Make safe this values to embed into HTML. """
encode_value(crash, 'source', lambda x: escape(chop(prefix, x)))
encode_value(crash, 'problem', escape)
encode_value(crash, 'file', lambda x: escape(chop(output_dir, x)))
encode_value(crash, 'info', lambda x: escape(chop(output_dir, x)))
encode_value(crash, 'stderr', lambda x: escape(chop(output_dir, x)))
return crash
return predicate
def copy_resource_files(output_dir):
""" Copy the javascript and css files to the report directory. """
this_dir = os.path.dirname(os.path.realpath(__file__))
for resource in os.listdir(os.path.join(this_dir, 'resources')):
shutil.copy(os.path.join(this_dir, 'resources', resource), output_dir)
def encode_value(container, key, encode):
""" Run 'encode' on 'container[key]' value and update it. """
if key in container:
value = encode(container[key])
container.update({key: value})
def chop(prefix, filename):
""" Create 'filename' from '/prefix/filename' """
return filename if not len(prefix) else os.path.relpath(filename, prefix)
def escape(text):
""" Paranoid HTML escape method. (Python version independent) """
escape_table = {
'&': '&',
'"': '"',
"'": ''',
'>': '>',
'<': '<'
}
return ''.join(escape_table.get(c, c) for c in text)
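# A minimal usage sketch (illustrative only, not part of the original
# module): every character with an entry in the escape table is replaced.
def _escape_example():
    """ Hypothetical helper for illustration. """
    assert escape('a < b & "c"') == 'a &lt; b &amp; &quot;c&quot;'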
def reindent(text, indent):
""" Utility function to format html output and keep indentation. """
result = ''
for line in text.splitlines():
if len(line.strip()):
result += ' ' * indent + line.split('|')[1] + os.linesep
return result
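# A minimal usage sketch (illustrative only, not part of the original
# module): each non-blank line keeps only the text after its '|' margin
# marker, shifted right by 'indent' spaces.
def _reindent_example():
    """ Hypothetical helper for illustration. """
    text = """
    |<p>
    |</p>"""
    assert reindent(text, 2) == '  <p>' + os.linesep + '  </p>' + os.linesep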
def comment(name, opts=dict()):
""" Utility function to format meta information as comment. """
attributes = ''
for key, value in opts.items():
attributes += ' {0}="{1}"'.format(key, value)
return '<!-- {0}{1} -->{2}'.format(name, attributes, os.linesep)
def commonprefix_from(filename):
""" Create file prefix from a compilation database entries. """
with open(filename, 'r') as handle:
return commonprefix(item['file'] for item in json.load(handle))
def commonprefix(files):
""" Fixed version of os.path.commonprefix. Return the longest path prefix
that is a prefix of all paths in filenames. """
result = None
for current in files:
if result is not None:
result = os.path.commonprefix([result, current])
else:
result = current
if result is None:
return ''
elif not os.path.isdir(result):
return os.path.dirname(result)
else:
return os.path.abspath(result)
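# A minimal usage sketch (illustrative only, not part of the original
# module; the paths are made up and assumed not to exist): a partial last
# path component is chopped back to its parent directory, and an empty
# input yields ''.
def _commonprefix_example():
    """ Hypothetical helper for illustration. """
    assert commonprefix([]) == ''
    assert commonprefix(['/nonexistent/proj/a.c',
                         '/nonexistent/proj/b.c']) == '/nonexistent/proj'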
| 18,471 | 33.334572 | 79 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-build-py/libscanbuild/compilation.py | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
""" This module is responsible for to parse a compiler invocation. """
import re
import os
import collections
__all__ = ['split_command', 'classify_source', 'compiler_language']
# Ignored compiler options map for compilation database creation.
# The map is used by the `split_command` method (which ignores and
# classifies parameters). Please note that these are not the only
# parameters which might be ignored.
#
# Keys are the option name, value number of options to skip
IGNORED_FLAGS = {
# compiling only flag, ignored because the creator of compilation
# database will explicitly set it.
'-c': 0,
    # preprocessor macros, ignored because they would cause duplicate entries
    # in the output (the only difference would be these flags). this is an
    # actual finding from users, who suffered longer execution time caused
    # by the duplicates.
'-MD': 0,
'-MMD': 0,
'-MG': 0,
'-MP': 0,
'-MF': 1,
'-MT': 1,
'-MQ': 1,
    # linker options, ignored because the compilation database will contain
    # compilation commands only. so, the compiler would ignore these flags
    # anyway. the benefit of getting rid of them is to make the output more
    # readable.
'-static': 0,
'-shared': 0,
'-s': 0,
'-rdynamic': 0,
'-l': 1,
'-L': 1,
'-u': 1,
'-z': 1,
'-T': 1,
'-Xlinker': 1
}
# Known C/C++ compiler executable name patterns
COMPILER_PATTERNS = frozenset([
re.compile(r'^(intercept-|analyze-|)c(c|\+\+)$'),
re.compile(r'^([^-]*-)*[mg](cc|\+\+)(-\d+(\.\d+){0,2})?$'),
re.compile(r'^([^-]*-)*clang(\+\+)?(-\d+(\.\d+){0,2})?$'),
re.compile(r'^llvm-g(cc|\+\+)$'),
])
def split_command(command):
""" Returns a value when the command is a compilation, None otherwise.
The value on success is a named tuple with the following attributes:
files: list of source files
flags: list of compile options
compiler: string value of 'c' or 'c++' """
# the result of this method
result = collections.namedtuple('Compilation',
['compiler', 'flags', 'files'])
result.compiler = compiler_language(command)
result.flags = []
result.files = []
# quit right now, if the program was not a C/C++ compiler
if not result.compiler:
return None
# iterate on the compile options
args = iter(command[1:])
for arg in args:
# quit when compilation pass is not involved
if arg in {'-E', '-S', '-cc1', '-M', '-MM', '-###'}:
return None
# ignore some flags
elif arg in IGNORED_FLAGS:
count = IGNORED_FLAGS[arg]
for _ in range(count):
next(args)
elif re.match(r'^-(l|L|Wl,).+', arg):
pass
        # some parameters could look like a filename; take them as compile options
elif arg in {'-D', '-I'}:
result.flags.extend([arg, next(args)])
        # a parameter which looks like a source file is taken...
elif re.match(r'^[^-].+', arg) and classify_source(arg):
result.files.append(arg)
# and consider everything else as compile option.
else:
result.flags.append(arg)
# do extra check on number of source files
return result if result.files else None
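# A minimal usage sketch (illustrative only, not part of the original
# module): preprocess-only calls yield None, compile calls are split into
# compiler, flags and source files.
def _split_command_example():
    """ Hypothetical helper for illustration. """
    assert split_command(['gcc', '-E', 'main.c']) is None
    result = split_command(['g++', '-c', '-Wall', 'main.cpp'])
    assert result.compiler == 'c++'
    assert result.flags == ['-Wall']
    assert result.files == ['main.cpp']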
def classify_source(filename, c_compiler=True):
""" Return the language from file name extension. """
mapping = {
'.c': 'c' if c_compiler else 'c++',
'.i': 'c-cpp-output' if c_compiler else 'c++-cpp-output',
'.ii': 'c++-cpp-output',
'.m': 'objective-c',
'.mi': 'objective-c-cpp-output',
'.mm': 'objective-c++',
'.mii': 'objective-c++-cpp-output',
'.C': 'c++',
'.cc': 'c++',
'.CC': 'c++',
'.cp': 'c++',
'.cpp': 'c++',
'.cxx': 'c++',
'.c++': 'c++',
'.C++': 'c++',
'.txx': 'c++'
}
__, extension = os.path.splitext(os.path.basename(filename))
return mapping.get(extension)
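# A minimal usage sketch (illustrative only, not part of the original
# module): the same '.c' extension maps to C for a C compiler and to C++
# otherwise; unknown extensions map to None.
def _classify_source_example():
    """ Hypothetical helper for illustration. """
    assert classify_source('main.c') == 'c'
    assert classify_source('main.c', c_compiler=False) == 'c++'
    assert classify_source('main.unknown') is None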
def compiler_language(command):
""" A predicate to decide the command is a compiler call or not.
Returns 'c' or 'c++' when it match. None otherwise. """
cplusplus = re.compile(r'^(.+)(\+\+)(-.+|)$')
if command:
executable = os.path.basename(command[0])
if any(pattern.match(executable) for pattern in COMPILER_PATTERNS):
return 'c++' if cplusplus.match(executable) else 'c'
return None
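# A minimal usage sketch (illustrative only, not part of the original
# module): cross-compiler prefixes and version suffixes are recognized too.
def _compiler_language_example():
    """ Hypothetical helper for illustration. """
    assert compiler_language(['clang++', 'main.cpp']) == 'c++'
    assert compiler_language(['arm-linux-gnueabi-gcc-4.9', 'main.c']) == 'c'
    assert compiler_language(['ld', 'main.o']) is None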
| 4,649 | 31.746479 | 76 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-build-py/libscanbuild/__init__.py | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
"""
This module is responsible for running the Clang static analyzer against any
build and generating reports.
"""
def duplicate_check(method):
""" Predicate to detect duplicated entries.
    A unique hash method can be used to detect duplicates. Entries are
    represented as dictionaries, which have no default hash method.
This implementation uses a set datatype to store the unique hash values.
This method returns a method which can detect the duplicate values. """
def predicate(entry):
entry_hash = predicate.unique(entry)
if entry_hash not in predicate.state:
predicate.state.add(entry_hash)
return False
return True
predicate.unique = method
predicate.state = set()
return predicate
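# A minimal usage sketch (illustrative only, not part of the original
# module): the first occurrence of a hash is not a duplicate, later
# occurrences of the same hash are.
def _duplicate_check_example():
    """ Hypothetical helper for illustration. """
    is_duplicate = duplicate_check(lambda entry: entry['file'])
    assert not is_duplicate({'file': 'a.c'})
    assert is_duplicate({'file': 'a.c'})
    assert not is_duplicate({'file': 'b.c'})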
def tempdir():
""" Return the default temorary directory. """
from os import getenv
return getenv('TMPDIR', getenv('TEMP', getenv('TMP', '/tmp')))
def initialize_logging(verbose_level):
""" Output content controlled by the verbosity level. """
import sys
import os.path
import logging
level = logging.WARNING - min(logging.WARNING, (10 * verbose_level))
if verbose_level <= 3:
fmt_string = '{0}: %(levelname)s: %(message)s'
else:
fmt_string = '{0}: %(levelname)s: %(funcName)s: %(message)s'
program = os.path.basename(sys.argv[0])
logging.basicConfig(format=fmt_string.format(program), level=level)
def command_entry_point(function):
""" Decorator for command entry points. """
import functools
import logging
@functools.wraps(function)
def wrapper(*args, **kwargs):
exit_code = 127
try:
exit_code = function(*args, **kwargs)
except KeyboardInterrupt:
            logging.warning('Keyboard interrupt')
except Exception:
logging.exception('Internal error.')
if logging.getLogger().isEnabledFor(logging.DEBUG):
logging.error("Please report this bug and attach the output "
"to the bug report")
else:
logging.error("Please run this command again and turn on "
"verbose mode (add '-vvv' as argument).")
finally:
return exit_code
return wrapper
| 2,487 | 28.975904 | 77 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/tools/scan-build-py/libscanbuild/analyze.py | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
""" This module implements the 'scan-build' command API.
To run the static analyzer against a build is done in multiple steps:
-- Intercept: capture the compilation command during the build,
-- Analyze: run the analyzer against the captured commands,
-- Report: create a cover report from the analyzer outputs. """
import sys
import re
import os
import os.path
import json
import argparse
import logging
import subprocess
import multiprocessing
from libscanbuild import initialize_logging, tempdir, command_entry_point
from libscanbuild.runner import run
from libscanbuild.intercept import capture
from libscanbuild.report import report_directory, document
from libscanbuild.clang import get_checkers
from libscanbuild.compilation import split_command
__all__ = ['analyze_build_main', 'analyze_build_wrapper']
COMPILER_WRAPPER_CC = 'analyze-cc'
COMPILER_WRAPPER_CXX = 'analyze-c++'
@command_entry_point
def analyze_build_main(bin_dir, from_build_command):
""" Entry point for 'analyze-build' and 'scan-build'. """
parser = create_parser(from_build_command)
args = parser.parse_args()
validate(parser, args, from_build_command)
# setup logging
initialize_logging(args.verbose)
logging.debug('Parsed arguments: %s', args)
with report_directory(args.output, args.keep_empty) as target_dir:
if not from_build_command:
# run analyzer only and generate cover report
run_analyzer(args, target_dir)
number_of_bugs = document(args, target_dir, True)
return number_of_bugs if args.status_bugs else 0
elif args.intercept_first:
# run build command and capture compiler executions
exit_code = capture(args, bin_dir)
# next step to run the analyzer against the captured commands
if need_analyzer(args.build):
run_analyzer(args, target_dir)
# cover report generation and bug counting
number_of_bugs = document(args, target_dir, True)
# remove the compilation database when it was not requested
if os.path.exists(args.cdb):
os.unlink(args.cdb)
# set exit status as it was requested
return number_of_bugs if args.status_bugs else exit_code
else:
return exit_code
else:
# run the build command with compiler wrappers which
# execute the analyzer too. (interposition)
environment = setup_environment(args, target_dir, bin_dir)
logging.debug('run build in environment: %s', environment)
exit_code = subprocess.call(args.build, env=environment)
logging.debug('build finished with exit code: %d', exit_code)
# cover report generation and bug counting
number_of_bugs = document(args, target_dir, False)
# set exit status as it was requested
return number_of_bugs if args.status_bugs else exit_code
def need_analyzer(args):
""" Check the intent of the build command.
    When the static analyzer is run against a project's configure step, it
    should be silent and there is no need to run the analyzer or generate a
    report.
    Running `scan-build` against the configure step might be necessary
    when compiler wrappers are used. That's the moment when the build setup
    checks the compiler and captures its location for the build process. """
return len(args) and not re.search('configure|autogen', args[0])
def run_analyzer(args, output_dir):
""" Runs the analyzer against the given compilation database. """
def exclude(filename):
""" Return true when any excluded directory prefix the filename. """
return any(re.match(r'^' + directory, filename)
for directory in args.excludes)
consts = {
'clang': args.clang,
'output_dir': output_dir,
'output_format': args.output_format,
'output_failures': args.output_failures,
'direct_args': analyzer_params(args),
'force_debug': args.force_debug
}
logging.debug('run analyzer against compilation database')
with open(args.cdb, 'r') as handle:
generator = (dict(cmd, **consts)
for cmd in json.load(handle) if not exclude(cmd['file']))
        # when verbose output is requested, execute sequentially
pool = multiprocessing.Pool(1 if args.verbose > 2 else None)
for current in pool.imap_unordered(run, generator):
if current is not None:
# display error message from the static analyzer
for line in current['error_output']:
logging.info(line.rstrip())
pool.close()
pool.join()
def setup_environment(args, destination, bin_dir):
""" Set up environment for build command to interpose compiler wrapper. """
environment = dict(os.environ)
environment.update({
'CC': os.path.join(bin_dir, COMPILER_WRAPPER_CC),
'CXX': os.path.join(bin_dir, COMPILER_WRAPPER_CXX),
'ANALYZE_BUILD_CC': args.cc,
'ANALYZE_BUILD_CXX': args.cxx,
'ANALYZE_BUILD_CLANG': args.clang if need_analyzer(args.build) else '',
'ANALYZE_BUILD_VERBOSE': 'DEBUG' if args.verbose > 2 else 'WARNING',
'ANALYZE_BUILD_REPORT_DIR': destination,
'ANALYZE_BUILD_REPORT_FORMAT': args.output_format,
'ANALYZE_BUILD_REPORT_FAILURES': 'yes' if args.output_failures else '',
'ANALYZE_BUILD_PARAMETERS': ' '.join(analyzer_params(args)),
'ANALYZE_BUILD_FORCE_DEBUG': 'yes' if args.force_debug else ''
})
return environment
def analyze_build_wrapper(cplusplus):
""" Entry point for `analyze-cc` and `analyze-c++` compiler wrappers. """
# initialize wrapper logging
logging.basicConfig(format='analyze: %(levelname)s: %(message)s',
level=os.getenv('ANALYZE_BUILD_VERBOSE', 'INFO'))
# execute with real compiler
compiler = os.getenv('ANALYZE_BUILD_CXX', 'c++') if cplusplus \
else os.getenv('ANALYZE_BUILD_CC', 'cc')
compilation = [compiler] + sys.argv[1:]
logging.info('execute compiler: %s', compilation)
result = subprocess.call(compilation)
# exit when it fails, ...
if result or not os.getenv('ANALYZE_BUILD_CLANG'):
return result
# ... and run the analyzer if all went well.
try:
        # check whether it is a compilation
compilation = split_command(sys.argv)
if compilation is None:
return result
# collect the needed parameters from environment, crash when missing
parameters = {
'clang': os.getenv('ANALYZE_BUILD_CLANG'),
'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'),
'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'),
'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'),
'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS',
'').split(' '),
'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'),
'directory': os.getcwd(),
'command': [sys.argv[0], '-c'] + compilation.flags
}
# call static analyzer against the compilation
for source in compilation.files:
parameters.update({'file': source})
logging.debug('analyzer parameters %s', parameters)
current = run(parameters)
# display error message from the static analyzer
if current is not None:
for line in current['error_output']:
logging.info(line.rstrip())
except Exception:
logging.exception("run analyzer inside compiler wrapper failed.")
return result
def analyzer_params(args):
""" A group of command line arguments can mapped to command
line arguments of the analyzer. This method generates those. """
def prefix_with(constant, pieces):
""" From a sequence create another sequence where every second element
is from the original sequence and the odd elements are the prefix.
        e.g.: prefix_with(0, [1,2,3]) creates [0, 1, 0, 2, 0, 3] """
return [elem for piece in pieces for elem in [constant, piece]]
result = []
if args.store_model:
result.append('-analyzer-store={0}'.format(args.store_model))
if args.constraints_model:
result.append('-analyzer-constraints={0}'.format(
args.constraints_model))
if args.internal_stats:
result.append('-analyzer-stats')
if args.analyze_headers:
result.append('-analyzer-opt-analyze-headers')
if args.stats:
result.append('-analyzer-checker=debug.Stats')
if args.maxloop:
result.extend(['-analyzer-max-loop', str(args.maxloop)])
if args.output_format:
result.append('-analyzer-output={0}'.format(args.output_format))
if args.analyzer_config:
result.append(args.analyzer_config)
if args.verbose >= 4:
result.append('-analyzer-display-progress')
if args.plugins:
result.extend(prefix_with('-load', args.plugins))
if args.enable_checker:
checkers = ','.join(args.enable_checker)
result.extend(['-analyzer-checker', checkers])
if args.disable_checker:
checkers = ','.join(args.disable_checker)
result.extend(['-analyzer-disable-checker', checkers])
if os.getenv('UBIVIZ'):
result.append('-analyzer-viz-egraph-ubigraph')
return prefix_with('-Xclang', result)
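# A minimal usage sketch (illustrative only, not part of the original
# module; assumes the UBIVIZ environment variable is unset): each analyzer
# flag is interleaved with '-Xclang' so the driver forwards it.
def _analyzer_params_example():
    """ Hypothetical helper for illustration. """
    import argparse
    args = argparse.Namespace(
        store_model=None, constraints_model=None, internal_stats=False,
        analyze_headers=False, stats=False, maxloop=4, output_format=None,
        analyzer_config=None, verbose=0, plugins=None, enable_checker=None,
        disable_checker=None)
    assert analyzer_params(args) == \
        ['-Xclang', '-analyzer-max-loop', '-Xclang', '4']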
def print_active_checkers(checkers):
""" Print active checkers to stdout. """
for name in sorted(name for name, (_, active) in checkers.items()
if active):
print(name)
def print_checkers(checkers):
""" Print verbose checker help to stdout. """
print('')
print('available checkers:')
print('')
for name in sorted(checkers.keys()):
description, active = checkers[name]
prefix = '+' if active else ' '
if len(name) > 30:
print(' {0} {1}'.format(prefix, name))
print(' ' * 35 + description)
else:
print(' {0} {1: <30} {2}'.format(prefix, name, description))
print('')
print('NOTE: "+" indicates that an analysis is enabled by default.')
print('')
def validate(parser, args, from_build_command):
""" Validation done by the parser itself, but semantic check still
needs to be done. This method is doing that. """
# Make plugins always a list. (It might be None when not specified.)
args.plugins = args.plugins if args.plugins else []
if args.help_checkers_verbose:
print_checkers(get_checkers(args.clang, args.plugins))
parser.exit()
elif args.help_checkers:
print_active_checkers(get_checkers(args.clang, args.plugins))
parser.exit()
if from_build_command and not args.build:
parser.error('missing build command')
def create_parser(from_build_command):
""" Command line argument parser factory method. """
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--verbose', '-v',
action='count',
default=0,
help="""Enable verbose output from '%(prog)s'. A second and third
flag increases verbosity.""")
parser.add_argument(
'--override-compiler',
action='store_true',
help="""Always resort to the compiler wrapper even when better
interposition methods are available.""")
parser.add_argument(
'--intercept-first',
action='store_true',
help="""Run the build commands only, build a compilation database,
then run the static analyzer afterwards.
Generally speaking it has better coverage on build commands.
            With '--override-compiler' it uses the compiler wrapper, but does
not run the analyzer till the build is finished. """)
parser.add_argument(
'--cdb',
metavar='<file>',
default="compile_commands.json",
help="""The JSON compilation database.""")
parser.add_argument(
'--output', '-o',
metavar='<path>',
default=tempdir(),
help="""Specifies the output directory for analyzer reports.
            A subdirectory will be created if the default directory is targeted.
""")
parser.add_argument(
'--status-bugs',
action='store_true',
help="""By default, the exit status of '%(prog)s' is the same as the
executed build command. Specifying this option causes the exit
status of '%(prog)s' to be non zero if it found potential bugs
and zero otherwise.""")
parser.add_argument(
'--html-title',
metavar='<title>',
help="""Specify the title used on generated HTML pages.
If not specified, a default title will be used.""")
parser.add_argument(
'--analyze-headers',
action='store_true',
help="""Also analyze functions in #included files. By default, such
functions are skipped unless they are called by functions
within the main source file.""")
format_group = parser.add_mutually_exclusive_group()
format_group.add_argument(
'--plist', '-plist',
dest='output_format',
const='plist',
default='html',
action='store_const',
help="""This option outputs the results as a set of .plist files.""")
format_group.add_argument(
'--plist-html', '-plist-html',
dest='output_format',
const='plist-html',
default='html',
action='store_const',
help="""This option outputs the results as a set of .html and .plist
files.""")
# TODO: implement '-view '
advanced = parser.add_argument_group('advanced options')
advanced.add_argument(
'--keep-empty',
action='store_true',
help="""Don't remove the build results directory even if no issues
were reported.""")
advanced.add_argument(
'--no-failure-reports', '-no-failure-reports',
dest='output_failures',
action='store_false',
help="""Do not create a 'failures' subdirectory that includes analyzer
crash reports and preprocessed source files.""")
advanced.add_argument(
'--stats', '-stats',
action='store_true',
help="""Generates visitation statistics for the project being analyzed.
""")
advanced.add_argument(
'--internal-stats',
action='store_true',
help="""Generate internal analyzer statistics.""")
advanced.add_argument(
'--maxloop', '-maxloop',
metavar='<loop count>',
type=int,
help="""Specifiy the number of times a block can be visited before
giving up. Increase for more comprehensive coverage at a cost
of speed.""")
advanced.add_argument(
'--store', '-store',
metavar='<model>',
dest='store_model',
choices=['region', 'basic'],
help="""Specify the store model used by the analyzer.
                'region' specifies a field-sensitive store model.
'basic' which is far less precise but can more quickly
analyze code. 'basic' was the default store model for
checker-0.221 and earlier.""")
advanced.add_argument(
'--constraints', '-constraints',
metavar='<model>',
dest='constraints_model',
choices=['range', 'basic'],
help="""Specify the contraint engine used by the analyzer. Specifying
'basic' uses a simpler, less powerful constraint model used by
checker-0.160 and earlier.""")
advanced.add_argument(
'--use-analyzer',
metavar='<path>',
dest='clang',
default='clang',
help="""'%(prog)s' uses the 'clang' executable relative to itself for
static analysis. One can override this behavior with this
option by using the 'clang' packaged with Xcode (on OS X) or
from the PATH.""")
advanced.add_argument(
'--use-cc',
metavar='<path>',
dest='cc',
default='cc',
help="""When '%(prog)s' analyzes a project by interposing a "fake
compiler", which executes a real compiler for compilation and
            does other tasks (to run the static analyzer or just record the
compiler invocation). Because of this interposing, '%(prog)s'
does not know what compiler your project normally uses.
Instead, it simply overrides the CC environment variable, and
guesses your default compiler.
If you need '%(prog)s' to use a specific compiler for
*compilation* then you can use this option to specify a path
to that compiler.""")
advanced.add_argument(
'--use-c++',
metavar='<path>',
dest='cxx',
default='c++',
help="""This is the same as "--use-cc" but for C++ code.""")
advanced.add_argument(
'--analyzer-config', '-analyzer-config',
metavar='<options>',
help="""Provide options to pass through to the analyzer's
-analyzer-config flag. Several options are separated with
comma: 'key1=val1,key2=val2'
Available options:
stable-report-filename=true or false (default)
Switch the page naming to:
report-<filename>-<function/method name>-<id>.html
instead of report-XXXXXX.html""")
advanced.add_argument(
'--exclude',
metavar='<directory>',
dest='excludes',
action='append',
default=[],
help="""Do not run static analyzer against files found in this
directory. (You can specify this option multiple times.)
            Could be useful when the project contains 3rd party libraries.
            The directory path shall be an absolute path, as are the file
            names in the compilation database.""")
advanced.add_argument(
'--force-analyze-debug-code',
dest='force_debug',
action='store_true',
help="""Tells analyzer to enable assertions in code even if they were
disabled during compilation, enabling more precise results.""")
plugins = parser.add_argument_group('checker options')
plugins.add_argument(
'--load-plugin', '-load-plugin',
metavar='<plugin library>',
dest='plugins',
action='append',
help="""Loading external checkers using the clang plugin interface.""")
plugins.add_argument(
'--enable-checker', '-enable-checker',
metavar='<checker name>',
action=AppendCommaSeparated,
help="""Enable specific checker.""")
plugins.add_argument(
'--disable-checker', '-disable-checker',
metavar='<checker name>',
action=AppendCommaSeparated,
help="""Disable specific checker.""")
plugins.add_argument(
'--help-checkers',
action='store_true',
help="""A default group of checkers is run unless explicitly disabled.
Exactly which checkers constitute the default group is a
function of the operating system in use. These can be printed
with this flag.""")
plugins.add_argument(
'--help-checkers-verbose',
action='store_true',
help="""Print all available checkers and mark the enabled ones.""")
if from_build_command:
parser.add_argument(
dest='build',
nargs=argparse.REMAINDER,
help="""Command to run.""")
return parser
class AppendCommaSeparated(argparse.Action):
""" argparse Action class to support multiple comma separated lists. """
def __call__(self, __parser, namespace, values, __option_string):
        # getattr(obj, attr, default) does not really return default, but None
if getattr(namespace, self.dest, None) is None:
setattr(namespace, self.dest, [])
# once it's fixed we can use as expected
actual = getattr(namespace, self.dest)
actual.extend(values.split(','))
setattr(namespace, self.dest, actual)
| 20,779 | 39.193424 | 79 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/bindings/python/clang/enumerations.py | #===- enumerations.py - Python Enumerations ------------------*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
"""
Clang Enumerations
==================
This module provides static definitions of enumerations that exist in libclang.
Enumerations are typically defined as a list of tuples. The exported values are
typically munged into other types or classes at module load time.
All enumerations are centrally defined in this file so they are all grouped
together and easier to audit. And, maybe even one day this file will be
automatically generated by scanning the libclang headers!
"""
# Maps to CXTokenKind. Note that libclang maintains a separate set of token
# enumerations from the C++ API.
TokenKinds = [
('PUNCTUATION', 0),
('KEYWORD', 1),
('IDENTIFIER', 2),
('LITERAL', 3),
('COMMENT', 4),
]
__all__ = ['TokenKinds']
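# A minimal usage sketch (illustrative only, not part of the original
# module): consumers typically munge these (name, value) pairs at module
# load time, e.g. into a value-indexed lookup table.
def _token_kinds_example():
    """ Hypothetical helper for illustration. """
    by_value = dict((value, name) for name, value in TokenKinds)
    assert by_value[2] == 'IDENTIFIER'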
| 1,077 | 29.8 | 80 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/bindings/python/clang/cindex.py | #===- cindex.py - Python Indexing Library Bindings -----------*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
r"""
Clang Indexing Library Bindings
===============================
This module provides an interface to the Clang indexing library. It is a
low-level interface to the indexing library which attempts to match the Clang
API directly while also being "pythonic". Notable differences from the C API
are:
* string results are returned as Python strings, not CXString objects.
* null cursors are translated to None.
* access to child cursors is done via iteration, not visitation.
The major indexing objects are:
Index
The top-level object which manages some global library state.
TranslationUnit
High-level object encapsulating the AST for a single translation unit. These
can be loaded from .ast files or parsed on the fly.
Cursor
Generic object for representing a node in the AST.
SourceRange, SourceLocation, and File
Objects representing information about the input source.
Most object information is exposed using properties, when the underlying API
call is efficient.
"""
# TODO
# ====
#
# o API support for invalid translation units. Currently we can't even get the
# diagnostics on failure because they refer to locations in an object that
# will have been invalidated.
#
# o fix memory management issues (currently client must hold on to index and
# translation unit, or risk crashes).
#
# o expose code completion APIs.
#
# o cleanup ctypes wrapping, would be nice to separate the ctypes details more
# clearly, and hide from the external interface (i.e., help(cindex)).
#
# o implement additional SourceLocation, SourceRange, and File methods.
from ctypes import *
import collections
import clang.enumerations
# ctypes doesn't implicitly convert c_void_p to the appropriate wrapper
# object. This is a problem, because it means that from_parameter will see an
# integer and pass the wrong value on platforms where int != void*. Work around
# this by marshalling object arguments as void**.
c_object_p = POINTER(c_void_p)
callbacks = {}
### Exception Classes ###
class TranslationUnitLoadError(Exception):
"""Represents an error that occurred when loading a TranslationUnit.
This is raised in the case where a TranslationUnit could not be
instantiated due to failure in the libclang library.
FIXME: Make libclang expose additional error information in this scenario.
"""
pass
class TranslationUnitSaveError(Exception):
"""Represents an error that occurred when saving a TranslationUnit.
Each error has associated with it an enumerated value, accessible under
e.save_error. Consumers can compare the value with one of the ERROR_
constants in this class.
"""
# Indicates that an unknown error occurred. This typically indicates that
# I/O failed during save.
ERROR_UNKNOWN = 1
# Indicates that errors during translation prevented saving. The errors
# should be available via the TranslationUnit's diagnostics.
ERROR_TRANSLATION_ERRORS = 2
# Indicates that the translation unit was somehow invalid.
ERROR_INVALID_TU = 3
def __init__(self, enumeration, message):
assert isinstance(enumeration, int)
if enumeration < 1 or enumeration > 3:
raise Exception("Encountered undefined TranslationUnit save error "
"constant: %d. Please file a bug to have this "
"value supported." % enumeration)
self.save_error = enumeration
Exception.__init__(self, 'Error %d: %s' % (enumeration, message))
### Structures and Utility Classes ###
class CachedProperty(object):
"""Decorator that lazy-loads the value of a property.
The first time the property is accessed, the original property function is
executed. The value it returns is set as the new value of that instance's
property, replacing the original method.
"""
def __init__(self, wrapped):
self.wrapped = wrapped
try:
self.__doc__ = wrapped.__doc__
except:
pass
def __get__(self, instance, instance_type=None):
if instance is None:
return self
value = self.wrapped(instance)
setattr(instance, self.wrapped.__name__, value)
return value
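# A minimal usage sketch (illustrative only, not part of the original
# module): the wrapped function runs once; afterwards the computed value in
# the instance dictionary shadows this non-data descriptor.
def _cached_property_example():
    """ Hypothetical helper for illustration. """
    class Counter(object):
        calls = 0
        @CachedProperty
        def value(self):
            Counter.calls += 1
            return 42
    instance = Counter()
    assert instance.value == 42
    assert instance.value == 42
    assert Counter.calls == 1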
class _CXString(Structure):
"""Helper for transforming CXString results."""
_fields_ = [("spelling", c_char_p), ("free", c_int)]
def __del__(self):
conf.lib.clang_disposeString(self)
@staticmethod
def from_result(res, fn, args):
assert isinstance(res, _CXString)
return conf.lib.clang_getCString(res)
class SourceLocation(Structure):
"""
A SourceLocation represents a particular location within a source file.
"""
_fields_ = [("ptr_data", c_void_p * 2), ("int_data", c_uint)]
_data = None
def _get_instantiation(self):
if self._data is None:
f, l, c, o = c_object_p(), c_uint(), c_uint(), c_uint()
conf.lib.clang_getInstantiationLocation(self, byref(f), byref(l),
byref(c), byref(o))
if f:
f = File(f)
else:
f = None
self._data = (f, int(l.value), int(c.value), int(o.value))
return self._data
@staticmethod
def from_position(tu, file, line, column):
"""
Retrieve the source location associated with a given file/line/column in
a particular translation unit.
"""
return conf.lib.clang_getLocation(tu, file, line, column)
@staticmethod
def from_offset(tu, file, offset):
"""Retrieve a SourceLocation from a given character offset.
tu -- TranslationUnit file belongs to
file -- File instance to obtain offset from
offset -- Integer character offset within file
"""
return conf.lib.clang_getLocationForOffset(tu, file, offset)
@property
def file(self):
"""Get the file represented by this source location."""
return self._get_instantiation()[0]
@property
def line(self):
"""Get the line represented by this source location."""
return self._get_instantiation()[1]
@property
def column(self):
"""Get the column represented by this source location."""
return self._get_instantiation()[2]
@property
def offset(self):
"""Get the file offset represented by this source location."""
return self._get_instantiation()[3]
def __eq__(self, other):
return conf.lib.clang_equalLocations(self, other)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
if self.file:
filename = self.file.name
else:
filename = None
return "<SourceLocation file %r, line %r, column %r>" % (
filename, self.line, self.column)
class SourceRange(Structure):
"""
A SourceRange describes a range of source locations within the source
code.
"""
_fields_ = [
("ptr_data", c_void_p * 2),
("begin_int_data", c_uint),
("end_int_data", c_uint)]
# FIXME: Eliminate this and make normal constructor? Requires hiding ctypes
# object.
@staticmethod
def from_locations(start, end):
return conf.lib.clang_getRange(start, end)
@property
def start(self):
"""
Return a SourceLocation representing the first character within a
source range.
"""
return conf.lib.clang_getRangeStart(self)
@property
def end(self):
"""
Return a SourceLocation representing the last character within a
source range.
"""
return conf.lib.clang_getRangeEnd(self)
def __eq__(self, other):
return conf.lib.clang_equalRanges(self, other)
def __ne__(self, other):
return not self.__eq__(other)
def __contains__(self, other):
"""Useful to detect the Token/Lexer bug"""
if not isinstance(other, SourceLocation):
return False
if other.file is None and self.start.file is None:
pass
elif ( self.start.file.name != other.file.name or
other.file.name != self.end.file.name):
# same file name
return False
# same file, in between lines
if self.start.line < other.line < self.end.line:
return True
elif self.start.line == other.line:
# same file first line
if self.start.column <= other.column:
return True
elif other.line == self.end.line:
# same file last line
if other.column <= self.end.column:
return True
return False
def __repr__(self):
return "<SourceRange start %r, end %r>" % (self.start, self.end)
class Diagnostic(object):
"""
A Diagnostic is a single instance of a Clang diagnostic. It includes the
    diagnostic severity, the message, the location where the diagnostic occurred, as
well as additional source ranges and associated fix-it hints.
"""
Ignored = 0
Note = 1
Warning = 2
Error = 3
Fatal = 4
DisplaySourceLocation = 0x01
DisplayColumn = 0x02
DisplaySourceRanges = 0x04
DisplayOption = 0x08
DisplayCategoryId = 0x10
DisplayCategoryName = 0x20
_FormatOptionsMask = 0x3f
def __init__(self, ptr):
self.ptr = ptr
def __del__(self):
conf.lib.clang_disposeDiagnostic(self)
@property
def severity(self):
return conf.lib.clang_getDiagnosticSeverity(self)
@property
def location(self):
return conf.lib.clang_getDiagnosticLocation(self)
@property
def spelling(self):
return conf.lib.clang_getDiagnosticSpelling(self)
@property
def ranges(self):
class RangeIterator:
def __init__(self, diag):
self.diag = diag
def __len__(self):
return int(conf.lib.clang_getDiagnosticNumRanges(self.diag))
def __getitem__(self, key):
if (key >= len(self)):
raise IndexError
return conf.lib.clang_getDiagnosticRange(self.diag, key)
return RangeIterator(self)
@property
def fixits(self):
class FixItIterator:
def __init__(self, diag):
self.diag = diag
def __len__(self):
return int(conf.lib.clang_getDiagnosticNumFixIts(self.diag))
def __getitem__(self, key):
range = SourceRange()
value = conf.lib.clang_getDiagnosticFixIt(self.diag, key,
byref(range))
if len(value) == 0:
raise IndexError
return FixIt(range, value)
return FixItIterator(self)
@property
def children(self):
class ChildDiagnosticsIterator:
def __init__(self, diag):
self.diag_set = conf.lib.clang_getChildDiagnostics(diag)
def __len__(self):
return int(conf.lib.clang_getNumDiagnosticsInSet(self.diag_set))
def __getitem__(self, key):
diag = conf.lib.clang_getDiagnosticInSet(self.diag_set, key)
if not diag:
raise IndexError
return Diagnostic(diag)
return ChildDiagnosticsIterator(self)
@property
def category_number(self):
"""The category number for this diagnostic or 0 if unavailable."""
return conf.lib.clang_getDiagnosticCategory(self)
@property
def category_name(self):
"""The string name of the category for this diagnostic."""
return conf.lib.clang_getDiagnosticCategoryText(self)
@property
def option(self):
"""The command-line option that enables this diagnostic."""
return conf.lib.clang_getDiagnosticOption(self, None)
@property
def disable_option(self):
"""The command-line option that disables this diagnostic."""
disable = _CXString()
conf.lib.clang_getDiagnosticOption(self, byref(disable))
return conf.lib.clang_getCString(disable)
def format(self, options=None):
"""
Format this diagnostic for display. The options argument takes
Diagnostic.Display* flags, which can be combined using bitwise OR. If
the options argument is not provided, the default display options will
be used.
"""
if options is None:
options = conf.lib.clang_defaultDiagnosticDisplayOptions()
if options & ~Diagnostic._FormatOptionsMask:
raise ValueError('Invalid format options')
formatted = conf.lib.clang_formatDiagnostic(self, options)
return conf.lib.clang_getCString(formatted)
def __repr__(self):
return "<Diagnostic severity %r, location %r, spelling %r>" % (
self.severity, self.location, self.spelling)
def __str__(self):
return self.format()
def from_param(self):
return self.ptr
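# Usage sketch (illustrative comment only; 'tu' is a hypothetical
# TranslationUnit). Diagnostics are exposed as an iterable on the translation
# unit, and each one can be rendered with the default display options:
#
#   for diag in tu.diagnostics:
#       if diag.severity >= Diagnostic.Warning:
#           print(diag.format())
#       for fix in diag.fixits:
#           print('fix-it:', fix.range, '->', fix.value)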
class FixIt(object):
"""
A FixIt represents a transformation to be applied to the source to
"fix-it". The fix-it shouldbe applied by replacing the given source range
with the given value.
"""
def __init__(self, range, value):
self.range = range
self.value = value
def __repr__(self):
return "<FixIt range %r, value %r>" % (self.range, self.value)
class TokenGroup(object):
"""Helper class to facilitate token management.
Tokens are allocated from libclang in chunks. They must be disposed of as a
collective group.
One purpose of this class is for instances to represent groups of allocated
tokens. Each token in a group contains a reference back to an instance of
this class. When all tokens from a group are garbage collected, it allows
this class to be garbage collected. When this class is garbage collected,
it calls the libclang destructor which invalidates all tokens in the group.
You should not instantiate this class outside of this module.
"""
def __init__(self, tu, memory, count):
self._tu = tu
self._memory = memory
self._count = count
def __del__(self):
conf.lib.clang_disposeTokens(self._tu, self._memory, self._count)
@staticmethod
def get_tokens(tu, extent):
"""Helper method to return all tokens in an extent.
        This functionality is needed in multiple places in this module. We define
it here because it seems like a logical place.
"""
tokens_memory = POINTER(Token)()
tokens_count = c_uint()
conf.lib.clang_tokenize(tu, extent, byref(tokens_memory),
byref(tokens_count))
count = int(tokens_count.value)
# If we get no tokens, no memory was allocated. Be sure not to return
# anything and potentially call a destructor on nothing.
if count < 1:
return
tokens_array = cast(tokens_memory, POINTER(Token * count)).contents
token_group = TokenGroup(tu, tokens_memory, tokens_count)
for i in xrange(0, count):
token = Token()
token.int_data = tokens_array[i].int_data
token.ptr_data = tokens_array[i].ptr_data
token._tu = tu
token._group = token_group
yield token
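# Usage sketch (illustrative comment only; 'cursor' is a hypothetical Cursor).
# TokenGroup.get_tokens is normally reached via Cursor.get_tokens, and each
# yielded Token keeps its TokenGroup -- and thus the underlying libclang
# allocation -- alive:
#
#   for tok in cursor.get_tokens():
#       print(tok.kind, tok.spelling)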
class TokenKind(object):
"""Describes a specific type of a Token."""
_value_map = {} # int -> TokenKind
def __init__(self, value, name):
"""Create a new TokenKind instance from a numeric value and a name."""
self.value = value
self.name = name
def __repr__(self):
return 'TokenKind.%s' % (self.name,)
@staticmethod
def from_value(value):
"""Obtain a registered TokenKind instance from its value."""
result = TokenKind._value_map.get(value, None)
if result is None:
raise ValueError('Unknown TokenKind: %d' % value)
return result
@staticmethod
def register(value, name):
"""Register a new TokenKind enumeration.
This should only be called at module load time by code within this
package.
"""
if value in TokenKind._value_map:
raise ValueError('TokenKind already registered: %d' % value)
kind = TokenKind(value, name)
TokenKind._value_map[value] = kind
setattr(TokenKind, name, kind)
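# Usage sketch (illustrative comment only). register() is invoked at module
# load time; afterwards a numeric kind returned by libclang maps back to the
# named singleton:
#
#   kind = TokenKind.from_value(value)   # 'value' came from libclang
#   print(kind.name)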
### Cursor Kinds ###
class BaseEnumeration(object):
"""
Common base class for named enumerations held in sync with Index.h values.
Subclasses must define their own _kinds and _name_map members, as:
_kinds = []
_name_map = None
These values hold the per-subclass instances and value-to-name mappings,
respectively.
"""
def __init__(self, value):
if value >= len(self.__class__._kinds):
self.__class__._kinds += [None] * (value - len(self.__class__._kinds) + 1)
if self.__class__._kinds[value] is not None:
raise ValueError('{0} value {1} already loaded'.format(
str(self.__class__), value))
self.value = value
self.__class__._kinds[value] = self
self.__class__._name_map = None
def from_param(self):
return self.value
@property
def name(self):
"""Get the enumeration name of this cursor kind."""
if self._name_map is None:
self._name_map = {}
for key, value in self.__class__.__dict__.items():
if isinstance(value, self.__class__):
self._name_map[value] = key
return self._name_map[self]
@classmethod
def from_id(cls, id):
if id >= len(cls._kinds) or cls._kinds[id] is None:
raise ValueError('Unknown template argument kind %d' % id)
return cls._kinds[id]
def __repr__(self):
return '%s.%s' % (self.__class__, self.name,)
class CursorKind(BaseEnumeration):
"""
A CursorKind describes the kind of entity that a cursor points to.
"""
# The required BaseEnumeration declarations.
_kinds = []
_name_map = None
@staticmethod
def get_all_kinds():
"""Return all CursorKind enumeration instances."""
return filter(None, CursorKind._kinds)
def is_declaration(self):
"""Test if this is a declaration kind."""
return conf.lib.clang_isDeclaration(self)
def is_reference(self):
"""Test if this is a reference kind."""
return conf.lib.clang_isReference(self)
def is_expression(self):
"""Test if this is an expression kind."""
return conf.lib.clang_isExpression(self)
def is_statement(self):
"""Test if this is a statement kind."""
return conf.lib.clang_isStatement(self)
def is_attribute(self):
"""Test if this is an attribute kind."""
return conf.lib.clang_isAttribute(self)
def is_invalid(self):
"""Test if this is an invalid kind."""
return conf.lib.clang_isInvalid(self)
def is_translation_unit(self):
"""Test if this is a translation unit kind."""
return conf.lib.clang_isTranslationUnit(self)
def is_preprocessing(self):
"""Test if this is a preprocessing kind."""
return conf.lib.clang_isPreprocessing(self)
def is_unexposed(self):
"""Test if this is an unexposed kind."""
return conf.lib.clang_isUnexposed(self)
def __repr__(self):
return 'CursorKind.%s' % (self.name,)
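# Usage sketch (illustrative comment only; 'cursor' is a hypothetical Cursor).
# The predicates above give a coarse classification of a cursor's kind:
#
#   k = cursor.kind
#   if k.is_declaration():
#       print('declaration:', k.name)
#   elif k.is_expression():
#       print('expression:', k.name)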
###
# Declaration Kinds
# A declaration whose specific kind is not exposed via this interface.
#
# Unexposed declarations have the same operations as any other kind of
# declaration; one can extract their location information, spelling, find their
# definitions, etc. However, the specific kind of the declaration is not
# reported.
CursorKind.UNEXPOSED_DECL = CursorKind(1)
# A C or C++ struct.
CursorKind.STRUCT_DECL = CursorKind(2)
# A C or C++ union.
CursorKind.UNION_DECL = CursorKind(3)
# A C++ class.
CursorKind.CLASS_DECL = CursorKind(4)
# An enumeration.
CursorKind.ENUM_DECL = CursorKind(5)
# A field (in C) or non-static data member (in C++) in a struct, union, or C++
# class.
CursorKind.FIELD_DECL = CursorKind(6)
# An enumerator constant.
CursorKind.ENUM_CONSTANT_DECL = CursorKind(7)
# A function.
CursorKind.FUNCTION_DECL = CursorKind(8)
# A variable.
CursorKind.VAR_DECL = CursorKind(9)
# A function or method parameter.
CursorKind.PARM_DECL = CursorKind(10)
# An Objective-C @interface.
CursorKind.OBJC_INTERFACE_DECL = CursorKind(11)
# An Objective-C @interface for a category.
CursorKind.OBJC_CATEGORY_DECL = CursorKind(12)
# An Objective-C @protocol declaration.
CursorKind.OBJC_PROTOCOL_DECL = CursorKind(13)
# An Objective-C @property declaration.
CursorKind.OBJC_PROPERTY_DECL = CursorKind(14)
# An Objective-C instance variable.
CursorKind.OBJC_IVAR_DECL = CursorKind(15)
# An Objective-C instance method.
CursorKind.OBJC_INSTANCE_METHOD_DECL = CursorKind(16)
# An Objective-C class method.
CursorKind.OBJC_CLASS_METHOD_DECL = CursorKind(17)
# An Objective-C @implementation.
CursorKind.OBJC_IMPLEMENTATION_DECL = CursorKind(18)
# An Objective-C @implementation for a category.
CursorKind.OBJC_CATEGORY_IMPL_DECL = CursorKind(19)
# A typedef.
CursorKind.TYPEDEF_DECL = CursorKind(20)
# A C++ class method.
CursorKind.CXX_METHOD = CursorKind(21)
# A C++ namespace.
CursorKind.NAMESPACE = CursorKind(22)
# A linkage specification, e.g. 'extern "C"'.
CursorKind.LINKAGE_SPEC = CursorKind(23)
# A C++ constructor.
CursorKind.CONSTRUCTOR = CursorKind(24)
# A C++ destructor.
CursorKind.DESTRUCTOR = CursorKind(25)
# A C++ conversion function.
CursorKind.CONVERSION_FUNCTION = CursorKind(26)
# A C++ template type parameter
CursorKind.TEMPLATE_TYPE_PARAMETER = CursorKind(27)
# A C++ non-type template parameter.
CursorKind.TEMPLATE_NON_TYPE_PARAMETER = CursorKind(28)
# A C++ template template parameter.
CursorKind.TEMPLATE_TEMPLATE_PARAMETER = CursorKind(29)
# A C++ function template.
CursorKind.FUNCTION_TEMPLATE = CursorKind(30)
# A C++ class template.
CursorKind.CLASS_TEMPLATE = CursorKind(31)
# A C++ class template partial specialization.
CursorKind.CLASS_TEMPLATE_PARTIAL_SPECIALIZATION = CursorKind(32)
# A C++ namespace alias declaration.
CursorKind.NAMESPACE_ALIAS = CursorKind(33)
# A C++ using directive
CursorKind.USING_DIRECTIVE = CursorKind(34)
# A C++ using declaration
CursorKind.USING_DECLARATION = CursorKind(35)
# A Type alias decl.
CursorKind.TYPE_ALIAS_DECL = CursorKind(36)
# A Objective-C synthesize decl
CursorKind.OBJC_SYNTHESIZE_DECL = CursorKind(37)
# A Objective-C dynamic decl
CursorKind.OBJC_DYNAMIC_DECL = CursorKind(38)
# A C++ access specifier decl.
CursorKind.CXX_ACCESS_SPEC_DECL = CursorKind(39)
###
# Reference Kinds
CursorKind.OBJC_SUPER_CLASS_REF = CursorKind(40)
CursorKind.OBJC_PROTOCOL_REF = CursorKind(41)
CursorKind.OBJC_CLASS_REF = CursorKind(42)
# A reference to a type declaration.
#
# A type reference occurs anywhere where a type is named but not
# declared. For example, given:
# typedef unsigned size_type;
# size_type size;
#
# The typedef is a declaration of size_type (CXCursor_TypedefDecl),
# while the type of the variable "size" is referenced. The cursor
# referenced by the type of size is the typedef for size_type.
CursorKind.TYPE_REF = CursorKind(43)
CursorKind.CXX_BASE_SPECIFIER = CursorKind(44)
# A reference to a class template, function template, template
# template parameter, or class template partial specialization.
CursorKind.TEMPLATE_REF = CursorKind(45)
# A reference to a namespace or namespace alias.
CursorKind.NAMESPACE_REF = CursorKind(46)
# A reference to a member of a struct, union, or class that occurs in
# some non-expression context, e.g., a designated initializer.
CursorKind.MEMBER_REF = CursorKind(47)
# A reference to a labeled statement.
CursorKind.LABEL_REF = CursorKind(48)
# A reference to a set of overloaded functions or function templates
# that has not yet been resolved to a specific function or function template.
CursorKind.OVERLOADED_DECL_REF = CursorKind(49)
# A reference to a variable that occurs in some non-expression
# context, e.g., a C++ lambda capture list.
CursorKind.VARIABLE_REF = CursorKind(50)
###
# Invalid/Error Kinds
CursorKind.INVALID_FILE = CursorKind(70)
CursorKind.NO_DECL_FOUND = CursorKind(71)
CursorKind.NOT_IMPLEMENTED = CursorKind(72)
CursorKind.INVALID_CODE = CursorKind(73)
###
# Expression Kinds
# An expression whose specific kind is not exposed via this interface.
#
# Unexposed expressions have the same operations as any other kind of
# expression; one can extract their location information, spelling, children,
# etc. However, the specific kind of the expression is not reported.
CursorKind.UNEXPOSED_EXPR = CursorKind(100)
# An expression that refers to some value declaration, such as a function,
# variable, or enumerator.
CursorKind.DECL_REF_EXPR = CursorKind(101)
# An expression that refers to a member of a struct, union, class, Objective-C
# class, etc.
CursorKind.MEMBER_REF_EXPR = CursorKind(102)
# An expression that calls a function.
CursorKind.CALL_EXPR = CursorKind(103)
# An expression that sends a message to an Objective-C object or class.
CursorKind.OBJC_MESSAGE_EXPR = CursorKind(104)
# An expression that represents a block literal.
CursorKind.BLOCK_EXPR = CursorKind(105)
# An integer literal.
CursorKind.INTEGER_LITERAL = CursorKind(106)
# A floating point number literal.
CursorKind.FLOATING_LITERAL = CursorKind(107)
# An imaginary number literal.
CursorKind.IMAGINARY_LITERAL = CursorKind(108)
# A string literal.
CursorKind.STRING_LITERAL = CursorKind(109)
# A character literal.
CursorKind.CHARACTER_LITERAL = CursorKind(110)
# A parenthesized expression, e.g. "(1)".
#
# This AST node is only formed if full location information is requested.
CursorKind.PAREN_EXPR = CursorKind(111)
# This represents a unary expression (other than sizeof and alignof).
CursorKind.UNARY_OPERATOR = CursorKind(112)
# [C99 6.5.2.1] Array Subscripting.
CursorKind.ARRAY_SUBSCRIPT_EXPR = CursorKind(113)
# A builtin binary operation expression such as "x + y" or
# "x <= y".
CursorKind.BINARY_OPERATOR = CursorKind(114)
# Compound assignment such as "+=".
CursorKind.COMPOUND_ASSIGNMENT_OPERATOR = CursorKind(115)
# The ?: ternary operator.
CursorKind.CONDITIONAL_OPERATOR = CursorKind(116)
# An explicit cast in C (C99 6.5.4) or a C-style cast in C++
# (C++ [expr.cast]), which uses the syntax (Type)expr.
#
# For example: (int)f.
CursorKind.CSTYLE_CAST_EXPR = CursorKind(117)
# [C99 6.5.2.5]
CursorKind.COMPOUND_LITERAL_EXPR = CursorKind(118)
# Describes a C or C++ initializer list.
CursorKind.INIT_LIST_EXPR = CursorKind(119)
# The GNU address of label extension, representing &&label.
CursorKind.ADDR_LABEL_EXPR = CursorKind(120)
# This is the GNU Statement Expression extension: ({int X=4; X;})
CursorKind.StmtExpr = CursorKind(121)
# Represents a C11 generic selection.
CursorKind.GENERIC_SELECTION_EXPR = CursorKind(122)
# Implements the GNU __null extension, which is a name for a null
# pointer constant that has integral type (e.g., int or long) and is the same
# size and alignment as a pointer.
#
# The __null extension is typically only used by system headers, which define
# NULL as __null in C++ rather than using 0 (which is an integer that may not
# match the size of a pointer).
CursorKind.GNU_NULL_EXPR = CursorKind(123)
# C++'s static_cast<> expression.
CursorKind.CXX_STATIC_CAST_EXPR = CursorKind(124)
# C++'s dynamic_cast<> expression.
CursorKind.CXX_DYNAMIC_CAST_EXPR = CursorKind(125)
# C++'s reinterpret_cast<> expression.
CursorKind.CXX_REINTERPRET_CAST_EXPR = CursorKind(126)
# C++'s const_cast<> expression.
CursorKind.CXX_CONST_CAST_EXPR = CursorKind(127)
# Represents an explicit C++ type conversion that uses "functional"
# notation (C++ [expr.type.conv]).
#
# Example:
# \code
# x = int(0.5);
# \endcode
CursorKind.CXX_FUNCTIONAL_CAST_EXPR = CursorKind(128)
# A C++ typeid expression (C++ [expr.typeid]).
CursorKind.CXX_TYPEID_EXPR = CursorKind(129)
# [C++ 2.13.5] C++ Boolean Literal.
CursorKind.CXX_BOOL_LITERAL_EXPR = CursorKind(130)
# [C++0x 2.14.7] C++ Pointer Literal.
CursorKind.CXX_NULL_PTR_LITERAL_EXPR = CursorKind(131)
# Represents the "this" expression in C++
CursorKind.CXX_THIS_EXPR = CursorKind(132)
# [C++ 15] C++ Throw Expression.
#
# This handles 'throw' and 'throw' assignment-expression. When
# assignment-expression isn't present, Op will be null.
CursorKind.CXX_THROW_EXPR = CursorKind(133)
# A new expression for memory allocation and constructor calls, e.g:
# "new CXXNewExpr(foo)".
CursorKind.CXX_NEW_EXPR = CursorKind(134)
# A delete expression for memory deallocation and destructor calls,
# e.g. "delete[] pArray".
CursorKind.CXX_DELETE_EXPR = CursorKind(135)
# Represents a unary expression.
CursorKind.CXX_UNARY_EXPR = CursorKind(136)
# ObjCStringLiteral, used for Objective-C string literals i.e. "foo".
CursorKind.OBJC_STRING_LITERAL = CursorKind(137)
# ObjCEncodeExpr, used for @encode in Objective-C.
CursorKind.OBJC_ENCODE_EXPR = CursorKind(138)
# ObjCSelectorExpr, used for @selector in Objective-C.
CursorKind.OBJC_SELECTOR_EXPR = CursorKind(139)
# Objective-C's protocol expression.
CursorKind.OBJC_PROTOCOL_EXPR = CursorKind(140)
# An Objective-C "bridged" cast expression, which casts between
# Objective-C pointers and C pointers, transferring ownership in the process.
#
# \code
# NSString *str = (__bridge_transfer NSString *)CFCreateString();
# \endcode
CursorKind.OBJC_BRIDGE_CAST_EXPR = CursorKind(141)
# Represents a C++0x pack expansion that produces a sequence of
# expressions.
#
# A pack expansion expression contains a pattern (which itself is an
# expression) followed by an ellipsis. For example:
CursorKind.PACK_EXPANSION_EXPR = CursorKind(142)
# Represents an expression that computes the length of a parameter
# pack.
CursorKind.SIZE_OF_PACK_EXPR = CursorKind(143)
# Represents a C++ lambda expression that produces a local function
# object.
#
# \code
# void abssort(float *x, unsigned N) {
# std::sort(x, x + N,
# [](float a, float b) {
# return std::abs(a) < std::abs(b);
# });
# }
# \endcode
CursorKind.LAMBDA_EXPR = CursorKind(144)
# Objective-C Boolean Literal.
CursorKind.OBJ_BOOL_LITERAL_EXPR = CursorKind(145)
# Represents the "self" expression in a ObjC method.
CursorKind.OBJ_SELF_EXPR = CursorKind(146)
# OpenMP 4.0 [2.4, Array Section].
CursorKind.OMP_ARRAY_SECTION_EXPR = CursorKind(147)
# Represents an @available(...) check.
CursorKind.OBJC_AVAILABILITY_CHECK_EXPR = CursorKind(148)
# A statement whose specific kind is not exposed via this interface.
#
# Unexposed statements have the same operations as any other kind of statement;
# one can extract their location information, spelling, children, etc. However,
# the specific kind of the statement is not reported.
CursorKind.UNEXPOSED_STMT = CursorKind(200)
# A labelled statement in a function.
CursorKind.LABEL_STMT = CursorKind(201)
# A compound statement
CursorKind.COMPOUND_STMT = CursorKind(202)
# A case statement.
CursorKind.CASE_STMT = CursorKind(203)
# A default statement.
CursorKind.DEFAULT_STMT = CursorKind(204)
# An if statement.
CursorKind.IF_STMT = CursorKind(205)
# A switch statement.
CursorKind.SWITCH_STMT = CursorKind(206)
# A while statement.
CursorKind.WHILE_STMT = CursorKind(207)
# A do statement.
CursorKind.DO_STMT = CursorKind(208)
# A for statement.
CursorKind.FOR_STMT = CursorKind(209)
# A goto statement.
CursorKind.GOTO_STMT = CursorKind(210)
# An indirect goto statement.
CursorKind.INDIRECT_GOTO_STMT = CursorKind(211)
# A continue statement.
CursorKind.CONTINUE_STMT = CursorKind(212)
# A break statement.
CursorKind.BREAK_STMT = CursorKind(213)
# A return statement.
CursorKind.RETURN_STMT = CursorKind(214)
# A GNU-style inline assembler statement.
CursorKind.ASM_STMT = CursorKind(215)
# Objective-C's overall @try-@catch-@finally statement.
CursorKind.OBJC_AT_TRY_STMT = CursorKind(216)
# Objective-C's @catch statement.
CursorKind.OBJC_AT_CATCH_STMT = CursorKind(217)
# Objective-C's @finally statement.
CursorKind.OBJC_AT_FINALLY_STMT = CursorKind(218)
# Objective-C's @throw statement.
CursorKind.OBJC_AT_THROW_STMT = CursorKind(219)
# Objective-C's @synchronized statement.
CursorKind.OBJC_AT_SYNCHRONIZED_STMT = CursorKind(220)
# Objective-C's autorelease pool statement.
CursorKind.OBJC_AUTORELEASE_POOL_STMT = CursorKind(221)
# Objective-C's for collection statement.
CursorKind.OBJC_FOR_COLLECTION_STMT = CursorKind(222)
# C++'s catch statement.
CursorKind.CXX_CATCH_STMT = CursorKind(223)
# C++'s try statement.
CursorKind.CXX_TRY_STMT = CursorKind(224)
# C++'s range-based for statement (for (decl : range)).
CursorKind.CXX_FOR_RANGE_STMT = CursorKind(225)
# Windows Structured Exception Handling's try statement.
CursorKind.SEH_TRY_STMT = CursorKind(226)
# Windows Structured Exception Handling's except statement.
CursorKind.SEH_EXCEPT_STMT = CursorKind(227)
# Windows Structured Exception Handling's finally statement.
CursorKind.SEH_FINALLY_STMT = CursorKind(228)
# A MS inline assembly statement extension.
CursorKind.MS_ASM_STMT = CursorKind(229)
# The null statement.
CursorKind.NULL_STMT = CursorKind(230)
# Adaptor class for mixing declarations with statements and expressions.
CursorKind.DECL_STMT = CursorKind(231)
# OpenMP parallel directive.
CursorKind.OMP_PARALLEL_DIRECTIVE = CursorKind(232)
# OpenMP SIMD directive.
CursorKind.OMP_SIMD_DIRECTIVE = CursorKind(233)
# OpenMP for directive.
CursorKind.OMP_FOR_DIRECTIVE = CursorKind(234)
# OpenMP sections directive.
CursorKind.OMP_SECTIONS_DIRECTIVE = CursorKind(235)
# OpenMP section directive.
CursorKind.OMP_SECTION_DIRECTIVE = CursorKind(236)
# OpenMP single directive.
CursorKind.OMP_SINGLE_DIRECTIVE = CursorKind(237)
# OpenMP parallel for directive.
CursorKind.OMP_PARALLEL_FOR_DIRECTIVE = CursorKind(238)
# OpenMP parallel sections directive.
CursorKind.OMP_PARALLEL_SECTIONS_DIRECTIVE = CursorKind(239)
# OpenMP task directive.
CursorKind.OMP_TASK_DIRECTIVE = CursorKind(240)
# OpenMP master directive.
CursorKind.OMP_MASTER_DIRECTIVE = CursorKind(241)
# OpenMP critical directive.
CursorKind.OMP_CRITICAL_DIRECTIVE = CursorKind(242)
# OpenMP taskyield directive.
CursorKind.OMP_TASKYIELD_DIRECTIVE = CursorKind(243)
# OpenMP barrier directive.
CursorKind.OMP_BARRIER_DIRECTIVE = CursorKind(244)
# OpenMP taskwait directive.
CursorKind.OMP_TASKWAIT_DIRECTIVE = CursorKind(245)
# OpenMP flush directive.
CursorKind.OMP_FLUSH_DIRECTIVE = CursorKind(246)
# Windows Structured Exception Handling's leave statement.
CursorKind.SEH_LEAVE_STMT = CursorKind(247)
# OpenMP ordered directive.
CursorKind.OMP_ORDERED_DIRECTIVE = CursorKind(248)
# OpenMP atomic directive.
CursorKind.OMP_ATOMIC_DIRECTIVE = CursorKind(249)
# OpenMP for SIMD directive.
CursorKind.OMP_FOR_SIMD_DIRECTIVE = CursorKind(250)
# OpenMP parallel for SIMD directive.
CursorKind.OMP_PARALLELFORSIMD_DIRECTIVE = CursorKind(251)
# OpenMP target directive.
CursorKind.OMP_TARGET_DIRECTIVE = CursorKind(252)
# OpenMP teams directive.
CursorKind.OMP_TEAMS_DIRECTIVE = CursorKind(253)
# OpenMP taskgroup directive.
CursorKind.OMP_TASKGROUP_DIRECTIVE = CursorKind(254)
# OpenMP cancellation point directive.
CursorKind.OMP_CANCELLATION_POINT_DIRECTIVE = CursorKind(255)
# OpenMP cancel directive.
CursorKind.OMP_CANCEL_DIRECTIVE = CursorKind(256)
# OpenMP target data directive.
CursorKind.OMP_TARGET_DATA_DIRECTIVE = CursorKind(257)
# OpenMP taskloop directive.
CursorKind.OMP_TASK_LOOP_DIRECTIVE = CursorKind(258)
# OpenMP taskloop simd directive.
CursorKind.OMP_TASK_LOOP_SIMD_DIRECTIVE = CursorKind(259)
# OpenMP distribute directive.
CursorKind.OMP_DISTRIBUTE_DIRECTIVE = CursorKind(260)
# OpenMP target enter data directive.
CursorKind.OMP_TARGET_ENTER_DATA_DIRECTIVE = CursorKind(261)
# OpenMP target exit data directive.
CursorKind.OMP_TARGET_EXIT_DATA_DIRECTIVE = CursorKind(262)
# OpenMP target parallel directive.
CursorKind.OMP_TARGET_PARALLEL_DIRECTIVE = CursorKind(263)
# OpenMP target parallel for directive.
CursorKind.OMP_TARGET_PARALLELFOR_DIRECTIVE = CursorKind(264)
# OpenMP target update directive.
CursorKind.OMP_TARGET_UPDATE_DIRECTIVE = CursorKind(265)
# OpenMP distribute parallel for directive.
CursorKind.OMP_DISTRIBUTE_PARALLELFOR_DIRECTIVE = CursorKind(266)
# OpenMP distribute parallel for simd directive.
CursorKind.OMP_DISTRIBUTE_PARALLEL_FOR_SIMD_DIRECTIVE = CursorKind(267)
# OpenMP distribute simd directive.
CursorKind.OMP_DISTRIBUTE_SIMD_DIRECTIVE = CursorKind(268)
# OpenMP target parallel for simd directive.
CursorKind.OMP_TARGET_PARALLEL_FOR_SIMD_DIRECTIVE = CursorKind(269)
# OpenMP target simd directive.
CursorKind.OMP_TARGET_SIMD_DIRECTIVE = CursorKind(270)
# OpenMP teams distribute directive.
CursorKind.OMP_TEAMS_DISTRIBUTE_DIRECTIVE = CursorKind(271)
###
# Other Kinds
# Cursor that represents the translation unit itself.
#
# The translation unit cursor exists primarily to act as the root cursor for
# traversing the contents of a translation unit.
CursorKind.TRANSLATION_UNIT = CursorKind(300)
###
# Attributes
# An attribute whose specific kind is not exposed via this interface.
CursorKind.UNEXPOSED_ATTR = CursorKind(400)
CursorKind.IB_ACTION_ATTR = CursorKind(401)
CursorKind.IB_OUTLET_ATTR = CursorKind(402)
CursorKind.IB_OUTLET_COLLECTION_ATTR = CursorKind(403)
CursorKind.CXX_FINAL_ATTR = CursorKind(404)
CursorKind.CXX_OVERRIDE_ATTR = CursorKind(405)
CursorKind.ANNOTATE_ATTR = CursorKind(406)
CursorKind.ASM_LABEL_ATTR = CursorKind(407)
CursorKind.PACKED_ATTR = CursorKind(408)
CursorKind.PURE_ATTR = CursorKind(409)
CursorKind.CONST_ATTR = CursorKind(410)
CursorKind.NODUPLICATE_ATTR = CursorKind(411)
CursorKind.CUDACONSTANT_ATTR = CursorKind(412)
CursorKind.CUDADEVICE_ATTR = CursorKind(413)
CursorKind.CUDAGLOBAL_ATTR = CursorKind(414)
CursorKind.CUDAHOST_ATTR = CursorKind(415)
CursorKind.CUDASHARED_ATTR = CursorKind(416)
CursorKind.VISIBILITY_ATTR = CursorKind(417)
CursorKind.DLLEXPORT_ATTR = CursorKind(418)
CursorKind.DLLIMPORT_ATTR = CursorKind(419)
###
# Preprocessing
CursorKind.PREPROCESSING_DIRECTIVE = CursorKind(500)
CursorKind.MACRO_DEFINITION = CursorKind(501)
CursorKind.MACRO_INSTANTIATION = CursorKind(502)
CursorKind.INCLUSION_DIRECTIVE = CursorKind(503)
###
# Extra declaration
# A module import declaration.
CursorKind.MODULE_IMPORT_DECL = CursorKind(600)
# A type alias template declaration
CursorKind.TYPE_ALIAS_TEMPLATE_DECL = CursorKind(601)
# A static_assert or _Static_assert node
CursorKind.STATIC_ASSERT = CursorKind(602)
# A friend declaration
CursorKind.FRIEND_DECL = CursorKind(603)
# A code completion overload candidate.
CursorKind.OVERLOAD_CANDIDATE = CursorKind(700)
### Template Argument Kinds ###
class TemplateArgumentKind(BaseEnumeration):
"""
A TemplateArgumentKind describes the kind of entity that a template argument
represents.
"""
# The required BaseEnumeration declarations.
_kinds = []
_name_map = None
TemplateArgumentKind.NULL = TemplateArgumentKind(0)
TemplateArgumentKind.TYPE = TemplateArgumentKind(1)
TemplateArgumentKind.DECLARATION = TemplateArgumentKind(2)
TemplateArgumentKind.NULLPTR = TemplateArgumentKind(3)
TemplateArgumentKind.INTEGRAL = TemplateArgumentKind(4)
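# Usage sketch (illustrative comment only; 'cursor' is a hypothetical Cursor
# for a template specialization, inspected with the Cursor methods defined
# below):
#
#   for i in range(cursor.get_num_template_arguments()):
#       print(i, cursor.get_template_argument_kind(i))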
### Cursors ###
class Cursor(Structure):
"""
The Cursor class represents a reference to an element within the AST. It
acts as a kind of iterator.
"""
_fields_ = [("_kind_id", c_int), ("xdata", c_int), ("data", c_void_p * 3)]
@staticmethod
def from_location(tu, location):
# We store a reference to the TU in the instance so the TU won't get
# collected before the cursor.
cursor = conf.lib.clang_getCursor(tu, location)
cursor._tu = tu
return cursor
def __eq__(self, other):
return conf.lib.clang_equalCursors(self, other)
def __ne__(self, other):
return not self.__eq__(other)
def is_definition(self):
"""
Returns true if the declaration pointed at by the cursor is also a
definition of that entity.
"""
return conf.lib.clang_isCursorDefinition(self)
def is_const_method(self):
"""Returns True if the cursor refers to a C++ member function or member
function template that is declared 'const'.
"""
return conf.lib.clang_CXXMethod_isConst(self)
def is_converting_constructor(self):
"""Returns True if the cursor refers to a C++ converting constructor.
"""
return conf.lib.clang_CXXConstructor_isConvertingConstructor(self)
def is_copy_constructor(self):
"""Returns True if the cursor refers to a C++ copy constructor.
"""
return conf.lib.clang_CXXConstructor_isCopyConstructor(self)
def is_default_constructor(self):
"""Returns True if the cursor refers to a C++ default constructor.
"""
return conf.lib.clang_CXXConstructor_isDefaultConstructor(self)
def is_move_constructor(self):
"""Returns True if the cursor refers to a C++ move constructor.
"""
return conf.lib.clang_CXXConstructor_isMoveConstructor(self)
def is_default_method(self):
"""Returns True if the cursor refers to a C++ member function or member
function template that is declared '= default'.
"""
return conf.lib.clang_CXXMethod_isDefaulted(self)
def is_mutable_field(self):
"""Returns True if the cursor refers to a C++ field that is declared
'mutable'.
"""
return conf.lib.clang_CXXField_isMutable(self)
def is_pure_virtual_method(self):
"""Returns True if the cursor refers to a C++ member function or member
function template that is declared pure virtual.
"""
return conf.lib.clang_CXXMethod_isPureVirtual(self)
def is_static_method(self):
"""Returns True if the cursor refers to a C++ member function or member
function template that is declared 'static'.
"""
return conf.lib.clang_CXXMethod_isStatic(self)
def is_virtual_method(self):
"""Returns True if the cursor refers to a C++ member function or member
function template that is declared 'virtual'.
"""
return conf.lib.clang_CXXMethod_isVirtual(self)
def get_definition(self):
"""
If the cursor is a reference to a declaration or a declaration of
some entity, return a cursor that points to the definition of that
entity.
"""
# TODO: Should probably check that this is either a reference or
# declaration prior to issuing the lookup.
return conf.lib.clang_getCursorDefinition(self)
def get_usr(self):
"""Return the Unified Symbol Resultion (USR) for the entity referenced
by the given cursor (or None).
A Unified Symbol Resolution (USR) is a string that identifies a
particular entity (function, class, variable, etc.) within a
program. USRs can be compared across translation units to determine,
        e.g., when references in one translation unit refer to an entity defined in
another translation unit."""
return conf.lib.clang_getCursorUSR(self)
@property
def kind(self):
"""Return the kind of this cursor."""
return CursorKind.from_id(self._kind_id)
@property
def spelling(self):
"""Return the spelling of the entity pointed at by the cursor."""
if not hasattr(self, '_spelling'):
self._spelling = conf.lib.clang_getCursorSpelling(self)
return self._spelling
@property
def displayname(self):
"""
Return the display name for the entity referenced by this cursor.
The display name contains extra information that helps identify the
cursor, such as the parameters of a function or template or the
arguments of a class template specialization.
"""
if not hasattr(self, '_displayname'):
self._displayname = conf.lib.clang_getCursorDisplayName(self)
return self._displayname
@property
def mangled_name(self):
"""Return the mangled name for the entity referenced by this cursor."""
if not hasattr(self, '_mangled_name'):
self._mangled_name = conf.lib.clang_Cursor_getMangling(self)
return self._mangled_name
@property
def location(self):
"""
Return the source location (the starting character) of the entity
pointed at by the cursor.
"""
if not hasattr(self, '_loc'):
self._loc = conf.lib.clang_getCursorLocation(self)
return self._loc
@property
def extent(self):
"""
Return the source range (the range of text) occupied by the entity
pointed at by the cursor.
"""
if not hasattr(self, '_extent'):
self._extent = conf.lib.clang_getCursorExtent(self)
return self._extent
@property
def storage_class(self):
"""
Retrieves the storage class (if any) of the entity pointed at by the
cursor.
"""
if not hasattr(self, '_storage_class'):
self._storage_class = conf.lib.clang_Cursor_getStorageClass(self)
return StorageClass.from_id(self._storage_class)
@property
def access_specifier(self):
"""
Retrieves the access specifier (if any) of the entity pointed at by the
cursor.
"""
if not hasattr(self, '_access_specifier'):
self._access_specifier = conf.lib.clang_getCXXAccessSpecifier(self)
return AccessSpecifier.from_id(self._access_specifier)
@property
def type(self):
"""
Retrieve the Type (if any) of the entity pointed at by the cursor.
"""
if not hasattr(self, '_type'):
self._type = conf.lib.clang_getCursorType(self)
return self._type
@property
def canonical(self):
"""Return the canonical Cursor corresponding to this Cursor.
The canonical cursor is the cursor which is representative for the
underlying entity. For example, if you have multiple forward
declarations for the same class, the canonical cursor for the forward
declarations will be identical.
"""
if not hasattr(self, '_canonical'):
self._canonical = conf.lib.clang_getCanonicalCursor(self)
return self._canonical
@property
def result_type(self):
"""Retrieve the Type of the result for this Cursor."""
if not hasattr(self, '_result_type'):
self._result_type = conf.lib.clang_getResultType(self.type)
return self._result_type
@property
def underlying_typedef_type(self):
"""Return the underlying type of a typedef declaration.
Returns a Type for the typedef this cursor is a declaration for. If
the current cursor is not a typedef, this raises.
"""
if not hasattr(self, '_underlying_type'):
assert self.kind.is_declaration()
self._underlying_type = \
conf.lib.clang_getTypedefDeclUnderlyingType(self)
return self._underlying_type
@property
def enum_type(self):
"""Return the integer type of an enum declaration.
Returns a Type corresponding to an integer. If the cursor is not for an
enum, this raises.
"""
if not hasattr(self, '_enum_type'):
assert self.kind == CursorKind.ENUM_DECL
self._enum_type = conf.lib.clang_getEnumDeclIntegerType(self)
return self._enum_type
@property
def enum_value(self):
"""Return the value of an enum constant."""
if not hasattr(self, '_enum_value'):
assert self.kind == CursorKind.ENUM_CONSTANT_DECL
# Figure out the underlying type of the enum to know if it
# is a signed or unsigned quantity.
underlying_type = self.type
if underlying_type.kind == TypeKind.ENUM:
underlying_type = underlying_type.get_declaration().enum_type
if underlying_type.kind in (TypeKind.CHAR_U,
TypeKind.UCHAR,
TypeKind.CHAR16,
TypeKind.CHAR32,
TypeKind.USHORT,
TypeKind.UINT,
TypeKind.ULONG,
TypeKind.ULONGLONG,
TypeKind.UINT128):
self._enum_value = \
conf.lib.clang_getEnumConstantDeclUnsignedValue(self)
else:
self._enum_value = conf.lib.clang_getEnumConstantDeclValue(self)
return self._enum_value
@property
def objc_type_encoding(self):
"""Return the Objective-C type encoding as a str."""
if not hasattr(self, '_objc_type_encoding'):
self._objc_type_encoding = \
conf.lib.clang_getDeclObjCTypeEncoding(self)
return self._objc_type_encoding
@property
def hash(self):
"""Returns a hash of the cursor as an int."""
if not hasattr(self, '_hash'):
self._hash = conf.lib.clang_hashCursor(self)
return self._hash
@property
def semantic_parent(self):
"""Return the semantic parent for this cursor."""
if not hasattr(self, '_semantic_parent'):
self._semantic_parent = conf.lib.clang_getCursorSemanticParent(self)
return self._semantic_parent
@property
def lexical_parent(self):
"""Return the lexical parent for this cursor."""
if not hasattr(self, '_lexical_parent'):
self._lexical_parent = conf.lib.clang_getCursorLexicalParent(self)
return self._lexical_parent
@property
def translation_unit(self):
"""Returns the TranslationUnit to which this Cursor belongs."""
# If this triggers an AttributeError, the instance was not properly
# created.
return self._tu
@property
def referenced(self):
"""
For a cursor that is a reference, returns a cursor
representing the entity that it references.
"""
if not hasattr(self, '_referenced'):
self._referenced = conf.lib.clang_getCursorReferenced(self)
return self._referenced
@property
def brief_comment(self):
"""Returns the brief comment text associated with that Cursor"""
return conf.lib.clang_Cursor_getBriefCommentText(self)
@property
def raw_comment(self):
"""Returns the raw comment text associated with that Cursor"""
return conf.lib.clang_Cursor_getRawCommentText(self)
def get_arguments(self):
"""Return an iterator for accessing the arguments of this cursor."""
num_args = conf.lib.clang_Cursor_getNumArguments(self)
for i in range(0, num_args):
yield conf.lib.clang_Cursor_getArgument(self, i)
def get_num_template_arguments(self):
"""Returns the number of template args associated with this cursor."""
return conf.lib.clang_Cursor_getNumTemplateArguments(self)
def get_template_argument_kind(self, num):
"""Returns the TemplateArgumentKind for the indicated template
argument."""
return conf.lib.clang_Cursor_getTemplateArgumentKind(self, num)
def get_template_argument_type(self, num):
"""Returns the CXType for the indicated template argument."""
return conf.lib.clang_Cursor_getTemplateArgumentType(self, num)
def get_template_argument_value(self, num):
"""Returns the value of the indicated arg as a signed 64b integer."""
return conf.lib.clang_Cursor_getTemplateArgumentValue(self, num)
def get_template_argument_unsigned_value(self, num):
"""Returns the value of the indicated arg as an unsigned 64b integer."""
return conf.lib.clang_Cursor_getTemplateArgumentUnsignedValue(self, num)
def get_children(self):
"""Return an iterator for accessing the children of this cursor."""
# FIXME: Expose iteration from CIndex, PR6125.
def visitor(child, parent, children):
# FIXME: Document this assertion in API.
# FIXME: There should just be an isNull method.
assert child != conf.lib.clang_getNullCursor()
# Create reference to TU so it isn't GC'd before Cursor.
child._tu = self._tu
children.append(child)
return 1 # continue
children = []
conf.lib.clang_visitChildren(self, callbacks['cursor_visit'](visitor),
children)
return iter(children)
def walk_preorder(self):
"""Depth-first preorder walk over the cursor and its descendants.
Yields cursors.
"""
yield self
for child in self.get_children():
for descendant in child.walk_preorder():
yield descendant
def get_tokens(self):
"""Obtain Token instances formulating that compose this Cursor.
This is a generator for Token instances. It returns all tokens which
occupy the extent this cursor occupies.
"""
return TokenGroup.get_tokens(self._tu, self.extent)
def get_field_offsetof(self):
"""Returns the offsetof the FIELD_DECL pointed by this Cursor."""
return conf.lib.clang_Cursor_getOffsetOfField(self)
def is_anonymous(self):
"""
Check if the record is anonymous.
"""
if self.kind == CursorKind.FIELD_DECL:
return self.type.get_declaration().is_anonymous()
return conf.lib.clang_Cursor_isAnonymous(self)
def is_bitfield(self):
"""
Check if the field is a bitfield.
"""
return conf.lib.clang_Cursor_isBitField(self)
def get_bitfield_width(self):
"""
Retrieve the width of a bitfield.
"""
return conf.lib.clang_getFieldDeclBitWidth(self)
@staticmethod
def from_result(res, fn, args):
assert isinstance(res, Cursor)
# FIXME: There should just be an isNull method.
if res == conf.lib.clang_getNullCursor():
return None
# Store a reference to the TU in the Python object so it won't get GC'd
# before the Cursor.
tu = None
for arg in args:
if isinstance(arg, TranslationUnit):
tu = arg
break
if hasattr(arg, 'translation_unit'):
tu = arg.translation_unit
break
assert tu is not None
res._tu = tu
return res
@staticmethod
def from_cursor_result(res, fn, args):
assert isinstance(res, Cursor)
if res == conf.lib.clang_getNullCursor():
return None
res._tu = args[0]._tu
return res
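# Usage sketch (illustrative comment only; 'tu' is a hypothetical
# TranslationUnit). A depth-first dump of the AST using the traversal helpers
# above:
#
#   for node in tu.cursor.walk_preorder():
#       print(node.kind.name, node.spelling, node.location)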
class StorageClass(object):
"""
Describes the storage class of a declaration
"""
    # The unique kind objects, indexed by id.
_kinds = []
_name_map = None
def __init__(self, value):
if value >= len(StorageClass._kinds):
StorageClass._kinds += [None] * (value - len(StorageClass._kinds) + 1)
if StorageClass._kinds[value] is not None:
raise ValueError('StorageClass already loaded')
self.value = value
StorageClass._kinds[value] = self
StorageClass._name_map = None
def from_param(self):
return self.value
@property
def name(self):
"""Get the enumeration name of this storage class."""
if self._name_map is None:
self._name_map = {}
for key,value in StorageClass.__dict__.items():
if isinstance(value,StorageClass):
self._name_map[value] = key
return self._name_map[self]
@staticmethod
def from_id(id):
if id >= len(StorageClass._kinds) or not StorageClass._kinds[id]:
raise ValueError('Unknown storage class %d' % id)
return StorageClass._kinds[id]
def __repr__(self):
return 'StorageClass.%s' % (self.name,)
StorageClass.INVALID = StorageClass(0)
StorageClass.NONE = StorageClass(1)
StorageClass.EXTERN = StorageClass(2)
StorageClass.STATIC = StorageClass(3)
StorageClass.PRIVATEEXTERN = StorageClass(4)
StorageClass.OPENCLWORKGROUPLOCAL = StorageClass(5)
StorageClass.AUTO = StorageClass(6)
StorageClass.REGISTER = StorageClass(7)
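# Usage sketch (illustrative comment only; 'cursor' is a hypothetical
# declaration Cursor). Cursor.storage_class resolves to one of the singletons
# above, so comparison with '==' works:
#
#   if cursor.storage_class == StorageClass.STATIC:
#       print(cursor.spelling, 'has static storage')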
### C++ access specifiers ###
class AccessSpecifier(BaseEnumeration):
"""
Describes the access of a C++ class member
"""
    # The unique kind objects, indexed by id.
_kinds = []
_name_map = None
def from_param(self):
return self.value
def __repr__(self):
return 'AccessSpecifier.%s' % (self.name,)
AccessSpecifier.INVALID = AccessSpecifier(0)
AccessSpecifier.PUBLIC = AccessSpecifier(1)
AccessSpecifier.PROTECTED = AccessSpecifier(2)
AccessSpecifier.PRIVATE = AccessSpecifier(3)
AccessSpecifier.NONE = AccessSpecifier(4)
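# Usage sketch (illustrative comment only; 'class_cursor' is a hypothetical
# CLASS_DECL cursor):
#
#   for member in class_cursor.get_children():
#       if member.access_specifier == AccessSpecifier.PUBLIC:
#           print('public member:', member.spelling)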
### Type Kinds ###
class TypeKind(BaseEnumeration):
"""
Describes the kind of type.
"""
# The unique kind objects, indexed by id.
_kinds = []
_name_map = None
@property
def spelling(self):
"""Retrieve the spelling of this TypeKind."""
return conf.lib.clang_getTypeKindSpelling(self.value)
def __repr__(self):
return 'TypeKind.%s' % (self.name,)
TypeKind.INVALID = TypeKind(0)
TypeKind.UNEXPOSED = TypeKind(1)
TypeKind.VOID = TypeKind(2)
TypeKind.BOOL = TypeKind(3)
TypeKind.CHAR_U = TypeKind(4)
TypeKind.UCHAR = TypeKind(5)
TypeKind.CHAR16 = TypeKind(6)
TypeKind.CHAR32 = TypeKind(7)
TypeKind.USHORT = TypeKind(8)
TypeKind.UINT = TypeKind(9)
TypeKind.ULONG = TypeKind(10)
TypeKind.ULONGLONG = TypeKind(11)
TypeKind.UINT128 = TypeKind(12)
TypeKind.CHAR_S = TypeKind(13)
TypeKind.SCHAR = TypeKind(14)
TypeKind.WCHAR = TypeKind(15)
TypeKind.SHORT = TypeKind(16)
TypeKind.INT = TypeKind(17)
TypeKind.LONG = TypeKind(18)
TypeKind.LONGLONG = TypeKind(19)
TypeKind.INT128 = TypeKind(20)
TypeKind.FLOAT = TypeKind(21)
TypeKind.DOUBLE = TypeKind(22)
TypeKind.LONGDOUBLE = TypeKind(23)
TypeKind.NULLPTR = TypeKind(24)
TypeKind.OVERLOAD = TypeKind(25)
TypeKind.DEPENDENT = TypeKind(26)
TypeKind.OBJCID = TypeKind(27)
TypeKind.OBJCCLASS = TypeKind(28)
TypeKind.OBJCSEL = TypeKind(29)
TypeKind.FLOAT128 = TypeKind(30)
TypeKind.COMPLEX = TypeKind(100)
TypeKind.POINTER = TypeKind(101)
TypeKind.BLOCKPOINTER = TypeKind(102)
TypeKind.LVALUEREFERENCE = TypeKind(103)
TypeKind.RVALUEREFERENCE = TypeKind(104)
TypeKind.RECORD = TypeKind(105)
TypeKind.ENUM = TypeKind(106)
TypeKind.TYPEDEF = TypeKind(107)
TypeKind.OBJCINTERFACE = TypeKind(108)
TypeKind.OBJCOBJECTPOINTER = TypeKind(109)
TypeKind.FUNCTIONNOPROTO = TypeKind(110)
TypeKind.FUNCTIONPROTO = TypeKind(111)
TypeKind.CONSTANTARRAY = TypeKind(112)
TypeKind.VECTOR = TypeKind(113)
TypeKind.INCOMPLETEARRAY = TypeKind(114)
TypeKind.VARIABLEARRAY = TypeKind(115)
TypeKind.DEPENDENTSIZEDARRAY = TypeKind(116)
TypeKind.MEMBERPOINTER = TypeKind(117)
TypeKind.AUTO = TypeKind(118)
TypeKind.ELABORATED = TypeKind(119)
class RefQualifierKind(BaseEnumeration):
"""Describes a specific ref-qualifier of a type."""
# The unique kind objects, indexed by id.
_kinds = []
_name_map = None
def from_param(self):
return self.value
def __repr__(self):
return 'RefQualifierKind.%s' % (self.name,)
RefQualifierKind.NONE = RefQualifierKind(0)
RefQualifierKind.LVALUE = RefQualifierKind(1)
RefQualifierKind.RVALUE = RefQualifierKind(2)
class Type(Structure):
"""
The type of an element in the abstract syntax tree.
"""
_fields_ = [("_kind_id", c_int), ("data", c_void_p * 2)]
@property
def kind(self):
"""Return the kind of this type."""
return TypeKind.from_id(self._kind_id)
def argument_types(self):
"""Retrieve a container for the non-variadic arguments for this type.
The returned object is iterable and indexable. Each item in the
container is a Type instance.
"""
class ArgumentsIterator(collections.Sequence):
def __init__(self, parent):
self.parent = parent
self.length = None
def __len__(self):
if self.length is None:
self.length = conf.lib.clang_getNumArgTypes(self.parent)
return self.length
def __getitem__(self, key):
# FIXME Support slice objects.
if not isinstance(key, int):
raise TypeError("Must supply a non-negative int.")
if key < 0:
raise IndexError("Only non-negative indexes are accepted.")
if key >= len(self):
raise IndexError("Index greater than container length: "
"%d > %d" % ( key, len(self) ))
result = conf.lib.clang_getArgType(self.parent, key)
if result.kind == TypeKind.INVALID:
raise IndexError("Argument could not be retrieved.")
return result
assert self.kind == TypeKind.FUNCTIONPROTO
return ArgumentsIterator(self)
@property
def element_type(self):
"""Retrieve the Type of elements within this Type.
If accessed on a type that is not an array, complex, or vector type, an
exception will be raised.
"""
result = conf.lib.clang_getElementType(self)
if result.kind == TypeKind.INVALID:
raise Exception('Element type not available on this type.')
return result
@property
def element_count(self):
"""Retrieve the number of elements in this type.
Returns an int.
If the Type is not an array or vector, this raises.
"""
result = conf.lib.clang_getNumElements(self)
if result < 0:
raise Exception('Type does not have elements.')
return result
@property
def translation_unit(self):
"""The TranslationUnit to which this Type is associated."""
# If this triggers an AttributeError, the instance was not properly
# instantiated.
return self._tu
@staticmethod
def from_result(res, fn, args):
assert isinstance(res, Type)
tu = None
for arg in args:
if hasattr(arg, 'translation_unit'):
tu = arg.translation_unit
break
assert tu is not None
res._tu = tu
return res
def get_canonical(self):
"""
Return the canonical type for a Type.
Clang's type system explicitly models typedefs and all the
ways a specific type can be represented. The canonical type
is the underlying type with all the "sugar" removed. For
example, if 'T' is a typedef for 'int', the canonical type for
'T' would be 'int'.
"""
return conf.lib.clang_getCanonicalType(self)
def is_const_qualified(self):
"""Determine whether a Type has the "const" qualifier set.
This does not look through typedefs that may have added "const"
at a different level.
"""
return conf.lib.clang_isConstQualifiedType(self)
def is_volatile_qualified(self):
"""Determine whether a Type has the "volatile" qualifier set.
This does not look through typedefs that may have added "volatile"
at a different level.
"""
return conf.lib.clang_isVolatileQualifiedType(self)
def is_restrict_qualified(self):
"""Determine whether a Type has the "restrict" qualifier set.
This does not look through typedefs that may have added "restrict" at
a different level.
"""
return conf.lib.clang_isRestrictQualifiedType(self)
def is_function_variadic(self):
"""Determine whether this function Type is a variadic function type."""
assert self.kind == TypeKind.FUNCTIONPROTO
return conf.lib.clang_isFunctionTypeVariadic(self)
def is_pod(self):
"""Determine whether this Type represents plain old data (POD)."""
return conf.lib.clang_isPODType(self)
def get_pointee(self):
"""
For pointer types, returns the type of the pointee.
"""
return conf.lib.clang_getPointeeType(self)
def get_declaration(self):
"""
Return the cursor for the declaration of the given type.
"""
return conf.lib.clang_getTypeDeclaration(self)
def get_result(self):
"""
Retrieve the result type associated with a function type.
"""
return conf.lib.clang_getResultType(self)
def get_array_element_type(self):
"""
Retrieve the type of the elements of the array type.
"""
return conf.lib.clang_getArrayElementType(self)
def get_array_size(self):
"""
Retrieve the size of the constant array.
"""
return conf.lib.clang_getArraySize(self)
def get_class_type(self):
"""
Retrieve the class type of the member pointer type.
"""
return conf.lib.clang_Type_getClassType(self)
def get_named_type(self):
"""
Retrieve the type named by the qualified-id.
"""
return conf.lib.clang_Type_getNamedType(self)
def get_align(self):
"""
Retrieve the alignment of the record.
"""
return conf.lib.clang_Type_getAlignOf(self)
def get_size(self):
"""
Retrieve the size of the record.
"""
return conf.lib.clang_Type_getSizeOf(self)
def get_offset(self, fieldname):
"""
Retrieve the offset of a field in the record.
"""
return conf.lib.clang_Type_getOffsetOf(self, c_char_p(fieldname))
def get_ref_qualifier(self):
"""
Retrieve the ref-qualifier of the type.
"""
return RefQualifierKind.from_id(
conf.lib.clang_Type_getCXXRefQualifier(self))
def get_fields(self):
"""Return an iterator for accessing the fields of this type."""
def visitor(field, children):
assert field != conf.lib.clang_getNullCursor()
# Create reference to TU so it isn't GC'd before Cursor.
field._tu = self._tu
fields.append(field)
return 1 # continue
fields = []
conf.lib.clang_Type_visitFields(self,
callbacks['fields_visit'](visitor), fields)
return iter(fields)
@property
def spelling(self):
"""Retrieve the spelling of this Type."""
return conf.lib.clang_getTypeSpelling(self)
def __eq__(self, other):
if type(other) != type(self):
return False
return conf.lib.clang_equalTypes(self, other)
def __ne__(self, other):
return not self.__eq__(other)
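# Usage sketch (illustrative comment only; 'cursor' is a hypothetical
# FUNCTION_DECL cursor). Function types can be taken apart with the accessors
# defined above:
#
#   ftype = cursor.type
#   if ftype.kind == TypeKind.FUNCTIONPROTO:
#       print('returns', ftype.get_result().spelling)
#       for arg in ftype.argument_types():
#           print('arg of type', arg.spelling)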
## CIndex Objects ##
# CIndex objects (derived from ClangObject) are essentially lightweight
# wrappers attached to some underlying object, which is exposed via CIndex as
# a void*.
class ClangObject(object):
"""
A helper for Clang objects. This class helps act as an intermediary for
the ctypes library and the Clang CIndex library.
"""
def __init__(self, obj):
assert isinstance(obj, c_object_p) and obj
self.obj = self._as_parameter_ = obj
def from_param(self):
return self._as_parameter_
class _CXUnsavedFile(Structure):
"""Helper for passing unsaved file arguments."""
_fields_ = [("name", c_char_p), ("contents", c_char_p), ('length', c_ulong)]
# Function calls through the python interface are rather slow. Fortunately,
# for most symbols, we do not need to perform a function call. Their spelling
# never changes and is consequently provided by this spelling cache.
SpellingCache = {
# 0: CompletionChunk.Kind("Optional"),
# 1: CompletionChunk.Kind("TypedText"),
# 2: CompletionChunk.Kind("Text"),
# 3: CompletionChunk.Kind("Placeholder"),
# 4: CompletionChunk.Kind("Informative"),
# 5 : CompletionChunk.Kind("CurrentParameter"),
6: '(', # CompletionChunk.Kind("LeftParen"),
7: ')', # CompletionChunk.Kind("RightParen"),
8: '[', # CompletionChunk.Kind("LeftBracket"),
9: ']', # CompletionChunk.Kind("RightBracket"),
10: '{', # CompletionChunk.Kind("LeftBrace"),
11: '}', # CompletionChunk.Kind("RightBrace"),
12: '<', # CompletionChunk.Kind("LeftAngle"),
13: '>', # CompletionChunk.Kind("RightAngle"),
14: ', ', # CompletionChunk.Kind("Comma"),
# 15: CompletionChunk.Kind("ResultType"),
16: ':', # CompletionChunk.Kind("Colon"),
17: ';', # CompletionChunk.Kind("SemiColon"),
18: '=', # CompletionChunk.Kind("Equal"),
19: ' ', # CompletionChunk.Kind("HorizontalSpace"),
# 20: CompletionChunk.Kind("VerticalSpace")
}
class CompletionChunk:
class Kind:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def __repr__(self):
return "<ChunkKind: %s>" % self
def __init__(self, completionString, key):
self.cs = completionString
self.key = key
self.__kindNumberCache = -1
def __repr__(self):
return "{'" + self.spelling + "', " + str(self.kind) + "}"
@CachedProperty
def spelling(self):
if self.__kindNumber in SpellingCache:
return SpellingCache[self.__kindNumber]
return conf.lib.clang_getCompletionChunkText(self.cs, self.key).spelling
# We do not use @CachedProperty here, as the manual implementation is
# apparently still significantly faster. Please profile carefully if you
# would like to add CachedProperty back.
@property
def __kindNumber(self):
if self.__kindNumberCache == -1:
self.__kindNumberCache = \
conf.lib.clang_getCompletionChunkKind(self.cs, self.key)
return self.__kindNumberCache
@CachedProperty
def kind(self):
return completionChunkKindMap[self.__kindNumber]
@CachedProperty
def string(self):
res = conf.lib.clang_getCompletionChunkCompletionString(self.cs,
self.key)
        if res:
            return CompletionString(res)
        else:
            return None
def isKindOptional(self):
return self.__kindNumber == 0
def isKindTypedText(self):
return self.__kindNumber == 1
def isKindPlaceHolder(self):
return self.__kindNumber == 3
def isKindInformative(self):
return self.__kindNumber == 4
def isKindResultType(self):
return self.__kindNumber == 15
completionChunkKindMap = {
0: CompletionChunk.Kind("Optional"),
1: CompletionChunk.Kind("TypedText"),
2: CompletionChunk.Kind("Text"),
3: CompletionChunk.Kind("Placeholder"),
4: CompletionChunk.Kind("Informative"),
5: CompletionChunk.Kind("CurrentParameter"),
6: CompletionChunk.Kind("LeftParen"),
7: CompletionChunk.Kind("RightParen"),
8: CompletionChunk.Kind("LeftBracket"),
9: CompletionChunk.Kind("RightBracket"),
10: CompletionChunk.Kind("LeftBrace"),
11: CompletionChunk.Kind("RightBrace"),
12: CompletionChunk.Kind("LeftAngle"),
13: CompletionChunk.Kind("RightAngle"),
14: CompletionChunk.Kind("Comma"),
15: CompletionChunk.Kind("ResultType"),
16: CompletionChunk.Kind("Colon"),
17: CompletionChunk.Kind("SemiColon"),
18: CompletionChunk.Kind("Equal"),
19: CompletionChunk.Kind("HorizontalSpace"),
20: CompletionChunk.Kind("VerticalSpace")}
class CompletionString(ClangObject):
class Availability:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def __repr__(self):
return "<Availability: %s>" % self
def __len__(self):
return self.num_chunks
@CachedProperty
def num_chunks(self):
return conf.lib.clang_getNumCompletionChunks(self.obj)
def __getitem__(self, key):
if self.num_chunks <= key:
raise IndexError
return CompletionChunk(self.obj, key)
@property
def priority(self):
return conf.lib.clang_getCompletionPriority(self.obj)
@property
def availability(self):
res = conf.lib.clang_getCompletionAvailability(self.obj)
return availabilityKinds[res]
@property
def briefComment(self):
if conf.function_exists("clang_getCompletionBriefComment"):
return conf.lib.clang_getCompletionBriefComment(self.obj)
return _CXString()
def __repr__(self):
return " | ".join([str(a) for a in self]) \
+ " || Priority: " + str(self.priority) \
+ " || Availability: " + str(self.availability) \
+ " || Brief comment: " + str(self.briefComment.spelling)
availabilityKinds = {
0: CompletionChunk.Kind("Available"),
1: CompletionChunk.Kind("Deprecated"),
2: CompletionChunk.Kind("NotAvailable"),
3: CompletionChunk.Kind("NotAccessible")}
class CodeCompletionResult(Structure):
_fields_ = [('cursorKind', c_int), ('completionString', c_object_p)]
def __repr__(self):
return str(CompletionString(self.completionString))
@property
def kind(self):
return CursorKind.from_id(self.cursorKind)
@property
def string(self):
return CompletionString(self.completionString)
class CCRStructure(Structure):
_fields_ = [('results', POINTER(CodeCompletionResult)),
('numResults', c_int)]
def __len__(self):
return self.numResults
def __getitem__(self, key):
if len(self) <= key:
raise IndexError
return self.results[key]
class CodeCompletionResults(ClangObject):
def __init__(self, ptr):
assert isinstance(ptr, POINTER(CCRStructure)) and ptr
self.ptr = self._as_parameter_ = ptr
def from_param(self):
return self._as_parameter_
def __del__(self):
conf.lib.clang_disposeCodeCompleteResults(self)
@property
def results(self):
return self.ptr.contents
@property
def diagnostics(self):
class DiagnosticsItr:
def __init__(self, ccr):
self.ccr = ccr
def __len__(self):
return int(conf.lib.clang_codeCompleteGetNumDiagnostics(self.ccr))
def __getitem__(self, key):
return conf.lib.clang_codeCompleteGetDiagnostic(self.ccr, key)
return DiagnosticsItr(self)
class Index(ClangObject):
"""
The Index type provides the primary interface to the Clang CIndex library,
primarily by providing an interface for reading and parsing translation
units.
"""
@staticmethod
def create(excludeDecls=False):
"""
Create a new Index.
Parameters:
excludeDecls -- Exclude local declarations from translation units.
"""
return Index(conf.lib.clang_createIndex(excludeDecls, 0))
def __del__(self):
conf.lib.clang_disposeIndex(self)
def read(self, path):
"""Load a TranslationUnit from the given AST file."""
return TranslationUnit.from_ast_file(path, self)
def parse(self, path, args=None, unsaved_files=None, options=0):
"""Load the translation unit from the given source code file by running
clang and generating the AST before loading. Additional command line
parameters can be passed to clang via the args parameter.
In-memory contents for files can be provided by passing a list of pairs
as unsaved_files; the first item of each pair should be the filename to
be mapped and the second the contents to be substituted for that
file. The contents may be passed as strings or file objects.
If an error was encountered during parsing, a TranslationUnitLoadError
will be raised.
"""
return TranslationUnit.from_source(path, args, unsaved_files, options,
self)
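# A minimal usage sketch for Index (hedged: 'hello.c' and the flags below
# are placeholders, not part of the API):
#   from clang.cindex import Index
#   index = Index.create()
#   tu = index.parse('hello.c', args=['-std=c99'])
#   print(tu.spelling)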
class TranslationUnit(ClangObject):
"""Represents a source code translation unit.
This is one of the main types in the API. Any time you wish to interact
with Clang's representation of a source file, you typically start with a
translation unit.
"""
# Default parsing mode.
PARSE_NONE = 0
# Instruct the parser to create a detailed processing record containing
# metadata not normally retained.
PARSE_DETAILED_PROCESSING_RECORD = 1
# Indicates that the translation unit is incomplete. This is typically used
# when parsing headers.
PARSE_INCOMPLETE = 2
# Instruct the parser to create a pre-compiled preamble for the translation
# unit. This caches the preamble (included files at top of source file).
# This is useful if the translation unit will be reparsed and you don't
# want to incur the overhead of reparsing the preamble.
PARSE_PRECOMPILED_PREAMBLE = 4
# Cache code completion information on parse. This adds time to parsing but
# speeds up code completion.
PARSE_CACHE_COMPLETION_RESULTS = 8
# Flags with values 16 and 32 are deprecated and intentionally omitted.
# Do not parse function bodies. This is useful if you only care about
# searching for declarations/definitions.
PARSE_SKIP_FUNCTION_BODIES = 64
# Used to indicate that brief documentation comments should be included
# into the set of code completions returned from this translation unit.
PARSE_INCLUDE_BRIEF_COMMENTS_IN_CODE_COMPLETION = 128
@classmethod
def from_source(cls, filename, args=None, unsaved_files=None, options=0,
index=None):
"""Create a TranslationUnit by parsing source.
This is capable of processing source code both from files on the
filesystem as well as in-memory contents.
Command-line arguments that would be passed to clang are specified as
a list via args. These can be used to specify include paths, warnings,
etc. e.g. ["-Wall", "-I/path/to/include"].
In-memory file content can be provided via unsaved_files. This is an
iterable of 2-tuples. The first element is the str filename. The
second element defines the content. Content can be provided as str
source code or as file objects (anything with a read() method). If
a file object is being used, content will be read until EOF and the
read cursor will not be reset to its original position.
options is a bitwise or of TranslationUnit.PARSE_XXX flags which will
control parsing behavior.
index is an Index instance to utilize. If not provided, a new Index
will be created for this TranslationUnit.
To parse source from the filesystem, the filename of the file to parse
is specified by the filename argument. Or, filename could be None and
the args list would contain the filename(s) to parse.
To parse source from an in-memory buffer, set filename to the virtual
filename you wish to associate with this source (e.g. "test.c"). The
contents of that file are then provided in unsaved_files.
If an error occurs, a TranslationUnitLoadError is raised.
Please note that a TranslationUnit with parser errors may be returned.
It is the caller's responsibility to check tu.diagnostics for errors.
Also note that Clang infers the source language from the extension of
the input filename. If you pass in source code containing a C++ class
declaration with the filename "test.c", parsing will fail.
"""
if args is None:
args = []
if unsaved_files is None:
unsaved_files = []
if index is None:
index = Index.create()
args_array = None
if len(args) > 0:
args_array = (c_char_p * len(args))(*args)
unsaved_array = None
if len(unsaved_files) > 0:
unsaved_array = (_CXUnsavedFile * len(unsaved_files))()
for i, (name, contents) in enumerate(unsaved_files):
if hasattr(contents, "read"):
contents = contents.read()
unsaved_array[i].name = name
unsaved_array[i].contents = contents
unsaved_array[i].length = len(contents)
ptr = conf.lib.clang_parseTranslationUnit(index, filename, args_array,
len(args), unsaved_array,
len(unsaved_files), options)
if not ptr:
raise TranslationUnitLoadError("Error parsing translation unit.")
return cls(ptr, index=index)
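# Illustrative sketch, not part of the API: parse purely in-memory source
# by pairing a virtual filename (here the assumed name 'fake.c') with its
# contents via unsaved_files.
#   tu = TranslationUnit.from_source('fake.c', args=['-std=c99'],
#       unsaved_files=[('fake.c', 'int x;')])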
@classmethod
def from_ast_file(cls, filename, index=None):
"""Create a TranslationUnit instance from a saved AST file.
A previously-saved AST file (provided with -emit-ast or
TranslationUnit.save()) is loaded from the filename specified.
If the file cannot be loaded, a TranslationUnitLoadError will be
raised.
index is optional and is the Index instance to use. If not provided,
a default Index will be created.
"""
if index is None:
index = Index.create()
ptr = conf.lib.clang_createTranslationUnit(index, filename)
if not ptr:
raise TranslationUnitLoadError(filename)
return cls(ptr=ptr, index=index)
def __init__(self, ptr, index):
"""Create a TranslationUnit instance.
TranslationUnits should be created using one of the from_* @classmethod
functions above. __init__ is only called internally.
"""
assert isinstance(index, Index)
self.index = index
ClangObject.__init__(self, ptr)
def __del__(self):
conf.lib.clang_disposeTranslationUnit(self)
@property
def cursor(self):
"""Retrieve the cursor that represents the given translation unit."""
return conf.lib.clang_getTranslationUnitCursor(self)
@property
def spelling(self):
"""Get the original translation unit source file name."""
return conf.lib.clang_getTranslationUnitSpelling(self)
def get_includes(self):
"""
Return an iterable sequence of FileInclusion objects that describe the
sequence of inclusions in a translation unit. The first object in
this sequence is always the input file. Note that this method will not
recursively iterate over header files included through precompiled
headers.
"""
def visitor(fobj, lptr, depth, includes):
if depth > 0:
loc = lptr.contents
includes.append(FileInclusion(loc.file, File(fobj), loc, depth))
# Automatically adapt CIndex/ctype pointers to python objects
includes = []
conf.lib.clang_getInclusions(self,
callbacks['translation_unit_includes'](visitor), includes)
return iter(includes)
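# Sketch (assumes 'tu' is a parsed TranslationUnit): print the inclusion
# stack, indented by depth.
#   for inc in tu.get_includes():
#       print('\t' * inc.depth + inc.include.name)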
def get_file(self, filename):
"""Obtain a File from this translation unit."""
return File.from_name(self, filename)
def get_location(self, filename, position):
"""Obtain a SourceLocation for a file in this translation unit.
The position can be specified by passing:
- Integer file offset. Initial file offset is 0.
- 2-tuple of (line number, column number). Lines and columns are
1-based, so the initial file position is (1, 1).
"""
f = self.get_file(filename)
if isinstance(position, int):
return SourceLocation.from_offset(self, f, position)
return SourceLocation.from_position(self, f, position[0], position[1])
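# For example (a sketch; 't.c' stands in for a file known to this
# translation unit):
#   loc = tu.get_location('t.c', 2)       # by integer file offset
#   loc = tu.get_location('t.c', (1, 3))  # by (line, column) pair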
def get_extent(self, filename, locations):
"""Obtain a SourceRange from this translation unit.
The bounds of the SourceRange must ultimately be defined by a start and
end SourceLocation. For the locations argument, you can pass:
- 2 SourceLocation instances in a 2-tuple or list.
- 2 int file offsets via a 2-tuple or list.
- 2 2-tuple or lists of (line, column) pairs in a 2-tuple or list.
e.g.
get_extent('foo.c', (5, 10))
get_extent('foo.c', ((1, 1), (1, 15)))
"""
f = self.get_file(filename)
if len(locations) < 2:
raise Exception('Must pass object with at least 2 elements')
start_location, end_location = locations
if hasattr(start_location, '__len__'):
start_location = SourceLocation.from_position(self, f,
start_location[0], start_location[1])
elif isinstance(start_location, int):
start_location = SourceLocation.from_offset(self, f,
start_location)
if hasattr(end_location, '__len__'):
end_location = SourceLocation.from_position(self, f,
end_location[0], end_location[1])
elif isinstance(end_location, int):
end_location = SourceLocation.from_offset(self, f, end_location)
assert isinstance(start_location, SourceLocation)
assert isinstance(end_location, SourceLocation)
return SourceRange.from_locations(start_location, end_location)
@property
def diagnostics(self):
"""
Return an iterable (and indexable) object containing the diagnostics.
"""
class DiagIterator:
def __init__(self, tu):
self.tu = tu
def __len__(self):
return int(conf.lib.clang_getNumDiagnostics(self.tu))
def __getitem__(self, key):
diag = conf.lib.clang_getDiagnostic(self.tu, key)
if not diag:
raise IndexError
return Diagnostic(diag)
return DiagIterator(self)
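# Sketch (assumes 'tu' is a TranslationUnit): report every diagnostic from
# the last parse.
#   for diag in tu.diagnostics:
#       print(diag.severity, diag.location, diag.spelling)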
def reparse(self, unsaved_files=None, options=0):
"""
Reparse an already parsed translation unit.
In-memory contents for files can be provided by passing a list of pairs
as unsaved_files; the first item of each pair should be the filename to
be mapped and the second the contents to be substituted for that
file. The contents may be passed as strings or file objects.
"""
if unsaved_files is None:
unsaved_files = []
unsaved_files_array = 0
if len(unsaved_files):
unsaved_files_array = (_CXUnsavedFile * len(unsaved_files))()
for i, (name, value) in enumerate(unsaved_files):
if not isinstance(value, str):
# FIXME: It would be great to support an efficient version
# of this, one day.
value = value.read()
if not isinstance(value, str):
raise TypeError('Unexpected unsaved file contents.')
unsaved_files_array[i].name = name
unsaved_files_array[i].contents = value
unsaved_files_array[i].length = len(value)
ptr = conf.lib.clang_reparseTranslationUnit(self, len(unsaved_files),
unsaved_files_array, options)
def save(self, filename):
"""Saves the TranslationUnit to a file.
This is equivalent to passing -emit-ast to the clang frontend. The
saved file can be loaded back into a TranslationUnit. Or, if it
corresponds to a header, it can be used as a pre-compiled header file.
If an error occurs while saving, a TranslationUnitSaveError is raised.
If the error was TranslationUnitSaveError.ERROR_INVALID_TU, this means
the constructed TranslationUnit was not valid at time of save. In this
case, the reason(s) why should be available via
TranslationUnit.diagnostics().
filename -- The path to save the translation unit to.
"""
options = conf.lib.clang_defaultSaveOptions(self)
result = int(conf.lib.clang_saveTranslationUnit(self, filename,
options))
if result != 0:
raise TranslationUnitSaveError(result,
'Error saving TranslationUnit.')
def codeComplete(self, path, line, column, unsaved_files=None,
include_macros=False, include_code_patterns=False,
include_brief_comments=False):
"""
Code complete in this translation unit.
In-memory contents for files can be provided by passing a list of pairs
as unsaved_files; the first item of each pair should be the filename to
be mapped and the second the contents to be substituted for that
file. The contents may be passed as strings or file objects.
"""
options = 0
if include_macros:
options += 1
if include_code_patterns:
options += 2
if include_brief_comments:
options += 4
if unsaved_files is None:
unsaved_files = []
unsaved_files_array = 0
if len(unsaved_files):
unsaved_files_array = (_CXUnsavedFile * len(unsaved_files))()
for i, (name, value) in enumerate(unsaved_files):
if not isinstance(value, str):
# FIXME: It would be great to support an efficient version
# of this, one day.
value = value.read()
if not isinstance(value, str):
raise TypeError('Unexpected unsaved file contents.')
unsaved_files_array[i].name = name
unsaved_files_array[i].contents = value
unsaved_files_array[i].length = len(value)
ptr = conf.lib.clang_codeCompleteAt(self, path, line, column,
unsaved_files_array, len(unsaved_files), options)
if ptr:
return CodeCompletionResults(ptr)
return None
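# Hedged usage sketch ('fake.c', the 9/1 position and 'files' are
# assumptions for illustration):
#   cr = tu.codeComplete('fake.c', 9, 1, unsaved_files=files)
#   if cr is not None:
#       for result in cr.results:
#           print(result)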
def get_tokens(self, locations=None, extent=None):
"""Obtain tokens in this translation unit.
This is a generator for Token instances. The caller specifies a range
of source code to obtain tokens for. The range can be specified as a
2-tuple of SourceLocation or as a SourceRange. If both are given,
locations takes precedence.
"""
if locations is not None:
extent = SourceRange(start=locations[0], end=locations[1])
return TokenGroup.get_tokens(self, extent)
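# Sketch: enumerate tokens over an extent (filename and offsets are
# illustrative):
#   extent = tu.get_extent('t.c', (0, 10))
#   for tok in tu.get_tokens(extent=extent):
#       print(tok.kind, tok.spelling)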
class File(ClangObject):
"""
The File class represents a particular source file that is part of a
translation unit.
"""
@staticmethod
def from_name(translation_unit, file_name):
"""Retrieve a file handle within the given translation unit."""
return File(conf.lib.clang_getFile(translation_unit, file_name))
@property
def name(self):
"""Return the complete file and path name of the file."""
return conf.lib.clang_getCString(conf.lib.clang_getFileName(self))
@property
def time(self):
"""Return the last modification time of the file."""
return conf.lib.clang_getFileTime(self)
def __str__(self):
return self.name
def __repr__(self):
return "<File: %s>" % (self.name)
@staticmethod
def from_cursor_result(res, fn, args):
assert isinstance(res, File)
# Copy a reference to the TranslationUnit to prevent premature GC.
res._tu = args[0]._tu
return res
class FileInclusion(object):
"""
The FileInclusion class represents the inclusion of one source file by
another via a '#include' directive or as the input file for the translation
unit. This class provides information about the included file, the including
file, the location of the '#include' directive and the depth of the included
file in the stack. Note that the input file has depth 0.
"""
def __init__(self, src, tgt, loc, depth):
self.source = src
self.include = tgt
self.location = loc
self.depth = depth
@property
def is_input_file(self):
"""True if the included file is the input file."""
return self.depth == 0
class CompilationDatabaseError(Exception):
"""Represents an error that occurred when working with a CompilationDatabase
Each error is associated to an enumerated value, accessible under
e.cdb_error. Consumers can compare the value with one of the ERROR_
constants in this class.
"""
# An unknown error occurred
ERROR_UNKNOWN = 0
# The database could not be loaded
ERROR_CANNOTLOADDATABASE = 1
def __init__(self, enumeration, message):
assert isinstance(enumeration, int)
if enumeration > 1:
raise Exception("Encountered undefined CompilationDatabase error "
"constant: %d. Please file a bug to have this "
"value supported." % enumeration)
self.cdb_error = enumeration
Exception.__init__(self, 'Error %d: %s' % (enumeration, message))
class CompileCommand(object):
"""Represents the compile command used to build a file"""
def __init__(self, cmd, ccmds):
self.cmd = cmd
# Keep a reference to the originating CompileCommands
# to prevent garbage collection
self.ccmds = ccmds
@property
def directory(self):
"""Get the working directory for this CompileCommand"""
return conf.lib.clang_CompileCommand_getDirectory(self.cmd)
@property
def filename(self):
"""Get the working filename for this CompileCommand"""
return conf.lib.clang_CompileCommand_getFilename(self.cmd)
@property
def arguments(self):
"""
Get an iterable object providing each argument in the
command line for the compiler invocation as a _CXString.
Invariant: the first argument is the compiler executable.
"""
length = conf.lib.clang_CompileCommand_getNumArgs(self.cmd)
for i in xrange(length):
yield conf.lib.clang_CompileCommand_getArg(self.cmd, i)
class CompileCommands(object):
"""
CompileCommands is an iterable object containing all CompileCommand
that can be used for building a specific file.
"""
def __init__(self, ccmds):
self.ccmds = ccmds
def __del__(self):
conf.lib.clang_CompileCommands_dispose(self.ccmds)
def __len__(self):
return int(conf.lib.clang_CompileCommands_getSize(self.ccmds))
def __getitem__(self, i):
cc = conf.lib.clang_CompileCommands_getCommand(self.ccmds, i)
if not cc:
raise IndexError
return CompileCommand(cc, self)
@staticmethod
def from_result(res, fn, args):
if not res:
return None
return CompileCommands(res)
class CompilationDatabase(ClangObject):
"""
The CompilationDatabase is a wrapper class around
clang::tooling::CompilationDatabase.
It enables querying how a specific source file can be built.
"""
def __del__(self):
conf.lib.clang_CompilationDatabase_dispose(self)
@staticmethod
def from_result(res, fn, args):
if not res:
raise CompilationDatabaseError(0,
"CompilationDatabase loading failed")
return CompilationDatabase(res)
@staticmethod
def fromDirectory(buildDir):
"""Builds a CompilationDatabase from the database found in buildDir"""
errorCode = c_uint()
try:
cdb = conf.lib.clang_CompilationDatabase_fromDirectory(buildDir,
byref(errorCode))
except CompilationDatabaseError as e:
raise CompilationDatabaseError(int(errorCode.value),
"CompilationDatabase loading failed")
return cdb
def getCompileCommands(self, filename):
"""
Get an iterable object providing all the CompileCommands available to
build filename. Returns None if filename is not found in the database.
"""
return conf.lib.clang_CompilationDatabase_getCompileCommands(self,
filename)
def getAllCompileCommands(self):
"""
Get an iterable object providing all the CompileCommands available from
the database.
"""
return conf.lib.clang_CompilationDatabase_getAllCompileCommands(self)
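# Typical flow, as a sketch ('./build' is an assumed directory containing a
# compile_commands.json):
#   cdb = CompilationDatabase.fromDirectory('./build')
#   cmds = cdb.getCompileCommands('/abs/path/to/file.c')
#   if cmds:
#       for cmd in cmds:
#           print(cmd.directory, list(cmd.arguments))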
class Token(Structure):
"""Represents a single token from the preprocessor.
Tokens are effectively segments of source code. Source code is first parsed
into tokens before being converted into the AST and Cursors.
Tokens are obtained from parsed TranslationUnit instances. You currently
can't create tokens manually.
"""
_fields_ = [
('int_data', c_uint * 4),
('ptr_data', c_void_p)
]
@property
def spelling(self):
"""The spelling of this token.
This is the textual representation of the token in source.
"""
return conf.lib.clang_getTokenSpelling(self._tu, self)
@property
def kind(self):
"""Obtain the TokenKind of the current token."""
return TokenKind.from_value(conf.lib.clang_getTokenKind(self))
@property
def location(self):
"""The SourceLocation this Token occurs at."""
return conf.lib.clang_getTokenLocation(self._tu, self)
@property
def extent(self):
"""The SourceRange this Token occupies."""
return conf.lib.clang_getTokenExtent(self._tu, self)
@property
def cursor(self):
"""The Cursor this Token corresponds to."""
cursor = Cursor()
conf.lib.clang_annotateTokens(self._tu, byref(self), 1, byref(cursor))
return cursor
# Now comes the plumbing to hook up the C library.
# Register callback types in common container.
callbacks['translation_unit_includes'] = CFUNCTYPE(None, c_object_p,
POINTER(SourceLocation), c_uint, py_object)
callbacks['cursor_visit'] = CFUNCTYPE(c_int, Cursor, Cursor, py_object)
callbacks['fields_visit'] = CFUNCTYPE(c_int, Cursor, py_object)
# Functions strictly alphabetical order.
functionList = [
("clang_annotateTokens",
[TranslationUnit, POINTER(Token), c_uint, POINTER(Cursor)]),
("clang_CompilationDatabase_dispose",
[c_object_p]),
("clang_CompilationDatabase_fromDirectory",
[c_char_p, POINTER(c_uint)],
c_object_p,
CompilationDatabase.from_result),
("clang_CompilationDatabase_getAllCompileCommands",
[c_object_p],
c_object_p,
CompileCommands.from_result),
("clang_CompilationDatabase_getCompileCommands",
[c_object_p, c_char_p],
c_object_p,
CompileCommands.from_result),
("clang_CompileCommands_dispose",
[c_object_p]),
("clang_CompileCommands_getCommand",
[c_object_p, c_uint],
c_object_p),
("clang_CompileCommands_getSize",
[c_object_p],
c_uint),
("clang_CompileCommand_getArg",
[c_object_p, c_uint],
_CXString,
_CXString.from_result),
("clang_CompileCommand_getDirectory",
[c_object_p],
_CXString,
_CXString.from_result),
("clang_CompileCommand_getFilename",
[c_object_p],
_CXString,
_CXString.from_result),
("clang_CompileCommand_getNumArgs",
[c_object_p],
c_uint),
("clang_codeCompleteAt",
[TranslationUnit, c_char_p, c_int, c_int, c_void_p, c_int, c_int],
POINTER(CCRStructure)),
("clang_codeCompleteGetDiagnostic",
[CodeCompletionResults, c_int],
Diagnostic),
("clang_codeCompleteGetNumDiagnostics",
[CodeCompletionResults],
c_int),
("clang_createIndex",
[c_int, c_int],
c_object_p),
("clang_createTranslationUnit",
[Index, c_char_p],
c_object_p),
("clang_CXXConstructor_isConvertingConstructor",
[Cursor],
bool),
("clang_CXXConstructor_isCopyConstructor",
[Cursor],
bool),
("clang_CXXConstructor_isDefaultConstructor",
[Cursor],
bool),
("clang_CXXConstructor_isMoveConstructor",
[Cursor],
bool),
("clang_CXXField_isMutable",
[Cursor],
bool),
("clang_CXXMethod_isConst",
[Cursor],
bool),
("clang_CXXMethod_isDefaulted",
[Cursor],
bool),
("clang_CXXMethod_isPureVirtual",
[Cursor],
bool),
("clang_CXXMethod_isStatic",
[Cursor],
bool),
("clang_CXXMethod_isVirtual",
[Cursor],
bool),
("clang_defaultDiagnosticDisplayOptions",
[],
c_uint),
("clang_defaultSaveOptions",
[TranslationUnit],
c_uint),
("clang_disposeCodeCompleteResults",
[CodeCompletionResults]),
# ("clang_disposeCXTUResourceUsage",
# [CXTUResourceUsage]),
("clang_disposeDiagnostic",
[Diagnostic]),
("clang_disposeIndex",
[Index]),
("clang_disposeString",
[_CXString]),
("clang_disposeTokens",
[TranslationUnit, POINTER(Token), c_uint]),
("clang_disposeTranslationUnit",
[TranslationUnit]),
("clang_equalCursors",
[Cursor, Cursor],
bool),
("clang_equalLocations",
[SourceLocation, SourceLocation],
bool),
("clang_equalRanges",
[SourceRange, SourceRange],
bool),
("clang_equalTypes",
[Type, Type],
bool),
("clang_formatDiagnostic",
[Diagnostic, c_uint],
_CXString),
("clang_getArgType",
[Type, c_uint],
Type,
Type.from_result),
("clang_getArrayElementType",
[Type],
Type,
Type.from_result),
("clang_getArraySize",
[Type],
c_longlong),
("clang_getFieldDeclBitWidth",
[Cursor],
c_int),
("clang_getCanonicalCursor",
[Cursor],
Cursor,
Cursor.from_cursor_result),
("clang_getCanonicalType",
[Type],
Type,
Type.from_result),
("clang_getChildDiagnostics",
[Diagnostic],
c_object_p),
("clang_getCompletionAvailability",
[c_void_p],
c_int),
("clang_getCompletionBriefComment",
[c_void_p],
_CXString),
("clang_getCompletionChunkCompletionString",
[c_void_p, c_int],
c_object_p),
("clang_getCompletionChunkKind",
[c_void_p, c_int],
c_int),
("clang_getCompletionChunkText",
[c_void_p, c_int],
_CXString),
("clang_getCompletionPriority",
[c_void_p],
c_int),
("clang_getCString",
[_CXString],
c_char_p),
("clang_getCursor",
[TranslationUnit, SourceLocation],
Cursor),
("clang_getCursorDefinition",
[Cursor],
Cursor,
Cursor.from_result),
("clang_getCursorDisplayName",
[Cursor],
_CXString,
_CXString.from_result),
("clang_getCursorExtent",
[Cursor],
SourceRange),
("clang_getCursorLexicalParent",
[Cursor],
Cursor,
Cursor.from_cursor_result),
("clang_getCursorLocation",
[Cursor],
SourceLocation),
("clang_getCursorReferenced",
[Cursor],
Cursor,
Cursor.from_result),
("clang_getCursorReferenceNameRange",
[Cursor, c_uint, c_uint],
SourceRange),
("clang_getCursorSemanticParent",
[Cursor],
Cursor,
Cursor.from_cursor_result),
("clang_getCursorSpelling",
[Cursor],
_CXString,
_CXString.from_result),
("clang_getCursorType",
[Cursor],
Type,
Type.from_result),
("clang_getCursorUSR",
[Cursor],
_CXString,
_CXString.from_result),
("clang_Cursor_getMangling",
[Cursor],
_CXString,
_CXString.from_result),
# ("clang_getCXTUResourceUsage",
# [TranslationUnit],
# CXTUResourceUsage),
("clang_getCXXAccessSpecifier",
[Cursor],
c_uint),
("clang_getDeclObjCTypeEncoding",
[Cursor],
_CXString,
_CXString.from_result),
("clang_getDiagnostic",
[c_object_p, c_uint],
c_object_p),
("clang_getDiagnosticCategory",
[Diagnostic],
c_uint),
("clang_getDiagnosticCategoryText",
[Diagnostic],
_CXString,
_CXString.from_result),
("clang_getDiagnosticFixIt",
[Diagnostic, c_uint, POINTER(SourceRange)],
_CXString,
_CXString.from_result),
("clang_getDiagnosticInSet",
[c_object_p, c_uint],
c_object_p),
("clang_getDiagnosticLocation",
[Diagnostic],
SourceLocation),
("clang_getDiagnosticNumFixIts",
[Diagnostic],
c_uint),
("clang_getDiagnosticNumRanges",
[Diagnostic],
c_uint),
("clang_getDiagnosticOption",
[Diagnostic, POINTER(_CXString)],
_CXString,
_CXString.from_result),
("clang_getDiagnosticRange",
[Diagnostic, c_uint],
SourceRange),
("clang_getDiagnosticSeverity",
[Diagnostic],
c_int),
("clang_getDiagnosticSpelling",
[Diagnostic],
_CXString,
_CXString.from_result),
("clang_getElementType",
[Type],
Type,
Type.from_result),
("clang_getEnumConstantDeclUnsignedValue",
[Cursor],
c_ulonglong),
("clang_getEnumConstantDeclValue",
[Cursor],
c_longlong),
("clang_getEnumDeclIntegerType",
[Cursor],
Type,
Type.from_result),
("clang_getFile",
[TranslationUnit, c_char_p],
c_object_p),
("clang_getFileName",
[File],
_CXString), # TODO go through _CXString.from_result?
("clang_getFileTime",
[File],
c_uint),
("clang_getIBOutletCollectionType",
[Cursor],
Type,
Type.from_result),
("clang_getIncludedFile",
[Cursor],
File,
File.from_cursor_result),
("clang_getInclusions",
[TranslationUnit, callbacks['translation_unit_includes'], py_object]),
("clang_getInstantiationLocation",
[SourceLocation, POINTER(c_object_p), POINTER(c_uint), POINTER(c_uint),
POINTER(c_uint)]),
("clang_getLocation",
[TranslationUnit, File, c_uint, c_uint],
SourceLocation),
("clang_getLocationForOffset",
[TranslationUnit, File, c_uint],
SourceLocation),
("clang_getNullCursor",
None,
Cursor),
("clang_getNumArgTypes",
[Type],
c_uint),
("clang_getNumCompletionChunks",
[c_void_p],
c_int),
("clang_getNumDiagnostics",
[c_object_p],
c_uint),
("clang_getNumDiagnosticsInSet",
[c_object_p],
c_uint),
("clang_getNumElements",
[Type],
c_longlong),
("clang_getNumOverloadedDecls",
[Cursor],
c_uint),
("clang_getOverloadedDecl",
[Cursor, c_uint],
Cursor,
Cursor.from_cursor_result),
("clang_getPointeeType",
[Type],
Type,
Type.from_result),
("clang_getRange",
[SourceLocation, SourceLocation],
SourceRange),
("clang_getRangeEnd",
[SourceRange],
SourceLocation),
("clang_getRangeStart",
[SourceRange],
SourceLocation),
("clang_getResultType",
[Type],
Type,
Type.from_result),
("clang_getSpecializedCursorTemplate",
[Cursor],
Cursor,
Cursor.from_cursor_result),
("clang_getTemplateCursorKind",
[Cursor],
c_uint),
("clang_getTokenExtent",
[TranslationUnit, Token],
SourceRange),
("clang_getTokenKind",
[Token],
c_uint),
("clang_getTokenLocation",
[TranslationUnit, Token],
SourceLocation),
("clang_getTokenSpelling",
[TranslationUnit, Token],
_CXString,
_CXString.from_result),
("clang_getTranslationUnitCursor",
[TranslationUnit],
Cursor,
Cursor.from_result),
("clang_getTranslationUnitSpelling",
[TranslationUnit],
_CXString,
_CXString.from_result),
("clang_getTUResourceUsageName",
[c_uint],
c_char_p),
("clang_getTypeDeclaration",
[Type],
Cursor,
Cursor.from_result),
("clang_getTypedefDeclUnderlyingType",
[Cursor],
Type,
Type.from_result),
("clang_getTypeKindSpelling",
[c_uint],
_CXString,
_CXString.from_result),
("clang_getTypeSpelling",
[Type],
_CXString,
_CXString.from_result),
("clang_hashCursor",
[Cursor],
c_uint),
("clang_isAttribute",
[CursorKind],
bool),
("clang_isConstQualifiedType",
[Type],
bool),
("clang_isCursorDefinition",
[Cursor],
bool),
("clang_isDeclaration",
[CursorKind],
bool),
("clang_isExpression",
[CursorKind],
bool),
("clang_isFileMultipleIncludeGuarded",
[TranslationUnit, File],
bool),
("clang_isFunctionTypeVariadic",
[Type],
bool),
("clang_isInvalid",
[CursorKind],
bool),
("clang_isPODType",
[Type],
bool),
("clang_isPreprocessing",
[CursorKind],
bool),
("clang_isReference",
[CursorKind],
bool),
("clang_isRestrictQualifiedType",
[Type],
bool),
("clang_isStatement",
[CursorKind],
bool),
("clang_isTranslationUnit",
[CursorKind],
bool),
("clang_isUnexposed",
[CursorKind],
bool),
("clang_isVirtualBase",
[Cursor],
bool),
("clang_isVolatileQualifiedType",
[Type],
bool),
("clang_parseTranslationUnit",
[Index, c_char_p, c_void_p, c_int, c_void_p, c_int, c_int],
c_object_p),
("clang_reparseTranslationUnit",
[TranslationUnit, c_int, c_void_p, c_int],
c_int),
("clang_saveTranslationUnit",
[TranslationUnit, c_char_p, c_uint],
c_int),
("clang_tokenize",
[TranslationUnit, SourceRange, POINTER(POINTER(Token)), POINTER(c_uint)]),
("clang_visitChildren",
[Cursor, callbacks['cursor_visit'], py_object],
c_uint),
("clang_Cursor_getNumArguments",
[Cursor],
c_int),
("clang_Cursor_getArgument",
[Cursor, c_uint],
Cursor,
Cursor.from_result),
("clang_Cursor_getNumTemplateArguments",
[Cursor],
c_int),
("clang_Cursor_getTemplateArgumentKind",
[Cursor, c_uint],
TemplateArgumentKind.from_id),
("clang_Cursor_getTemplateArgumentType",
[Cursor, c_uint],
Type,
Type.from_result),
("clang_Cursor_getTemplateArgumentValue",
[Cursor, c_uint],
c_longlong),
("clang_Cursor_getTemplateArgumentUnsignedValue",
[Cursor, c_uint],
c_ulonglong),
("clang_Cursor_isAnonymous",
[Cursor],
bool),
("clang_Cursor_isBitField",
[Cursor],
bool),
("clang_Cursor_getBriefCommentText",
[Cursor],
_CXString,
_CXString.from_result),
("clang_Cursor_getRawCommentText",
[Cursor],
_CXString,
_CXString.from_result),
("clang_Cursor_getOffsetOfField",
[Cursor],
c_longlong),
("clang_Type_getAlignOf",
[Type],
c_longlong),
("clang_Type_getClassType",
[Type],
Type,
Type.from_result),
("clang_Type_getOffsetOf",
[Type, c_char_p],
c_longlong),
("clang_Type_getSizeOf",
[Type],
c_longlong),
("clang_Type_getCXXRefQualifier",
[Type],
c_uint),
("clang_Type_getNamedType",
[Type],
Type,
Type.from_result),
("clang_Type_visitFields",
[Type, callbacks['fields_visit'], py_object],
c_uint),
]
class LibclangError(Exception):
def __init__(self, message):
self.m = message
def __str__(self):
return self.m
def register_function(lib, item, ignore_errors):
# A function may not exist, if these bindings are used with an older or
# incompatible version of libclang.so.
try:
func = getattr(lib, item[0])
except AttributeError as e:
msg = str(e) + ". Please ensure that your python bindings are "\
"compatible with your libclang.so version."
if ignore_errors:
return
raise LibclangError(msg)
if len(item) >= 2:
func.argtypes = item[1]
if len(item) >= 3:
func.restype = item[2]
if len(item) == 4:
func.errcheck = item[3]
def register_functions(lib, ignore_errors):
"""Register function prototypes with a libclang library instance.
This must be called as part of library instantiation so Python knows how
to call out to the shared library.
"""
def register(item):
return register_function(lib, item, ignore_errors)
map(register, functionList)
class Config:
library_path = None
library_file = None
compatibility_check = True
loaded = False
@staticmethod
def set_library_path(path):
"""Set the path in which to search for libclang"""
if Config.loaded:
raise Exception("library path must be set before before using " \
"any other functionalities in libclang.")
Config.library_path = path
@staticmethod
def set_library_file(filename):
"""Set the exact location of libclang"""
if Config.loaded:
raise Exception("library file must be set before before using " \
"any other functionalities in libclang.")
Config.library_file = filename
@staticmethod
def set_compatibility_check(check_status):
""" Perform compatibility check when loading libclang
The python bindings are only tested and evaluated with the version of
libclang they are provided with. To ensure correct behavior a (limited)
compatibility check is performed when loading the bindings. This check
throws an exception as soon as it fails.
In case these bindings are used with an older version of libclang, parts
that have been stable between releases may still work. Users of the
python bindings can disable the compatibility check. This will cause
the python bindings to load even though they are written for a newer
version of libclang. Failures then arise only if unsupported or
incompatible features are accessed. The user is responsible for testing
that the features they use are available and compatible across
libclang versions.
"""
if Config.loaded:
raise Exception("compatibility_check must be set before before " \
"using any other functionalities in libclang.")
Config.compatibility_check = check_status
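# Sketch of typical configuration (the paths below are assumptions; point
# them at wherever libclang lives on your system):
#   Config.set_library_path('/usr/lib/llvm-4.0/lib')
# or name the shared object exactly:
#   Config.set_library_file('/usr/lib/llvm-4.0/lib/libclang.so')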
@CachedProperty
def lib(self):
lib = self.get_cindex_library()
register_functions(lib, not Config.compatibility_check)
Config.loaded = True
return lib
def get_filename(self):
if Config.library_file:
return Config.library_file
import platform
name = platform.system()
if name == 'Darwin':
file = 'libclang.dylib'
elif name == 'Windows':
file = 'libclang.dll'
else:
file = 'libclang.so'
if Config.library_path:
file = Config.library_path + '/' + file
return file
def get_cindex_library(self):
try:
library = cdll.LoadLibrary(self.get_filename())
except OSError as e:
msg = str(e) + ". To provide a path to libclang use " \
"Config.set_library_path() or " \
"Config.set_library_file()."
raise LibclangError(msg)
return library
def function_exists(self, name):
try:
getattr(self.lib, name)
except AttributeError:
return False
return True
def register_enumerations():
for name, value in clang.enumerations.TokenKinds:
TokenKind.register(value, name)
conf = Config()
register_enumerations()
__all__ = [
'Config',
'CodeCompletionResults',
'CompilationDatabase',
'CompileCommands',
'CompileCommand',
'CursorKind',
'Cursor',
'Diagnostic',
'File',
'FixIt',
'Index',
'SourceLocation',
'SourceRange',
'TokenKind',
'Token',
'TranslationUnitLoadError',
'TranslationUnit',
'TypeKind',
'Type',
]
| 116,776 | 28.86624 | 86 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/bindings/python/clang/__init__.py | #===- __init__.py - Clang Python Bindings --------------------*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
r"""
Clang Library Bindings
======================
This package provides access to the Clang compiler and libraries.
The available modules are:
cindex
Bindings for the Clang indexing library.
"""
# Python 3 uses unicode for strings. The bindings, in particular the interaction
# with ctypes, need modifying to handle conversions between unicode and
# c-strings.
import sys
if sys.version_info[0] != 2:
raise Exception("Only Python 2 is supported.")
__all__ = ['cindex']
| 826 | 24.060606 | 80 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/bindings/python/examples/cindex/cindex-includes.py | #!/usr/bin/env python
#===- cindex-includes.py - cindex/Python Inclusion Graph -----*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
"""
A simple command line tool for dumping a Graphviz description (dot) that
describes include dependencies.
"""
def main():
import sys
from clang.cindex import Index
from optparse import OptionParser, OptionGroup
parser = OptionParser("usage: %prog [options] {filename} [clang-args*]")
parser.disable_interspersed_args()
(opts, args) = parser.parse_args()
if len(args) == 0:
parser.error('invalid number of arguments')
# FIXME: Add an output file option
out = sys.stdout
index = Index.create()
tu = index.parse(None, args)
if not tu:
parser.error("unable to load input")
# A helper function for generating the node name.
def name(f):
if f:
return "\"" + f.name + "\""
# Generate the include graph
out.write("digraph G {\n")
for i in tu.get_includes():
line = " ";
if i.is_input_file:
# Always write the input file as a node just in case it doesn't
# actually include anything. This would generate a 1 node graph.
line += name(i.include)
else:
line += '%s->%s' % (name(i.source), name(i.include))
line += "\n";
out.write(line)
out.write("}\n")
if __name__ == '__main__':
main()
| 1,644 | 26.881356 | 80 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/bindings/python/examples/cindex/cindex-dump.py | #!/usr/bin/env python
#===- cindex-dump.py - cindex/Python Source Dump -------------*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
"""
A simple command line tool for dumping a source file using the Clang Index
Library.
"""
def get_diag_info(diag):
return { 'severity' : diag.severity,
'location' : diag.location,
'spelling' : diag.spelling,
'ranges' : diag.ranges,
'fixits' : diag.fixits }
def get_cursor_id(cursor, cursor_list=[]):
if not opts.showIDs:
return None
if cursor is None:
return None
# FIXME: This is really slow. It would be nice if the index API exposed
# something that let us hash cursors.
for i, c in enumerate(cursor_list):
if cursor == c:
return i
cursor_list.append(cursor)
return len(cursor_list) - 1
def get_info(node, depth=0):
if opts.maxDepth is not None and depth >= opts.maxDepth:
children = None
else:
children = [get_info(c, depth+1)
for c in node.get_children()]
return { 'id' : get_cursor_id(node),
'kind' : node.kind,
'usr' : node.get_usr(),
'spelling' : node.spelling,
'location' : node.location,
'extent.start' : node.extent.start,
'extent.end' : node.extent.end,
'is_definition' : node.is_definition(),
'definition id' : get_cursor_id(node.get_definition()),
'children' : children }
def main():
from clang.cindex import Index
from pprint import pprint
from optparse import OptionParser, OptionGroup
global opts
parser = OptionParser("usage: %prog [options] {filename} [clang-args*]")
parser.add_option("", "--show-ids", dest="showIDs",
help="Compute cursor IDs (very slow)",
action="store_true", default=False)
parser.add_option("", "--max-depth", dest="maxDepth",
help="Limit cursor expansion to depth N",
metavar="N", type=int, default=None)
parser.disable_interspersed_args()
(opts, args) = parser.parse_args()
if len(args) == 0:
parser.error('invalid number of arguments')
index = Index.create()
tu = index.parse(None, args)
if not tu:
parser.error("unable to load input")
pprint(('diags', map(get_diag_info, tu.diagnostics)))
pprint(('nodes', get_info(tu.cursor)))
if __name__ == '__main__':
main()
| 2,733 | 30.068182 | 80 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/bindings/python/tests/__init__.py | 0 | 0 | 0 | py |
|
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/bindings/python/tests/cindex/test_code_completion.py | from clang.cindex import TranslationUnit
def check_completion_results(cr, expected):
assert cr is not None
assert len(cr.diagnostics) == 0
completions = [str(c) for c in cr.results]
for c in expected:
assert c in completions
def test_code_complete():
files = [('fake.c', """
/// Aaa.
int test1;
/// Bbb.
void test2(void);
void f() {
}
""")]
tu = TranslationUnit.from_source('fake.c', ['-std=c99'], unsaved_files=files,
options=TranslationUnit.PARSE_INCLUDE_BRIEF_COMMENTS_IN_CODE_COMPLETION)
cr = tu.codeComplete('fake.c', 9, 1, unsaved_files=files, include_brief_comments=True)
expected = [
"{'int', ResultType} | {'test1', TypedText} || Priority: 50 || Availability: Available || Brief comment: Aaa.",
"{'void', ResultType} | {'test2', TypedText} | {'(', LeftParen} | {')', RightParen} || Priority: 50 || Availability: Available || Brief comment: Bbb.",
"{'return', TypedText} || Priority: 40 || Availability: Available || Brief comment: None"
]
check_completion_results(cr, expected)
def test_code_complete_availability():
files = [('fake.cpp', """
class P {
protected:
int member;
};
class Q : public P {
public:
using P::member;
};
void f(P x, Q y) {
x.; // member is inaccessible
y.; // member is accessible
}
""")]
tu = TranslationUnit.from_source('fake.cpp', ['-std=c++98'], unsaved_files=files)
cr = tu.codeComplete('fake.cpp', 12, 5, unsaved_files=files)
expected = [
"{'const', TypedText} || Priority: 40 || Availability: Available || Brief comment: None",
"{'volatile', TypedText} || Priority: 40 || Availability: Available || Brief comment: None",
"{'operator', TypedText} || Priority: 40 || Availability: Available || Brief comment: None",
"{'P', TypedText} | {'::', Text} || Priority: 75 || Availability: Available || Brief comment: None",
"{'Q', TypedText} | {'::', Text} || Priority: 75 || Availability: Available || Brief comment: None"
]
check_completion_results(cr, expected)
cr = tu.codeComplete('fake.cpp', 13, 5, unsaved_files=files)
expected = [
"{'P', TypedText} | {'::', Text} || Priority: 75 || Availability: Available || Brief comment: None",
"{'P &', ResultType} | {'operator=', TypedText} | {'(', LeftParen} | {'const P &', Placeholder} | {')', RightParen} || Priority: 34 || Availability: Available || Brief comment: None",
"{'int', ResultType} | {'member', TypedText} || Priority: 35 || Availability: NotAccessible || Brief comment: None",
"{'void', ResultType} | {'~P', TypedText} | {'(', LeftParen} | {')', RightParen} || Priority: 34 || Availability: Available || Brief comment: None"
]
check_completion_results(cr, expected)
| 2,766 | 35.407895 | 191 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/bindings/python/tests/cindex/test_translation_unit.py | import gc
import os
import tempfile
from clang.cindex import CursorKind
from clang.cindex import Cursor
from clang.cindex import File
from clang.cindex import Index
from clang.cindex import SourceLocation
from clang.cindex import SourceRange
from clang.cindex import TranslationUnitSaveError
from clang.cindex import TranslationUnitLoadError
from clang.cindex import TranslationUnit
from .util import get_cursor
from .util import get_tu
kInputsDir = os.path.join(os.path.dirname(__file__), 'INPUTS')
def test_spelling():
path = os.path.join(kInputsDir, 'hello.cpp')
tu = TranslationUnit.from_source(path)
assert tu.spelling == path
def test_cursor():
path = os.path.join(kInputsDir, 'hello.cpp')
tu = get_tu(path)
c = tu.cursor
assert isinstance(c, Cursor)
assert c.kind is CursorKind.TRANSLATION_UNIT
def test_parse_arguments():
path = os.path.join(kInputsDir, 'parse_arguments.c')
tu = TranslationUnit.from_source(path, ['-DDECL_ONE=hello', '-DDECL_TWO=hi'])
spellings = [c.spelling for c in tu.cursor.get_children()]
assert spellings[-2] == 'hello'
assert spellings[-1] == 'hi'
def test_reparse_arguments():
path = os.path.join(kInputsDir, 'parse_arguments.c')
tu = TranslationUnit.from_source(path, ['-DDECL_ONE=hello', '-DDECL_TWO=hi'])
tu.reparse()
spellings = [c.spelling for c in tu.cursor.get_children()]
assert spellings[-2] == 'hello'
assert spellings[-1] == 'hi'
def test_unsaved_files():
tu = TranslationUnit.from_source('fake.c', ['-I./'], unsaved_files = [
('fake.c', """
#include "fake.h"
int x;
int SOME_DEFINE;
"""),
('./fake.h', """
#define SOME_DEFINE y
""")
])
spellings = [c.spelling for c in tu.cursor.get_children()]
assert spellings[-2] == 'x'
assert spellings[-1] == 'y'
def test_unsaved_files_2():
import StringIO
tu = TranslationUnit.from_source('fake.c', unsaved_files = [
('fake.c', StringIO.StringIO('int x;'))])
spellings = [c.spelling for c in tu.cursor.get_children()]
assert spellings[-1] == 'x'
def normpaths_equal(path1, path2):
""" Compares two paths for equality after normalizing them with
os.path.normpath
"""
return os.path.normpath(path1) == os.path.normpath(path2)
def test_includes():
def eq(expected, actual):
if not actual.is_input_file:
return normpaths_equal(expected[0], actual.source.name) and \
normpaths_equal(expected[1], actual.include.name)
else:
return normpaths_equal(expected[1], actual.include.name)
src = os.path.join(kInputsDir, 'include.cpp')
h1 = os.path.join(kInputsDir, "header1.h")
h2 = os.path.join(kInputsDir, "header2.h")
h3 = os.path.join(kInputsDir, "header3.h")
inc = [(src, h1), (h1, h3), (src, h2), (h2, h3)]
tu = TranslationUnit.from_source(src)
for i in zip(inc, tu.get_includes()):
assert eq(i[0], i[1])
def save_tu(tu):
"""Convenience API to save a TranslationUnit to a file.
Returns the filename it was saved to.
"""
_, path = tempfile.mkstemp()
tu.save(path)
return path
def test_save():
"""Ensure TranslationUnit.save() works."""
tu = get_tu('int foo();')
path = save_tu(tu)
assert os.path.exists(path)
assert os.path.getsize(path) > 0
os.unlink(path)
def test_save_translation_errors():
"""Ensure that saving to an invalid directory raises."""
tu = get_tu('int foo();')
path = '/does/not/exist/llvm-test.ast'
assert not os.path.exists(os.path.dirname(path))
try:
tu.save(path)
assert False
except TranslationUnitSaveError as ex:
expected = TranslationUnitSaveError.ERROR_UNKNOWN
assert ex.save_error == expected
def test_load():
"""Ensure TranslationUnits can be constructed from saved files."""
tu = get_tu('int foo();')
assert len(tu.diagnostics) == 0
path = save_tu(tu)
assert os.path.exists(path)
assert os.path.getsize(path) > 0
tu2 = TranslationUnit.from_ast_file(filename=path)
assert len(tu2.diagnostics) == 0
foo = get_cursor(tu2, 'foo')
assert foo is not None
# Just in case there is an open file descriptor somewhere.
del tu2
os.unlink(path)
def test_index_parse():
path = os.path.join(kInputsDir, 'hello.cpp')
index = Index.create()
tu = index.parse(path)
assert isinstance(tu, TranslationUnit)
def test_get_file():
"""Ensure tu.get_file() works appropriately."""
tu = get_tu('int foo();')
f = tu.get_file('t.c')
assert isinstance(f, File)
assert f.name == 't.c'
try:
f = tu.get_file('foobar.cpp')
except:
pass
else:
assert False
def test_get_source_location():
"""Ensure tu.get_source_location() works."""
tu = get_tu('int foo();')
location = tu.get_location('t.c', 2)
assert isinstance(location, SourceLocation)
assert location.offset == 2
assert location.file.name == 't.c'
location = tu.get_location('t.c', (1, 3))
assert isinstance(location, SourceLocation)
assert location.line == 1
assert location.column == 3
assert location.file.name == 't.c'
def test_get_source_range():
"""Ensure tu.get_source_range() works."""
tu = get_tu('int foo();')
r = tu.get_extent('t.c', (1,4))
assert isinstance(r, SourceRange)
assert r.start.offset == 1
assert r.end.offset == 4
assert r.start.file.name == 't.c'
assert r.end.file.name == 't.c'
r = tu.get_extent('t.c', ((1,2), (1,3)))
assert isinstance(r, SourceRange)
assert r.start.line == 1
assert r.start.column == 2
assert r.end.line == 1
assert r.end.column == 3
assert r.start.file.name == 't.c'
assert r.end.file.name == 't.c'
start = tu.get_location('t.c', 0)
end = tu.get_location('t.c', 5)
r = tu.get_extent('t.c', (start, end))
assert isinstance(r, SourceRange)
assert r.start.offset == 0
assert r.end.offset == 5
assert r.start.file.name == 't.c'
assert r.end.file.name == 't.c'
def test_get_tokens_gc():
"""Ensures get_tokens() works properly with garbage collection."""
tu = get_tu('int foo();')
r = tu.get_extent('t.c', (0, 10))
tokens = list(tu.get_tokens(extent=r))
assert tokens[0].spelling == 'int'
gc.collect()
assert tokens[0].spelling == 'int'
del tokens[1]
gc.collect()
assert tokens[0].spelling == 'int'
# May trigger segfault if we don't do our job properly.
del tokens
gc.collect()
gc.collect() # Just in case.
def test_fail_from_source():
path = os.path.join(kInputsDir, 'non-existent.cpp')
try:
tu = TranslationUnit.from_source(path)
except TranslationUnitLoadError:
tu = None
assert tu is None
def test_fail_from_ast_file():
path = os.path.join(kInputsDir, 'non-existent.ast')
try:
tu = TranslationUnit.from_ast_file(path)
except TranslationUnitLoadError:
tu = None
assert tu is None
| 7,064 | 27.035714 | 81 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/bindings/python/tests/cindex/test_index.py | from clang.cindex import *
import os
kInputsDir = os.path.join(os.path.dirname(__file__), 'INPUTS')
def test_create():
index = Index.create()
# FIXME: test Index.read
def test_parse():
index = Index.create()
assert isinstance(index, Index)
tu = index.parse(os.path.join(kInputsDir, 'hello.cpp'))
assert isinstance(tu, TranslationUnit)
| 359 | 21.5 | 62 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/bindings/python/tests/cindex/test_token_kind.py | from clang.cindex import TokenKind
from nose.tools import eq_
from nose.tools import ok_
from nose.tools import raises
def test_constructor():
"""Ensure TokenKind constructor works as expected."""
t = TokenKind(5, 'foo')
eq_(t.value, 5)
eq_(t.name, 'foo')
@raises(ValueError)
def test_bad_register():
"""Ensure a duplicate value is rejected for registration."""
TokenKind.register(2, 'foo')
@raises(ValueError)
def test_unknown_value():
"""Ensure trying to fetch an unknown value raises."""
TokenKind.from_value(-1)
def test_registration():
"""Ensure that items registered appear as class attributes."""
ok_(hasattr(TokenKind, 'LITERAL'))
literal = TokenKind.LITERAL
ok_(isinstance(literal, TokenKind))
def test_from_value():
"""Ensure registered values can be obtained from from_value()."""
t = TokenKind.from_value(3)
ok_(isinstance(t, TokenKind))
eq_(t, TokenKind.LITERAL)
def test_repr():
"""Ensure repr() works."""
r = repr(TokenKind.LITERAL)
eq_(r, 'TokenKind.LITERAL')
| 1,064 | 23.204545 | 69 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/bindings/python/tests/cindex/test_diagnostics.py | from clang.cindex import *
from .util import get_tu
# FIXME: We need support for invalid translation units to test better.
def test_diagnostic_warning():
tu = get_tu('int f0() {}\n')
assert len(tu.diagnostics) == 1
assert tu.diagnostics[0].severity == Diagnostic.Warning
assert tu.diagnostics[0].location.line == 1
assert tu.diagnostics[0].location.column == 11
assert (tu.diagnostics[0].spelling ==
'control reaches end of non-void function')
def test_diagnostic_note():
# FIXME: We aren't getting notes here for some reason.
tu = get_tu('#define A x\nvoid *A = 1;\n')
assert len(tu.diagnostics) == 1
assert tu.diagnostics[0].severity == Diagnostic.Warning
assert tu.diagnostics[0].location.line == 2
assert tu.diagnostics[0].location.column == 7
assert 'incompatible' in tu.diagnostics[0].spelling
# assert tu.diagnostics[1].severity == Diagnostic.Note
# assert tu.diagnostics[1].location.line == 1
# assert tu.diagnostics[1].location.column == 11
# assert tu.diagnostics[1].spelling == 'instantiated from'
def test_diagnostic_fixit():
tu = get_tu('struct { int f0; } x = { f0 : 1 };')
assert len(tu.diagnostics) == 1
assert tu.diagnostics[0].severity == Diagnostic.Warning
assert tu.diagnostics[0].location.line == 1
assert tu.diagnostics[0].location.column == 26
assert tu.diagnostics[0].spelling.startswith('use of GNU old-style')
assert len(tu.diagnostics[0].fixits) == 1
assert tu.diagnostics[0].fixits[0].range.start.line == 1
assert tu.diagnostics[0].fixits[0].range.start.column == 26
assert tu.diagnostics[0].fixits[0].range.end.line == 1
assert tu.diagnostics[0].fixits[0].range.end.column == 30
assert tu.diagnostics[0].fixits[0].value == '.f0 = '
def test_diagnostic_range():
tu = get_tu('void f() { int i = "a" + 1; }')
assert len(tu.diagnostics) == 1
assert tu.diagnostics[0].severity == Diagnostic.Warning
assert tu.diagnostics[0].location.line == 1
assert tu.diagnostics[0].location.column == 16
assert tu.diagnostics[0].spelling.startswith('incompatible pointer to')
assert len(tu.diagnostics[0].fixits) == 0
assert len(tu.diagnostics[0].ranges) == 1
assert tu.diagnostics[0].ranges[0].start.line == 1
assert tu.diagnostics[0].ranges[0].start.column == 20
assert tu.diagnostics[0].ranges[0].end.line == 1
assert tu.diagnostics[0].ranges[0].end.column == 27
try:
tu.diagnostics[0].ranges[1].start.line
except IndexError:
assert True
else:
assert False
def test_diagnostic_category():
"""Ensure that category properties work."""
tu = get_tu('int f(int i) { return 7; }', all_warnings=True)
assert len(tu.diagnostics) == 1
d = tu.diagnostics[0]
assert d.severity == Diagnostic.Warning
assert d.location.line == 1
assert d.location.column == 11
assert d.category_number == 2
assert d.category_name == 'Semantic Issue'
def test_diagnostic_option():
"""Ensure that category option properties work."""
tu = get_tu('int f(int i) { return 7; }', all_warnings=True)
assert len(tu.diagnostics) == 1
d = tu.diagnostics[0]
assert d.option == '-Wunused-parameter'
assert d.disable_option == '-Wno-unused-parameter'
def test_diagnostic_children():
tu = get_tu('void f(int x) {} void g() { f(); }')
assert len(tu.diagnostics) == 1
d = tu.diagnostics[0]
children = d.children
assert len(children) == 1
assert children[0].severity == Diagnostic.Note
assert children[0].spelling.endswith('declared here')
assert children[0].location.line == 1
assert children[0].location.column == 1
| 3,693 | 37.884211 | 75 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/bindings/python/tests/cindex/test_cursor.py | import ctypes
import gc
from clang.cindex import CursorKind
from clang.cindex import TemplateArgumentKind
from clang.cindex import TranslationUnit
from clang.cindex import TypeKind
from .util import get_cursor
from .util import get_cursors
from .util import get_tu
kInput = """\
struct s0 {
int a;
int b;
};
struct s1;
void f0(int a0, int a1) {
int l0, l1;
if (a0)
return;
for (;;) {
break;
}
}
"""
def test_get_children():
tu = get_tu(kInput)
it = tu.cursor.get_children()
tu_nodes = list(it)
assert len(tu_nodes) == 3
for cursor in tu_nodes:
assert cursor.translation_unit is not None
assert tu_nodes[0] != tu_nodes[1]
assert tu_nodes[0].kind == CursorKind.STRUCT_DECL
assert tu_nodes[0].spelling == 's0'
assert tu_nodes[0].is_definition() == True
assert tu_nodes[0].location.file.name == 't.c'
assert tu_nodes[0].location.line == 1
assert tu_nodes[0].location.column == 8
assert tu_nodes[0].hash > 0
assert tu_nodes[0].translation_unit is not None
s0_nodes = list(tu_nodes[0].get_children())
assert len(s0_nodes) == 2
assert s0_nodes[0].kind == CursorKind.FIELD_DECL
assert s0_nodes[0].spelling == 'a'
assert s0_nodes[0].type.kind == TypeKind.INT
assert s0_nodes[1].kind == CursorKind.FIELD_DECL
assert s0_nodes[1].spelling == 'b'
assert s0_nodes[1].type.kind == TypeKind.INT
assert tu_nodes[1].kind == CursorKind.STRUCT_DECL
assert tu_nodes[1].spelling == 's1'
assert tu_nodes[1].displayname == 's1'
assert tu_nodes[1].is_definition() == False
assert tu_nodes[2].kind == CursorKind.FUNCTION_DECL
assert tu_nodes[2].spelling == 'f0'
assert tu_nodes[2].displayname == 'f0(int, int)'
assert tu_nodes[2].is_definition() == True
def test_references():
"""Ensure that references to TranslationUnit are kept."""
tu = get_tu('int x;')
cursors = list(tu.cursor.get_children())
assert len(cursors) > 0
cursor = cursors[0]
assert isinstance(cursor.translation_unit, TranslationUnit)
# Delete reference to TU and perform a full GC.
del tu
gc.collect()
assert isinstance(cursor.translation_unit, TranslationUnit)
# If the TU was destroyed, this should cause a segfault.
parent = cursor.semantic_parent
def test_canonical():
source = 'struct X; struct X; struct X { int member; };'
tu = get_tu(source)
cursors = []
for cursor in tu.cursor.get_children():
if cursor.spelling == 'X':
cursors.append(cursor)
assert len(cursors) == 3
assert cursors[1].canonical == cursors[2].canonical
def test_is_const_method():
"""Ensure Cursor.is_const_method works."""
source = 'class X { void foo() const; void bar(); };'
tu = get_tu(source, lang='cpp')
cls = get_cursor(tu, 'X')
foo = get_cursor(tu, 'foo')
bar = get_cursor(tu, 'bar')
assert cls is not None
assert foo is not None
assert bar is not None
assert foo.is_const_method()
assert not bar.is_const_method()
def test_is_converting_constructor():
"""Ensure Cursor.is_converting_constructor works."""
source = 'class X { explicit X(int); X(double); X(); };'
tu = get_tu(source, lang='cpp')
xs = get_cursors(tu, 'X')
assert len(xs) == 4
assert xs[0].kind == CursorKind.CLASS_DECL
cs = xs[1:]
assert cs[0].kind == CursorKind.CONSTRUCTOR
assert cs[1].kind == CursorKind.CONSTRUCTOR
assert cs[2].kind == CursorKind.CONSTRUCTOR
assert not cs[0].is_converting_constructor()
assert cs[1].is_converting_constructor()
assert not cs[2].is_converting_constructor()
def test_is_copy_constructor():
"""Ensure Cursor.is_copy_constructor works."""
source = 'class X { X(); X(const X&); X(X&&); };'
tu = get_tu(source, lang='cpp')
xs = get_cursors(tu, 'X')
assert xs[0].kind == CursorKind.CLASS_DECL
cs = xs[1:]
assert cs[0].kind == CursorKind.CONSTRUCTOR
assert cs[1].kind == CursorKind.CONSTRUCTOR
assert cs[2].kind == CursorKind.CONSTRUCTOR
assert not cs[0].is_copy_constructor()
assert cs[1].is_copy_constructor()
assert not cs[2].is_copy_constructor()
def test_is_default_constructor():
"""Ensure Cursor.is_default_constructor works."""
source = 'class X { X(); X(int); };'
tu = get_tu(source, lang='cpp')
xs = get_cursors(tu, 'X')
assert xs[0].kind == CursorKind.CLASS_DECL
cs = xs[1:]
assert cs[0].kind == CursorKind.CONSTRUCTOR
assert cs[1].kind == CursorKind.CONSTRUCTOR
assert cs[0].is_default_constructor()
assert not cs[1].is_default_constructor()
def test_is_move_constructor():
"""Ensure Cursor.is_move_constructor works."""
source = 'class X { X(); X(const X&); X(X&&); };'
tu = get_tu(source, lang='cpp')
xs = get_cursors(tu, 'X')
assert xs[0].kind == CursorKind.CLASS_DECL
cs = xs[1:]
assert cs[0].kind == CursorKind.CONSTRUCTOR
assert cs[1].kind == CursorKind.CONSTRUCTOR
assert cs[2].kind == CursorKind.CONSTRUCTOR
assert not cs[0].is_move_constructor()
assert not cs[1].is_move_constructor()
assert cs[2].is_move_constructor()
def test_is_default_method():
"""Ensure Cursor.is_default_method works."""
source = 'class X { X() = default; }; class Y { Y(); };'
tu = get_tu(source, lang='cpp')
xs = get_cursors(tu, 'X')
ys = get_cursors(tu, 'Y')
assert len(xs) == 2
assert len(ys) == 2
xc = xs[1]
yc = ys[1]
assert xc.is_default_method()
assert not yc.is_default_method()
def test_is_mutable_field():
"""Ensure Cursor.is_mutable_field works."""
source = 'class X { int x_; mutable int y_; };'
tu = get_tu(source, lang='cpp')
cls = get_cursor(tu, 'X')
x_ = get_cursor(tu, 'x_')
y_ = get_cursor(tu, 'y_')
assert cls is not None
assert x_ is not None
assert y_ is not None
assert not x_.is_mutable_field()
assert y_.is_mutable_field()
def test_is_static_method():
"""Ensure Cursor.is_static_method works."""
source = 'class X { static void foo(); void bar(); };'
tu = get_tu(source, lang='cpp')
cls = get_cursor(tu, 'X')
foo = get_cursor(tu, 'foo')
bar = get_cursor(tu, 'bar')
assert cls is not None
assert foo is not None
assert bar is not None
assert foo.is_static_method()
assert not bar.is_static_method()
def test_is_pure_virtual_method():
"""Ensure Cursor.is_pure_virtual_method works."""
source = 'class X { virtual void foo() = 0; virtual void bar(); };'
tu = get_tu(source, lang='cpp')
cls = get_cursor(tu, 'X')
foo = get_cursor(tu, 'foo')
bar = get_cursor(tu, 'bar')
assert cls is not None
assert foo is not None
assert bar is not None
assert foo.is_pure_virtual_method()
assert not bar.is_pure_virtual_method()
def test_is_virtual_method():
"""Ensure Cursor.is_virtual_method works."""
source = 'class X { virtual void foo(); void bar(); };'
tu = get_tu(source, lang='cpp')
cls = get_cursor(tu, 'X')
foo = get_cursor(tu, 'foo')
bar = get_cursor(tu, 'bar')
assert cls is not None
assert foo is not None
assert bar is not None
assert foo.is_virtual_method()
assert not bar.is_virtual_method()
def test_underlying_type():
tu = get_tu('typedef int foo;')
typedef = get_cursor(tu, 'foo')
assert typedef is not None
assert typedef.kind.is_declaration()
underlying = typedef.underlying_typedef_type
assert underlying.kind == TypeKind.INT
kParentTest = """\
class C {
void f();
}
void C::f() { }
"""
def test_semantic_parent():
tu = get_tu(kParentTest, 'cpp')
curs = get_cursors(tu, 'f')
decl = get_cursor(tu, 'C')
assert(len(curs) == 2)
assert(curs[0].semantic_parent == curs[1].semantic_parent)
assert(curs[0].semantic_parent == decl)
def test_lexical_parent():
tu = get_tu(kParentTest, 'cpp')
curs = get_cursors(tu, 'f')
decl = get_cursor(tu, 'C')
assert(len(curs) == 2)
assert(curs[0].lexical_parent != curs[1].lexical_parent)
assert(curs[0].lexical_parent == decl)
assert(curs[1].lexical_parent == tu.cursor)
def test_enum_type():
tu = get_tu('enum TEST { FOO=1, BAR=2 };')
enum = get_cursor(tu, 'TEST')
assert enum is not None
assert enum.kind == CursorKind.ENUM_DECL
enum_type = enum.enum_type
assert enum_type.kind == TypeKind.UINT
def test_enum_type_cpp():
tu = get_tu('enum TEST : long long { FOO=1, BAR=2 };', lang="cpp")
enum = get_cursor(tu, 'TEST')
assert enum is not None
assert enum.kind == CursorKind.ENUM_DECL
assert enum.enum_type.kind == TypeKind.LONGLONG
def test_objc_type_encoding():
tu = get_tu('int i;', lang='objc')
i = get_cursor(tu, 'i')
assert i is not None
assert i.objc_type_encoding == 'i'
def test_enum_values():
tu = get_tu('enum TEST { SPAM=1, EGG, HAM = EGG * 20};')
enum = get_cursor(tu, 'TEST')
assert enum is not None
assert enum.kind == CursorKind.ENUM_DECL
enum_constants = list(enum.get_children())
assert len(enum_constants) == 3
spam, egg, ham = enum_constants
assert spam.kind == CursorKind.ENUM_CONSTANT_DECL
assert spam.enum_value == 1
assert egg.kind == CursorKind.ENUM_CONSTANT_DECL
assert egg.enum_value == 2
assert ham.kind == CursorKind.ENUM_CONSTANT_DECL
assert ham.enum_value == 40
def test_enum_values_cpp():
tu = get_tu('enum TEST : long long { SPAM = -1, HAM = 0x10000000000};', lang="cpp")
enum = get_cursor(tu, 'TEST')
assert enum is not None
assert enum.kind == CursorKind.ENUM_DECL
enum_constants = list(enum.get_children())
assert len(enum_constants) == 2
spam, ham = enum_constants
assert spam.kind == CursorKind.ENUM_CONSTANT_DECL
assert spam.enum_value == -1
assert ham.kind == CursorKind.ENUM_CONSTANT_DECL
assert ham.enum_value == 0x10000000000
def test_annotation_attribute():
tu = get_tu('int foo (void) __attribute__ ((annotate("here be annotation attribute")));')
foo = get_cursor(tu, 'foo')
assert foo is not None
for c in foo.get_children():
if c.kind == CursorKind.ANNOTATE_ATTR:
assert c.displayname == "here be annotation attribute"
break
else:
assert False, "Couldn't find annotation"
def test_result_type():
tu = get_tu('int foo();')
foo = get_cursor(tu, 'foo')
assert foo is not None
t = foo.result_type
assert t.kind == TypeKind.INT
def test_get_tokens():
"""Ensure we can map cursors back to tokens."""
tu = get_tu('int foo(int i);')
foo = get_cursor(tu, 'foo')
tokens = list(foo.get_tokens())
assert len(tokens) == 6
assert tokens[0].spelling == 'int'
assert tokens[1].spelling == 'foo'
def test_get_arguments():
tu = get_tu('void foo(int i, int j);')
foo = get_cursor(tu, 'foo')
arguments = list(foo.get_arguments())
assert len(arguments) == 2
assert arguments[0].spelling == "i"
assert arguments[1].spelling == "j"
kTemplateArgTest = """\
template <int kInt, typename T, bool kBool>
void foo();
template<>
void foo<-7, float, true>();
"""
def test_get_num_template_arguments():
tu = get_tu(kTemplateArgTest, lang='cpp')
foos = get_cursors(tu, 'foo')
assert foos[1].get_num_template_arguments() == 3
def test_get_template_argument_kind():
tu = get_tu(kTemplateArgTest, lang='cpp')
foos = get_cursors(tu, 'foo')
assert foos[1].get_template_argument_kind(0) == TemplateArgumentKind.INTEGRAL
assert foos[1].get_template_argument_kind(1) == TemplateArgumentKind.TYPE
assert foos[1].get_template_argument_kind(2) == TemplateArgumentKind.INTEGRAL
def test_get_template_argument_type():
tu = get_tu(kTemplateArgTest, lang='cpp')
foos = get_cursors(tu, 'foo')
assert foos[1].get_template_argument_type(1).kind == TypeKind.FLOAT
def test_get_template_argument_value():
tu = get_tu(kTemplateArgTest, lang='cpp')
foos = get_cursors(tu, 'foo')
assert foos[1].get_template_argument_value(0) == -7
assert foos[1].get_template_argument_value(2) == True
def test_get_template_argument_unsigned_value():
tu = get_tu(kTemplateArgTest, lang='cpp')
foos = get_cursors(tu, 'foo')
assert foos[1].get_template_argument_unsigned_value(0) == 2 ** 32 - 7
assert foos[1].get_template_argument_unsigned_value(2) == True
def test_referenced():
tu = get_tu('void foo(); void bar() { foo(); }')
foo = get_cursor(tu, 'foo')
bar = get_cursor(tu, 'bar')
for c in bar.get_children():
if c.kind == CursorKind.CALL_EXPR:
assert c.referenced.spelling == foo.spelling
break
def test_mangled_name():
kInputForMangling = """\
int foo(int, int);
"""
tu = get_tu(kInputForMangling, lang='cpp')
foo = get_cursor(tu, 'foo')
# Since libclang does not link in targets, we cannot pass a triple to it
# and force the target. To enable this test to pass on all platforms, accept
# all valid manglings.
# [c-index-test handles this by running the source through clang, emitting
# an AST file and running libclang on that AST file]
    assert foo.mangled_name in ('_Z3fooii', '__Z3fooii', '?foo@@YAHHH@Z')
| 13,365 | 28.375824 | 93 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/bindings/python/tests/cindex/test_cdb.py | from clang.cindex import CompilationDatabase
from clang.cindex import CompilationDatabaseError
from clang.cindex import CompileCommands
from clang.cindex import CompileCommand
import os
import gc
kInputsDir = os.path.join(os.path.dirname(__file__), 'INPUTS')
def test_create_fail():
"""Check we fail loading a database with an assertion"""
path = os.path.dirname(__file__)
try:
cdb = CompilationDatabase.fromDirectory(path)
except CompilationDatabaseError as e:
assert e.cdb_error == CompilationDatabaseError.ERROR_CANNOTLOADDATABASE
else:
assert False
def test_create():
"""Check we can load a compilation database"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
def test_lookup_fail():
"""Check file lookup failure"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
    assert cdb.getCompileCommands('file_do_not_exist.cpp') is None
def test_lookup_succeed():
"""Check we get some results if the file exists in the db"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
cmds = cdb.getCompileCommands('/home/john.doe/MyProject/project.cpp')
assert len(cmds) != 0
def test_all_compilecommand():
"""Check we get all results from the db"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
cmds = cdb.getAllCompileCommands()
assert len(cmds) == 3
expected = [
{ 'wd': '/home/john.doe/MyProject',
'file': '/home/john.doe/MyProject/project.cpp',
'line': ['clang++', '-o', 'project.o', '-c',
'/home/john.doe/MyProject/project.cpp']},
{ 'wd': '/home/john.doe/MyProjectA',
'file': '/home/john.doe/MyProject/project2.cpp',
'line': ['clang++', '-o', 'project2.o', '-c',
'/home/john.doe/MyProject/project2.cpp']},
{ 'wd': '/home/john.doe/MyProjectB',
'file': '/home/john.doe/MyProject/project2.cpp',
'line': ['clang++', '-DFEATURE=1', '-o', 'project2-feature.o', '-c',
'/home/john.doe/MyProject/project2.cpp']},
]
for i in range(len(cmds)):
assert cmds[i].directory == expected[i]['wd']
assert cmds[i].filename == expected[i]['file']
for arg, exp in zip(cmds[i].arguments, expected[i]['line']):
assert arg == exp
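# Note: zip() stops at the shorter sequence, so the loop above cannot catch a
# missing trailing argument; a stricter variant would compare whole lists,
# e.g. `assert list(cmds[i].arguments) == expected[i]['line']`.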
def test_1_compilecommand():
"""Check file with single compile command"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
file = '/home/john.doe/MyProject/project.cpp'
cmds = cdb.getCompileCommands(file)
assert len(cmds) == 1
assert cmds[0].directory == os.path.dirname(file)
assert cmds[0].filename == file
expected = [ 'clang++', '-o', 'project.o', '-c',
'/home/john.doe/MyProject/project.cpp']
for arg, exp in zip(cmds[0].arguments, expected):
assert arg == exp
def test_2_compilecommand():
"""Check file with 2 compile commands"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
cmds = cdb.getCompileCommands('/home/john.doe/MyProject/project2.cpp')
assert len(cmds) == 2
expected = [
{ 'wd': '/home/john.doe/MyProjectA',
'line': ['clang++', '-o', 'project2.o', '-c',
'/home/john.doe/MyProject/project2.cpp']},
{ 'wd': '/home/john.doe/MyProjectB',
'line': ['clang++', '-DFEATURE=1', '-o', 'project2-feature.o', '-c',
'/home/john.doe/MyProject/project2.cpp']}
]
for i in range(len(cmds)):
assert cmds[i].directory == expected[i]['wd']
for arg, exp in zip(cmds[i].arguments, expected[i]['line']):
assert arg == exp
def test_compilecommand_iterator_stops():
"""Check that iterator stops after the correct number of elements"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
count = 0
for cmd in cdb.getCompileCommands('/home/john.doe/MyProject/project2.cpp'):
count += 1
assert count <= 2
def test_compilationDB_references():
"""Ensure CompilationsCommands are independent of the database"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
cmds = cdb.getCompileCommands('/home/john.doe/MyProject/project.cpp')
del cdb
gc.collect()
workingdir = cmds[0].directory
def test_compilationCommands_references():
"""Ensure CompilationsCommand keeps a reference to CompilationCommands"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
cmds = cdb.getCompileCommands('/home/john.doe/MyProject/project.cpp')
del cdb
cmd0 = cmds[0]
del cmds
gc.collect()
workingdir = cmd0.directory
| 4,586 | 37.872881 | 79 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/bindings/python/tests/cindex/test_type.py | import gc
from clang.cindex import CursorKind
from clang.cindex import TranslationUnit
from clang.cindex import TypeKind
from nose.tools import raises
from .util import get_cursor
from .util import get_tu
kInput = """\
typedef int I;
struct teststruct {
int a;
I b;
long c;
unsigned long d;
signed long e;
const int f;
int *g;
int ***h;
};
"""
def test_a_struct():
tu = get_tu(kInput)
teststruct = get_cursor(tu, 'teststruct')
assert teststruct is not None, "Could not find teststruct."
fields = list(teststruct.get_children())
assert all(x.kind == CursorKind.FIELD_DECL for x in fields)
assert all(x.translation_unit is not None for x in fields)
assert fields[0].spelling == 'a'
assert not fields[0].type.is_const_qualified()
assert fields[0].type.kind == TypeKind.INT
assert fields[0].type.get_canonical().kind == TypeKind.INT
assert fields[1].spelling == 'b'
assert not fields[1].type.is_const_qualified()
assert fields[1].type.kind == TypeKind.TYPEDEF
assert fields[1].type.get_canonical().kind == TypeKind.INT
assert fields[1].type.get_declaration().spelling == 'I'
assert fields[2].spelling == 'c'
assert not fields[2].type.is_const_qualified()
assert fields[2].type.kind == TypeKind.LONG
assert fields[2].type.get_canonical().kind == TypeKind.LONG
assert fields[3].spelling == 'd'
assert not fields[3].type.is_const_qualified()
assert fields[3].type.kind == TypeKind.ULONG
assert fields[3].type.get_canonical().kind == TypeKind.ULONG
assert fields[4].spelling == 'e'
assert not fields[4].type.is_const_qualified()
assert fields[4].type.kind == TypeKind.LONG
assert fields[4].type.get_canonical().kind == TypeKind.LONG
assert fields[5].spelling == 'f'
assert fields[5].type.is_const_qualified()
assert fields[5].type.kind == TypeKind.INT
assert fields[5].type.get_canonical().kind == TypeKind.INT
assert fields[6].spelling == 'g'
assert not fields[6].type.is_const_qualified()
assert fields[6].type.kind == TypeKind.POINTER
assert fields[6].type.get_pointee().kind == TypeKind.INT
assert fields[7].spelling == 'h'
assert not fields[7].type.is_const_qualified()
assert fields[7].type.kind == TypeKind.POINTER
assert fields[7].type.get_pointee().kind == TypeKind.POINTER
assert fields[7].type.get_pointee().get_pointee().kind == TypeKind.POINTER
assert fields[7].type.get_pointee().get_pointee().get_pointee().kind == TypeKind.INT
def test_references():
"""Ensure that a Type maintains a reference to a TranslationUnit."""
tu = get_tu('int x;')
children = list(tu.cursor.get_children())
assert len(children) > 0
cursor = children[0]
t = cursor.type
assert isinstance(t.translation_unit, TranslationUnit)
# Delete main TranslationUnit reference and force a GC.
del tu
gc.collect()
assert isinstance(t.translation_unit, TranslationUnit)
# If the TU was destroyed, this should cause a segfault.
decl = t.get_declaration()
constarrayInput = """
struct teststruct {
void *A[2];
};
"""
def testConstantArray():
tu = get_tu(constarrayInput)
teststruct = get_cursor(tu, 'teststruct')
assert teststruct is not None, "Didn't find teststruct??"
fields = list(teststruct.get_children())
assert fields[0].spelling == 'A'
assert fields[0].type.kind == TypeKind.CONSTANTARRAY
assert fields[0].type.get_array_element_type() is not None
assert fields[0].type.get_array_element_type().kind == TypeKind.POINTER
assert fields[0].type.get_array_size() == 2
def test_equal():
"""Ensure equivalence operators work on Type."""
source = 'int a; int b; void *v;'
tu = get_tu(source)
a = get_cursor(tu, 'a')
b = get_cursor(tu, 'b')
v = get_cursor(tu, 'v')
assert a is not None
assert b is not None
assert v is not None
assert a.type == b.type
assert a.type != v.type
assert a.type != None
assert a.type != 'foo'
def test_type_spelling():
"""Ensure Type.spelling works."""
tu = get_tu('int c[5]; void f(int i[]); int x; int v[x];')
c = get_cursor(tu, 'c')
i = get_cursor(tu, 'i')
x = get_cursor(tu, 'x')
v = get_cursor(tu, 'v')
assert c is not None
assert i is not None
assert x is not None
assert v is not None
assert c.type.spelling == "int [5]"
assert i.type.spelling == "int []"
assert x.type.spelling == "int"
assert v.type.spelling == "int [x]"
def test_typekind_spelling():
"""Ensure TypeKind.spelling works."""
tu = get_tu('int a;')
a = get_cursor(tu, 'a')
assert a is not None
assert a.type.kind.spelling == 'Int'
def test_function_argument_types():
"""Ensure that Type.argument_types() works as expected."""
tu = get_tu('void f(int, int);')
f = get_cursor(tu, 'f')
assert f is not None
args = f.type.argument_types()
assert args is not None
assert len(args) == 2
t0 = args[0]
assert t0 is not None
assert t0.kind == TypeKind.INT
t1 = args[1]
assert t1 is not None
assert t1.kind == TypeKind.INT
args2 = list(args)
assert len(args2) == 2
assert t0 == args2[0]
assert t1 == args2[1]
@raises(TypeError)
def test_argument_types_string_key():
"""Ensure that non-int keys raise a TypeError."""
tu = get_tu('void f(int, int);')
f = get_cursor(tu, 'f')
assert f is not None
args = f.type.argument_types()
assert len(args) == 2
args['foo']
@raises(IndexError)
def test_argument_types_negative_index():
"""Ensure that negative indexes on argument_types Raises an IndexError."""
tu = get_tu('void f(int, int);')
f = get_cursor(tu, 'f')
args = f.type.argument_types()
args[-1]
@raises(IndexError)
def test_argument_types_overflow_index():
"""Ensure that indexes beyond the length of Type.argument_types() raise."""
tu = get_tu('void f(int, int);')
f = get_cursor(tu, 'f')
args = f.type.argument_types()
args[2]
@raises(Exception)
def test_argument_types_invalid_type():
"""Ensure that obtaining argument_types on a Type without them raises."""
tu = get_tu('int i;')
i = get_cursor(tu, 'i')
assert i is not None
i.type.argument_types()
def test_is_pod():
"""Ensure Type.is_pod() works."""
tu = get_tu('int i; void f();')
i = get_cursor(tu, 'i')
f = get_cursor(tu, 'f')
assert i is not None
assert f is not None
assert i.type.is_pod()
assert not f.type.is_pod()
def test_function_variadic():
"""Ensure Type.is_function_variadic works."""
source ="""
#include <stdarg.h>
void foo(int a, ...);
void bar(int a, int b);
"""
tu = get_tu(source)
foo = get_cursor(tu, 'foo')
bar = get_cursor(tu, 'bar')
assert foo is not None
assert bar is not None
assert isinstance(foo.type.is_function_variadic(), bool)
assert foo.type.is_function_variadic()
assert not bar.type.is_function_variadic()
def test_element_type():
"""Ensure Type.element_type works."""
tu = get_tu('int c[5]; void f(int i[]); int x; int v[x];')
c = get_cursor(tu, 'c')
i = get_cursor(tu, 'i')
v = get_cursor(tu, 'v')
assert c is not None
assert i is not None
assert v is not None
assert c.type.kind == TypeKind.CONSTANTARRAY
assert c.type.element_type.kind == TypeKind.INT
assert i.type.kind == TypeKind.INCOMPLETEARRAY
assert i.type.element_type.kind == TypeKind.INT
assert v.type.kind == TypeKind.VARIABLEARRAY
assert v.type.element_type.kind == TypeKind.INT
@raises(Exception)
def test_invalid_element_type():
"""Ensure Type.element_type raises if type doesn't have elements."""
tu = get_tu('int i;')
i = get_cursor(tu, 'i')
assert i is not None
i.element_type
def test_element_count():
"""Ensure Type.element_count works."""
tu = get_tu('int i[5]; int j;')
i = get_cursor(tu, 'i')
j = get_cursor(tu, 'j')
assert i is not None
assert j is not None
assert i.type.element_count == 5
    try:
        j.type.element_count
    except Exception:
        pass
    else:
        assert False, 'element_count should raise for a non-array type'
def test_is_volatile_qualified():
"""Ensure Type.is_volatile_qualified works."""
tu = get_tu('volatile int i = 4; int j = 2;')
i = get_cursor(tu, 'i')
j = get_cursor(tu, 'j')
assert i is not None
assert j is not None
assert isinstance(i.type.is_volatile_qualified(), bool)
assert i.type.is_volatile_qualified()
assert not j.type.is_volatile_qualified()
def test_is_restrict_qualified():
"""Ensure Type.is_restrict_qualified works."""
tu = get_tu('struct s { void * restrict i; void * j; };')
i = get_cursor(tu, 'i')
j = get_cursor(tu, 'j')
assert i is not None
assert j is not None
assert isinstance(i.type.is_restrict_qualified(), bool)
assert i.type.is_restrict_qualified()
assert not j.type.is_restrict_qualified()
def test_record_layout():
"""Ensure Cursor.type.get_size, Cursor.type.get_align and
Cursor.type.get_offset works."""
source ="""
struct a {
long a1;
long a2:3;
long a3:4;
long long a4;
};
"""
tries=[(['-target','i386-linux-gnu'],(4,16,0,32,35,64)),
(['-target','nvptx64-unknown-unknown'],(8,24,0,64,67,128)),
(['-target','i386-pc-win32'],(8,16,0,32,35,64)),
(['-target','msp430-none-none'],(2,14,0,32,35,48))]
for flags, values in tries:
align,total,a1,a2,a3,a4 = values
tu = get_tu(source, flags=flags)
teststruct = get_cursor(tu, 'a')
fields = list(teststruct.get_children())
assert teststruct.type.get_align() == align
assert teststruct.type.get_size() == total
assert teststruct.type.get_offset(fields[0].spelling) == a1
assert teststruct.type.get_offset(fields[1].spelling) == a2
assert teststruct.type.get_offset(fields[2].spelling) == a3
assert teststruct.type.get_offset(fields[3].spelling) == a4
assert fields[0].is_bitfield() == False
assert fields[1].is_bitfield() == True
assert fields[1].get_bitfield_width() == 3
assert fields[2].is_bitfield() == True
assert fields[2].get_bitfield_width() == 4
assert fields[3].is_bitfield() == False
def test_offset():
"""Ensure Cursor.get_record_field_offset works in anonymous records"""
source="""
struct Test {
struct {int a;} typeanon;
struct {
int bariton;
union {
int foo;
};
};
int bar;
};"""
tries=[(['-target','i386-linux-gnu'],(4,16,0,32,64,96)),
(['-target','nvptx64-unknown-unknown'],(8,24,0,32,64,96)),
(['-target','i386-pc-win32'],(8,16,0,32,64,96)),
(['-target','msp430-none-none'],(2,14,0,32,64,96))]
for flags, values in tries:
align,total,f1,bariton,foo,bar = values
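        # Note: get_tu() here does not receive the per-target flags, so each
        # iteration checks the default target's layout; the offsets listed
        # happen to agree across these targets.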
tu = get_tu(source)
teststruct = get_cursor(tu, 'Test')
children = list(teststruct.get_children())
fields = list(teststruct.type.get_fields())
assert children[0].kind == CursorKind.STRUCT_DECL
assert children[0].spelling != "typeanon"
assert children[1].spelling == "typeanon"
assert fields[0].kind == CursorKind.FIELD_DECL
assert fields[1].kind == CursorKind.FIELD_DECL
assert fields[1].is_anonymous()
assert teststruct.type.get_offset("typeanon") == f1
assert teststruct.type.get_offset("bariton") == bariton
assert teststruct.type.get_offset("foo") == foo
assert teststruct.type.get_offset("bar") == bar
def test_decay():
"""Ensure decayed types are handled as the original type"""
tu = get_tu("void foo(int a[]);")
foo = get_cursor(tu, 'foo')
a = foo.type.argument_types()[0]
assert a.kind == TypeKind.INCOMPLETEARRAY
assert a.element_type.kind == TypeKind.INT
assert a.get_canonical().kind == TypeKind.INCOMPLETEARRAY
| 11,981 | 28.439803 | 88 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/bindings/python/tests/cindex/test_access_specifiers.py |
from clang.cindex import AccessSpecifier
from clang.cindex import Cursor
from clang.cindex import TranslationUnit
from .util import get_cursor
from .util import get_tu
def test_access_specifiers():
"""Ensure that C++ access specifiers are available on cursors"""
tu = get_tu("""
class test_class {
public:
void public_member_function();
protected:
void protected_member_function();
private:
void private_member_function();
};
""", lang = 'cpp')
test_class = get_cursor(tu, "test_class")
    assert test_class.access_specifier == AccessSpecifier.INVALID
public = get_cursor(tu.cursor, "public_member_function")
assert public.access_specifier == AccessSpecifier.PUBLIC
protected = get_cursor(tu.cursor, "protected_member_function")
assert protected.access_specifier == AccessSpecifier.PROTECTED
private = get_cursor(tu.cursor, "private_member_function")
assert private.access_specifier == AccessSpecifier.PRIVATE
| 963 | 26.542857 | 68 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/bindings/python/tests/cindex/util.py | # This file provides common utility functions for the test suite.
from clang.cindex import Cursor
from clang.cindex import TranslationUnit
def get_tu(source, lang='c', all_warnings=False, flags=[]):
"""Obtain a translation unit from source and language.
By default, the translation unit is created from source file "t.<ext>"
where <ext> is the default file extension for the specified language. By
default it is C, so "t.c" is the default file name.
Supported languages are {c, cpp, objc}.
all_warnings is a convenience argument to enable all compiler warnings.
"""
args = list(flags)
name = 't.c'
if lang == 'cpp':
name = 't.cpp'
args.append('-std=c++11')
elif lang == 'objc':
name = 't.m'
elif lang != 'c':
raise Exception('Unknown language: %s' % lang)
if all_warnings:
args += ['-Wall', '-Wextra']
return TranslationUnit.from_source(name, args, unsaved_files=[(name,
source)])
def get_cursor(source, spelling):
"""Obtain a cursor from a source object.
This provides a convenient search mechanism to find a cursor with specific
spelling within a source. The first argument can be either a
TranslationUnit or Cursor instance.
If the cursor is not found, None is returned.
"""
# Convenience for calling on a TU.
root_cursor = source if isinstance(source, Cursor) else source.cursor
for cursor in root_cursor.walk_preorder():
if cursor.spelling == spelling:
return cursor
return None
def get_cursors(source, spelling):
"""Obtain all cursors from a source object with a specific spelling.
This provides a convenient search mechanism to find all cursors with
specific spelling within a source. The first argument can be either a
TranslationUnit or Cursor instance.
If no cursors are found, an empty list is returned.
"""
# Convenience for calling on a TU.
root_cursor = source if isinstance(source, Cursor) else source.cursor
cursors = []
for cursor in root_cursor.walk_preorder():
if cursor.spelling == spelling:
cursors.append(cursor)
return cursors
__all__ = [
'get_cursor',
'get_cursors',
'get_tu',
]
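# A minimal usage sketch (ours, not part of the upstream suite); the leading
# underscore keeps test collectors from picking it up.
def _example_usage():
    tu = get_tu('int x; int y;', lang='c')
    assert get_cursor(tu, 'x') is not None
    assert len(get_cursors(tu, 'y')) == 1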
| 2,293 | 29.184211 | 78 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/bindings/python/tests/cindex/test_cursor_kind.py | from clang.cindex import CursorKind
def test_name():
    assert CursorKind.UNEXPOSED_DECL.name == 'UNEXPOSED_DECL'
def test_get_all_kinds():
kinds = CursorKind.get_all_kinds()
assert CursorKind.UNEXPOSED_DECL in kinds
assert CursorKind.TRANSLATION_UNIT in kinds
assert CursorKind.VARIABLE_REF in kinds
assert CursorKind.LAMBDA_EXPR in kinds
assert CursorKind.OBJ_BOOL_LITERAL_EXPR in kinds
assert CursorKind.OBJ_SELF_EXPR in kinds
assert CursorKind.MS_ASM_STMT in kinds
assert CursorKind.MODULE_IMPORT_DECL in kinds
assert CursorKind.TYPE_ALIAS_TEMPLATE_DECL in kinds
def test_kind_groups():
"""Check that every kind classifies to exactly one group."""
assert CursorKind.UNEXPOSED_DECL.is_declaration()
assert CursorKind.TYPE_REF.is_reference()
assert CursorKind.DECL_REF_EXPR.is_expression()
assert CursorKind.UNEXPOSED_STMT.is_statement()
assert CursorKind.INVALID_FILE.is_invalid()
assert CursorKind.TRANSLATION_UNIT.is_translation_unit()
assert not CursorKind.TYPE_REF.is_translation_unit()
assert CursorKind.PREPROCESSING_DIRECTIVE.is_preprocessing()
assert not CursorKind.TYPE_REF.is_preprocessing()
assert CursorKind.UNEXPOSED_DECL.is_unexposed()
assert not CursorKind.TYPE_REF.is_unexposed()
for k in CursorKind.get_all_kinds():
group = [n for n in ('is_declaration', 'is_reference', 'is_expression',
'is_statement', 'is_invalid', 'is_attribute')
if getattr(k, n)()]
if k in ( CursorKind.TRANSLATION_UNIT,
CursorKind.MACRO_DEFINITION,
CursorKind.MACRO_INSTANTIATION,
CursorKind.INCLUSION_DIRECTIVE,
CursorKind.PREPROCESSING_DIRECTIVE,
CursorKind.OVERLOAD_CANDIDATE):
assert len(group) == 0
else:
assert len(group) == 1
| 1,924 | 37.5 | 79 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/bindings/python/tests/cindex/__init__.py | 0 | 0 | 0 | py |
|
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/bindings/python/tests/cindex/test_file.py | from clang.cindex import Index, File
def test_file():
index = Index.create()
tu = index.parse('t.c', unsaved_files = [('t.c', "")])
file = File.from_name(tu, "t.c")
assert str(file) == "t.c"
assert file.name == "t.c"
assert repr(file) == "<File: t.c>"
| 265 | 25.6 | 56 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/bindings/python/tests/cindex/test_comment.py | from clang.cindex import TranslationUnit
from tests.cindex.util import get_cursor
def test_comment():
files = [('fake.c', """
/// Aaa.
int test1;
/// Bbb.
/// x
void test2(void);
void f() {
}
""")]
# make a comment-aware TU
tu = TranslationUnit.from_source('fake.c', ['-std=c99'], unsaved_files=files,
options=TranslationUnit.PARSE_INCLUDE_BRIEF_COMMENTS_IN_CODE_COMPLETION)
test1 = get_cursor(tu, 'test1')
assert test1 is not None, "Could not find test1."
assert test1.type.is_pod()
raw = test1.raw_comment
brief = test1.brief_comment
assert raw == """/// Aaa."""
assert brief == """Aaa."""
test2 = get_cursor(tu, 'test2')
raw = test2.raw_comment
brief = test2.brief_comment
assert raw == """/// Bbb.\n/// x"""
assert brief == """Bbb. x"""
f = get_cursor(tu, 'f')
raw = f.raw_comment
brief = f.brief_comment
assert raw is None
assert brief is None
| 957 | 22.365854 | 84 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/bindings/python/tests/cindex/test_location.py | from clang.cindex import Cursor
from clang.cindex import File
from clang.cindex import SourceLocation
from clang.cindex import SourceRange
from .util import get_cursor
from .util import get_tu
baseInput = "int one;\nint two;\n"
def assert_location(loc, line, column, offset):
assert loc.line == line
assert loc.column == column
assert loc.offset == offset
def test_location():
tu = get_tu(baseInput)
one = get_cursor(tu, 'one')
two = get_cursor(tu, 'two')
assert one is not None
assert two is not None
assert_location(one.location,line=1,column=5,offset=4)
assert_location(two.location,line=2,column=5,offset=13)
# adding a linebreak at top should keep columns same
tu = get_tu('\n' + baseInput)
one = get_cursor(tu, 'one')
two = get_cursor(tu, 'two')
assert one is not None
assert two is not None
assert_location(one.location,line=2,column=5,offset=5)
assert_location(two.location,line=3,column=5,offset=14)
# adding a space should affect column on first line only
tu = get_tu(' ' + baseInput)
one = get_cursor(tu, 'one')
two = get_cursor(tu, 'two')
assert_location(one.location,line=1,column=6,offset=5)
assert_location(two.location,line=2,column=5,offset=14)
# define the expected location ourselves and see if it matches
# the returned location
tu = get_tu(baseInput)
file = File.from_name(tu, 't.c')
location = SourceLocation.from_position(tu, file, 1, 5)
cursor = Cursor.from_location(tu, location)
one = get_cursor(tu, 'one')
assert one is not None
assert one == cursor
# Ensure locations referring to the same entity are equivalent.
location2 = SourceLocation.from_position(tu, file, 1, 5)
assert location == location2
location3 = SourceLocation.from_position(tu, file, 1, 4)
assert location2 != location3
offset_location = SourceLocation.from_offset(tu, file, 5)
cursor = Cursor.from_location(tu, offset_location)
verified = False
for n in [n for n in tu.cursor.get_children() if n.spelling == 'one']:
assert n == cursor
verified = True
assert verified
def test_extent():
tu = get_tu(baseInput)
one = get_cursor(tu, 'one')
two = get_cursor(tu, 'two')
assert_location(one.extent.start,line=1,column=1,offset=0)
assert_location(one.extent.end,line=1,column=8,offset=7)
assert baseInput[one.extent.start.offset:one.extent.end.offset] == "int one"
assert_location(two.extent.start,line=2,column=1,offset=9)
assert_location(two.extent.end,line=2,column=8,offset=16)
assert baseInput[two.extent.start.offset:two.extent.end.offset] == "int two"
file = File.from_name(tu, 't.c')
location1 = SourceLocation.from_position(tu, file, 1, 1)
location2 = SourceLocation.from_position(tu, file, 1, 8)
range1 = SourceRange.from_locations(location1, location2)
range2 = SourceRange.from_locations(location1, location2)
assert range1 == range2
location3 = SourceLocation.from_position(tu, file, 1, 6)
range3 = SourceRange.from_locations(location1, location3)
assert range1 != range3
| 3,153 | 31.854167 | 80 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/bindings/python/tests/cindex/test_tokens.py | from clang.cindex import CursorKind
from clang.cindex import Index
from clang.cindex import SourceLocation
from clang.cindex import SourceRange
from clang.cindex import TokenKind
from nose.tools import eq_
from nose.tools import ok_
from .util import get_tu
def test_token_to_cursor():
"""Ensure we can obtain a Cursor from a Token instance."""
tu = get_tu('int i = 5;')
r = tu.get_extent('t.c', (0, 9))
tokens = list(tu.get_tokens(extent=r))
assert len(tokens) == 4
assert tokens[1].spelling == 'i'
assert tokens[1].kind == TokenKind.IDENTIFIER
cursor = tokens[1].cursor
assert cursor.kind == CursorKind.VAR_DECL
assert tokens[1].cursor == tokens[2].cursor
def test_token_location():
"""Ensure Token.location works."""
tu = get_tu('int foo = 10;')
r = tu.get_extent('t.c', (0, 11))
tokens = list(tu.get_tokens(extent=r))
eq_(len(tokens), 4)
loc = tokens[1].location
ok_(isinstance(loc, SourceLocation))
eq_(loc.line, 1)
eq_(loc.column, 5)
eq_(loc.offset, 4)
def test_token_extent():
"""Ensure Token.extent works."""
tu = get_tu('int foo = 10;')
r = tu.get_extent('t.c', (0, 11))
tokens = list(tu.get_tokens(extent=r))
eq_(len(tokens), 4)
extent = tokens[1].extent
ok_(isinstance(extent, SourceRange))
eq_(extent.start.offset, 4)
eq_(extent.end.offset, 7)
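# Sketch (ours, not part of the upstream suite): tokens for the whole file can
# be pulled from the TU cursor's own extent instead of a hand-built range.
def _example_whole_tu_tokens():
    tu = get_tu('int i;')
    kinds = [t.kind for t in tu.get_tokens(extent=tu.cursor.extent)]
    assert TokenKind.KEYWORD in kinds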
| 1,384 | 25.132075 | 62 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/www/builtins.py | #!/usr/bin/env python
import sys, fileinput
err=0
# Giant associative set of builtin->intrinsic mappings where clang doesn't
# implement the builtin since the vector operation works by default.
repl_map = {
'__builtin_ia32_addps': '_mm_add_ps',
'__builtin_ia32_addsd': '_mm_add_sd',
'__builtin_ia32_addpd': '_mm_add_pd',
'__builtin_ia32_addss': '_mm_add_ss',
'__builtin_ia32_paddb128': '_mm_add_epi8',
'__builtin_ia32_paddw128': '_mm_add_epi16',
'__builtin_ia32_paddd128': '_mm_add_epi32',
'__builtin_ia32_paddq128': '_mm_add_epi64',
'__builtin_ia32_subps': '_mm_sub_ps',
'__builtin_ia32_subsd': '_mm_sub_sd',
'__builtin_ia32_subpd': '_mm_sub_pd',
'__builtin_ia32_subss': '_mm_sub_ss',
'__builtin_ia32_psubb128': '_mm_sub_epi8',
'__builtin_ia32_psubw128': '_mm_sub_epi16',
'__builtin_ia32_psubd128': '_mm_sub_epi32',
'__builtin_ia32_psubq128': '_mm_sub_epi64',
'__builtin_ia32_mulsd': '_mm_mul_sd',
'__builtin_ia32_mulpd': '_mm_mul_pd',
'__builtin_ia32_mulps': '_mm_mul_ps',
'__builtin_ia32_mulss': '_mm_mul_ss',
'__builtin_ia32_pmullw128': '_mm_mullo_epi16',
'__builtin_ia32_divsd': '_mm_div_sd',
'__builtin_ia32_divpd': '_mm_div_pd',
'__builtin_ia32_divps': '_mm_div_ps',
'__builtin_ia32_divss': '_mm_div_ss',
'__builtin_ia32_andpd': '_mm_and_pd',
'__builtin_ia32_andps': '_mm_and_ps',
'__builtin_ia32_pand128': '_mm_and_si128',
'__builtin_ia32_andnpd': '_mm_andnot_pd',
'__builtin_ia32_andnps': '_mm_andnot_ps',
'__builtin_ia32_pandn128': '_mm_andnot_si128',
'__builtin_ia32_orpd': '_mm_or_pd',
'__builtin_ia32_orps': '_mm_or_ps',
'__builtin_ia32_por128': '_mm_or_si128',
'__builtin_ia32_xorpd': '_mm_xor_pd',
'__builtin_ia32_xorps': '_mm_xor_ps',
'__builtin_ia32_pxor128': '_mm_xor_si128',
'__builtin_ia32_cvtps2dq': '_mm_cvtps_epi32',
'__builtin_ia32_cvtsd2ss': '_mm_cvtsd_ss',
'__builtin_ia32_cvtsi2sd': '_mm_cvtsi32_sd',
'__builtin_ia32_cvtss2sd': '_mm_cvtss_sd',
'__builtin_ia32_cvttsd2si': '_mm_cvttsd_si32',
'__builtin_ia32_vec_ext_v2df': '_mm_cvtsd_f64',
'__builtin_ia32_loadhpd': '_mm_loadh_pd',
'__builtin_ia32_loadlpd': '_mm_loadl_pd',
'__builtin_ia32_loadlv4si': '_mm_loadl_epi64',
'__builtin_ia32_cmpeqps': '_mm_cmpeq_ps',
'__builtin_ia32_cmpltps': '_mm_cmplt_ps',
'__builtin_ia32_cmpleps': '_mm_cmple_ps',
'__builtin_ia32_cmpgtps': '_mm_cmpgt_ps',
'__builtin_ia32_cmpgeps': '_mm_cmpge_ps',
'__builtin_ia32_cmpunordps': '_mm_cmpunord_ps',
'__builtin_ia32_cmpneqps': '_mm_cmpneq_ps',
'__builtin_ia32_cmpnltps': '_mm_cmpnlt_ps',
'__builtin_ia32_cmpnleps': '_mm_cmpnle_ps',
'__builtin_ia32_cmpngtps': '_mm_cmpngt_ps',
'__builtin_ia32_cmpordps': '_mm_cmpord_ps',
'__builtin_ia32_cmpeqss': '_mm_cmpeq_ss',
'__builtin_ia32_cmpltss': '_mm_cmplt_ss',
'__builtin_ia32_cmpless': '_mm_cmple_ss',
'__builtin_ia32_cmpunordss': '_mm_cmpunord_ss',
'__builtin_ia32_cmpneqss': '_mm_cmpneq_ss',
'__builtin_ia32_cmpnltss': '_mm_cmpnlt_ss',
'__builtin_ia32_cmpnless': '_mm_cmpnle_ss',
'__builtin_ia32_cmpngtss': '_mm_cmpngt_ss',
'__builtin_ia32_cmpngess': '_mm_cmpnge_ss',
'__builtin_ia32_cmpordss': '_mm_cmpord_ss',
'__builtin_ia32_movss': '_mm_move_ss',
'__builtin_ia32_movsd': '_mm_move_sd',
'__builtin_ia32_movhlps': '_mm_movehl_ps',
'__builtin_ia32_movlhps': '_mm_movelh_ps',
'__builtin_ia32_movqv4si': '_mm_move_epi64',
'__builtin_ia32_unpckhps': '_mm_unpackhi_ps',
'__builtin_ia32_unpckhpd': '_mm_unpackhi_pd',
'__builtin_ia32_punpckhbw128': '_mm_unpackhi_epi8',
'__builtin_ia32_punpckhwd128': '_mm_unpackhi_epi16',
'__builtin_ia32_punpckhdq128': '_mm_unpackhi_epi32',
'__builtin_ia32_punpckhqdq128': '_mm_unpackhi_epi64',
'__builtin_ia32_unpcklps': '_mm_unpacklo_ps',
'__builtin_ia32_unpcklpd': '_mm_unpacklo_pd',
'__builtin_ia32_punpcklbw128': '_mm_unpacklo_epi8',
'__builtin_ia32_punpcklwd128': '_mm_unpacklo_epi16',
'__builtin_ia32_punpckldq128': '_mm_unpacklo_epi32',
'__builtin_ia32_punpcklqdq128': '_mm_unpacklo_epi64',
'__builtin_ia32_cmpeqpd': '_mm_cmpeq_pd',
'__builtin_ia32_cmpltpd': '_mm_cmplt_pd',
'__builtin_ia32_cmplepd': '_mm_cmple_pd',
'__builtin_ia32_cmpgtpd': '_mm_cmpgt_pd',
'__builtin_ia32_cmpgepd': '_mm_cmpge_pd',
'__builtin_ia32_cmpunordpd': '_mm_cmpunord_pd',
'__builtin_ia32_cmpneqpd': '_mm_cmpneq_pd',
'__builtin_ia32_cmpnltpd': '_mm_cmpnlt_pd',
'__builtin_ia32_cmpnlepd': '_mm_cmpnle_pd',
'__builtin_ia32_cmpngtpd': '_mm_cmpngt_pd',
'__builtin_ia32_cmpngepd': '_mm_cmpnge_pd',
'__builtin_ia32_cmpordpd': '_mm_cmpord_pd',
'__builtin_ia32_cmpeqsd': '_mm_cmpeq_sd',
'__builtin_ia32_cmpltsd': '_mm_cmplt_sd',
'__builtin_ia32_cmplesd': '_mm_cmple_sd',
'__builtin_ia32_cmpunordsd': '_mm_cmpunord_sd',
'__builtin_ia32_cmpneqsd': '_mm_cmpneq_sd',
'__builtin_ia32_cmpnltsd': '_mm_cmpnlt_sd',
'__builtin_ia32_cmpnlesd': '_mm_cmpnle_sd',
'__builtin_ia32_cmpordsd': '_mm_cmpord_sd',
'__builtin_ia32_cvtsi642ss': '_mm_cvtsi64_ss',
'__builtin_ia32_cvttss2si64': '_mm_cvtss_si64',
'__builtin_ia32_shufps': '_mm_shuffle_ps',
'__builtin_ia32_shufpd': '_mm_shuffle_pd',
'__builtin_ia32_pshufhw': '_mm_shufflehi_epi16',
'__builtin_ia32_pshuflw': '_mm_shufflelo_epi16',
'__builtin_ia32_pshufd': '_mm_shuffle_epi32',
'__builtin_ia32_movshdup': '_mm_movehdup_ps',
'__builtin_ia32_movsldup': '_mm_moveldup_ps',
'__builtin_ia32_maxps': '_mm_max_ps',
'__builtin_ia32_pslldi128': '_mm_slli_epi32',
'__builtin_ia32_vec_set_v16qi': '_mm_insert_epi8',
'__builtin_ia32_vec_set_v8hi': '_mm_insert_epi16',
'__builtin_ia32_vec_set_v4si': '_mm_insert_epi32',
'__builtin_ia32_vec_set_v2di': '_mm_insert_epi64',
'__builtin_ia32_vec_set_v4hi': '_mm_insert_pi16',
'__builtin_ia32_vec_ext_v16qi': '_mm_extract_epi8',
'__builtin_ia32_vec_ext_v8hi': '_mm_extract_epi16',
'__builtin_ia32_vec_ext_v4si': '_mm_extract_epi32',
'__builtin_ia32_vec_ext_v2di': '_mm_extract_epi64',
'__builtin_ia32_vec_ext_v4hi': '_mm_extract_pi16',
'__builtin_ia32_vec_ext_v4sf': '_mm_extract_ps'
}
# Special unhandled cases:
# __builtin_ia32_vec_ext_*(__P, idx) -> _mm_store_sd/_mm_storeh_pd
# depending on index. No abstract insert/extract for these oddly.
unhandled = [
'__builtin_ia32_vec_ext_v2df',
'__builtin_ia32_vec_ext_v2si',
]
def report_repl(builtin, repl):
sys.stderr.write("%s:%d: x86 builtin %s used, replaced with %s\n" % (fileinput.filename(), fileinput.filelineno(), builtin, repl))
def report_cant(builtin):
  # A builtin without a direct replacement should fail the run: record a
  # nonzero exit status so callers notice the manual-fixup cases.
  global err
  err = 1
  sys.stderr.write("%s:%d: x86 builtin %s used, too many replacements\n" % (fileinput.filename(), fileinput.filelineno(), builtin))
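# Illustrative helper (ours, not used by the tool): the same substitution the
# main loop below performs, applied to a single line; handy for eyeballing a
# mapping, e.g.:
#   _rewrite_line('x = __builtin_ia32_addps(a, b);') -> 'x = _mm_add_ps(a, b);'
def _rewrite_line(line):
  for builtin, repl in repl_map.iteritems():
    if builtin in line:
      line = line.replace(builtin, repl)
  return line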
for line in fileinput.input(inplace=1):
for builtin, repl in repl_map.iteritems():
if builtin in line:
line = line.replace(builtin, repl)
report_repl(builtin, repl)
for unh in unhandled:
if unh in line:
report_cant(unh)
sys.stdout.write(line)
sys.exit(err)
| 6,633 | 39.45122 | 132 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/docs/conf.py | # -*- coding: utf-8 -*-
#
# Clang documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 9 20:01:55 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from datetime import date
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Clang'
copyright = u'2007-%d, The Clang Team' % date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4'
# The full version, including alpha/beta/rc tags.
release = '4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'analyzer']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Clangdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Clang.tex', u'Clang Documentation',
u'The Clang Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = []
# Automatically derive the list of man pages from the contents of the command
# guide subdirectory. This was copied from llvm/docs/conf.py.
basedir = os.path.dirname(__file__)
man_page_authors = u'Maintained by the Clang / LLVM Team (<http://clang.llvm.org>)'
command_guide_subpath = 'CommandGuide'
command_guide_path = os.path.join(basedir, command_guide_subpath)
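# Each page under CommandGuide/ is expected to open with a
# "<name> - <description>" title plus a matching underline, e.g.:
#   clang - the Clang C, C++, and Objective-C compiler
#   --------------------------------------------------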
for name in os.listdir(command_guide_path):
# Ignore non-ReST files and the index page.
if not name.endswith('.rst') or name in ('index.rst',):
continue
# Otherwise, automatically extract the description.
file_subpath = os.path.join(command_guide_subpath, name)
with open(os.path.join(command_guide_path, name)) as f:
title = f.readline().rstrip('\n')
header = f.readline().rstrip('\n')
if len(header) != len(title):
print >>sys.stderr, (
"error: invalid header in %r (does not match title)" % (
file_subpath,))
if ' - ' not in title:
print >>sys.stderr, (
("error: invalid title in %r "
"(expected '<name> - <description>')") % (
file_subpath,))
# Split the name out of the title.
name,description = title.split(' - ', 1)
man_pages.append((file_subpath.replace('.rst',''), name,
description, man_page_authors, 1))
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Clang', u'Clang Documentation',
u'The Clang Team', 'Clang', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| 9,128 | 32.317518 | 83 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/docs/tools/dump_format_style.py | #!/usr/bin/env python
# A tool to parse the FormatStyle struct from Format.h and update the
# documentation in ../ClangFormatStyleOptions.rst automatically.
# Run from the directory in which this file is located to update the docs.
import collections
import os
import re
import urllib2
CLANG_DIR = os.path.join(os.path.dirname(__file__), '../..')
FORMAT_STYLE_FILE = os.path.join(CLANG_DIR, 'include/clang/Format/Format.h')
DOC_FILE = os.path.join(CLANG_DIR, 'docs/ClangFormatStyleOptions.rst')
def substitute(text, tag, contents):
replacement = '\n.. START_%s\n\n%s\n\n.. END_%s\n' % (tag, contents, tag)
pattern = r'\n\.\. START_%s\n.*\n\.\. END_%s\n' % (tag, tag)
return re.sub(pattern, '%s', text, flags=re.S) % replacement
def doxygen2rst(text):
text = re.sub(r'([^/\*])\*', r'\1\\*', text)
text = re.sub(r'<tt>\s*(.*?)\s*<\/tt>', r'``\1``', text)
text = re.sub(r'\\c ([^ ,;\.]+)', r'``\1``', text)
text = re.sub(r'\\\w+ ', '', text)
return text
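# Tiny illustration (ours, not upstream) of the transformations above:
assert doxygen2rst(r'<tt>x</tt> is \c true') == r'``x`` is ``true``'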
def indent(text, columns):
indent = ' ' * columns
s = re.sub(r'\n([^\n])', '\n' + indent + '\\1', text, flags=re.S)
if s.startswith('\n'):
return s
return indent + s
class Option:
def __init__(self, name, type, comment):
self.name = name
self.type = type
self.comment = comment.strip()
self.enum = None
self.nested_struct = None
def __str__(self):
s = '**%s** (``%s``)\n%s' % (self.name, self.type,
doxygen2rst(indent(self.comment, 2)))
if self.enum:
s += indent('\n\nPossible values:\n\n%s\n' % self.enum, 2)
if self.nested_struct:
s += indent('\n\nNested configuration flags:\n\n%s\n' %self.nested_struct,
2)
return s
class NestedStruct:
def __init__(self, name, comment):
self.name = name
self.comment = comment.strip()
self.values = []
def __str__(self):
return '\n'.join(map(str, self.values))
class NestedField:
def __init__(self, name, comment):
self.name = name
self.comment = comment.strip()
def __str__(self):
return '* ``%s`` %s' % (self.name, doxygen2rst(self.comment))
class Enum:
def __init__(self, name, comment):
self.name = name
self.comment = comment.strip()
self.values = []
def __str__(self):
return '\n'.join(map(str, self.values))
class EnumValue:
def __init__(self, name, comment):
self.name = name
self.comment = comment
def __str__(self):
return '* ``%s`` (in configuration: ``%s``)\n%s' % (
self.name,
re.sub('.*_', '', self.name),
doxygen2rst(indent(self.comment, 2)))
def clean_comment_line(line):
match = re.match(r'^/// \\code(\{.(\w+)\})?$', line)
if match:
lang = match.groups()[1]
if not lang:
lang = 'c++'
return '\n.. code-block:: %s\n\n' % lang
if line == '/// \\endcode':
return ''
return line[4:] + '\n'
def read_options(header):
class State:
    BeforeStruct, Finished, InStruct, InNestedStruct, InNestedFieldComment, \
      InFieldComment, InEnum, InEnumMemberComment = range(8)
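  # The loop below is a line-oriented state machine over Format.h: '///'
  # comment text accumulates in the *Comment states and attaches to the next
  # field, nested struct, or enum value encountered.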
state = State.BeforeStruct
options = []
enums = {}
nested_structs = {}
comment = ''
enum = None
nested_struct = None
for line in header:
line = line.strip()
if state == State.BeforeStruct:
if line == 'struct FormatStyle {':
state = State.InStruct
elif state == State.InStruct:
if line.startswith('///'):
state = State.InFieldComment
comment = clean_comment_line(line)
elif line == '};':
state = State.Finished
break
elif state == State.InFieldComment:
if line.startswith('///'):
comment += clean_comment_line(line)
elif line.startswith('enum'):
state = State.InEnum
name = re.sub(r'enum\s+(\w+)\s*\{', '\\1', line)
enum = Enum(name, comment)
elif line.startswith('struct'):
state = State.InNestedStruct
name = re.sub(r'struct\s+(\w+)\s*\{', '\\1', line)
nested_struct = NestedStruct(name, comment)
elif line.endswith(';'):
state = State.InStruct
field_type, field_name = re.match(r'([<>:\w(,\s)]+)\s+(\w+);',
line).groups()
option = Option(str(field_name), str(field_type), comment)
options.append(option)
else:
raise Exception('Invalid format, expected comment, field or enum')
elif state == State.InNestedStruct:
if line.startswith('///'):
        state = State.InNestedFieldComment
comment = clean_comment_line(line)
elif line == '};':
state = State.InStruct
nested_structs[nested_struct.name] = nested_struct
    elif state == State.InNestedFieldComment:
if line.startswith('///'):
comment += clean_comment_line(line)
else:
state = State.InNestedStruct
nested_struct.values.append(NestedField(line.replace(';', ''), comment))
elif state == State.InEnum:
if line.startswith('///'):
state = State.InEnumMemberComment
comment = clean_comment_line(line)
elif line == '};':
state = State.InStruct
enums[enum.name] = enum
else:
raise Exception('Invalid format, expected enum field comment or };')
elif state == State.InEnumMemberComment:
if line.startswith('///'):
comment += clean_comment_line(line)
else:
state = State.InEnum
enum.values.append(EnumValue(line.replace(',', ''), comment))
if state != State.Finished:
raise Exception('Not finished by the end of file')
for option in options:
if not option.type in ['bool', 'unsigned', 'int', 'std::string',
'std::vector<std::string>',
'std::vector<IncludeCategory>']:
if enums.has_key(option.type):
option.enum = enums[option.type]
elif nested_structs.has_key(option.type):
        option.nested_struct = nested_structs[option.type]
else:
raise Exception('Unknown type: %s' % option.type)
return options
options = read_options(open(FORMAT_STYLE_FILE))
options = sorted(options, key=lambda x: x.name)
options_text = '\n\n'.join(map(str, options))
contents = open(DOC_FILE).read()
contents = substitute(contents, 'FORMAT_STYLE_OPTIONS', options_text)
with open(DOC_FILE, 'wb') as output:
output.write(contents)
| 6,350 | 30.755 | 80 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/docs/tools/dump_ast_matchers.py | #!/usr/bin/env python
# A tool to parse ASTMatchers.h and update the documentation in
# ../LibASTMatchersReference.html automatically. Run from the
# directory in which this file is located to update the docs.
import collections
import re
import urllib2
MATCHERS_FILE = '../../include/clang/ASTMatchers/ASTMatchers.h'
# Each matcher is documented in one row of the form:
# result | name | argA
# The subsequent row contains the documentation and is hidden by default,
# becoming visible via javascript when the user clicks the matcher name.
TD_TEMPLATE="""
<tr><td>%(result)s</td><td class="name" onclick="toggle('%(id)s')"><a name="%(id)sAnchor">%(name)s</a></td><td>%(args)s</td></tr>
<tr><td colspan="4" class="doc" id="%(id)s"><pre>%(comment)s</pre></td></tr>
"""
# We categorize the matchers into these three categories in the reference:
node_matchers = {}
narrowing_matchers = {}
traversal_matchers = {}
# We output multiple rows per matcher if the matcher can be used on multiple
# node types. Thus, we need a new id per row to control the documentation
# pop-up. ids[name] keeps track of those ids.
ids = collections.defaultdict(int)
# Cache for doxygen urls we have already verified.
doxygen_probes = {}
def esc(text):
"""Escape any html in the given text."""
text = re.sub(r'&', '&', text)
text = re.sub(r'<', '<', text)
text = re.sub(r'>', '>', text)
def link_if_exists(m):
name = m.group(1)
url = 'http://clang.llvm.org/doxygen/classclang_1_1%s.html' % name
if url not in doxygen_probes:
try:
print 'Probing %s...' % url
urllib2.urlopen(url)
doxygen_probes[url] = True
except:
doxygen_probes[url] = False
if doxygen_probes[url]:
return r'Matcher<<a href="%s">%s</a>>' % (url, name)
else:
return m.group(0)
text = re.sub(
r'Matcher<([^\*&]+)>', link_if_exists, text)
return text
def extract_result_types(comment):
"""Extracts a list of result types from the given comment.
We allow annotations in the comment of the matcher to specify what
nodes a matcher can match on. Those comments have the form:
Usable as: Any Matcher | (Matcher<T1>[, Matcher<t2>[, ...]])
Returns ['*'] in case of 'Any Matcher', or ['T1', 'T2', ...].
Returns the empty list if no 'Usable as' specification could be
parsed.
"""
result_types = []
m = re.search(r'Usable as: Any Matcher[\s\n]*$', comment, re.S)
if m:
return ['*']
while True:
m = re.match(r'^(.*)Matcher<([^>]+)>\s*,?[\s\n]*$', comment, re.S)
if not m:
if re.search(r'Usable as:\s*$', comment):
return result_types
else:
return None
result_types += [m.group(2)]
comment = m.group(1)
def strip_doxygen(comment):
"""Returns the given comment without \-escaped words."""
# If there is only a doxygen keyword in the line, delete the whole line.
comment = re.sub(r'^\\[^\s]+\n', r'', comment, flags=re.M)
# If there is a doxygen \see command, change the \see prefix into "See also:".
# FIXME: it would be better to turn this into a link to the target instead.
comment = re.sub(r'\\see', r'See also:', comment)
# Delete the doxygen command and the following whitespace.
comment = re.sub(r'\\[^\s]+\s+', r'', comment)
return comment
def unify_arguments(args):
"""Gets rid of anything the user doesn't care about in the argument list."""
args = re.sub(r'internal::', r'', args)
args = re.sub(r'const\s+(.*)&', r'\1 ', args)
args = re.sub(r'&', r' ', args)
args = re.sub(r'(^|\s)M\d?(\s)', r'\1Matcher<*>\2', args)
return args
def add_matcher(result_type, name, args, comment, is_dyncast=False):
"""Adds a matcher to one of our categories."""
if name == 'id':
# FIXME: Figure out whether we want to support the 'id' matcher.
return
matcher_id = '%s%d' % (name, ids[name])
ids[name] += 1
args = unify_arguments(args)
matcher_html = TD_TEMPLATE % {
'result': esc('Matcher<%s>' % result_type),
'name': name,
'args': esc(args),
'comment': esc(strip_doxygen(comment)),
'id': matcher_id,
}
if is_dyncast:
node_matchers[result_type + name] = matcher_html
# Use a heuristic to figure out whether a matcher is a narrowing or
# traversal matcher. By default, matchers that take other matchers as
# arguments (and are not node matchers) do traversal. We specifically
# exclude known narrowing matchers that also take other matchers as
# arguments.
elif ('Matcher<' not in args or
name in ['allOf', 'anyOf', 'anything', 'unless']):
narrowing_matchers[result_type + name + esc(args)] = matcher_html
else:
traversal_matchers[result_type + name + esc(args)] = matcher_html
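# For example (illustrative, not taken from this run's input):
# 'AST_MATCHER(FunctionDecl, isMain)' takes no matcher arguments and is
# filed under narrowing_matchers, while a declaration whose argument list
# mentions 'Matcher<', such as hasArgument(unsigned, Matcher<Expr>), is
# filed under traversal_matchers by the heuristic above.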
def act_on_decl(declaration, comment, allowed_types):
"""Parse the matcher out of the given declaration and comment.
If 'allowed_types' is set, it contains a list of node types the matcher
can match on, as extracted from the static type asserts in the matcher
definition.
"""
if declaration.strip():
# Node matchers are defined by writing:
# VariadicDynCastAllOfMatcher<ResultType, ArgumentType> name;
m = re.match(r""".*Variadic(?:DynCast)?AllOfMatcher\s*<
\s*([^\s,]+)\s*(?:,
\s*([^\s>]+)\s*)?>
\s*([^\s;]+)\s*;\s*$""", declaration, flags=re.X)
if m:
result, inner, name = m.groups()
if not inner:
inner = result
add_matcher(result, name, 'Matcher<%s>...' % inner,
comment, is_dyncast=True)
return
# Parse the various matcher definition macros.
m = re.match(""".*AST_TYPE_MATCHER\(
\s*([^\s,]+\s*),
\s*([^\s,]+\s*)
\)\s*;\s*$""", declaration, flags=re.X)
if m:
inner, name = m.groups()
add_matcher('Type', name, 'Matcher<%s>...' % inner,
comment, is_dyncast=True)
# FIXME: re-enable once we have implemented casting on the TypeLoc
# hierarchy.
# add_matcher('TypeLoc', '%sLoc' % name, 'Matcher<%sLoc>...' % inner,
# comment, is_dyncast=True)
return
m = re.match(""".*AST_TYPE(LOC)?_TRAVERSE_MATCHER\(
\s*([^\s,]+\s*),
\s*(?:[^\s,]+\s*),
\s*AST_POLYMORPHIC_SUPPORTED_TYPES\(([^)]*)\)
\)\s*;\s*$""", declaration, flags=re.X)
if m:
loc, name, results = m.groups()[0:3]
result_types = [r.strip() for r in results.split(',')]
comment_result_types = extract_result_types(comment)
if (comment_result_types and
sorted(result_types) != sorted(comment_result_types)):
raise Exception('Inconsistent documentation for: %s' % name)
for result_type in result_types:
add_matcher(result_type, name, 'Matcher<Type>', comment)
if loc:
add_matcher('%sLoc' % result_type, '%sLoc' % name, 'Matcher<TypeLoc>',
comment)
return
m = re.match(r"""^\s*AST_POLYMORPHIC_MATCHER(_P)?(.?)(?:_OVERLOAD)?\(
\s*([^\s,]+)\s*,
\s*AST_POLYMORPHIC_SUPPORTED_TYPES\(([^)]*)\)
(?:,\s*([^\s,]+)\s*
,\s*([^\s,]+)\s*)?
(?:,\s*([^\s,]+)\s*
,\s*([^\s,]+)\s*)?
(?:,\s*\d+\s*)?
\)\s*{\s*$""", declaration, flags=re.X)
if m:
p, n, name, results = m.groups()[0:4]
args = m.groups()[4:]
result_types = [r.strip() for r in results.split(',')]
if allowed_types and allowed_types != result_types:
raise Exception('Inconsistent documentation for: %s' % name)
if n not in ['', '2']:
raise Exception('Cannot parse "%s"' % declaration)
args = ', '.join('%s %s' % (args[i], args[i+1])
for i in range(0, len(args), 2) if args[i])
for result_type in result_types:
add_matcher(result_type, name, args, comment)
return
m = re.match(r"""^\s*AST_MATCHER_FUNCTION(_P)?(.?)(?:_OVERLOAD)?\(
(?:\s*([^\s,]+)\s*,)?
\s*([^\s,]+)\s*
(?:,\s*([^\s,]+)\s*
,\s*([^\s,]+)\s*)?
(?:,\s*([^\s,]+)\s*
,\s*([^\s,]+)\s*)?
(?:,\s*\d+\s*)?
\)\s*{\s*$""", declaration, flags=re.X)
if m:
p, n, result, name = m.groups()[0:4]
args = m.groups()[4:]
if n not in ['', '2']:
raise Exception('Cannot parse "%s"' % declaration)
args = ', '.join('%s %s' % (args[i], args[i+1])
for i in range(0, len(args), 2) if args[i])
add_matcher(result, name, args, comment)
return
m = re.match(r"""^\s*AST_MATCHER(_P)?(.?)(?:_OVERLOAD)?\(
(?:\s*([^\s,]+)\s*,)?
\s*([^\s,]+)\s*
(?:,\s*([^,]+)\s*
,\s*([^\s,]+)\s*)?
(?:,\s*([^\s,]+)\s*
,\s*([^\s,]+)\s*)?
(?:,\s*\d+\s*)?
\)\s*{\s*$""", declaration, flags=re.X)
if m:
p, n, result, name = m.groups()[0:4]
args = m.groups()[4:]
if not result:
if not allowed_types:
raise Exception('Did not find allowed result types for: %s' % name)
result_types = allowed_types
else:
result_types = [result]
if n not in ['', '2']:
raise Exception('Cannot parse "%s"' % declaration)
args = ', '.join('%s %s' % (args[i], args[i+1])
for i in range(0, len(args), 2) if args[i])
for result_type in result_types:
add_matcher(result_type, name, args, comment)
return
# Parse ArgumentAdapting matchers.
m = re.match(
r"""^.*ArgumentAdaptingMatcherFunc<.*>\s*(?:LLVM_ATTRIBUTE_UNUSED\s*)
([a-zA-Z]*)\s*=\s*{};$""",
declaration, flags=re.X)
if m:
name = m.groups()[0]
add_matcher('*', name, 'Matcher<*>', comment)
return
# Parse Variadic functions.
m = re.match(
r"""^.*internal::VariadicFunction\s*<\s*([^,]+),\s*([^,]+),\s*[^>]+>\s*
([a-zA-Z]*)\s*=\s*{.*};$""",
declaration, flags=re.X)
if m:
result, arg, name = m.groups()[:3]
add_matcher(result, name, '%s, ..., %s' % (arg, arg), comment)
return
# Parse Variadic operator matchers.
m = re.match(
r"""^.*VariadicOperatorMatcherFunc\s*<\s*([^,]+),\s*([^\s>]+)\s*>\s*
([a-zA-Z]*)\s*=\s*{.*};$""",
declaration, flags=re.X)
if m:
min_args, max_args, name = m.groups()[:3]
if max_args == '1':
add_matcher('*', name, 'Matcher<*>', comment)
return
elif max_args == 'UINT_MAX':
add_matcher('*', name, 'Matcher<*>, ..., Matcher<*>', comment)
return
# Parse free standing matcher functions, like:
# Matcher<ResultType> Name(Matcher<ArgumentType> InnerMatcher) {
m = re.match(r"""^\s*(.*)\s+
([^\s\(]+)\s*\(
(.*)
\)\s*{""", declaration, re.X)
if m:
result, name, args = m.groups()
args = ', '.join(p.strip() for p in args.split(','))
m = re.match(r'.*\s+internal::(Bindable)?Matcher<([^>]+)>$', result)
if m:
result_types = [m.group(2)]
else:
result_types = extract_result_types(comment)
if not result_types:
if not comment:
# Only overloads don't have their own doxygen comments; ignore those.
print 'Ignoring "%s"' % name
else:
print 'Cannot determine result type for "%s"' % name
else:
for result_type in result_types:
add_matcher(result_type, name, args, comment)
else:
print '*** Unparsable: "' + declaration + '" ***'
def sort_table(matcher_type, matcher_map):
"""Returns the sorted html table for the given row map."""
table = ''
for key in sorted(matcher_map.keys()):
table += matcher_map[key] + '\n'
return ('<!-- START_%(type)s_MATCHERS -->\n' +
'%(table)s' +
'<!--END_%(type)s_MATCHERS -->') % {
'type': matcher_type,
'table': table,
}
# Parse the ast matchers.
# We alternate between two modes:
# body = True: We parse the definition of a matcher. We need
# to parse the full definition before adding a matcher, as the
# definition might contain static asserts that specify the result
# type.
# body = False: We parse the comments and declaration of the matcher.
comment = ''
declaration = ''
allowed_types = []
body = False
for line in open(MATCHERS_FILE).read().splitlines():
if body:
if line.strip() and line[0] == '}':
if declaration:
act_on_decl(declaration, comment, allowed_types)
comment = ''
declaration = ''
allowed_types = []
body = False
else:
m = re.search(r'is_base_of<([^,]+), NodeType>', line)
if m and m.group(1):
allowed_types += [m.group(1)]
continue
if line.strip() and line.lstrip()[0] == '/':
comment += re.sub(r'/+\s?', '', line) + '\n'
else:
declaration += ' ' + line
if ((not line.strip()) or
line.rstrip()[-1] == ';' or
(line.rstrip()[-1] == '{' and line.rstrip()[-3:] != '= {')):
if line.strip() and line.rstrip()[-1] == '{':
body = True
else:
act_on_decl(declaration, comment, allowed_types)
comment = ''
declaration = ''
allowed_types = []
node_matcher_table = sort_table('DECL', node_matchers)
narrowing_matcher_table = sort_table('NARROWING', narrowing_matchers)
traversal_matcher_table = sort_table('TRAVERSAL', traversal_matchers)
reference = open('../LibASTMatchersReference.html').read()
reference = re.sub(r'<!-- START_DECL_MATCHERS.*END_DECL_MATCHERS -->',
node_matcher_table, reference, flags=re.S)
reference = re.sub(r'<!-- START_NARROWING_MATCHERS.*END_NARROWING_MATCHERS -->',
narrowing_matcher_table, reference, flags=re.S)
reference = re.sub(r'<!-- START_TRAVERSAL_MATCHERS.*END_TRAVERSAL_MATCHERS -->',
traversal_matcher_table, reference, flags=re.S)
with open('../LibASTMatchersReference.html', 'wb') as output:
output.write(reference)
| 14,497 | 36.657143 | 129 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/docs/analyzer/conf.py | # -*- coding: utf-8 -*-
#
# Clang Static Analyzer documentation build configuration file, created by
# sphinx-quickstart on Wed Jan 2 15:54:28 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from datetime import date
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Clang Static Analyzer'
copyright = u'2013-%d, Analyzer Team' % date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.0'
# The full version, including alpha/beta/rc tags.
release = '4.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ClangStaticAnalyzerdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ClangStaticAnalyzer.tex', u'Clang Static Analyzer Documentation',
u'Analyzer Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'clangstaticanalyzer', u'Clang Static Analyzer Documentation',
[u'Analyzer Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ClangStaticAnalyzer', u'Clang Static Analyzer Documentation',
u'Analyzer Team', 'ClangStaticAnalyzer', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| 8,070 | 31.544355 | 80 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/utils/modfuzz.py | #! /usr/bin/env python
# To use:
# 1) Update the 'decls' list below with your fuzzing configuration.
# 2) Run with the clang binary as the command-line argument.
import random
import subprocess
import sys
import os
clang = sys.argv[1]
none_opts = 0.3
class Decl:
def __init__(self, text, depends=[], provides=[], conflicts=[]):
self.text = text
self.depends = depends
self.provides = provides
self.conflicts = conflicts
def valid(self, model):
for i in self.depends:
if i not in model.decls:
return False
for i in self.conflicts:
if i in model.decls:
return False
return True
def apply(self, model, name):
for i in self.provides:
model.decls[i] = True
model.source += self.text % {'name': name}
decls = [
Decl('struct X { int n; };\n', provides=['X'], conflicts=['X']),
Decl('static_assert(X{.n=1}.n == 1, "");\n', depends=['X']),
Decl('X %(name)s;\n', depends=['X']),
]
class FS:
def __init__(self):
self.fs = {}
self.prevfs = {}
def write(self, path, contents):
self.fs[path] = contents
def done(self):
for f, s in self.fs.items():
if self.prevfs.get(f) != s:
f = file(f, 'w')
f.write(s)
f.close()
for f in self.prevfs:
if f not in self.fs:
os.remove(f)
self.prevfs, self.fs = self.fs, {}
fs = FS()
class CodeModel:
def __init__(self):
self.source = ''
self.modules = {}
self.decls = {}
self.i = 0
def make_name(self):
self.i += 1
return 'n' + str(self.i)
def fails(self):
fs.write('module.modulemap',
''.join('module %s { header "%s.h" export * }\n' % (m, m)
for m in self.modules.keys()))
for m, (s, _) in self.modules.items():
fs.write('%s.h' % m, s)
fs.write('main.cc', self.source)
fs.done()
return subprocess.call([clang, '-std=c++11', '-c', '-fmodules', 'main.cc', '-o', '/dev/null']) != 0
def generate():
model = CodeModel()
m = []
try:
for d in mutations(model):
d(model)
m.append(d)
if not model.fails():
return
except KeyboardInterrupt:
print
return True
sys.stdout.write('\nReducing:\n')
sys.stdout.flush()
try:
while True:
assert m, 'got a failure with no steps; broken clang binary?'
i = random.choice(range(len(m)))
x = m[0:i] + m[i+1:]
m2 = CodeModel()
for d in x:
d(m2)
if m2.fails():
m = x
model = m2
else:
sys.stdout.write('.')
sys.stdout.flush()
except KeyboardInterrupt:
# FIXME: Clean out output directory first.
model.fails()
return model
def choose(options):
while True:
i = int(random.uniform(0, len(options) + none_opts))
if i >= len(options):
break
yield options[i]
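# A sketch of the stopping behavior: each draw picks uniformly from
# [0, len(options) + none_opts), and landing in the fractional tail ends
# the stream -- so with none_opts = 0.3 each step has a 0.3/(n + 0.3)
# chance of being the last one (n = len(options)).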
def mutations(model):
options = [create_module, add_top_level_decl]
for opt in choose(options):
yield opt(model, options)
def create_module(model, options):
n = model.make_name()
def go(model):
model.modules[n] = (model.source, model.decls)
(model.source, model.decls) = ('', {})
options += [lambda model, options: add_import(model, options, n)]
return go
def add_top_level_decl(model, options):
n = model.make_name()
d = random.choice([decl for decl in decls if decl.valid(model)])
def go(model):
if not d.valid(model):
return
d.apply(model, n)
return go
def add_import(model, options, module_name):
def go(model):
if module_name in model.modules:
model.source += '#include "%s.h"\n' % module_name
model.decls.update(model.modules[module_name][1])
return go
sys.stdout.write('Finding bug: ')
while True:
if generate():
break
sys.stdout.write('.')
sys.stdout.flush()
| 3,777 | 21.622754 | 103 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/utils/token-delta.py | #!/usr/bin/env python
import os
import re
import subprocess
import sys
import tempfile
###
class DeltaAlgorithm(object):
def __init__(self):
self.cache = set()
def test(self, changes):
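        # Intentionally undefined: subclasses must override test(). The
        # bare name below raises NameError if this base method is called.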
abstract
###
def getTestResult(self, changes):
# There is no reason to cache successful tests because we will
# always reduce the changeset when we see one.
changeset = frozenset(changes)
if changeset in self.cache:
return False
elif not self.test(changes):
self.cache.add(changeset)
return False
else:
return True
def run(self, changes, force=False):
# Make sure the initial test passes, if not then (a) either
# the user doesn't expect monotonicity, and we may end up
# doing O(N^2) tests, or (b) the test is wrong. Avoid the
# O(N^2) case unless user requests it.
if not force:
if not self.getTestResult(changes):
                raise ValueError,'Initial changeset passed to delta does not satisfy the test.'
# Check empty set first to quickly find poor test functions.
if self.getTestResult(set()):
return set()
else:
return self.delta(changes, self.split(changes))
def split(self, S):
"""split(set) -> [sets]
Partition a set into one or two pieces.
"""
# There are many ways to split, we could do a better job with more
# context information (but then the API becomes grosser).
L = list(S)
mid = len(L)//2
if mid==0:
return L,
else:
return L[:mid],L[mid:]
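    # Illustrative behavior (the listed order of a Python set is
    # arbitrary): split(set([0, 1, 2, 3])) returns two halves such as
    # ([0, 1], [2, 3]); a singleton input comes back as a 1-tuple.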
def delta(self, c, sets):
# assert(reduce(set.union, sets, set()) == c)
# If there is nothing left we can remove, we are done.
if len(sets) <= 1:
return c
# Look for a passing subset.
res = self.search(c, sets)
if res is not None:
return res
# Otherwise, partition sets if possible; if not we are done.
refined = sum(map(list, map(self.split, sets)), [])
if len(refined) == len(sets):
return c
return self.delta(c, refined)
def search(self, c, sets):
for i,S in enumerate(sets):
# If test passes on this subset alone, recurse.
if self.getTestResult(S):
return self.delta(S, self.split(S))
# Otherwise if we have more than two sets, see if test
            # passes without this subset.
if len(sets) > 2:
complement = sum(sets[:i] + sets[i+1:],[])
if self.getTestResult(complement):
return self.delta(complement, sets[:i] + sets[i+1:])
###
class Token:
def __init__(self, type, data, flags, file, line, column):
self.type = type
self.data = data
self.flags = flags
self.file = file
self.line = line
self.column = column
kTokenRE = re.compile(r"""([a-z_]+) '(.*)'\t(.*)\tLoc=<(.*):(.*):(.*)>""",
re.DOTALL | re.MULTILINE)
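# kTokenRE is written against 'clang -dump-raw-tokens' output lines of
# the form (illustrative):
#   identifier 'foo'\t [StartOfLine]\tLoc=<t.c:1:1>
# i.e. token kind, quoted spelling, flags, then a Loc=<file:line:col>
# suffix; the six groups feed straight into the Token constructor.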
def getTokens(path):
p = subprocess.Popen(['clang','-dump-raw-tokens',path],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out,err = p.communicate()
tokens = []
collect = None
for ln in err.split('\n'):
# Silly programmers refuse to print in simple machine readable
# formats. Whatever.
if collect is None:
collect = ln
else:
collect = collect + '\n' + ln
if 'Loc=<' in ln and ln.endswith('>'):
ln,collect = collect,None
tokens.append(Token(*kTokenRE.match(ln).groups()))
return tokens
###
class TMBDDelta(DeltaAlgorithm):
def __init__(self, testProgram, tokenLists, log):
def patchName(name, suffix):
base,ext = os.path.splitext(name)
return base + '.' + suffix + ext
super(TMBDDelta, self).__init__()
self.testProgram = testProgram
self.tokenLists = tokenLists
self.tempFiles = [patchName(f,'tmp')
for f,_ in self.tokenLists]
self.targetFiles = [patchName(f,'ok')
for f,_ in self.tokenLists]
self.log = log
self.numTests = 0
def writeFiles(self, changes, fileNames):
assert len(fileNames) == len(self.tokenLists)
byFile = [[] for i in self.tokenLists]
for i,j in changes:
byFile[i].append(j)
for i,(file,tokens) in enumerate(self.tokenLists):
f = open(fileNames[i],'w')
for j in byFile[i]:
f.write(tokens[j])
f.close()
return byFile
def test(self, changes):
self.numTests += 1
byFile = self.writeFiles(changes, self.tempFiles)
if self.log:
print >>sys.stderr, 'TEST - ',
if self.log > 1:
for i,(file,_) in enumerate(self.tokenLists):
indices = byFile[i]
if i:
sys.stderr.write('\n ')
sys.stderr.write('%s:%d tokens: [' % (file,len(byFile[i])))
prev = None
for j in byFile[i]:
if prev is None or j != prev + 1:
if prev:
sys.stderr.write('%d][' % prev)
sys.stderr.write(str(j))
sys.stderr.write(':')
prev = j
if byFile[i]:
sys.stderr.write(str(byFile[i][-1]))
sys.stderr.write('] ')
else:
print >>sys.stderr, ', '.join(['%s:%d tokens' % (file, len(byFile[i]))
for i,(file,_) in enumerate(self.tokenLists)]),
p = subprocess.Popen([self.testProgram] + self.tempFiles)
res = p.wait() == 0
if res:
self.writeFiles(changes, self.targetFiles)
if self.log:
print >>sys.stderr, '=> %s' % res
else:
if res:
print '\nSUCCESS (%d tokens)' % len(changes)
else:
sys.stderr.write('.')
return res
def run(self):
res = super(TMBDDelta, self).run([(i,j)
for i,(file,tokens) in enumerate(self.tokenLists)
for j in range(len(tokens))])
self.writeFiles(res, self.targetFiles)
if not self.log:
print >>sys.stderr
return res
def tokenBasedMultiDelta(program, files, log):
# Read in the lists of tokens.
tokenLists = [(file, [t.data for t in getTokens(file)])
for file in files]
numTokens = sum([len(tokens) for _,tokens in tokenLists])
print "Delta on %s with %d tokens." % (', '.join(files), numTokens)
tbmd = TMBDDelta(program, tokenLists, log)
res = tbmd.run()
print "Finished %s with %d tokens (in %d tests)." % (', '.join(tbmd.targetFiles),
len(res),
tbmd.numTests)
def main():
from optparse import OptionParser, OptionGroup
parser = OptionParser("%prog <test program> {files+}")
parser.add_option("", "--debug", dest="debugLevel",
help="set debug level [default %default]",
action="store", type=int, default=0)
(opts, args) = parser.parse_args()
if len(args) <= 1:
parser.error('Invalid number of arguments.')
program,files = args[0],args[1:]
md = tokenBasedMultiDelta(program, files, log=opts.debugLevel)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print >>sys.stderr,'Interrupted.'
os._exit(1) # Avoid freeing our giant cache.
| 8,168 | 31.416667 | 94 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/utils/ClangDataFormat.py | """lldb data formatters for clang classes.
Usage
--
import this file in your ~/.lldbinit by adding this line:
command script import /path/to/ClangDataFormat.py
After that, instead of getting this:
(lldb) p Tok.Loc
(clang::SourceLocation) $0 = {
(unsigned int) ID = 123582
}
you'll get:
(lldb) p Tok.Loc
(clang::SourceLocation) $4 = "/usr/include/i386/_types.h:37:1" (offset: 123582, file, local)
"""
import lldb
def __lldb_init_module(debugger, internal_dict):
debugger.HandleCommand("type summary add -F ClangDataFormat.SourceLocation_summary clang::SourceLocation")
debugger.HandleCommand("type summary add -F ClangDataFormat.QualType_summary clang::QualType")
debugger.HandleCommand("type summary add -F ClangDataFormat.StringRef_summary llvm::StringRef")
def SourceLocation_summary(srcloc, internal_dict):
return SourceLocation(srcloc).summary()
def QualType_summary(qualty, internal_dict):
return QualType(qualty).summary()
def StringRef_summary(strref, internal_dict):
return StringRef(strref).summary()
class SourceLocation(object):
def __init__(self, srcloc):
self.srcloc = srcloc
self.ID = srcloc.GetChildAtIndex(0).GetValueAsUnsigned()
self.frame = srcloc.GetFrame()
def offset(self):
return getValueFromExpression(self.srcloc, ".getOffset()").GetValueAsUnsigned()
def isInvalid(self):
return self.ID == 0
def isMacro(self):
return getValueFromExpression(self.srcloc, ".isMacroID()").GetValueAsUnsigned()
def isLocal(self, srcmgr_path):
return self.frame.EvaluateExpression("(%s).isLocalSourceLocation(%s)" % (srcmgr_path, getExpressionPath(self.srcloc))).GetValueAsUnsigned()
def getPrint(self, srcmgr_path):
print_str = getValueFromExpression(self.srcloc, ".printToString(%s)" % srcmgr_path)
return print_str.GetSummary()
def summary(self):
if self.isInvalid():
return "<invalid loc>"
srcmgr_path = findObjectExpressionPath("clang::SourceManager", self.frame)
if srcmgr_path:
return "%s (offset: %d, %s, %s)" % (self.getPrint(srcmgr_path), self.offset(), "macro" if self.isMacro() else "file", "local" if self.isLocal(srcmgr_path) else "loaded")
return "(offset: %d, %s)" % (self.offset(), "macro" if self.isMacro() else "file")
class QualType(object):
def __init__(self, qualty):
self.qualty = qualty
def getAsString(self):
std_str = getValueFromExpression(self.qualty, ".getAsString()")
return std_str.GetSummary()
def summary(self):
desc = self.getAsString()
if desc == '"NULL TYPE"':
return "<NULL TYPE>"
return desc
class StringRef(object):
def __init__(self, strref):
self.strref = strref
self.Data_value = strref.GetChildAtIndex(0)
self.Length = strref.GetChildAtIndex(1).GetValueAsUnsigned()
def summary(self):
if self.Length == 0:
return '""'
data = self.Data_value.GetPointeeData(0, self.Length)
error = lldb.SBError()
string = data.ReadRawData(error, 0, data.GetByteSize())
if error.Fail():
return None
return '"%s"' % string
# Key is a (function address, type name) tuple, value is the expression path for
# an object with such a type name from inside that function.
FramePathMapCache = {}
def findObjectExpressionPath(typename, frame):
func_addr = frame.GetFunction().GetStartAddress().GetFileAddress()
key = (func_addr, typename)
try:
return FramePathMapCache[key]
except KeyError:
#print "CACHE MISS"
path = None
obj = findObject(typename, frame)
if obj:
path = getExpressionPath(obj)
FramePathMapCache[key] = path
return path
def findObject(typename, frame):
def getTypename(value):
# FIXME: lldb should provide something like getBaseType
ty = value.GetType()
if ty.IsPointerType() or ty.IsReferenceType():
return ty.GetPointeeType().GetName()
return ty.GetName()
def searchForType(value, searched):
tyname = getTypename(value)
#print "SEARCH:", getExpressionPath(value), value.GetType().GetName()
if tyname == typename:
return value
ty = value.GetType()
if not (ty.IsPointerType() or
ty.IsReferenceType() or
# FIXME: lldb should provide something like getCanonicalType
tyname.startswith("llvm::IntrusiveRefCntPtr<") or
tyname.startswith("llvm::OwningPtr<")):
return None
# FIXME: Hashing for SBTypes does not seem to work correctly, uses the typename instead,
# and not the canonical one unfortunately.
if tyname in searched:
return None
searched.add(tyname)
for i in range(value.GetNumChildren()):
child = value.GetChildAtIndex(i, 0, False)
found = searchForType(child, searched)
if found:
return found
searched = set()
value_list = frame.GetVariables(True, True, True, True)
for val in value_list:
found = searchForType(val, searched)
if found:
return found if not found.TypeIsPointerType() else found.Dereference()
def getValueFromExpression(val, expr):
return val.GetFrame().EvaluateExpression(getExpressionPath(val) + expr)
def getExpressionPath(val):
stream = lldb.SBStream()
val.GetExpressionPath(stream)
return stream.GetData()
| 5,012 | 29.944444 | 172 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/utils/ABITest/ABITestGen.py | #!/usr/bin/env python
from pprint import pprint
import random, atexit, time
from random import randrange
import re
from Enumeration import *
from TypeGen import *
####
class TypePrinter:
def __init__(self, output, outputHeader=None,
outputTests=None, outputDriver=None,
headerName=None, info=None):
self.output = output
self.outputHeader = outputHeader
self.outputTests = outputTests
self.outputDriver = outputDriver
self.writeBody = outputHeader or outputTests or outputDriver
self.types = {}
self.testValues = {}
self.testReturnValues = {}
self.layoutTests = []
self.declarations = set()
if info:
for f in (self.output,self.outputHeader,self.outputTests,self.outputDriver):
if f:
print >>f,info
if self.writeBody:
print >>self.output, '#include <stdio.h>\n'
if self.outputTests:
print >>self.outputTests, '#include <stdio.h>'
print >>self.outputTests, '#include <string.h>'
print >>self.outputTests, '#include <assert.h>\n'
if headerName:
for f in (self.output,self.outputTests,self.outputDriver):
if f is not None:
print >>f, '#include "%s"\n'%(headerName,)
if self.outputDriver:
print >>self.outputDriver, '#include <stdio.h>'
print >>self.outputDriver, '#include <stdlib.h>\n'
print >>self.outputDriver, 'int main(int argc, char **argv) {'
print >>self.outputDriver, ' int index = -1;'
print >>self.outputDriver, ' if (argc > 1) index = atoi(argv[1]);'
def finish(self):
if self.layoutTests:
print >>self.output, 'int main(int argc, char **argv) {'
print >>self.output, ' int index = -1;'
print >>self.output, ' if (argc > 1) index = atoi(argv[1]);'
for i,f in self.layoutTests:
print >>self.output, ' if (index == -1 || index == %d)' % i
print >>self.output, ' %s();' % f
print >>self.output, ' return 0;'
print >>self.output, '}'
if self.outputDriver:
print >>self.outputDriver, ' printf("DONE\\n");'
print >>self.outputDriver, ' return 0;'
print >>self.outputDriver, '}'
def addDeclaration(self, decl):
if decl in self.declarations:
return False
self.declarations.add(decl)
if self.outputHeader:
print >>self.outputHeader, decl
else:
print >>self.output, decl
if self.outputTests:
print >>self.outputTests, decl
return True
def getTypeName(self, T):
name = self.types.get(T)
if name is None:
# Reserve slot
self.types[T] = None
self.types[T] = name = T.getTypeName(self)
return name
def writeLayoutTest(self, i, ty):
tyName = self.getTypeName(ty)
tyNameClean = tyName.replace(' ','_').replace('*','star')
fnName = 'test_%s' % tyNameClean
print >>self.output,'void %s(void) {' % fnName
self.printSizeOfType(' %s'%fnName, tyName, ty, self.output)
self.printAlignOfType(' %s'%fnName, tyName, ty, self.output)
self.printOffsetsOfType(' %s'%fnName, tyName, ty, self.output)
print >>self.output,'}'
print >>self.output
self.layoutTests.append((i,fnName))
def writeFunction(self, i, FT):
args = ', '.join(['%s arg%d'%(self.getTypeName(t),i) for i,t in enumerate(FT.argTypes)])
if not args:
args = 'void'
if FT.returnType is None:
retvalName = None
retvalTypeName = 'void'
else:
retvalTypeName = self.getTypeName(FT.returnType)
if self.writeBody or self.outputTests:
retvalName = self.getTestReturnValue(FT.returnType)
fnName = 'fn%d'%(FT.index,)
if self.outputHeader:
print >>self.outputHeader,'%s %s(%s);'%(retvalTypeName, fnName, args)
elif self.outputTests:
print >>self.outputTests,'%s %s(%s);'%(retvalTypeName, fnName, args)
print >>self.output,'%s %s(%s)'%(retvalTypeName, fnName, args),
if self.writeBody:
print >>self.output, '{'
for i,t in enumerate(FT.argTypes):
self.printValueOfType(' %s'%fnName, 'arg%d'%i, t)
if retvalName is not None:
print >>self.output, ' return %s;'%(retvalName,)
print >>self.output, '}'
else:
print >>self.output, '{}'
print >>self.output
if self.outputDriver:
print >>self.outputDriver, ' if (index == -1 || index == %d) {' % i
print >>self.outputDriver, ' extern void test_%s(void);' % fnName
print >>self.outputDriver, ' test_%s();' % fnName
print >>self.outputDriver, ' }'
if self.outputTests:
if self.outputHeader:
print >>self.outputHeader, 'void test_%s(void);'%(fnName,)
if retvalName is None:
retvalTests = None
else:
retvalTests = self.getTestValuesArray(FT.returnType)
tests = map(self.getTestValuesArray, FT.argTypes)
print >>self.outputTests, 'void test_%s(void) {'%(fnName,)
if retvalTests is not None:
print >>self.outputTests, ' printf("%s: testing return.\\n");'%(fnName,)
print >>self.outputTests, ' for (int i=0; i<%d; ++i) {'%(retvalTests[1],)
args = ', '.join(['%s[%d]'%(t,randrange(l)) for t,l in tests])
print >>self.outputTests, ' %s RV;'%(retvalTypeName,)
print >>self.outputTests, ' %s = %s[i];'%(retvalName, retvalTests[0])
print >>self.outputTests, ' RV = %s(%s);'%(fnName, args)
self.printValueOfType(' %s_RV'%fnName, 'RV', FT.returnType, output=self.outputTests, indent=4)
self.checkTypeValues('RV', '%s[i]' % retvalTests[0], FT.returnType, output=self.outputTests, indent=4)
print >>self.outputTests, ' }'
if tests:
print >>self.outputTests, ' printf("%s: testing arguments.\\n");'%(fnName,)
for i,(array,length) in enumerate(tests):
for j in range(length):
args = ['%s[%d]'%(t,randrange(l)) for t,l in tests]
args[i] = '%s[%d]'%(array,j)
print >>self.outputTests, ' %s(%s);'%(fnName, ', '.join(args),)
print >>self.outputTests, '}'
def getTestReturnValue(self, type):
typeName = self.getTypeName(type)
info = self.testReturnValues.get(typeName)
if info is None:
name = '%s_retval'%(typeName.replace(' ','_').replace('*','star'),)
print >>self.output, '%s %s;'%(typeName,name)
if self.outputHeader:
print >>self.outputHeader, 'extern %s %s;'%(typeName,name)
elif self.outputTests:
print >>self.outputTests, 'extern %s %s;'%(typeName,name)
info = self.testReturnValues[typeName] = name
return info
def getTestValuesArray(self, type):
typeName = self.getTypeName(type)
info = self.testValues.get(typeName)
if info is None:
name = '%s_values'%(typeName.replace(' ','_').replace('*','star'),)
print >>self.outputTests, 'static %s %s[] = {'%(typeName,name)
length = 0
for item in self.getTestValues(type):
print >>self.outputTests, '\t%s,'%(item,)
length += 1
print >>self.outputTests,'};'
info = self.testValues[typeName] = (name,length)
return info
def getTestValues(self, t):
if isinstance(t, BuiltinType):
if t.name=='float':
for i in ['0.0','-1.0','1.0']:
yield i+'f'
elif t.name=='double':
for i in ['0.0','-1.0','1.0']:
yield i
            elif t.name in ('void*', 'void *'):
yield '(void*) 0'
yield '(void*) -1'
else:
yield '(%s) 0'%(t.name,)
yield '(%s) -1'%(t.name,)
yield '(%s) 1'%(t.name,)
elif isinstance(t, EnumType):
for i in range(0, len(t.enumerators)):
yield 'enum%dval%d_%d' % (t.index, i, t.unique_id)
elif isinstance(t, RecordType):
nonPadding = [f for f in t.fields
if not f.isPaddingBitField()]
if not nonPadding:
yield '{ }'
return
# FIXME: Use designated initializers to access non-first
# fields of unions.
if t.isUnion:
for v in self.getTestValues(nonPadding[0]):
yield '{ %s }' % v
return
fieldValues = map(list, map(self.getTestValues, nonPadding))
for i,values in enumerate(fieldValues):
for v in values:
elements = map(random.choice,fieldValues)
elements[i] = v
yield '{ %s }'%(', '.join(elements))
elif isinstance(t, ComplexType):
for t in self.getTestValues(t.elementType):
yield '%s + %s * 1i'%(t,t)
elif isinstance(t, ArrayType):
values = list(self.getTestValues(t.elementType))
if not values:
yield '{ }'
for i in range(t.numElements):
for v in values:
elements = [random.choice(values) for i in range(t.numElements)]
elements[i] = v
yield '{ %s }'%(', '.join(elements))
else:
raise NotImplementedError,'Cannot make tests values of type: "%s"'%(t,)
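    # For a plain builtin such as 'int', the generator above yields the
    # strings '(int) 0', '(int) -1', '(int) 1'; these get spliced into
    # the static C arrays emitted by getTestValuesArray.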
def printSizeOfType(self, prefix, name, t, output=None, indent=2):
print >>output, '%*sprintf("%s: sizeof(%s) = %%ld\\n", (long)sizeof(%s));'%(indent, '', prefix, name, name)
def printAlignOfType(self, prefix, name, t, output=None, indent=2):
print >>output, '%*sprintf("%s: __alignof__(%s) = %%ld\\n", (long)__alignof__(%s));'%(indent, '', prefix, name, name)
def printOffsetsOfType(self, prefix, name, t, output=None, indent=2):
if isinstance(t, RecordType):
for i,f in enumerate(t.fields):
if f.isBitField():
continue
fname = 'field%d' % i
print >>output, '%*sprintf("%s: __builtin_offsetof(%s, %s) = %%ld\\n", (long)__builtin_offsetof(%s, %s));'%(indent, '', prefix, name, fname, name, fname)
def printValueOfType(self, prefix, name, t, output=None, indent=2):
if output is None:
output = self.output
if isinstance(t, BuiltinType):
value_expr = name
if t.name.split(' ')[-1] == '_Bool':
# Hack to work around PR5579.
value_expr = "%s ? 2 : 0" % name
if t.name.endswith('long long'):
code = 'lld'
elif t.name.endswith('long'):
code = 'ld'
elif t.name.split(' ')[-1] in ('_Bool','char','short',
'int','unsigned'):
code = 'd'
elif t.name in ('float','double'):
code = 'f'
elif t.name == 'long double':
code = 'Lf'
else:
code = 'p'
print >>output, '%*sprintf("%s: %s = %%%s\\n", %s);'%(
indent, '', prefix, name, code, value_expr)
elif isinstance(t, EnumType):
print >>output, '%*sprintf("%s: %s = %%d\\n", %s);'%(indent, '', prefix, name, name)
elif isinstance(t, RecordType):
if not t.fields:
print >>output, '%*sprintf("%s: %s (empty)\\n");'%(indent, '', prefix, name)
for i,f in enumerate(t.fields):
if f.isPaddingBitField():
continue
fname = '%s.field%d'%(name,i)
self.printValueOfType(prefix, fname, f, output=output, indent=indent)
elif isinstance(t, ComplexType):
self.printValueOfType(prefix, '(__real %s)'%name, t.elementType, output=output,indent=indent)
self.printValueOfType(prefix, '(__imag %s)'%name, t.elementType, output=output,indent=indent)
elif isinstance(t, ArrayType):
for i in range(t.numElements):
# Access in this fashion as a hackish way to portably
# access vectors.
if t.isVector:
self.printValueOfType(prefix, '((%s*) &%s)[%d]'%(t.elementType,name,i), t.elementType, output=output,indent=indent)
else:
self.printValueOfType(prefix, '%s[%d]'%(name,i), t.elementType, output=output,indent=indent)
else:
raise NotImplementedError,'Cannot print value of type: "%s"'%(t,)
def checkTypeValues(self, nameLHS, nameRHS, t, output=None, indent=2):
prefix = 'foo'
if output is None:
output = self.output
if isinstance(t, BuiltinType):
print >>output, '%*sassert(%s == %s);' % (indent, '', nameLHS, nameRHS)
elif isinstance(t, EnumType):
print >>output, '%*sassert(%s == %s);' % (indent, '', nameLHS, nameRHS)
elif isinstance(t, RecordType):
for i,f in enumerate(t.fields):
if f.isPaddingBitField():
continue
self.checkTypeValues('%s.field%d'%(nameLHS,i), '%s.field%d'%(nameRHS,i),
f, output=output, indent=indent)
if t.isUnion:
break
elif isinstance(t, ComplexType):
self.checkTypeValues('(__real %s)'%nameLHS, '(__real %s)'%nameRHS, t.elementType, output=output,indent=indent)
self.checkTypeValues('(__imag %s)'%nameLHS, '(__imag %s)'%nameRHS, t.elementType, output=output,indent=indent)
elif isinstance(t, ArrayType):
for i in range(t.numElements):
# Access in this fashion as a hackish way to portably
# access vectors.
if t.isVector:
self.checkTypeValues('((%s*) &%s)[%d]'%(t.elementType,nameLHS,i),
'((%s*) &%s)[%d]'%(t.elementType,nameRHS,i),
t.elementType, output=output,indent=indent)
else:
self.checkTypeValues('%s[%d]'%(nameLHS,i), '%s[%d]'%(nameRHS,i),
t.elementType, output=output,indent=indent)
else:
raise NotImplementedError,'Cannot print value of type: "%s"'%(t,)
import sys
def main():
from optparse import OptionParser, OptionGroup
parser = OptionParser("%prog [options] {indices}")
parser.add_option("", "--mode", dest="mode",
help="autogeneration mode (random or linear) [default %default]",
type='choice', choices=('random','linear'), default='linear')
parser.add_option("", "--count", dest="count",
help="autogenerate COUNT functions according to MODE",
type=int, default=0)
parser.add_option("", "--min", dest="minIndex", metavar="N",
help="start autogeneration with the Nth function type [default %default]",
type=int, default=0)
parser.add_option("", "--max", dest="maxIndex", metavar="N",
help="maximum index for random autogeneration [default %default]",
type=int, default=10000000)
parser.add_option("", "--seed", dest="seed",
help="random number generator seed [default %default]",
type=int, default=1)
parser.add_option("", "--use-random-seed", dest="useRandomSeed",
help="use random value for initial random number generator seed",
action='store_true', default=False)
parser.add_option("", "--skip", dest="skipTests",
help="add a test index to skip",
type=int, action='append', default=[])
parser.add_option("-o", "--output", dest="output", metavar="FILE",
help="write output to FILE [default %default]",
type=str, default='-')
parser.add_option("-O", "--output-header", dest="outputHeader", metavar="FILE",
help="write header file for output to FILE [default %default]",
type=str, default=None)
parser.add_option("-T", "--output-tests", dest="outputTests", metavar="FILE",
help="write function tests to FILE [default %default]",
type=str, default=None)
parser.add_option("-D", "--output-driver", dest="outputDriver", metavar="FILE",
help="write test driver to FILE [default %default]",
type=str, default=None)
parser.add_option("", "--test-layout", dest="testLayout", metavar="FILE",
help="test structure layout",
action='store_true', default=False)
group = OptionGroup(parser, "Type Enumeration Options")
# Builtins - Ints
group.add_option("", "--no-char", dest="useChar",
help="do not generate char types",
action="store_false", default=True)
group.add_option("", "--no-short", dest="useShort",
help="do not generate short types",
action="store_false", default=True)
group.add_option("", "--no-int", dest="useInt",
help="do not generate int types",
action="store_false", default=True)
group.add_option("", "--no-long", dest="useLong",
help="do not generate long types",
action="store_false", default=True)
group.add_option("", "--no-long-long", dest="useLongLong",
help="do not generate long long types",
action="store_false", default=True)
group.add_option("", "--no-unsigned", dest="useUnsigned",
help="do not generate unsigned integer types",
action="store_false", default=True)
# Other builtins
group.add_option("", "--no-bool", dest="useBool",
help="do not generate bool types",
action="store_false", default=True)
group.add_option("", "--no-float", dest="useFloat",
help="do not generate float types",
action="store_false", default=True)
group.add_option("", "--no-double", dest="useDouble",
help="do not generate double types",
action="store_false", default=True)
group.add_option("", "--no-long-double", dest="useLongDouble",
help="do not generate long double types",
action="store_false", default=True)
group.add_option("", "--no-void-pointer", dest="useVoidPointer",
help="do not generate void* types",
action="store_false", default=True)
# Enumerations
group.add_option("", "--no-enums", dest="useEnum",
help="do not generate enum types",
action="store_false", default=True)
# Derived types
group.add_option("", "--no-array", dest="useArray",
help="do not generate record types",
action="store_false", default=True)
group.add_option("", "--no-complex", dest="useComplex",
help="do not generate complex types",
action="store_false", default=True)
group.add_option("", "--no-record", dest="useRecord",
help="do not generate record types",
action="store_false", default=True)
group.add_option("", "--no-union", dest="recordUseUnion",
help="do not generate union types",
action="store_false", default=True)
group.add_option("", "--no-vector", dest="useVector",
help="do not generate vector types",
action="store_false", default=True)
group.add_option("", "--no-bit-field", dest="useBitField",
help="do not generate bit-field record members",
action="store_false", default=True)
group.add_option("", "--no-builtins", dest="useBuiltins",
help="do not use any types",
action="store_false", default=True)
# Tuning
group.add_option("", "--no-function-return", dest="functionUseReturn",
help="do not generate return types for functions",
action="store_false", default=True)
group.add_option("", "--vector-types", dest="vectorTypes",
help="comma separated list of vector types (e.g., v2i32) [default %default]",
action="store", type=str, default='v2i16, v1i64, v2i32, v4i16, v8i8, v2f32, v2i64, v4i32, v8i16, v16i8, v2f64, v4f32, v16f32', metavar="N")
group.add_option("", "--bit-fields", dest="bitFields",
help="comma separated list 'type:width' bit-field specifiers [default %default]",
action="store", type=str, default=(
"char:0,char:4,int:0,unsigned:1,int:1,int:4,int:13,int:24"))
group.add_option("", "--max-args", dest="functionMaxArgs",
help="maximum number of arguments per function [default %default]",
action="store", type=int, default=4, metavar="N")
group.add_option("", "--max-array", dest="arrayMaxSize",
help="maximum array size [default %default]",
action="store", type=int, default=4, metavar="N")
group.add_option("", "--max-record", dest="recordMaxSize",
help="maximum number of fields per record [default %default]",
action="store", type=int, default=4, metavar="N")
group.add_option("", "--max-record-depth", dest="recordMaxDepth",
help="maximum nested structure depth [default %default]",
action="store", type=int, default=None, metavar="N")
parser.add_option_group(group)
(opts, args) = parser.parse_args()
if not opts.useRandomSeed:
random.seed(opts.seed)
# Construct type generator
builtins = []
if opts.useBuiltins:
ints = []
if opts.useChar: ints.append(('char',1))
if opts.useShort: ints.append(('short',2))
if opts.useInt: ints.append(('int',4))
# FIXME: Wrong size.
if opts.useLong: ints.append(('long',4))
if opts.useLongLong: ints.append(('long long',8))
if opts.useUnsigned:
ints = ([('unsigned %s'%i,s) for i,s in ints] +
[('signed %s'%i,s) for i,s in ints])
builtins.extend(ints)
if opts.useBool: builtins.append(('_Bool',1))
if opts.useFloat: builtins.append(('float',4))
if opts.useDouble: builtins.append(('double',8))
if opts.useLongDouble: builtins.append(('long double',16))
# FIXME: Wrong size.
if opts.useVoidPointer: builtins.append(('void*',4))
btg = FixedTypeGenerator([BuiltinType(n,s) for n,s in builtins])
bitfields = []
for specifier in opts.bitFields.split(','):
if not specifier.strip():
continue
name,width = specifier.strip().split(':', 1)
bitfields.append(BuiltinType(name,None,int(width)))
bftg = FixedTypeGenerator(bitfields)
charType = BuiltinType('char',1)
shortType = BuiltinType('short',2)
intType = BuiltinType('int',4)
longlongType = BuiltinType('long long',8)
floatType = BuiltinType('float',4)
doubleType = BuiltinType('double',8)
sbtg = FixedTypeGenerator([charType, intType, floatType, doubleType])
atg = AnyTypeGenerator()
artg = AnyTypeGenerator()
def makeGenerator(atg, subgen, subfieldgen, useRecord, useArray, useBitField):
atg.addGenerator(btg)
if useBitField and opts.useBitField:
atg.addGenerator(bftg)
if useRecord and opts.useRecord:
assert subgen
atg.addGenerator(RecordTypeGenerator(subfieldgen, opts.recordUseUnion,
opts.recordMaxSize))
if opts.useComplex:
# FIXME: Allow overriding builtins here
atg.addGenerator(ComplexTypeGenerator(sbtg))
if useArray and opts.useArray:
assert subgen
atg.addGenerator(ArrayTypeGenerator(subgen, opts.arrayMaxSize))
if opts.useVector:
vTypes = []
for i,t in enumerate(opts.vectorTypes.split(',')):
m = re.match('v([1-9][0-9]*)([if][1-9][0-9]*)', t.strip())
if not m:
parser.error('Invalid vector type: %r' % t)
count,kind = m.groups()
count = int(count)
type = { 'i8' : charType,
'i16' : shortType,
'i32' : intType,
'i64' : longlongType,
'f32' : floatType,
'f64' : doubleType,
}.get(kind)
if not type:
parser.error('Invalid vector type: %r' % t)
vTypes.append(ArrayType(i, True, type, count * type.size))
atg.addGenerator(FixedTypeGenerator(vTypes))
if opts.useEnum:
atg.addGenerator(EnumTypeGenerator([None, '-1', '1', '1u'], 1, 4))
if opts.recordMaxDepth is None:
# Fully recursive, just avoid top-level arrays.
subFTG = AnyTypeGenerator()
subTG = AnyTypeGenerator()
atg = AnyTypeGenerator()
makeGenerator(subFTG, atg, atg, True, True, True)
makeGenerator(subTG, atg, subFTG, True, True, False)
makeGenerator(atg, subTG, subFTG, True, False, False)
else:
# Make a chain of type generators, each builds smaller
# structures.
base = AnyTypeGenerator()
fbase = AnyTypeGenerator()
makeGenerator(base, None, None, False, False, False)
makeGenerator(fbase, None, None, False, False, True)
for i in range(opts.recordMaxDepth):
n = AnyTypeGenerator()
fn = AnyTypeGenerator()
makeGenerator(n, base, fbase, True, True, False)
makeGenerator(fn, base, fbase, True, True, True)
base = n
fbase = fn
atg = AnyTypeGenerator()
makeGenerator(atg, base, fbase, True, False, False)
if opts.testLayout:
ftg = atg
else:
ftg = FunctionTypeGenerator(atg, opts.functionUseReturn, opts.functionMaxArgs)
# Override max,min,count if finite
if opts.maxIndex is None:
if ftg.cardinality is aleph0:
opts.maxIndex = 10000000
else:
opts.maxIndex = ftg.cardinality
opts.maxIndex = min(opts.maxIndex, ftg.cardinality)
opts.minIndex = max(0,min(opts.maxIndex-1, opts.minIndex))
if not opts.mode=='random':
opts.count = min(opts.count, opts.maxIndex-opts.minIndex)
if opts.output=='-':
output = sys.stdout
else:
output = open(opts.output,'w')
atexit.register(lambda: output.close())
outputHeader = None
if opts.outputHeader:
outputHeader = open(opts.outputHeader,'w')
atexit.register(lambda: outputHeader.close())
outputTests = None
if opts.outputTests:
outputTests = open(opts.outputTests,'w')
atexit.register(lambda: outputTests.close())
outputDriver = None
if opts.outputDriver:
outputDriver = open(opts.outputDriver,'w')
atexit.register(lambda: outputDriver.close())
info = ''
info += '// %s\n'%(' '.join(sys.argv),)
info += '// Generated: %s\n'%(time.strftime('%Y-%m-%d %H:%M'),)
info += '// Cardinality of function generator: %s\n'%(ftg.cardinality,)
info += '// Cardinality of type generator: %s\n'%(atg.cardinality,)
if opts.testLayout:
info += '\n#include <stdio.h>'
P = TypePrinter(output,
outputHeader=outputHeader,
outputTests=outputTests,
outputDriver=outputDriver,
headerName=opts.outputHeader,
info=info)
def write(N):
try:
FT = ftg.get(N)
except RuntimeError,e:
if e.args[0]=='maximum recursion depth exceeded':
print >>sys.stderr,'WARNING: Skipped %d, recursion limit exceeded (bad arguments?)'%(N,)
return
raise
if opts.testLayout:
P.writeLayoutTest(N, FT)
else:
P.writeFunction(N, FT)
if args:
[write(int(a)) for a in args]
skipTests = set(opts.skipTests)
for i in range(opts.count):
if opts.mode=='linear':
index = opts.minIndex + i
else:
index = opts.minIndex + int((opts.maxIndex-opts.minIndex) * random.random())
if index in skipTests:
continue
write(index)
P.finish()
if __name__=='__main__':
main()
| 30,083 | 43.701337 | 170 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/utils/ABITest/Enumeration.py | """Utilities for enumeration of finite and countably infinite sets.
"""
###
# Countable iteration
# Simplifies some calculations
class Aleph0(int):
_singleton = None
def __new__(type):
if type._singleton is None:
type._singleton = int.__new__(type)
return type._singleton
def __repr__(self): return '<aleph0>'
def __str__(self): return 'inf'
def __cmp__(self, b):
return 1
def __sub__(self, b):
raise ValueError,"Cannot subtract aleph0"
__rsub__ = __sub__
def __add__(self, b):
return self
__radd__ = __add__
def __mul__(self, b):
if b == 0: return b
return self
__rmul__ = __mul__
def __floordiv__(self, b):
if b == 0: raise ZeroDivisionError
return self
__rfloordiv__ = __floordiv__
__truediv__ = __floordiv__
    __rtruediv__ = __floordiv__
__div__ = __floordiv__
__rdiv__ = __floordiv__
def __pow__(self, b):
if b == 0: return 1
return self
aleph0 = Aleph0()
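# Illustrative example (not part of the original module): aleph0 absorbs
# ordinary arithmetic, which is what lets the generators built on top of
# this module compute cardinalities uniformly for finite and infinite sets.
def _demoAleph0():
    assert aleph0 + 1 is aleph0    # addition saturates
    assert 2 * aleph0 is aleph0    # so does multiplication...
    assert 0 * aleph0 == 0         # ...except by zero
    assert aleph0 ** 0 == 1        # and raising to the zeroth power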
def base(line):
return line*(line+1)//2
def pairToN((x,y)):
line,index = x+y,y
return base(line)+index
def getNthPairInfo(N):
# Avoid various singularities
if N==0:
return (0,0)
# Gallop to find bounds for line
line = 1
next = 2
while base(next)<=N:
line = next
next = line << 1
# Binary search for starting line
lo = line
hi = line<<1
while lo + 1 != hi:
#assert base(lo) <= N < base(hi)
mid = (lo + hi)>>1
if base(mid)<=N:
lo = mid
else:
hi = mid
line = lo
return line, N - base(line)
def getNthPair(N):
line,index = getNthPairInfo(N)
return (line - index, index)
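# Illustrative example (not part of the original module): getNthPair walks
# the plane along anti-diagonals, so every pair of naturals appears at some
# finite index.
def _demoNthPair():
    # First six pairs: (0, 0) (1, 0) (0, 1) (2, 0) (1, 1) (0, 2)
    return [getNthPair(n) for n in range(6)]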
def getNthPairBounded(N,W=aleph0,H=aleph0,useDivmod=False):
"""getNthPairBounded(N, W, H) -> (x, y)
Return the N-th pair such that 0 <= x < W and 0 <= y < H."""
if W <= 0 or H <= 0:
raise ValueError,"Invalid bounds"
elif N >= W*H:
raise ValueError,"Invalid input (out of bounds)"
# Simple case...
if W is aleph0 and H is aleph0:
return getNthPair(N)
# Otherwise simplify by assuming W < H
if H < W:
x,y = getNthPairBounded(N,H,W,useDivmod=useDivmod)
return y,x
if useDivmod:
return N%W,N//W
else:
# Conceptually we want to slide a diagonal line across a
# rectangle. This gives more interesting results for large
# bounds than using divmod.
# If in lower left, just return as usual
cornerSize = base(W)
if N < cornerSize:
return getNthPair(N)
# Otherwise if in upper right, subtract from corner
if H is not aleph0:
M = W*H - N - 1
if M < cornerSize:
x,y = getNthPair(M)
return (W-1-x,H-1-y)
# Otherwise, compile line and index from number of times we
# wrap.
N = N - cornerSize
index,offset = N%W,N//W
# p = (W-1, 1+offset) + (-1,1)*index
return (W-1-index, 1+offset+index)
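# Illustrative example (not part of the original module): with W=3 and H
# infinite, the triangular corner of size base(3)=6 is filled first, then
# the sliding diagonal takes over.
def _demoNthPairBounded():
    # First nine pairs: (0,0) (1,0) (0,1) (2,0) (1,1) (0,2) (2,1) (1,2) (0,3)
    return [getNthPairBounded(n, 3) for n in range(9)]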
def getNthPairBoundedChecked(N,W=aleph0,H=aleph0,useDivmod=False,GNP=getNthPairBounded):
x,y = GNP(N,W,H,useDivmod)
assert 0 <= x < W and 0 <= y < H
return x,y
def getNthNTuple(N, W, H=aleph0, useLeftToRight=False):
"""getNthNTuple(N, W, H) -> (x_0, x_1, ..., x_W)
Return the N-th W-tuple, where for 0 <= x_i < H."""
if useLeftToRight:
elts = [None]*W
for i in range(W):
elts[i],N = getNthPairBounded(N, H)
return tuple(elts)
else:
if W==0:
return ()
elif W==1:
return (N,)
elif W==2:
return getNthPairBounded(N, H, H)
else:
LW,RW = W//2, W - (W//2)
L,R = getNthPairBounded(N, H**LW, H**RW)
return (getNthNTuple(L,LW,H=H,useLeftToRight=useLeftToRight) +
getNthNTuple(R,RW,H=H,useLeftToRight=useLeftToRight))
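# Illustrative example (not part of the original module): tuples are built
# by splitting the width in half and pairing the two halves, so a finite
# element bound H covers exactly H**W tuples.
def _demoNthNTuple():
    # Enumerates all 2**3 == 8 width-3 tuples with entries below 2.
    return [getNthNTuple(n, 3, 2) for n in range(8)]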
def getNthNTupleChecked(N, W, H=aleph0, useLeftToRight=False, GNT=getNthNTuple):
t = GNT(N,W,H,useLeftToRight)
assert len(t) == W
for i in t:
assert i < H
return t
def getNthTuple(N, maxSize=aleph0, maxElement=aleph0, useDivmod=False, useLeftToRight=False):
"""getNthTuple(N, maxSize, maxElement) -> x
Return the N-th tuple where len(x) < maxSize and for y in x, 0 <=
y < maxElement."""
# All zero sized tuples are isomorphic, don't ya know.
if N == 0:
return ()
N -= 1
if maxElement is not aleph0:
if maxSize is aleph0:
raise NotImplementedError,'Max element size without max size unhandled'
bounds = [maxElement**i for i in range(1, maxSize+1)]
S,M = getNthPairVariableBounds(N, bounds)
else:
S,M = getNthPairBounded(N, maxSize, useDivmod=useDivmod)
return getNthNTuple(M, S+1, maxElement, useLeftToRight=useLeftToRight)
def getNthTupleChecked(N, maxSize=aleph0, maxElement=aleph0,
useDivmod=False, useLeftToRight=False, GNT=getNthTuple):
# FIXME: maxsize is inclusive
t = GNT(N,maxSize,maxElement,useDivmod,useLeftToRight)
assert len(t) <= maxSize
for i in t:
assert i < maxElement
return t
def getNthPairVariableBounds(N, bounds):
"""getNthPairVariableBounds(N, bounds) -> (x, y)
Given a finite list of bounds (which may be finite or aleph0),
return the N-th pair such that 0 <= x < len(bounds) and 0 <= y <
bounds[x]."""
if not bounds:
raise ValueError,"Invalid bounds"
if not (0 <= N < sum(bounds)):
raise ValueError,"Invalid input (out of bounds)"
level = 0
active = range(len(bounds))
active.sort(key=lambda i: bounds[i])
prevLevel = 0
for i,index in enumerate(active):
level = bounds[index]
W = len(active) - i
if level is aleph0:
H = aleph0
else:
H = level - prevLevel
levelSize = W*H
if N<levelSize: # Found the level
idelta,delta = getNthPairBounded(N, W, H)
return active[i+idelta],prevLevel+delta
else:
N -= levelSize
prevLevel = level
else:
        raise RuntimeError,"Unexpected loop completion"
def getNthPairVariableBoundsChecked(N, bounds, GNVP=getNthPairVariableBounds):
x,y = GNVP(N,bounds)
assert 0 <= x < len(bounds) and 0 <= y < bounds[x]
return (x,y)
###
def testPairs():
W = 3
H = 6
a = [[' ' for x in range(10)] for y in range(10)]
b = [[' ' for x in range(10)] for y in range(10)]
for i in range(min(W*H,40)):
x,y = getNthPairBounded(i,W,H)
x2,y2 = getNthPairBounded(i,W,H,useDivmod=True)
print i,(x,y),(x2,y2)
a[y][x] = '%2d'%i
b[y2][x2] = '%2d'%i
print '-- a --'
for ln in a[::-1]:
if ''.join(ln).strip():
print ' '.join(ln)
print '-- b --'
for ln in b[::-1]:
if ''.join(ln).strip():
print ' '.join(ln)
def testPairsVB():
bounds = [2,2,4,aleph0,5,aleph0]
a = [[' ' for x in range(15)] for y in range(15)]
b = [[' ' for x in range(15)] for y in range(15)]
for i in range(min(sum(bounds),40)):
x,y = getNthPairVariableBounds(i, bounds)
print i,(x,y)
a[y][x] = '%2d'%i
print '-- a --'
for ln in a[::-1]:
if ''.join(ln).strip():
print ' '.join(ln)
###
# Toggle to use checked versions of enumeration routines.
if False:
getNthPairVariableBounds = getNthPairVariableBoundsChecked
getNthPairBounded = getNthPairBoundedChecked
getNthNTuple = getNthNTupleChecked
getNthTuple = getNthTupleChecked
if __name__ == '__main__':
testPairs()
testPairsVB()
| 7,814 | 27.212996 | 93 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/utils/ABITest/TypeGen.py | """Flexible enumeration of C types."""
from Enumeration import *
# TODO:
# - struct improvements (flexible arrays, packed &
# unpacked, alignment)
# - objective-c qualified id
# - anonymous / transparent unions
# - VLAs
# - block types
# - K&R functions
# - pass arguments of different types (test extension, transparent union)
# - varargs
###
# Actual type types
class Type:
def isBitField(self):
return False
def isPaddingBitField(self):
return False
def getTypeName(self, printer):
name = 'T%d' % len(printer.types)
typedef = self.getTypedefDef(name, printer)
printer.addDeclaration(typedef)
return name
class BuiltinType(Type):
def __init__(self, name, size, bitFieldSize=None):
self.name = name
self.size = size
self.bitFieldSize = bitFieldSize
def isBitField(self):
return self.bitFieldSize is not None
def isPaddingBitField(self):
        return self.bitFieldSize == 0
def getBitFieldSize(self):
assert self.isBitField()
return self.bitFieldSize
def getTypeName(self, printer):
return self.name
def sizeof(self):
return self.size
def __str__(self):
return self.name
class EnumType(Type):
unique_id = 0
def __init__(self, index, enumerators):
self.index = index
self.enumerators = enumerators
self.unique_id = self.__class__.unique_id
self.__class__.unique_id += 1
def getEnumerators(self):
result = ''
for i, init in enumerate(self.enumerators):
if i > 0:
result = result + ', '
result = result + 'enum%dval%d_%d' % (self.index, i, self.unique_id)
if init:
result = result + ' = %s' % (init)
return result
def __str__(self):
return 'enum { %s }' % (self.getEnumerators())
def getTypedefDef(self, name, printer):
return 'typedef enum %s { %s } %s;'%(name, self.getEnumerators(), name)
class RecordType(Type):
def __init__(self, index, isUnion, fields):
self.index = index
self.isUnion = isUnion
self.fields = fields
self.name = None
def __str__(self):
def getField(t):
if t.isBitField():
return "%s : %d;" % (t, t.getBitFieldSize())
else:
return "%s;" % t
return '%s { %s }'%(('struct','union')[self.isUnion],
' '.join(map(getField, self.fields)))
def getTypedefDef(self, name, printer):
def getField((i, t)):
if t.isBitField():
if t.isPaddingBitField():
return '%s : 0;'%(printer.getTypeName(t),)
else:
return '%s field%d : %d;'%(printer.getTypeName(t),i,
t.getBitFieldSize())
else:
return '%s field%d;'%(printer.getTypeName(t),i)
fields = map(getField, enumerate(self.fields))
# Name the struct for more readable LLVM IR.
return 'typedef %s %s { %s } %s;'%(('struct','union')[self.isUnion],
name, ' '.join(fields), name)
class ArrayType(Type):
def __init__(self, index, isVector, elementType, size):
if isVector:
# Note that for vectors, this is the size in bytes.
assert size > 0
else:
assert size is None or size >= 0
self.index = index
self.isVector = isVector
self.elementType = elementType
self.size = size
if isVector:
eltSize = self.elementType.sizeof()
assert not (self.size % eltSize)
self.numElements = self.size // eltSize
else:
self.numElements = self.size
def __str__(self):
if self.isVector:
return 'vector (%s)[%d]'%(self.elementType,self.size)
elif self.size is not None:
return '(%s)[%d]'%(self.elementType,self.size)
else:
return '(%s)[]'%(self.elementType,)
def getTypedefDef(self, name, printer):
elementName = printer.getTypeName(self.elementType)
if self.isVector:
return 'typedef %s %s __attribute__ ((vector_size (%d)));'%(elementName,
name,
self.size)
else:
if self.size is None:
sizeStr = ''
else:
sizeStr = str(self.size)
return 'typedef %s %s[%s];'%(elementName, name, sizeStr)
class ComplexType(Type):
def __init__(self, index, elementType):
self.index = index
self.elementType = elementType
def __str__(self):
return '_Complex (%s)'%(self.elementType)
def getTypedefDef(self, name, printer):
return 'typedef _Complex %s %s;'%(printer.getTypeName(self.elementType), name)
class FunctionType(Type):
def __init__(self, index, returnType, argTypes):
self.index = index
self.returnType = returnType
self.argTypes = argTypes
def __str__(self):
if self.returnType is None:
rt = 'void'
else:
rt = str(self.returnType)
if not self.argTypes:
at = 'void'
else:
at = ', '.join(map(str, self.argTypes))
return '%s (*)(%s)'%(rt, at)
def getTypedefDef(self, name, printer):
if self.returnType is None:
rt = 'void'
else:
rt = str(self.returnType)
if not self.argTypes:
at = 'void'
else:
at = ', '.join(map(str, self.argTypes))
return 'typedef %s (*%s)(%s);'%(rt, name, at)
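# Illustrative example (not part of the original module): a FunctionType
# over builtin element types renders as a function-pointer typedef. The
# names used here are made up for the demonstration.
def _demoFunctionType():
    ft = FunctionType(0, BuiltinType('int', 4), [BuiltinType('char', 1)])
    assert str(ft) == 'int (*)(char)'
    assert ft.getTypedefDef('T0', None) == 'typedef int (*T0)(char);'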
###
# Type enumerators
class TypeGenerator(object):
def __init__(self):
self.cache = {}
def setCardinality(self):
        abstract  # deliberately undefined name; subclasses must override
def get(self, N):
T = self.cache.get(N)
if T is None:
assert 0 <= N < self.cardinality
T = self.cache[N] = self.generateType(N)
return T
def generateType(self, N):
        abstract  # deliberately undefined name; subclasses must override
class FixedTypeGenerator(TypeGenerator):
def __init__(self, types):
TypeGenerator.__init__(self)
self.types = types
self.setCardinality()
def setCardinality(self):
self.cardinality = len(self.types)
def generateType(self, N):
return self.types[N]
# Factorial
def fact(n):
result = 1
while n > 0:
result = result * n
n = n - 1
return result
# Compute the number of combinations (n choose k)
def num_combinations(n, k):
return fact(n) / (fact(k) * fact(n - k))
# Enumerate the combinations choosing k elements from the list of values
def combinations(values, k):
# From ActiveState Recipe 190465: Generator for permutations,
# combinations, selections of a sequence
if k==0: yield []
else:
for i in xrange(len(values)-k+1):
for cc in combinations(values[i+1:],k-1):
yield [values[i]]+cc
class EnumTypeGenerator(TypeGenerator):
def __init__(self, values, minEnumerators, maxEnumerators):
TypeGenerator.__init__(self)
self.values = values
self.minEnumerators = minEnumerators
self.maxEnumerators = maxEnumerators
self.setCardinality()
def setCardinality(self):
self.cardinality = 0
for num in range(self.minEnumerators, self.maxEnumerators + 1):
self.cardinality += num_combinations(len(self.values), num)
def generateType(self, n):
# Figure out the number of enumerators in this type
numEnumerators = self.minEnumerators
valuesCovered = 0
while numEnumerators < self.maxEnumerators:
comb = num_combinations(len(self.values), numEnumerators)
if valuesCovered + comb > n:
break
numEnumerators = numEnumerators + 1
valuesCovered += comb
# Find the requested combination of enumerators and build a
# type from it.
i = 0
for enumerators in combinations(self.values, numEnumerators):
if i == n - valuesCovered:
return EnumType(n, enumerators)
i = i + 1
assert False
class ComplexTypeGenerator(TypeGenerator):
def __init__(self, typeGen):
TypeGenerator.__init__(self)
self.typeGen = typeGen
self.setCardinality()
def setCardinality(self):
self.cardinality = self.typeGen.cardinality
def generateType(self, N):
return ComplexType(N, self.typeGen.get(N))
class VectorTypeGenerator(TypeGenerator):
def __init__(self, typeGen, sizes):
TypeGenerator.__init__(self)
self.typeGen = typeGen
self.sizes = tuple(map(int,sizes))
self.setCardinality()
def setCardinality(self):
self.cardinality = len(self.sizes)*self.typeGen.cardinality
def generateType(self, N):
S,T = getNthPairBounded(N, len(self.sizes), self.typeGen.cardinality)
return ArrayType(N, True, self.typeGen.get(T), self.sizes[S])
class FixedArrayTypeGenerator(TypeGenerator):
def __init__(self, typeGen, sizes):
TypeGenerator.__init__(self)
self.typeGen = typeGen
        self.sizes = tuple(sizes)
self.setCardinality()
def setCardinality(self):
self.cardinality = len(self.sizes)*self.typeGen.cardinality
def generateType(self, N):
S,T = getNthPairBounded(N, len(self.sizes), self.typeGen.cardinality)
        return ArrayType(N, False, self.typeGen.get(T), self.sizes[S])
class ArrayTypeGenerator(TypeGenerator):
def __init__(self, typeGen, maxSize, useIncomplete=False, useZero=False):
TypeGenerator.__init__(self)
self.typeGen = typeGen
self.useIncomplete = useIncomplete
self.useZero = useZero
self.maxSize = int(maxSize)
self.W = useIncomplete + useZero + self.maxSize
self.setCardinality()
def setCardinality(self):
self.cardinality = self.W * self.typeGen.cardinality
def generateType(self, N):
S,T = getNthPairBounded(N, self.W, self.typeGen.cardinality)
if self.useIncomplete:
if S==0:
size = None
S = None
else:
S = S - 1
if S is not None:
if self.useZero:
size = S
else:
size = S + 1
return ArrayType(N, False, self.typeGen.get(T), size)
class RecordTypeGenerator(TypeGenerator):
def __init__(self, typeGen, useUnion, maxSize):
TypeGenerator.__init__(self)
self.typeGen = typeGen
self.useUnion = bool(useUnion)
self.maxSize = int(maxSize)
self.setCardinality()
def setCardinality(self):
M = 1 + self.useUnion
if self.maxSize is aleph0:
S = aleph0 * self.typeGen.cardinality
else:
S = 0
for i in range(self.maxSize+1):
S += M * (self.typeGen.cardinality ** i)
self.cardinality = S
def generateType(self, N):
isUnion,I = False,N
if self.useUnion:
isUnion,I = (I&1),I>>1
fields = map(self.typeGen.get,getNthTuple(I,self.maxSize,self.typeGen.cardinality))
return RecordType(N, isUnion, fields)
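# Illustrative example (not part of the original module): with two base
# types and maxSize 2, a struct-only generator covers 1 + 2 + 4 = 7 records
# (the empty struct, two one-field and four two-field structs).
def _demoRecordCardinality():
    base = FixedTypeGenerator([BuiltinType('char', 1), BuiltinType('int', 4)])
    assert RecordTypeGenerator(base, False, 2).cardinality == 7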
class FunctionTypeGenerator(TypeGenerator):
def __init__(self, typeGen, useReturn, maxSize):
TypeGenerator.__init__(self)
self.typeGen = typeGen
self.useReturn = useReturn
self.maxSize = maxSize
self.setCardinality()
def setCardinality(self):
if self.maxSize is aleph0:
            S = aleph0 * self.typeGen.cardinality
elif self.useReturn:
S = 0
for i in range(1,self.maxSize+1+1):
S += self.typeGen.cardinality ** i
else:
S = 0
for i in range(self.maxSize+1):
S += self.typeGen.cardinality ** i
self.cardinality = S
def generateType(self, N):
if self.useReturn:
# Skip the empty tuple
argIndices = getNthTuple(N+1, self.maxSize+1, self.typeGen.cardinality)
retIndex,argIndices = argIndices[0],argIndices[1:]
retTy = self.typeGen.get(retIndex)
else:
retTy = None
argIndices = getNthTuple(N, self.maxSize, self.typeGen.cardinality)
args = map(self.typeGen.get, argIndices)
return FunctionType(N, retTy, args)
class AnyTypeGenerator(TypeGenerator):
def __init__(self):
TypeGenerator.__init__(self)
self.generators = []
self.bounds = []
self.setCardinality()
self._cardinality = None
def getCardinality(self):
if self._cardinality is None:
return aleph0
else:
return self._cardinality
def setCardinality(self):
self.bounds = [g.cardinality for g in self.generators]
self._cardinality = sum(self.bounds)
cardinality = property(getCardinality, None)
def addGenerator(self, g):
self.generators.append(g)
for i in range(100):
prev = self._cardinality
self._cardinality = None
for g in self.generators:
g.setCardinality()
self.setCardinality()
if (self._cardinality is aleph0) or prev==self._cardinality:
break
else:
raise RuntimeError,"Infinite loop in setting cardinality"
def generateType(self, N):
index,M = getNthPairVariableBounds(N, self.bounds)
return self.generators[index].get(M)
def test():
fbtg = FixedTypeGenerator([BuiltinType('char', 4),
BuiltinType('char', 4, 0),
BuiltinType('int', 4, 5)])
fields1 = AnyTypeGenerator()
fields1.addGenerator( fbtg )
fields0 = AnyTypeGenerator()
fields0.addGenerator( fbtg )
# fields0.addGenerator( RecordTypeGenerator(fields1, False, 4) )
btg = FixedTypeGenerator([BuiltinType('char', 4),
BuiltinType('int', 4)])
etg = EnumTypeGenerator([None, '-1', '1', '1u'], 0, 3)
atg = AnyTypeGenerator()
atg.addGenerator( btg )
atg.addGenerator( RecordTypeGenerator(fields0, False, 4) )
atg.addGenerator( etg )
print 'Cardinality:',atg.cardinality
for i in range(100):
if i == atg.cardinality:
try:
atg.get(i)
raise RuntimeError,"Cardinality was wrong"
except AssertionError:
break
print '%4d: %s'%(i, atg.get(i))
if __name__ == '__main__':
test()
| 14,959 | 30.428571 | 91 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/utils/CIndex/completion_logger_server.py | #!/usr/bin/env python
import sys
from socket import *
from time import strftime
import datetime
def main():
if len(sys.argv) < 4:
print "completion_logger_server.py <listen address> <listen port> <log file>"
exit(1)
host = sys.argv[1]
port = int(sys.argv[2])
buf = 1024 * 8
addr = (host,port)
# Create socket and bind to address
UDPSock = socket(AF_INET,SOCK_DGRAM)
UDPSock.bind(addr)
print "Listing on {0}:{1} and logging to '{2}'".format(host, port, sys.argv[3])
# Open the logging file.
f = open(sys.argv[3], "a")
# Receive messages
while 1:
data,addr = UDPSock.recvfrom(buf)
if not data:
break
else:
f.write("{ ");
f.write("\"time\": \"{0}\"".format(datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')))
f.write(", \"sender\": \"{0}\" ".format(addr[0]))
f.write(", \"data\": ")
f.write(data)
f.write(" }\n")
f.flush()
# Close socket
UDPSock.close()
if __name__ == '__main__':
main()
| 1,004 | 21.333333 | 99 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/utils/check_cfc/obj_diff.py | #!/usr/bin/env python2.7
from __future__ import print_function
import argparse
import difflib
import filecmp
import os
import subprocess
import sys
disassembler = 'objdump'
def keep_line(line):
"""Returns true for lines that should be compared in the disassembly
output."""
return "file format" not in line
def disassemble(objfile):
"""Disassemble object to a file."""
p = subprocess.Popen([disassembler, '-d', objfile],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
if p.returncode or err:
print("Disassemble failed: {}".format(objfile))
sys.exit(1)
return filter(keep_line, out.split(os.linesep))
def dump_debug(objfile):
"""Dump all of the debug info from a file."""
p = subprocess.Popen([disassembler, '-WliaprmfsoRt', objfile], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = p.communicate()
if p.returncode or err:
print("Dump debug failed: {}".format(objfile))
sys.exit(1)
return filter(keep_line, out.split(os.linesep))
def first_diff(a, b, fromfile, tofile):
"""Returns the first few lines of a difference, if there is one. Python
diff can be very slow with large objects and the most interesting changes
are the first ones. Truncate data before sending to difflib. Returns None
    if there is no difference."""
# Find first diff
first_diff_idx = None
for idx, val in enumerate(a):
if val != b[idx]:
first_diff_idx = idx
break
    if first_diff_idx is None:
# No difference
return None
# Diff to first line of diff plus some lines
context = 3
diff = difflib.unified_diff(a[:first_diff_idx+context],
b[:first_diff_idx+context],
fromfile,
tofile)
difference = "\n".join(diff)
if first_diff_idx + context < len(a):
difference += "\n*** Diff truncated ***"
return difference
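# Illustrative example (not part of the original tool): first_diff on two
# small line lists; the instruction strings are made up.
def _demo_first_diff():
    a = ['mov eax, 1', 'ret']
    b = ['mov eax, 2', 'ret']
    # Returns a unified diff covering the first difference plus up to three
    # lines of context, or None when the inputs are identical.
    return first_diff(a, b, 'a.o', 'b.o')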
def compare_object_files(objfilea, objfileb):
"""Compare disassembly of two different files.
Allowing unavoidable differences, such as filenames.
Return the first difference if the disassembly differs, or None.
"""
disa = disassemble(objfilea)
disb = disassemble(objfileb)
return first_diff(disa, disb, objfilea, objfileb)
def compare_debug_info(objfilea, objfileb):
"""Compare debug info of two different files.
Allowing unavoidable differences, such as filenames.
Return the first difference if the debug info differs, or None.
If there are differences in the code, there will almost certainly be differences in the debug info too.
"""
dbga = dump_debug(objfilea)
dbgb = dump_debug(objfileb)
return first_diff(dbga, dbgb, objfilea, objfileb)
def compare_exact(objfilea, objfileb):
"""Byte for byte comparison between object files.
Returns True if equal, False otherwise.
"""
return filecmp.cmp(objfilea, objfileb)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('objfilea', nargs=1)
parser.add_argument('objfileb', nargs=1)
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
diff = compare_object_files(args.objfilea[0], args.objfileb[0])
if diff:
print("Difference detected")
if args.verbose:
print(diff)
sys.exit(1)
else:
print("The same")
| 3,559 | 32.584906 | 114 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/utils/check_cfc/setup.py | """For use on Windows. Run with:
python.exe setup.py py2exe
"""
from distutils.core import setup
try:
import py2exe
except ImportError:
import platform
import sys
if platform.system() == 'Windows':
print "Could not find py2exe. Please install then run setup.py py2exe."
raise
else:
print "setup.py only required on Windows."
sys.exit(1)
setup(
console=['check_cfc.py'],
name="Check CFC",
description='Check Compile Flow Consistency'
)
| 520 | 22.681818 | 79 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/utils/check_cfc/test_check_cfc.py | #!/usr/bin/env python2.7
"""Test internal functions within check_cfc.py."""
import check_cfc
import os
import platform
import unittest
class TestCheckCFC(unittest.TestCase):
def test_flip_dash_g(self):
self.assertIn('-g', check_cfc.flip_dash_g(['clang', '-c']))
self.assertNotIn('-g', check_cfc.flip_dash_g(['clang', '-c', '-g']))
self.assertNotIn(
'-g', check_cfc.flip_dash_g(['clang', '-g', '-c', '-g']))
def test_remove_dir_from_path(self):
bin_path = r'/usr/bin'
space_path = r'/home/user/space in path'
superstring_path = r'/usr/bin/local'
# Test removing last thing in path
self.assertNotIn(
bin_path, check_cfc.remove_dir_from_path(bin_path, bin_path))
# Test removing one entry and leaving others
# Also tests removing repeated path
path_var = os.pathsep.join(
[superstring_path, bin_path, space_path, bin_path])
stripped_path_var = check_cfc.remove_dir_from_path(path_var, bin_path)
self.assertIn(superstring_path, stripped_path_var)
self.assertNotIn(bin_path, stripped_path_var.split(os.pathsep))
self.assertIn(space_path, stripped_path_var)
# Test removing non-canonical path
self.assertNotIn(r'/usr//bin',
check_cfc.remove_dir_from_path(r'/usr//bin', bin_path))
if platform == 'Windows':
# Windows is case insensitive so should remove a different case
# path
self.assertNotIn(
bin_path, check_cfc.remove_dir_from_path(path_var, r'/USR/BIN'))
else:
# Case sensitive so will not remove different case path
self.assertIn(
bin_path, check_cfc.remove_dir_from_path(path_var, r'/USR/BIN'))
def test_is_output_specified(self):
self.assertTrue(
check_cfc.is_output_specified(['clang', '-o', 'test.o']))
self.assertTrue(check_cfc.is_output_specified(['clang', '-otest.o']))
self.assertFalse(
check_cfc.is_output_specified(['clang', '-gline-tables-only']))
# Not specified for implied output file name
self.assertFalse(check_cfc.is_output_specified(['clang', 'test.c']))
def test_get_output_file(self):
self.assertEqual(
check_cfc.get_output_file(['clang', '-o', 'test.o']), 'test.o')
self.assertEqual(
check_cfc.get_output_file(['clang', '-otest.o']), 'test.o')
self.assertIsNone(
check_cfc.get_output_file(['clang', '-gline-tables-only']))
# Can't get output file if more than one input file
self.assertIsNone(
check_cfc.get_output_file(['clang', '-c', 'test.cpp', 'test2.cpp']))
# No output file specified
self.assertIsNone(check_cfc.get_output_file(['clang', '-c', 'test.c']))
def test_derive_output_file(self):
# Test getting implicit output file
self.assertEqual(
check_cfc.derive_output_file(['clang', '-c', 'test.c']), 'test.o')
self.assertEqual(
check_cfc.derive_output_file(['clang', '-c', 'test.cpp']), 'test.o')
self.assertIsNone(check_cfc.derive_output_file(['clang', '--version']))
def test_is_normal_compile(self):
self.assertTrue(check_cfc.is_normal_compile(
['clang', '-c', 'test.cpp', '-o', 'test2.o']))
self.assertTrue(
check_cfc.is_normal_compile(['clang', '-c', 'test.cpp']))
# Outputting bitcode is not a normal compile
self.assertFalse(
check_cfc.is_normal_compile(['clang', '-c', 'test.cpp', '-flto']))
self.assertFalse(
check_cfc.is_normal_compile(['clang', '-c', 'test.cpp', '-emit-llvm']))
# Outputting preprocessed output or assembly is not a normal compile
self.assertFalse(
check_cfc.is_normal_compile(['clang', '-E', 'test.cpp', '-o', 'test.ii']))
self.assertFalse(
check_cfc.is_normal_compile(['clang', '-S', 'test.cpp', '-o', 'test.s']))
# Input of preprocessed or assembly is not a "normal compile"
self.assertFalse(
check_cfc.is_normal_compile(['clang', '-c', 'test.s', '-o', 'test.o']))
self.assertFalse(
check_cfc.is_normal_compile(['clang', '-c', 'test.ii', '-o', 'test.o']))
# Specifying --version and -c is not a normal compile
self.assertFalse(
check_cfc.is_normal_compile(['clang', '-c', 'test.cpp', '--version']))
self.assertFalse(
check_cfc.is_normal_compile(['clang', '-c', 'test.cpp', '--help']))
# Outputting dependency files is not a normal compile
self.assertFalse(
check_cfc.is_normal_compile(['clang', '-c', '-M', 'test.cpp']))
self.assertFalse(
check_cfc.is_normal_compile(['clang', '-c', '-MM', 'test.cpp']))
# Creating a dependency file as a side effect still outputs an object file
self.assertTrue(
check_cfc.is_normal_compile(['clang', '-c', '-MD', 'test.cpp']))
self.assertTrue(
check_cfc.is_normal_compile(['clang', '-c', '-MMD', 'test.cpp']))
def test_replace_output_file(self):
self.assertEqual(check_cfc.replace_output_file(
['clang', '-o', 'test.o'], 'testg.o'), ['clang', '-o', 'testg.o'])
self.assertEqual(check_cfc.replace_output_file(
['clang', '-otest.o'], 'testg.o'), ['clang', '-otestg.o'])
with self.assertRaises(Exception):
check_cfc.replace_output_file(['clang'], 'testg.o')
def test_add_output_file(self):
self.assertEqual(check_cfc.add_output_file(
['clang'], 'testg.o'), ['clang', '-o', 'testg.o'])
def test_set_output_file(self):
# Test output not specified
self.assertEqual(
check_cfc.set_output_file(['clang'], 'test.o'), ['clang', '-o', 'test.o'])
# Test output is specified
self.assertEqual(check_cfc.set_output_file(
['clang', '-o', 'test.o'], 'testb.o'), ['clang', '-o', 'testb.o'])
def test_get_input_file(self):
# No input file
self.assertIsNone(check_cfc.get_input_file(['clang']))
# Input C file
self.assertEqual(
check_cfc.get_input_file(['clang', 'test.c']), 'test.c')
# Input C++ file
self.assertEqual(
check_cfc.get_input_file(['clang', 'test.cpp']), 'test.cpp')
# Multiple input files
self.assertIsNone(
check_cfc.get_input_file(['clang', 'test.c', 'test2.cpp']))
self.assertIsNone(
check_cfc.get_input_file(['clang', 'test.c', 'test2.c']))
# Don't handle preprocessed files
self.assertIsNone(check_cfc.get_input_file(['clang', 'test.i']))
self.assertIsNone(check_cfc.get_input_file(['clang', 'test.ii']))
# Test identifying input file with quotes
self.assertEqual(
check_cfc.get_input_file(['clang', '"test.c"']), '"test.c"')
self.assertEqual(
check_cfc.get_input_file(['clang', "'test.c'"]), "'test.c'")
# Test multiple quotes
self.assertEqual(
check_cfc.get_input_file(['clang', "\"'test.c'\""]), "\"'test.c'\"")
def test_set_input_file(self):
self.assertEqual(check_cfc.set_input_file(
['clang', 'test.c'], 'test.s'), ['clang', 'test.s'])
if __name__ == '__main__':
unittest.main()
| 7,480 | 43.266272 | 86 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/utils/check_cfc/check_cfc.py | #!/usr/bin/env python2.7
"""Check CFC - Check Compile Flow Consistency
This is a compiler wrapper for testing that code generation is consistent with
different compilation processes. It checks that code is not unduly affected by
compiler options or other changes which should not have side effects.
To use:
-Ensure that the compiler under test (i.e. clang, clang++) is on the PATH
-On Linux copy this script to the name of the compiler
e.g. cp check_cfc.py clang && cp check_cfc.py clang++
-On Windows use setup.py to generate check_cfc.exe and copy that to clang.exe
and clang++.exe
-Enable the desired checks in check_cfc.cfg (in the same directory as the
wrapper)
e.g.
[Checks]
dash_g_no_change = true
dash_s_no_change = false
-The wrapper can be run using its absolute path or added to PATH before the
compiler under test
e.g. export PATH=<path to check_cfc>:$PATH
-Compile as normal. The wrapper intercepts normal -c compiles and will return
non-zero if the check fails.
e.g.
$ clang -c test.cpp
Code difference detected with -g
--- /tmp/tmp5nv893.o
+++ /tmp/tmp6Vwjnc.o
@@ -1 +1 @@
- 0: 48 8b 05 51 0b 20 00 mov 0x200b51(%rip),%rax
+ 0: 48 39 3d 51 0b 20 00 cmp %rdi,0x200b51(%rip)
-To run LNT with Check CFC specify the absolute path to the wrapper to the --cc
and --cxx options
e.g.
lnt runtest nt --cc <path to check_cfc>/clang \\
--cxx <path to check_cfc>/clang++ ...
To add a new check:
-Create a new subclass of WrapperCheck
-Implement the perform_check() method. This should perform the alternate compile
and do the comparison.
-Add the new check to check_cfc.cfg. The check has the same name as the
subclass.
"""
from __future__ import print_function
import imp
import os
import platform
import shutil
import subprocess
import sys
import tempfile
import ConfigParser
import io
import obj_diff
def is_windows():
"""Returns True if running on Windows."""
return platform.system() == 'Windows'
class WrapperStepException(Exception):
"""Exception type to be used when a step other than the original compile
fails."""
def __init__(self, msg, stdout, stderr):
self.msg = msg
self.stdout = stdout
self.stderr = stderr
class WrapperCheckException(Exception):
"""Exception type to be used when a comparison check fails."""
def __init__(self, msg):
self.msg = msg
def main_is_frozen():
"""Returns True when running as a py2exe executable."""
return (hasattr(sys, "frozen") or # new py2exe
hasattr(sys, "importers") or # old py2exe
imp.is_frozen("__main__")) # tools/freeze
def get_main_dir():
"""Get the directory that the script or executable is located in."""
if main_is_frozen():
return os.path.dirname(sys.executable)
return os.path.dirname(sys.argv[0])
def remove_dir_from_path(path_var, directory):
"""Remove the specified directory from path_var, a string representing
PATH"""
pathlist = path_var.split(os.pathsep)
norm_directory = os.path.normpath(os.path.normcase(directory))
pathlist = filter(lambda x: os.path.normpath(
os.path.normcase(x)) != norm_directory, pathlist)
return os.pathsep.join(pathlist)
def path_without_wrapper():
"""Returns the PATH variable modified to remove the path to this program."""
scriptdir = get_main_dir()
path = os.environ['PATH']
return remove_dir_from_path(path, scriptdir)
def flip_dash_g(args):
"""Search for -g in args. If it exists then return args without. If not then
add it."""
if '-g' in args:
# Return args without any -g
return [x for x in args if x != '-g']
else:
# No -g, add one
return args + ['-g']
def derive_output_file(args):
"""Derive output file from the input file (if just one) or None
otherwise."""
infile = get_input_file(args)
if infile is None:
return None
else:
return '{}.o'.format(os.path.splitext(infile)[0])
def get_output_file(args):
"""Return the output file specified by this command or None if not
specified."""
grabnext = False
for arg in args:
if grabnext:
return arg
if arg == '-o':
# Specified as a separate arg
grabnext = True
elif arg.startswith('-o'):
# Specified conjoined with -o
return arg[2:]
assert grabnext == False
return None
def is_output_specified(args):
"""Return true is output file is specified in args."""
return get_output_file(args) is not None
def replace_output_file(args, new_name):
"""Replaces the specified name of an output file with the specified name.
Assumes that the output file name is specified in the command line args."""
replaceidx = None
attached = False
for idx, val in enumerate(args):
if val == '-o':
replaceidx = idx + 1
attached = False
elif val.startswith('-o'):
replaceidx = idx
attached = True
if replaceidx is None:
raise Exception
replacement = new_name
if attached == True:
replacement = '-o' + new_name
args[replaceidx] = replacement
return args
def add_output_file(args, output_file):
"""Append an output file to args, presuming not already specified."""
return args + ['-o', output_file]
def set_output_file(args, output_file):
"""Set the output file within the arguments. Appends or replaces as
appropriate."""
if is_output_specified(args):
args = replace_output_file(args, output_file)
else:
args = add_output_file(args, output_file)
return args
gSrcFileSuffixes = ('.c', '.cpp', '.cxx', '.c++', '.cp', '.cc')
def get_input_file(args):
"""Return the input file string if it can be found (and there is only
one)."""
inputFiles = list()
for arg in args:
testarg = arg
quotes = ('"', "'")
while testarg.endswith(quotes):
testarg = testarg[:-1]
testarg = os.path.normcase(testarg)
# Test if it is a source file
if testarg.endswith(gSrcFileSuffixes):
inputFiles.append(arg)
if len(inputFiles) == 1:
return inputFiles[0]
else:
return None
def set_input_file(args, input_file):
"""Replaces the input file with that specified."""
infile = get_input_file(args)
if infile:
infile_idx = args.index(infile)
args[infile_idx] = input_file
return args
else:
# Could not find input file
assert False
def is_normal_compile(args):
"""Check if this is a normal compile which will output an object file rather
than a preprocess or link. args is a list of command line arguments."""
compile_step = '-c' in args
# Bitcode cannot be disassembled in the same way
bitcode = '-flto' in args or '-emit-llvm' in args
# Version and help are queries of the compiler and override -c if specified
query = '--version' in args or '--help' in args
# Options to output dependency files for make
dependency = '-M' in args or '-MM' in args
# Check if the input is recognised as a source file (this may be too
# strong a restriction)
input_is_valid = bool(get_input_file(args))
return compile_step and not bitcode and not query and not dependency and input_is_valid
def run_step(command, my_env, error_on_failure):
"""Runs a step of the compilation. Reports failure as exception."""
# Need to use shell=True on Windows as Popen won't use PATH otherwise.
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=my_env, shell=is_windows())
(stdout, stderr) = p.communicate()
if p.returncode != 0:
raise WrapperStepException(error_on_failure, stdout, stderr)
def get_temp_file_name(suffix):
"""Get a temporary file name with a particular suffix. Let the caller be
    responsible for deleting it."""
tf = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)
tf.close()
return tf.name
class WrapperCheck(object):
"""Base class for a check. Subclass this to add a check."""
def __init__(self, output_file_a):
"""Record the base output file that will be compared against."""
self._output_file_a = output_file_a
def perform_check(self, arguments, my_env):
"""Override this to perform the modified compilation and required
checks."""
raise NotImplementedError("Please Implement this method")
class dash_g_no_change(WrapperCheck):
def perform_check(self, arguments, my_env):
"""Check if different code is generated with/without the -g flag."""
output_file_b = get_temp_file_name('.o')
alternate_command = list(arguments)
alternate_command = flip_dash_g(alternate_command)
alternate_command = set_output_file(alternate_command, output_file_b)
run_step(alternate_command, my_env, "Error compiling with -g")
# Compare disassembly (returns first diff if differs)
difference = obj_diff.compare_object_files(self._output_file_a,
output_file_b)
if difference:
raise WrapperCheckException(
"Code difference detected with -g\n{}".format(difference))
# Clean up temp file if comparison okay
os.remove(output_file_b)
class dash_s_no_change(WrapperCheck):
def perform_check(self, arguments, my_env):
"""Check if compiling to asm then assembling in separate steps results
in different code than compiling to object directly."""
output_file_b = get_temp_file_name('.o')
alternate_command = arguments + ['-via-file-asm']
alternate_command = set_output_file(alternate_command, output_file_b)
run_step(alternate_command, my_env,
"Error compiling with -via-file-asm")
# Compare if object files are exactly the same
exactly_equal = obj_diff.compare_exact(self._output_file_a, output_file_b)
if not exactly_equal:
# Compare disassembly (returns first diff if differs)
difference = obj_diff.compare_object_files(self._output_file_a,
output_file_b)
if difference:
raise WrapperCheckException(
"Code difference detected with -S\n{}".format(difference))
# Code is identical, compare debug info
dbgdifference = obj_diff.compare_debug_info(self._output_file_a,
output_file_b)
if dbgdifference:
raise WrapperCheckException(
"Debug info difference detected with -S\n{}".format(dbgdifference))
raise WrapperCheckException("Object files not identical with -S\n")
# Clean up temp file if comparison okay
os.remove(output_file_b)
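# Illustrative sketch of adding a new check, as described in the module
# docstring (not part of the original tool): recompile with warnings
# suppressed and verify the object code is unchanged. Like the checks above
# it is discovered automatically, but it stays disabled unless
# 'dash_w_no_change = true' is added to check_cfc.cfg. The choice of -w is
# an example only.
class dash_w_no_change(WrapperCheck):
    def perform_check(self, arguments, my_env):
        """Check that suppressing warnings (-w) does not change the
        generated code."""
        output_file_b = get_temp_file_name('.o')
        alternate_command = arguments + ['-w']
        alternate_command = set_output_file(alternate_command, output_file_b)
        run_step(alternate_command, my_env, "Error compiling with -w")
        # Compare disassembly (returns first diff if differs)
        difference = obj_diff.compare_object_files(self._output_file_a,
                                                   output_file_b)
        if difference:
            raise WrapperCheckException(
                "Code difference detected with -w\n{}".format(difference))
        # Clean up temp file if comparison okay
        os.remove(output_file_b)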
if __name__ == '__main__':
# Create configuration defaults from list of checks
default_config = """
[Checks]
"""
# Find all subclasses of WrapperCheck
checks = [cls.__name__ for cls in vars()['WrapperCheck'].__subclasses__()]
for c in checks:
default_config += "{} = false\n".format(c)
config = ConfigParser.RawConfigParser()
config.readfp(io.BytesIO(default_config))
scriptdir = get_main_dir()
config_path = os.path.join(scriptdir, 'check_cfc.cfg')
try:
config.read(os.path.join(config_path))
except:
print("Could not read config from {}, "
"using defaults.".format(config_path))
my_env = os.environ.copy()
my_env['PATH'] = path_without_wrapper()
arguments_a = list(sys.argv)
# Prevent infinite loop if called with absolute path.
arguments_a[0] = os.path.basename(arguments_a[0])
# Sanity check
enabled_checks = [check_name
for check_name in checks
if config.getboolean('Checks', check_name)]
checks_comma_separated = ', '.join(enabled_checks)
print("Check CFC, checking: {}".format(checks_comma_separated))
# A - original compilation
output_file_orig = get_output_file(arguments_a)
if output_file_orig is None:
output_file_orig = derive_output_file(arguments_a)
p = subprocess.Popen(arguments_a, env=my_env, shell=is_windows())
p.communicate()
if p.returncode != 0:
sys.exit(p.returncode)
if not is_normal_compile(arguments_a) or output_file_orig is None:
# Bail out here if we can't apply checks in this case.
# Does not indicate an error.
# Maybe not straight compilation (e.g. -S or --version or -flto)
# or maybe > 1 input files.
sys.exit(0)
# Sometimes we generate files which have very long names which can't be
# read/disassembled. This will exit early if we can't find the file we
# expected to be output.
if not os.path.isfile(output_file_orig):
sys.exit(0)
# Copy output file to a temp file
temp_output_file_orig = get_temp_file_name('.o')
shutil.copyfile(output_file_orig, temp_output_file_orig)
# Run checks, if they are enabled in config and if they are appropriate for
# this command line.
current_module = sys.modules[__name__]
for check_name in checks:
if config.getboolean('Checks', check_name):
class_ = getattr(current_module, check_name)
checker = class_(temp_output_file_orig)
try:
checker.perform_check(arguments_a, my_env)
except WrapperCheckException as e:
# Check failure
print("{} {}".format(get_input_file(arguments_a), e.msg), file=sys.stderr)
# Remove file to comply with build system expectations (no
# output file if failed)
os.remove(output_file_orig)
sys.exit(1)
except WrapperStepException as e:
# Compile step failure
print(e.msg, file=sys.stderr)
print("*** stdout ***", file=sys.stderr)
print(e.stdout, file=sys.stderr)
print("*** stderr ***", file=sys.stderr)
print(e.stderr, file=sys.stderr)
# Remove file to comply with build system expectations (no
# output file if failed)
os.remove(output_file_orig)
sys.exit(1)
| 14,547 | 35.099256 | 91 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/utils/analyzer/SumTimerInfo.py | #!/usr/bin/env python
"""
Script to Summarize statistics in the scan-build output.
Statistics are enabled by passing '-internal-stats' option to scan-build
(or '-analyzer-stats' to the analyzer).
"""
import string
from operator import itemgetter
import sys
if __name__ == '__main__':
if len(sys.argv) < 2:
print >> sys.stderr, 'Usage: ', sys.argv[0],\
'scan_build_output_file'
sys.exit(-1)
f = open(sys.argv[1], 'r')
Time = 0.0
TotalTime = 0.0
MaxTime = 0.0
Warnings = 0
Count = 0
FunctionsAnalyzed = 0
ReachableBlocks = 0
ReachedMaxSteps = 0
NumSteps = 0
NumInlinedCallSites = 0
NumBifurcatedCallSites = 0
MaxCFGSize = 0
Mode = 1
for line in f:
if ("Miscellaneous Ungrouped Timers" in line) :
Mode = 1
if (("Analyzer Total Time" in line) and (Mode == 1)) :
s = line.split()
Time = Time + float(s[6])
Count = Count + 1
if (float(s[6]) > MaxTime) :
MaxTime = float(s[6])
if ((("warning generated." in line) or ("warnings generated" in line)) and Mode == 1) :
s = line.split()
Warnings = Warnings + int(s[0])
if (("The # of functions analysed (as top level)" in line) and (Mode == 1)) :
s = line.split()
FunctionsAnalyzed = FunctionsAnalyzed + int(s[0])
if (("The % of reachable basic blocks" in line) and (Mode == 1)) :
s = line.split()
ReachableBlocks = ReachableBlocks + int(s[0])
if (("The # of times we reached the max number of steps" in line) and (Mode == 1)) :
s = line.split()
ReachedMaxSteps = ReachedMaxSteps + int(s[0])
if (("The maximum number of basic blocks in a function" in line) and (Mode == 1)) :
s = line.split()
if (MaxCFGSize < int(s[0])) :
MaxCFGSize = int(s[0])
if (("The # of steps executed" in line) and (Mode == 1)) :
s = line.split()
NumSteps = NumSteps + int(s[0])
if (("The # of times we inlined a call" in line) and (Mode == 1)) :
s = line.split()
NumInlinedCallSites = NumInlinedCallSites + int(s[0])
if (("The # of times we split the path due to imprecise dynamic dispatch info" in line) and (Mode == 1)) :
s = line.split()
NumBifurcatedCallSites = NumBifurcatedCallSites + int(s[0])
if ((") Total" in line) and (Mode == 1)) :
s = line.split()
TotalTime = TotalTime + float(s[6])
print "TU Count %d" % (Count)
print "Time %f" % (Time)
print "Warnings %d" % (Warnings)
print "Functions Analyzed %d" % (FunctionsAnalyzed)
print "Reachable Blocks %d" % (ReachableBlocks)
print "Reached Max Steps %d" % (ReachedMaxSteps)
print "Number of Steps %d" % (NumSteps)
print "Number of Inlined calls %d (bifurcated %d)" % (NumInlinedCallSites, NumBifurcatedCallSites)
print "MaxTime %f" % (MaxTime)
print "TotalTime %f" % (TotalTime)
print "Max CFG Size %d" % (MaxCFGSize)
| 3,095 | 35.857143 | 114 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/utils/analyzer/CmpRuns.py | #!/usr/bin/env python
"""
CmpRuns - A simple tool for comparing two static analyzer runs to determine
which reports have been added, removed, or changed.
This is designed to support automated testing using the static analyzer, from
two perspectives:
1. To monitor changes in the static analyzer's reports on real code bases, for
regression testing.
2. For use by end users who want to integrate regular static analyzer testing
into a buildbot like environment.
Usage:
# Load the results of both runs, to obtain lists of the corresponding
# AnalysisDiagnostic objects.
#
resultsA = loadResultsFromSingleRun(singleRunInfoA, deleteEmpty)
resultsB = loadResultsFromSingleRun(singleRunInfoB, deleteEmpty)
# Generate a relation from diagnostics in run A to diagnostics in run B
# to obtain a list of triples (a, b, confidence).
diff = compareResults(resultsA, resultsB)
"""
import os
import plistlib
import CmpRuns
# Information about analysis run:
# path - the analysis output directory
# root - the name of the root directory, which will be disregarded when
# determining the source file name
class SingleRunInfo:
def __init__(self, path, root="", verboseLog=None):
self.path = path
self.root = root.rstrip("/\\")
self.verboseLog = verboseLog
class AnalysisDiagnostic:
def __init__(self, data, report, htmlReport):
self._data = data
self._loc = self._data['location']
self._report = report
self._htmlReport = htmlReport
def getFileName(self):
root = self._report.run.root
fileName = self._report.files[self._loc['file']]
if fileName.startswith(root) and len(root) > 0:
return fileName[len(root)+1:]
return fileName
def getLine(self):
return self._loc['line']
def getColumn(self):
return self._loc['col']
def getCategory(self):
return self._data['category']
def getDescription(self):
return self._data['description']
def getIssueIdentifier(self) :
id = self.getFileName() + "+"
if 'issue_context' in self._data :
id += self._data['issue_context'] + "+"
if 'issue_hash_content_of_line_in_context' in self._data :
id += str(self._data['issue_hash_content_of_line_in_context'])
return id
def getReport(self):
if self._htmlReport is None:
return " "
return os.path.join(self._report.run.path, self._htmlReport)
def getReadableName(self):
return '%s:%d:%d, %s: %s' % (self.getFileName(), self.getLine(),
self.getColumn(), self.getCategory(),
self.getDescription())
# Note, the data format is not an API and may change from one analyzer
# version to another.
def getRawData(self):
return self._data
class multidict:
def __init__(self, elts=()):
self.data = {}
for key,value in elts:
self[key] = value
def __getitem__(self, item):
return self.data[item]
def __setitem__(self, key, value):
if key in self.data:
self.data[key].append(value)
else:
self.data[key] = [value]
def items(self):
return self.data.items()
def values(self):
return self.data.values()
def keys(self):
return self.data.keys()
def __len__(self):
return len(self.data)
def get(self, key, default=None):
return self.data.get(key, default)
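# Illustrative example (not part of the original module): multidict keeps
# every value inserted under a key, unlike a plain dict.
def _demo_multidict():
    md = multidict([('a', 1), ('a', 2), ('b', 3)])
    assert md['a'] == [1, 2] and md['b'] == [3]
    assert len(md) == 2  # number of distinct keys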
class CmpOptions:
def __init__(self, verboseLog=None, rootA="", rootB=""):
self.rootA = rootA
self.rootB = rootB
self.verboseLog = verboseLog
class AnalysisReport:
def __init__(self, run, files):
self.run = run
self.files = files
self.diagnostics = []
class AnalysisRun:
def __init__(self, info):
self.path = info.path
self.root = info.root
self.info = info
self.reports = []
# Cumulative list of all diagnostics from all the reports.
self.diagnostics = []
self.clang_version = None
def getClangVersion(self):
return self.clang_version
def readSingleFile(self, p, deleteEmpty):
data = plistlib.readPlist(p)
# We want to retrieve the clang version even if there are no
# reports. Assume that all reports were created using the same
# clang version (this is always true and is more efficient).
if 'clang_version' in data:
            if self.clang_version is None:
self.clang_version = data.pop('clang_version')
else:
data.pop('clang_version')
# Ignore/delete empty reports.
if not data['files']:
if deleteEmpty == True:
os.remove(p)
return
        # Extract the HTML reports, if they exist.
if 'HTMLDiagnostics_files' in data['diagnostics'][0]:
htmlFiles = []
for d in data['diagnostics']:
# FIXME: Why is this named files, when does it have multiple
# files?
assert len(d['HTMLDiagnostics_files']) == 1
htmlFiles.append(d.pop('HTMLDiagnostics_files')[0])
else:
htmlFiles = [None] * len(data['diagnostics'])
report = AnalysisReport(self, data.pop('files'))
diagnostics = [AnalysisDiagnostic(d, report, h)
for d,h in zip(data.pop('diagnostics'),
htmlFiles)]
assert not data
report.diagnostics.extend(diagnostics)
self.reports.append(report)
self.diagnostics.extend(diagnostics)
# Backward compatibility API.
def loadResults(path, opts, root = "", deleteEmpty=True):
return loadResultsFromSingleRun(SingleRunInfo(path, root, opts.verboseLog),
deleteEmpty)
# Load results of the analyzes from a given output folder.
# - info is the SingleRunInfo object
# - deleteEmpty specifies if the empty plist files should be deleted
def loadResultsFromSingleRun(info, deleteEmpty=True):
path = info.path
run = AnalysisRun(info)
if os.path.isfile(path):
run.readSingleFile(path, deleteEmpty)
else:
for (dirpath, dirnames, filenames) in os.walk(path):
for f in filenames:
if (not f.endswith('plist')):
continue
p = os.path.join(dirpath, f)
run.readSingleFile(p, deleteEmpty)
return run
def cmpAnalysisDiagnostic(d) :
return d.getIssueIdentifier()
def compareResults(A, B):
"""
compareResults - Generate a relation from diagnostics in run A to
diagnostics in run B.
The result is the relation as a list of triples (a, b, confidence) where
each element {a,b} is None or an element from the respective run, and
confidence is a measure of the match quality (where 0 indicates equality,
and None is used if either element is None).
"""
res = []
# Quickly eliminate equal elements.
neqA = []
neqB = []
eltsA = list(A.diagnostics)
eltsB = list(B.diagnostics)
eltsA.sort(key = cmpAnalysisDiagnostic)
eltsB.sort(key = cmpAnalysisDiagnostic)
while eltsA and eltsB:
a = eltsA.pop()
b = eltsB.pop()
if (a.getIssueIdentifier() == b.getIssueIdentifier()) :
res.append((a, b, 0))
elif a.getIssueIdentifier() > b.getIssueIdentifier():
eltsB.append(b)
neqA.append(a)
else:
eltsA.append(a)
neqB.append(b)
neqA.extend(eltsA)
neqB.extend(eltsB)
    # FIXME: Add fuzzy matching. One simple and possibly effective idea would be
# to bin the diagnostics, print them in a normalized form (based solely on
# the structure of the diagnostic), compute the diff, then use that as the
# basis for matching. This has the nice property that we don't depend in any
# way on the diagnostic format.
for a in neqA:
res.append((a, None, None))
for b in neqB:
res.append((None, b, None))
return res
def dumpScanBuildResultsDiff(dirA, dirB, opts, deleteEmpty=True):
# Load the run results.
resultsA = loadResults(dirA, opts, opts.rootA, deleteEmpty)
resultsB = loadResults(dirB, opts, opts.rootB, deleteEmpty)
# Open the verbose log, if given.
if opts.verboseLog:
auxLog = open(opts.verboseLog, "wb")
else:
auxLog = None
diff = compareResults(resultsA, resultsB)
foundDiffs = 0
for res in diff:
a,b,confidence = res
if a is None:
print "ADDED: %r" % b.getReadableName()
foundDiffs += 1
if auxLog:
print >>auxLog, ("('ADDED', %r, %r)" % (b.getReadableName(),
b.getReport()))
elif b is None:
print "REMOVED: %r" % a.getReadableName()
foundDiffs += 1
if auxLog:
print >>auxLog, ("('REMOVED', %r, %r)" % (a.getReadableName(),
a.getReport()))
elif confidence:
print "CHANGED: %r to %r" % (a.getReadableName(),
b.getReadableName())
foundDiffs += 1
if auxLog:
print >>auxLog, ("('CHANGED', %r, %r, %r, %r)"
% (a.getReadableName(),
b.getReadableName(),
a.getReport(),
b.getReport()))
else:
pass
TotalReports = len(resultsB.diagnostics)
print "TOTAL REPORTS: %r" % TotalReports
print "TOTAL DIFFERENCES: %r" % foundDiffs
if auxLog:
print >>auxLog, "('TOTAL NEW REPORTS', %r)" % TotalReports
print >>auxLog, "('TOTAL DIFFERENCES', %r)" % foundDiffs
return foundDiffs, len(resultsA.diagnostics), len(resultsB.diagnostics)
def main():
from optparse import OptionParser
parser = OptionParser("usage: %prog [options] [dir A] [dir B]")
parser.add_option("", "--rootA", dest="rootA",
help="Prefix to ignore on source files for directory A",
action="store", type=str, default="")
parser.add_option("", "--rootB", dest="rootB",
help="Prefix to ignore on source files for directory B",
action="store", type=str, default="")
parser.add_option("", "--verbose-log", dest="verboseLog",
help="Write additional information to LOG [default=None]",
action="store", type=str, default=None,
metavar="LOG")
(opts, args) = parser.parse_args()
if len(args) != 2:
parser.error("invalid number of arguments")
dirA,dirB = args
dumpScanBuildResultsDiff(dirA, dirB, opts)
if __name__ == '__main__':
main()
| 11,083 | 32.68997 | 80 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/utils/analyzer/SATestAdd.py | #!/usr/bin/env python
"""
Static Analyzer qualification infrastructure: adding a new project to
the Repository Directory.
Add a new project for testing: build it and add to the Project Map file.
Assumes it's being run from the Repository Directory.
The project directory should be added inside the Repository Directory and
have the same name as the project ID
The project should use the following files for set up:
- cleanup_run_static_analyzer.sh - prepare the build environment.
Ex: make clean can be a part of it.
- run_static_analyzer.cmd - a list of commands to run through scan-build.
Each command should be on a separate line.
Choose from: configure, make, xcodebuild
- download_project.sh - download the project into the CachedSource/
directory. For example, download a zip of
the project source from GitHub, unzip it,
and rename the unzipped directory to
'CachedSource'. This script is not called
when 'CachedSource' is already present,
so an alternative is to check the
'CachedSource' directory into the
repository directly.
- CachedSource/ - An optional directory containing the source of the
project being analyzed. If present,
download_project.sh will not be called.
- changes_for_analyzer.patch - An optional patch file for any local changes
(e.g., to adapt to newer version of clang)
that should be applied to CachedSource
before analysis. To construct this patch,
                                  run the download script to download
the project to CachedSource, copy the
CachedSource to another directory (for
example, PatchedSource) and make any needed
                                  modifications to the copied source.
Then run:
diff -ur CachedSource PatchedSource \
> changes_for_analyzer.patch
"""
import SATestBuild
import os
import csv
import sys
def isExistingProject(PMapFile, projectID) :
PMapReader = csv.reader(PMapFile)
for I in PMapReader:
if projectID == I[0]:
return True
return False
# Add a new project for testing: build it and add to the Project Map file.
# Params:
# Dir is the directory where the sources are.
# ID is a short string used to identify a project.
def addNewProject(ID, BuildMode) :
CurDir = os.path.abspath(os.curdir)
Dir = SATestBuild.getProjectDir(ID)
if not os.path.exists(Dir):
print "Error: Project directory is missing: %s" % Dir
sys.exit(-1)
# Build the project.
SATestBuild.testProject(ID, BuildMode, IsReferenceBuild=True, Dir=Dir)
# Add the project ID to the project map.
ProjectMapPath = os.path.join(CurDir, SATestBuild.ProjectMapFile)
if os.path.exists(ProjectMapPath):
PMapFile = open(ProjectMapPath, "r+b")
else:
print "Warning: Creating the Project Map file!!"
PMapFile = open(ProjectMapPath, "w+b")
try:
if (isExistingProject(PMapFile, ID)) :
print >> sys.stdout, 'Warning: Project with ID \'', ID, \
'\' already exists.'
print >> sys.stdout, "Reference output has been regenerated."
else:
PMapWriter = csv.writer(PMapFile)
            PMapWriter.writerow((ID, int(BuildMode)))
print "The project map is updated: ", ProjectMapPath
finally:
PMapFile.close()
# TODO: Add an option not to build.
# TODO: Set the path to the Repository directory.
if __name__ == '__main__':
if len(sys.argv) < 2:
        print >> sys.stderr, 'Usage: ', sys.argv[0],\
            'project_ID <mode>\n' \
            'mode - 0 for single file project; ' \
            '1 for scan_build; ' \
            '2 for single file c++11 project'
sys.exit(-1)
BuildMode = 1
if (len(sys.argv) >= 3):
BuildMode = int(sys.argv[2])
assert((BuildMode == 0) | (BuildMode == 1) | (BuildMode == 2))
addNewProject(sys.argv[1], BuildMode)
| 4,793 | 43.803738 | 81 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/utils/analyzer/SATestBuild.py | #!/usr/bin/env python
"""
Static Analyzer qualification infrastructure.
The goal is to test the analyzer against different projects, check for failures,
compare results, and measure performance.
Repository Directory will contain sources of the projects as well as the
information on how to build them and the expected output.
Repository Directory structure:
- ProjectMap file
- Historical Performance Data
- Project Dir1
- ReferenceOutput
- Project Dir2
- ReferenceOutput
..
Note that the build tree must be inside the project dir.
To test the build of the analyzer one would:
- Copy over a copy of the Repository Directory. (TODO: Prefer to ensure that
  the build directory does not pollute the repository, to minimize network
  traffic.)
- Build all projects, until error. Produce logs to report errors.
- Compare results.
The files which should be kept around for failure investigations:
RepositoryCopy/Project DirI/ScanBuildResults
RepositoryCopy/Project DirI/run_static_analyzer.log
Assumptions (TODO: shouldn't need to assume these.):
The script is being run from the Repository Directory.
   The compiler for scan-build and scan-build itself are in the PATH.
export PATH=/Users/zaks/workspace/c2llvm/build/Release+Asserts/bin:$PATH
For more logging, set the env variables:
zaks:TI zaks$ export CCC_ANALYZER_LOG=1
zaks:TI zaks$ export CCC_ANALYZER_VERBOSE=1
The list of checkers tested are hardcoded in the Checkers variable.
For testing additional checkers, use the SA_ADDITIONAL_CHECKERS environment
variable. It should contain a comma separated list.
"""
import CmpRuns
import os
import csv
import sys
import glob
import math
import shutil
import time
import plistlib
import argparse
from subprocess import check_call, check_output, CalledProcessError
#------------------------------------------------------------------------------
# Helper functions.
#------------------------------------------------------------------------------
def detectCPUs():
"""
Detects the number of CPUs on a system. Cribbed from pp.
"""
# Linux, Unix and MacOS:
if hasattr(os, "sysconf"):
        if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
# Linux & Unix:
ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(ncpus, int) and ncpus > 0:
return ncpus
else: # OSX:
            return int(check_output(['sysctl', '-n', 'hw.ncpu']))
# Windows:
    if "NUMBER_OF_PROCESSORS" in os.environ:
ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
if ncpus > 0:
return ncpus
return 1 # Default
def which(command, paths = None):
"""which(command, [paths]) - Look up the given command in the paths string
(or the PATH environment variable, if unspecified)."""
if paths is None:
paths = os.environ.get('PATH','')
# Check for absolute match first.
if os.path.exists(command):
return command
# Would be nice if Python had a lib function for this.
if not paths:
paths = os.defpath
# Get suffixes to search.
# On Cygwin, 'PATHEXT' may exist but it should not be used.
if os.pathsep == ';':
pathext = os.environ.get('PATHEXT', '').split(';')
else:
pathext = ['']
# Search the paths...
for path in paths.split(os.pathsep):
for ext in pathext:
p = os.path.join(path, command + ext)
if os.path.exists(p):
return p
return None
# Make sure we flush the output after every print statement.
class flushfile(object):
def __init__(self, f):
self.f = f
def write(self, x):
self.f.write(x)
self.f.flush()
sys.stdout = flushfile(sys.stdout)
def getProjectMapPath():
ProjectMapPath = os.path.join(os.path.abspath(os.curdir),
ProjectMapFile)
if not os.path.exists(ProjectMapPath):
print "Error: Cannot find the Project Map file " + ProjectMapPath +\
"\nRunning script for the wrong directory?"
sys.exit(-1)
return ProjectMapPath
def getProjectDir(ID):
return os.path.join(os.path.abspath(os.curdir), ID)
def getSBOutputDirName(IsReferenceBuild) :
if IsReferenceBuild == True :
return SBOutputDirReferencePrefix + SBOutputDirName
else :
return SBOutputDirName
#------------------------------------------------------------------------------
# Configuration setup.
#------------------------------------------------------------------------------
# Find Clang for static analysis.
Clang = which("clang", os.environ['PATH'])
if not Clang:
print "Error: cannot find 'clang' in PATH"
sys.exit(-1)
# Number of jobs.
Jobs = int(math.ceil(detectCPUs() * 0.75))
# Project map stores info about all the "registered" projects.
ProjectMapFile = "projectMap.csv"
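# An example (hypothetical) projectMap.csv: one "ID,mode" row per project,
# where mode is 0 (single file), 1 (project), or 2 (single file c++11);
# lines starting with '#' are treated as comments:
#   # projects tested by this checkout
#   sqlite,1
#   simple_test.c,0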
# Names of the project specific scripts.
# The script that downloads the project.
DownloadScript = "download_project.sh"
# The script that needs to be executed before the build can start.
CleanupScript = "cleanup_run_static_analyzer.sh"
# This is a file containing commands for scan-build.
BuildScript = "run_static_analyzer.cmd"
# The log file name.
LogFolderName = "Logs"
BuildLogName = "run_static_analyzer.log"
# Summary file - contains the summary of the failures. Ex: This info can be
# displayed when buildbot detects a build failure.
NumOfFailuresInSummary = 10
FailuresSummaryFileName = "failures.txt"
# Summary of the result diffs.
DiffsSummaryFileName = "diffs.txt"
# The scan-build result directory.
SBOutputDirName = "ScanBuildResults"
SBOutputDirReferencePrefix = "Ref"
# The name of the directory storing the cached project source. If this directory
# does not exist, the download script will be executed. That script should
# create the "CachedSource" directory and download the project source into it.
CachedSourceDirName = "CachedSource"
# The name of the directory containing the source code that will be analyzed.
# Each time a project is analyzed, a fresh copy of its CachedSource directory
# will be copied to the PatchedSource directory and then the local patches
# in PatchfileName will be applied (if PatchfileName exists).
PatchedSourceDirName = "PatchedSource"
# The name of the patchfile specifying any changes that should be applied
# to the CachedSource before analyzing.
PatchfileName = "changes_for_analyzer.patch"
# The list of checkers used during analyses.
# Currently, consists of all the non-experimental checkers, plus a few alpha
# checkers we don't want to regress on.
Checkers="alpha.unix.SimpleStream,alpha.security.taint,cplusplus.NewDeleteLeaks,core,cplusplus,deadcode,security,unix,osx"
Verbose = 1
#------------------------------------------------------------------------------
# Test harness logic.
#------------------------------------------------------------------------------
# Run pre-processing script if any.
def runCleanupScript(Dir, PBuildLogFile):
Cwd = os.path.join(Dir, PatchedSourceDirName)
ScriptPath = os.path.join(Dir, CleanupScript)
runScript(ScriptPath, PBuildLogFile, Cwd)
# Run the script to download the project, if it exists.
def runDownloadScript(Dir, PBuildLogFile):
ScriptPath = os.path.join(Dir, DownloadScript)
runScript(ScriptPath, PBuildLogFile, Dir)
# Run the provided script if it exists.
def runScript(ScriptPath, PBuildLogFile, Cwd):
if os.path.exists(ScriptPath):
try:
if Verbose == 1:
print " Executing: %s" % (ScriptPath,)
check_call("chmod +x '%s'" % ScriptPath, cwd = Cwd,
stderr=PBuildLogFile,
stdout=PBuildLogFile,
shell=True)
check_call("'%s'" % ScriptPath, cwd = Cwd, stderr=PBuildLogFile,
stdout=PBuildLogFile,
shell=True)
except:
print "Error: Running %s failed. See %s for details." % (ScriptPath,
PBuildLogFile.name)
sys.exit(-1)
# Download the project and apply the local patchfile if it exists.
def downloadAndPatch(Dir, PBuildLogFile):
CachedSourceDirPath = os.path.join(Dir, CachedSourceDirName)
    # If we don't already have the cached source, run the project's
# download script to download it.
if not os.path.exists(CachedSourceDirPath):
runDownloadScript(Dir, PBuildLogFile)
if not os.path.exists(CachedSourceDirPath):
print "Error: '%s' not found after download." % (CachedSourceDirPath)
exit(-1)
PatchedSourceDirPath = os.path.join(Dir, PatchedSourceDirName)
# Remove potentially stale patched source.
if os.path.exists(PatchedSourceDirPath):
shutil.rmtree(PatchedSourceDirPath)
# Copy the cached source and apply any patches to the copy.
shutil.copytree(CachedSourceDirPath, PatchedSourceDirPath, symlinks=True)
applyPatch(Dir, PBuildLogFile)
def applyPatch(Dir, PBuildLogFile):
PatchfilePath = os.path.join(Dir, PatchfileName)
PatchedSourceDirPath = os.path.join(Dir, PatchedSourceDirName)
if not os.path.exists(PatchfilePath):
print " No local patches."
return
print " Applying patch."
try:
check_call("patch -p1 < '%s'" % (PatchfilePath),
cwd = PatchedSourceDirPath,
stderr=PBuildLogFile,
stdout=PBuildLogFile,
shell=True)
except:
print "Error: Patch failed. See %s for details." % (PBuildLogFile.name)
sys.exit(-1)
# Build the project with scan-build by reading in the commands and
# prefixing them with the scan-build options.
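# An example (hypothetical) run_static_analyzer.cmd with one shell command
# per line; each command is executed prefixed with "scan-build <options>":
#   ./configure
#   make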
def runScanBuild(Dir, SBOutputDir, PBuildLogFile):
BuildScriptPath = os.path.join(Dir, BuildScript)
if not os.path.exists(BuildScriptPath):
print "Error: build script is not defined: %s" % BuildScriptPath
sys.exit(-1)
AllCheckers = Checkers
    if 'SA_ADDITIONAL_CHECKERS' in os.environ:
AllCheckers = AllCheckers + ',' + os.environ['SA_ADDITIONAL_CHECKERS']
# Run scan-build from within the patched source directory.
SBCwd = os.path.join(Dir, PatchedSourceDirName)
SBOptions = "--use-analyzer '%s' " % Clang
SBOptions += "-plist-html -o '%s' " % SBOutputDir
SBOptions += "-enable-checker " + AllCheckers + " "
SBOptions += "--keep-empty "
# Always use ccc-analyze to ensure that we can locate the failures
# directory.
SBOptions += "--override-compiler "
try:
SBCommandFile = open(BuildScriptPath, "r")
SBPrefix = "scan-build " + SBOptions + " "
for Command in SBCommandFile:
Command = Command.strip()
if len(Command) == 0:
continue;
# If using 'make', auto imply a -jX argument
# to speed up analysis. xcodebuild will
# automatically use the maximum number of cores.
if (Command.startswith("make ") or Command == "make") and \
"-j" not in Command:
Command += " -j%d" % Jobs
SBCommand = SBPrefix + Command
if Verbose == 1:
print " Executing: %s" % (SBCommand,)
check_call(SBCommand, cwd = SBCwd, stderr=PBuildLogFile,
stdout=PBuildLogFile,
shell=True)
except:
print "Error: scan-build failed. See ",PBuildLogFile.name,\
" for details."
raise
def hasNoExtension(FileName):
    (Root, Ext) = os.path.splitext(FileName)
    return Ext == ""
def isValidSingleInputFile(FileName):
    (Root, Ext) = os.path.splitext(FileName)
    return Ext in (".i", ".ii", ".c", ".cpp", ".m", "")
# Get the path to the SDK for the given SDK name. Returns None if
# the path cannot be determined.
def getSDKPath(SDKName):
if which("xcrun") is None:
return None
Cmd = "xcrun --sdk " + SDKName + " --show-sdk-path"
return check_output(Cmd, shell=True).rstrip()
# Run analysis on a set of preprocessed files.
def runAnalyzePreprocessed(Dir, SBOutputDir, Mode):
if os.path.exists(os.path.join(Dir, BuildScript)):
print "Error: The preprocessed files project should not contain %s" % \
BuildScript
raise Exception()
CmdPrefix = Clang + " -cc1 "
# For now, we assume the preprocessed files should be analyzed
# with the OS X SDK.
SDKPath = getSDKPath("macosx")
if SDKPath is not None:
CmdPrefix += "-isysroot " + SDKPath + " "
CmdPrefix += "-analyze -analyzer-output=plist -w "
CmdPrefix += "-analyzer-checker=" + Checkers +" -fcxx-exceptions -fblocks "
if (Mode == 2) :
CmdPrefix += "-std=c++11 "
PlistPath = os.path.join(Dir, SBOutputDir, "date")
FailPath = os.path.join(PlistPath, "failures");
os.makedirs(FailPath);
for FullFileName in glob.glob(Dir + "/*"):
FileName = os.path.basename(FullFileName)
Failed = False
        # Only run the analysis on supported files.
if (hasNoExtension(FileName)):
continue
        if not isValidSingleInputFile(FileName):
print "Error: Invalid single input file %s." % (FullFileName,)
raise Exception()
# Build and call the analyzer command.
OutputOption = "-o '%s.plist' " % os.path.join(PlistPath, FileName)
Command = CmdPrefix + OutputOption + ("'%s'" % FileName)
LogFile = open(os.path.join(FailPath, FileName + ".stderr.txt"), "w+b")
try:
if Verbose == 1:
print " Executing: %s" % (Command,)
check_call(Command, cwd = Dir, stderr=LogFile,
stdout=LogFile,
shell=True)
except CalledProcessError, e:
print "Error: Analyzes of %s failed. See %s for details." \
"Error code %d." % \
(FullFileName, LogFile.name, e.returncode)
Failed = True
finally:
LogFile.close()
# If command did not fail, erase the log file.
if Failed == False:
os.remove(LogFile.name);
def getBuildLogPath(SBOutputDir):
return os.path.join(SBOutputDir, LogFolderName, BuildLogName)
def removeLogFile(SBOutputDir):
BuildLogPath = getBuildLogPath(SBOutputDir)
# Clean up the log file.
if (os.path.exists(BuildLogPath)) :
RmCommand = "rm '%s'" % BuildLogPath
if Verbose == 1:
print " Executing: %s" % (RmCommand,)
check_call(RmCommand, shell=True)
def buildProject(Dir, SBOutputDir, ProjectBuildMode, IsReferenceBuild):
TBegin = time.time()
BuildLogPath = getBuildLogPath(SBOutputDir)
print "Log file: %s" % (BuildLogPath,)
print "Output directory: %s" %(SBOutputDir, )
removeLogFile(SBOutputDir)
# Clean up scan build results.
if (os.path.exists(SBOutputDir)) :
RmCommand = "rm -r '%s'" % SBOutputDir
if Verbose == 1:
print " Executing: %s" % (RmCommand,)
check_call(RmCommand, shell=True)
assert(not os.path.exists(SBOutputDir))
os.makedirs(os.path.join(SBOutputDir, LogFolderName))
# Open the log file.
PBuildLogFile = open(BuildLogPath, "wb+")
# Build and analyze the project.
try:
if (ProjectBuildMode == 1):
downloadAndPatch(Dir, PBuildLogFile)
runCleanupScript(Dir, PBuildLogFile)
runScanBuild(Dir, SBOutputDir, PBuildLogFile)
else:
runAnalyzePreprocessed(Dir, SBOutputDir, ProjectBuildMode)
if IsReferenceBuild :
runCleanupScript(Dir, PBuildLogFile)
# Make the absolute paths relative in the reference results.
for (DirPath, Dirnames, Filenames) in os.walk(SBOutputDir):
for F in Filenames:
if (not F.endswith('plist')):
continue
Plist = os.path.join(DirPath, F)
Data = plistlib.readPlist(Plist)
PathPrefix = Dir
if (ProjectBuildMode == 1):
PathPrefix = os.path.join(Dir, PatchedSourceDirName)
Paths = [SourceFile[len(PathPrefix)+1:]\
if SourceFile.startswith(PathPrefix)\
else SourceFile for SourceFile in Data['files']]
Data['files'] = Paths
plistlib.writePlist(Data, Plist)
finally:
PBuildLogFile.close()
print "Build complete (time: %.2f). See the log for more details: %s" % \
((time.time()-TBegin), BuildLogPath)
# A plist file is created for each call to the analyzer (each source file).
# We are only interested in the ones that have bug reports, so delete the rest.
def CleanUpEmptyPlists(SBOutputDir):
for F in glob.glob(SBOutputDir + "/*/*.plist"):
P = os.path.join(SBOutputDir, F)
Data = plistlib.readPlist(P)
# Delete empty reports.
if not Data['files']:
os.remove(P)
continue
# Given the scan-build output directory, checks if the build failed
# (by searching for the failures directories). If there are failures, it
# creates a summary file in the output directory.
def checkBuild(SBOutputDir):
# Check if there are failures.
Failures = glob.glob(SBOutputDir + "/*/failures/*.stderr.txt")
TotalFailed = len(Failures);
if TotalFailed == 0:
CleanUpEmptyPlists(SBOutputDir)
Plists = glob.glob(SBOutputDir + "/*/*.plist")
print "Number of bug reports (non-empty plist files) produced: %d" %\
len(Plists)
return;
# Create summary file to display when the build fails.
SummaryPath = os.path.join(SBOutputDir, LogFolderName, FailuresSummaryFileName)
if (Verbose > 0):
print " Creating the failures summary file %s" % (SummaryPath,)
SummaryLog = open(SummaryPath, "w+")
try:
SummaryLog.write("Total of %d failures discovered.\n" % (TotalFailed,))
if TotalFailed > NumOfFailuresInSummary:
SummaryLog.write("See the first %d below.\n"
% (NumOfFailuresInSummary,))
# TODO: Add a line "See the results folder for more."
FailuresCopied = NumOfFailuresInSummary
Idx = 0
for FailLogPathI in Failures:
if Idx >= NumOfFailuresInSummary:
break;
Idx += 1
SummaryLog.write("\n-- Error #%d -----------\n" % (Idx,));
FailLogI = open(FailLogPathI, "r");
try:
shutil.copyfileobj(FailLogI, SummaryLog);
finally:
FailLogI.close()
finally:
SummaryLog.close()
print "Error: analysis failed. See ", SummaryPath
sys.exit(-1)
# Auxiliary object to discard stdout.
class Discarder(object):
def write(self, text):
pass # do nothing
# Compare the warnings produced by scan-build.
# Strictness defines the success criteria for the test:
# 0 - success if there are no crashes or analyzer failure.
# 1 - success if there are no difference in the number of reported bugs.
# 2 - success if all the bug reports are identical.
def runCmpResults(Dir, Strictness = 0):
TBegin = time.time()
RefDir = os.path.join(Dir, SBOutputDirReferencePrefix + SBOutputDirName)
NewDir = os.path.join(Dir, SBOutputDirName)
# We have to go one level down the directory tree.
RefList = glob.glob(RefDir + "/*")
NewList = glob.glob(NewDir + "/*")
# Log folders are also located in the results dir, so ignore them.
RefLogDir = os.path.join(RefDir, LogFolderName)
if RefLogDir in RefList:
RefList.remove(RefLogDir)
NewList.remove(os.path.join(NewDir, LogFolderName))
if len(RefList) == 0 or len(NewList) == 0:
return False
assert(len(RefList) == len(NewList))
    # There might be more than one folder underneath - one for each scan-build
# command (Ex: one for configure and one for make).
if (len(RefList) > 1):
# Assume that the corresponding folders have the same names.
RefList.sort()
NewList.sort()
# Iterate and find the differences.
NumDiffs = 0
PairList = zip(RefList, NewList)
for P in PairList:
RefDir = P[0]
NewDir = P[1]
assert(RefDir != NewDir)
if Verbose == 1:
print " Comparing Results: %s %s" % (RefDir, NewDir)
DiffsPath = os.path.join(NewDir, DiffsSummaryFileName)
PatchedSourceDirPath = os.path.join(Dir, PatchedSourceDirName)
Opts = CmpRuns.CmpOptions(DiffsPath, "", PatchedSourceDirPath)
        # Discard everything coming out of stdout (CmpRuns produces a lot of it).
OLD_STDOUT = sys.stdout
sys.stdout = Discarder()
# Scan the results, delete empty plist files.
NumDiffs, ReportsInRef, ReportsInNew = \
CmpRuns.dumpScanBuildResultsDiff(RefDir, NewDir, Opts, False)
sys.stdout = OLD_STDOUT
if (NumDiffs > 0) :
print "Warning: %r differences in diagnostics. See %s" % \
(NumDiffs, DiffsPath,)
if Strictness >= 2 and NumDiffs > 0:
print "Error: Diffs found in strict mode (2)."
sys.exit(-1)
elif Strictness >= 1 and ReportsInRef != ReportsInNew:
print "Error: The number of results are different in strict mode (1)."
sys.exit(-1)
print "Diagnostic comparison complete (time: %.2f)." % (time.time()-TBegin)
return (NumDiffs > 0)
def cleanupReferenceResults(SBOutputDir):
# Delete html, css, and js files from reference results. These can
# include multiple copies of the benchmark source and so get very large.
Extensions = ["html", "css", "js"]
for E in Extensions:
for F in glob.glob("%s/*/*.%s" % (SBOutputDir, E)):
P = os.path.join(SBOutputDir, F)
RmCommand = "rm '%s'" % P
check_call(RmCommand, shell=True)
# Remove the log file. It leaks absolute path names.
removeLogFile(SBOutputDir)
def updateSVN(Mode, ProjectsMap):
try:
ProjectsMap.seek(0)
for I in csv.reader(ProjectsMap):
ProjName = I[0]
Path = os.path.join(ProjName, getSBOutputDirName(True))
if Mode == "delete":
Command = "svn delete '%s'" % (Path,)
else:
Command = "svn add '%s'" % (Path,)
if Verbose == 1:
print " Executing: %s" % (Command,)
check_call(Command, shell=True)
if Mode == "delete":
CommitCommand = "svn commit -m \"[analyzer tests] Remove " \
"reference results.\""
else:
CommitCommand = "svn commit -m \"[analyzer tests] Add new " \
"reference results.\""
if Verbose == 1:
print " Executing: %s" % (CommitCommand,)
check_call(CommitCommand, shell=True)
except:
print "Error: SVN update failed."
sys.exit(-1)
def testProject(ID, ProjectBuildMode, IsReferenceBuild=False, Dir=None, Strictness = 0):
print " \n\n--- Building project %s" % (ID,)
TBegin = time.time()
if Dir is None :
Dir = getProjectDir(ID)
if Verbose == 1:
print " Build directory: %s." % (Dir,)
# Set the build results directory.
RelOutputDir = getSBOutputDirName(IsReferenceBuild)
SBOutputDir = os.path.join(Dir, RelOutputDir)
buildProject(Dir, SBOutputDir, ProjectBuildMode, IsReferenceBuild)
checkBuild(SBOutputDir)
if IsReferenceBuild == False:
runCmpResults(Dir, Strictness)
else:
cleanupReferenceResults(SBOutputDir)
print "Completed tests for project %s (time: %.2f)." % \
(ID, (time.time()-TBegin))
def isCommentCSVLine(Entries):
# Treat CSV lines starting with a '#' as a comment.
return len(Entries) > 0 and Entries[0].startswith("#")
def testAll(IsReferenceBuild = False, UpdateSVN = False, Strictness = 0):
PMapFile = open(getProjectMapPath(), "rb")
try:
# Validate the input.
for I in csv.reader(PMapFile):
if (isCommentCSVLine(I)):
continue
if (len(I) != 2) :
print "Error: Rows in the ProjectMapFile should have 3 entries."
raise Exception()
            if I[1] not in ("0", "1", "2"):
print "Error: Second entry in the ProjectMapFile should be 0" \
" (single file), 1 (project), or 2(single file c++11)."
raise Exception()
# When we are regenerating the reference results, we might need to
# update svn. Remove reference results from SVN.
if UpdateSVN == True:
assert(IsReferenceBuild == True);
updateSVN("delete", PMapFile);
# Test the projects.
PMapFile.seek(0)
for I in csv.reader(PMapFile):
if isCommentCSVLine(I):
continue;
testProject(I[0], int(I[1]), IsReferenceBuild, None, Strictness)
# Add reference results to SVN.
if UpdateSVN == True:
updateSVN("add", PMapFile);
except:
print "Error occurred. Premature termination."
raise
finally:
PMapFile.close()
if __name__ == '__main__':
# Parse command line arguments.
Parser = argparse.ArgumentParser(description='Test the Clang Static Analyzer.')
Parser.add_argument('--strictness', dest='strictness', type=int, default=0,
help='0 to fail on runtime errors, 1 to fail when the number\
of found bugs are different from the reference, 2 to \
fail on any difference from the reference. Default is 0.')
Parser.add_argument('-r', dest='regenerate', action='store_true', default=False,
help='Regenerate reference output.')
Parser.add_argument('-rs', dest='update_reference', action='store_true',
default=False, help='Regenerate reference output and update svn.')
Args = Parser.parse_args()
IsReference = False
UpdateSVN = False
Strictness = Args.strictness
if Args.regenerate:
IsReference = True
elif Args.update_reference:
IsReference = True
UpdateSVN = True
testAll(IsReference, UpdateSVN, Strictness)
| 26,878 | 35.921703 | 122 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/utils/perf-training/perf-helper.py | #===- perf-helper.py - Clang Python Bindings -----------------*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
from __future__ import print_function
import sys
import os
import subprocess
import argparse
import time
import bisect
import shlex
import tempfile
import random
test_env = { 'PATH' : os.environ['PATH'] }
def findFilesWithExtension(path, extension):
filenames = []
for root, dirs, files in os.walk(path):
for filename in files:
if filename.endswith(extension):
filenames.append(os.path.join(root, filename))
return filenames
def clean(args):
if len(args) != 2:
print('Usage: %s clean <path> <extension>\n' % __file__ +
'\tRemoves all files with extension from <path>.')
return 1
for filename in findFilesWithExtension(args[0], args[1]):
os.remove(filename)
return 0
def merge(args):
if len(args) != 3:
    print('Usage: %s merge <llvm-profdata> <output> <path>\n' % __file__ +
'\tMerges all profraw files from path into output.')
return 1
cmd = [args[0], 'merge', '-o', args[1]]
cmd.extend(findFilesWithExtension(args[2], "profraw"))
subprocess.check_call(cmd)
return 0
def dtrace(args):
parser = argparse.ArgumentParser(prog='perf-helper dtrace',
description='dtrace wrapper for order file generation')
parser.add_argument('--buffer-size', metavar='size', type=int, required=False,
default=1, help='dtrace buffer size in MB (default 1)')
parser.add_argument('--use-oneshot', required=False, action='store_true',
help='Use dtrace\'s oneshot probes')
parser.add_argument('--use-ustack', required=False, action='store_true',
help='Use dtrace\'s ustack to print function names')
parser.add_argument('--cc1', required=False, action='store_true',
help='Execute cc1 directly (don\'t profile the driver)')
parser.add_argument('cmd', nargs='*', help='')
# Use python's arg parser to handle all leading option arguments, but pass
# everything else through to dtrace
first_cmd = next(arg for arg in args if not arg.startswith("--"))
last_arg_idx = args.index(first_cmd)
opts = parser.parse_args(args[:last_arg_idx])
cmd = args[last_arg_idx:]
if opts.cc1:
cmd = get_cc1_command_for_args(cmd, test_env)
if opts.use_oneshot:
target = "oneshot$target:::entry"
else:
target = "pid$target:::entry"
predicate = '%s/probemod=="%s"/' % (target, os.path.basename(cmd[0]))
log_timestamp = 'printf("dtrace-TS: %d\\n", timestamp)'
if opts.use_ustack:
action = 'ustack(1);'
else:
action = 'printf("dtrace-Symbol: %s\\n", probefunc);'
dtrace_script = "%s { %s; %s }" % (predicate, log_timestamp, action)
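  # For example (hypothetical command basename 'clang'), with --use-oneshot
  # and without --use-ustack the generated script is (wrapped here for
  # readability):
  #   oneshot$target:::entry/probemod=="clang"/
  #   { printf("dtrace-TS: %d\n", timestamp);
  #     printf("dtrace-Symbol: %s\n", probefunc); }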
dtrace_args = []
if not os.geteuid() == 0:
print(
'Script must be run as root, or you must add the following to your sudoers:'
      + '\n%admin ALL=(ALL) NOPASSWD: /usr/sbin/dtrace')
dtrace_args.append("sudo")
dtrace_args.extend((
'dtrace', '-xevaltime=exec',
'-xbufsize=%dm' % (opts.buffer_size),
'-q', '-n', dtrace_script,
'-c', ' '.join(cmd)))
if sys.platform == "darwin":
dtrace_args.append('-xmangled')
start_time = time.time()
with open("%d.dtrace" % os.getpid(), "w") as f:
f.write("### Command: %s" % dtrace_args)
subprocess.check_call(dtrace_args, stdout=f, stderr=subprocess.PIPE)
elapsed = time.time() - start_time
print("... data collection took %.4fs" % elapsed)
return 0
def get_cc1_command_for_args(cmd, env):
# Find the cc1 command used by the compiler. To do this we execute the
# compiler with '-###' to figure out what it wants to do.
cmd = cmd + ['-###']
cc_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env).strip()
cc_commands = []
for ln in cc_output.split('\n'):
# Filter out known garbage.
if (ln == 'Using built-in specs.' or
ln.startswith('Configured with:') or
ln.startswith('Target:') or
ln.startswith('Thread model:') or
ln.startswith('InstalledDir:') or
ln.startswith('LLVM Profile Note') or
' version ' in ln):
continue
cc_commands.append(ln)
if len(cc_commands) != 1:
print('Fatal error: unable to determine cc1 command: %r' % cc_output)
exit(1)
cc1_cmd = shlex.split(cc_commands[0])
if not cc1_cmd:
print('Fatal error: unable to determine cc1 command: %r' % cc_output)
exit(1)
return cc1_cmd
def cc1(args):
parser = argparse.ArgumentParser(prog='perf-helper cc1',
description='cc1 wrapper for order file generation')
parser.add_argument('cmd', nargs='*', help='')
  # Use python's arg parser to handle all leading option arguments, but pass
  # everything else through to the compiler.
first_cmd = next(arg for arg in args if not arg.startswith("--"))
last_arg_idx = args.index(first_cmd)
opts = parser.parse_args(args[:last_arg_idx])
cmd = args[last_arg_idx:]
# clear the profile file env, so that we don't generate profdata
# when capturing the cc1 command
  cc1_env = dict(test_env)
cc1_env["LLVM_PROFILE_FILE"] = os.devnull
cc1_cmd = get_cc1_command_for_args(cmd, cc1_env)
subprocess.check_call(cc1_cmd)
return 0
def parse_dtrace_symbol_file(path, all_symbols, all_symbols_set,
missing_symbols, opts):
def fix_mangling(symbol):
if sys.platform == "darwin":
if symbol[0] != '_' and symbol != 'start':
symbol = '_' + symbol
return symbol
def get_symbols_with_prefix(symbol):
start_index = bisect.bisect_left(all_symbols, symbol)
for s in all_symbols[start_index:]:
if not s.startswith(symbol):
break
yield s
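  # E.g. (hypothetical symbols): with all_symbols == ['_bar', '_foo',
  # '_foobar', '_qux'], get_symbols_with_prefix('_foo') yields '_foo' and
  # '_foobar'.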
# Extract the list of symbols from the given file, which is assumed to be
# the output of a dtrace run logging either probefunc or ustack(1) and
# nothing else. The dtrace -xdemangle option needs to be used.
#
# This is particular to OS X at the moment, because of the '_' handling.
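  # A minimal (hypothetical) sample of the input this parser accepts:
  #   dtrace-TS: 123456789
  #   dtrace-Symbol: _main
  #   dtrace-TS: 123457890
  #   dtrace-Symbol: libclang.dylib`clang_createIndex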
with open(path) as f:
current_timestamp = None
for ln in f:
# Drop leading and trailing whitespace.
ln = ln.strip()
if not ln.startswith("dtrace-"):
continue
# If this is a timestamp specifier, extract it.
if ln.startswith("dtrace-TS: "):
_,data = ln.split(': ', 1)
if not data.isdigit():
print("warning: unrecognized timestamp line %r, ignoring" % ln,
file=sys.stderr)
continue
current_timestamp = int(data)
continue
elif ln.startswith("dtrace-Symbol: "):
_,ln = ln.split(': ', 1)
if not ln:
continue
# If there is a '`' in the line, assume it is a ustack(1) entry in
# the form of <modulename>`<modulefunc>, where <modulefunc> is never
# truncated (but does need the mangling patched).
if '`' in ln:
yield (current_timestamp, fix_mangling(ln.split('`',1)[1]))
continue
# Otherwise, assume this is a probefunc printout. DTrace on OS X
# seems to have a bug where it prints the mangled version of symbols
# which aren't C++ mangled. We just add a '_' to anything but start
# which doesn't already have a '_'.
symbol = fix_mangling(ln)
# If we don't know all the symbols, or the symbol is one of them,
# just return it.
if not all_symbols_set or symbol in all_symbols_set:
yield (current_timestamp, symbol)
continue
# Otherwise, we have a symbol name which isn't present in the
# binary. We assume it is truncated, and try to extend it.
# Get all the symbols with this prefix.
possible_symbols = list(get_symbols_with_prefix(symbol))
if not possible_symbols:
continue
# If we found too many possible symbols, ignore this as a prefix.
if len(possible_symbols) > 100:
print( "warning: ignoring symbol %r " % symbol +
"(no match and too many possible suffixes)", file=sys.stderr)
continue
# Report that we resolved a missing symbol.
if opts.show_missing_symbols and symbol not in missing_symbols:
print("warning: resolved missing symbol %r" % symbol, file=sys.stderr)
missing_symbols.add(symbol)
# Otherwise, treat all the possible matches as having occurred. This
# is an over-approximation, but it should be ok in practice.
for s in possible_symbols:
yield (current_timestamp, s)
def uniq(items):
  seen = set()
  for item in items:
    if item not in seen:
      yield item
      seen.add(item)
def form_by_call_order(symbol_lists):
  # Simple strategy: just return symbols in order of occurrence, even across
# multiple runs.
return uniq(s for symbols in symbol_lists for s in symbols)
def form_by_call_order_fair(symbol_lists):
# More complicated strategy that tries to respect the call order across all
# of the test cases, instead of giving a huge preference to the first test
# case.
# First, uniq all the lists.
uniq_lists = [list(uniq(symbols)) for symbols in symbol_lists]
# Compute the successors for each list.
succs = {}
for symbols in uniq_lists:
for a,b in zip(symbols[:-1], symbols[1:]):
succs[a] = items = succs.get(a, [])
if b not in items:
items.append(b)
# Emit all the symbols, but make sure to always emit all successors from any
# call list whenever we see a symbol.
#
# There isn't much science here, but this sometimes works better than the
  # more naive strategy. Then again, sometimes it doesn't, so more research is
# probably needed.
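  # Worked example (hypothetical symbols): for call lists [A, B, C] and
  # [A, C, D], succs is {A: [B, C], B: [C], C: [D]}; emitting each symbol
  # followed by its successors and uniq'ing the result yields A, B, C, D.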
return uniq(s
for symbols in symbol_lists
for node in symbols
for s in ([node] + succs.get(node,[])))
def form_by_frequency(symbol_lists):
# Form the order file by just putting the most commonly occurring symbols
# first. This assumes the data files didn't use the oneshot dtrace method.
counts = {}
for symbols in symbol_lists:
for a in symbols:
counts[a] = counts.get(a,0) + 1
by_count = counts.items()
by_count.sort(key = lambda (_,n): -n)
return [s for s,n in by_count]
def form_by_random(symbol_lists):
  # Randomize the symbols.
  merged_symbols = list(uniq(s for symbols in symbol_lists
                             for s in symbols))
  random.shuffle(merged_symbols)
  return merged_symbols
def form_by_alphabetical(symbol_lists):
# Alphabetize the symbols.
merged_symbols = list(set(s for symbols in symbol_lists for s in symbols))
merged_symbols.sort()
return merged_symbols
methods = dict((name[len("form_by_"):],value)
for name,value in locals().items() if name.startswith("form_by_"))
def genOrderFile(args):
parser = argparse.ArgumentParser(
"%prog [options] <dtrace data file directories>]")
parser.add_argument('input', nargs='+', help='')
parser.add_argument("--binary", metavar="PATH", type=str, dest="binary_path",
help="Path to the binary being ordered (for getting all symbols)",
default=None)
parser.add_argument("--output", dest="output_path",
help="path to output order file to write", default=None, required=True,
metavar="PATH")
parser.add_argument("--show-missing-symbols", dest="show_missing_symbols",
help="show symbols which are 'fixed up' to a valid name (requires --binary)",
action="store_true", default=None)
parser.add_argument("--output-unordered-symbols",
dest="output_unordered_symbols_path",
help="write a list of the unordered symbols to PATH (requires --binary)",
default=None, metavar="PATH")
parser.add_argument("--method", dest="method",
help="order file generation method to use", choices=methods.keys(),
default='call_order')
opts = parser.parse_args(args)
# If the user gave us a binary, get all the symbols in the binary by
# snarfing 'nm' output.
if opts.binary_path is not None:
output = subprocess.check_output(['nm', '-P', opts.binary_path])
lines = output.split("\n")
all_symbols = [ln.split(' ',1)[0]
for ln in lines
if ln.strip()]
print("found %d symbols in binary" % len(all_symbols))
all_symbols.sort()
else:
all_symbols = []
all_symbols_set = set(all_symbols)
# Compute the list of input files.
input_files = []
for dirname in opts.input:
input_files.extend(findFilesWithExtension(dirname, "dtrace"))
# Load all of the input files.
print("loading from %d data files" % len(input_files))
missing_symbols = set()
timestamped_symbol_lists = [
list(parse_dtrace_symbol_file(path, all_symbols, all_symbols_set,
missing_symbols, opts))
for path in input_files]
# Reorder each symbol list.
symbol_lists = []
for timestamped_symbols_list in timestamped_symbol_lists:
timestamped_symbols_list.sort()
symbol_lists.append([symbol for _,symbol in timestamped_symbols_list])
  # Execute the desired order file generation method.
method = methods.get(opts.method)
result = list(method(symbol_lists))
# Report to the user on what percentage of symbols are present in the order
# file.
num_ordered_symbols = len(result)
if all_symbols:
print("note: order file contains %d/%d symbols (%.2f%%)" % (
num_ordered_symbols, len(all_symbols),
100.*num_ordered_symbols/len(all_symbols)), file=sys.stderr)
if opts.output_unordered_symbols_path:
ordered_symbols_set = set(result)
with open(opts.output_unordered_symbols_path, 'w') as f:
f.write("\n".join(s for s in all_symbols if s not in ordered_symbols_set))
# Write the order file.
with open(opts.output_path, 'w') as f:
f.write("\n".join(result))
f.write("\n")
return 0
commands = {'clean' : clean,
'merge' : merge,
'dtrace' : dtrace,
'cc1' : cc1,
'gen-order-file' : genOrderFile}
def main():
f = commands[sys.argv[1]]
sys.exit(f(sys.argv[2:]))
if __name__ == '__main__':
main()
| 14,154 | 33.608802 | 85 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/clang/utils/TestUtils/deep-stack.py | #!/usr/bin/env python
def pcall(f, N):
if N == 0:
print >>f, ' f(0)'
return
print >>f, ' f('
pcall(f, N - 1)
print >>f, ' )'
def main():
f = open('t.c','w')
print >>f, 'int f(int n) { return n; }'
print >>f, 'int t() {'
print >>f, ' return'
pcall(f, 10000)
print >>f, ' ;'
print >>f, '}'
if __name__ == "__main__":
import sys
sys.setrecursionlimit(100000)
main()
| 451 | 17.08 | 43 | py |
LowFat | LowFat-master/llvm-4.0.0.src/tools/sancov/coverage-report-server.py | #!/usr/bin/env python3
#===- symcov-report-server.py - Coverage Reports HTTP Serve --*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
'''(EXPERIMENTAL) HTTP server to browse coverage reports from .symcov files.
Coverage reports for big binaries are too huge, generating them statically
makes no sense. Start the server and go to localhost:8001 instead.
Usage:
./tools/sancov/symcov-report-server.py \
--symcov coverage_data.symcov \
--srcpath root_src_dir
Other options:
--port port_number - specifies the port to use (8001)
--host host_name - host name to bind server to (127.0.0.1)
'''
import argparse
import http.server
import json
import socketserver
import time
import html
import os
import string
import math
INDEX_PAGE_TMPL = """
<html>
<head>
<title>Coverage Report</title>
<style>
.lz { color: lightgray; }
</style>
</head>
<body>
<table>
<tr><th>File</th><th>Coverage</th></tr>
<tr><td><em>Files with 0 coverage are not shown.</em></td></tr>
$filenames
</table>
</body>
</html>
"""
CONTENT_PAGE_TMPL = """
<html>
<head>
<title>$path</title>
<style>
.covered { background: lightgreen; }
.not-covered { background: lightcoral; }
.partially-covered { background: navajowhite; }
.lz { color: lightgray; }
</style>
</head>
<body>
<pre>
$content
</pre>
</body>
</html>
"""
class SymcovData:
def __init__(self, symcov_json):
self.covered_points = frozenset(symcov_json['covered-points'])
self.point_symbol_info = symcov_json['point-symbol-info']
self.file_coverage = self.compute_filecoverage()
def filenames(self):
return self.point_symbol_info.keys()
def has_file(self, filename):
return filename in self.point_symbol_info
def compute_linemap(self, filename):
"""Build a line_number->css_class map."""
        fns = self.point_symbol_info.get(filename, dict())
        line_to_points = dict()
        for fn, points in fns.items():
for point, loc in points.items():
line = int(loc.split(":")[0])
line_to_points.setdefault(line, []).append(point)
result = dict()
for line, points in line_to_points.items():
status = "covered"
covered_points = self.covered_points & set(points)
if not len(covered_points):
status = "not-covered"
elif len(covered_points) != len(points):
status = "partially-covered"
result[line] = status
return result
def compute_filecoverage(self):
"""Build a filename->pct coverage."""
result = dict()
for filename, fns in self.point_symbol_info.items():
file_points = []
for fn, points in fns.items():
file_points.extend(points.keys())
covered_points = self.covered_points & set(file_points)
result[filename] = int(math.ceil(
len(covered_points) * 100 / len(file_points)))
return result
def format_pct(pct):
pct_str = str(max(0, min(100, pct)))
zeroes = '0' * (3 - len(pct_str))
if zeroes:
zeroes = '<span class="lz">{0}</span>'.format(zeroes)
return zeroes + pct_str
class ServerHandler(http.server.BaseHTTPRequestHandler):
symcov_data = None
src_path = None
def do_GET(self):
if self.path == '/':
self.send_response(200)
self.send_header("Content-type", "text/html; charset=utf-8")
self.end_headers()
filelist = []
for filename in sorted(self.symcov_data.filenames()):
file_coverage = self.symcov_data.file_coverage[filename]
if not file_coverage:
continue
filelist.append(
"<tr><td><a href=\"/{name}\">{name}</a></td>"
"<td>{coverage}%</td></tr>".format(
name=html.escape(filename, quote=True),
coverage=format_pct(file_coverage)))
response = string.Template(INDEX_PAGE_TMPL).safe_substitute(
filenames='\n'.join(filelist))
self.wfile.write(response.encode('UTF-8', 'replace'))
elif self.symcov_data.has_file(self.path[1:]):
filename = self.path[1:]
filepath = os.path.join(self.src_path, filename)
if not os.path.exists(filepath):
self.send_response(404)
self.end_headers()
return
self.send_response(200)
self.send_header("Content-type", "text/html; charset=utf-8")
self.end_headers()
linemap = self.symcov_data.compute_linemap(filename)
with open(filepath, 'r') as f:
content = "\n".join(
["<span class='{cls}'>{line} </span>".format(
line=html.escape(line.rstrip()),
cls=linemap.get(line_no, ""))
for line_no, line in enumerate(f)])
response = string.Template(CONTENT_PAGE_TMPL).safe_substitute(
path=self.path[1:],
content=content)
self.wfile.write(response.encode('UTF-8', 'replace'))
else:
self.send_response(404)
self.end_headers()
def main():
parser = argparse.ArgumentParser(description="symcov report http server.")
parser.add_argument('--host', default='127.0.0.1')
parser.add_argument('--port', default=8001)
parser.add_argument('--symcov', required=True, type=argparse.FileType('r'))
parser.add_argument('--srcpath', required=True)
args = parser.parse_args()
print("Loading coverage...")
symcov_json = json.load(args.symcov)
ServerHandler.symcov_data = SymcovData(symcov_json)
ServerHandler.src_path = args.srcpath
socketserver.TCPServer.allow_reuse_address = True
httpd = socketserver.TCPServer((args.host, args.port), ServerHandler)
print("Serving at {host}:{port}".format(host=args.host, port=args.port))
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
if __name__ == '__main__':
main()
| 6,519 | 30.960784 | 80 | py |
LowFat | LowFat-master/llvm-4.0.0.src/bindings/python/llvm/enumerations.py | #===- enumerations.py - Python LLVM Enumerations -------------*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
r"""
LLVM Enumerations
=================
This file defines enumerations from LLVM.
Each enumeration is exposed as a list of 2-tuples. These lists are consumed by
dedicated types elsewhere in the package. The enumerations are centrally
defined in this file so they are easier to locate and maintain.
"""
__all__ = [
'Attributes',
'OpCodes',
'TypeKinds',
'Linkages',
'Visibility',
'CallConv',
'IntPredicate',
'RealPredicate',
'LandingPadClauseTy',
]
Attributes = [
('ZExt', 1 << 0),
('MSExt', 1 << 1),
('NoReturn', 1 << 2),
('InReg', 1 << 3),
('StructRet', 1 << 4),
('NoUnwind', 1 << 5),
('NoAlias', 1 << 6),
('ByVal', 1 << 7),
('Nest', 1 << 8),
('ReadNone', 1 << 9),
('ReadOnly', 1 << 10),
('NoInline', 1 << 11),
('AlwaysInline', 1 << 12),
('OptimizeForSize', 1 << 13),
('StackProtect', 1 << 14),
('StackProtectReq', 1 << 15),
('Alignment', 31 << 16),
('NoCapture', 1 << 21),
('NoRedZone', 1 << 22),
('ImplicitFloat', 1 << 23),
('Naked', 1 << 24),
('InlineHint', 1 << 25),
('StackAlignment', 7 << 26),
('ReturnsTwice', 1 << 29),
('UWTable', 1 << 30),
('NonLazyBind', 1 << 31),
]
OpCodes = [
('Ret', 1),
('Br', 2),
('Switch', 3),
('IndirectBr', 4),
('Invoke', 5),
('Unreachable', 7),
('Add', 8),
('FAdd', 9),
('Sub', 10),
('FSub', 11),
('Mul', 12),
('FMul', 13),
('UDiv', 14),
('SDiv', 15),
('FDiv', 16),
('URem', 17),
('SRem', 18),
('FRem', 19),
('Shl', 20),
('LShr', 21),
('AShr', 22),
('And', 23),
('Or', 24),
('Xor', 25),
('Alloca', 26),
('Load', 27),
('Store', 28),
('GetElementPtr', 29),
('Trunc', 30),
('ZExt', 31),
('SExt', 32),
('FPToUI', 33),
('FPToSI', 34),
('UIToFP', 35),
('SIToFP', 36),
('FPTrunc', 37),
('FPExt', 38),
('PtrToInt', 39),
('IntToPtr', 40),
('BitCast', 41),
('ICmp', 42),
('FCmpl', 43),
('PHI', 44),
('Call', 45),
('Select', 46),
('UserOp1', 47),
('UserOp2', 48),
('AArg', 49),
('ExtractElement', 50),
('InsertElement', 51),
('ShuffleVector', 52),
('ExtractValue', 53),
('InsertValue', 54),
('Fence', 55),
('AtomicCmpXchg', 56),
('AtomicRMW', 57),
('Resume', 58),
('LandingPad', 59),
]
TypeKinds = [
('Void', 0),
('Half', 1),
('Float', 2),
('Double', 3),
('X86_FP80', 4),
('FP128', 5),
('PPC_FP128', 6),
('Label', 7),
('Integer', 8),
('Function', 9),
('Struct', 10),
('Array', 11),
('Pointer', 12),
('Vector', 13),
('Metadata', 14),
('X86_MMX', 15),
]
Linkages = [
('External', 0),
('AvailableExternally', 1),
('LinkOnceAny', 2),
('LinkOnceODR', 3),
('WeakAny', 4),
('WeakODR', 5),
('Appending', 6),
('Internal', 7),
('Private', 8),
('DLLImport', 9),
('DLLExport', 10),
('ExternalWeak', 11),
('Ghost', 12),
('Common', 13),
('LinkerPrivate', 14),
('LinkerPrivateWeak', 15),
('LinkerPrivateWeakDefAuto', 16),
]
Visibility = [
('Default', 0),
('Hidden', 1),
('Protected', 2),
]
CallConv = [
('CCall', 0),
('FastCall', 8),
('ColdCall', 9),
('X86StdcallCall', 64),
('X86FastcallCall', 65),
]
IntPredicate = [
('EQ', 32),
('NE', 33),
('UGT', 34),
('UGE', 35),
('ULT', 36),
('ULE', 37),
('SGT', 38),
('SGE', 39),
('SLT', 40),
('SLE', 41),
]
RealPredicate = [
('PredicateFalse', 0),
('OEQ', 1),
('OGT', 2),
('OGE', 3),
('OLT', 4),
('OLE', 5),
('ONE', 6),
('ORD', 7),
('UNO', 8),
('UEQ', 9),
('UGT', 10),
('UGE', 11),
('ULT', 12),
('ULE', 13),
('UNE', 14),
('PredicateTrue', 15),
]
LandingPadClauseTy = [
('Catch', 0),
('Filter', 1),
]
| 4,249 | 19.04717 | 80 | py |
LowFat | LowFat-master/llvm-4.0.0.src/bindings/python/llvm/core.py | #===- core.py - Python LLVM Bindings -------------------------*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
from .common import LLVMObject
from .common import c_object_p
from .common import get_library
from . import enumerations
from ctypes import POINTER
from ctypes import byref
from ctypes import c_char_p
from ctypes import c_uint
__all__ = [
"lib",
"Enums",
"OpCode",
"MemoryBuffer",
"Module",
"Value",
"Function",
"BasicBlock",
"Instruction",
"Context",
"PassRegistry"
]
lib = get_library()
Enums = []
class LLVMEnumeration(object):
"""Represents an individual LLVM enumeration."""
def __init__(self, name, value):
self.name = name
self.value = value
def __repr__(self):
return '%s.%s' % (self.__class__.__name__,
self.name)
@classmethod
def from_value(cls, value):
"""Obtain an enumeration instance from a numeric value."""
result = cls._value_map.get(value, None)
if result is None:
raise ValueError('Unknown %s: %d' % (cls.__name__,
value))
return result
@classmethod
def register(cls, name, value):
"""Registers a new enumeration.
This is called by this module for each enumeration defined in
enumerations. You should not need to call this outside this module.
"""
if value in cls._value_map:
raise ValueError('%s value already registered: %d' % (cls.__name__,
value))
enum = cls(name, value)
cls._value_map[value] = enum
setattr(cls, name, enum)
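# For example (hypothetical usage), once the enumerations are registered at
# the bottom of this module:
#   OpCode.from_value(1)   # -> OpCode.Ret
#   OpCode.Ret.value       # -> 1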
class Attribute(LLVMEnumeration):
"""Represents an individual Attribute enumeration."""
_value_map = {}
def __init__(self, name, value):
super(Attribute, self).__init__(name, value)
class OpCode(LLVMEnumeration):
"""Represents an individual OpCode enumeration."""
_value_map = {}
def __init__(self, name, value):
super(OpCode, self).__init__(name, value)
class TypeKind(LLVMEnumeration):
"""Represents an individual TypeKind enumeration."""
_value_map = {}
def __init__(self, name, value):
super(TypeKind, self).__init__(name, value)
class Linkage(LLVMEnumeration):
"""Represents an individual Linkage enumeration."""
_value_map = {}
def __init__(self, name, value):
super(Linkage, self).__init__(name, value)
class Visibility(LLVMEnumeration):
"""Represents an individual visibility enumeration."""
_value_map = {}
def __init__(self, name, value):
super(Visibility, self).__init__(name, value)
class CallConv(LLVMEnumeration):
"""Represents an individual calling convention enumeration."""
_value_map = {}
def __init__(self, name, value):
super(CallConv, self).__init__(name, value)
class IntPredicate(LLVMEnumeration):
"""Represents an individual IntPredicate enumeration."""
_value_map = {}
def __init__(self, name, value):
super(IntPredicate, self).__init__(name, value)
class RealPredicate(LLVMEnumeration):
"""Represents an individual RealPredicate enumeration."""
_value_map = {}
def __init__(self, name, value):
super(RealPredicate, self).__init__(name, value)
class LandingPadClauseTy(LLVMEnumeration):
"""Represents an individual LandingPadClauseTy enumeration."""
_value_map = {}
def __init__(self, name, value):
super(LandingPadClauseTy, self).__init__(name, value)
class MemoryBuffer(LLVMObject):
"""Represents an opaque memory buffer."""
def __init__(self, filename=None):
"""Create a new memory buffer.
Currently, we support creating from the contents of a file at the
specified filename.
"""
if filename is None:
raise Exception("filename argument must be defined")
memory = c_object_p()
out = c_char_p(None)
result = lib.LLVMCreateMemoryBufferWithContentsOfFile(filename,
byref(memory), byref(out))
if result:
raise Exception("Could not create memory buffer: %s" % out.value)
LLVMObject.__init__(self, memory, disposer=lib.LLVMDisposeMemoryBuffer)
def __len__(self):
return lib.LLVMGetBufferSize(self)
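# A short usage sketch (the path is hypothetical):
#   mb = MemoryBuffer(filename="/tmp/module.bc")
#   print len(mb)  # size of the buffer in bytes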
class Value(LLVMObject):
def __init__(self, value):
LLVMObject.__init__(self, value)
@property
def name(self):
return lib.LLVMGetValueName(self)
def dump(self):
lib.LLVMDumpValue(self)
def get_operand(self, i):
return Value(lib.LLVMGetOperand(self, i))
def set_operand(self, i, v):
return lib.LLVMSetOperand(self, i, v)
def __len__(self):
return lib.LLVMGetNumOperands(self)
class Module(LLVMObject):
"""Represents the top-level structure of an llvm program in an opaque object."""
def __init__(self, module, name=None, context=None):
LLVMObject.__init__(self, module, disposer=lib.LLVMDisposeModule)
@classmethod
def CreateWithName(cls, module_id):
m = Module(lib.LLVMModuleCreateWithName(module_id))
Context.GetGlobalContext().take_ownership(m)
return m
@property
def datalayout(self):
return lib.LLVMGetDataLayout(self)
@datalayout.setter
def datalayout(self, new_data_layout):
"""new_data_layout is a string."""
lib.LLVMSetDataLayout(self, new_data_layout)
@property
def target(self):
return lib.LLVMGetTarget(self)
@target.setter
def target(self, new_target):
"""new_target is a string."""
lib.LLVMSetTarget(self, new_target)
def dump(self):
lib.LLVMDumpModule(self)
class __function_iterator(object):
def __init__(self, module, reverse=False):
self.module = module
self.reverse = reverse
if self.reverse:
self.function = self.module.last
else:
self.function = self.module.first
def __iter__(self):
return self
def next(self):
if not isinstance(self.function, Function):
raise StopIteration("")
result = self.function
if self.reverse:
self.function = self.function.prev
else:
self.function = self.function.next
return result
def __iter__(self):
return Module.__function_iterator(self)
def __reversed__(self):
return Module.__function_iterator(self, reverse=True)
@property
def first(self):
return Function(lib.LLVMGetFirstFunction(self))
@property
def last(self):
return Function(lib.LLVMGetLastFunction(self))
def print_module_to_file(self, filename):
out = c_char_p(None)
# Result is inverted so 0 means everything was ok.
result = lib.LLVMPrintModuleToFile(self, filename, byref(out))
if result:
raise RuntimeError("LLVM Error: %s" % out.value)
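# A short usage sketch (module name and target triple are hypothetical):
#   m = Module.CreateWithName("demo")
#   m.target = "x86_64-unknown-linux-gnu"
#   for f in m:
#       f.dump()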
class Function(Value):
def __init__(self, value):
Value.__init__(self, value)
@property
def next(self):
f = lib.LLVMGetNextFunction(self)
return f and Function(f)
@property
def prev(self):
f = lib.LLVMGetPreviousFunction(self)
return f and Function(f)
@property
def first(self):
b = lib.LLVMGetFirstBasicBlock(self)
return b and BasicBlock(b)
@property
def last(self):
b = lib.LLVMGetLastBasicBlock(self)
return b and BasicBlock(b)
class __bb_iterator(object):
def __init__(self, function, reverse=False):
self.function = function
self.reverse = reverse
if self.reverse:
self.bb = function.last
else:
self.bb = function.first
def __iter__(self):
return self
def next(self):
if not isinstance(self.bb, BasicBlock):
raise StopIteration("")
result = self.bb
if self.reverse:
self.bb = self.bb.prev
else:
self.bb = self.bb.next
return result
def __iter__(self):
return Function.__bb_iterator(self)
def __reversed__(self):
return Function.__bb_iterator(self, reverse=True)
def __len__(self):
return lib.LLVMCountBasicBlocks(self)
class BasicBlock(LLVMObject):
def __init__(self, value):
LLVMObject.__init__(self, value)
@property
def next(self):
b = lib.LLVMGetNextBasicBlock(self)
return b and BasicBlock(b)
@property
def prev(self):
b = lib.LLVMGetPreviousBasicBlock(self)
return b and BasicBlock(b)
@property
def first(self):
i = lib.LLVMGetFirstInstruction(self)
return i and Instruction(i)
@property
def last(self):
i = lib.LLVMGetLastInstruction(self)
return i and Instruction(i)
def __as_value(self):
return Value(lib.LLVMBasicBlockAsValue(self))
@property
def name(self):
return lib.LLVMGetValueName(self.__as_value())
def dump(self):
lib.LLVMDumpValue(self.__as_value())
def get_operand(self, i):
return Value(lib.LLVMGetOperand(self.__as_value(),
i))
def set_operand(self, i, v):
return lib.LLVMSetOperand(self.__as_value(),
i, v)
def __len__(self):
return lib.LLVMGetNumOperands(self.__as_value())
class __inst_iterator(object):
def __init__(self, bb, reverse=False):
self.bb = bb
self.reverse = reverse
if self.reverse:
self.inst = self.bb.last
else:
self.inst = self.bb.first
def __iter__(self):
return self
def next(self):
if not isinstance(self.inst, Instruction):
raise StopIteration("")
result = self.inst
if self.reverse:
self.inst = self.inst.prev
else:
self.inst = self.inst.next
return result
def __iter__(self):
return BasicBlock.__inst_iterator(self)
def __reversed__(self):
return BasicBlock.__inst_iterator(self, reverse=True)
class Instruction(Value):
def __init__(self, value):
Value.__init__(self, value)
@property
def next(self):
i = lib.LLVMGetNextInstruction(self)
return i and Instruction(i)
@property
def prev(self):
i = lib.LLVMGetPreviousInstruction(self)
return i and Instruction(i)
@property
def opcode(self):
return OpCode.from_value(lib.LLVMGetInstructionOpcode(self))
class Context(LLVMObject):
def __init__(self, context=None):
if context is None:
context = lib.LLVMContextCreate()
LLVMObject.__init__(self, context, disposer=lib.LLVMContextDispose)
else:
LLVMObject.__init__(self, context)
@classmethod
def GetGlobalContext(cls):
return Context(lib.LLVMGetGlobalContext())
class PassRegistry(LLVMObject):
"""Represents an opaque pass registry object."""
def __init__(self):
LLVMObject.__init__(self,
lib.LLVMGetGlobalPassRegistry())
def register_library(library):
# Initialization/Shutdown declarations.
library.LLVMInitializeCore.argtypes = [PassRegistry]
library.LLVMInitializeCore.restype = None
library.LLVMInitializeTransformUtils.argtypes = [PassRegistry]
library.LLVMInitializeTransformUtils.restype = None
library.LLVMInitializeScalarOpts.argtypes = [PassRegistry]
library.LLVMInitializeScalarOpts.restype = None
library.LLVMInitializeObjCARCOpts.argtypes = [PassRegistry]
library.LLVMInitializeObjCARCOpts.restype = None
library.LLVMInitializeVectorization.argtypes = [PassRegistry]
library.LLVMInitializeVectorization.restype = None
library.LLVMInitializeInstCombine.argtypes = [PassRegistry]
library.LLVMInitializeInstCombine.restype = None
library.LLVMInitializeIPO.argtypes = [PassRegistry]
library.LLVMInitializeIPO.restype = None
library.LLVMInitializeInstrumentation.argtypes = [PassRegistry]
library.LLVMInitializeInstrumentation.restype = None
library.LLVMInitializeAnalysis.argtypes = [PassRegistry]
library.LLVMInitializeAnalysis.restype = None
library.LLVMInitializeCodeGen.argtypes = [PassRegistry]
library.LLVMInitializeCodeGen.restype = None
library.LLVMInitializeTarget.argtypes = [PassRegistry]
library.LLVMInitializeTarget.restype = None
library.LLVMShutdown.argtypes = []
library.LLVMShutdown.restype = None
# Pass Registry declarations.
library.LLVMGetGlobalPassRegistry.argtypes = []
library.LLVMGetGlobalPassRegistry.restype = c_object_p
# Context declarations.
library.LLVMContextCreate.argtypes = []
library.LLVMContextCreate.restype = c_object_p
library.LLVMContextDispose.argtypes = [Context]
library.LLVMContextDispose.restype = None
library.LLVMGetGlobalContext.argtypes = []
library.LLVMGetGlobalContext.restype = c_object_p
# Memory buffer declarations
library.LLVMCreateMemoryBufferWithContentsOfFile.argtypes = [c_char_p,
POINTER(c_object_p), POINTER(c_char_p)]
library.LLVMCreateMemoryBufferWithContentsOfFile.restype = bool
library.LLVMGetBufferSize.argtypes = [MemoryBuffer]
library.LLVMDisposeMemoryBuffer.argtypes = [MemoryBuffer]
# Module declarations
library.LLVMModuleCreateWithName.argtypes = [c_char_p]
library.LLVMModuleCreateWithName.restype = c_object_p
library.LLVMDisposeModule.argtypes = [Module]
library.LLVMDisposeModule.restype = None
library.LLVMGetDataLayout.argtypes = [Module]
library.LLVMGetDataLayout.restype = c_char_p
library.LLVMSetDataLayout.argtypes = [Module, c_char_p]
library.LLVMSetDataLayout.restype = None
library.LLVMGetTarget.argtypes = [Module]
library.LLVMGetTarget.restype = c_char_p
library.LLVMSetTarget.argtypes = [Module, c_char_p]
library.LLVMSetTarget.restype = None
library.LLVMDumpModule.argtypes = [Module]
library.LLVMDumpModule.restype = None
library.LLVMPrintModuleToFile.argtypes = [Module, c_char_p,
POINTER(c_char_p)]
library.LLVMPrintModuleToFile.restype = bool
library.LLVMGetFirstFunction.argtypes = [Module]
library.LLVMGetFirstFunction.restype = c_object_p
library.LLVMGetLastFunction.argtypes = [Module]
library.LLVMGetLastFunction.restype = c_object_p
library.LLVMGetNextFunction.argtypes = [Function]
library.LLVMGetNextFunction.restype = c_object_p
library.LLVMGetPreviousFunction.argtypes = [Function]
library.LLVMGetPreviousFunction.restype = c_object_p
# Value declarations.
library.LLVMGetValueName.argtypes = [Value]
library.LLVMGetValueName.restype = c_char_p
library.LLVMDumpValue.argtypes = [Value]
library.LLVMDumpValue.restype = None
library.LLVMGetOperand.argtypes = [Value, c_uint]
library.LLVMGetOperand.restype = c_object_p
library.LLVMSetOperand.argtypes = [Value, Value, c_uint]
library.LLVMSetOperand.restype = None
library.LLVMGetNumOperands.argtypes = [Value]
library.LLVMGetNumOperands.restype = c_uint
# Basic Block Declarations.
library.LLVMGetFirstBasicBlock.argtypes = [Function]
library.LLVMGetFirstBasicBlock.restype = c_object_p
library.LLVMGetLastBasicBlock.argtypes = [Function]
library.LLVMGetLastBasicBlock.restype = c_object_p
library.LLVMGetNextBasicBlock.argtypes = [BasicBlock]
library.LLVMGetNextBasicBlock.restype = c_object_p
library.LLVMGetPreviousBasicBlock.argtypes = [BasicBlock]
library.LLVMGetPreviousBasicBlock.restype = c_object_p
library.LLVMGetFirstInstruction.argtypes = [BasicBlock]
library.LLVMGetFirstInstruction.restype = c_object_p
library.LLVMGetLastInstruction.argtypes = [BasicBlock]
library.LLVMGetLastInstruction.restype = c_object_p
library.LLVMBasicBlockAsValue.argtypes = [BasicBlock]
library.LLVMBasicBlockAsValue.restype = c_object_p
library.LLVMCountBasicBlocks.argtypes = [Function]
library.LLVMCountBasicBlocks.restype = c_uint
# Instruction Declarations.
library.LLVMGetNextInstruction.argtypes = [Instruction]
library.LLVMGetNextInstruction.restype = c_object_p
library.LLVMGetPreviousInstruction.argtypes = [Instruction]
library.LLVMGetPreviousInstruction.restype = c_object_p
library.LLVMGetInstructionOpcode.argtypes = [Instruction]
library.LLVMGetInstructionOpcode.restype = c_uint
def register_enumerations():
if Enums:
return None
enums = [
(Attribute, enumerations.Attributes),
(OpCode, enumerations.OpCodes),
(TypeKind, enumerations.TypeKinds),
(Linkage, enumerations.Linkages),
(Visibility, enumerations.Visibility),
(CallConv, enumerations.CallConv),
(IntPredicate, enumerations.IntPredicate),
(RealPredicate, enumerations.RealPredicate),
(LandingPadClauseTy, enumerations.LandingPadClauseTy),
]
for enum_class, enum_spec in enums:
for name, value in enum_spec:
enum_class.register(name, value)
return enums
def initialize_llvm():
Context.GetGlobalContext()
p = PassRegistry()
lib.LLVMInitializeCore(p)
lib.LLVMInitializeTransformUtils(p)
lib.LLVMInitializeScalarOpts(p)
lib.LLVMInitializeObjCARCOpts(p)
lib.LLVMInitializeVectorization(p)
lib.LLVMInitializeInstCombine(p)
lib.LLVMInitializeIPO(p)
lib.LLVMInitializeInstrumentation(p)
lib.LLVMInitializeAnalysis(p)
lib.LLVMInitializeCodeGen(p)
lib.LLVMInitializeTarget(p)
register_library(lib)
Enums = register_enumerations()
initialize_llvm()
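# Example (editor's sketch, not part of the original bindings): a minimal
# use of the wrappers registered above, mirroring the unit tests. Run via
# 'python -m llvm.core' so the package-relative imports resolve.
if __name__ == '__main__':
    demo = Module.CreateWithName('demo-module')
    demo.target = 'thumbv7-apple-ios5.0.0'  # triple borrowed from the tests
    print demo.target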
| 18,523 | 28.54386 | 84 | py |
LowFat | LowFat-master/llvm-4.0.0.src/bindings/python/llvm/object.py | #===- object.py - Python Object Bindings --------------------*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
r"""
Object File Interface
=====================
This module provides an interface for reading information from object files
(e.g. binary executables and libraries).
Using this module, you can obtain information about an object file's sections,
symbols, and relocations. These are represented by the Section, Symbol, and
Relocation classes, all obtained through an ObjectFile instance.
Usage
-----
The only way to use this module is to start by creating an ObjectFile. You can
create an ObjectFile by loading a file (specified by its path) or by creating a
llvm.core.MemoryBuffer and loading that.
Once you have an object file, you can inspect its sections and symbols directly
by calling get_sections() and get_symbols() respectively. To inspect
relocations, call get_relocations() on a Section instance.
Iterator Interface
------------------
The LLVM bindings expose iteration over sections, symbols, and relocations in a
way that only allows one instance to be operated on at a time. This is
slightly annoying from a Python perspective, as it isn't very Pythonic to have
objects that "expire" while the program may still hold references to them.
To aid working around this limitation, each Section, Symbol, and Relocation
instance caches its properties after first access. So, if the underlying
iterator is advanced, the properties can still be obtained provided they have
already been retrieved.
In addition, we also provide a "cache" method on each class to cache all
available data. You can call this on each obtained instance. Or, you can pass
cache=True to the appropriate get_XXX() method to have this done for you.
Here are some examples of how to perform iteration:
obj = ObjectFile(filename='/bin/ls')
# This is OK. Each Section is only accessed inside its own iteration slot.
section_names = []
for section in obj.get_sections():
section_names.append(section.name)
# This is NOT OK. You perform a lookup after the object has expired.
symbols = list(obj.get_symbols())
for symbol in symbols:
print symbol.name # This raises because the object has expired.
# In this example, we mix a working and failing scenario.
symbols = []
for symbol in obj.get_symbols():
symbols.append(symbol)
print symbol.name
for symbol in symbols:
print symbol.name # OK
print symbol.address # NOT OK. We didn't look up this property before.
# Cache everything up front.
symbols = list(obj.get_symbols(cache=True))
for symbol in symbols:
print symbol.name # OK
"""
from ctypes import c_char_p
from ctypes import c_char
from ctypes import POINTER
from ctypes import c_uint64
from ctypes import string_at
from .common import CachedProperty
from .common import LLVMObject
from .common import c_object_p
from .common import get_library
from .core import MemoryBuffer
__all__ = [
"lib",
"ObjectFile",
"Relocation",
"Section",
"Symbol",
]
class ObjectFile(LLVMObject):
"""Represents an object/binary file."""
def __init__(self, filename=None, contents=None):
"""Construct an instance from a filename or binary data.
filename must be a path to a file that can be opened with open().
contents can be either a native Python buffer type (like str) or a
llvm.core.MemoryBuffer instance.
"""
if contents:
assert isinstance(contents, MemoryBuffer)
if filename is not None:
contents = MemoryBuffer(filename=filename)
if contents is None:
raise Exception('No input found.')
ptr = lib.LLVMCreateObjectFile(contents)
LLVMObject.__init__(self, ptr, disposer=lib.LLVMDisposeObjectFile)
self.take_ownership(contents)
def get_sections(self, cache=False):
"""Obtain the sections in this object file.
This is a generator for llvm.object.Section instances.
Sections are exposed as limited-use objects. See the module's
documentation on iterators for more.
"""
sections = lib.LLVMGetSections(self)
last = None
while True:
if lib.LLVMIsSectionIteratorAtEnd(self, sections):
break
last = Section(sections)
if cache:
last.cache()
yield last
lib.LLVMMoveToNextSection(sections)
last.expire()
if last is not None:
last.expire()
lib.LLVMDisposeSectionIterator(sections)
def get_symbols(self, cache=False):
"""Obtain the symbols in this object file.
This is a generator for llvm.object.Symbol instances.
Each Symbol instance is a limited-use object. See this module's
documentation on iterators for more.
"""
symbols = lib.LLVMGetSymbols(self)
last = None
while True:
if lib.LLVMIsSymbolIteratorAtEnd(self, symbols):
break
last = Symbol(symbols, self)
if cache:
last.cache()
yield last
lib.LLVMMoveToNextSymbol(symbols)
last.expire()
if last is not None:
last.expire()
lib.LLVMDisposeSymbolIterator(symbols)
class Section(LLVMObject):
"""Represents a section in an object file."""
def __init__(self, ptr):
"""Construct a new section instance.
Section instances can currently only be created from an ObjectFile
instance. Therefore, this constructor should not be used outside of
this module.
"""
LLVMObject.__init__(self, ptr)
self.expired = False
@CachedProperty
def name(self):
"""Obtain the string name of the section.
This is typically something like '.dynsym' or '.rodata'.
"""
if self.expired:
raise Exception('Section instance has expired.')
return lib.LLVMGetSectionName(self)
@CachedProperty
def size(self):
"""The size of the section, in long bytes."""
if self.expired:
raise Exception('Section instance has expired.')
return lib.LLVMGetSectionSize(self)
@CachedProperty
def contents(self):
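        """The contents of the section as a str, or None if unavailable."""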
if self.expired:
raise Exception('Section instance has expired.')
siz = self.size
r = lib.LLVMGetSectionContents(self)
if r:
return string_at(r, siz)
return None
@CachedProperty
def address(self):
"""The address of this section, in long bytes."""
if self.expired:
raise Exception('Section instance has expired.')
return lib.LLVMGetSectionAddress(self)
def has_symbol(self, symbol):
"""Returns whether a Symbol instance is present in this Section."""
if self.expired:
raise Exception('Section instance has expired.')
assert isinstance(symbol, Symbol)
return lib.LLVMGetSectionContainsSymbol(self, symbol)
def get_relocations(self, cache=False):
"""Obtain the relocations in this Section.
This is a generator for llvm.object.Relocation instances.
        Each instance is a limited-use object. See this module's documentation
on iterators for more.
"""
if self.expired:
raise Exception('Section instance has expired.')
relocations = lib.LLVMGetRelocations(self)
last = None
while True:
if lib.LLVMIsRelocationIteratorAtEnd(self, relocations):
break
last = Relocation(relocations)
if cache:
last.cache()
yield last
lib.LLVMMoveToNextRelocation(relocations)
last.expire()
if last is not None:
last.expire()
lib.LLVMDisposeRelocationIterator(relocations)
def cache(self):
"""Cache properties of this Section.
This can be called as a workaround to the single active Section
limitation. When called, the properties of the Section are fetched so
they are still available after the Section has been marked inactive.
"""
getattr(self, 'name')
getattr(self, 'size')
getattr(self, 'contents')
getattr(self, 'address')
def expire(self):
"""Expire the section.
This is called internally by the section iterator.
"""
self.expired = True
class Symbol(LLVMObject):
"""Represents a symbol in an object file."""
def __init__(self, ptr, object_file):
assert isinstance(ptr, c_object_p)
assert isinstance(object_file, ObjectFile)
LLVMObject.__init__(self, ptr)
self.expired = False
self._object_file = object_file
@CachedProperty
def name(self):
"""The str name of the symbol.
This is often a function or variable name. Keep in mind that name
mangling could be in effect.
"""
if self.expired:
raise Exception('Symbol instance has expired.')
return lib.LLVMGetSymbolName(self)
@CachedProperty
def address(self):
"""The address of this symbol, in long bytes."""
if self.expired:
raise Exception('Symbol instance has expired.')
return lib.LLVMGetSymbolAddress(self)
@CachedProperty
def size(self):
"""The size of the symbol, in long bytes."""
if self.expired:
raise Exception('Symbol instance has expired.')
return lib.LLVMGetSymbolSize(self)
@CachedProperty
def section(self):
"""The Section to which this Symbol belongs.
The returned Section instance does not expire, unlike Sections that are
commonly obtained through iteration.
Because this obtains a new section iterator each time it is accessed,
calling this on a number of Symbol instances could be expensive.
"""
sections = lib.LLVMGetSections(self._object_file)
lib.LLVMMoveToContainingSection(sections, self)
return Section(sections)
def cache(self):
"""Cache all cacheable properties."""
getattr(self, 'name')
getattr(self, 'address')
getattr(self, 'size')
def expire(self):
"""Mark the object as expired to prevent future API accesses.
This is called internally by this module and it is unlikely that
external callers have a legitimate reason for using it.
"""
self.expired = True
class Relocation(LLVMObject):
"""Represents a relocation definition."""
def __init__(self, ptr):
"""Create a new relocation instance.
Relocations are created from objects derived from Section instances.
Therefore, this constructor should not be called outside of this
module. See Section.get_relocations() for the proper method to obtain
a Relocation instance.
"""
assert isinstance(ptr, c_object_p)
LLVMObject.__init__(self, ptr)
self.expired = False
@CachedProperty
def offset(self):
"""The offset of this relocation, in long bytes."""
if self.expired:
raise Exception('Relocation instance has expired.')
return lib.LLVMGetRelocationOffset(self)
@CachedProperty
def symbol(self):
"""The Symbol corresponding to this Relocation."""
if self.expired:
raise Exception('Relocation instance has expired.')
ptr = lib.LLVMGetRelocationSymbol(self)
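        # Editor's note: Symbol.__init__ requires the owning ObjectFile,
        # which Relocation does not keep a reference to, so this call
        # raises; fixing it would mean threading the object file through
        # Section.get_relocations().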
return Symbol(ptr)
@CachedProperty
def type_number(self):
"""The relocation type, as a long."""
if self.expired:
raise Exception('Relocation instance has expired.')
return lib.LLVMGetRelocationType(self)
@CachedProperty
def type_name(self):
"""The relocation type's name, as a str."""
if self.expired:
raise Exception('Relocation instance has expired.')
return lib.LLVMGetRelocationTypeName(self)
@CachedProperty
def value_string(self):
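        """The relocation's value, as a str."""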
if self.expired:
raise Exception('Relocation instance has expired.')
return lib.LLVMGetRelocationValueString(self)
def expire(self):
"""Expire this instance, making future API accesses fail."""
self.expired = True
def cache(self):
"""Cache all cacheable properties on this instance."""
        getattr(self, 'offset')
        getattr(self, 'symbol')
        getattr(self, 'type_number')
getattr(self, 'type_name')
getattr(self, 'value_string')
def register_library(library):
"""Register function prototypes with LLVM library instance."""
# Object.h functions
library.LLVMCreateObjectFile.argtypes = [MemoryBuffer]
library.LLVMCreateObjectFile.restype = c_object_p
library.LLVMDisposeObjectFile.argtypes = [ObjectFile]
library.LLVMGetSections.argtypes = [ObjectFile]
library.LLVMGetSections.restype = c_object_p
library.LLVMDisposeSectionIterator.argtypes = [c_object_p]
library.LLVMIsSectionIteratorAtEnd.argtypes = [ObjectFile, c_object_p]
library.LLVMIsSectionIteratorAtEnd.restype = bool
library.LLVMMoveToNextSection.argtypes = [c_object_p]
library.LLVMMoveToContainingSection.argtypes = [c_object_p, c_object_p]
library.LLVMGetSymbols.argtypes = [ObjectFile]
library.LLVMGetSymbols.restype = c_object_p
library.LLVMDisposeSymbolIterator.argtypes = [c_object_p]
library.LLVMIsSymbolIteratorAtEnd.argtypes = [ObjectFile, c_object_p]
library.LLVMIsSymbolIteratorAtEnd.restype = bool
library.LLVMMoveToNextSymbol.argtypes = [c_object_p]
library.LLVMGetSectionName.argtypes = [c_object_p]
library.LLVMGetSectionName.restype = c_char_p
library.LLVMGetSectionSize.argtypes = [c_object_p]
library.LLVMGetSectionSize.restype = c_uint64
library.LLVMGetSectionContents.argtypes = [c_object_p]
# Can't use c_char_p here as it isn't a NUL-terminated string.
library.LLVMGetSectionContents.restype = POINTER(c_char)
library.LLVMGetSectionAddress.argtypes = [c_object_p]
library.LLVMGetSectionAddress.restype = c_uint64
library.LLVMGetSectionContainsSymbol.argtypes = [c_object_p, c_object_p]
library.LLVMGetSectionContainsSymbol.restype = bool
library.LLVMGetRelocations.argtypes = [c_object_p]
library.LLVMGetRelocations.restype = c_object_p
library.LLVMDisposeRelocationIterator.argtypes = [c_object_p]
library.LLVMIsRelocationIteratorAtEnd.argtypes = [c_object_p, c_object_p]
library.LLVMIsRelocationIteratorAtEnd.restype = bool
library.LLVMMoveToNextRelocation.argtypes = [c_object_p]
library.LLVMGetSymbolName.argtypes = [Symbol]
library.LLVMGetSymbolName.restype = c_char_p
library.LLVMGetSymbolAddress.argtypes = [Symbol]
library.LLVMGetSymbolAddress.restype = c_uint64
library.LLVMGetSymbolSize.argtypes = [Symbol]
library.LLVMGetSymbolSize.restype = c_uint64
library.LLVMGetRelocationOffset.argtypes = [c_object_p]
library.LLVMGetRelocationOffset.restype = c_uint64
library.LLVMGetRelocationSymbol.argtypes = [c_object_p]
library.LLVMGetRelocationSymbol.restype = c_object_p
library.LLVMGetRelocationType.argtypes = [c_object_p]
library.LLVMGetRelocationType.restype = c_uint64
library.LLVMGetRelocationTypeName.argtypes = [c_object_p]
library.LLVMGetRelocationTypeName.restype = c_char_p
library.LLVMGetRelocationValueString.argtypes = [c_object_p]
library.LLVMGetRelocationValueString.restype = c_char_p
lib = get_library()
register_library(lib)
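# Example (editor's sketch, not part of the original module): mirrors the
# usage shown in the module docstring. Run via 'python -m llvm.object' so
# the relative imports resolve; '/bin/ls' is just a convenient binary.
if __name__ == '__main__':
    obj = ObjectFile(filename='/bin/ls')
    for section in obj.get_sections(cache=True):
        print section.name, section.size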
| 16,015 | 30.403922 | 80 | py |
LowFat | LowFat-master/llvm-4.0.0.src/bindings/python/llvm/bit_reader.py |
from .common import LLVMObject
from .common import c_object_p
from .common import get_library
from .core import MemoryBuffer
from .core import Module
from ctypes import POINTER
from ctypes import byref
__all__ = ['parse_bitcode']
lib = get_library()
def parse_bitcode(mem_buffer):
"""Input is .core.MemoryBuffer"""
module = c_object_p()
result = lib.LLVMParseBitcode2(mem_buffer, byref(module))
if result:
raise RuntimeError('LLVM Error')
m = Module(module)
m.take_ownership(mem_buffer)
return m
def register_library(library):
library.LLVMParseBitcode2.argtypes = [MemoryBuffer, POINTER(c_object_p)]
library.LLVMParseBitcode2.restype = bool
register_library(lib)
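# Example (editor's sketch, not part of the original module): parse a
# bitcode file and report its target triple. 'test.bc' is a hypothetical
# input; run via 'python -m llvm.bit_reader' so the relative imports resolve.
if __name__ == '__main__':
    m = parse_bitcode(MemoryBuffer(filename='test.bc'))
    print m.target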
| 815 | 25.322581 | 76 | py |
LowFat | LowFat-master/llvm-4.0.0.src/bindings/python/llvm/common.py | #===- common.py - Python LLVM Bindings -----------------------*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
from ctypes import POINTER
from ctypes import c_void_p
from ctypes import cdll
import ctypes.util
import platform
# LLVM_VERSION: sync with PACKAGE_VERSION in autoconf/configure.ac and CMakeLists.txt
# but leave out the 'svn' suffix.
LLVM_VERSION = '4.0'
__all__ = [
'c_object_p',
'get_library',
]
c_object_p = POINTER(c_void_p)
class LLVMObject(object):
"""Base class for objects that are backed by an LLVM data structure.
This class should never be instantiated outside of this package.
"""
def __init__(self, ptr, ownable=True, disposer=None):
assert isinstance(ptr, c_object_p)
self._ptr = self._as_parameter_ = ptr
self._self_owned = True
self._ownable = ownable
self._disposer = disposer
self._owned_objects = []
def take_ownership(self, obj):
"""Take ownership of another object.
When you take ownership of another object, you are responsible for
destroying that object. In addition, a reference to that object is
placed inside this object so the Python garbage collector will not
collect the object while it is still alive in libLLVM.
This method should likely only be called from within modules inside
this package.
"""
assert isinstance(obj, LLVMObject)
self._owned_objects.append(obj)
obj._self_owned = False
def from_param(self):
"""ctypes function that converts this object to a function parameter."""
return self._as_parameter_
def __del__(self):
if not hasattr(self, '_self_owned') or not hasattr(self, '_disposer'):
return
if self._self_owned and self._disposer:
self._disposer(self)
class CachedProperty(object):
"""Decorator that caches the result of a property lookup.
This is a useful replacement for @property. It is recommended to use this
decorator on properties that invoke C API calls for which the result of the
call will be idempotent.
"""
def __init__(self, wrapped):
self.wrapped = wrapped
try:
self.__doc__ = wrapped.__doc__
except: # pragma: no cover
pass
def __get__(self, instance, instance_type=None):
if instance is None:
return self
value = self.wrapped(instance)
setattr(instance, self.wrapped.__name__, value)
return value
def get_library():
"""Obtain a reference to the llvm library."""
# On Linux, ctypes.cdll.LoadLibrary() respects LD_LIBRARY_PATH
# while ctypes.util.find_library() doesn't.
# See http://docs.python.org/2/library/ctypes.html#finding-shared-libraries
#
    # To make it possible to run the unit tests without installing the LLVM
    # shared library into a default linker search path, always try
    # ctypes.cdll.LoadLibrary() with all possible library names first, then
    # try ctypes.util.find_library().
names = ['LLVM-' + LLVM_VERSION, 'LLVM-' + LLVM_VERSION + 'svn']
t = platform.system()
if t == 'Darwin':
pfx, ext = 'lib', '.dylib'
elif t == 'Windows':
pfx, ext = '', '.dll'
else:
pfx, ext = 'lib', '.so'
for i in names:
try:
lib = cdll.LoadLibrary(pfx + i + ext)
except OSError:
pass
else:
return lib
for i in names:
t = ctypes.util.find_library(i)
if t:
return cdll.LoadLibrary(t)
raise Exception('LLVM shared library not found!')
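# Example (editor's sketch, not part of the original module): CachedProperty
# computes the wrapped method once, then shadows itself on the instance.
if __name__ == '__main__':
    class _Demo(object):
        @CachedProperty
        def answer(self):
            print 'computing...'  # runs on the first access only
            return 42
    d = _Demo()
    print d.answer  # prints 'computing...' then 42
    print d.answer  # prints 42, straight from the instance dict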
| 3,884 | 29.590551 | 86 | py |
LowFat | LowFat-master/llvm-4.0.0.src/bindings/python/llvm/__init__.py | 0 | 0 | 0 | py |
|
LowFat | LowFat-master/llvm-4.0.0.src/bindings/python/llvm/disassembler.py | #===- disassembler.py - Python LLVM Bindings -----------------*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
from ctypes import CFUNCTYPE
from ctypes import POINTER
from ctypes import addressof
from ctypes import c_byte
from ctypes import c_char_p
from ctypes import c_int
from ctypes import c_size_t
from ctypes import c_ubyte
from ctypes import c_uint64
from ctypes import c_void_p
from ctypes import cast
from .common import LLVMObject
from .common import c_object_p
from .common import get_library
__all__ = [
'Disassembler',
]
lib = get_library()
callbacks = {}
# Constants for set_options
Option_UseMarkup = 1
_initialized = False
_targets = ['AArch64', 'ARM', 'Hexagon', 'MSP430', 'Mips', 'NVPTX', 'PowerPC', 'R600', 'Sparc', 'SystemZ', 'X86', 'XCore']
def _ensure_initialized():
global _initialized
if not _initialized:
# Here one would want to call the functions
# LLVMInitializeAll{TargetInfo,TargetMC,Disassembler}s, but
# unfortunately they are only defined as static inline
# functions in the header files of llvm-c, so they don't exist
# as symbols in the shared library.
# So until that is fixed use this hack to initialize them all
for tgt in _targets:
for initializer in ("TargetInfo", "TargetMC", "Disassembler"):
try:
f = getattr(lib, "LLVMInitialize" + tgt + initializer)
except AttributeError:
continue
f()
_initialized = True
class Disassembler(LLVMObject):
"""Represents a disassembler instance.
    Disassembler instances are tied to a specific "triple", which must be defined
at creation time.
Disassembler instances can disassemble instructions from multiple sources.
"""
def __init__(self, triple):
"""Create a new disassembler instance.
The triple argument is the triple to create the disassembler for. This
is something like 'i386-apple-darwin9'.
"""
_ensure_initialized()
ptr = lib.LLVMCreateDisasm(c_char_p(triple), c_void_p(None), c_int(0),
callbacks['op_info'](0), callbacks['symbol_lookup'](0))
if not ptr:
raise Exception('Could not obtain disassembler for triple: %s' %
triple)
LLVMObject.__init__(self, ptr, disposer=lib.LLVMDisasmDispose)
def get_instruction(self, source, pc=0):
"""Obtain the next instruction from an input source.
The input source should be a str or bytearray or something that
represents a sequence of bytes.
This function will start reading bytes from the beginning of the
source.
The pc argument specifies the address that the first byte is at.
This returns a 2-tuple of:
long number of bytes read. 0 if no instruction was read.
str representation of instruction. This will be the assembly that
represents the instruction.
"""
buf = cast(c_char_p(source), POINTER(c_ubyte))
out_str = cast((c_byte * 255)(), c_char_p)
result = lib.LLVMDisasmInstruction(self, buf, c_uint64(len(source)),
c_uint64(pc), out_str, 255)
return (result, out_str.value)
def get_instructions(self, source, pc=0):
"""Obtain multiple instructions from an input source.
This is like get_instruction() except it is a generator for all
instructions within the source. It starts at the beginning of the
source and reads instructions until no more can be read.
This generator returns 3-tuple of:
long address of instruction.
long size of instruction, in bytes.
str representation of instruction.
"""
source_bytes = c_char_p(source)
out_str = cast((c_byte * 255)(), c_char_p)
        # This could probably be written more cleanly, but it does work.
buf = cast(source_bytes, POINTER(c_ubyte * len(source))).contents
offset = 0
address = pc
end_address = pc + len(source)
while address < end_address:
b = cast(addressof(buf) + offset, POINTER(c_ubyte))
result = lib.LLVMDisasmInstruction(self, b,
c_uint64(len(source) - offset), c_uint64(address),
out_str, 255)
if result == 0:
break
yield (address, result, out_str.value)
address += result
offset += result
def set_options(self, options):
if not lib.LLVMSetDisasmOptions(self, options):
raise Exception('Unable to set all disassembler options in %i' % options)
def register_library(library):
library.LLVMCreateDisasm.argtypes = [c_char_p, c_void_p, c_int,
callbacks['op_info'], callbacks['symbol_lookup']]
library.LLVMCreateDisasm.restype = c_object_p
library.LLVMDisasmDispose.argtypes = [Disassembler]
library.LLVMDisasmInstruction.argtypes = [Disassembler, POINTER(c_ubyte),
c_uint64, c_uint64, c_char_p, c_size_t]
library.LLVMDisasmInstruction.restype = c_size_t
library.LLVMSetDisasmOptions.argtypes = [Disassembler, c_uint64]
library.LLVMSetDisasmOptions.restype = c_int
callbacks['op_info'] = CFUNCTYPE(c_int, c_void_p, c_uint64, c_uint64, c_uint64,
c_int, c_void_p)
callbacks['symbol_lookup'] = CFUNCTYPE(c_char_p, c_void_p, c_uint64,
POINTER(c_uint64), c_uint64,
POINTER(c_char_p))
register_library(lib)
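# Example (editor's sketch, not part of the original module): the byte
# sequence and triple are borrowed from the unit tests; whether the triple
# is available depends on which targets were built into libLLVM. Run via
# 'python -m llvm.disassembler' so the relative imports resolve.
if __name__ == '__main__':
    disassembler = Disassembler('i686-apple-darwin9')
    for address, size, text in disassembler.get_instructions(
            '\x67\xe3\x81\x01\xc7'):  # jcxz -127; addl %eax, %edi
        print address, size, text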
| 5,889 | 33.444444 | 122 | py |
LowFat | LowFat-master/llvm-4.0.0.src/bindings/python/llvm/tests/base.py | import os.path
import unittest
POSSIBLE_TEST_BINARIES = [
'libreadline.so.5',
'libreadline.so.6',
]
POSSIBLE_TEST_BINARY_PATHS = [
'/usr/lib/debug',
'/lib',
'/usr/lib',
'/usr/local/lib',
'/lib/i386-linux-gnu',
]
class TestBase(unittest.TestCase):
def get_test_binary(self):
"""Helper to obtain a test binary for object file testing.
FIXME Support additional, highly-likely targets or create one
ourselves.
"""
for d in POSSIBLE_TEST_BINARY_PATHS:
for lib in POSSIBLE_TEST_BINARIES:
path = os.path.join(d, lib)
if os.path.exists(path):
return path
raise Exception('No suitable test binaries available!')
get_test_binary.__test__ = False
def get_test_file(self):
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "test_file")
def get_test_bc(self):
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "test.bc")
| 1,016 | 25.076923 | 84 | py |
LowFat | LowFat-master/llvm-4.0.0.src/bindings/python/llvm/tests/test_core.py | from .base import TestBase
from ..core import MemoryBuffer
from ..core import PassRegistry
from ..core import Context
from ..core import Module
from ..core import Enums
from ..core import OpCode
from ..bit_reader import parse_bitcode
class TestCore(TestBase):
def test_enumerations(self):
for enum_cls, enum_spec in Enums:
for enum_name, enum_value in enum_spec:
# First make sure that enum_cls has the name of the enum as an
# attribute. People will access these values as
# EnumCls.EnumName.
self.assertTrue(hasattr(enum_cls, enum_name))
v_attr = getattr(enum_cls, enum_name)
self.assertTrue(isinstance(v_attr, enum_cls))
# Then make sure that the value returned for this attribute is
# correct in both ways.
self.assertEqual(v_attr.value, enum_value)
e = enum_cls.from_value(enum_value)
self.assertTrue(isinstance(e, enum_cls))
self.assertEqual(e, v_attr)
def test_memory_buffer_create_from_file(self):
source = self.get_test_file()
MemoryBuffer(filename=source)
def test_memory_buffer_failing(self):
with self.assertRaises(Exception):
MemoryBuffer(filename="/hopefully/this/path/doesnt/exist")
def test_memory_buffer_len(self):
source = self.get_test_file()
m = MemoryBuffer(filename=source)
self.assertEqual(len(m), 50)
def test_create_passregistry(self):
PassRegistry()
def test_create_context(self):
Context.GetGlobalContext()
def test_create_module_with_name(self):
        # Make sure we cannot create a module without an LLVMModuleRef.
with self.assertRaises(TypeError):
m = Module()
m = Module.CreateWithName("test-module")
def test_module_getset_datalayout(self):
m = Module.CreateWithName("test-module")
dl = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
m.datalayout = dl
self.assertEqual(m.datalayout, dl)
def test_module_getset_target(self):
m = Module.CreateWithName("test-module")
target = "thumbv7-apple-ios5.0.0"
m.target = target
self.assertEqual(m.target, target)
def test_module_print_module_to_file(self):
m = Module.CreateWithName("test")
dl = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
m.datalayout = dl
target = "thumbv7-apple-ios5.0.0"
m.target = target
m.print_module_to_file("test2.ll")
def test_module_function_iteration(self):
m = parse_bitcode(MemoryBuffer(filename=self.get_test_bc()))
i = 0
functions = ["f", "f2", "f3", "f4", "f5", "f6", "g1", "g2", "h1", "h2",
"h3"]
# Forward
for f in m:
self.assertEqual(f.name, functions[i])
f.dump()
i += 1
# Backwards
for f in reversed(m):
i -= 1
self.assertEqual(f.name, functions[i])
f.dump()
def test_function_basicblock_iteration(self):
m = parse_bitcode(MemoryBuffer(filename=self.get_test_bc()))
i = 0
bb_list = ['b1', 'b2', 'end']
f = m.first
while f.name != "f6":
f = f.next
# Forward
for bb in f:
self.assertEqual(bb.name, bb_list[i])
bb.dump()
i += 1
# Backwards
for bb in reversed(f):
i -= 1
self.assertEqual(bb.name, bb_list[i])
bb.dump()
def test_basicblock_instruction_iteration(self):
m = parse_bitcode(MemoryBuffer(filename=self.get_test_bc()))
i = 0
inst_list = [('arg1', OpCode.ExtractValue),
('arg2', OpCode.ExtractValue),
('', OpCode.Call),
('', OpCode.Ret)]
bb = m.first.first
# Forward
for inst in bb:
self.assertEqual(inst.name, inst_list[i][0])
self.assertEqual(inst.opcode, inst_list[i][1])
for op in range(len(inst)):
o = inst.get_operand(op)
print o.name
o.dump()
inst.dump()
i += 1
# Backwards
for inst in reversed(bb):
i -= 1
self.assertEqual(inst.name, inst_list[i][0])
self.assertEqual(inst.opcode, inst_list[i][1])
inst.dump()
| 4,675 | 32.163121 | 131 | py |
LowFat | LowFat-master/llvm-4.0.0.src/bindings/python/llvm/tests/test_object.py | from .base import TestBase
from ..object import ObjectFile
from ..object import Relocation
from ..object import Section
from ..object import Symbol
class TestObjectFile(TestBase):
def get_object_file(self):
source = self.get_test_binary()
return ObjectFile(filename=source)
def test_create_from_file(self):
self.get_object_file()
def test_get_sections(self):
o = self.get_object_file()
count = 0
for section in o.get_sections():
count += 1
assert isinstance(section, Section)
assert isinstance(section.name, str)
assert isinstance(section.size, long)
assert isinstance(section.contents, str)
assert isinstance(section.address, long)
assert len(section.contents) == section.size
self.assertGreater(count, 0)
for section in o.get_sections():
section.cache()
def test_get_symbols(self):
o = self.get_object_file()
count = 0
for symbol in o.get_symbols():
count += 1
assert isinstance(symbol, Symbol)
assert isinstance(symbol.name, str)
assert isinstance(symbol.address, long)
assert isinstance(symbol.size, long)
self.assertGreater(count, 0)
for symbol in o.get_symbols():
symbol.cache()
def test_symbol_section_accessor(self):
o = self.get_object_file()
for symbol in o.get_symbols():
section = symbol.section
assert isinstance(section, Section)
break
def test_get_relocations(self):
o = self.get_object_file()
for section in o.get_sections():
for relocation in section.get_relocations():
assert isinstance(relocation, Relocation)
assert isinstance(relocation.offset, long)
assert isinstance(relocation.type_number, long)
assert isinstance(relocation.type_name, str)
assert isinstance(relocation.value_string, str)
| 2,143 | 30.529412 | 63 | py |
LowFat | LowFat-master/llvm-4.0.0.src/bindings/python/llvm/tests/test_disassembler.py | from .base import TestBase
from ..disassembler import Disassembler, Option_UseMarkup
class TestDisassembler(TestBase):
def test_instantiate(self):
Disassembler('i686-apple-darwin9')
def test_basic(self):
sequence = '\x67\xe3\x81' # jcxz -127
triple = 'i686-apple-darwin9'
disassembler = Disassembler(triple)
count, s = disassembler.get_instruction(sequence)
self.assertEqual(count, 3)
self.assertEqual(s, '\tjcxz\t-127')
def test_nonexistent_triple(self):
with self.assertRaisesRegexp(Exception, "Could not obtain disassembler for triple"):
Disassembler("nonexistent-triple-raises")
def test_get_instructions(self):
sequence = '\x67\xe3\x81\x01\xc7' # jcxz -127; addl %eax, %edi
disassembler = Disassembler('i686-apple-darwin9')
instructions = list(disassembler.get_instructions(sequence))
self.assertEqual(len(instructions), 2)
self.assertEqual(instructions[0], (0, 3, '\tjcxz\t-127'))
self.assertEqual(instructions[1], (3, 2, '\taddl\t%eax, %edi'))
def test_set_options(self):
sequence = '\x10\x40\x2d\xe9'
triple = 'arm-linux-android'
disassembler = Disassembler(triple)
disassembler.set_options(Option_UseMarkup)
count, s = disassembler.get_instruction(sequence)
print s
self.assertEqual(count, 4)
self.assertEqual(s, '\tpush\t{<reg:r4>, <reg:lr>}')
| 1,475 | 32.545455 | 92 | py |
LowFat | LowFat-master/llvm-4.0.0.src/bindings/python/llvm/tests/test_bitreader.py | from .base import TestBase
from ..core import OpCode
from ..core import MemoryBuffer
from ..core import PassRegistry
from ..core import Context
from ..core import Module
from ..bit_reader import parse_bitcode
class TestBitReader(TestBase):
def test_parse_bitcode(self):
source = self.get_test_bc()
m = parse_bitcode(MemoryBuffer(filename=source))
print m.target
print m.datalayout
| 419 | 25.25 | 56 | py |
LowFat | LowFat-master/llvm-4.0.0.src/bindings/python/llvm/tests/__init__.py | 0 | 0 | 0 | py |
|
LowFat | LowFat-master/llvm-4.0.0.src/examples/Kaleidoscope/MCJIT/complete/split-lib.py | #!/usr/bin/env python
class TimingScriptGenerator:
"""Used to generate a bash script which will invoke the toy and time it"""
def __init__(self, scriptname, outputname):
self.shfile = open(scriptname, 'w')
self.timeFile = outputname
self.shfile.write("echo \"\" > %s\n" % self.timeFile)
def writeTimingCall(self, irname, callname):
"""Echo some comments and invoke both versions of toy"""
rootname = irname
if '.' in irname:
rootname = irname[:irname.rfind('.')]
self.shfile.write("echo \"%s: Calls %s\" >> %s\n" % (callname, irname, self.timeFile))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"With MCJIT\" >> %s\n" % self.timeFile)
self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
self.shfile.write(" -o %s -a " % self.timeFile)
self.shfile.write("./toy -suppress-prompts -use-mcjit=true -enable-lazy-compilation=true -use-object-cache -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (irname, callname, rootname, rootname))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"With MCJIT again\" >> %s\n" % self.timeFile)
self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
self.shfile.write(" -o %s -a " % self.timeFile)
self.shfile.write("./toy -suppress-prompts -use-mcjit=true -enable-lazy-compilation=true -use-object-cache -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (irname, callname, rootname, rootname))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"With JIT\" >> %s\n" % self.timeFile)
self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
self.shfile.write(" -o %s -a " % self.timeFile)
self.shfile.write("./toy -suppress-prompts -use-mcjit=false -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (irname, callname, rootname, rootname))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
class LibScriptGenerator:
"""Used to generate a bash script which will invoke the toy and time it"""
def __init__(self, filename):
self.shfile = open(filename, 'w')
def writeLibGenCall(self, libname, irname):
self.shfile.write("./toy -suppress-prompts -use-mcjit=false -dump-modules < %s 2> %s\n" % (libname, irname))
def splitScript(inputname, libGenScript, timingScript):
rootname = inputname[:-2]
libname = rootname + "-lib.k"
irname = rootname + "-lib.ir"
callname = rootname + "-call.k"
infile = open(inputname, "r")
libfile = open(libname, "w")
callfile = open(callname, "w")
print "Splitting %s into %s and %s" % (inputname, callname, libname)
for line in infile:
if not line.startswith("#"):
if line.startswith("print"):
callfile.write(line)
else:
libfile.write(line)
libGenScript.writeLibGenCall(libname, irname)
timingScript.writeTimingCall(irname, callname)
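# Editor's note, an illustrative (hypothetical) run: for an input foo.k whose
# lines are a '#' comment, a function definition and a 'print...' call,
# splitScript drops the comment, writes the definition to foo-lib.k and the
# call to foo-call.k, then queues generation of foo-lib.ir in make-libs.sh
# and a timing run of the pair in time-lib.sh.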
# Execution begins here
libGenScript = LibScriptGenerator("make-libs.sh")
timingScript = TimingScriptGenerator("time-lib.sh", "lib-timing.txt")
script_list = ["test-5000-3-50-50.k", "test-5000-10-100-10.k", "test-5000-10-5-10.k", "test-5000-10-1-0.k",
"test-1000-3-10-50.k", "test-1000-10-100-10.k", "test-1000-10-5-10.k", "test-1000-10-1-0.k",
"test-200-3-2-50.k", "test-200-10-40-10.k", "test-200-10-2-10.k", "test-200-10-1-0.k"]
for script in script_list:
splitScript(script, libGenScript, timingScript)
print "All done!"
| 3,805 | 52.605634 | 208 | py |
LowFat | LowFat-master/llvm-4.0.0.src/examples/Kaleidoscope/MCJIT/complete/genk-timing.py | #!/usr/bin/env python
import sys
import random
class TimingScriptGenerator:
"""Used to generate a bash script which will invoke the toy and time it"""
def __init__(self, scriptname, outputname):
self.timeFile = outputname
self.shfile = open(scriptname, 'w')
self.shfile.write("echo \"\" > %s\n" % self.timeFile)
def writeTimingCall(self, filename, numFuncs, funcsCalled, totalCalls):
"""Echo some comments and invoke both versions of toy"""
rootname = filename
if '.' in filename:
rootname = filename[:filename.rfind('.')]
self.shfile.write("echo \"%s: Calls %d of %d functions, %d total\" >> %s\n" % (filename, funcsCalled, numFuncs, totalCalls, self.timeFile))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"With MCJIT (original)\" >> %s\n" % self.timeFile)
self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
self.shfile.write(" -o %s -a " % self.timeFile)
self.shfile.write("./toy -suppress-prompts -use-mcjit=true -enable-lazy-compilation=false < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (filename, rootname, rootname))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"With MCJIT (lazy)\" >> %s\n" % self.timeFile)
self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
self.shfile.write(" -o %s -a " % self.timeFile)
self.shfile.write("./toy -suppress-prompts -use-mcjit=true -enable-lazy-compilation=true < %s > %s-mcjit-lazy.out 2> %s-mcjit-lazy.err\n" % (filename, rootname, rootname))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"With JIT\" >> %s\n" % self.timeFile)
self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
self.shfile.write(" -o %s -a " % self.timeFile)
self.shfile.write("./toy -suppress-prompts -use-mcjit=false < %s > %s-jit.out 2> %s-jit.err\n" % (filename, rootname, rootname))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
class KScriptGenerator:
"""Used to generate random Kaleidoscope code"""
def __init__(self, filename):
self.kfile = open(filename, 'w')
self.nextFuncNum = 1
self.lastFuncNum = None
self.callWeighting = 0.1
# A mapping of calls within functions with no duplicates
self.calledFunctionTable = {}
# A list of function calls which will actually be executed
self.calledFunctions = []
# A comprehensive mapping of calls within functions
# used for computing the total number of calls
self.comprehensiveCalledFunctionTable = {}
self.totalCallsExecuted = 0
def updateTotalCallCount(self, callee):
# Count this call
self.totalCallsExecuted += 1
# Then count all the functions it calls
if callee in self.comprehensiveCalledFunctionTable:
for child in self.comprehensiveCalledFunctionTable[callee]:
self.updateTotalCallCount(child)
def updateFunctionCallMap(self, caller, callee):
"""Maintains a map of functions that are called from other functions"""
if not caller in self.calledFunctionTable:
self.calledFunctionTable[caller] = []
if not callee in self.calledFunctionTable[caller]:
self.calledFunctionTable[caller].append(callee)
if not caller in self.comprehensiveCalledFunctionTable:
self.comprehensiveCalledFunctionTable[caller] = []
self.comprehensiveCalledFunctionTable[caller].append(callee)
def updateCalledFunctionList(self, callee):
"""Maintains a list of functions that will actually be called"""
# Update the total call count
self.updateTotalCallCount(callee)
# If this function is already in the list, don't do anything else
if callee in self.calledFunctions:
return
# Add this function to the list of those that will be called.
self.calledFunctions.append(callee)
# If this function calls other functions, add them too
if callee in self.calledFunctionTable:
for subCallee in self.calledFunctionTable[callee]:
self.updateCalledFunctionList(subCallee)
def setCallWeighting(self, weight):
""" Sets the probably of generating a function call"""
self.callWeighting = weight
def writeln(self, line):
self.kfile.write(line + '\n')
def writeComment(self, comment):
self.writeln('# ' + comment)
def writeEmptyLine(self):
self.writeln("")
def writePredefinedFunctions(self):
self.writeComment("Define ':' for sequencing: as a low-precedence operator that ignores operands")
self.writeComment("and just returns the RHS.")
self.writeln("def binary : 1 (x y) y;")
self.writeEmptyLine()
self.writeComment("Helper functions defined within toy")
self.writeln("extern putchard(x);")
self.writeln("extern printd(d);")
self.writeln("extern printlf();")
self.writeEmptyLine()
self.writeComment("Print the result of a function call")
self.writeln("def printresult(N Result)")
self.writeln(" # 'result('")
self.writeln(" putchard(114) : putchard(101) : putchard(115) : putchard(117) : putchard(108) : putchard(116) : putchard(40) :")
self.writeln(" printd(N) :");
self.writeln(" # ') = '")
self.writeln(" putchard(41) : putchard(32) : putchard(61) : putchard(32) :")
self.writeln(" printd(Result) :");
self.writeln(" printlf();")
self.writeEmptyLine()
def writeRandomOperation(self, LValue, LHS, RHS):
shouldCallFunc = (self.lastFuncNum > 2 and random.random() < self.callWeighting)
if shouldCallFunc:
funcToCall = random.randrange(1, self.lastFuncNum - 1)
self.updateFunctionCallMap(self.lastFuncNum, funcToCall)
self.writeln(" %s = func%d(%s, %s) :" % (LValue, funcToCall, LHS, RHS))
else:
possibleOperations = ["+", "-", "*", "/"]
operation = random.choice(possibleOperations)
if operation == "-":
# Don't let our intermediate value become zero
# This is complicated by the fact that '<' is our only comparison operator
self.writeln(" if %s < %s then" % (LHS, RHS))
self.writeln(" %s = %s %s %s" % (LValue, LHS, operation, RHS))
self.writeln(" else if %s < %s then" % (RHS, LHS))
self.writeln(" %s = %s %s %s" % (LValue, LHS, operation, RHS))
self.writeln(" else")
self.writeln(" %s = %s %s %f :" % (LValue, LHS, operation, random.uniform(1, 100)))
else:
self.writeln(" %s = %s %s %s :" % (LValue, LHS, operation, RHS))
def getNextFuncNum(self):
result = self.nextFuncNum
self.nextFuncNum += 1
self.lastFuncNum = result
return result
def writeFunction(self, elements):
funcNum = self.getNextFuncNum()
self.writeComment("Auto-generated function number %d" % funcNum)
self.writeln("def func%d(X Y)" % funcNum)
self.writeln(" var temp1 = X,")
self.writeln(" temp2 = Y,")
self.writeln(" temp3 in")
# Initialize the variable names to be rotated
first = "temp3"
second = "temp1"
third = "temp2"
# Write some random operations
for i in range(elements):
self.writeRandomOperation(first, second, third)
# Rotate the variables
temp = first
first = second
second = third
third = temp
self.writeln(" " + third + ";")
self.writeEmptyLine()
def writeFunctionCall(self):
self.writeComment("Call the last function")
arg1 = random.uniform(1, 100)
arg2 = random.uniform(1, 100)
self.writeln("printresult(%d, func%d(%f, %f) )" % (self.lastFuncNum, self.lastFuncNum, arg1, arg2))
self.writeEmptyLine()
self.updateCalledFunctionList(self.lastFuncNum)
def writeFinalFunctionCounts(self):
self.writeComment("Called %d of %d functions" % (len(self.calledFunctions), self.lastFuncNum))
def generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript):
""" Generate a random Kaleidoscope script based on the given parameters """
print "Generating " + filename
print(" %d functions, %d elements per function, %d functions between execution" %
(numFuncs, elementsPerFunc, funcsBetweenExec))
print(" Call weighting = %f" % callWeighting)
script = KScriptGenerator(filename)
script.setCallWeighting(callWeighting)
script.writeComment("===========================================================================")
script.writeComment("Auto-generated script")
script.writeComment(" %d functions, %d elements per function, %d functions between execution"
% (numFuncs, elementsPerFunc, funcsBetweenExec))
script.writeComment(" call weighting = %f" % callWeighting)
script.writeComment("===========================================================================")
script.writeEmptyLine()
script.writePredefinedFunctions()
funcsSinceLastExec = 0
for i in range(numFuncs):
script.writeFunction(elementsPerFunc)
funcsSinceLastExec += 1
if funcsSinceLastExec == funcsBetweenExec:
script.writeFunctionCall()
funcsSinceLastExec = 0
# Always end with a function call
if funcsSinceLastExec > 0:
script.writeFunctionCall()
script.writeEmptyLine()
script.writeFinalFunctionCounts()
funcsCalled = len(script.calledFunctions)
print " Called %d of %d functions, %d total" % (funcsCalled, numFuncs, script.totalCallsExecuted)
timingScript.writeTimingCall(filename, numFuncs, funcsCalled, script.totalCallsExecuted)
# Execution begins here
random.seed()
timingScript = TimingScriptGenerator("time-toy.sh", "timing-data.txt")
dataSets = [(5000, 3, 50, 0.50), (5000, 10, 100, 0.10), (5000, 10, 5, 0.10), (5000, 10, 1, 0.0),
(1000, 3, 10, 0.50), (1000, 10, 100, 0.10), (1000, 10, 5, 0.10), (1000, 10, 1, 0.0),
( 200, 3, 2, 0.50), ( 200, 10, 40, 0.10), ( 200, 10, 2, 0.10), ( 200, 10, 1, 0.0)]
# Generate the code
for (numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting) in dataSets:
filename = "test-%d-%d-%d-%d.k" % (numFuncs, elementsPerFunc, funcsBetweenExec, int(callWeighting * 100))
generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript)
print "All done!"
| 11,103 | 48.351111 | 179 | py |
LowFat | LowFat-master/llvm-4.0.0.src/examples/Kaleidoscope/MCJIT/lazy/genk-timing.py | #!/usr/bin/env python
import sys
import random
class TimingScriptGenerator:
"""Used to generate a bash script which will invoke the toy and time it"""
def __init__(self, scriptname, outputname):
self.timeFile = outputname
self.shfile = open(scriptname, 'w')
self.shfile.write("echo \"\" > %s\n" % self.timeFile)
def writeTimingCall(self, filename, numFuncs, funcsCalled, totalCalls):
"""Echo some comments and invoke both versions of toy"""
rootname = filename
if '.' in filename:
rootname = filename[:filename.rfind('.')]
self.shfile.write("echo \"%s: Calls %d of %d functions, %d total\" >> %s\n" % (filename, funcsCalled, numFuncs, totalCalls, self.timeFile))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"With MCJIT\" >> %s\n" % self.timeFile)
self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
self.shfile.write(" -o %s -a " % self.timeFile)
self.shfile.write("./toy-mcjit < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (filename, rootname, rootname))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"With JIT\" >> %s\n" % self.timeFile)
self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
self.shfile.write(" -o %s -a " % self.timeFile)
self.shfile.write("./toy-jit < %s > %s-jit.out 2> %s-jit.err\n" % (filename, rootname, rootname))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
class KScriptGenerator:
"""Used to generate random Kaleidoscope code"""
def __init__(self, filename):
self.kfile = open(filename, 'w')
self.nextFuncNum = 1
self.lastFuncNum = None
self.callWeighting = 0.1
# A mapping of calls within functions with no duplicates
self.calledFunctionTable = {}
# A list of function calls which will actually be executed
self.calledFunctions = []
# A comprehensive mapping of calls within functions
# used for computing the total number of calls
self.comprehensiveCalledFunctionTable = {}
self.totalCallsExecuted = 0
def updateTotalCallCount(self, callee):
# Count this call
self.totalCallsExecuted += 1
# Then count all the functions it calls
if callee in self.comprehensiveCalledFunctionTable:
for child in self.comprehensiveCalledFunctionTable[callee]:
self.updateTotalCallCount(child)
def updateFunctionCallMap(self, caller, callee):
"""Maintains a map of functions that are called from other functions"""
if not caller in self.calledFunctionTable:
self.calledFunctionTable[caller] = []
if not callee in self.calledFunctionTable[caller]:
self.calledFunctionTable[caller].append(callee)
if not caller in self.comprehensiveCalledFunctionTable:
self.comprehensiveCalledFunctionTable[caller] = []
self.comprehensiveCalledFunctionTable[caller].append(callee)
def updateCalledFunctionList(self, callee):
"""Maintains a list of functions that will actually be called"""
# Update the total call count
self.updateTotalCallCount(callee)
# If this function is already in the list, don't do anything else
if callee in self.calledFunctions:
return
# Add this function to the list of those that will be called.
self.calledFunctions.append(callee)
# If this function calls other functions, add them too
if callee in self.calledFunctionTable:
for subCallee in self.calledFunctionTable[callee]:
self.updateCalledFunctionList(subCallee)
def setCallWeighting(self, weight):
""" Sets the probably of generating a function call"""
self.callWeighting = weight
def writeln(self, line):
self.kfile.write(line + '\n')
def writeComment(self, comment):
self.writeln('# ' + comment)
def writeEmptyLine(self):
self.writeln("")
def writePredefinedFunctions(self):
self.writeComment("Define ':' for sequencing: as a low-precedence operator that ignores operands")
self.writeComment("and just returns the RHS.")
self.writeln("def binary : 1 (x y) y;")
self.writeEmptyLine()
self.writeComment("Helper functions defined within toy")
self.writeln("extern putchard(x);")
self.writeln("extern printd(d);")
self.writeln("extern printlf();")
self.writeEmptyLine()
self.writeComment("Print the result of a function call")
self.writeln("def printresult(N Result)")
self.writeln(" # 'result('")
self.writeln(" putchard(114) : putchard(101) : putchard(115) : putchard(117) : putchard(108) : putchard(116) : putchard(40) :")
self.writeln(" printd(N) :");
self.writeln(" # ') = '")
self.writeln(" putchard(41) : putchard(32) : putchard(61) : putchard(32) :")
self.writeln(" printd(Result) :");
self.writeln(" printlf();")
self.writeEmptyLine()
def writeRandomOperation(self, LValue, LHS, RHS):
shouldCallFunc = (self.lastFuncNum > 2 and random.random() < self.callWeighting)
if shouldCallFunc:
funcToCall = random.randrange(1, self.lastFuncNum - 1)
self.updateFunctionCallMap(self.lastFuncNum, funcToCall)
self.writeln(" %s = func%d(%s, %s) :" % (LValue, funcToCall, LHS, RHS))
else:
possibleOperations = ["+", "-", "*", "/"]
operation = random.choice(possibleOperations)
if operation == "-":
# Don't let our intermediate value become zero
# This is complicated by the fact that '<' is our only comparison operator
self.writeln(" if %s < %s then" % (LHS, RHS))
self.writeln(" %s = %s %s %s" % (LValue, LHS, operation, RHS))
self.writeln(" else if %s < %s then" % (RHS, LHS))
self.writeln(" %s = %s %s %s" % (LValue, LHS, operation, RHS))
self.writeln(" else")
self.writeln(" %s = %s %s %f :" % (LValue, LHS, operation, random.uniform(1, 100)))
else:
self.writeln(" %s = %s %s %s :" % (LValue, LHS, operation, RHS))
def getNextFuncNum(self):
result = self.nextFuncNum
self.nextFuncNum += 1
self.lastFuncNum = result
return result
def writeFunction(self, elements):
funcNum = self.getNextFuncNum()
self.writeComment("Auto-generated function number %d" % funcNum)
self.writeln("def func%d(X Y)" % funcNum)
self.writeln(" var temp1 = X,")
self.writeln(" temp2 = Y,")
self.writeln(" temp3 in")
# Initialize the variable names to be rotated
first = "temp3"
second = "temp1"
third = "temp2"
# Write some random operations
for i in range(elements):
self.writeRandomOperation(first, second, third)
# Rotate the variables
temp = first
first = second
second = third
third = temp
self.writeln(" " + third + ";")
self.writeEmptyLine()
def writeFunctionCall(self):
self.writeComment("Call the last function")
arg1 = random.uniform(1, 100)
arg2 = random.uniform(1, 100)
self.writeln("printresult(%d, func%d(%f, %f) )" % (self.lastFuncNum, self.lastFuncNum, arg1, arg2))
self.writeEmptyLine()
self.updateCalledFunctionList(self.lastFuncNum)
def writeFinalFunctionCounts(self):
self.writeComment("Called %d of %d functions" % (len(self.calledFunctions), self.lastFuncNum))
def generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript):
""" Generate a random Kaleidoscope script based on the given parameters """
print "Generating " + filename
print(" %d functions, %d elements per function, %d functions between execution" %
(numFuncs, elementsPerFunc, funcsBetweenExec))
print(" Call weighting = %f" % callWeighting)
script = KScriptGenerator(filename)
script.setCallWeighting(callWeighting)
script.writeComment("===========================================================================")
script.writeComment("Auto-generated script")
script.writeComment(" %d functions, %d elements per function, %d functions between execution"
% (numFuncs, elementsPerFunc, funcsBetweenExec))
script.writeComment(" call weighting = %f" % callWeighting)
script.writeComment("===========================================================================")
script.writeEmptyLine()
script.writePredefinedFunctions()
funcsSinceLastExec = 0
for i in range(numFuncs):
script.writeFunction(elementsPerFunc)
funcsSinceLastExec += 1
if funcsSinceLastExec == funcsBetweenExec:
script.writeFunctionCall()
funcsSinceLastExec = 0
# Always end with a function call
if funcsSinceLastExec > 0:
script.writeFunctionCall()
script.writeEmptyLine()
script.writeFinalFunctionCounts()
funcsCalled = len(script.calledFunctions)
print " Called %d of %d functions, %d total" % (funcsCalled, numFuncs, script.totalCallsExecuted)
timingScript.writeTimingCall(filename, numFuncs, funcsCalled, script.totalCallsExecuted)
# Execution begins here
random.seed()
timingScript = TimingScriptGenerator("time-toy.sh", "timing-data.txt")
dataSets = [(5000, 3, 50, 0.50), (5000, 10, 100, 0.10), (5000, 10, 5, 0.10), (5000, 10, 1, 0.0),
(1000, 3, 10, 0.50), (1000, 10, 100, 0.10), (1000, 10, 5, 0.10), (1000, 10, 1, 0.0),
( 200, 3, 2, 0.50), ( 200, 10, 40, 0.10), ( 200, 10, 2, 0.10), ( 200, 10, 1, 0.0)]
# Generate the code
for (numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting) in dataSets:
filename = "test-%d-%d-%d-%d.k" % (numFuncs, elementsPerFunc, funcsBetweenExec, int(callWeighting * 100))
generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript)
print "All done!"
| 10,499 | 46.727273 | 147 | py |
LowFat | LowFat-master/llvm-4.0.0.src/examples/Kaleidoscope/MCJIT/cached/split-lib.py | #!/usr/bin/env python
class TimingScriptGenerator:
"""Used to generate a bash script which will invoke the toy and time it"""
def __init__(self, scriptname, outputname):
self.shfile = open(scriptname, 'w')
self.timeFile = outputname
self.shfile.write("echo \"\" > %s\n" % self.timeFile)
def writeTimingCall(self, irname, callname):
"""Echo some comments and invoke both versions of toy"""
rootname = irname
if '.' in irname:
rootname = irname[:irname.rfind('.')]
self.shfile.write("echo \"%s: Calls %s\" >> %s\n" % (callname, irname, self.timeFile))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"With MCJIT\" >> %s\n" % self.timeFile)
self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
self.shfile.write(" -o %s -a " % self.timeFile)
self.shfile.write("./toy-mcjit -use-object-cache -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (irname, callname, rootname, rootname))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"With MCJIT again\" >> %s\n" % self.timeFile)
self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
self.shfile.write(" -o %s -a " % self.timeFile)
self.shfile.write("./toy-mcjit -use-object-cache -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (irname, callname, rootname, rootname))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"With JIT\" >> %s\n" % self.timeFile)
self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
self.shfile.write(" -o %s -a " % self.timeFile)
self.shfile.write("./toy-jit -input-IR=%s < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (irname, callname, rootname, rootname))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
class LibScriptGenerator:
"""Used to generate a bash script which will convert Kaleidoscope files to IR"""
def __init__(self, filename):
self.shfile = open(filename, 'w')
def writeLibGenCall(self, libname, irname):
self.shfile.write("./toy-ir-gen < %s 2> %s\n" % (libname, irname))
def splitScript(inputname, libGenScript, timingScript):
rootname = inputname[:-2]
libname = rootname + "-lib.k"
irname = rootname + "-lib.ir"
callname = rootname + "-call.k"
infile = open(inputname, "r")
libfile = open(libname, "w")
callfile = open(callname, "w")
print "Splitting %s into %s and %s" % (inputname, callname, libname)
for line in infile:
if not line.startswith("#"):
if line.startswith("print"):
callfile.write(line)
else:
libfile.write(line)
libGenScript.writeLibGenCall(libname, irname)
timingScript.writeTimingCall(irname, callname)
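# Illustrative example of the split rule above (not from a real run): in
# test-200-10-1-0.k, a line such as "# Auto-generated script" is dropped,
# "printresult(1, func1(...) )" goes to test-200-10-1-0-call.k, and
# "def func1(X Y)" (and every other non-print line) goes to test-200-10-1-0-lib.k.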
# Execution begins here
libGenScript = LibScriptGenerator("make-libs.sh")
timingScript = TimingScriptGenerator("time-lib.sh", "lib-timing.txt")
script_list = ["test-5000-3-50-50.k", "test-5000-10-100-10.k", "test-5000-10-5-10.k", "test-5000-10-1-0.k",
"test-1000-3-10-50.k", "test-1000-10-100-10.k", "test-1000-10-5-10.k", "test-1000-10-1-0.k",
"test-200-3-2-50.k", "test-200-10-40-10.k", "test-200-10-2-10.k", "test-200-10-1-0.k"]
for script in script_list:
splitScript(script, libGenScript, timingScript)
print "All done!"
| 3,622 | 50.028169 | 150 | py |
LowFat | LowFat-master/llvm-4.0.0.src/examples/Kaleidoscope/MCJIT/cached/genk-timing.py | #!/usr/bin/env python
import sys
import random
class TimingScriptGenerator:
"""Used to generate a bash script which will invoke the toy and time it"""
def __init__(self, scriptname, outputname):
self.timeFile = outputname
self.shfile = open(scriptname, 'w')
self.shfile.write("echo \"\" > %s\n" % self.timeFile)
def writeTimingCall(self, filename, numFuncs, funcsCalled, totalCalls):
"""Echo some comments and invoke both versions of toy"""
rootname = filename
if '.' in filename:
rootname = filename[:filename.rfind('.')]
self.shfile.write("echo \"%s: Calls %d of %d functions, %d total\" >> %s\n" % (filename, funcsCalled, numFuncs, totalCalls, self.timeFile))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"With MCJIT\" >> %s\n" % self.timeFile)
self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
self.shfile.write(" -o %s -a " % self.timeFile)
self.shfile.write("./toy-mcjit < %s > %s-mcjit.out 2> %s-mcjit.err\n" % (filename, rootname, rootname))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"With JIT\" >> %s\n" % self.timeFile)
self.shfile.write("/usr/bin/time -f \"Command %C\\n\\tuser time: %U s\\n\\tsytem time: %S s\\n\\tmax set: %M kb\"")
self.shfile.write(" -o %s -a " % self.timeFile)
self.shfile.write("./toy-jit < %s > %s-jit.out 2> %s-jit.err\n" % (filename, rootname, rootname))
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
self.shfile.write("echo \"\" >> %s\n" % self.timeFile)
class KScriptGenerator:
"""Used to generate random Kaleidoscope code"""
def __init__(self, filename):
self.kfile = open(filename, 'w')
self.nextFuncNum = 1
self.lastFuncNum = None
self.callWeighting = 0.1
# A mapping of calls within functions with no duplicates
self.calledFunctionTable = {}
# A list of function calls which will actually be executed
self.calledFunctions = []
# A comprehensive mapping of calls within functions
# used for computing the total number of calls
self.comprehensiveCalledFunctionTable = {}
self.totalCallsExecuted = 0
def updateTotalCallCount(self, callee):
# Count this call
self.totalCallsExecuted += 1
# Then count all the functions it calls
if callee in self.comprehensiveCalledFunctionTable:
for child in self.comprehensiveCalledFunctionTable[callee]:
self.updateTotalCallCount(child)
def updateFunctionCallMap(self, caller, callee):
"""Maintains a map of functions that are called from other functions"""
if not caller in self.calledFunctionTable:
self.calledFunctionTable[caller] = []
if not callee in self.calledFunctionTable[caller]:
self.calledFunctionTable[caller].append(callee)
if not caller in self.comprehensiveCalledFunctionTable:
self.comprehensiveCalledFunctionTable[caller] = []
self.comprehensiveCalledFunctionTable[caller].append(callee)
def updateCalledFunctionList(self, callee):
"""Maintains a list of functions that will actually be called"""
# Update the total call count
self.updateTotalCallCount(callee)
# If this function is already in the list, don't do anything else
if callee in self.calledFunctions:
return
# Add this function to the list of those that will be called.
self.calledFunctions.append(callee)
# If this function calls other functions, add them too
if callee in self.calledFunctionTable:
for subCallee in self.calledFunctionTable[callee]:
self.updateCalledFunctionList(subCallee)
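    # Illustrative example (hypothetical tables, not from a real run): with
    # calledFunctionTable = {5: [3], 3: [2]}, updateCalledFunctionList(5)
    # marks functions 5, 3 and 2 as called, while updateTotalCallCount(5)
    # walks comprehensiveCalledFunctionTable and counts every nested call.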
def setCallWeighting(self, weight):
""" Sets the probably of generating a function call"""
self.callWeighting = weight
def writeln(self, line):
self.kfile.write(line + '\n')
def writeComment(self, comment):
self.writeln('# ' + comment)
def writeEmptyLine(self):
self.writeln("")
def writePredefinedFunctions(self):
self.writeComment("Define ':' for sequencing: as a low-precedence operator that ignores operands")
self.writeComment("and just returns the RHS.")
self.writeln("def binary : 1 (x y) y;")
self.writeEmptyLine()
self.writeComment("Helper functions defined within toy")
self.writeln("extern putchard(x);")
self.writeln("extern printd(d);")
self.writeln("extern printlf();")
self.writeEmptyLine()
self.writeComment("Print the result of a function call")
self.writeln("def printresult(N Result)")
self.writeln(" # 'result('")
self.writeln(" putchard(114) : putchard(101) : putchard(115) : putchard(117) : putchard(108) : putchard(116) : putchard(40) :")
self.writeln(" printd(N) :");
self.writeln(" # ') = '")
self.writeln(" putchard(41) : putchard(32) : putchard(61) : putchard(32) :")
self.writeln(" printd(Result) :");
self.writeln(" printlf();")
self.writeEmptyLine()
def writeRandomOperation(self, LValue, LHS, RHS):
shouldCallFunc = (self.lastFuncNum > 2 and random.random() < self.callWeighting)
if shouldCallFunc:
funcToCall = random.randrange(1, self.lastFuncNum - 1)
self.updateFunctionCallMap(self.lastFuncNum, funcToCall)
self.writeln(" %s = func%d(%s, %s) :" % (LValue, funcToCall, LHS, RHS))
else:
possibleOperations = ["+", "-", "*", "/"]
operation = random.choice(possibleOperations)
if operation == "-":
# Don't let our intermediate value become zero
# This is complicated by the fact that '<' is our only comparison operator
self.writeln(" if %s < %s then" % (LHS, RHS))
self.writeln(" %s = %s %s %s" % (LValue, LHS, operation, RHS))
self.writeln(" else if %s < %s then" % (RHS, LHS))
self.writeln(" %s = %s %s %s" % (LValue, LHS, operation, RHS))
self.writeln(" else")
self.writeln(" %s = %s %s %f :" % (LValue, LHS, operation, random.uniform(1, 100)))
else:
self.writeln(" %s = %s %s %s :" % (LValue, LHS, operation, RHS))
def getNextFuncNum(self):
result = self.nextFuncNum
self.nextFuncNum += 1
self.lastFuncNum = result
return result
def writeFunction(self, elements):
funcNum = self.getNextFuncNum()
self.writeComment("Auto-generated function number %d" % funcNum)
self.writeln("def func%d(X Y)" % funcNum)
self.writeln(" var temp1 = X,")
self.writeln(" temp2 = Y,")
self.writeln(" temp3 in")
# Initialize the variable names to be rotated
first = "temp3"
second = "temp1"
third = "temp2"
# Write some random operations
for i in range(elements):
self.writeRandomOperation(first, second, third)
# Rotate the variables
temp = first
first = second
second = third
third = temp
self.writeln(" " + third + ";")
self.writeEmptyLine()
def writeFunctionCall(self):
self.writeComment("Call the last function")
arg1 = random.uniform(1, 100)
arg2 = random.uniform(1, 100)
self.writeln("printresult(%d, func%d(%f, %f) )" % (self.lastFuncNum, self.lastFuncNum, arg1, arg2))
self.writeEmptyLine()
self.updateCalledFunctionList(self.lastFuncNum)
def writeFinalFunctionCounts(self):
self.writeComment("Called %d of %d functions" % (len(self.calledFunctions), self.lastFuncNum))
def generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript):
""" Generate a random Kaleidoscope script based on the given parameters """
print "Generating " + filename
print(" %d functions, %d elements per function, %d functions between execution" %
(numFuncs, elementsPerFunc, funcsBetweenExec))
print(" Call weighting = %f" % callWeighting)
script = KScriptGenerator(filename)
script.setCallWeighting(callWeighting)
script.writeComment("===========================================================================")
script.writeComment("Auto-generated script")
script.writeComment(" %d functions, %d elements per function, %d functions between execution"
% (numFuncs, elementsPerFunc, funcsBetweenExec))
script.writeComment(" call weighting = %f" % callWeighting)
script.writeComment("===========================================================================")
script.writeEmptyLine()
script.writePredefinedFunctions()
funcsSinceLastExec = 0
for i in range(numFuncs):
script.writeFunction(elementsPerFunc)
funcsSinceLastExec += 1
if funcsSinceLastExec == funcsBetweenExec:
script.writeFunctionCall()
funcsSinceLastExec = 0
# Always end with a function call
if funcsSinceLastExec > 0:
script.writeFunctionCall()
script.writeEmptyLine()
script.writeFinalFunctionCounts()
funcsCalled = len(script.calledFunctions)
print " Called %d of %d functions, %d total" % (funcsCalled, numFuncs, script.totalCallsExecuted)
timingScript.writeTimingCall(filename, numFuncs, funcsCalled, script.totalCallsExecuted)
# Execution begins here
random.seed()
timingScript = TimingScriptGenerator("time-toy.sh", "timing-data.txt")
dataSets = [(5000, 3, 50, 0.50), (5000, 10, 100, 0.10), (5000, 10, 5, 0.10), (5000, 10, 1, 0.0),
(1000, 3, 10, 0.50), (1000, 10, 100, 0.10), (1000, 10, 5, 0.10), (1000, 10, 1, 0.0),
( 200, 3, 2, 0.50), ( 200, 10, 40, 0.10), ( 200, 10, 2, 0.10), ( 200, 10, 1, 0.0)]
# Generate the code
for (numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting) in dataSets:
filename = "test-%d-%d-%d-%d.k" % (numFuncs, elementsPerFunc, funcsBetweenExec, int(callWeighting * 100))
generateKScript(filename, numFuncs, elementsPerFunc, funcsBetweenExec, callWeighting, timingScript)
print "All done!"
| 10,499 | 46.727273 | 147 | py |
LowFat | LowFat-master/llvm-4.0.0.src/projects/compiler-rt/test/asan/android_commands/android_compile.py | #!/usr/bin/python
import os, sys, subprocess
from android_common import *
here = os.path.abspath(os.path.dirname(sys.argv[0]))
android_run = os.path.join(here, 'android_run.py')
output = None
output_type = 'executable'
args = sys.argv[1:]
while args:
arg = args.pop(0)
if arg == '-shared':
output_type = 'shared'
elif arg == '-c':
output_type = 'object'
elif arg == '-o':
output = args.pop(0)
if output is None:
print "No output file name!"
sys.exit(1)
ret = subprocess.call(sys.argv[1:])
if ret != 0:
sys.exit(ret)
if output_type in ['executable', 'shared']:
push_to_device(output)
if output_type == 'executable':
os.rename(output, output + '.real')
os.symlink(android_run, output)
| 756 | 19.459459 | 52 | py |
LowFat | LowFat-master/llvm-4.0.0.src/projects/compiler-rt/test/asan/android_commands/android_run.py | #!/usr/bin/python
import os, signal, sys, subprocess, tempfile
from android_common import *
ANDROID_TMPDIR = '/data/local/tmp/Output'
here = os.path.abspath(os.path.dirname(sys.argv[0]))
device_binary = os.path.join(ANDROID_TMPDIR, os.path.basename(sys.argv[0]))
def build_env():
args = []
# Android linker ignores RPATH. Set LD_LIBRARY_PATH to Output dir.
args.append('LD_LIBRARY_PATH=%s' % (ANDROID_TMPDIR,))
for (key, value) in os.environ.items():
if key in ['ASAN_OPTIONS', 'ASAN_ACTIVATION_OPTIONS']:
args.append('%s="%s"' % (key, value))
return ' '.join(args)
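# build_env() yields something like (values illustrative):
#   LD_LIBRARY_PATH=/data/local/tmp/Output ASAN_OPTIONS="verbosity=1"
# which is prepended to the command executed on the device below.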
is_64bit = (subprocess.check_output(['file', sys.argv[0] + '.real']).find('64-bit') != -1)
asanwrapper = "" if is_64bit else "asanwrapper "
device_env = build_env()
device_args = ' '.join(sys.argv[1:]) # FIXME: escape?
device_stdout = device_binary + '.stdout'
device_stderr = device_binary + '.stderr'
device_exitcode = device_binary + '.exitcode'
ret = adb(['shell', 'cd %s && %s %s%s %s >%s 2>%s ; echo $? >%s' %
(ANDROID_TMPDIR, device_env, asanwrapper, device_binary, device_args,
device_stdout, device_stderr, device_exitcode)])
if ret != 0:
sys.exit(ret)
sys.stdout.write(pull_from_device(device_stdout))
sys.stderr.write(pull_from_device(device_stderr))
retcode = int(pull_from_device(device_exitcode))
# If the device process died with a signal, do abort().
# Not exactly the same, but good enough to fool "not --crash".
if retcode > 128:
os.kill(os.getpid(), signal.SIGABRT)
sys.exit(retcode)
| 1,545 | 35.809524 | 90 | py |
LowFat | LowFat-master/llvm-4.0.0.src/projects/compiler-rt/test/asan/android_commands/android_common.py | import os, subprocess, tempfile
import time
ANDROID_TMPDIR = '/data/local/tmp/Output'
ADB = os.environ.get('ADB', 'adb')
verbose = False
if os.environ.get('ANDROID_RUN_VERBOSE') == '1':
verbose = True
def adb(args, attempts = 1):
if verbose:
print args
tmpname = tempfile.mktemp()
out = open(tmpname, 'w')
ret = 255
while attempts > 0 and ret != 0:
attempts -= 1
ret = subprocess.call([ADB] + args, stdout=out, stderr=subprocess.STDOUT)
      if ret != 0 and attempts != 0:
        # Back off briefly before retrying the failed command.
        time.sleep(5)
if ret != 0:
print "adb command failed", args
print tmpname
out.close()
out = open(tmpname, 'r')
print out.read()
out.close()
os.unlink(tmpname)
return ret
def pull_from_device(path):
tmp = tempfile.mktemp()
adb(['pull', path, tmp], 5)
text = open(tmp, 'r').read()
os.unlink(tmp)
return text
def push_to_device(path):
# Workaround for https://code.google.com/p/android/issues/detail?id=65857
dst_path = os.path.join(ANDROID_TMPDIR, os.path.basename(path))
tmp_path = dst_path + '.push'
adb(['push', path, tmp_path], 5)
adb(['shell', 'cp "%s" "%s" 2>&1' % (tmp_path, dst_path)], 5)
| 1,193 | 25.533333 | 79 | py |
LowFat | LowFat-master/llvm-4.0.0.src/projects/compiler-rt/lib/asan/scripts/asan_symbolize.py | #!/usr/bin/env python
#===- lib/asan/scripts/asan_symbolize.py -----------------------------------===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
import argparse
import bisect
import getopt
import os
import re
import subprocess
import sys
symbolizers = {}
DEBUG = False
demangle = False
binutils_prefix = None
sysroot_path = None
binary_name_filter = None
fix_filename_patterns = None
logfile = sys.stdin
allow_system_symbolizer = True
force_system_symbolizer = False
# FIXME: merge the code that calls fix_filename().
def fix_filename(file_name):
if fix_filename_patterns:
for path_to_cut in fix_filename_patterns:
file_name = re.sub('.*' + path_to_cut, '', file_name)
file_name = re.sub('.*asan_[a-z_]*.cc:[0-9]*', '_asan_rtl_', file_name)
file_name = re.sub('.*crtstuff.c:0', '???:0', file_name)
return file_name
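# For example, fix_filename('/src/asan_malloc_linux.cc:12') collapses to
# '_asan_rtl_', hiding sanitizer-runtime frames behind a placeholder.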
def sysroot_path_filter(binary_name):
return sysroot_path + binary_name
def is_valid_arch(s):
return s in ["i386", "x86_64", "x86_64h", "arm", "armv6", "armv7", "armv7s",
"armv7k", "arm64", "powerpc64", "powerpc64le", "s390x", "s390"]
def guess_arch(addr):
# Guess which arch we're running. 10 = len('0x') + 8 hex digits.
if len(addr) > 10:
return 'x86_64'
else:
return 'i386'
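# For example, guess_arch('0x7f6e35cf2e45') is 'x86_64' (14 characters), while
# guess_arch('0x08048000') is 'i386' (exactly 10).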
class Symbolizer(object):
def __init__(self):
pass
def symbolize(self, addr, binary, offset):
"""Symbolize the given address (pair of binary and offset).
    Overridden in subclasses.
Args:
addr: virtual address of an instruction.
binary: path to executable/shared object containing this instruction.
offset: instruction offset in the @binary.
Returns:
list of strings (one string for each inlined frame) describing
the code locations for this instruction (that is, function name, file
name, line and column numbers).
"""
return None
class LLVMSymbolizer(Symbolizer):
def __init__(self, symbolizer_path, default_arch, system, dsym_hints=[]):
super(LLVMSymbolizer, self).__init__()
self.symbolizer_path = symbolizer_path
self.default_arch = default_arch
self.system = system
self.dsym_hints = dsym_hints
self.pipe = self.open_llvm_symbolizer()
def open_llvm_symbolizer(self):
cmd = [self.symbolizer_path,
'--use-symbol-table=true',
'--demangle=%s' % demangle,
'--functions=linkage',
'--inlining=true',
'--default-arch=%s' % self.default_arch]
if self.system == 'Darwin':
for hint in self.dsym_hints:
cmd.append('--dsym-hint=%s' % hint)
if DEBUG:
print ' '.join(cmd)
try:
result = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
except OSError:
result = None
return result
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
if not self.pipe:
return None
result = []
try:
symbolizer_input = '"%s" %s' % (binary, offset)
if DEBUG:
print symbolizer_input
print >> self.pipe.stdin, symbolizer_input
while True:
function_name = self.pipe.stdout.readline().rstrip()
if not function_name:
break
file_name = self.pipe.stdout.readline().rstrip()
file_name = fix_filename(file_name)
if (not function_name.startswith('??') or
not file_name.startswith('??')):
# Append only non-trivial frames.
result.append('%s in %s %s' % (addr, function_name,
file_name))
except Exception:
result = []
if not result:
result = None
return result
def LLVMSymbolizerFactory(system, default_arch, dsym_hints=[]):
symbolizer_path = os.getenv('LLVM_SYMBOLIZER_PATH')
if not symbolizer_path:
symbolizer_path = os.getenv('ASAN_SYMBOLIZER_PATH')
if not symbolizer_path:
# Assume llvm-symbolizer is in PATH.
symbolizer_path = 'llvm-symbolizer'
return LLVMSymbolizer(symbolizer_path, default_arch, system, dsym_hints)
class Addr2LineSymbolizer(Symbolizer):
def __init__(self, binary):
super(Addr2LineSymbolizer, self).__init__()
self.binary = binary
self.pipe = self.open_addr2line()
self.output_terminator = -1
def open_addr2line(self):
addr2line_tool = 'addr2line'
if binutils_prefix:
addr2line_tool = binutils_prefix + addr2line_tool
cmd = [addr2line_tool, '-fi']
if demangle:
cmd += ['--demangle']
cmd += ['-e', self.binary]
if DEBUG:
print ' '.join(cmd)
return subprocess.Popen(cmd,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
if self.binary != binary:
return None
lines = []
try:
print >> self.pipe.stdin, offset
print >> self.pipe.stdin, self.output_terminator
is_first_frame = True
while True:
function_name = self.pipe.stdout.readline().rstrip()
file_name = self.pipe.stdout.readline().rstrip()
if is_first_frame:
is_first_frame = False
elif function_name in ['', '??']:
assert file_name == function_name
break
        lines.append((function_name, file_name))
except Exception:
lines.append(('??', '??:0'))
return ['%s in %s %s' % (addr, function, fix_filename(file)) for (function, file) in lines]
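# addr2line -fi answers each address with one or more (function, file:line)
# line pairs, e.g. "inlined_callee" / "/src/foo.cc:42" then "caller" /
# "/src/bar.cc:10" (names here are illustrative); the -1 terminator address
# written above yields a final "??" / "??:0" pair that ends the read loop.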
class UnbufferedLineConverter(object):
"""
Wrap a child process that responds to each line of input with one line of
output. Uses pty to trick the child into providing unbuffered output.
"""
def __init__(self, args, close_stderr=False):
# Local imports so that the script can start on Windows.
import pty
import termios
pid, fd = pty.fork()
if pid == 0:
# We're the child. Transfer control to command.
if close_stderr:
dev_null = os.open('/dev/null', 0)
os.dup2(dev_null, 2)
os.execvp(args[0], args)
else:
# Disable echoing.
attr = termios.tcgetattr(fd)
attr[3] = attr[3] & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, attr)
# Set up a file()-like interface to the child process
self.r = os.fdopen(fd, "r", 1)
self.w = os.fdopen(os.dup(fd), "w", 1)
def convert(self, line):
self.w.write(line + "\n")
return self.readline()
def readline(self):
return self.r.readline().rstrip()
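# Sketch of intended use (hypothetical command):
#   conv = UnbufferedLineConverter(['cat'])
#   conv.convert('hello')  # -> 'hello', without waiting on stdio buffering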
class DarwinSymbolizer(Symbolizer):
def __init__(self, addr, binary, arch):
super(DarwinSymbolizer, self).__init__()
self.binary = binary
self.arch = arch
self.open_atos()
def open_atos(self):
if DEBUG:
print 'atos -o %s -arch %s' % (self.binary, self.arch)
cmdline = ['atos', '-o', self.binary, '-arch', self.arch]
self.atos = UnbufferedLineConverter(cmdline, close_stderr=True)
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
if self.binary != binary:
return None
atos_line = self.atos.convert('0x%x' % int(offset, 16))
while "got symbolicator for" in atos_line:
atos_line = self.atos.readline()
# A well-formed atos response looks like this:
# foo(type1, type2) (in object.name) (filename.cc:80)
match = re.match('^(.*) \(in (.*)\) \((.*:\d*)\)$', atos_line)
if DEBUG:
print 'atos_line: ', atos_line
if match:
function_name = match.group(1)
function_name = re.sub('\(.*?\)', '', function_name)
file_name = fix_filename(match.group(3))
return ['%s in %s %s' % (addr, function_name, file_name)]
else:
return ['%s in %s' % (addr, atos_line)]
# Chain several symbolizers so that if one symbolizer fails, we fall back
# to the next symbolizer in chain.
class ChainSymbolizer(Symbolizer):
def __init__(self, symbolizer_list):
super(ChainSymbolizer, self).__init__()
self.symbolizer_list = symbolizer_list
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
for symbolizer in self.symbolizer_list:
if symbolizer:
result = symbolizer.symbolize(addr, binary, offset)
if result:
return result
return None
def append_symbolizer(self, symbolizer):
self.symbolizer_list.append(symbolizer)
def BreakpadSymbolizerFactory(binary):
suffix = os.getenv('BREAKPAD_SUFFIX')
if suffix:
filename = binary + suffix
if os.access(filename, os.F_OK):
return BreakpadSymbolizer(filename)
return None
def SystemSymbolizerFactory(system, addr, binary, arch):
if system == 'Darwin':
return DarwinSymbolizer(addr, binary, arch)
elif system == 'Linux' or system == 'FreeBSD':
return Addr2LineSymbolizer(binary)
class BreakpadSymbolizer(Symbolizer):
def __init__(self, filename):
super(BreakpadSymbolizer, self).__init__()
self.filename = filename
lines = file(filename).readlines()
self.files = []
self.symbols = {}
self.address_list = []
self.addresses = {}
# MODULE mac x86_64 A7001116478B33F18FF9BEDE9F615F190 t
fragments = lines[0].rstrip().split()
self.arch = fragments[2]
self.debug_id = fragments[3]
self.binary = ' '.join(fragments[4:])
self.parse_lines(lines[1:])
def parse_lines(self, lines):
cur_function_addr = ''
for line in lines:
fragments = line.split()
if fragments[0] == 'FILE':
assert int(fragments[1]) == len(self.files)
self.files.append(' '.join(fragments[2:]))
elif fragments[0] == 'PUBLIC':
self.symbols[int(fragments[1], 16)] = ' '.join(fragments[3:])
elif fragments[0] in ['CFI', 'STACK']:
pass
elif fragments[0] == 'FUNC':
cur_function_addr = int(fragments[1], 16)
if not cur_function_addr in self.symbols.keys():
self.symbols[cur_function_addr] = ' '.join(fragments[4:])
else:
# Line starting with an address.
addr = int(fragments[0], 16)
self.address_list.append(addr)
# Tuple of symbol address, size, line, file number.
self.addresses[addr] = (cur_function_addr,
int(fragments[1], 16),
int(fragments[2]),
int(fragments[3]))
self.address_list.sort()
def get_sym_file_line(self, addr):
key = None
if addr in self.addresses.keys():
key = addr
else:
index = bisect.bisect_left(self.address_list, addr)
if index == 0:
return None
else:
key = self.address_list[index - 1]
sym_id, size, line_no, file_no = self.addresses[key]
symbol = self.symbols[sym_id]
filename = self.files[file_no]
if addr < key + size:
return symbol, filename, line_no
else:
return None
def symbolize(self, addr, binary, offset):
if self.binary != binary:
return None
res = self.get_sym_file_line(int(offset, 16))
if res:
function_name, file_name, line_no = res
result = ['%s in %s %s:%d' % (
addr, function_name, file_name, line_no)]
print result
return result
else:
return None
class SymbolizationLoop(object):
def __init__(self, binary_name_filter=None, dsym_hint_producer=None):
if sys.platform == 'win32':
# ASan on Windows uses dbghelp.dll to symbolize in-process, which works
# even in sandboxed processes. Nothing needs to be done here.
self.process_line = self.process_line_echo
else:
# Used by clients who may want to supply a different binary name.
# E.g. in Chrome several binaries may share a single .dSYM.
self.binary_name_filter = binary_name_filter
self.dsym_hint_producer = dsym_hint_producer
self.system = os.uname()[0]
if self.system not in ['Linux', 'Darwin', 'FreeBSD']:
raise Exception('Unknown system')
self.llvm_symbolizers = {}
self.last_llvm_symbolizer = None
self.dsym_hints = set([])
self.frame_no = 0
self.process_line = self.process_line_posix
def symbolize_address(self, addr, binary, offset, arch):
# On non-Darwin (i.e. on platforms without .dSYM debug info) always use
# a single symbolizer binary.
# On Darwin, if the dsym hint producer is present:
# 1. check whether we've seen this binary already; if so,
# use |llvm_symbolizers[binary]|, which has already loaded the debug
# info for this binary (might not be the case for
# |last_llvm_symbolizer|);
# 2. otherwise check if we've seen all the hints for this binary already;
# if so, reuse |last_llvm_symbolizer| which has the full set of hints;
# 3. otherwise create a new symbolizer and pass all currently known
# .dSYM hints to it.
result = None
if not force_system_symbolizer:
if not binary in self.llvm_symbolizers:
use_new_symbolizer = True
if self.system == 'Darwin' and self.dsym_hint_producer:
dsym_hints_for_binary = set(self.dsym_hint_producer(binary))
use_new_symbolizer = bool(dsym_hints_for_binary - self.dsym_hints)
self.dsym_hints |= dsym_hints_for_binary
if self.last_llvm_symbolizer and not use_new_symbolizer:
self.llvm_symbolizers[binary] = self.last_llvm_symbolizer
else:
self.last_llvm_symbolizer = LLVMSymbolizerFactory(
self.system, arch, self.dsym_hints)
self.llvm_symbolizers[binary] = self.last_llvm_symbolizer
# Use the chain of symbolizers:
# Breakpad symbolizer -> LLVM symbolizer -> addr2line/atos
# (fall back to next symbolizer if the previous one fails).
if not binary in symbolizers:
symbolizers[binary] = ChainSymbolizer(
[BreakpadSymbolizerFactory(binary), self.llvm_symbolizers[binary]])
result = symbolizers[binary].symbolize(addr, binary, offset)
else:
symbolizers[binary] = ChainSymbolizer([])
if result is None:
if not allow_system_symbolizer:
raise Exception('Failed to launch or use llvm-symbolizer.')
# Initialize system symbolizer only if other symbolizers failed.
symbolizers[binary].append_symbolizer(
SystemSymbolizerFactory(self.system, addr, binary, arch))
result = symbolizers[binary].symbolize(addr, binary, offset)
# The system symbolizer must produce some result.
assert result
return result
def get_symbolized_lines(self, symbolized_lines):
if not symbolized_lines:
return [self.current_line]
else:
result = []
for symbolized_frame in symbolized_lines:
result.append(' #%s %s' % (str(self.frame_no), symbolized_frame.rstrip()))
self.frame_no += 1
return result
def process_logfile(self):
self.frame_no = 0
for line in logfile:
processed = self.process_line(line)
print '\n'.join(processed)
def process_line_echo(self, line):
return [line.rstrip()]
def process_line_posix(self, line):
self.current_line = line.rstrip()
#0 0x7f6e35cf2e45 (/blah/foo.so+0x11fe45)
stack_trace_line_format = (
'^( *#([0-9]+) *)(0x[0-9a-f]+) *\((.*)\+(0x[0-9a-f]+)\)')
match = re.match(stack_trace_line_format, line)
if not match:
return [self.current_line]
if DEBUG:
print line
_, frameno_str, addr, binary, offset = match.groups()
arch = ""
# Arch can be embedded in the filename, e.g.: "libabc.dylib:x86_64h"
colon_pos = binary.rfind(":")
if colon_pos != -1:
maybe_arch = binary[colon_pos+1:]
if is_valid_arch(maybe_arch):
arch = maybe_arch
binary = binary[0:colon_pos]
if arch == "":
arch = guess_arch(addr)
if frameno_str == '0':
# Assume that frame #0 is the first frame of new stack trace.
self.frame_no = 0
original_binary = binary
if self.binary_name_filter:
binary = self.binary_name_filter(binary)
symbolized_line = self.symbolize_address(addr, binary, offset, arch)
if not symbolized_line:
if original_binary != binary:
        symbolized_line = self.symbolize_address(addr, original_binary, offset, arch)
return self.get_symbolized_lines(symbolized_line)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='ASan symbolization script',
epilog='Example of use:\n'
'asan_symbolize.py -c "$HOME/opt/cross/bin/arm-linux-gnueabi-" '
'-s "$HOME/SymbolFiles" < asan.log')
parser.add_argument('path_to_cut', nargs='*',
help='pattern to be cut from the result file path ')
parser.add_argument('-d','--demangle', action='store_true',
help='demangle function names')
parser.add_argument('-s', metavar='SYSROOT',
help='set path to sysroot for sanitized binaries')
parser.add_argument('-c', metavar='CROSS_COMPILE',
help='set prefix for binutils')
parser.add_argument('-l','--logfile', default=sys.stdin,
type=argparse.FileType('r'),
help='set log file name to parse, default is stdin')
parser.add_argument('--force-system-symbolizer', action='store_true',
help='don\'t use llvm-symbolizer')
args = parser.parse_args()
if args.path_to_cut:
fix_filename_patterns = args.path_to_cut
if args.demangle:
demangle = True
if args.s:
binary_name_filter = sysroot_path_filter
sysroot_path = args.s
if args.c:
binutils_prefix = args.c
if args.logfile:
logfile = args.logfile
else:
logfile = sys.stdin
if args.force_system_symbolizer:
force_system_symbolizer = True
if force_system_symbolizer:
assert(allow_system_symbolizer)
loop = SymbolizationLoop(binary_name_filter)
loop.process_logfile()
| 18,097 | 34.073643 | 95 | py |
LowFat | LowFat-master/llvm-4.0.0.src/projects/compiler-rt/lib/sanitizer_common/scripts/litlint_test.py | #!/usr/bin/python
# Tests for litlint.py
#
# Usage: python litlint_test.py
#
# Returns nonzero if any test fails
import litlint
import unittest
class TestLintLine(unittest.TestCase):
def test_missing_run(self):
f = litlint.LintLine
self.assertEqual(f(' %t '), ('missing %run before %t', 2))
self.assertEqual(f(' %t\n'), ('missing %run before %t', 2))
self.assertEqual(f(' %t.so '), (None, None))
self.assertEqual(f(' %t.o '), (None, None))
self.assertEqual(f('%run %t '), (None, None))
self.assertEqual(f('-o %t '), (None, None))
if __name__ == '__main__':
unittest.main()
| 621 | 24.916667 | 66 | py |
LowFat | LowFat-master/llvm-4.0.0.src/projects/compiler-rt/lib/sanitizer_common/scripts/sancov.py | #!/usr/bin/env python
# Merge or print the coverage data collected by asan's coverage.
# Input files are sequences of 4-byte integers.
# We need to merge these integers into a set and then
# either print them (as hex) or dump them into another file.
import array
import bisect
import glob
import os.path
import struct
import subprocess
import sys
prog_name = ""
def Usage():
print >> sys.stderr, "Usage: \n" + \
" " + prog_name + " merge FILE [FILE...] > OUTPUT\n" \
" " + prog_name + " print FILE [FILE...]\n" \
" " + prog_name + " unpack FILE [FILE...]\n" \
" " + prog_name + " rawunpack FILE [FILE ...]\n" \
" " + prog_name + " missing BINARY < LIST_OF_PCS\n"
exit(1)
def CheckBits(bits):
if bits != 32 and bits != 64:
raise Exception("Wrong bitness: %d" % bits)
def TypeCodeForBits(bits):
CheckBits(bits)
return 'L' if bits == 64 else 'I'
def TypeCodeForStruct(bits):
CheckBits(bits)
return 'Q' if bits == 64 else 'I'
kMagic32SecondHalf = 0xFFFFFF32
kMagic64SecondHalf = 0xFFFFFF64
kMagicFirstHalf = 0xC0BFFFFF
def MagicForBits(bits):
CheckBits(bits)
if sys.byteorder == 'little':
return [kMagic64SecondHalf if bits == 64 else kMagic32SecondHalf, kMagicFirstHalf]
else:
return [kMagicFirstHalf, kMagic64SecondHalf if bits == 64 else kMagic32SecondHalf]
def ReadMagicAndReturnBitness(f, path):
magic_bytes = f.read(8)
  magic_words = struct.unpack('II', magic_bytes)
bits = 0
idx = 1 if sys.byteorder == 'little' else 0
if magic_words[idx] == kMagicFirstHalf:
if magic_words[1-idx] == kMagic64SecondHalf:
bits = 64
elif magic_words[1-idx] == kMagic32SecondHalf:
bits = 32
if bits == 0:
raise Exception('Bad magic word in %s' % path)
return bits
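# Example: on a little-endian host a 64-bit .sancov file begins with the bytes
# 64 ff ff ff ff ff bf c0, which unpack('II') reads back as
# (kMagic64SecondHalf, kMagicFirstHalf).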
def ReadOneFile(path):
with open(path, mode="rb") as f:
f.seek(0, 2)
size = f.tell()
f.seek(0, 0)
if size < 8:
raise Exception('File %s is short (< 8 bytes)' % path)
bits = ReadMagicAndReturnBitness(f, path)
size -= 8
s = struct.unpack_from(TypeCodeForStruct(bits) * (size * 8 / bits), f.read(size))
print >>sys.stderr, "%s: read %d %d-bit PCs from %s" % (prog_name, size * 8 / bits, bits, path)
return s
def Merge(files):
s = set()
for f in files:
s = s.union(set(ReadOneFile(f)))
print >> sys.stderr, "%s: %d files merged; %d PCs total" % \
(prog_name, len(files), len(s))
return sorted(s)
def PrintFiles(files):
if len(files) > 1:
s = Merge(files)
  else: # If there is just one file, print the PCs in order.
s = ReadOneFile(files[0])
print >> sys.stderr, "%s: 1 file merged; %d PCs total" % \
(prog_name, len(s))
for i in s:
print "0x%x" % i
def MergeAndPrint(files):
if sys.stdout.isatty():
Usage()
s = Merge(files)
bits = 32
if max(s) > 0xFFFFFFFF:
bits = 64
array.array('I', MagicForBits(bits)).tofile(sys.stdout)
a = struct.pack(TypeCodeForStruct(bits) * len(s), *s)
sys.stdout.write(a)
def UnpackOneFile(path):
with open(path, mode="rb") as f:
print >> sys.stderr, "%s: unpacking %s" % (prog_name, path)
while True:
header = f.read(12)
if not header: return
if len(header) < 12:
break
pid, module_length, blob_size = struct.unpack('iII', header)
module = f.read(module_length)
blob = f.read(blob_size)
assert(len(module) == module_length)
assert(len(blob) == blob_size)
extracted_file = "%s.%d.sancov" % (module, pid)
print >> sys.stderr, "%s: extracting %s" % \
(prog_name, extracted_file)
# The packed file may contain multiple blobs for the same pid/module
# pair. Append to the end of the file instead of overwriting.
with open(extracted_file, 'ab') as f2:
f2.write(blob)
# fail
raise Exception('Error reading file %s' % path)
def Unpack(files):
for f in files:
UnpackOneFile(f)
def UnpackOneRawFile(path, map_path):
mem_map = []
with open(map_path, mode="rt") as f_map:
print >> sys.stderr, "%s: reading map %s" % (prog_name, map_path)
bits = int(f_map.readline())
if bits != 32 and bits != 64:
raise Exception('Wrong bits size in the map')
for line in f_map:
parts = line.rstrip().split()
mem_map.append((int(parts[0], 16),
int(parts[1], 16),
int(parts[2], 16),
' '.join(parts[3:])))
mem_map.sort(key=lambda m : m[0])
mem_map_keys = [m[0] for m in mem_map]
with open(path, mode="rb") as f:
print >> sys.stderr, "%s: unpacking %s" % (prog_name, path)
f.seek(0, 2)
size = f.tell()
f.seek(0, 0)
pcs = struct.unpack_from(TypeCodeForStruct(bits) * (size * 8 / bits), f.read(size))
mem_map_pcs = [[] for i in range(0, len(mem_map))]
for pc in pcs:
if pc == 0: continue
map_idx = bisect.bisect(mem_map_keys, pc) - 1
(start, end, base, module_path) = mem_map[map_idx]
assert pc >= start
if pc >= end:
print >> sys.stderr, "warning: %s: pc %x outside of any known mapping" % (prog_name, pc)
continue
mem_map_pcs[map_idx].append(pc - base)
for ((start, end, base, module_path), pc_list) in zip(mem_map, mem_map_pcs):
if len(pc_list) == 0: continue
assert path.endswith('.sancov.raw')
dst_path = module_path + '.' + os.path.basename(path)[:-4]
print >> sys.stderr, "%s: writing %d PCs to %s" % (prog_name, len(pc_list), dst_path)
sorted_pc_list = sorted(pc_list)
pc_buffer = struct.pack(TypeCodeForStruct(bits) * len(pc_list), *sorted_pc_list)
with open(dst_path, 'ab+') as f2:
array.array('I', MagicForBits(bits)).tofile(f2)
f2.seek(0, 2)
f2.write(pc_buffer)
def RawUnpack(files):
for f in files:
if not f.endswith('.sancov.raw'):
raise Exception('Unexpected raw file name %s' % f)
f_map = f[:-3] + 'map'
UnpackOneRawFile(f, f_map)
def GetInstrumentedPCs(binary):
# This looks scary, but all it does is extract all offsets where we call:
# - __sanitizer_cov() or __sanitizer_cov_with_check(),
# - with call or callq,
# - directly or via PLT.
cmd = "objdump -d %s | " \
"grep '^\s\+[0-9a-f]\+:.*\scall\(q\|\)\s\+[0-9a-f]\+ <__sanitizer_cov\(_with_check\|\)\(@plt\|\)>' | " \
"grep '^\s\+[0-9a-f]\+' -o" % binary
proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
shell=True)
proc.stdin.close()
# The PCs we get from objdump are off by 4 bytes, as they point to the
# beginning of the callq instruction. Empirically this is true on x86 and
# x86_64.
return set(int(line.strip(), 16) + 4 for line in proc.stdout)
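# A matching objdump line looks roughly like (illustrative):
#   4005d3: e8 a8 fe ff ff  callq 400480 <__sanitizer_cov@plt>
# grep extracts the leading '4005d3'; adding 4 matches the PC the coverage
# runtime records, per the comment above.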
def PrintMissing(binary):
if not os.path.isfile(binary):
raise Exception('File not found: %s' % binary)
instrumented = GetInstrumentedPCs(binary)
print >> sys.stderr, "%s: found %d instrumented PCs in %s" % (prog_name,
len(instrumented),
binary)
covered = set(int(line, 16) for line in sys.stdin)
print >> sys.stderr, "%s: read %d PCs from stdin" % (prog_name, len(covered))
missing = instrumented - covered
print >> sys.stderr, "%s: %d PCs missing from coverage" % (prog_name, len(missing))
if (len(missing) > len(instrumented) - len(covered)):
print >> sys.stderr, \
"%s: WARNING: stdin contains PCs not found in binary" % prog_name
for pc in sorted(missing):
print "0x%x" % pc
if __name__ == '__main__':
prog_name = sys.argv[0]
if len(sys.argv) <= 2:
Usage();
if sys.argv[1] == "missing":
if len(sys.argv) != 3:
Usage()
PrintMissing(sys.argv[2])
exit(0)
file_list = []
for f in sys.argv[2:]:
file_list += glob.glob(f)
if not file_list:
Usage()
if sys.argv[1] == "print":
PrintFiles(file_list)
elif sys.argv[1] == "merge":
MergeAndPrint(file_list)
elif sys.argv[1] == "unpack":
Unpack(file_list)
elif sys.argv[1] == "rawunpack":
RawUnpack(file_list)
else:
Usage()
| 8,087 | 31.744939 | 112 | py |
LowFat | LowFat-master/llvm-4.0.0.src/projects/compiler-rt/lib/sanitizer_common/scripts/cpplint.py | #!/usr/bin/env python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Here are some issues that I've had people identify in my code during reviews,
# that I think are possible to flag automatically in a lint tool. If these were
# caught by lint, it would save time both for myself and for my reviewers.
# Most likely, some of these are beyond the scope of the current lint framework,
# but I think it is valuable to retain these wish-list items even if they cannot
# be immediately implemented.
#
# Suggestions
# -----------
# - Check for no 'explicit' for multi-arg ctor
# - Check for boolean assign RHS in parens
# - Check for ctor initializer-list colon position and spacing
# - Check that if there's a ctor, there should be a dtor
# - Check accessors that return non-pointer member variables are
# declared const
# - Check accessors that return non-const pointer member vars are
# *not* declared const
# - Check for using public includes for testing
# - Check for spaces between brackets in one-line inline method
# - Check for no assert()
# - Check for spaces surrounding operators
# - Check for 0 in pointer context (should be NULL)
# - Check for 0 in char context (should be '\0')
# - Check for camel-case method name conventions for methods
# that are not simple inline getters and setters
# - Do not indent namespace contents
# - Avoid inlining non-trivial constructors in header files
# - Check for old-school (void) cast for call-sites of functions
# ignored return value
# - Check gUnit usage of anonymous namespace
# - Check for class declaration order (typedefs, consts, enums,
# ctor(s?), dtor, friend declarations, methods, member vars)
#
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It also does not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import copy
import getopt
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed]
<file> [file] ...
The style guidelines this tries to follow are those in
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Linted extensions are .cc, .cpp, and .h. Other file types will be ignored.
Flags:
output=vs7
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
root=subdir
The root directory used for deriving header guard CPP variable.
By default, the header guard CPP variable is calculated as the relative
path to the directory that contains .git, .hg, or .svn. When this flag
is specified, the relative path is calculated from the specified
directory. If the specified directory does not exist, this flag is
ignored.
Examples:
    Assuming that src/.git exists, the header guard CPP variables for
src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
--root=chrome/browser => UI_BROWSER_H_
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
# \ used for clearer layout -- pylint: disable-msg=C6013
_ERROR_CATEGORIES = [
'build/class',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_alpha',
'build/include_order',
'build/include_what_you_use',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'legal/copyright',
'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/function',
'readability/multiline_comment',
'readability/multiline_string',
'readability/namespace',
'readability/nolint',
'readability/streams',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/rtti',
'runtime/sizeof',
'runtime/string',
'runtime/threadsafe_fn',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/empty_loop_body',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/forcolon',
'whitespace/indent',
'whitespace/labels',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo'
]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = ['-build/include_alpha']
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# Headers that we consider STL headers.
_STL_HEADERS = frozenset([
'algobase.h', 'algorithm', 'alloc.h', 'bitset', 'deque', 'exception',
'function.h', 'functional', 'hash_map', 'hash_map.h', 'hash_set',
'hash_set.h', 'iterator', 'list', 'list.h', 'map', 'memory', 'new',
'pair.h', 'pthread_alloc', 'queue', 'set', 'set.h', 'sstream', 'stack',
'stl_alloc.h', 'stl_relops.h', 'type_traits.h',
'utility', 'vector', 'vector.h',
])
# Non-STL C++ system headers.
_CPP_HEADERS = frozenset([
'algo.h', 'builtinbuf.h', 'bvector.h', 'cassert', 'cctype',
'cerrno', 'cfloat', 'ciso646', 'climits', 'clocale', 'cmath',
'complex', 'complex.h', 'csetjmp', 'csignal', 'cstdarg', 'cstddef',
'cstdio', 'cstdlib', 'cstring', 'ctime', 'cwchar', 'cwctype',
'defalloc.h', 'deque.h', 'editbuf.h', 'exception', 'fstream',
'fstream.h', 'hashtable.h', 'heap.h', 'indstream.h', 'iomanip',
'iomanip.h', 'ios', 'iosfwd', 'iostream', 'iostream.h', 'istream',
'istream.h', 'iterator.h', 'limits', 'map.h', 'multimap.h', 'multiset.h',
'numeric', 'ostream', 'ostream.h', 'parsestream.h', 'pfstream.h',
'PlotFile.h', 'procbuf.h', 'pthread_alloc.h', 'rope', 'rope.h',
'ropeimpl.h', 'SFile.h', 'slist', 'slist.h', 'stack.h', 'stdexcept',
'stdiostream.h', 'streambuf', 'streambuf.h', 'stream.h', 'strfile.h',
'string', 'strstream', 'strstream.h', 'tempbuf.h', 'tree.h', 'typeinfo',
'valarray',
])
# Assertion macros. These are defined in base/logging.h and
# testing/base/gunit.h. Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE_M', 'EXPECT_TRUE',
'ASSERT_TRUE_M', 'ASSERT_TRUE',
'EXPECT_FALSE_M', 'EXPECT_FALSE',
'ASSERT_FALSE_M', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
_CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
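# For example, _CHECK_REPLACEMENT['CHECK']['=='] is 'CHECK_EQ' and
# _CHECK_REPLACEMENT['EXPECT_FALSE']['=='] is 'EXPECT_NE'.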
# Alternative tokens and their replacements. For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
'and': '&&',
'bitor': '|',
'or': '||',
'xor': '^',
'compl': '~',
'bitand': '&',
'and_eq': '&=',
'or_eq': '|=',
'xor_eq': '^=',
'not': '!',
'not_eq': '!='
}
# Compile regular expression that matches all the above keywords. The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
#
# False positives include C-style multi-line comments (http://go/nsiut )
# and multi-line strings (http://go/beujw ), but those have always been
# troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
# These constants define the current inline assembly state
_NO_ASM = 0 # Outside of inline assembly block
_INSIDE_ASM = 1 # Inside inline assembly block
_END_ASM = 2 # Last line of inline assembly block
_BLOCK_ASM = 3 # The whole block is an inline assembly block
# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
r'(?:\s+(volatile|__volatile__))?'
r'\s*[{(]')
_regexp_compile_cache = {}
# Finds occurrences of NOLINT or NOLINT(...).
_RE_SUPPRESSION = re.compile(r'\bNOLINT\b(\([^)]*\))?')
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
# The root directory used for deriving header guard CPP variable.
# This is set by --root flag.
_root = None
def ParseNolintSuppressions(filename, raw_line, linenum, error):
"""Updates the global list of error-suppressions.
Parses any NOLINT comments on the current line, updating the global
error_suppressions store. Reports an error if the NOLINT comment
was malformed.
Args:
filename: str, the name of the input file.
raw_line: str, the line of input text, with comments.
linenum: int, the number of the current line.
error: function, an error handler.
"""
# FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*).
matched = _RE_SUPPRESSION.search(raw_line)
if matched:
category = matched.group(1)
if category in (None, '(*)'): # => "suppress all"
_error_suppressions.setdefault(None, set()).add(linenum)
else:
if category.startswith('(') and category.endswith(')'):
category = category[1:-1]
if category in _ERROR_CATEGORIES:
_error_suppressions.setdefault(category, set()).add(linenum)
else:
error(filename, linenum, 'readability/nolint', 5,
'Unknown NOLINT error category: %s' % category)
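# Examples of suppression comments this parses (C++ source, illustrative):
#   int x = 0;  // NOLINT                  - suppresses every category here
#   long y;     // NOLINT(runtime/int)     - suppresses only runtime/int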
def ResetNolintSuppressions():
"Resets the set of NOLINT suppressions to empty."
_error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
"""Returns true if the specified error category is suppressed on this line.
Consults the global error_suppressions map populated by
ParseNolintSuppressions/ResetNolintSuppressions.
Args:
category: str, the category of the error.
linenum: int, the current line number.
Returns:
bool, True iff the error should be suppressed due to a NOLINT comment.
"""
return (linenum in _error_suppressions.get(category, set()) or
linenum in _error_suppressions.get(None, set()))
def Match(pattern, s):
"""Matches the string with the pattern, caching the compiled regexp."""
# The regexp compilation caching is inlined in both Match and Search for
# performance reasons; factoring it out into a separate function turns out
# to be noticeably expensive.
if not pattern in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].match(s)
def Search(pattern, s):
"""Searches the string for the pattern, caching the compiled regexp."""
if not pattern in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].search(s)
class _IncludeState(dict):
"""Tracks line numbers for includes, and the order in which includes appear.
As a dict, an _IncludeState object serves as a mapping between include
filename and line number on which that file was included.
Call CheckNextIncludeOrder() once for each header in the file, passing
in the type constants defined above. Calls in an illegal order will
raise an _IncludeError with an appropriate error message.
"""
# self._section will move monotonically through this set. If it ever
# needs to move backwards, CheckNextIncludeOrder will raise an error.
_INITIAL_SECTION = 0
_MY_H_SECTION = 1
_C_SECTION = 2
_CPP_SECTION = 3
_OTHER_H_SECTION = 4
_TYPE_NAMES = {
_C_SYS_HEADER: 'C system header',
_CPP_SYS_HEADER: 'C++ system header',
_LIKELY_MY_HEADER: 'header this file implements',
_POSSIBLE_MY_HEADER: 'header this file may implement',
_OTHER_HEADER: 'other header',
}
_SECTION_NAMES = {
_INITIAL_SECTION: "... nothing. (This can't be an error.)",
_MY_H_SECTION: 'a header this file implements',
_C_SECTION: 'C system header',
_CPP_SECTION: 'C++ system header',
_OTHER_H_SECTION: 'other header',
}
def __init__(self):
dict.__init__(self)
# The name of the current section.
self._section = self._INITIAL_SECTION
# The path of last found header.
self._last_header = ''
def CanonicalizeAlphabeticalOrder(self, header_path):
"""Returns a path canonicalized for alphabetical comparison.
- replaces "-" with "_" so they both cmp the same.
- removes '-inl' since we don't require them to be after the main header.
- lowercase everything, just in case.
Args:
header_path: Path to be canonicalized.
Returns:
Canonicalized path.
"""
return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
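  # For example, 'Foo-inl.h' and 'foo.h' both canonicalize to 'foo.h', so an
  # -inl header is not flagged as out of alphabetical order w.r.t. its header.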
def IsInAlphabeticalOrder(self, header_path):
"""Check if a header is in alphabetical order with the previous header.
Args:
header_path: Header to be checked.
Returns:
Returns true if the header is in alphabetical order.
"""
canonical_header = self.CanonicalizeAlphabeticalOrder(header_path)
if self._last_header > canonical_header:
return False
self._last_header = canonical_header
return True
def CheckNextIncludeOrder(self, header_type):
"""Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong.
"""
error_message = ('Found %s after %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section]))
last_section = self._section
if header_type == _C_SYS_HEADER:
if self._section <= self._C_SECTION:
self._section = self._C_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _CPP_SYS_HEADER:
if self._section <= self._CPP_SECTION:
self._section = self._CPP_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _LIKELY_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
self._section = self._OTHER_H_SECTION
elif header_type == _POSSIBLE_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
# This will always be the fallback because we're not sure
# enough that the header is associated with this file.
self._section = self._OTHER_H_SECTION
else:
assert header_type == _OTHER_HEADER
self._section = self._OTHER_H_SECTION
if last_section != self._section:
self._last_header = ''
return ''
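# Illustrative usage sketch (not executed): headers fed in canonical order
# return empty strings; a header fed out of order returns a message.
#
#   state = _IncludeState()
#   state.CheckNextIncludeOrder(_C_SYS_HEADER)    # -> ''
#   state.CheckNextIncludeOrder(_CPP_SYS_HEADER)  # -> ''
#   state.CheckNextIncludeOrder(_C_SYS_HEADER)
#   # -> 'Found C system header after C++ system header'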
class _CppLintState(object):
"""Maintains module-wide state.."""
def __init__(self):
self.verbose_level = 1 # global setting.
self.error_count = 0 # global count of reported errors
# filters to apply when emitting error messages
self.filters = _DEFAULT_FILTERS[:]
self.counting = 'total' # In what way are we counting errors?
self.errors_by_category = {} # string to int dict storing error counts
# output format:
# "emacs" - format that emacs can parse (default)
# "vs7" - format that Microsoft Visual Studio 7 can parse
self.output_format = 'emacs'
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetCountingStyle(self, counting_style):
"""Sets the module's counting options."""
self.counting = counting_style
def SetFilters(self, filters):
"""Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "+whitespace/indent").
Each filter should start with + or -; else we die.
Raises:
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
"""
# Default filters always have less priority than the flag ones.
self.filters = _DEFAULT_FILTERS[:]
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
' (%s does not)' % filt)
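# For example (illustrative): SetFilters('-whitespace,+whitespace/braces')
# appends both entries after the defaults, so whitespace errors are dropped
# except whitespace/braces, which the later '+' entry re-enables.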
def ResetErrorCounts(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
"""Bumps the module's error statistic."""
self.error_count += 1
if self.counting in ('toplevel', 'detailed'):
if self.counting != 'detailed':
category = category.split('/')[0]
if category not in self.errors_by_category:
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
def PrintErrorCounts(self):
"""Print a summary of errors by category, and the total."""
for category, count in self.errors_by_category.iteritems():
sys.stderr.write('Category \'%s\' errors found: %d\n' %
(category, count))
sys.stderr.write('Total errors found: %d\n' % self.error_count)
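# Sample output sketch (hypothetical counts, 'toplevel' counting style):
#   Category 'whitespace' errors found: 3
#   Category 'build' errors found: 1
#   Total errors found: 4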
_cpplint_state = _CppLintState()
def _OutputFormat():
"""Gets the module's output format."""
return _cpplint_state.output_format
def _SetOutputFormat(output_format):
"""Sets the module's output format."""
_cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
"""Returns the module's verbosity setting."""
return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
"""Sets the module's verbosity, and returns the previous setting."""
return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
"""Sets the module's counting options."""
_cpplint_state.SetCountingStyle(level)
def _Filters():
"""Returns the module's list of output filters, as a list."""
return _cpplint_state.filters
def _SetFilters(filters):
"""Sets the module's error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.SetFilters(filters)
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body."""
_NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
_TEST_TRIGGER = 400    # about 60% more than _NORMAL_TRIGGER.
def __init__(self):
self.in_a_function = False
self.lines_in_function = 0
self.current_function = ''
def Begin(self, function_name):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
"""
self.in_a_function = True
self.lines_in_function = 0
self.current_function = function_name
def Count(self):
"""Count line in current function body."""
if self.in_a_function:
self.lines_in_function += 1
def Check(self, error, filename, linenum):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2**_VerboseLevel()
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
# 250 => 0, 500 => 1, 1000 => 2, 2000 => 3, 4000 => 4, 8000 => 5, ...
if error_level > 5:
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
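# Worked example for the trigger math above (illustrative): at verbosity 0
# for a non-test function, trigger = 250 * 2**0 = 250, and a body of 1100
# non-comment lines yields int(math.log(1100 / 250, 2)) == 2, so the error
# is reported at confidence level 2.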
def End(self):
"""Stop analyzing function body."""
self.in_a_function = False
class _IncludeError(Exception):
"""Indicates a problem with the include order in a file."""
pass
class FileInfo:
"""Provides utility functions for filenames.
FileInfo provides easy access to the components of a file's path
relative to the project root.
"""
def __init__(self, filename):
self._filename = filename
def FullName(self):
"""Make Windows paths like Unix."""
return os.path.abspath(self._filename).replace('\\', '/')
def RepositoryName(self):
"""FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
"C:\Documents and Settings\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
fullname = self.FullName()
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
if os.path.exists(os.path.join(project_dir, ".svn")):
# If there's a .svn directory in the current directory, we recursively look
# up the directory tree for the top of the SVN checkout
root_dir = project_dir
one_up_dir = os.path.dirname(root_dir)
while os.path.exists(os.path.join(one_up_dir, ".svn")):
root_dir = os.path.dirname(root_dir)
one_up_dir = os.path.dirname(one_up_dir)
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
# searching up from the current path.
root_dir = os.path.dirname(fullname)
while (root_dir != os.path.dirname(root_dir) and
not os.path.exists(os.path.join(root_dir, ".git")) and
not os.path.exists(os.path.join(root_dir, ".hg")) and
not os.path.exists(os.path.join(root_dir, ".svn"))):
root_dir = os.path.dirname(root_dir)
if (os.path.exists(os.path.join(root_dir, ".git")) or
os.path.exists(os.path.join(root_dir, ".hg")) or
os.path.exists(os.path.join(root_dir, ".svn"))):
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Don't know what to do; header guard warnings may be wrong...
return fullname
def Split(self):
"""Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cc', Split() would
return ('chrome/browser', 'browser', '.cc')
Returns:
A tuple of (directory, basename, extension).
"""
googlename = self.RepositoryName()
project, rest = os.path.split(googlename)
return (project,) + os.path.splitext(rest)
def BaseName(self):
"""File base name - text after the final slash, before the final period."""
return self.Split()[1]
def Extension(self):
"""File extension - text following the final period."""
return self.Split()[2]
def NoExtension(self):
"""File has no source file extension."""
return '/'.join(self.Split()[0:2])
def IsSource(self):
"""File has a source file extension."""
return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
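# Illustrative FileInfo usage (hypothetical path; RepositoryName depends on
# the checkout layout):
#   info = FileInfo('chrome/browser/browser.cc')
#   info.Split()      # -> ('chrome/browser', 'browser', '.cc')
#   info.BaseName()   # -> 'browser'
#   info.IsSource()   # -> True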
def _ShouldPrintError(category, confidence, linenum):
"""If confidence >= verbose, category passes filter and is not suppressed."""
# There are three ways we might decide not to print an error message:
# a "NOLINT(category)" comment appears in the source,
# the verbosity level isn't high enough, or the filters filter it out.
if IsErrorSuppressedByNolint(category, linenum):
return False
if confidence < _cpplint_state.verbose_level:
return False
is_filtered = False
for one_filter in _Filters():
if one_filter.startswith('-'):
if category.startswith(one_filter[1:]):
is_filtered = True
elif one_filter.startswith('+'):
if category.startswith(one_filter[1:]):
is_filtered = False
else:
assert False # should have been checked for in SetFilter.
if is_filtered:
return False
return True
def Error(filename, linenum, category, confidence, message):
"""Logs the fact we've found a lint error.
We log where the error was found, and also our confidence in the error,
that is, how certain we are this is a legitimate style regression, and
not a misidentification or a use that's sometimes justified.
False positives can be suppressed by the use of
"cpplint(category)" comments on the offending line. These are
parsed into _error_suppressions.
Args:
filename: The name of the file containing the error.
linenum: The number of the line containing the error.
category: A string used to describe the "category" this bug
falls under: "whitespace", say, or "runtime". Categories
may have a hierarchy separated by slashes: "whitespace/indent".
confidence: A number from 1-5 representing a confidence score for
the error, with 5 meaning that we are certain of the problem,
and 1 meaning that it could be a legitimate construct.
message: The error message.
"""
if _ShouldPrintError(category, confidence, linenum):
_cpplint_state.IncrementErrorCount(category)
if _cpplint_state.output_format == 'vs7':
sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
elif _cpplint_state.output_format == 'eclipse':
sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
else:
sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Matches strings. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
# Matches characters. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
# Matches multi-line C++ comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of how spaces are removed, so we can handle comments
# inside statements better.
# The current rule is: we only strip spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side;
# if this doesn't work we try the left side, but only if there's a non-word
# character on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
r"""(\s*/\*.*\*/\s*$|
/\*.*\*/\s+|
\s+/\*.*\*/(?=\W)|
/\*.*\*/)""", re.VERBOSE)
def IsCppString(line):
"""Does line terminate so, that the next symbol is in string constant.
This function does not consider single-line nor multi-line comments.
Args:
line: is a partial line of code starting from the 0..n.
Returns:
True, if next character appended to 'line' is inside a
string constant.
"""
line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
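# Illustrative examples:
#   IsCppString('x = "abc')    # -> True, an unterminated string is open
#   IsCppString('x = "abc"')   # -> False, quotes are balanced
#   IsCppString('x = "a\\"b')  # -> True, the escaped quote does not close it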
def FindNextMultiLineCommentStart(lines, lineix):
"""Find the beginning marker for a multiline comment."""
while lineix < len(lines):
if lines[lineix].strip().startswith('/*'):
# Only return this marker if the comment goes beyond this line
if lines[lineix].strip().find('*/', 2) < 0:
return lineix
lineix += 1
return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
"""We are inside a comment, find the end marker."""
while lineix < len(lines):
if lines[lineix].strip().endswith('*/'):
return lineix
lineix += 1
return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
"""Clears a range of lines for multi-line comments."""
# Having // dummy comments makes the lines non-empty, so we will not get
# unnecessary blank line warnings later in the code.
for i in range(begin, end):
lines[i] = '// dummy'
def RemoveMultiLineComments(filename, lines, error):
"""Removes multiline (c-style) comments from lines."""
lineix = 0
while lineix < len(lines):
lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
if lineix_begin >= len(lines):
return
lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
if lineix_end >= len(lines):
error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
'Could not find end of multi-line comment')
return
RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
lineix = lineix_end + 1
def CleanseComments(line):
"""Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
Returns:
The line with single-line comments removed.
"""
commentpos = line.find('//')
if commentpos != -1 and not IsCppString(line[:commentpos]):
line = line[:commentpos].rstrip()
# get rid of /* ... */
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class CleansedLines(object):
"""Holds 3 copies of all lines with different preprocessing applied to them.
1) elided member contains lines without strings and comments,
2) lines member contains lines without comments, and
3) raw_lines member contains all the lines without processing.
All three members are lists of the same length.
"""
def __init__(self, lines):
self.elided = []
self.lines = []
self.raw_lines = lines
self.num_lines = len(lines)
for linenum in range(len(lines)):
self.lines.append(CleanseComments(lines[linenum]))
elided = self._CollapseStrings(lines[linenum])
self.elided.append(CleanseComments(elided))
def NumLines(self):
"""Returns the number of lines represented."""
return self.num_lines
@staticmethod
def _CollapseStrings(elided):
"""Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings.
"""
if not _RE_PATTERN_INCLUDE.match(elided):
# Remove escaped characters first to make quote/single quote collapsing
# basic. Things that look like escaped characters shouldn't occur
# outside of strings and chars.
elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
return elided
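# Illustrative example: on the hypothetical line
#   printf("%d\n", 'a' + x)
# the \n escape is stripped first, then 'a' collapses to '' and "%d" to "",
# yielding:
#   printf("", '' + x)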
def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
"""Find the position just after the matching endchar.
Args:
line: a CleansedLines line.
startpos: start searching at this position.
depth: nesting level at startpos.
startchar: expression opening character.
endchar: expression closing character.
Returns:
Index just after endchar, or -1 if endchar was not found on this line.
"""
for i in xrange(startpos, len(line)):
if line[i] == startchar:
depth += 1
elif line[i] == endchar:
depth -= 1
if depth == 0:
return i + 1
return -1
def CloseExpression(clean_lines, linenum, pos):
"""If input points to ( or { or [, finds the position that closes it.
If lines[linenum][pos] points to a '(' or '{' or '[', finds the
linenum/pos that correspond to the closing of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointing just *past* the closing brace, or
(line, len(lines), -1) if we never find a close. Note we ignore
strings and comments when matching; and the line we return is the
'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
startchar = line[pos]
if startchar not in '({[':
return (line, clean_lines.NumLines(), -1)
if startchar == '(': endchar = ')'
if startchar == '[': endchar = ']'
if startchar == '{': endchar = '}'
# Check first line
end_pos = FindEndOfExpressionInLine(line, pos, 0, startchar, endchar)
if end_pos > -1:
return (line, linenum, end_pos)
tail = line[pos:]
num_open = tail.count(startchar) - tail.count(endchar)
while linenum < clean_lines.NumLines() - 1:
linenum += 1
line = clean_lines.elided[linenum]
delta = line.count(startchar) - line.count(endchar)
if num_open + delta <= 0:
return (line, linenum,
FindEndOfExpressionInLine(line, 0, num_open, startchar, endchar))
num_open += delta
# Did not find endchar before end of file, give up
return (line, clean_lines.NumLines(), -1)
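# Illustrative example (hypothetical cleansed line): if
#   clean_lines.elided[10] == 'if (a && (b || c)) {'
# then CloseExpression(clean_lines, 10, 3) scans from the '(' at position 3
# and returns ('if (a && (b || c)) {', 10, 18), the index just past ')'.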
def CheckForCopyright(filename, lines, error):
"""Logs an error if no Copyright message appears at the top of the file."""
# We'll say it should occur by line 10. Don't forget there's a
# dummy line at the front.
for line in xrange(1, min(len(lines), 11)):
if re.search(r'Copyright', lines[line], re.I): break
else: # means no copyright line was found
error(filename, 0, 'legal/copyright', 5,
'No copyright message found. '
'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetHeaderGuardCPPVariable(filename):
"""Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
"""
# Restores the original filename in case cpplint is invoked from Emacs's
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
fileinfo = FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
if _root:
file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
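# For example (illustrative, assuming the path is already repository-relative
# and _root is unset):
#   GetHeaderGuardCPPVariable('chrome/browser/browser.h')
#   # -> 'CHROME_BROWSER_BROWSER_H_'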
def CheckForHeaderGuard(filename, lines, error):
"""Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present, or if the guard
does not use the CPP variable derived from the file's full pathname.
Args:
filename: The name of the C++ header file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
cppvar = GetHeaderGuardCPPVariable(filename)
ifndef = None
ifndef_linenum = 0
define = None
endif = None
endif_linenum = 0
for linenum, line in enumerate(lines):
linesplit = line.split()
if len(linesplit) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and linesplit[0] == '#ifndef':
# set ifndef to the header guard presented on the #ifndef line.
ifndef = linesplit[1]
ifndef_linenum = linenum
if not define and linesplit[0] == '#define':
define = linesplit[1]
# find the last occurrence of #endif, save entire line
if line.startswith('#endif'):
endif = line
endif_linenum = linenum
if not ifndef:
error(filename, 0, 'build/header_guard', 5,
'No #ifndef header guard found, suggested CPP variable is: %s' %
cppvar)
return
if not define:
error(filename, 0, 'build/header_guard', 5,
'No #define header guard found, suggested CPP variable is: %s' %
cppvar)
return
# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
# for backward compatibility.
if ifndef != cppvar:
error_level = 0
if ifndef != cppvar + '_':
error_level = 5
ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
error)
error(filename, ifndef_linenum, 'build/header_guard', error_level,
'#ifndef header guard has wrong style, please use: %s' % cppvar)
if define != ifndef:
error(filename, 0, 'build/header_guard', 5,
'#ifndef and #define don\'t match, suggested CPP variable is: %s' %
cppvar)
return
if endif != ('#endif // %s' % cppvar):
error_level = 0
if endif != ('#endif // %s' % (cppvar + '_')):
error_level = 5
ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
error)
error(filename, endif_linenum, 'build/header_guard', error_level,
'#endif line should be "#endif // %s"' % cppvar)
def CheckForUnicodeReplacementCharacters(filename, lines, error):
"""Logs an error for each line containing Unicode replacement characters.
These indicate that either the file contained invalid UTF-8 (likely)
or Unicode replacement characters (which it shouldn't). Note that
it's possible for this to throw off line numbering if the invalid
UTF-8 occurred adjacent to a newline.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
for linenum, line in enumerate(lines):
if u'\ufffd' in line:
error(filename, linenum, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
def CheckForNewlineAtEOF(filename, lines, error):
"""Logs an error if there is no newline char at the end of the file.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# The array lines() was created by adding two newlines to the
# original file (go figure), then splitting on \n.
# To verify that the file ends in \n, we just have to make sure the
# last-but-two element of lines() exists and is empty.
if len(lines) < 3 or lines[-2]:
error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
"""Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
other. Likewise, it's ok for strings to extend across multiple
lines, as long as a line continuation character (backslash)
terminates each line. Although not currently prohibited by the C++
style guide, it's ugly and unnecessary. We don't do well with either
in this lint program, so we warn about both.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remove all \\ (escaped backslashes) from the line. They are OK, and the
# second (escaped) backslash may trigger later \" detection erroneously.
line = line.replace('\\\\', '')
if line.count('/*') > line.count('*/'):
error(filename, linenum, 'readability/multiline_comment', 5,
'Complex multi-line /*...*/-style comment found. '
'Lint may give bogus warnings. '
'Consider replacing these with //-style comments, '
'with #if 0...#endif, '
'or with more clearly structured multi-line comments.')
if (line.count('"') - line.count('\\"')) % 2:
error(filename, linenum, 'readability/multiline_string', 5,
'Multi-line string ("...") found. This lint script doesn\'t '
'do well with such strings, and may give bogus warnings. They\'re '
'ugly and unnecessary, and you should use concatenation instead.')
threading_list = (
('asctime(', 'asctime_r('),
('ctime(', 'ctime_r('),
('getgrgid(', 'getgrgid_r('),
('getgrnam(', 'getgrnam_r('),
('getlogin(', 'getlogin_r('),
('getpwnam(', 'getpwnam_r('),
('getpwuid(', 'getpwuid_r('),
('gmtime(', 'gmtime_r('),
('localtime(', 'localtime_r('),
('rand(', 'rand_r('),
('readdir(', 'readdir_r('),
('strtok(', 'strtok_r('),
('ttyname(', 'ttyname_r('),
)
def CheckPosixThreading(filename, clean_lines, linenum, error):
"""Checks for calls to thread-unsafe functions.
Much code was originally written without consideration for
multi-threading. Engineers also rely on old experience: they learned
POSIX before its threading extensions were added. These
tests guide the engineers to use thread-safe functions (when using
POSIX directly).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for single_thread_function, multithread_safe_function in threading_list:
ix = line.find(single_thread_function)
# Comparisons made explicit for clarity -- pylint: disable-msg=C6403
if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
line[ix - 1] not in ('_', '.', '>'))):
error(filename, linenum, 'runtime/threadsafe_fn', 2,
'Consider using ' + multithread_safe_function +
'...) instead of ' + single_thread_function +
'...) for improved thread safety.')
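# Illustrative trigger for the check above (hypothetical line): for
#   struct tm* now = localtime(&t);
# the linter suggests localtime_r(...) instead, while my_localtime(&t) is
# left alone because the preceding character is part of an identifier.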
# Matches invalid increment: *count++, which moves the pointer instead of
# incrementing the value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
r'^\s*\*\w+(\+\+|--);')
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
"""Checks for invalid increment *count++.
For example the following function:
void increment_counter(int* count) {
*count++;
}
is invalid, because it effectively does count++, moving the pointer, and
should be replaced with ++*count, (*count)++ or *count += 1.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if _RE_PATTERN_INVALID_INCREMENT.match(line):
error(filename, linenum, 'runtime/invalid_increment', 5,
'Changing pointer instead of value (or unused value of operator*).')
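# Illustrative matches for the pattern above:
#   '*count++;'    -> flagged (increments the pointer, dereference discarded)
#   '(*count)++;'  -> not flagged (parentheses force a value increment)
#   '*count += 1;' -> not flagged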
class _BlockInfo(object):
"""Stores information about a generic block of code."""
def __init__(self, seen_open_brace):
self.seen_open_brace = seen_open_brace
self.open_parentheses = 0
self.inline_asm = _NO_ASM
def CheckBegin(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text up to the opening brace.
This is mostly for checking the text after the class identifier
and the "{", usually where the base class is specified. For other
blocks, there isn't much to check, so we always pass.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text after the closing brace.
This is mostly used for checking end of namespace comments.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
class _ClassInfo(_BlockInfo):
"""Stores information about a class."""
def __init__(self, name, class_or_struct, clean_lines, linenum):
_BlockInfo.__init__(self, False)
self.name = name
self.starting_linenum = linenum
self.is_derived = False
if class_or_struct == 'struct':
self.access = 'public'
else:
self.access = 'private'
# Try to find the end of the class. This will be confused by things like:
# class A {
# } *x = { ...
#
# But it's still good enough for CheckSectionSpacing.
self.last_line = 0
depth = 0
for i in range(linenum, clean_lines.NumLines()):
line = clean_lines.elided[i]
depth += line.count('{') - line.count('}')
if not depth:
self.last_line = i
break
def CheckBegin(self, filename, clean_lines, linenum, error):
# Look for a bare ':'
if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
self.is_derived = True
class _NamespaceInfo(_BlockInfo):
"""Stores information about a namespace."""
def __init__(self, name, linenum):
_BlockInfo.__init__(self, False)
self.name = name or ''
self.starting_linenum = linenum
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Check end of namespace comments."""
line = clean_lines.raw_lines[linenum]
# Check how many lines are enclosed in this namespace. Don't issue
# a warning for missing namespace comments if there aren't enough
# lines. However, do apply checks if there is already an end of
# namespace comment and it's incorrect.
#
# TODO(unknown): We always want to check end of namespace comments
# if a namespace is large, but sometimes we also want to apply the
# check if a short namespace contained nontrivial things (something
# other than forward declarations). There is currently no logic on
# deciding what these nontrivial things are, so this check is
# triggered by namespace size only, which works most of the time.
if (linenum - self.starting_linenum < 10
and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
return
# Look for matching comment at end of namespace.
#
# Note that we accept C style "/* */" comments for terminating
# namespaces, so that code that terminates namespaces inside
# preprocessor macros can be cpplint clean. Example: http://go/nxpiz
#
# We also accept stuff like "// end of namespace <name>." with the
# period at the end.
#
# Besides these, we don't accept anything else, otherwise we might
# get false negatives when existing comment is a substring of the
# expected namespace. Example: http://go/ldkdc, http://cl/23548205
if self.name:
# Named namespace
if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
r'[\*/\.\\\s]*$'),
line):
error(filename, linenum, 'readability/namespace', 5,
'Namespace should be terminated with "// namespace %s"' %
self.name)
else:
# Anonymous namespace
if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
error(filename, linenum, 'readability/namespace', 5,
'Namespace should be terminated with "// namespace"')
class _PreprocessorInfo(object):
"""Stores checkpoints of nesting stacks when #if/#else is seen."""
def __init__(self, stack_before_if):
# The entire nesting stack before #if
self.stack_before_if = stack_before_if
# The entire nesting stack up to #else
self.stack_before_else = []
# Whether we have already seen #else or #elif
self.seen_else = False
class _NestingState(object):
"""Holds states related to parsing braces."""
def __init__(self):
# Stack for tracking all braces. An object is pushed whenever we
# see a "{", and popped when we see a "}". Only 3 types of
# objects are possible:
# - _ClassInfo: a class or struct.
# - _NamespaceInfo: a namespace.
# - _BlockInfo: some other type of block.
self.stack = []
# Stack of _PreprocessorInfo objects.
self.pp_stack = []
def SeenOpenBrace(self):
"""Check if we have seen the opening brace for the innermost block.
Returns:
True if we have seen the opening brace, False if the innermost
block is still expecting an opening brace.
"""
return (not self.stack) or self.stack[-1].seen_open_brace
def InNamespaceBody(self):
"""Check if we are currently one level inside a namespace body.
Returns:
True if top of the stack is a namespace block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
def UpdatePreprocessor(self, line):
"""Update preprocessor stack.
We need to handle preprocessors due to classes like this:
#ifdef SWIG
struct ResultDetailsPageElementExtensionPoint {
#else
struct ResultDetailsPageElementExtensionPoint : public Extension {
#endif
(see http://go/qwddn for original example)
We make the following assumptions (good enough for most files):
- Preprocessor condition evaluates to true from #if up to first
#else/#elif/#endif.
- Preprocessor condition evaluates to false from #else/#elif up
to #endif. We still perform lint checks on these lines, but
these do not affect nesting stack.
Args:
line: current line to check.
"""
if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
# Beginning of #if block, save the nesting stack here. The saved
# stack will allow us to restore the parsing state in the #else case.
self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
elif Match(r'^\s*#\s*(else|elif)\b', line):
# Beginning of #else block
if self.pp_stack:
if not self.pp_stack[-1].seen_else:
# This is the first #else or #elif block. Remember the
# whole nesting stack up to this point. This is what we
# keep after the #endif.
self.pp_stack[-1].seen_else = True
self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
# Restore the stack to how it was before the #if
self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
else:
# TODO(unknown): unexpected #else, issue warning?
pass
elif Match(r'^\s*#\s*endif\b', line):
# End of #if or #else blocks.
if self.pp_stack:
# If we saw an #else, we will need to restore the nesting
# stack to its former state before the #else, otherwise we
# will just continue from where we left off.
if self.pp_stack[-1].seen_else:
# Here we can just use a shallow copy since we are the last
# reference to it.
self.stack = self.pp_stack[-1].stack_before_else
# Drop the corresponding #if
self.pp_stack.pop()
else:
# TODO(unknown): unexpected #endif, issue warning?
pass
def Update(self, filename, clean_lines, linenum, error):
"""Update nesting state with current line.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Update pp_stack first
self.UpdatePreprocessor(line)
# Count parentheses. This is to avoid adding struct arguments to
# the nesting stack.
if self.stack:
inner_block = self.stack[-1]
depth_change = line.count('(') - line.count(')')
inner_block.open_parentheses += depth_change
# Also check if we are starting or ending an inline assembly block.
if inner_block.inline_asm in (_NO_ASM, _END_ASM):
if (depth_change != 0 and
inner_block.open_parentheses == 1 and
_MATCH_ASM.match(line)):
# Enter assembly block
inner_block.inline_asm = _INSIDE_ASM
else:
# Not entering assembly block. If previous line was _END_ASM,
# we will now shift to _NO_ASM state.
inner_block.inline_asm = _NO_ASM
elif (inner_block.inline_asm == _INSIDE_ASM and
inner_block.open_parentheses == 0):
# Exit assembly block
inner_block.inline_asm = _END_ASM
# Consume namespace declaration at the beginning of the line. Do
# this in a loop so that we catch same line declarations like this:
# namespace proto2 { namespace bridge { class MessageSet; } }
while True:
# Match start of namespace. The "\b\s*" below catches namespace
# declarations even if they aren't followed by whitespace; this
# is so that we don't confuse our namespace checker. The
# missing spaces will be flagged by CheckSpacing.
namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
if not namespace_decl_match:
break
new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
self.stack.append(new_namespace)
line = namespace_decl_match.group(2)
if line.find('{') != -1:
new_namespace.seen_open_brace = True
line = line[line.find('{') + 1:]
# Look for a class declaration in whatever is left of the line
# after parsing namespaces. The regexp accounts for decorated classes
# such as in:
# class LOCKABLE API Object {
# };
#
# Templates with class arguments may confuse the parser, for example:
# template <class T
# class Comparator = less<T>,
# class Vector = vector<T> >
# class HeapQueue {
#
# Because this parser has no nesting state about templates, by the
# time it saw "class Comparator", it may think that it's a new class.
# Nested templates have a similar problem:
# template <
# typename ExportedType,
# typename TupleType,
# template <typename, typename> class ImplTemplate>
#
# To avoid these cases, we ignore classes that are followed by '=' or '>'
class_decl_match = Match(
r'\s*(template\s*<[\w\s<>,:]*>\s*)?'
r'(class|struct)\s+([A-Z_]+\s+)*(\w+(?:::\w+)*)'
r'(([^=>]|<[^<>]*>)*)$', line)
if (class_decl_match and
(not self.stack or self.stack[-1].open_parentheses == 0)):
self.stack.append(_ClassInfo(
class_decl_match.group(4), class_decl_match.group(2),
clean_lines, linenum))
line = class_decl_match.group(5)
# If we have not yet seen the opening brace for the innermost block,
# run checks here.
if not self.SeenOpenBrace():
self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
# Update access control if we are inside a class/struct
if self.stack and isinstance(self.stack[-1], _ClassInfo):
access_match = Match(r'\s*(public|private|protected)\s*:', line)
if access_match:
self.stack[-1].access = access_match.group(1)
# Consume braces or semicolons from what's left of the line
while True:
# Match first brace, semicolon, or closed parenthesis.
matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
if not matched:
break
token = matched.group(1)
if token == '{':
# If namespace or class hasn't seen an opening brace yet, mark
# namespace/class head as complete. Push a new block onto the
# stack otherwise.
if not self.SeenOpenBrace():
self.stack[-1].seen_open_brace = True
else:
self.stack.append(_BlockInfo(True))
if _MATCH_ASM.match(line):
self.stack[-1].inline_asm = _BLOCK_ASM
elif token == ';' or token == ')':
# If we haven't seen an opening brace yet, but we already saw
# a semicolon, this is probably a forward declaration. Pop
# the stack for these.
#
# Similarly, if we haven't seen an opening brace yet, but we
# already saw a closing parenthesis, then these are probably
# function arguments with extra "class" or "struct" keywords.
# Also pop the stack for these.
if not self.SeenOpenBrace():
self.stack.pop()
else: # token == '}'
# Perform end of block checks and pop the stack.
if self.stack:
self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
self.stack.pop()
line = matched.group(2)
def InnermostClass(self):
"""Get class info on the top of the stack.
Returns:
A _ClassInfo object if we are inside a class, or None otherwise.
"""
for i in range(len(self.stack), 0, -1):
classinfo = self.stack[i - 1]
if isinstance(classinfo, _ClassInfo):
return classinfo
return None
def CheckClassFinished(self, filename, error):
"""Checks that all classes have been completely parsed.
Call this when all lines in a file have been processed.
Args:
filename: The name of the current file.
error: The function to call with any errors found.
"""
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpplint_unittest.py for an example of this.
for obj in self.stack:
if isinstance(obj, _ClassInfo):
error(filename, obj.starting_linenum, 'build/class', 5,
'Failed to find complete declaration of class %s' %
obj.name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
nesting_state, error):
"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes arguments:
filename, line number, category, confidence, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage class (static, extern, typedef, etc) should be first.')
if Match(r'\s*#\s*endif\s*[^/\s]+', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
# the restriction.
# Here's the original regexp, for the reference:
# type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
error(filename, linenum, 'runtime/member_string_references', 2,
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
# Everything else in this function operates on class declarations.
# Return early if the top of the nesting stack is not a class, or if
# the class head is not completed yet.
classinfo = nesting_state.InnermostClass()
if not classinfo or not classinfo.seen_open_brace:
return
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style.
args = Match(r'\s+(?:inline\s+)?%s\s*\(([^,()]+)\)'
% re.escape(base_classname),
line)
if (args and
args.group(1) != 'void' and
not Match(r'(const\s+)?%s\s*(?:<\w+>\s*)?&' % re.escape(base_classname),
args.group(1).strip())):
error(filename, linenum, 'runtime/explicit', 5,
'Single-argument constructors should be marked explicit.')
def CheckSpacingForFunctionCall(filename, line, linenum, error):
"""Checks for the correctness of various spacing around function calls.
Args:
filename: The name of the current file.
line: The text of the line to check.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Since function calls often occur inside if/for/while/switch
# expressions - which have their own, more liberal conventions - we
# first see if we should be looking inside such an expression for a
# function call, to which we can apply more strict standards.
fncall = line # if there's no control flow construct, look at whole line
for pattern in (r'\bif\s*\((.*)\)\s*{',
r'\bfor\s*\((.*)\)\s*{',
r'\bwhile\s*\((.*)\)\s*[{;]',
r'\bswitch\s*\((.*)\)\s*{'):
match = Search(pattern, line)
if match:
fncall = match.group(1) # look inside the parens for function calls
break
# Except in if/for/while/switch, there should never be space
# immediately inside parens (eg "f( 3, 4 )"). We make an exception
# for nested parens ( (a+b) + c ). Likewise, there should never be
# a space before a ( when it's a function argument. I assume it's a
# function argument when the char before the whitespace is legal in
# a function name (alnum + _) and we're not starting a macro. Also ignore
# pointers and references to arrays and functions because they're too tricky:
# we use a very simple way to recognize these:
# " (something)(maybe-something)" or
# " (something)(maybe-something," or
# " (something)[something]"
# Note that we assume the contents of [] to be short enough that
# they'll never need to wrap.
if ( # Ignore control structures.
not Search(r'\b(if|for|while|switch|return|delete)\b', fncall) and
# Ignore pointers/references to functions.
not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
# Ignore pointers/references to arrays.
not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
error(filename, linenum, 'whitespace/parens', 4,
'Extra space after ( in function call')
elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Extra space after (')
if (Search(r'\w\s+\(', fncall) and
not Search(r'#\s*define|typedef', fncall) and
not Search(r'\w\s+\((\w+::)?\*\w+\)\(', fncall)):
error(filename, linenum, 'whitespace/parens', 4,
'Extra space before ( in function call')
# If the ) is followed only by a newline or a { + newline, assume it's
# part of a control statement (if/while/etc), and don't complain
if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
# If the closing parenthesis is preceded by only whitespaces,
# try to give a more descriptive error message.
if Search(r'^\s+\)', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Closing ) should be moved to the previous line')
else:
error(filename, linenum, 'whitespace/parens', 2,
'Extra space before )')
def IsBlankLine(line):
"""Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
only whitespace.
Args:
line: A line of a string.
Returns:
True, if the given line is blank.
"""
return not line or line.isspace()
def CheckForFunctionLengths(filename, clean_lines, linenum,
function_state, error):
"""Reports for long function bodies.
For an overview why this is done, see:
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[linenum]
raw = clean_lines.raw_lines
raw_line = raw[linenum]
joined_line = ''
starting_func = False
regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
match_result = Match(regexp, line)
if match_result:
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name == 'TEST' or function_name == 'TEST_F' or (
not Match(r'[A-Z_]+$', function_name)):
starting_func = True
if starting_func:
body_found = False
for start_linenum in xrange(linenum, clean_lines.NumLines()):
start_line = lines[start_linenum]
joined_line += ' ' + start_line.lstrip()
if Search(r'(;|})', start_line): # Declarations and trivial functions
body_found = True
break # ... ignore
elif Search(r'{', start_line):
body_found = True
function = Search(r'((\w|:)*)\(', line).group(1)
if Match(r'TEST', function): # Handle TEST... macros
parameter_regexp = Search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
function += '()'
function_state.Begin(function)
break
if not body_found:
# No body for the function (or evidence of a non-function) was found.
error(filename, linenum, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
elif Match(r'^\}\s*$', line): # function end
function_state.Check(error, filename, linenum)
function_state.End()
elif not Match(r'^\s*$', line):
function_state.Count() # Count non-blank/non-comment lines.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
def CheckComment(comment, filename, linenum, error):
"""Checks for common mistakes in TODO comments.
Args:
comment: The text of the comment from the line in question.
filename: The name of the current file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
match = _RE_PATTERN_TODO.match(comment)
if match:
# One space is correct; zero spaces are handled elsewhere.
leading_whitespace = match.group(1)
if len(leading_whitespace) > 1:
error(filename, linenum, 'whitespace/todo', 2,
'Too many spaces before TODO')
username = match.group(2)
if not username:
error(filename, linenum, 'readability/todo', 2,
'Missing username in TODO; it should look like '
'"// TODO(my_username): Stuff."')
middle_whitespace = match.group(3)
# Comparisons made explicit for correctness -- pylint: disable-msg=C6403
if middle_whitespace != ' ' and middle_whitespace != '':
error(filename, linenum, 'whitespace/todo', 2,
'TODO(my_username) should be followed by a space')
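# Illustrative TODO comments and the diagnostics above (hypothetical lines):
#   '//   TODO(user): fix'  -> 'Too many spaces before TODO'
#   '// TODO: fix'          -> 'Missing username in TODO; ...'
#   '// TODO(user):fix'     -> 'TODO(my_username) should be followed by a space'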
def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
"""Checks for improper use of DISALLOW* macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
r'DISALLOW_EVIL_CONSTRUCTORS|'
r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
if not matched:
return
if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
if nesting_state.stack[-1].access != 'private':
error(filename, linenum, 'readability/constructors', 3,
'%s must be in the private: section' % matched.group(1))
else:
# Found DISALLOW* macro outside a class declaration, or perhaps it
# was used inside a function when it should have been part of the
# class declaration. We could issue a warning here, but it
# probably resulted in a compiler error already.
pass
def FindNextMatchingAngleBracket(clean_lines, linenum, init_suffix):
"""Find the corresponding > to close a template.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: Current line number.
init_suffix: Remainder of the current line after the initial <.
Returns:
True if a matching bracket exists.
"""
line = init_suffix
nesting_stack = ['<']
while True:
# Find the next operator that can tell us whether < is used as an
# opening bracket or as a less-than operator. We only want to
# warn on the latter case.
#
# We could also check all other operators and terminate the search
# early, e.g. if we got something like this "a<b+c", the "<" is
# most likely a less-than operator, but then we will get false
# positives for default arguments (e.g. http://go/prccd) and
# other template expressions (e.g. http://go/oxcjq).
match = Search(r'^[^<>(),;\[\]]*([<>(),;\[\]])(.*)$', line)
if match:
# Found an operator, update nesting stack
operator = match.group(1)
line = match.group(2)
if nesting_stack[-1] == '<':
# Expecting closing angle bracket
if operator in ('<', '(', '['):
nesting_stack.append(operator)
elif operator == '>':
nesting_stack.pop()
if not nesting_stack:
# Found matching angle bracket
return True
elif operator == ',':
# Got a comma after a bracket, this is most likely a template
# argument. We have not seen a closing angle bracket yet, but
# it's probably a few lines later if we look for it, so just
# return early here.
return True
else:
# Got some other operator.
return False
else:
# Expecting closing parenthesis or closing bracket
if operator in ('<', '(', '['):
nesting_stack.append(operator)
elif operator in (')', ']'):
# We don't bother checking for matching () or []. If we got
# something like (] or [), it would have been a syntax error.
nesting_stack.pop()
else:
# Scan the next line
linenum += 1
if linenum >= len(clean_lines.elided):
break
line = clean_lines.elided[linenum]
# Exhausted all remaining lines and still no matching angle bracket.
# Most likely the input was incomplete, otherwise we should have
# seen a semicolon and returned early.
return True
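# Illustrative outcomes (hypothetical single-line init_suffix values):
#   'T> x;'         -> True  (the '>' closes the template)
#   ' b) { return;' -> False (')' arrives while expecting '>': a less-than)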
def FindPreviousMatchingAngleBracket(clean_lines, linenum, init_prefix):
"""Find the corresponding < that started a template.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: Current line number.
init_prefix: Part of the current line before the initial >.
Returns:
True if a matching bracket exists.
"""
line = init_prefix
nesting_stack = ['>']
while True:
# Find the previous operator
match = Search(r'^(.*)([<>(),;\[\]])[^<>(),;\[\]]*$', line)
if match:
# Found an operator, update nesting stack
operator = match.group(2)
line = match.group(1)
if nesting_stack[-1] == '>':
# Expecting opening angle bracket
if operator in ('>', ')', ']'):
nesting_stack.append(operator)
elif operator == '<':
nesting_stack.pop()
if not nesting_stack:
# Found matching angle bracket
return True
elif operator == ',':
# Got a comma before a bracket, this is most likely a
# template argument. The opening angle bracket is probably
# there if we look for it, so just return early here.
return True
else:
# Got some other operator.
return False
else:
# Expecting opening parenthesis or opening bracket
if operator in ('>', ')', ']'):
nesting_stack.append(operator)
elif operator in ('(', '['):
nesting_stack.pop()
else:
# Scan the previous line
linenum -= 1
if linenum < 0:
break
line = clean_lines.elided[linenum]
# Exhausted all earlier lines and still no matching angle bracket.
return False
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't add a blank line
after public/protected/private, don't have too many blank lines in a row.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
raw = clean_lines.raw_lines
line = raw[linenum]
# Before nixing comments, check if the line is blank for no good
# reason. This includes the first line after a block is opened, and
# blank lines at the end of a function (ie, right before a line like '}').
#
# Skip all the blank line checks if we are immediately inside a
# namespace body. In other words, don't issue blank line warnings
# for this block:
# namespace {
#
# }
#
# A warning about missing end of namespace comments will be issued instead.
if IsBlankLine(line) and not nesting_state.InNamespaceBody():
elided = clean_lines.elided
prev_line = elided[linenum - 1]
prevbrace = prev_line.rfind('{')
# TODO(unknown): Don't complain if line before blank line, and line after,
# both start with alnums and are indented the same amount.
# This ignores whitespace at the start of a namespace block
# because those are not usually indented.
if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
# OK, we have a blank line at the start of a code block. Before we
# complain, we check if it is an exception to the rule: The previous
# non-empty line has the parameters of a function header that are indented
      # 4 spaces (because they did not fit in an 80-column line when placed on
      # the same line as the function name).  We also check for the case where
      # the previous line is indented 6 spaces, which may happen when the
      # initializers of a constructor do not fit into an 80-column line.
exception = False
if Match(r' {6}\w', prev_line): # Initializer list?
# We are looking for the opening column of initializer list, which
# should be indented 4 spaces to cause 6 space indentation afterwards.
search_position = linenum-2
while (search_position >= 0
and Match(r' {6}\w', elided[search_position])):
search_position -= 1
exception = (search_position >= 0
and elided[search_position][:5] == ' :')
else:
        # Search for the function arguments or an initializer list.  We use a
        # simple heuristic here: if the line is indented 4 spaces and we have
        # a closing paren (without the opening paren), followed by an opening
        # brace or colon (for initializer lists), we assume that it is the
        # last line of a function header.  If we have a colon indented 4
        # spaces, it is an initializer list.
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
prev_line)
or Match(r' {4}:', prev_line))
if not exception:
error(filename, linenum, 'whitespace/blank_line', 2,
'Blank line at the start of a code block. Is this needed?')
# Ignore blank lines at the end of a block in a long if-else
# chain, like this:
# if (condition1) {
# // Something followed by a blank line
#
# } else if (condition2) {
# // Something else
# }
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
if (next_line
and Match(r'\s*}', next_line)
and next_line.find('} else ') == -1):
error(filename, linenum, 'whitespace/blank_line', 3,
'Blank line at the end of a code block. Is this needed?')
matched = Match(r'\s*(public|protected|private):', prev_line)
if matched:
error(filename, linenum, 'whitespace/blank_line', 3,
'Do not leave a blank line after "%s:"' % matched.group(1))
# Next, we complain if there's a comment too near the text
commentpos = line.find('//')
if commentpos != -1:
# Check if the // may be in quotes. If so, ignore it
# Comparisons made explicit for clarity -- pylint: disable-msg=C6403
if (line.count('"', 0, commentpos) -
line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes
# Allow one space for new scopes, two spaces otherwise:
if (not Match(r'^\s*{ //', line) and
((commentpos >= 1 and
line[commentpos-1] not in string.whitespace) or
(commentpos >= 2 and
line[commentpos-2] not in string.whitespace))):
error(filename, linenum, 'whitespace/comments', 2,
'At least two spaces is best between code and comments')
# There should always be a space between the // and the comment
commentend = commentpos + 2
if commentend < len(line) and not line[commentend] == ' ':
# but some lines are exceptions -- e.g. if they're big
# comment delimiters like:
# //----------------------------------------------------------
# or are an empty C++ style Doxygen comment, like:
# ///
# or they begin with multiple slashes followed by a space:
# //////// Header comment
match = (Search(r'[=/-]{4,}\s*$', line[commentend:]) or
Search(r'^/$', line[commentend:]) or
Search(r'^/+ ', line[commentend:]))
if not match:
error(filename, linenum, 'whitespace/comments', 4,
'Should have a space between // and comment')
CheckComment(line[commentpos:], filename, linenum, error)
line = clean_lines.elided[linenum] # get rid of comments and strings
# Don't try to do spacing checks for operator methods
  line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', r'operator\(', line)
# We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
# Otherwise not. Note we only check for non-spaces on *both* sides;
# sometimes people put non-spaces on one side when aligning ='s among
# many lines (not that this is behavior that I approve of...)
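  # Illustrative examples (assumed, not exhaustive): "i=y" and "x.a=3"
  # would be flagged by the check below, while "if (a=Foo())" is allowed.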
if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
error(filename, linenum, 'whitespace/operators', 4,
'Missing spaces around =')
# It's ok not to have spaces around binary operators like + - * /, but if
# there's too little whitespace, we get concerned. It's hard to tell,
# though, so we punt on this one for now. TODO.
# You should always have whitespace around binary operators.
#
# Check <= and >= first to avoid false positives with < and >, then
# check non-include lines for spacing around < and >.
match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
# We allow no-spaces around << when used like this: 10<<20, but
# not otherwise (particularly, not when used as streams)
match = Search(r'(\S)(?:L|UL|ULL|l|ul|ull)?<<(\S)', line)
if match and not (match.group(1).isdigit() and match.group(2).isdigit()):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <<')
elif not Match(r'#.*include', line):
# Avoid false positives on ->
reduced_line = line.replace('->', '')
    # Look for < that is not surrounded by spaces.  This is only
    # triggered if both sides are missing spaces, even though
    # technically we should flag it if at least one side is missing a
    # space.  This is done to avoid some false positives with shifts.
match = Search(r'[^\s<]<([^\s=<].*)', reduced_line)
if (match and
not FindNextMatchingAngleBracket(clean_lines, linenum, match.group(1))):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <')
# Look for > that is not surrounded by spaces. Similar to the
# above, we only trigger if both sides are missing spaces to avoid
# false positives with shifts.
match = Search(r'^(.*[^\s>])>[^\s=>]', reduced_line)
if (match and
not FindPreviousMatchingAngleBracket(clean_lines, linenum,
match.group(1))):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >')
# We allow no-spaces around >> for almost anything. This is because
# C++11 allows ">>" to close nested templates, which accounts for
# most cases when ">>" is not followed by a space.
#
# We still warn on ">>" followed by alpha character, because that is
# likely due to ">>" being used for right shifts, e.g.:
# value >> alpha
#
# When ">>" is used to close templates, the alphanumeric letter that
# follows would be part of an identifier, and there should still be
# a space separating the template type and the identifier.
# type<type<type>> alpha
match = Search(r'>>[a-zA-Z_]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >>')
# There shouldn't be space around unary operators
match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
if match:
error(filename, linenum, 'whitespace/operators', 4,
'Extra space for operator %s' % match.group(1))
# A pet peeve of mine: no spaces after an if, while, switch, or for
match = Search(r' (if\(|for\(|while\(|switch\()', line)
if match:
error(filename, linenum, 'whitespace/parens', 5,
'Missing space before ( in %s' % match.group(1))
# For if/for/while/switch, the left and right parens should be
# consistent about how many spaces are inside the parens, and
  # there should be either zero or one space inside the parens.
# We don't want: "if ( foo)" or "if ( foo )".
# Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
match = Search(r'\b(if|for|while|switch)\s*'
r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
line)
if match:
if len(match.group(2)) != len(match.group(4)):
if not (match.group(3) == ';' and
len(match.group(2)) == 1 + len(match.group(4)) or
not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
error(filename, linenum, 'whitespace/parens', 5,
'Mismatching spaces inside () in %s' % match.group(1))
if not len(match.group(2)) in [0, 1]:
error(filename, linenum, 'whitespace/parens', 5,
'Should have zero or one spaces inside ( and ) in %s' %
match.group(1))
# You should always have a space after a comma (either as fn arg or operator)
if Search(r',[^\s]', line):
error(filename, linenum, 'whitespace/comma', 3,
'Missing space after ,')
# You should always have a space after a semicolon
# except for few corner cases
  # TODO(unknown): clarify if 'if (1) { return 1;}' requires one more
  # space after ;
if Search(r';[^\s};\\)/]', line):
error(filename, linenum, 'whitespace/semicolon', 3,
'Missing space after ;')
# Next we will look for issues with function calls.
CheckSpacingForFunctionCall(filename, line, linenum, error)
# Except after an opening paren, or after another opening brace (in case of
# an initializer list, for instance), you should have spaces before your
# braces. And since you should never have braces at the beginning of a line,
# this is an easy test.
if Search(r'[^ ({]{', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before {')
# Make sure '} else {' has spaces.
if Search(r'}else', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before else')
# You shouldn't have spaces before your brackets, except maybe after
# 'delete []' or 'new char * []'.
if Search(r'\w\s+\[', line) and not Search(r'delete\s+\[', line):
error(filename, linenum, 'whitespace/braces', 5,
'Extra space before [')
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
if Search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use {} instead.')
elif Search(r'^\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use {} instead.')
elif (Search(r'\s+;\s*$', line) and
not Search(r'\bfor\b', line)):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use {} instead.')
  # In range-based for loops, we want spaces before and after the colon,
  # but not around "::" tokens that might appear.
  if (Search(r'for *\(.*[^:]:[^: ]', line) or
      Search(r'for *\(.*[^: ]:[^:]', line)):
error(filename, linenum, 'whitespace/forcolon', 2,
'Missing space around colon in range-based for loop')
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
"""Checks for additional blank line issues related to sections.
Currently the only thing checked here is blank line before protected/private.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
class_info: A _ClassInfo objects.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Skip checks if the class is small, where small means 25 lines or less.
# 25 lines seems like a good cutoff since that's the usual height of
# terminals, and any class that can't fit in one screen can't really
# be considered "small".
#
# Also skip checks if we are on the first line. This accounts for
# classes that look like
# class Foo { public: ... };
#
# If we didn't find the end of the class, last_line would be zero,
# and the check will be skipped by the first condition.
if (class_info.last_line - class_info.starting_linenum <= 24 or
linenum <= class_info.starting_linenum):
return
matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
if matched:
# Issue warning if the line before public/protected/private was
# not a blank line, but don't do this if the previous line contains
# "class" or "struct". This can happen two ways:
# - We are at the beginning of the class.
# - We are forward-declaring an inner class that is semantically
# private, but needed to be public for implementation reasons.
# Also ignores cases where the previous line ends with a backslash as can be
# common when defining classes in C macros.
prev_line = clean_lines.lines[linenum - 1]
if (not IsBlankLine(prev_line) and
not Search(r'\b(class|struct)\b', prev_line) and
not Search(r'\\$', prev_line)):
# Try a bit harder to find the beginning of the class. This is to
# account for multi-line base-specifier lists, e.g.:
# class Derived
# : public Base {
end_class_head = class_info.starting_linenum
for i in range(class_info.starting_linenum, linenum):
if Search(r'\{\s*$', clean_lines.lines[i]):
end_class_head = i
break
if end_class_head < linenum - 1:
error(filename, linenum, 'whitespace/blank_line', 3,
'"%s:" should be preceded by a blank line' % matched.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
"""Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
"""
prevlinenum = linenum - 1
while prevlinenum >= 0:
prevline = clean_lines.elided[prevlinenum]
if not IsBlankLine(prevline): # if not a blank line...
return (prevline, prevlinenum)
prevlinenum -= 1
return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
"""Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
if Match(r'\s*{\s*$', line):
# We allow an open brace to start a line in the case where someone
# is using braces in a block to explicitly create a new scope,
# which is commonly used to control the lifetime of
# stack-allocated variables. We don't detect this perfectly: we
# just don't complain if the last non-whitespace character on the
# previous non-blank line is ';', ':', '{', or '}', or if the previous
# line starts a preprocessor block.
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if (not Search(r'[;:}{]\s*$', prevline) and
not Match(r'\s*#', prevline)):
error(filename, linenum, 'whitespace/braces', 4,
'{ should almost always be at the end of the previous line')
# An else clause should be on the same line as the preceding closing brace.
if Match(r'\s*else\s*', line):
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if Match(r'\s*}\s*$', prevline):
error(filename, linenum, 'whitespace/newline', 4,
'An else should appear on the same line as the preceding }')
# If braces come on one side of an else, they should be on both.
# However, we have to worry about "else if" that spans multiple lines!
if Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
if Search(r'}\s*else if([^{]*)$', line): # could be multi-line if
# find the ( after the if
pos = line.find('else if')
pos = line.find('(', pos)
if pos > 0:
(endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
if endline[endpos:].find('{') == -1: # must be brace after if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
else: # common case: else not followed by a multi-line if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
# Likewise, an else should never have the else clause on the same line
if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
error(filename, linenum, 'whitespace/newline', 4,
'Else clause should never be on same line as else (use 2 lines)')
# In the same way, a do/while should never be on one line
if Match(r'\s*do [^\s{]', line):
error(filename, linenum, 'whitespace/newline', 4,
'do/while clauses should not be on a single line')
# Braces shouldn't be followed by a ; unless they're defining a struct
# or initializing an array.
# We can't tell in general, but we can for some common cases.
prevlinenum = linenum
while True:
(prevline, prevlinenum) = GetPreviousNonBlankLine(clean_lines, prevlinenum)
if Match(r'\s+{.*}\s*;', line) and not prevline.count(';'):
line = prevline + line
else:
break
if (Search(r'{.*}\s*;', line) and
line.count('{') == line.count('}') and
not Search(r'struct|class|enum|\s*=\s*{', line)):
error(filename, linenum, 'readability/braces', 4,
"You don't need a ; after a }")
def CheckEmptyLoopBody(filename, clean_lines, linenum, error):
"""Loop for empty loop body with only a single semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
  # Search for loop keywords at the beginning of the line.  Because only
  # whitespace is allowed before the keywords, this will also ignore most
  # do-while-loops, since those lines should start with a closing brace.
line = clean_lines.elided[linenum]
if Match(r'\s*(for|while)\s*\(', line):
# Find the end of the conditional expression
(end_line, end_linenum, end_pos) = CloseExpression(
clean_lines, linenum, line.find('('))
# Output warning if what follows the condition expression is a semicolon.
# No warning for all other cases, including whitespace or newline, since we
# have a separate check for semicolons preceded by whitespace.
if end_pos >= 0 and Match(r';', end_line[end_pos:]):
error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
'Empty loop bodies should use {} or continue')
def ReplaceableCheck(operator, macro, line):
"""Determine whether a basic CHECK can be replaced with a more specific one.
  For example, suggest using CHECK_EQ instead of CHECK(a == b) and
similarly for CHECK_GE, CHECK_GT, CHECK_LE, CHECK_LT, CHECK_NE.
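  Example (illustrative):
    >>> ReplaceableCheck('==', 'CHECK', 'CHECK(x == 42)')
    True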
Args:
operator: The C++ operator used in the CHECK.
macro: The CHECK or EXPECT macro being called.
line: The current source line.
Returns:
True if the CHECK can be replaced with a more specific one.
"""
# This matches decimal and hex integers, strings, and chars (in that order).
match_constant = r'([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')'
# Expression to match two sides of the operator with something that
# looks like a literal, since CHECK(x == iterator) won't compile.
# This means we can't catch all the cases where a more specific
# CHECK is possible, but it's less annoying than dealing with
# extraneous warnings.
match_this = (r'\s*' + macro + r'\((\s*' +
match_constant + r'\s*' + operator + r'[^<>].*|'
r'.*[^<>]' + operator + r'\s*' + match_constant +
r'\s*\))')
# Don't complain about CHECK(x == NULL) or similar because
# CHECK_EQ(x, NULL) won't compile (requires a cast).
# Also, don't complain about more complex boolean expressions
# involving && or || such as CHECK(a == b || c == d).
return Match(match_this, line) and not Search(r'NULL|&&|\|\|', line)
def CheckCheck(filename, clean_lines, linenum, error):
"""Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Decide the set of replacement macros that should be suggested
raw_lines = clean_lines.raw_lines
current_macro = ''
for macro in _CHECK_MACROS:
if raw_lines[linenum].find(macro) >= 0:
current_macro = macro
break
if not current_macro:
# Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
return
line = clean_lines.elided[linenum] # get rid of comments and strings
# Encourage replacing plain CHECKs with CHECK_EQ/CHECK_NE/etc.
for operator in ['==', '!=', '>=', '>', '<=', '<']:
if ReplaceableCheck(operator, current_macro, line):
error(filename, linenum, 'readability/check', 2,
'Consider using %s instead of %s(a %s b)' % (
_CHECK_REPLACEMENT[current_macro][operator],
current_macro, operator))
break
def CheckAltTokens(filename, clean_lines, linenum, error):
"""Check alternative keywords being used in boolean expressions.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Avoid preprocessor lines
if Match(r'^\s*#', line):
return
# Last ditch effort to avoid multi-line comments. This will not help
# if the comment started before the current line or ended after the
# current line, but it catches most of the false positives. At least,
  # it provides a way to work around this warning for people who use
# multi-line comments in preprocessor macros.
#
# TODO(unknown): remove this once cpplint has better support for
# multi-line comments.
if line.find('/*') >= 0 or line.find('*/') >= 0:
return
for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
error(filename, linenum, 'readability/alt_tokens', 2,
'Use operator %s instead of %s' % (
_ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
def GetLineWidth(line):
"""Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
"""
if isinstance(line, unicode):
width = 0
for uc in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(uc) in ('W', 'F'):
width += 2
elif not unicodedata.combining(uc):
width += 1
return width
else:
return len(line)
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
raw_lines = clean_lines.raw_lines
line = raw_lines[linenum]
if line.find('\t') != -1:
error(filename, linenum, 'whitespace/tab', 1,
'Tab found; better to use spaces')
  # One or three blank spaces at the beginning of the line are weird; it's
  # hard to reconcile that with 2-space indents.
  # NOTE: here are the conditions Rob Pike used for his tests.  Mine aren't
# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
# if(RLENGTH > 20) complain = 0;
# if(match($0, " +(error|private|public|protected):")) complain = 0;
# if(match(prev, "&& *$")) complain = 0;
# if(match(prev, "\\|\\| *$")) complain = 0;
# if(match(prev, "[\",=><] *$")) complain = 0;
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
while initial_spaces < len(line) and line[initial_spaces] == ' ':
initial_spaces += 1
if line and line[-1].isspace():
error(filename, linenum, 'whitespace/end_of_line', 4,
'Line ends in whitespace. Consider deleting these extra spaces.')
# There are certain situations we allow one space, notably for labels
elif ((initial_spaces == 1 or initial_spaces == 3) and
not Match(r'\s*\w+\s*:\s*$', cleansed_line)):
error(filename, linenum, 'whitespace/indent', 3,
'Weird number of spaces at line-start. '
'Are you using a 2-space indent?')
# Labels should always be indented at least one space.
elif not initial_spaces and line[:2] != '//' and Search(r'[^:]:\s*$',
line):
error(filename, linenum, 'whitespace/labels', 4,
'Labels should always be indented at least one space. '
'If this is a member-initializer list in a constructor or '
'the base class list in a class definition, the colon should '
'be on the following line.')
# Check if the line is a header guard.
is_header_guard = False
if file_extension == 'h':
cppvar = GetHeaderGuardCPPVariable(filename)
if (line.startswith('#ifndef %s' % cppvar) or
line.startswith('#define %s' % cppvar) or
line.startswith('#endif // %s' % cppvar)):
is_header_guard = True
# #include lines and header guards can be long, since there's no clean way to
# split them.
#
# URLs can be long too. It's possible to split these, but it makes them
# harder to cut&paste.
#
# The "$Id:...$" comment may also get very long without it being the
  # developer's fault.
if (not line.startswith('#include') and not is_header_guard and
not Match(r'^\s*//.*http(s?)://\S*$', line) and
not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
line_width = GetLineWidth(line)
if line_width > 100:
error(filename, linenum, 'whitespace/line_length', 4,
'Lines should very rarely be longer than 100 characters')
elif line_width > 80:
error(filename, linenum, 'whitespace/line_length', 2,
'Lines should be <= 80 characters long')
if (cleansed_line.count(';') > 1 and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
# It's ok to have many commands in a switch case that fits in 1 line
not ((cleansed_line.find('case ') != -1 or
cleansed_line.find('default:') != -1) and
cleansed_line.find('break;') != -1)):
error(filename, linenum, 'whitespace/newline', 0,
'More than one command on the same line')
# Some more style checks
CheckBraces(filename, clean_lines, linenum, error)
CheckEmptyLoopBody(filename, clean_lines, linenum, error)
CheckAccess(filename, clean_lines, linenum, nesting_state, error)
CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckCheck(filename, clean_lines, linenum, error)
CheckAltTokens(filename, clean_lines, linenum, error)
classinfo = nesting_state.InnermostClass()
if classinfo:
CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _IsTestFilename(filename):
"""Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise.
"""
  return (filename.endswith('_test.cc') or
          filename.endswith('_unittest.cc') or
          filename.endswith('_regtest.cc'))
def _ClassifyInclude(fileinfo, include, is_system):
"""Figures out what kind of header 'include' is.
Args:
fileinfo: The current file cpplint is running over. A FileInfo instance.
include: The path to a #included file.
is_system: True if the #include used <> rather than "".
Returns:
One of the _XXX_HEADER constants.
For example:
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
_C_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
_CPP_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
_LIKELY_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
... 'bar/foo_other_ext.h', False)
_POSSIBLE_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
_OTHER_HEADER
"""
# This is a list of all standard c++ header files, except
# those already checked for above.
is_stl_h = include in _STL_HEADERS
is_cpp_h = is_stl_h or include in _CPP_HEADERS
if is_system:
if is_cpp_h:
return _CPP_SYS_HEADER
else:
return _C_SYS_HEADER
  # If the target file and the include we're checking share a
  # basename when we drop common extensions, and the include
  # lives in the same directory (or the sibling 'public' directory),
  # then it's likely to be owned by the target file.
target_dir, target_base = (
os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
if target_base == include_base and (
include_dir == target_dir or
include_dir == os.path.normpath(target_dir + '/../public')):
return _LIKELY_MY_HEADER
# If the target and include share some initial basename
# component, it's possible the target is implementing the
# include, so it's allowed to be first, but we'll never
# complain if it's not there.
target_first_component = _RE_FIRST_COMPONENT.match(target_base)
include_first_component = _RE_FIRST_COMPONENT.match(include_base)
if (target_first_component and include_first_component and
target_first_component.group(0) ==
include_first_component.group(0)):
return _POSSIBLE_MY_HEADER
return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
"""Check rules that are applicable to #include lines.
Strings on #include lines are NOT removed from elided line, to make
certain tasks easier. However, to prevent false positives, checks
applicable to #include lines in CheckLanguage must be put here.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
fileinfo = FileInfo(filename)
line = clean_lines.lines[linenum]
# "include" should use the new style "foo/bar.h" instead of just "bar.h"
if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line):
error(filename, linenum, 'build/include', 4,
'Include the directory when naming .h files')
  # We shouldn't include a file more than once.  Actually, there are a
  # handful of instances where doing so is okay, but in general it's
  # not.
match = _RE_PATTERN_INCLUDE.search(line)
if match:
include = match.group(2)
is_system = (match.group(1) == '<')
if include in include_state:
error(filename, linenum, 'build/include', 4,
'"%s" already included at %s:%s' %
(include, filename, include_state[include]))
else:
include_state[include] = linenum
# We want to ensure that headers appear in the right order:
# 1) for foo.cc, foo.h (preferred location)
# 2) c system files
# 3) cpp system files
# 4) for foo.cc, foo.h (deprecated location)
# 5) other google headers
#
# We classify each include statement as one of those 5 types
# using a number of techniques. The include_state object keeps
# track of the highest type seen, and complains if we see a
# lower type after that.
error_message = include_state.CheckNextIncludeOrder(
_ClassifyInclude(fileinfo, include, is_system))
if error_message:
error(filename, linenum, 'build/include_order', 4,
'%s. Should be: %s.h, c system, c++ system, other.' %
(error_message, fileinfo.BaseName()))
if not include_state.IsInAlphabeticalOrder(include):
error(filename, linenum, 'build/include_alpha', 4,
'Include "%s" not in alphabetical order' % include)
# Look for any of the stream classes that are part of standard C++.
match = _RE_PATTERN_INCLUDE.match(line)
if match:
include = match.group(2)
if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
# Many unit tests use cout, so we exempt them.
if not _IsTestFilename(filename):
error(filename, linenum, 'readability/streams', 3,
'Streams are highly discouraged.')
def _GetTextInside(text, start_pattern):
"""Retrieves all the text between matching open and close parentheses.
  Given a string of lines and a regular expression string, retrieve all the
  text following the expression and between opening punctuation symbols like
  (, [, or {, and the matching close-punctuation symbol.  This handles
  properly nested occurrences of the punctuation, so for text like
    printf(a(), b(c()));
  a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
  start_pattern must match a string that has an opening punctuation symbol
  at the end.
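  Example (illustrative):
    >>> _GetTextInside('printf(a(), b(c()));', r'printf\(')
    'a(), b(c())'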
Args:
    text: The text to extract from.  Its comments and strings must be elided.
          It can be a single line or span multiple lines.
start_pattern: The regexp string indicating where to start extracting
the text.
Returns:
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
  # TODO(sugawarayu): Audit cpplint.py to see what places could be profitably
  # rewritten to use _GetTextInside (they currently use inferior regexp
  # matching).
  # Map each opening punctuation to its matching closing punctuation.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(matching_punctuation.itervalues())
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
  assert start_position > 0, (
      'start_pattern must end with an opening punctuation.')
  assert text[start_position - 1] in matching_punctuation, (
      'start_pattern must end with an opening punctuation.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
  # All punctuation matched; return the text found inside.
return text[start_position:position - 1]
def CheckLanguage(filename, clean_lines, linenum, file_extension, include_state,
error):
"""Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
# If the line is empty or consists of entirely a comment, no need to
# check it.
line = clean_lines.elided[linenum]
if not line:
return
match = _RE_PATTERN_INCLUDE.search(line)
if match:
CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
return
# Create an extended_line, which is the concatenation of the current and
# next lines, for more effective checking of code that may span more than one
# line.
if linenum + 1 < clean_lines.NumLines():
extended_line = line + clean_lines.elided[linenum + 1]
else:
extended_line = line
# Make Windows paths like Unix.
fullname = os.path.abspath(filename).replace('\\', '/')
# TODO(unknown): figure out if they're using default arguments in fn proto.
# Check for non-const references in functions. This is tricky because &
# is also used to take the address of something. We allow <> for templates,
# (ignoring whatever is between the braces) and : for classes.
# These are complicated re's. They try to capture the following:
# paren (for fn-prototype start), typename, &, varname. For the const
# version, we're willing for const to be before typename or after
# Don't check the implementation on same line.
fnline = line.split('{', 1)[0]
if (len(re.findall(r'\([^()]*\b(?:[\w:]|<[^()]*>)+(\s?&|&\s?)\w+', fnline)) >
len(re.findall(r'\([^()]*\bconst\s+(?:typename\s+)?(?:struct\s+)?'
r'(?:[\w:]|<[^()]*>)+(\s?&|&\s?)\w+', fnline)) +
len(re.findall(r'\([^()]*\b(?:[\w:]|<[^()]*>)+\s+const(\s?&|&\s?)[\w]+',
fnline))):
# We allow non-const references in a few standard places, like functions
# called "swap()" or iostream operators like "<<" or ">>". We also filter
# out for loops, which lint otherwise mistakenly thinks are functions.
if not Search(
r'(for|swap|Swap|operator[<>][<>])\s*\(\s*'
r'(?:(?:typename\s*)?[\w:]|<.*>)+\s*&',
fnline):
error(filename, linenum, 'runtime/references', 2,
'Is this a non-const reference? '
'If so, make const or use a pointer.')
  # Check to see if they're using a conversion function cast.
# I just try to capture the most common basic types, though there are more.
# Parameterless conversion functions, such as bool(), are allowed as they are
# probably a member operator declaration or default constructor.
match = Search(
r'(\bnew\s+)?\b' # Grab 'new' operator, if it's there
r'(int|float|double|bool|char|int32|uint32|int64|uint64)\([^)]', line)
if match:
# gMock methods are defined using some variant of MOCK_METHODx(name, type)
# where type may be float(), int(string), etc. Without context they are
# virtually indistinguishable from int(x) casts. Likewise, gMock's
# MockCallback takes a template parameter of the form return_type(arg_type),
# which looks much like the cast we're trying to detect.
if (match.group(1) is None and # If new operator, then this isn't a cast
not (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
Match(r'^\s*MockCallback<.*>', line))):
# Try a bit harder to catch gmock lines: the only place where
# something looks like an old-style cast is where we declare the
# return type of the mocked method, and the only time when we
# are missing context is if MOCK_METHOD was split across
# multiple lines (for example http://go/hrfhr ), so we only need
# to check the previous line for MOCK_METHOD.
if (linenum == 0 or
not Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(\S+,\s*$',
clean_lines.elided[linenum - 1])):
error(filename, linenum, 'readability/casting', 4,
'Using deprecated casting style. '
'Use static_cast<%s>(...) instead' %
match.group(2))
CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'static_cast',
r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
# This doesn't catch all cases. Consider (const char * const)"hello".
#
# (char *) "foo" should always be a const_cast (reinterpret_cast won't
# compile).
if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'const_cast', r'\((char\s?\*+\s?)\)\s*"', error):
pass
else:
# Check pointer casts for other than string constants
CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
# In addition, we look for people taking the address of a cast. This
# is dangerous -- casts can assign to temporaries, so the pointer doesn't
# point where you think.
if Search(
r'(&\([^)]+\)[\w(])|(&(static|dynamic|reinterpret)_cast\b)', line):
error(filename, linenum, 'runtime/casting', 4,
('Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'))
# Check for people declaring static/global STL strings at the top level.
# This is dangerous because the C++ language does not guarantee that
# globals with constructors are initialized before the first access.
match = Match(
r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
line)
# Make sure it's not a function.
# Function template specialization looks like: "string foo<Type>(...".
# Class template definitions look like: "string Foo<Type>::Method(...".
if match and not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)',
match.group(3)):
error(filename, linenum, 'runtime/string', 4,
'For a static/global string constant, use a C style string instead: '
'"%schar %s[]".' %
(match.group(1), match.group(2)))
# Check that we're not using RTTI outside of testing code.
if Search(r'\bdynamic_cast<', line) and not _IsTestFilename(filename):
error(filename, linenum, 'runtime/rtti', 5,
'Do not use dynamic_cast<>. If you need to cast within a class '
"hierarchy, use static_cast<> to upcast. Google doesn't support "
'RTTI.')
if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
error(filename, linenum, 'runtime/init', 4,
'You seem to be initializing a member variable with itself.')
if file_extension == 'h':
# TODO(unknown): check that 1-arg constructors are explicit.
# How to tell it's a constructor?
# (handled in CheckForNonStandardConstructs for now)
# TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS
# (level 1 error)
pass
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
if Search(r'\bshort port\b', line):
if not Search(r'\bunsigned short port\b', line):
error(filename, linenum, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
else:
match = Search(r'\b(short|long(?! +double)|long long)\b', line)
if match:
error(filename, linenum, 'runtime/int', 4,
'Use int16/int64/etc, rather than the C type %s' % match.group(1))
# When snprintf is used, the second argument shouldn't be a literal.
match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
if match and match.group(2) != '0':
# If 2nd arg is zero, snprintf is used to calculate size.
error(filename, linenum, 'runtime/printf', 3,
'If you can, use sizeof(%s) instead of %s as the 2nd arg '
'to snprintf.' % (match.group(1), match.group(2)))
# Check if some verboten C functions are being used.
if Search(r'\bsprintf\b', line):
error(filename, linenum, 'runtime/printf', 5,
'Never use sprintf. Use snprintf instead.')
match = Search(r'\b(strcpy|strcat)\b', line)
if match:
error(filename, linenum, 'runtime/printf', 4,
'Almost always, snprintf is better than %s' % match.group(1))
if Search(r'\bsscanf\b', line):
error(filename, linenum, 'runtime/printf', 1,
'sscanf can be ok, but is slow and can overflow buffers.')
# Check if some verboten operator overloading is going on
# TODO(unknown): catch out-of-line unary operator&:
# class X {};
# int operator&(const X& x) { return 42; } // unary operator&
# The trick is it's hard to tell apart from binary operator&:
# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
if Search(r'\boperator\s*&\s*\(\s*\)', line):
error(filename, linenum, 'runtime/operator', 4,
'Unary operator& is dangerous. Do not use it.')
# Check for suspicious usage of "if" like
# } if (a == b) {
if Search(r'\}\s*if\s*\(', line):
error(filename, linenum, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
# Check for potential format string bugs like printf(foo).
# We constrain the pattern not to pick things like DocidForPrintf(foo).
# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
  # TODO(sugawarayu): Catch the following case.  Need to change the calling
  # convention of the whole function to process multiple lines to handle it.
# printf(
# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
if printf_args:
match = Match(r'([\w.\->()]+)$', printf_args)
if match and match.group(1) != '__VA_ARGS__':
function_name = re.search(r'\b((?:string)?printf)\s*\(',
line, re.I).group(1)
error(filename, linenum, 'runtime/printf', 4,
'Potential format string bug. Do %s("%%s", %s) instead.'
% (function_name, match.group(1)))
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
error(filename, linenum, 'runtime/memset', 4,
'Did you mean "memset(%s, 0, %s)"?'
% (match.group(1), match.group(2)))
if Search(r'\busing namespace\b', line):
error(filename, linenum, 'build/namespaces', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
# Detect variable-length arrays.
match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
match.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
# If any of the resulting tokens are not compile time constants then
# report the error.
    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
is_const = True
skip_next = False
for tok in tokens:
if skip_next:
skip_next = False
continue
if Search(r'sizeof\(.+\)', tok): continue
if Search(r'arraysize\(\w+\)', tok): continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok: continue
if Match(r'\d+', tok): continue
if Match(r'0[xX][0-9a-fA-F]+', tok): continue
if Match(r'k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
# A catch all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
# requires skipping the next token because we split on ' ' and '*'.
if tok.startswith('sizeof'):
skip_next = True
continue
is_const = False
break
if not is_const:
error(filename, linenum, 'runtime/arrays', 1,
'Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size.")
# If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or
# DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing
# in the class declaration.
match = Match(
(r'\s*'
r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
r'\(.*\);$'),
line)
if match and linenum + 1 < clean_lines.NumLines():
next_line = clean_lines.elided[linenum + 1]
# We allow some, but not all, declarations of variables to be present
# in the statement that defines the class. The [\w\*,\s]* fragment of
# the regular expression below allows users to declare instances of
# the class or pointers to instances, but not less common types such
# as function pointers or arrays. It's a tradeoff between allowing
# reasonable code and avoiding trying to parse more C++ using regexps.
if not Search(r'^\s*}[\w\*,\s]*;', next_line):
error(filename, linenum, 'readability/constructors', 3,
match.group(1) + ' should be the last thing in the class')
# Check for use of unnamed namespaces in header files. Registration
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
if (file_extension == 'h'
and Search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
error(filename, linenum, 'build/namespaces', 4,
'Do not use unnamed namespaces in header files. See '
'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.')
def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern,
error):
"""Checks for a C-style cast by looking for the pattern.
This also handles sizeof(type) warnings, due to similarity of content.
Args:
filename: The name of the current file.
linenum: The number of the line to check.
line: The line of code to check.
raw_line: The raw line of code to check, with comments.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast, static_cast, or const_cast, depending.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
Returns:
True if an error was emitted.
False otherwise.
"""
match = Search(pattern, line)
if not match:
return False
# e.g., sizeof(int)
sizeof_match = Match(r'.*sizeof\s*$', line[0:match.start(1) - 1])
if sizeof_match:
error(filename, linenum, 'runtime/sizeof', 1,
'Using sizeof(type). Use sizeof(varname) instead if possible')
return True
# operator++(int) and operator--(int)
if (line[0:match.start(1) - 1].endswith(' operator++') or
line[0:match.start(1) - 1].endswith(' operator--')):
return False
remainder = line[match.end(0):]
  # The close paren is for function pointers as arguments to a function.
  # e.g., void foo(void (*bar)(int));
  # The semicolon check is a more basic function check; also possibly a
  # function pointer typedef.
  # e.g., void foo(int); or void foo(int) const;
  # The equals check is for function pointer assignment.
  # e.g., void *(*foo)(int) = ...
  # The > is for MockCallback<...> ...
#
# Right now, this will only catch cases where there's a single argument, and
# it's unnamed. It should probably be expanded to check for multiple
# arguments with some unnamed.
function_match = Match(r'\s*(\)|=|(const)?\s*(;|\{|throw\(\)|>))', remainder)
if function_match:
if (not function_match.group(3) or
function_match.group(3) == ';' or
('MockCallback<' not in raw_line and
'/*' not in raw_line)):
error(filename, linenum, 'readability/function', 3,
'All parameters should be named in a function')
return True
# At this point, all that should be left is actual casts.
error(filename, linenum, 'readability/casting', 4,
'Using C-style cast. Use %s<%s>(...) instead' %
(cast_type, match.group(1)))
return True
_HEADERS_CONTAINING_TEMPLATES = (
('<deque>', ('deque',)),
('<functional>', ('unary_function', 'binary_function',
'plus', 'minus', 'multiplies', 'divides', 'modulus',
'negate',
'equal_to', 'not_equal_to', 'greater', 'less',
'greater_equal', 'less_equal',
'logical_and', 'logical_or', 'logical_not',
'unary_negate', 'not1', 'binary_negate', 'not2',
'bind1st', 'bind2nd',
'pointer_to_unary_function',
'pointer_to_binary_function',
'ptr_fun',
'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
'mem_fun_ref_t',
'const_mem_fun_t', 'const_mem_fun1_t',
'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
'mem_fun_ref',
)),
('<limits>', ('numeric_limits',)),
('<list>', ('list',)),
('<map>', ('map', 'multimap',)),
('<memory>', ('allocator',)),
('<queue>', ('queue', 'priority_queue',)),
('<set>', ('set', 'multiset',)),
('<stack>', ('stack',)),
('<string>', ('char_traits', 'basic_string',)),
('<utility>', ('pair',)),
('<vector>', ('vector',)),
# gcc extensions.
# Note: std::hash is their hash, ::hash is our hash
('<hash_map>', ('hash_map', 'hash_multimap',)),
('<hash_set>', ('hash_set', 'hash_multiset',)),
('<slist>', ('slist',)),
)
_RE_PATTERN_STRING = re.compile(r'\bstring\b')
_re_pattern_algorithm_header = []
for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
'transform'):
# Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
# type::max().
_re_pattern_algorithm_header.append(
(re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
_template,
'<algorithm>'))
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
for _template in _templates:
_re_pattern_templates.append(
(re.compile(r'(\<|\b)' + _template + r'\s*\<'),
_template + '<>',
_header))
def FilesBelongToSameModule(filename_cc, filename_h):
"""Check if these two filenames belong to the same module.
  The concept of a 'module' here is as follows:
foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
same 'module' if they are in the same directory.
some/path/public/xyzzy and some/path/internal/xyzzy are also considered
to belong to the same module here.
If the filename_cc contains a longer path than the filename_h, for example,
'/absolute/path/to/base/sysinfo.cc', and this file would include
'base/sysinfo.h', this function also produces the prefix needed to open the
header. This is used by the caller of this function to more robustly open the
header file. We don't have access to the real include paths in this context,
so we need this guesswork here.
Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
according to this implementation. Because of this, this function gives
some false positives. This should be sufficiently rare in practice.
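  Example (illustrative):
    >>> FilesBelongToSameModule('/abs/path/to/base/sysinfo.cc',
    ...                         'base/sysinfo.h')
    (True, '/abs/path/to/')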
Args:
filename_cc: is the path for the .cc file
filename_h: is the path for the header path
Returns:
Tuple with a bool and a string:
bool: True if filename_cc and filename_h belong to the same module.
string: the additional prefix needed to open the header file.
"""
if not filename_cc.endswith('.cc'):
return (False, '')
filename_cc = filename_cc[:-len('.cc')]
if filename_cc.endswith('_unittest'):
filename_cc = filename_cc[:-len('_unittest')]
elif filename_cc.endswith('_test'):
filename_cc = filename_cc[:-len('_test')]
filename_cc = filename_cc.replace('/public/', '/')
filename_cc = filename_cc.replace('/internal/', '/')
if not filename_h.endswith('.h'):
return (False, '')
filename_h = filename_h[:-len('.h')]
if filename_h.endswith('-inl'):
filename_h = filename_h[:-len('-inl')]
filename_h = filename_h.replace('/public/', '/')
filename_h = filename_h.replace('/internal/', '/')
files_belong_to_same_module = filename_cc.endswith(filename_h)
common_path = ''
if files_belong_to_same_module:
common_path = filename_cc[:-len(filename_h)]
return files_belong_to_same_module, common_path
def UpdateIncludeState(filename, include_state, io=codecs):
"""Fill up the include_state with new includes found from the file.
Args:
filename: the name of the header to read.
include_state: an _IncludeState instance in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
True if a header was successfully added. False otherwise.
"""
headerfile = None
try:
headerfile = io.open(filename, 'r', 'utf8', 'replace')
except IOError:
return False
linenum = 0
for line in headerfile:
linenum += 1
clean_line = CleanseComments(line)
match = _RE_PATTERN_INCLUDE.search(clean_line)
if match:
include = match.group(2)
# The value formatting is cute, but not really used right now.
# What matters here is that the key is in include_state.
include_state.setdefault(include, '%s:%d' % (filename, linenum))
return True
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
io=codecs):
"""Reports for missing stl includes.
This function will output warnings to make sure you are including the headers
necessary for the stl containers and functions that you use. We only give one
reason to include a header. For example, if you use both equal_to<> and
less<> in a .h file, only one (the latter in the file) of these will be
reported as a reason to include the <functional>.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
include_state: An _IncludeState instance.
error: The function to call with any errors found.
io: The IO factory to use to read the header file. Provided for unittest
injection.
"""
required = {} # A map of header name to linenumber and the template entity.
# Example of required: { '<functional>': (1219, 'less<>') }
for linenum in xrange(clean_lines.NumLines()):
line = clean_lines.elided[linenum]
if not line or line[0] == '#':
continue
# String is special -- it is a non-templatized type in STL.
matched = _RE_PATTERN_STRING.search(line)
if matched:
# Don't warn about strings in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:matched.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required['<string>'] = (linenum, 'string')
for pattern, template, header in _re_pattern_algorithm_header:
if pattern.search(line):
required[header] = (linenum, template)
    # The following check is just a speedup; no semantics are changed.
    if '<' not in line:  # Reduces the CPU time usage by skipping lines.
continue
for pattern, template, header in _re_pattern_templates:
if pattern.search(line):
required[header] = (linenum, template)
# The policy is that if you #include something in foo.h you don't need to
# include it again in foo.cc. Here, we will look at possible includes.
# Let's copy the include_state so it is only messed up within this function.
include_state = include_state.copy()
# Did we find the header for this file (if any) and successfully load it?
header_found = False
# Use the absolute path so that matching works properly.
abs_filename = FileInfo(filename).FullName()
# For Emacs's flymake.
# If cpplint is invoked from Emacs's flymake, a temporary file is generated
# by flymake and that file name might end with '_flymake.cc'. In that case,
# restore original file name here so that the corresponding header file can be
# found.
# e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
# instead of 'foo_flymake.h'
abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
# include_state is modified during iteration, so we iterate over a copy of
# the keys.
header_keys = include_state.keys()
for header in header_keys:
(same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
fullpath = common_path + header
if same_module and UpdateIncludeState(fullpath, include_state, io):
header_found = True
# If we can't find the header file for a .cc, assume it's because we don't
# know where to look. In that case we'll give up as we're not sure they
# didn't include it in the .h file.
# TODO(unknown): Do a better job of finding .h files so we are confident that
# not having the .h file means there isn't one.
if filename.endswith('.cc') and not header_found:
return
# All the lines have been processed, report the errors found.
for required_header_unstripped in required:
template = required[required_header_unstripped][1]
if required_header_unstripped.strip('<>"') not in include_state:
error(filename, required[required_header_unstripped][0],
'build/include_what_you_use', 4,
'Add #include ' + required_header_unstripped + ' for ' + template)
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
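# For example, 'std::make_pair<int, int>(a, b)' matches and is flagged by
# CheckMakePairUsesDeduction below, while 'std::make_pair(a, b)' does not.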
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
"""Check that make_pair's template arguments are deduced.
G++ 4.6 in C++0x mode fails badly if make_pair's template arguments are
specified explicitly, and such use isn't intended in any case.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
raw = clean_lines.raw_lines
line = raw[linenum]
match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
if match:
error(filename, linenum, 'build/explicit_make_pair',
4, # 4 = high confidence
'For C++11-compatibility, omit template arguments from make_pair'
' OR use pair directly OR if appropriate, construct a pair directly')
def ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions=[]):
"""Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
    clean_lines: A CleansedLines instance containing the file, with comments
                 stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[line], line, error)
nesting_state.Update(filename, clean_lines, line, error)
if nesting_state.stack and nesting_state.stack[-1].inline_asm != _NO_ASM:
return
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state,
error)
CheckForNonStandardConstructs(filename, clean_lines, line,
nesting_state, error)
CheckPosixThreading(filename, clean_lines, line, error)
CheckInvalidIncrement(filename, clean_lines, line, error)
CheckMakePairUsesDeduction(filename, clean_lines, line, error)
for check_fn in extra_check_functions:
check_fn(filename, clean_lines, line, error)
def ProcessFileData(filename, file_extension, lines, error,
extra_check_functions=[]):
"""Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
lines = (['// marker so line numbers and indices both start at 1'] + lines +
['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState()
nesting_state = _NestingState()
ResetNolintSuppressions()
CheckForCopyright(filename, lines, error)
if file_extension == 'h':
CheckForHeaderGuard(filename, lines, error)
RemoveMultiLineComments(filename, lines, error)
clean_lines = CleansedLines(lines)
for line in xrange(clean_lines.NumLines()):
ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions)
nesting_state.CheckClassFinished(filename, error)
CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
# We check here rather than inside ProcessLine so that we see raw
# lines rather than "cleaned" lines.
CheckForUnicodeReplacementCharacters(filename, lines, error)
CheckForNewlineAtEOF(filename, lines, error)
def ProcessFile(filename, vlevel, extra_check_functions=[]):
"""Does google-lint on a single file.
Args:
filename: The name of the file to parse.
vlevel: The level of errors to report. Every error of confidence
>= verbose_level will be reported. 0 is a good default.
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
_SetVerboseLevel(vlevel)
try:
# Support the UNIX convention of using "-" for stdin. Note that
# we are not opening the file with universal newline support
# (which codecs doesn't support anyway), so the resulting lines do
# contain trailing '\r' characters if we are reading a file that
# has CRLF endings.
# If after the split a trailing '\r' is present, it is removed
# below. If it is not expected to be present (i.e. os.linesep !=
# '\r\n' as in Windows), a warning is issued below if this file
# is processed.
if filename == '-':
lines = codecs.StreamReaderWriter(sys.stdin,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace').read().split('\n')
else:
lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
carriage_return_found = False
# Remove trailing '\r'.
for linenum in range(len(lines)):
if lines[linenum].endswith('\r'):
lines[linenum] = lines[linenum].rstrip('\r')
carriage_return_found = True
except IOError:
sys.stderr.write(
"Skipping input '%s': Can't open for reading\n" % filename)
return
# Note, if no dot is found, this will give the entire filename as the ext.
file_extension = filename[filename.rfind('.') + 1:]
# When reading from stdin, the extension is unknown, so no cpplint tests
# should rely on the extension.
if (filename != '-' and file_extension != 'cc' and file_extension != 'h'
and file_extension != 'cpp'):
sys.stderr.write('Ignoring %s; not a .cc or .h file\n' % filename)
else:
ProcessFileData(filename, file_extension, lines, Error,
extra_check_functions)
if carriage_return_found and os.linesep != '\r\n':
# Use 0 for linenum since outputting only one error for potentially
# several lines.
Error(filename, 0, 'whitespace/newline', 1,
          'One or more unexpected \\r (^M) found; '
          'better to use only a \\n')
sys.stderr.write('Done processing %s\n' % filename)
def PrintUsage(message):
"""Prints a brief usage string and exits, optionally with an error message.
Args:
message: The optional error message.
"""
sys.stderr.write(_USAGE)
if message:
sys.exit('\nFATAL ERROR: ' + message)
else:
sys.exit(1)
def PrintCategories():
"""Prints a list of all the error-categories used by error messages.
These are the categories used to filter messages via --filter.
"""
sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
sys.exit(0)
def ParseArguments(args):
"""Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
    args: The command line arguments.
Returns:
The list of filenames to lint.
"""
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
'counting=',
'filter=',
'root='])
except getopt.GetoptError:
PrintUsage('Invalid arguments.')
verbosity = _VerboseLevel()
output_format = _OutputFormat()
filters = ''
counting_style = ''
for (opt, val) in opts:
if opt == '--help':
PrintUsage(None)
elif opt == '--output':
      if val not in ('emacs', 'vs7', 'eclipse'):
PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
output_format = val
elif opt == '--verbose':
verbosity = int(val)
elif opt == '--filter':
filters = val
if not filters:
PrintCategories()
elif opt == '--counting':
if val not in ('total', 'toplevel', 'detailed'):
PrintUsage('Valid counting options are total, toplevel, and detailed')
counting_style = val
elif opt == '--root':
global _root
_root = val
if not filenames:
PrintUsage('No files were specified.')
_SetOutputFormat(output_format)
_SetVerboseLevel(verbosity)
_SetFilters(filters)
_SetCountingStyle(counting_style)
return filenames
def main():
filenames = ParseArguments(sys.argv[1:])
# Change stderr to write with replacement characters so we don't die
# if we try to print something containing non-ASCII characters.
sys.stderr = codecs.StreamReaderWriter(sys.stderr,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace')
_cpplint_state.ResetErrorCounts()
for filename in filenames:
ProcessFile(filename, _cpplint_state.verbose_level)
_cpplint_state.PrintErrorCounts()
sys.exit(_cpplint_state.error_count > 0)
if __name__ == '__main__':
main()
| 158,908 | 38.480497 | 86 | py |
LowFat | LowFat-master/llvm-4.0.0.src/projects/compiler-rt/lib/sanitizer_common/scripts/litlint.py | #!/usr/bin/env python
#
# litlint
#
# Ensure RUN commands in lit tests are free of common errors.
#
# If any errors are detected, litlint returns a nonzero exit code.
#
import optparse
import re
import sys
# Compile regex once for all files
runRegex = re.compile(r'(?<!-o)(?<!%run) %t\s')
def LintLine(s):
""" Validate a line
Args:
s: str, the line to validate
Returns:
Returns an error message and a 1-based column number if an error was
detected, otherwise (None, None).
"""
# Check that RUN command can be executed with an emulator
m = runRegex.search(s)
if m:
start, end = m.span()
return ('missing %run before %t', start + 2)
# No errors
return (None, None)
def LintFile(p):
""" Check that each RUN command can be executed with an emulator
Args:
p: str, valid path to a file
Returns:
The number of errors detected.
"""
errs = 0
with open(p, 'r') as f:
for i, s in enumerate(f.readlines(), start=1):
msg, col = LintLine(s)
      if msg is not None:
errs += 1
errorMsg = 'litlint: {}:{}:{}: error: {}.\n{}{}\n'
arrow = (col-1) * ' ' + '^'
sys.stderr.write(errorMsg.format(p, i, col, msg, s, arrow))
return errs
if __name__ == "__main__":
# Parse args
parser = optparse.OptionParser()
parser.add_option('--filter') # ignored
(options, filenames) = parser.parse_args()
# Lint each file
errs = 0
for p in filenames:
errs += LintFile(p)
# If errors, return nonzero
if errs > 0:
sys.exit(1)
| 1,529 | 19.958904 | 72 | py |
LowFat | LowFat-master/llvm-4.0.0.src/projects/compiler-rt/lib/sanitizer_common/scripts/gen_dynamic_list.py | #!/usr/bin/env python
#===- lib/sanitizer_common/scripts/gen_dynamic_list.py ---------------------===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
#
# Generates the list of functions that should be exported from sanitizer
# runtimes. The output format is recognized by --dynamic-list linker option.
# Usage:
# gen_dynamic_list.py libclang_rt.*san*.a [ files ... ]
#
#===------------------------------------------------------------------------===#
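# For example, a runtime defining __interceptor_malloc and malloc would yield
# (without --version-list) output of roughly this shape:
#   {
#     __interceptor_malloc;
#     malloc;
#   };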
import argparse
import os
import re
import subprocess
import sys
import platform
new_delete = set([
'_Znam', '_ZnamRKSt9nothrow_t', # operator new[](unsigned long)
'_Znwm', '_ZnwmRKSt9nothrow_t', # operator new(unsigned long)
'_Znaj', '_ZnajRKSt9nothrow_t', # operator new[](unsigned int)
'_Znwj', '_ZnwjRKSt9nothrow_t', # operator new(unsigned int)
'_ZdaPv', '_ZdaPvRKSt9nothrow_t', # operator delete[](void *)
'_ZdlPv', '_ZdlPvRKSt9nothrow_t', # operator delete(void *)
'_ZdaPvm', # operator delete[](void*, unsigned long)
'_ZdlPvm', # operator delete(void*, unsigned long)
'_ZdaPvj', # operator delete[](void*, unsigned int)
'_ZdlPvj', # operator delete(void*, unsigned int)
])
versioned_functions = set(['memcpy', 'pthread_attr_getaffinity_np',
'pthread_cond_broadcast',
'pthread_cond_destroy', 'pthread_cond_init',
'pthread_cond_signal', 'pthread_cond_timedwait',
'pthread_cond_wait', 'realpath',
'sched_getaffinity'])
def get_global_functions(library):
functions = []
nm = os.environ.get('NM', 'nm')
nm_proc = subprocess.Popen([nm, library], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
nm_out = nm_proc.communicate()[0].decode().split('\n')
if nm_proc.returncode != 0:
raise subprocess.CalledProcessError(nm_proc.returncode, nm)
func_symbols = ['T', 'W']
# On PowerPC, nm prints function descriptors from .data section.
if platform.uname()[4] in ["powerpc", "ppc64"]:
func_symbols += ['D']
for line in nm_out:
cols = line.split(' ')
    if len(cols) == 3 and cols[1] in func_symbols:
functions.append(cols[2])
return functions
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--version-list', action='store_true')
parser.add_argument('--extra', default=[], action='append')
parser.add_argument('libraries', default=[], nargs='+')
args = parser.parse_args()
result = []
all_functions = []
for library in args.libraries:
all_functions.extend(get_global_functions(library))
function_set = set(all_functions)
for func in all_functions:
# Export new/delete operators.
if func in new_delete:
result.append(func)
continue
# Export interceptors.
match = re.match('__interceptor_(.*)', func)
if match:
result.append(func)
# We have to avoid exporting the interceptors for versioned library
# functions due to gold internal error.
orig_name = match.group(1)
if orig_name in function_set and (args.version_list or orig_name not in versioned_functions):
result.append(orig_name)
continue
# Export sanitizer interface functions.
if re.match('__sanitizer_(.*)', func):
result.append(func)
# Additional exported functions from files.
  for fname in args.extra:
    with open(fname, 'r') as f:
      for line in f:
        result.append(line.rstrip())
# Print the resulting list in the format recognized by ld.
print('{')
if args.version_list:
print('global:')
result.sort()
for f in result:
print(u' %s;' % f)
if args.version_list:
print('local:')
print(' *;')
print('};')
if __name__ == '__main__':
main(sys.argv)
| 4,250 | 36.619469 | 99 | py |
LowFat | LowFat-master/llvm-4.0.0.src/projects/compiler-rt/lib/dfsan/scripts/build-libc-list.py | #!/usr/bin/env python
#===- lib/dfsan/scripts/build-libc-list.py ---------------------------------===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
# The purpose of this script is to identify every function symbol in a set of
# libraries (in this case, libc and libgcc) so that they can be marked as
# uninstrumented, thus allowing the instrumentation pass to treat calls to those
# functions correctly.
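# Each symbol is emitted as one ABI-list line, e.g. 'fun:malloc=uninstrumented'.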
import os
import subprocess
import sys
from optparse import OptionParser
def defined_function_list(path):
  functions = []
  readelf_proc = subprocess.Popen(['readelf', '-s', '-W', path],
                                  stdout=subprocess.PIPE)
readelf = readelf_proc.communicate()[0].split('\n')
if readelf_proc.returncode != 0:
raise subprocess.CalledProcessError(readelf_proc.returncode, 'readelf')
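  # The fixed slices below assume the column layout of 'readelf -s -W':
  # symbol type at columns 31-34 (31-35 for IFUNC), binding at 39-43,
  # section index at 55-57, and the symbol name from column 59 onwards.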
for line in readelf:
if (line[31:35] == 'FUNC' or line[31:36] == 'IFUNC') and \
line[39:44] != 'LOCAL' and \
line[55:58] != 'UND':
function_name = line[59:].split('@')[0]
functions.append(function_name)
return functions
p = OptionParser()
p.add_option('--libc-dso-path', metavar='PATH',
help='path to libc DSO directory',
default='/lib/x86_64-linux-gnu')
p.add_option('--libc-archive-path', metavar='PATH',
help='path to libc archive directory',
default='/usr/lib/x86_64-linux-gnu')
p.add_option('--libgcc-dso-path', metavar='PATH',
help='path to libgcc DSO directory',
default='/lib/x86_64-linux-gnu')
p.add_option('--libgcc-archive-path', metavar='PATH',
help='path to libgcc archive directory',
default='/usr/lib/gcc/x86_64-linux-gnu/4.6')
p.add_option('--with-libstdcxx', action='store_true',
dest='with_libstdcxx',
help='include libstdc++ in the list (inadvisable)')
p.add_option('--libstdcxx-dso-path', metavar='PATH',
help='path to libstdc++ DSO directory',
default='/usr/lib/x86_64-linux-gnu')
(options, args) = p.parse_args()
libs = [os.path.join(options.libc_dso_path, name) for name in
['ld-linux-x86-64.so.2',
'libanl.so.1',
'libBrokenLocale.so.1',
'libcidn.so.1',
'libcrypt.so.1',
'libc.so.6',
'libdl.so.2',
'libm.so.6',
'libnsl.so.1',
'libpthread.so.0',
'libresolv.so.2',
'librt.so.1',
'libthread_db.so.1',
'libutil.so.1']]
libs += [os.path.join(options.libc_archive_path, name) for name in
['libc_nonshared.a',
'libpthread_nonshared.a']]
libs.append(os.path.join(options.libgcc_dso_path, 'libgcc_s.so.1'))
libs.append(os.path.join(options.libgcc_archive_path, 'libgcc.a'))
if options.with_libstdcxx:
libs.append(os.path.join(options.libstdcxx_dso_path, 'libstdc++.so.6'))
functions = []
for l in libs:
if os.path.exists(l):
functions += defined_function_list(l)
else:
print >> sys.stderr, 'warning: library %s not found' % l
functions = list(set(functions))
functions.sort()
for f in functions:
print 'fun:%s=uninstrumented' % f
| 3,335 | 33.391753 | 80 | py |
LowFat | LowFat-master/llvm-4.0.0.src/test/tools/llvm-readobj/Inputs/relocs.py | #!/usr/bin/env python
# Generates ELF, COFF and MachO object files for different architectures
# containing all relocations:
#
# ELF: i386, x86_64, ppc64, aarch64, arm, mips, mips64el
# COFF: i386, x86_64
# MachO: i386, x86_64, arm
# (see end of file for triples)
#
# To simplify generation, object files are generated with just the proper
# number of relocations through repeated instructions. Afterwards, the
# relocations in the object file are patched to their proper value.
import struct
import subprocess
import sys
class EnumType(type):
def __init__(self, name, bases = (), attributes = {}):
super(EnumType, self).__init__(name, bases, attributes)
type.__setattr__(self, '_map', {})
type.__setattr__(self, '_nameMap', {})
for symbol in attributes:
if symbol.startswith('__') or symbol.endswith('__'):
continue
value = attributes[symbol]
# MyEnum.symbol == value
type.__setattr__(self, symbol, value)
self._nameMap[symbol] = value
# The first symbol with the given value is authoritative.
if not (value in self._map):
# MyEnum[value] == symbol
self._map[value] = symbol
# Not supported (Enums are immutable).
def __setattr__(self, name, value):
    raise NotImplementedError("Enums are immutable")
# Not supported (Enums are immutable).
def __delattr__(self, name):
    raise NotImplementedError("Enums are immutable")
# Gets the enum symbol for the specified value.
def __getitem__(self, value):
symbol = self._map.get(value)
if symbol is None:
raise KeyError, value
return symbol
# Gets the enum symbol for the specified value or none.
def lookup(self, value):
symbol = self._map.get(value)
return symbol
# Not supported (Enums are immutable).
def __setitem__(self, value, symbol):
    raise NotImplementedError("Enums are immutable")
# Not supported (Enums are immutable).
def __delitem__(self, value):
    raise NotImplementedError("Enums are immutable")
def entries(self):
# sort by (value, name)
def makeKey(item):
return (item[1], item[0])
e = []
for pair in sorted(self._nameMap.iteritems(), key=makeKey):
e.append(pair)
return e
def __iter__(self):
for e in self.entries():
yield e
Enum = EnumType('Enum', (), {})
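# Example: given 'class R(Enum): FOO = 1', R.FOO == 1, R[1] == 'FOO', and
# R.entries() returns [('FOO', 1)], sorted by (value, name).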
class BinaryReader:
def __init__(self, path):
self.file = open(path, "r+b", 0)
self.isLSB = None
self.is64Bit = None
self.isN64 = False
def tell(self):
return self.file.tell()
def seek(self, pos):
self.file.seek(pos)
def read(self, N):
data = self.file.read(N)
if len(data) != N:
raise ValueError, "Out of data!"
return data
def int8(self):
return ord(self.read(1))
def uint8(self):
return ord(self.read(1))
def int16(self):
return struct.unpack('><'[self.isLSB] + 'h', self.read(2))[0]
def uint16(self):
return struct.unpack('><'[self.isLSB] + 'H', self.read(2))[0]
def int32(self):
return struct.unpack('><'[self.isLSB] + 'i', self.read(4))[0]
def uint32(self):
return struct.unpack('><'[self.isLSB] + 'I', self.read(4))[0]
def int64(self):
return struct.unpack('><'[self.isLSB] + 'q', self.read(8))[0]
def uint64(self):
return struct.unpack('><'[self.isLSB] + 'Q', self.read(8))[0]
def writeUInt8(self, value):
self.file.write(struct.pack('><'[self.isLSB] + 'B', value))
def writeUInt16(self, value):
self.file.write(struct.pack('><'[self.isLSB] + 'H', value))
def writeUInt32(self, value):
self.file.write(struct.pack('><'[self.isLSB] + 'I', value))
def writeUInt64(self, value):
self.file.write(struct.pack('><'[self.isLSB] + 'Q', value))
def word(self):
if self.is64Bit:
return self.uint64()
else:
return self.uint32()
def writeWord(self, value):
if self.is64Bit:
self.writeUInt64(value)
else:
self.writeUInt32(value)
class StringTable:
def __init__(self, strings):
self.string_table = strings
def __getitem__(self, index):
end = self.string_table.index('\x00', index)
return self.string_table[index:end]
class ElfSection:
def __init__(self, f):
self.sh_name = f.uint32()
self.sh_type = f.uint32()
self.sh_flags = f.word()
self.sh_addr = f.word()
self.sh_offset = f.word()
self.sh_size = f.word()
self.sh_link = f.uint32()
self.sh_info = f.uint32()
self.sh_addralign = f.word()
self.sh_entsize = f.word()
def patch(self, f, relocs):
if self.sh_type == 4 or self.sh_type == 9: # SHT_RELA / SHT_REL
self.patchRelocs(f, relocs)
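  # ELF packs the symbol index and relocation type together in r_info:
  #   64-bit: r_info = (r_sym << 32) | (r_type & 0xffffffff)
  #   32-bit: r_info = (r_sym << 8) | (r_type & 0xff)
  # patchRelocs rewrites only the type bits and keeps the symbol index.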
def patchRelocs(self, f, relocs):
entries = self.sh_size // self.sh_entsize
for index in range(entries):
f.seek(self.sh_offset + index * self.sh_entsize)
r_offset = f.word()
if index < len(relocs):
ri = index
else:
ri = 0
if f.isN64:
r_sym = f.uint32()
r_ssym = f.uint8()
f.seek(f.tell())
f.writeUInt8(relocs[ri][1])
f.writeUInt8(relocs[ri][1])
f.writeUInt8(relocs[ri][1])
else:
pos = f.tell()
r_info = f.word()
r_type = relocs[ri][1]
if f.is64Bit:
r_info = (r_info & 0xFFFFFFFF00000000) | (r_type & 0xFFFFFFFF)
else:
r_info = (r_info & 0xFF00) | (r_type & 0xFF)
print(" %s" % relocs[ri][0])
f.seek(pos)
f.writeWord(r_info)
class CoffSection:
def __init__(self, f):
self.raw_name = f.read(8)
self.virtual_size = f.uint32()
self.virtual_address = f.uint32()
self.raw_data_size = f.uint32()
self.pointer_to_raw_data = f.uint32()
self.pointer_to_relocations = f.uint32()
self.pointer_to_line_numbers = f.uint32()
self.relocation_count = f.uint16()
self.line_number_count = f.uint16()
self.characteristics = f.uint32()
def compileAsm(filename, triple, src):
cmd = ["llvm-mc", "-triple=" + triple, "-filetype=obj", "-o", filename]
print(" Running: " + " ".join(cmd))
p = subprocess.Popen(cmd, stdin=subprocess.PIPE)
p.communicate(input=src)
p.wait()
def compileIR(filename, triple, src):
cmd = ["llc", "-mtriple=" + triple, "-filetype=obj", "-o", filename]
print(" Running: " + " ".join(cmd))
p = subprocess.Popen(cmd, stdin=subprocess.PIPE)
p.communicate(input=src)
p.wait()
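# craftElf and craftMacho accept either a plain assembly snippet, repeated
# once per relocation, or a tuple form for templated input, e.g.
#   ("@t = thread_local global i32 0, align 4",
#    "define i32* @f{0}() nounwind {{ ret i32* @t }}", 2)
# where the last element is how many relocations each instance produces
# (craftMacho's tuple additionally starts with an 'asm' or 'ir' tag).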
def craftElf(filename, triple, relocs, dummyReloc):
print("Crafting " + filename + " for " + triple)
if type(dummyReloc) is tuple:
preSrc, dummyReloc, relocsPerDummy = dummyReloc
src = preSrc + "\n"
for i in range((len(relocs) + relocsPerDummy - 1) / relocsPerDummy):
src += dummyReloc.format(i) + "\n"
compileIR(filename, triple, src)
else:
src = (dummyReloc + "\n") * len(relocs)
compileAsm(filename, triple, src)
print(" Patching relocations...")
patchElf(filename, relocs)
def patchElf(path, relocs):
f = BinaryReader(path)
magic = f.read(4)
assert magic == '\x7FELF'
fileclass = f.uint8()
if fileclass == 1:
f.is64Bit = False
elif fileclass == 2:
f.is64Bit = True
else:
raise ValueError, "Unknown file class %x" % fileclass
byteordering = f.uint8()
if byteordering == 1:
f.isLSB = True
elif byteordering == 2:
f.isLSB = False
else:
raise ValueError, "Unknown byte ordering %x" % byteordering
f.seek(18)
e_machine = f.uint16()
if e_machine == 0x0008 and f.is64Bit: # EM_MIPS && 64 bit
f.isN64 = True
e_version = f.uint32()
e_entry = f.word()
e_phoff = f.word()
e_shoff = f.word()
e_flags = f.uint32()
e_ehsize = f.uint16()
e_phentsize = f.uint16()
e_phnum = f.uint16()
e_shentsize = f.uint16()
e_shnum = f.uint16()
e_shstrndx = f.uint16()
sections = []
for index in range(e_shnum):
f.seek(e_shoff + index * e_shentsize)
s = ElfSection(f)
sections.append(s)
f.seek(sections[e_shstrndx].sh_offset)
shstrtab = StringTable(f.read(sections[e_shstrndx].sh_size))
strtab = None
for section in sections:
if shstrtab[section.sh_name] == ".strtab":
f.seek(section.sh_offset)
strtab = StringTable(f.read(section.sh_size))
break
for index in range(e_shnum):
sections[index].patch(f, relocs)
def craftCoff(filename, triple, relocs, dummyReloc):
print("Crafting " + filename + " for " + triple)
src = (dummyReloc + "\n") * len(relocs)
compileAsm(filename, triple, src)
print(" Patching relocations...")
patchCoff(filename, relocs)
def patchCoff(path, relocs):
f = BinaryReader(path)
f.isLSB = True
machine_type = f.uint16()
section_count = f.uint16()
# Zero out timestamp to prevent churn when regenerating COFF files.
f.writeUInt32(0)
f.seek(20)
sections = [CoffSection(f) for idx in range(section_count)]
section = sections[0]
f.seek(section.pointer_to_relocations)
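  # Each COFF relocation record is 10 bytes: VirtualAddress (4 bytes),
  # SymbolTableIndex (4 bytes) and Type (2 bytes); reading the first two
  # fields leaves the file positioned on the type, which is overwritten.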
for i in range(section.relocation_count):
virtual_addr = f.uint32()
symtab_idx = f.uint32()
print(" %s" % relocs[i][0])
f.writeUInt16(relocs[i][1])
def craftMacho(filename, triple, relocs, dummyReloc):
print("Crafting " + filename + " for " + triple)
if type(dummyReloc) is tuple:
srcType, preSrc, dummyReloc, relocsPerDummy = dummyReloc
src = preSrc + "\n"
for i in range((len(relocs) + relocsPerDummy - 1) / relocsPerDummy):
src += dummyReloc.format(i) + "\n"
if srcType == "asm":
compileAsm(filename, triple, src)
elif srcType == "ir":
compileIR(filename, triple, src)
else:
src = (dummyReloc + "\n") * len(relocs)
compileAsm(filename, triple, src)
print(" Patching relocations...")
patchMacho(filename, relocs)
def patchMacho(filename, relocs):
f = BinaryReader(filename)
magic = f.read(4)
if magic == '\xFE\xED\xFA\xCE':
f.isLSB, f.is64Bit = False, False
elif magic == '\xCE\xFA\xED\xFE':
f.isLSB, f.is64Bit = True, False
elif magic == '\xFE\xED\xFA\xCF':
f.isLSB, f.is64Bit = False, True
elif magic == '\xCF\xFA\xED\xFE':
f.isLSB, f.is64Bit = True, True
else:
raise ValueError,"Not a Mach-O object file: %r (bad magic)" % path
cputype = f.uint32()
cpusubtype = f.uint32()
filetype = f.uint32()
numLoadCommands = f.uint32()
loadCommandsSize = f.uint32()
flag = f.uint32()
if f.is64Bit:
reserved = f.uint32()
start = f.tell()
for i in range(numLoadCommands):
patchMachoLoadCommand(f, relocs)
if f.tell() - start != loadCommandsSize:
raise ValueError,"%s: warning: invalid load commands size: %r" % (
sys.argv[0], loadCommandsSize)
def patchMachoLoadCommand(f, relocs):
start = f.tell()
cmd = f.uint32()
cmdSize = f.uint32()
if cmd == 1:
patchMachoSegmentLoadCommand(f, relocs)
elif cmd == 25:
patchMachoSegmentLoadCommand(f, relocs)
else:
f.read(cmdSize - 8)
if f.tell() - start != cmdSize:
raise ValueError,"%s: warning: invalid load command size: %r" % (
sys.argv[0], cmdSize)
def patchMachoSegmentLoadCommand(f, relocs):
segment_name = f.read(16)
vm_addr = f.word()
vm_size = f.word()
file_offset = f.word()
file_size = f.word()
maxprot = f.uint32()
initprot = f.uint32()
numSections = f.uint32()
flags = f.uint32()
for i in range(numSections):
patchMachoSection(f, relocs)
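# In a Mach-O relocation_info record the r_type field occupies the top four
# bits of the second 32-bit word (after r_symbolnum/r_pcrel/r_length/
# r_extern), hence the 0x0FFFFFFF mask and the shift by 28 below.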
def patchMachoSection(f, relocs):
section_name = f.read(16)
segment_name = f.read(16)
address = f.word()
size = f.word()
offset = f.uint32()
alignment = f.uint32()
relocOffset = f.uint32()
numReloc = f.uint32()
flags = f.uint32()
reserved1 = f.uint32()
reserved2 = f.uint32()
if f.is64Bit:
reserved3 = f.uint32()
prev_pos = f.tell()
f.seek(relocOffset)
for i in range(numReloc):
    ri = i if i < len(relocs) else 0
print(" %s" % relocs[ri][0])
word1 = f.uint32()
pos = f.tell()
value = f.uint32()
f.seek(pos)
value = (value & 0x0FFFFFFF) | ((relocs[ri][1] & 0xF) << 28)
f.writeUInt32(value)
f.seek(prev_pos)
class Relocs_Elf_X86_64(Enum):
R_X86_64_NONE = 0
R_X86_64_64 = 1
R_X86_64_PC32 = 2
R_X86_64_GOT32 = 3
R_X86_64_PLT32 = 4
R_X86_64_COPY = 5
R_X86_64_GLOB_DAT = 6
R_X86_64_JUMP_SLOT = 7
R_X86_64_RELATIVE = 8
R_X86_64_GOTPCREL = 9
R_X86_64_32 = 10
R_X86_64_32S = 11
R_X86_64_16 = 12
R_X86_64_PC16 = 13
R_X86_64_8 = 14
R_X86_64_PC8 = 15
R_X86_64_DTPMOD64 = 16
R_X86_64_DTPOFF64 = 17
R_X86_64_TPOFF64 = 18
R_X86_64_TLSGD = 19
R_X86_64_TLSLD = 20
R_X86_64_DTPOFF32 = 21
R_X86_64_GOTTPOFF = 22
R_X86_64_TPOFF32 = 23
R_X86_64_PC64 = 24
R_X86_64_GOTOFF64 = 25
R_X86_64_GOTPC32 = 26
R_X86_64_GOT64 = 27
R_X86_64_GOTPCREL64 = 28
R_X86_64_GOTPC64 = 29
R_X86_64_GOTPLT64 = 30
R_X86_64_PLTOFF64 = 31
R_X86_64_SIZE32 = 32
R_X86_64_SIZE64 = 33
R_X86_64_GOTPC32_TLSDESC = 34
R_X86_64_TLSDESC_CALL = 35
R_X86_64_TLSDESC = 36
R_X86_64_IRELATIVE = 37
class Relocs_Elf_i386(Enum):
R_386_NONE = 0
R_386_32 = 1
R_386_PC32 = 2
R_386_GOT32 = 3
R_386_PLT32 = 4
R_386_COPY = 5
R_386_GLOB_DAT = 6
R_386_JUMP_SLOT = 7
R_386_RELATIVE = 8
R_386_GOTOFF = 9
R_386_GOTPC = 10
R_386_32PLT = 11
R_386_TLS_TPOFF = 14
R_386_TLS_IE = 15
R_386_TLS_GOTIE = 16
R_386_TLS_LE = 17
R_386_TLS_GD = 18
R_386_TLS_LDM = 19
R_386_16 = 20
R_386_PC16 = 21
R_386_8 = 22
R_386_PC8 = 23
R_386_TLS_GD_32 = 24
R_386_TLS_GD_PUSH = 25
R_386_TLS_GD_CALL = 26
R_386_TLS_GD_POP = 27
R_386_TLS_LDM_32 = 28
R_386_TLS_LDM_PUSH = 29
R_386_TLS_LDM_CALL = 30
R_386_TLS_LDM_POP = 31
R_386_TLS_LDO_32 = 32
R_386_TLS_IE_32 = 33
R_386_TLS_LE_32 = 34
R_386_TLS_DTPMOD32 = 35
R_386_TLS_DTPOFF32 = 36
R_386_TLS_TPOFF32 = 37
R_386_TLS_GOTDESC = 39
R_386_TLS_DESC_CALL = 40
R_386_TLS_DESC = 41
R_386_IRELATIVE = 42
R_386_NUM = 43
class Relocs_Elf_PPC32(Enum):
R_PPC_NONE = 0
R_PPC_ADDR32 = 1
R_PPC_ADDR24 = 2
R_PPC_ADDR16 = 3
R_PPC_ADDR16_LO = 4
R_PPC_ADDR16_HI = 5
R_PPC_ADDR16_HA = 6
R_PPC_ADDR14 = 7
R_PPC_ADDR14_BRTAKEN = 8
R_PPC_ADDR14_BRNTAKEN = 9
R_PPC_REL24 = 10
R_PPC_REL14 = 11
R_PPC_REL14_BRTAKEN = 12
R_PPC_REL14_BRNTAKEN = 13
R_PPC_REL32 = 26
R_PPC_TPREL16_LO = 70
R_PPC_TPREL16_HA = 72
class Relocs_Elf_PPC64(Enum):
R_PPC64_NONE = 0
R_PPC64_ADDR32 = 1
R_PPC64_ADDR16_LO = 4
R_PPC64_ADDR16_HI = 5
R_PPC64_ADDR14 = 7
R_PPC64_REL24 = 10
R_PPC64_REL32 = 26
R_PPC64_ADDR64 = 38
R_PPC64_ADDR16_HIGHER = 39
R_PPC64_ADDR16_HIGHEST = 41
R_PPC64_REL64 = 44
R_PPC64_TOC16 = 47
R_PPC64_TOC16_LO = 48
R_PPC64_TOC16_HA = 50
R_PPC64_TOC = 51
R_PPC64_ADDR16_DS = 56
R_PPC64_ADDR16_LO_DS = 57
R_PPC64_TOC16_DS = 63
R_PPC64_TOC16_LO_DS = 64
R_PPC64_TLS = 67
R_PPC64_TPREL16_LO = 70
R_PPC64_TPREL16_HA = 72
R_PPC64_DTPREL16_LO = 75
R_PPC64_DTPREL16_HA = 77
R_PPC64_GOT_TLSGD16_LO = 80
R_PPC64_GOT_TLSGD16_HA = 82
R_PPC64_GOT_TLSLD16_LO = 84
R_PPC64_GOT_TLSLD16_HA = 86
R_PPC64_GOT_TPREL16_LO_DS = 88
R_PPC64_GOT_TPREL16_HA = 90
R_PPC64_TLSGD = 107
R_PPC64_TLSLD = 108
class Relocs_Elf_AArch64(Enum):
R_AARCH64_NONE = 0
R_AARCH64_ABS64 = 0x101
R_AARCH64_ABS32 = 0x102
R_AARCH64_ABS16 = 0x103
R_AARCH64_PREL64 = 0x104
R_AARCH64_PREL32 = 0x105
R_AARCH64_PREL16 = 0x106
R_AARCH64_MOVW_UABS_G0 = 0x107
R_AARCH64_MOVW_UABS_G0_NC = 0x108
R_AARCH64_MOVW_UABS_G1 = 0x109
R_AARCH64_MOVW_UABS_G1_NC = 0x10a
R_AARCH64_MOVW_UABS_G2 = 0x10b
R_AARCH64_MOVW_UABS_G2_NC = 0x10c
R_AARCH64_MOVW_UABS_G3 = 0x10d
R_AARCH64_MOVW_SABS_G0 = 0x10e
R_AARCH64_MOVW_SABS_G1 = 0x10f
R_AARCH64_MOVW_SABS_G2 = 0x110
R_AARCH64_LD_PREL_LO19 = 0x111
R_AARCH64_ADR_PREL_LO21 = 0x112
R_AARCH64_ADR_PREL_PG_HI21 = 0x113
R_AARCH64_ADR_PREL_PG_HI21_NC = 0x114
R_AARCH64_ADD_ABS_LO12_NC = 0x115
R_AARCH64_LDST8_ABS_LO12_NC = 0x116
R_AARCH64_TSTBR14 = 0x117
R_AARCH64_CONDBR19 = 0x118
R_AARCH64_JUMP26 = 0x11a
R_AARCH64_CALL26 = 0x11b
R_AARCH64_LDST16_ABS_LO12_NC = 0x11c
R_AARCH64_LDST32_ABS_LO12_NC = 0x11d
R_AARCH64_LDST64_ABS_LO12_NC = 0x11e
R_AARCH64_MOVW_PREL_G0 = 0x11f
R_AARCH64_MOVW_PREL_G0_NC = 0x120
R_AARCH64_MOVW_PREL_G1 = 0x121
R_AARCH64_MOVW_PREL_G1_NC = 0x122
R_AARCH64_MOVW_PREL_G2 = 0x123
R_AARCH64_MOVW_PREL_G2_NC = 0x124
R_AARCH64_MOVW_PREL_G3 = 0x125
R_AARCH64_LDST128_ABS_LO12_NC = 0x12b
R_AARCH64_MOVW_GOTOFF_G0 = 0x12c
R_AARCH64_MOVW_GOTOFF_G0_NC = 0x12d
R_AARCH64_MOVW_GOTOFF_G1 = 0x12e
R_AARCH64_MOVW_GOTOFF_G1_NC = 0x12f
R_AARCH64_MOVW_GOTOFF_G2 = 0x130
R_AARCH64_MOVW_GOTOFF_G2_NC = 0x131
R_AARCH64_MOVW_GOTOFF_G3 = 0x132
R_AARCH64_GOTREL64 = 0x133
R_AARCH64_GOTREL32 = 0x134
R_AARCH64_GOT_LD_PREL19 = 0x135
R_AARCH64_LD64_GOTOFF_LO15 = 0x136
R_AARCH64_ADR_GOT_PAGE = 0x137
R_AARCH64_LD64_GOT_LO12_NC = 0x138
R_AARCH64_LD64_GOTPAGE_LO15 = 0x139
R_AARCH64_TLSGD_ADR_PREL21 = 0x200
R_AARCH64_TLSGD_ADR_PAGE21 = 0x201
R_AARCH64_TLSGD_ADD_LO12_NC = 0x202
R_AARCH64_TLSGD_MOVW_G1 = 0x203
R_AARCH64_TLSGD_MOVW_G0_NC = 0x204
R_AARCH64_TLSLD_ADR_PREL21 = 0x205
R_AARCH64_TLSLD_ADR_PAGE21 = 0x206
R_AARCH64_TLSLD_ADD_LO12_NC = 0x207
R_AARCH64_TLSLD_MOVW_G1 = 0x208
R_AARCH64_TLSLD_MOVW_G0_NC = 0x209
R_AARCH64_TLSLD_LD_PREL19 = 0x20a
R_AARCH64_TLSLD_MOVW_DTPREL_G2 = 0x20b
R_AARCH64_TLSLD_MOVW_DTPREL_G1 = 0x20c
R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC = 0x20d
R_AARCH64_TLSLD_MOVW_DTPREL_G0 = 0x20e
R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC = 0x20f
R_AARCH64_TLSLD_ADD_DTPREL_HI12 = 0x210
R_AARCH64_TLSLD_ADD_DTPREL_LO12 = 0x211
R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC = 0x212
R_AARCH64_TLSLD_LDST8_DTPREL_LO12 = 0x213
R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC = 0x214
R_AARCH64_TLSLD_LDST16_DTPREL_LO12 = 0x215
R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC = 0x216
R_AARCH64_TLSLD_LDST32_DTPREL_LO12 = 0x217
R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC = 0x218
R_AARCH64_TLSLD_LDST64_DTPREL_LO12 = 0x219
R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC = 0x21a
R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 = 0x21b
R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC = 0x21c
R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 = 0x21d
R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC = 0x21e
R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 = 0x21f
R_AARCH64_TLSLE_MOVW_TPREL_G2 = 0x220
R_AARCH64_TLSLE_MOVW_TPREL_G1 = 0x221
R_AARCH64_TLSLE_MOVW_TPREL_G1_NC = 0x222
R_AARCH64_TLSLE_MOVW_TPREL_G0 = 0x223
R_AARCH64_TLSLE_MOVW_TPREL_G0_NC = 0x224
R_AARCH64_TLSLE_ADD_TPREL_HI12 = 0x225
R_AARCH64_TLSLE_ADD_TPREL_LO12 = 0x226
R_AARCH64_TLSLE_ADD_TPREL_LO12_NC = 0x227
R_AARCH64_TLSLE_LDST8_TPREL_LO12 = 0x228
R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC = 0x229
R_AARCH64_TLSLE_LDST16_TPREL_LO12 = 0x22a
R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC = 0x22b
R_AARCH64_TLSLE_LDST32_TPREL_LO12 = 0x22c
R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC = 0x22d
R_AARCH64_TLSLE_LDST64_TPREL_LO12 = 0x22e
R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC = 0x22f
R_AARCH64_TLSDESC_LD_PREL19 = 0x230
R_AARCH64_TLSDESC_ADR_PREL21 = 0x231
R_AARCH64_TLSDESC_ADR_PAGE21 = 0x232
R_AARCH64_TLSDESC_LD64_LO12_NC = 0x233
R_AARCH64_TLSDESC_ADD_LO12_NC = 0x234
R_AARCH64_TLSDESC_OFF_G1 = 0x235
R_AARCH64_TLSDESC_OFF_G0_NC = 0x236
R_AARCH64_TLSDESC_LDR = 0x237
R_AARCH64_TLSDESC_ADD = 0x238
R_AARCH64_TLSDESC_CALL = 0x239
R_AARCH64_TLSLE_LDST128_TPREL_LO12 = 0x23a
R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC = 0x23b
R_AARCH64_TLSLD_LDST128_DTPREL_LO12 = 0x23c
R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC = 0x23d
R_AARCH64_COPY = 0x400
R_AARCH64_GLOB_DAT = 0x401
R_AARCH64_JUMP_SLOT = 0x402
R_AARCH64_RELATIVE = 0x403
R_AARCH64_TLS_DTPREL64 = 0x404
R_AARCH64_TLS_DTPMOD64 = 0x405
R_AARCH64_TLS_TPREL64 = 0x406
R_AARCH64_TLSDESC = 0x407
R_AARCH64_IRELATIVE = 0x408
class Relocs_Elf_AArch64_ILP32(Enum):
R_AARCH64_P32_NONE = 0
R_AARCH64_P32_ABS32 = 1
R_AARCH64_P32_ABS16 = 2
R_AARCH64_P32_PREL32 = 3
R_AARCH64_P32_PREL16 = 4
R_AARCH64_P32_MOVW_UABS_G0 = 5
R_AARCH64_P32_MOVW_UABS_G0_NC = 6
R_AARCH64_P32_MOVW_UABS_G1 = 7
R_AARCH64_P32_MOVW_SABS_G0 = 8
R_AARCH64_P32_LD_PREL_LO19 = 9
R_AARCH64_P32_ADR_PREL_LO21 = 10
R_AARCH64_P32_ADR_PREL_PG_HI21 = 11
R_AARCH64_P32_ADD_ABS_LO12_NC = 12
R_AARCH64_P32_LDST8_ABS_LO12_NC = 13
R_AARCH64_P32_LDST16_ABS_LO12_NC = 14
R_AARCH64_P32_LDST32_ABS_LO12_NC = 15
R_AARCH64_P32_LDST64_ABS_LO12_NC = 16
R_AARCH64_P32_LDST128_ABS_LO12_NC = 17
R_AARCH64_P32_TSTBR14 = 18
R_AARCH64_P32_CONDBR19 = 19
R_AARCH64_P32_JUMP26 = 20
R_AARCH64_P32_CALL26 = 21
R_AARCH64_P32_MOVW_PREL_G0 = 22
R_AARCH64_P32_MOVW_PREL_G0_NC = 23
R_AARCH64_P32_MOVW_PREL_G1 = 24
R_AARCH64_P32_GOT_LD_PREL19 = 25
R_AARCH64_P32_ADR_GOT_PAGE = 26
R_AARCH64_P32_LD32_GOT_LO12_NC = 27
R_AARCH64_P32_LD32_GOTPAGE_LO14 = 28
R_AARCH64_P32_TLSGD_ADR_PREL21 = 80
R_AARCH64_P32_TLS_GD_ADR_PAGE21 = 81
R_AARCH64_P32_TLSGD_ADD_LO12_NC = 82
R_AARCH64_P32_TLSLD_ADR_PREL21 = 83
R_AARCH64_P32_TLDLD_ADR_PAGE21 = 84
R_AARCH64_P32_TLSLD_ADR_LO12_NC = 85
R_AARCH64_P32_TLSLD_LD_PREL19 = 86
R_AARCH64_P32_TLDLD_MOVW_DTPREL_G1 = 87
R_AARCH64_P32_TLSLD_MOVW_DTPREL_G0 = 88
R_AARCH64_P32_TLSLD_MOVW_DTPREL_G0_NC = 89
R_AARCH64_P32_TLSLD_MOVW_ADD_DTPREL_HI12 = 90
R_AARCH64_P32_TLSLD_ADD_DTPREL_LO12 = 91
R_AARCH64_P32_TLSLD_ADD_DTPREL_LO12_NC = 92
R_AARCH64_P32_TLSLD_LDST8_DTPREL_LO12 = 93
R_AARCH64_P32_TLSLD_LDST8_DTPREL_LO12_NC = 94
R_AARCH64_P32_TLSLD_LDST16_DTPREL_LO12 = 95
R_AARCH64_P32_TLSLD_LDST16_DTPREL_LO12_NC = 96
R_AARCH64_P32_TLSLD_LDST32_DTPREL_LO12 = 97
R_AARCH64_P32_TLSLD_LDST32_DTPREL_LO12_NC = 98
R_AARCH64_P32_TLSLD_LDST64_DTPREL_LO12 = 99
R_AARCH64_P32_TLSLD_LDST64_DTPREL_LO12_NC = 100
R_AARCH64_P32_TLSLD_LDST128_DTPREL_LO12 = 101
R_AARCH64_P32_TLSLD_LDST128_DTPREL_LO12_NC = 102
R_AARCH64_P32_TLSIE_MOVW_GOTTPREL_PAGE21 = 103
R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC = 104
R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19 = 105
R_AARCH64_P32_TLSLE_MOVEW_TPREL_G1 = 106
R_AARCH64_P32_TLSLE_MOVW_TPREL_G0 = 107
R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC = 108
R_AARCH64_P32_TLS_MOVW_TPREL_HI12 = 109
R_AARCH64_P32_TLSLE_ADD_TPREL_LO12 = 110
R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC = 111
R_AARCH64_P32_TLSLE_LDST8_TPREL_LO12 = 112
R_AARCH64_P32_TLSLE_LDST8_TPREL_LO12_NC = 113
R_AARCH64_P32_TLSLE_LDST16_TPREL_LO12 = 114
R_AARCH64_P32_TLSLE_LDST16_TPREL_LO12_NC = 115
R_AARCH64_P32_TLSLE_LDST32_TPREL_LO12 = 116
R_AARCH64_P32_TLSLE_LDST32_TPREL_LO12_NC = 117
R_AARCH64_P32_TLSLE_LDST64_TPREL_LO12 = 118
R_AARCH64_P32_TLSLE_LDST64_TPREL_LO12_NC = 119
R_AARCH64_P32_TLSLE_LDST128_TPREL_LO12 = 120
R_AARCH64_P32_TLSLE_LDST128_TPREL_LO12_NC = 121
R_AARCH64_P32_TLSDESC_LD_PRELL19 = 122
R_AARCH64_P32_TLSDESC_ADR_PREL21 = 123
R_AARCH64_P32_TLSDESC_ADR_PAGE21 = 124
R_AARCH64_P32_TLSDESSC_LD32_LO12 = 125
R_AARCH64_P32_TLSDESC_ADD_LO12 = 126
R_AARCH64_P32_TLSDESC_CALL = 127
R_AARCH64_P32_COPY = 180
R_AARCH64_P32_GLOB_DAT = 181
R_AARCH64_P32_JUMP_SLOT = 182
R_AARCH64_P32_RELATIVE = 183
R_AARCH64_P32_TLS_DTPREL = 184
R_AARCH64_P32_TLS_DTPMOD = 185
R_AARCH64_P32_TLS_TPREL = 186
R_AARCH64_P32_TLSDESC = 187
R_AARCH64_P32_IRELATIVE = 188
class Relocs_Elf_ARM(Enum):
R_ARM_NONE = 0x00
R_ARM_PC24 = 0x01
R_ARM_ABS32 = 0x02
R_ARM_REL32 = 0x03
R_ARM_LDR_PC_G0 = 0x04
R_ARM_ABS16 = 0x05
R_ARM_ABS12 = 0x06
R_ARM_THM_ABS5 = 0x07
R_ARM_ABS8 = 0x08
R_ARM_SBREL32 = 0x09
R_ARM_THM_CALL = 0x0a
R_ARM_THM_PC8 = 0x0b
R_ARM_BREL_ADJ = 0x0c
R_ARM_TLS_DESC = 0x0d
R_ARM_THM_SWI8 = 0x0e
R_ARM_XPC25 = 0x0f
R_ARM_THM_XPC22 = 0x10
R_ARM_TLS_DTPMOD32 = 0x11
R_ARM_TLS_DTPOFF32 = 0x12
R_ARM_TLS_TPOFF32 = 0x13
R_ARM_COPY = 0x14
R_ARM_GLOB_DAT = 0x15
R_ARM_JUMP_SLOT = 0x16
R_ARM_RELATIVE = 0x17
R_ARM_GOTOFF32 = 0x18
R_ARM_BASE_PREL = 0x19
R_ARM_GOT_BREL = 0x1a
R_ARM_PLT32 = 0x1b
R_ARM_CALL = 0x1c
R_ARM_JUMP24 = 0x1d
R_ARM_THM_JUMP24 = 0x1e
R_ARM_BASE_ABS = 0x1f
R_ARM_ALU_PCREL_7_0 = 0x20
R_ARM_ALU_PCREL_15_8 = 0x21
R_ARM_ALU_PCREL_23_15 = 0x22
R_ARM_LDR_SBREL_11_0_NC = 0x23
R_ARM_ALU_SBREL_19_12_NC = 0x24
R_ARM_ALU_SBREL_27_20_CK = 0x25
R_ARM_TARGET1 = 0x26
R_ARM_SBREL31 = 0x27
R_ARM_V4BX = 0x28
R_ARM_TARGET2 = 0x29
R_ARM_PREL31 = 0x2a
R_ARM_MOVW_ABS_NC = 0x2b
R_ARM_MOVT_ABS = 0x2c
R_ARM_MOVW_PREL_NC = 0x2d
R_ARM_MOVT_PREL = 0x2e
R_ARM_THM_MOVW_ABS_NC = 0x2f
R_ARM_THM_MOVT_ABS = 0x30
R_ARM_THM_MOVW_PREL_NC = 0x31
R_ARM_THM_MOVT_PREL = 0x32
R_ARM_THM_JUMP19 = 0x33
R_ARM_THM_JUMP6 = 0x34
R_ARM_THM_ALU_PREL_11_0 = 0x35
R_ARM_THM_PC12 = 0x36
R_ARM_ABS32_NOI = 0x37
R_ARM_REL32_NOI = 0x38
R_ARM_ALU_PC_G0_NC = 0x39
R_ARM_ALU_PC_G0 = 0x3a
R_ARM_ALU_PC_G1_NC = 0x3b
R_ARM_ALU_PC_G1 = 0x3c
R_ARM_ALU_PC_G2 = 0x3d
R_ARM_LDR_PC_G1 = 0x3e
R_ARM_LDR_PC_G2 = 0x3f
R_ARM_LDRS_PC_G0 = 0x40
R_ARM_LDRS_PC_G1 = 0x41
R_ARM_LDRS_PC_G2 = 0x42
R_ARM_LDC_PC_G0 = 0x43
R_ARM_LDC_PC_G1 = 0x44
R_ARM_LDC_PC_G2 = 0x45
R_ARM_ALU_SB_G0_NC = 0x46
R_ARM_ALU_SB_G0 = 0x47
R_ARM_ALU_SB_G1_NC = 0x48
R_ARM_ALU_SB_G1 = 0x49
R_ARM_ALU_SB_G2 = 0x4a
R_ARM_LDR_SB_G0 = 0x4b
R_ARM_LDR_SB_G1 = 0x4c
R_ARM_LDR_SB_G2 = 0x4d
R_ARM_LDRS_SB_G0 = 0x4e
R_ARM_LDRS_SB_G1 = 0x4f
R_ARM_LDRS_SB_G2 = 0x50
R_ARM_LDC_SB_G0 = 0x51
R_ARM_LDC_SB_G1 = 0x52
R_ARM_LDC_SB_G2 = 0x53
R_ARM_MOVW_BREL_NC = 0x54
R_ARM_MOVT_BREL = 0x55
R_ARM_MOVW_BREL = 0x56
R_ARM_THM_MOVW_BREL_NC = 0x57
R_ARM_THM_MOVT_BREL = 0x58
R_ARM_THM_MOVW_BREL = 0x59
R_ARM_TLS_GOTDESC = 0x5a
R_ARM_TLS_CALL = 0x5b
R_ARM_TLS_DESCSEQ = 0x5c
R_ARM_THM_TLS_CALL = 0x5d
R_ARM_PLT32_ABS = 0x5e
R_ARM_GOT_ABS = 0x5f
R_ARM_GOT_PREL = 0x60
R_ARM_GOT_BREL12 = 0x61
R_ARM_GOTOFF12 = 0x62
R_ARM_GOTRELAX = 0x63
R_ARM_GNU_VTENTRY = 0x64
R_ARM_GNU_VTINHERIT = 0x65
R_ARM_THM_JUMP11 = 0x66
R_ARM_THM_JUMP8 = 0x67
R_ARM_TLS_GD32 = 0x68
R_ARM_TLS_LDM32 = 0x69
R_ARM_TLS_LDO32 = 0x6a
R_ARM_TLS_IE32 = 0x6b
R_ARM_TLS_LE32 = 0x6c
R_ARM_TLS_LDO12 = 0x6d
R_ARM_TLS_LE12 = 0x6e
R_ARM_TLS_IE12GP = 0x6f
R_ARM_PRIVATE_0 = 0x70
R_ARM_PRIVATE_1 = 0x71
R_ARM_PRIVATE_2 = 0x72
R_ARM_PRIVATE_3 = 0x73
R_ARM_PRIVATE_4 = 0x74
R_ARM_PRIVATE_5 = 0x75
R_ARM_PRIVATE_6 = 0x76
R_ARM_PRIVATE_7 = 0x77
R_ARM_PRIVATE_8 = 0x78
R_ARM_PRIVATE_9 = 0x79
R_ARM_PRIVATE_10 = 0x7a
R_ARM_PRIVATE_11 = 0x7b
R_ARM_PRIVATE_12 = 0x7c
R_ARM_PRIVATE_13 = 0x7d
R_ARM_PRIVATE_14 = 0x7e
R_ARM_PRIVATE_15 = 0x7f
R_ARM_ME_TOO = 0x80
R_ARM_THM_TLS_DESCSEQ16 = 0x81
R_ARM_THM_TLS_DESCSEQ32 = 0x82
R_ARM_IRELATIVE = 0xa0
class Relocs_Elf_Mips(Enum):
R_MIPS_NONE = 0
R_MIPS_16 = 1
R_MIPS_32 = 2
R_MIPS_REL32 = 3
R_MIPS_26 = 4
R_MIPS_HI16 = 5
R_MIPS_LO16 = 6
R_MIPS_GPREL16 = 7
R_MIPS_LITERAL = 8
R_MIPS_GOT16 = 9
R_MIPS_PC16 = 10
R_MIPS_CALL16 = 11
R_MIPS_GPREL32 = 12
R_MIPS_SHIFT5 = 16
R_MIPS_SHIFT6 = 17
R_MIPS_64 = 18
R_MIPS_GOT_DISP = 19
R_MIPS_GOT_PAGE = 20
R_MIPS_GOT_OFST = 21
R_MIPS_GOT_HI16 = 22
R_MIPS_GOT_LO16 = 23
R_MIPS_SUB = 24
R_MIPS_INSERT_A = 25
R_MIPS_INSERT_B = 26
R_MIPS_DELETE = 27
R_MIPS_HIGHER = 28
R_MIPS_HIGHEST = 29
R_MIPS_CALL_HI16 = 30
R_MIPS_CALL_LO16 = 31
R_MIPS_SCN_DISP = 32
R_MIPS_REL16 = 33
R_MIPS_ADD_IMMEDIATE = 34
R_MIPS_PJUMP = 35
R_MIPS_RELGOT = 36
R_MIPS_JALR = 37
R_MIPS_TLS_DTPMOD32 = 38
R_MIPS_TLS_DTPREL32 = 39
R_MIPS_TLS_DTPMOD64 = 40
R_MIPS_TLS_DTPREL64 = 41
R_MIPS_TLS_GD = 42
R_MIPS_TLS_LDM = 43
R_MIPS_TLS_DTPREL_HI16 = 44
R_MIPS_TLS_DTPREL_LO16 = 45
R_MIPS_TLS_GOTTPREL = 46
R_MIPS_TLS_TPREL32 = 47
R_MIPS_TLS_TPREL64 = 48
R_MIPS_TLS_TPREL_HI16 = 49
R_MIPS_TLS_TPREL_LO16 = 50
R_MIPS_GLOB_DAT = 51
R_MIPS_COPY = 126
R_MIPS_JUMP_SLOT = 127
R_MIPS_NUM = 218
class Relocs_Elf_Hexagon(Enum):
R_HEX_NONE = 0
R_HEX_B22_PCREL = 1
R_HEX_B15_PCREL = 2
R_HEX_B7_PCREL = 3
R_HEX_LO16 = 4
R_HEX_HI16 = 5
R_HEX_32 = 6
R_HEX_16 = 7
R_HEX_8 = 8
R_HEX_GPREL16_0 = 9
R_HEX_GPREL16_1 = 10
R_HEX_GPREL16_2 = 11
R_HEX_GPREL16_3 = 12
R_HEX_HL16 = 13
R_HEX_B13_PCREL = 14
R_HEX_B9_PCREL = 15
R_HEX_B32_PCREL_X = 16
R_HEX_32_6_X = 17
R_HEX_B22_PCREL_X = 18
R_HEX_B15_PCREL_X = 19
R_HEX_B13_PCREL_X = 20
R_HEX_B9_PCREL_X = 21
R_HEX_B7_PCREL_X = 22
R_HEX_16_X = 23
R_HEX_12_X = 24
R_HEX_11_X = 25
R_HEX_10_X = 26
R_HEX_9_X = 27
R_HEX_8_X = 28
R_HEX_7_X = 29
R_HEX_6_X = 30
R_HEX_32_PCREL = 31
R_HEX_COPY = 32
R_HEX_GLOB_DAT = 33
R_HEX_JMP_SLOT = 34
R_HEX_RELATIVE = 35
R_HEX_PLT_B22_PCREL = 36
R_HEX_GOTREL_LO16 = 37
R_HEX_GOTREL_HI16 = 38
R_HEX_GOTREL_32 = 39
R_HEX_GOT_LO16 = 40
R_HEX_GOT_HI16 = 41
R_HEX_GOT_32 = 42
R_HEX_GOT_16 = 43
R_HEX_DTPMOD_32 = 44
R_HEX_DTPREL_LO16 = 45
R_HEX_DTPREL_HI16 = 46
R_HEX_DTPREL_32 = 47
R_HEX_DTPREL_16 = 48
R_HEX_GD_PLT_B22_PCREL = 49
R_HEX_GD_GOT_LO16 = 50
R_HEX_GD_GOT_HI16 = 51
R_HEX_GD_GOT_32 = 52
R_HEX_GD_GOT_16 = 53
R_HEX_IE_LO16 = 54
R_HEX_IE_HI16 = 55
R_HEX_IE_32 = 56
R_HEX_IE_GOT_LO16 = 57
R_HEX_IE_GOT_HI16 = 58
R_HEX_IE_GOT_32 = 59
R_HEX_IE_GOT_16 = 60
R_HEX_TPREL_LO16 = 61
R_HEX_TPREL_HI16 = 62
R_HEX_TPREL_32 = 63
R_HEX_TPREL_16 = 64
R_HEX_6_PCREL_X = 65
R_HEX_GOTREL_32_6_X = 66
R_HEX_GOTREL_16_X = 67
R_HEX_GOTREL_11_X = 68
R_HEX_GOT_32_6_X = 69
R_HEX_GOT_16_X = 70
R_HEX_GOT_11_X = 71
R_HEX_DTPREL_32_6_X = 72
R_HEX_DTPREL_16_X = 73
R_HEX_DTPREL_11_X = 74
R_HEX_GD_GOT_32_6_X = 75
R_HEX_GD_GOT_16_X = 76
R_HEX_GD_GOT_11_X = 77
R_HEX_IE_32_6_X = 78
R_HEX_IE_16_X = 79
R_HEX_IE_GOT_32_6_X = 80
R_HEX_IE_GOT_16_X = 81
R_HEX_IE_GOT_11_X = 82
R_HEX_TPREL_32_6_X = 83
R_HEX_TPREL_16_X = 84
R_HEX_TPREL_11_X = 85
class Relocs_Elf_Lanai(Enum):
R_LANAI_NONE = 0
R_LANAI_21 = 1
R_LANAI_21_F = 2
R_LANAI_25 = 3
R_LANAI_32 = 4
R_LANAI_HI16 = 5
R_LANAI_LO16 = 6
class Relocs_Coff_i386(Enum):
IMAGE_REL_I386_ABSOLUTE = 0x0000
IMAGE_REL_I386_DIR16 = 0x0001
IMAGE_REL_I386_REL16 = 0x0002
IMAGE_REL_I386_DIR32 = 0x0006
IMAGE_REL_I386_DIR32NB = 0x0007
IMAGE_REL_I386_SEG12 = 0x0009
IMAGE_REL_I386_SECTION = 0x000A
IMAGE_REL_I386_SECREL = 0x000B
IMAGE_REL_I386_TOKEN = 0x000C
IMAGE_REL_I386_SECREL7 = 0x000D
IMAGE_REL_I386_REL32 = 0x0014
class Relocs_Coff_X86_64(Enum):
IMAGE_REL_AMD64_ABSOLUTE = 0x0000
IMAGE_REL_AMD64_ADDR64 = 0x0001
IMAGE_REL_AMD64_ADDR32 = 0x0002
IMAGE_REL_AMD64_ADDR32NB = 0x0003
IMAGE_REL_AMD64_REL32 = 0x0004
IMAGE_REL_AMD64_REL32_1 = 0x0005
IMAGE_REL_AMD64_REL32_2 = 0x0006
IMAGE_REL_AMD64_REL32_3 = 0x0007
IMAGE_REL_AMD64_REL32_4 = 0x0008
IMAGE_REL_AMD64_REL32_5 = 0x0009
IMAGE_REL_AMD64_SECTION = 0x000A
IMAGE_REL_AMD64_SECREL = 0x000B
IMAGE_REL_AMD64_SECREL7 = 0x000C
IMAGE_REL_AMD64_TOKEN = 0x000D
IMAGE_REL_AMD64_SREL32 = 0x000E
IMAGE_REL_AMD64_PAIR = 0x000F
IMAGE_REL_AMD64_SSPAN32 = 0x0010
class Relocs_Coff_ARM(Enum):
IMAGE_REL_ARM_ABSOLUTE = 0x0000
IMAGE_REL_ARM_ADDR32 = 0x0001
IMAGE_REL_ARM_ADDR32NB = 0x0002
IMAGE_REL_ARM_BRANCH24 = 0x0003
IMAGE_REL_ARM_BRANCH11 = 0x0004
IMAGE_REL_ARM_TOKEN = 0x0005
IMAGE_REL_ARM_BLX24 = 0x0008
IMAGE_REL_ARM_BLX11 = 0x0009
IMAGE_REL_ARM_SECTION = 0x000E
IMAGE_REL_ARM_SECREL = 0x000F
IMAGE_REL_ARM_MOV32A = 0x0010
IMAGE_REL_ARM_MOV32T = 0x0011
IMAGE_REL_ARM_BRANCH20T = 0x0012
IMAGE_REL_ARM_BRANCH24T = 0x0014
IMAGE_REL_ARM_BLX23T = 0x0015
class Relocs_Macho_i386(Enum):
RIT_Vanilla = 0
RIT_Pair = 1
RIT_Difference = 2
RIT_Generic_PreboundLazyPointer = 3
RIT_Generic_LocalDifference = 4
RIT_Generic_TLV = 5
class Relocs_Macho_X86_64(Enum):
RIT_X86_64_Unsigned = 0
RIT_X86_64_Signed = 1
RIT_X86_64_Branch = 2
RIT_X86_64_GOTLoad = 3
RIT_X86_64_GOT = 4
RIT_X86_64_Subtractor = 5
RIT_X86_64_Signed1 = 6
RIT_X86_64_Signed2 = 7
RIT_X86_64_Signed4 = 8
RIT_X86_64_TLV = 9
class Relocs_Macho_ARM(Enum):
RIT_Vanilla = 0
RIT_Pair = 1
RIT_Difference = 2
RIT_ARM_LocalDifference = 3
RIT_ARM_PreboundLazyPointer = 4
RIT_ARM_Branch24Bit = 5
RIT_ARM_ThumbBranch22Bit = 6
RIT_ARM_ThumbBranch32Bit = 7
RIT_ARM_Half = 8
RIT_ARM_HalfDifference = 9
class Relocs_Macho_PPC(Enum):
PPC_RELOC_VANILLA = 0
PPC_RELOC_PAIR = 1
PPC_RELOC_BR14 = 2
PPC_RELOC_BR24 = 3
PPC_RELOC_HI16 = 4
PPC_RELOC_LO16 = 5
PPC_RELOC_HA16 = 6
PPC_RELOC_LO14 = 7
PPC_RELOC_SECTDIFF = 8
PPC_RELOC_PB_LA_PTR = 9
PPC_RELOC_HI16_SECTDIFF = 10
PPC_RELOC_LO16_SECTDIFF = 11
PPC_RELOC_HA16_SECTDIFF = 12
PPC_RELOC_JBSR = 13
PPC_RELOC_LO14_SECTDIFF = 14
PPC_RELOC_LOCAL_SECTDIFF = 15
craftElf("relocs.obj.elf-x86_64", "x86_64-pc-linux-gnu", Relocs_Elf_X86_64.entries(), "leaq sym@GOTTPOFF(%rip), %rax")
craftElf("relocs.obj.elf-i386", "i386-pc-linux-gnu", Relocs_Elf_i386.entries(), "mov sym@GOTOFF(%ebx), %eax")
#craftElf("relocs-elf-ppc32", "powerpc-unknown-linux-gnu", Relocs_Elf_PPC32.entries(), ...)
craftElf("relocs.obj.elf-ppc64", "powerpc64-unknown-linux-gnu", Relocs_Elf_PPC64.entries(),
("@t = thread_local global i32 0, align 4", "define i32* @f{0}() nounwind {{ ret i32* @t }}", 2))
craftElf("relocs.obj.elf-aarch64", "aarch64", Relocs_Elf_AArch64.entries(), "movz x0, #:abs_g0:sym")
craftElf("relocs.obj.elf-aarch64-ilp32", "aarch64",
Relocs_Elf_AArch64_ILP32.entries(), "movz x0, #:abs_g0:sym")
craftElf("relocs.obj.elf-arm", "arm-unknown-unknown", Relocs_Elf_ARM.entries(), "b sym")
craftElf("relocs.obj.elf-mips", "mips-unknown-linux", Relocs_Elf_Mips.entries(), "lui $2, %hi(sym)")
craftElf("relocs.obj.elf-mips64el", "mips64el-unknown-linux", Relocs_Elf_Mips.entries(), "lui $2, %hi(sym)")
#craftElf("relocs.obj.elf-hexagon", "hexagon-unknown-unknown", Relocs_Elf_Hexagon.entries(), ...)
#craftElf("relocs.obj.elf-lanai", "lanai-unknown-unknown", Relocs_Elf_Lanai.entries(), "mov hi(x), %r4")
craftCoff("relocs.obj.coff-i386", "i386-pc-win32", Relocs_Coff_i386.entries(), "mov foo@imgrel(%ebx, %ecx, 4), %eax")
craftCoff("relocs.obj.coff-x86_64", "x86_64-pc-win32", Relocs_Coff_X86_64.entries(), "mov foo@imgrel(%ebx, %ecx, 4), %eax")
#craftCoff("relocs.obj.coff-arm", "arm-pc-win32", Relocs_Coff_ARM.entries(), "...")
craftMacho("relocs.obj.macho-i386", "i386-apple-darwin9", Relocs_Macho_i386.entries(),
("asm", ".subsections_via_symbols; .text; a: ; b:", "call a", 1))
craftMacho("relocs.obj.macho-x86_64", "x86_64-apple-darwin9", Relocs_Macho_X86_64.entries(),
("asm", ".subsections_via_symbols; .text; a: ; b:", "call a", 1))
craftMacho("relocs.obj.macho-arm", "armv7-apple-darwin10", Relocs_Macho_ARM.entries(), "bl sym")
#craftMacho("relocs.obj.macho-ppc", "powerpc-apple-darwin10", Relocs_Macho_PPC.entries(), ...)
| 41,252 | 32.897288 | 128 | py |
LowFat | LowFat-master/llvm-4.0.0.src/test/Other/opt-bisect-helper.py | #!/usr/bin/env python
import os
import sys
import argparse
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument('--start', type=int, default=0)
parser.add_argument('--end', type=int, default=(1 << 32))
parser.add_argument('--optcmd', default=("opt"))
parser.add_argument('--filecheckcmd', default=("FileCheck"))
parser.add_argument('--prefix', default=("CHECK-BISECT"))
parser.add_argument('--test', default=(""))
args = parser.parse_args()
start = args.start
end = args.end
opt_command = [args.optcmd, "-O2", "-opt-bisect-limit=%(count)s", "-S", args.test]
check_command = [args.filecheckcmd, args.test, "--check-prefix=%s" % args.prefix]
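# Classic bisection invariant: 'start' is the highest count whose output
# still satisfies the FileCheck prefix, 'end' the lowest count that fails.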
while start != end and start != end-1:
count = int(round(start + (end - start)/2))
cmd = [x % {'count':count} for x in opt_command]
print("opt: " + str(cmd))
opt_result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
filecheck_result = subprocess.Popen(check_command, stdin=opt_result.stdout)
opt_result.stdout.close()
opt_result.stderr.close()
filecheck_result.wait()
if filecheck_result.returncode == 0:
start = count
else:
end = count
print("Last good count: %d" % start)
| 1,234 | 29.875 | 86 | py |
LowFat | LowFat-master/llvm-4.0.0.src/test/MC/COFF/bigobj.py | # RUN: python %s | llvm-mc -filetype=obj -triple i686-pc-win32 - | llvm-readobj -h | FileCheck %s
# This test checks that the COFF object emitter can produce objects with
# more than 65279 sections.
# While we only generate 65277 sections, an implicit .text, .data and .bss will
# also be emitted. This brings the total to 65280.
num_sections = 65277
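# Symbol count sanity check: each of the 65280 sections is described by a
# section symbol plus one auxiliary record (2 * 65280 == 130560), and each
# _b<i> global adds one more (65277), matching the 195837 checked below.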
# CHECK: ImageFileHeader {
# CHECK-NEXT: Machine: IMAGE_FILE_MACHINE_I386
# CHECK-NEXT: SectionCount: 65280
# CHECK-NEXT: TimeDateStamp: {{[0-9]+}}
# CHECK-NEXT: PointerToSymbolTable: 0x{{[0-9A-F]+}}
# CHECK-NEXT: SymbolCount: 195837
# CHECK-NEXT: OptionalHeaderSize: 0
# CHECK-NEXT: Characteristics [ (0x0)
# CHECK-NEXT: ]
# CHECK-NEXT: }
for i in range(0, num_sections):
print(""" .section .bss,"bw",discard,_b%d
.globl _b%d # @b%d
_b%d:
.byte 0 # 0x0
""" % (i, i, i, i))
| 896 | 32.222222 | 97 | py |
LowFat | LowFat-master/llvm-4.0.0.src/test/BugPoint/compile-custom.ll.py | #!/usr/bin/env python
import sys
# Currently any print-out from the custom tool is interpreted as a crash
# (i.e. test is still interesting)
print("Error: " + ' '.join(sys.argv[1:]))
sys.exit(1)
| 199 | 17.181818 | 72 | py |
LowFat | LowFat-master/llvm-4.0.0.src/test/CodeGen/SystemZ/Large/spill-01.py | # Test cases where MVC is used for spill slots that end up being out of range.
# RUN: python %s | llc -mtriple=s390x-linux-gnu | FileCheck %s
# There are 8 usable call-saved GPRs, two of which are needed for the base
# registers. The first 160 bytes of the frame are needed for the ABI
# call frame, and a further 8 bytes are needed for the emergency spill slot.
# That means we will have at least one out-of-range slot if:
#
# count == (4096 - 168) / 8 + 6 + 1 == 498
#
# Add in some extra room and check both %r15+4096 (the first out-of-range slot)
# and %r15+4104.
#
# CHECK: f1:
# CHECK: lay [[REG:%r[0-5]]], 4096(%r15)
# CHECK: mvc 0(8,[[REG]]), {{[0-9]+}}({{%r[0-9]+}})
# CHECK: brasl %r14, foo@PLT
# CHECK: lay [[REG:%r[0-5]]], 4096(%r15)
# CHECK: mvc {{[0-9]+}}(8,{{%r[0-9]+}}), 8([[REG]])
# CHECK: br %r14
count = 500
print 'declare void @foo()'
print ''
print 'define void @f1(i64 *%base0, i64 *%base1) {'
for i in range(count):
print ' %%ptr%d = getelementptr i64, i64 *%%base%d, i64 %d' % (i, i % 2, i / 2)
print ' %%val%d = load i64 , i64 *%%ptr%d' % (i, i)
print ''
print ' call void @foo()'
print ''
for i in range(count):
print ' store i64 %%val%d, i64 *%%ptr%d' % (i, i)
print ''
print ' ret void'
print '}'
| 1,256 | 29.658537 | 84 | py |
LowFat | LowFat-master/llvm-4.0.0.src/test/CodeGen/SystemZ/Large/branch-range-12.py | # Test 64-bit COMPARE LOGICAL IMMEDIATE AND BRANCH in cases where the sheer
# number of instructions causes some branches to be out of range.
# RUN: python %s | llc -mtriple=s390x-linux-gnu | FileCheck %s
# Construct:
#
# before0:
# conditional branch to after0
# ...
# beforeN:
# conditional branch to after0
# main:
# 0xffb4 bytes, from MVIY instructions
# conditional branch to main
# after0:
# ...
# conditional branch to main
# afterN:
#
# Each conditional branch sequence occupies 18 bytes if it uses a short
# branch and 24 if it uses a long one. The ones before "main:" have to
# take the branch length into account, which is 6 for short branches,
# so the final (0x4c - 6) / 18 == 3 blocks can use short branches.
# The ones after "main:" do not, so the first 0x4c / 18 == 4 blocks
# can use short branches. The conservative algorithm we use makes
# one of the forward branches unnecessarily long, as noted in the
# check output below.
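# (The 0x4c of slack is what remains of the +/-64KiB short-branch range
# after the 0xffb4-byte main block: 0x10000 - 0xffb4 == 0x4c.)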
#
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgfi [[REG]], 50
# CHECK: jgl [[LABEL:\.L[^ ]*]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgfi [[REG]], 51
# CHECK: jgl [[LABEL]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgfi [[REG]], 52
# CHECK: jgl [[LABEL]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgfi [[REG]], 53
# CHECK: jgl [[LABEL]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgfi [[REG]], 54
# CHECK: jgl [[LABEL]]
# ...as mentioned above, the next one could be a CLGIJL instead...
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgfi [[REG]], 55
# CHECK: jgl [[LABEL]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgijl [[REG]], 56, [[LABEL]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgijl [[REG]], 57, [[LABEL]]
# ...main goes here...
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgijl [[REG]], 100, [[LABEL:\.L[^ ]*]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgijl [[REG]], 101, [[LABEL]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgijl [[REG]], 102, [[LABEL]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgijl [[REG]], 103, [[LABEL]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgfi [[REG]], 104
# CHECK: jgl [[LABEL]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgfi [[REG]], 105
# CHECK: jgl [[LABEL]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgfi [[REG]], 106
# CHECK: jgl [[LABEL]]
# CHECK: lg [[REG:%r[0-5]]], 0(%r3)
# CHECK: sg [[REG]], 0(%r4)
# CHECK: clgfi [[REG]], 107
# CHECK: jgl [[LABEL]]
branch_blocks = 8
main_size = 0xffb4
print '@global = global i32 0'
print 'define void @f1(i8 *%base, i64 *%stopa, i64 *%stopb) {'
print 'entry:'
print ' br label %before0'
print ''
for i in xrange(branch_blocks):
next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
print 'before%d:' % i
print ' %%bcur%da = load i64 , i64 *%%stopa' % i
print ' %%bcur%db = load i64 , i64 *%%stopb' % i
print ' %%bsub%d = sub i64 %%bcur%da, %%bcur%db' % (i, i, i)
print ' %%btest%d = icmp ult i64 %%bsub%d, %d' % (i, i, i + 50)
print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
print ''
print '%s:' % next
a, b = 1, 1
for i in xrange(0, main_size, 6):
a, b = b, a + b
offset = 4096 + b % 500000
value = a % 256
print ' %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset)
print ' store volatile i8 %d, i8 *%%ptr%d' % (value, i)
for i in xrange(branch_blocks):
print ' %%acur%da = load i64 , i64 *%%stopa' % i
print ' %%acur%db = load i64 , i64 *%%stopb' % i
print ' %%asub%d = sub i64 %%acur%da, %%acur%db' % (i, i, i)
print ' %%atest%d = icmp ult i64 %%asub%d, %d' % (i, i, i + 100)
print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
print ''
print 'after%d:' % i
print ' %dummy = load volatile i32, i32 *@global'
print ' ret void'
print '}'
| 4,217 | 31.198473 | 75 | py |
LowFat | LowFat-master/llvm-4.0.0.src/test/CodeGen/SystemZ/Large/branch-range-11.py | # Test 32-bit COMPARE LOGICAL IMMEDIATE AND BRANCH in cases where the sheer
# number of instructions causes some branches to be out of range.
# RUN: python %s | llc -mtriple=s390x-linux-gnu | FileCheck %s
# Construct:
#
# before0:
# conditional branch to after0
# ...
# beforeN:
# conditional branch to after0
# main:
# 0xffc6 bytes, from MVIY instructions
# conditional branch to main
# after0:
# ...
# conditional branch to main
# afterN:
#
# Each conditional branch sequence occupies 14 bytes if it uses a short
# branch and 20 if it uses a long one. The ones before "main:" have to
# take the branch length into account, which is 6 for short branches,
# so the final (0x3a - 6) / 14 == 3 blocks can use short branches.
# The ones after "main:" do not, so the first 0x3a / 14 == 4 blocks
# can use short branches. The conservative algorithm we use makes
# one of the forward branches unnecessarily long, as noted in the
# check output below.
#
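# Editorial sanity check of the arithmetic above (an addition only):
assert (0x3a - 6) / 14 == 3 and 0x3a / 14 == 4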
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clfi [[REG]], 50
# CHECK: jgl [[LABEL:\.L[^ ]*]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clfi [[REG]], 51
# CHECK: jgl [[LABEL]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clfi [[REG]], 52
# CHECK: jgl [[LABEL]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clfi [[REG]], 53
# CHECK: jgl [[LABEL]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clfi [[REG]], 54
# CHECK: jgl [[LABEL]]
# ...as mentioned above, the next one could be a CLIJL instead...
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clfi [[REG]], 55
# CHECK: jgl [[LABEL]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clijl [[REG]], 56, [[LABEL]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clijl [[REG]], 57, [[LABEL]]
# ...main goes here...
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clijl [[REG]], 100, [[LABEL:\.L[^ ]*]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clijl [[REG]], 101, [[LABEL]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clijl [[REG]], 102, [[LABEL]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clijl [[REG]], 103, [[LABEL]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clfi [[REG]], 104
# CHECK: jgl [[LABEL]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clfi [[REG]], 105
# CHECK: jgl [[LABEL]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clfi [[REG]], 106
# CHECK: jgl [[LABEL]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clfi [[REG]], 107
# CHECK: jgl [[LABEL]]
branch_blocks = 8
main_size = 0xffc6
print '@global = global i32 0'
print 'define void @f1(i8 *%base, i32 *%stopa, i32 *%stopb) {'
print 'entry:'
print ' br label %before0'
print ''
for i in xrange(branch_blocks):
next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
print 'before%d:' % i
print ' %%bcur%da = load i32 , i32 *%%stopa' % i
print ' %%bcur%db = load i32 , i32 *%%stopb' % i
print ' %%bsub%d = sub i32 %%bcur%da, %%bcur%db' % (i, i, i)
print ' %%btest%d = icmp ult i32 %%bsub%d, %d' % (i, i, i + 50)
print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
print ''
print '%s:' % next
a, b = 1, 1
for i in xrange(0, main_size, 6):
a, b = b, a + b
offset = 4096 + b % 500000
value = a % 256
print ' %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset)
print ' store volatile i8 %d, i8 *%%ptr%d' % (value, i)
for i in xrange(branch_blocks):
print ' %%acur%da = load i32 , i32 *%%stopa' % i
print ' %%acur%db = load i32 , i32 *%%stopb' % i
print ' %%asub%d = sub i32 %%acur%da, %%acur%db' % (i, i, i)
print ' %%atest%d = icmp ult i32 %%asub%d, %d' % (i, i, i + 100)
print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
print ''
print 'after%d:' % i
print ' %dummy = load volatile i32, i32 *@global'
print ' ret void'
print '}'
| 4,168 | 30.824427 | 75 | py |
LowFat | LowFat-master/llvm-4.0.0.src/test/CodeGen/SystemZ/Large/branch-range-07.py | # Test 32-bit BRANCH RELATIVE ON COUNT in cases where some branches are out
# of range.
# RUN: python %s | llc -mtriple=s390x-linux-gnu | FileCheck %s
# Construct:
#
# loopN:
# load of countN
# ...
# loop0:
# 0xffd8 bytes, from MVIY instructions
# conditional branch to main
# after0:
# ...
# decrement of countN
# conditional branch to loopN
# afterN:
#
# Each load occupies 4 bytes. Each decrement and branch occupies 4
# bytes if BRCT can be used, otherwise it occupies 10 bytes (AHI + BRCL).
# This means that loop 6 contains 5 * 4 + 0xffd8 + 5 * 4 == 0x10000 bytes
# and is therefore (just) in range. Loop 7 is out of range.
#
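# Editorial sanity check of the size computation above (an addition, not
# part of the original test):
assert 5 * 4 + 0xffd8 + 5 * 4 == 0x10000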
# CHECK: brct {{%r[0-9]+}}
# CHECK: brct {{%r[0-9]+}}
# CHECK: brct {{%r[0-9]+}}
# CHECK: brct {{%r[0-9]+}}
# CHECK: brct {{%r[0-9]+}}
# CHECK: brct {{%r[0-9]+}}
# CHECK: ahi {{%r[0-9]+}}, -1
# CHECK: jglh
# CHECK: ahi {{%r[0-9]+}}, -1
# CHECK: jglh
branch_blocks = 8
main_size = 0xffd8
print 'define void @f1(i8 *%base, i32 *%counts) {'
print 'entry:'
for i in xrange(branch_blocks - 1, -1, -1):
print ' %%countptr%d = getelementptr i32, i32 *%%counts, i64 %d' % (i, i)
print ' %%initcount%d = load i32 , i32 *%%countptr%d' % (i, i)
print ' br label %%loop%d' % i
print 'loop%d:' % i
block1 = 'entry' if i == branch_blocks - 1 else 'loop%d' % (i + 1)
block2 = 'loop0' if i == 0 else 'after%d' % (i - 1)
print (' %%count%d = phi i32 [ %%initcount%d, %%%s ],'
' [ %%nextcount%d, %%%s ]' % (i, i, block1, i, block2))
a, b = 1, 1
for i in xrange(0, main_size, 6):
a, b = b, a + b
offset = 4096 + b % 500000
value = a % 256
print ' %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset)
print ' store volatile i8 %d, i8 *%%ptr%d' % (value, i)
for i in xrange(branch_blocks):
print ' %%nextcount%d = add i32 %%count%d, -1' % (i, i)
print ' %%test%d = icmp ne i32 %%nextcount%d, 0' % (i, i)
print ' br i1 %%test%d, label %%loop%d, label %%after%d' % (i, i, i)
print ''
print 'after%d:' % i
print ' ret void'
print '}'
| 2,068 | 28.985507 | 78 | py |
LowFat | LowFat-master/llvm-4.0.0.src/test/CodeGen/SystemZ/Large/branch-range-01.py | # Test normal conditional branches in cases where the sheer number of
# instructions causes some branches to be out of range.
# RUN: python %s | llc -mtriple=s390x-linux-gnu | FileCheck %s
# Construct:
#
# before0:
# conditional branch to after0
# ...
# beforeN:
# conditional branch to after0
# main:
# 0xffd8 bytes, from MVIY instructions
# conditional branch to main
# after0:
# ...
# conditional branch to main
# afterN:
#
# Each conditional branch sequence occupies 8 bytes if it uses a short branch
# and 10 if it uses a long one. The ones before "main:" have to take the branch
# length into account -- which is 4 bytes for short branches -- so the final
# (0x28 - 4) / 8 == 4 blocks can use short branches. The ones after "main:"
# do not, so the first 0x28 / 8 == 5 can use short branches. However,
# the conservative algorithm we use makes one branch unnecessarily long
# on each side.
#
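# Editorial sanity check of the arithmetic above (an addition only):
assert (0x28 - 4) / 8 == 4 and 0x28 / 8 == 5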
# CHECK: c %r4, 0(%r3)
# CHECK: jge [[LABEL:\.L[^ ]*]]
# CHECK: c %r4, 4(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 8(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 12(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 16(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 20(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 24(%r3)
# CHECK: j{{g?}}e [[LABEL]]
# CHECK: c %r4, 28(%r3)
# CHECK: je [[LABEL]]
# CHECK: c %r4, 32(%r3)
# CHECK: je [[LABEL]]
# CHECK: c %r4, 36(%r3)
# CHECK: je [[LABEL]]
# ...main goes here...
# CHECK: c %r4, 100(%r3)
# CHECK: je [[LABEL:\.L[^ ]*]]
# CHECK: c %r4, 104(%r3)
# CHECK: je [[LABEL]]
# CHECK: c %r4, 108(%r3)
# CHECK: je [[LABEL]]
# CHECK: c %r4, 112(%r3)
# CHECK: je [[LABEL]]
# CHECK: c %r4, 116(%r3)
# CHECK: j{{g?}}e [[LABEL]]
# CHECK: c %r4, 120(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 124(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 128(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 132(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 136(%r3)
# CHECK: jge [[LABEL]]
branch_blocks = 10
main_size = 0xffd8
print '@global = global i32 0'
print 'define void @f1(i8 *%base, i32 *%stop, i32 %limit) {'
print 'entry:'
print ' br label %before0'
print ''
for i in xrange(branch_blocks):
next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
print 'before%d:' % i
print ' %%bstop%d = getelementptr i32, i32 *%%stop, i64 %d' % (i, i)
print ' %%bcur%d = load i32 , i32 *%%bstop%d' % (i, i)
print ' %%btest%d = icmp eq i32 %%limit, %%bcur%d' % (i, i)
print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
print ''
print '%s:' % next
a, b = 1, 1
for i in xrange(0, main_size, 6):
a, b = b, a + b
offset = 4096 + b % 500000
value = a % 256
print ' %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset)
print ' store volatile i8 %d, i8 *%%ptr%d' % (value, i)
for i in xrange(branch_blocks):
print ' %%astop%d = getelementptr i32, i32 *%%stop, i64 %d' % (i, i + 25)
print ' %%acur%d = load i32 , i32 *%%astop%d' % (i, i)
print ' %%atest%d = icmp eq i32 %%limit, %%acur%d' % (i, i)
print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
print ''
print 'after%d:' % i
print ' %dummy = load volatile i32, i32 *@global'
print ' ret void'
print '}'
| 3,201 | 28.376147 | 80 | py |
LowFat | LowFat-master/llvm-4.0.0.src/test/CodeGen/SystemZ/Large/branch-range-05.py | # Test 32-bit COMPARE IMMEDIATE AND BRANCH in cases where the sheer number of
# instructions causes some branches to be out of range.
# RUN: python %s | llc -mtriple=s390x-linux-gnu | FileCheck %s
# Construct:
#
# before0:
# conditional branch to after0
# ...
# beforeN:
# conditional branch to after0
# main:
# 0xffcc bytes, from MVIY instructions
# conditional branch to main
# after0:
# ...
# conditional branch to main
# afterN:
#
# Each conditional branch sequence occupies 12 bytes if it uses a short
# branch and 16 if it uses a long one. The ones before "main:" have to
# take the branch length into account, which is 6 for short branches,
# so the final (0x34 - 6) / 12 == 3 blocks can use short branches.
# The ones after "main:" do not, so the first 0x34 / 12 == 4 blocks
# can use short branches. The conservative algorithm we use makes
# one of the forward branches unnecessarily long, as noted in the
# check output below.
#
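# Editorial sanity check of the arithmetic above (an addition only):
assert (0x34 - 6) / 12 == 3 and 0x34 / 12 == 4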
# CHECK: lb [[REG:%r[0-5]]], 0(%r3)
# CHECK: chi [[REG]], 50
# CHECK: jgl [[LABEL:\.L[^ ]*]]
# CHECK: lb [[REG:%r[0-5]]], 0(%r3)
# CHECK: chi [[REG]], 51
# CHECK: jgl [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 0(%r3)
# CHECK: chi [[REG]], 52
# CHECK: jgl [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 0(%r3)
# CHECK: chi [[REG]], 53
# CHECK: jgl [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 0(%r3)
# CHECK: chi [[REG]], 54
# CHECK: jgl [[LABEL]]
# ...as mentioned above, the next one could be a CIJL instead...
# CHECK: lb [[REG:%r[0-5]]], 0(%r3)
# CHECK: chi [[REG]], 55
# CHECK: jgl [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cijl [[REG]], 56, [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cijl [[REG]], 57, [[LABEL]]
# ...main goes here...
# CHECK: lb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cijl [[REG]], 100, [[LABEL:\.L[^ ]*]]
# CHECK: lb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cijl [[REG]], 101, [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cijl [[REG]], 102, [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cijl [[REG]], 103, [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 0(%r3)
# CHECK: chi [[REG]], 104
# CHECK: jgl [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 0(%r3)
# CHECK: chi [[REG]], 105
# CHECK: jgl [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 0(%r3)
# CHECK: chi [[REG]], 106
# CHECK: jgl [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 0(%r3)
# CHECK: chi [[REG]], 107
# CHECK: jgl [[LABEL]]
branch_blocks = 8
main_size = 0xffcc
print '@global = global i32 0'
print 'define void @f1(i8 *%base, i8 *%stop) {'
print 'entry:'
print ' br label %before0'
print ''
for i in xrange(branch_blocks):
next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
print 'before%d:' % i
print ' %%bcur%d = load i8 , i8 *%%stop' % i
print ' %%bext%d = sext i8 %%bcur%d to i32' % (i, i)
print ' %%btest%d = icmp slt i32 %%bext%d, %d' % (i, i, i + 50)
print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
print ''
print '%s:' % next
a, b = 1, 1
for i in xrange(0, main_size, 6):
a, b = b, a + b
offset = 4096 + b % 500000
value = a % 256
print ' %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset)
print ' store volatile i8 %d, i8 *%%ptr%d' % (value, i)
for i in xrange(branch_blocks):
print ' %%acur%d = load i8 , i8 *%%stop' % i
print ' %%aext%d = sext i8 %%acur%d to i32' % (i, i)
print ' %%atest%d = icmp slt i32 %%aext%d, %d' % (i, i, i + 100)
print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
print ''
print 'after%d:' % i
print ' %dummy = load volatile i32, i32 *@global'
print ' ret void'
print '}'
| 3,580 | 30.690265 | 77 | py |
LowFat | LowFat-master/llvm-4.0.0.src/test/CodeGen/SystemZ/Large/branch-range-08.py | # Test 64-bit BRANCH RELATIVE ON COUNT in cases where some branches are out
# of range.
# RUN: python %s | llc -mtriple=s390x-linux-gnu | FileCheck %s
# Construct:
#
# loopN:
# load of countN
# ...
# loop0:
# 0xffd8 bytes, from MVIY instructions
# conditional branch to main
# after0:
# ...
# decrement of countN
# conditional branch to loopN
# afterN:
#
# Each load occupies 6 bytes. Each decrement and branch occupies 4
# bytes if BRCTG can be used, otherwise it occupies 10 bytes (AGHI + BRCL).
# This means that loop 5 contains 4 * 6 + 0xffd8 + 4 * 4 == 0x10000 bytes
# and is therefore (just) in range. Loop 6 is out of range.
#
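# Editorial sanity check of the size computation above (an addition only):
assert 4 * 6 + 0xffd8 + 4 * 4 == 0x10000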
# CHECK: brctg {{%r[0-9]+}}
# CHECK: brctg {{%r[0-9]+}}
# CHECK: brctg {{%r[0-9]+}}
# CHECK: brctg {{%r[0-9]+}}
# CHECK: brctg {{%r[0-9]+}}
# CHECK: aghi {{%r[0-9]+}}, -1
# CHECK: jglh
# CHECK: aghi {{%r[0-9]+}}, -1
# CHECK: jglh
# CHECK: aghi {{%r[0-9]+}}, -1
# CHECK: jglh
branch_blocks = 8
main_size = 0xffd8
print 'define void @f1(i8 *%base, i64 *%counts) {'
print 'entry:'
for i in xrange(branch_blocks - 1, -1, -1):
print ' %%countptr%d = getelementptr i64, i64 *%%counts, i64 %d' % (i, i)
print ' %%initcount%d = load i64 , i64 *%%countptr%d' % (i, i)
print ' br label %%loop%d' % i
print 'loop%d:' % i
block1 = 'entry' if i == branch_blocks - 1 else 'loop%d' % (i + 1)
block2 = 'loop0' if i == 0 else 'after%d' % (i - 1)
print (' %%count%d = phi i64 [ %%initcount%d, %%%s ],'
' [ %%nextcount%d, %%%s ]' % (i, i, block1, i, block2))
a, b = 1, 1
for i in xrange(0, main_size, 6):
a, b = b, a + b
offset = 4096 + b % 500000
value = a % 256
print ' %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset)
print ' store volatile i8 %d, i8 *%%ptr%d' % (value, i)
for i in xrange(branch_blocks):
print ' %%nextcount%d = add i64 %%count%d, -1' % (i, i)
print ' %%test%d = icmp ne i64 %%nextcount%d, 0' % (i, i)
print ' br i1 %%test%d, label %%loop%d, label %%after%d' % (i, i, i)
print ''
print 'after%d:' % i
print ' ret void'
print '}'
| 2,095 | 28.942857 | 78 | py |
LowFat | LowFat-master/llvm-4.0.0.src/test/CodeGen/SystemZ/Large/branch-range-09.py | # Test 32-bit COMPARE LOGICAL AND BRANCH in cases where the sheer number of
# instructions causes some branches to be out of range.
# RUN: python %s | llc -mtriple=s390x-linux-gnu | FileCheck %s
# Construct:
#
# before0:
# conditional branch to after0
# ...
# beforeN:
# conditional branch to after0
# main:
# 0xffcc bytes, from MVIY instructions
# conditional branch to main
# after0:
# ...
# conditional branch to main
# afterN:
#
# Each conditional branch sequence occupies 12 bytes if it uses a short
# branch and 14 if it uses a long one. The ones before "main:" have to
# take the branch length into account, which is 6 for short branches,
# so the final (0x34 - 6) / 12 == 3 blocks can use short branches.
# The ones after "main:" do not, so the first 0x34 / 12 == 4 blocks
# can use short branches.
#
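# Editorial sanity check of the arithmetic above (an addition only):
assert (0x34 - 6) / 12 == 3 and 0x34 / 12 == 4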
# CHECK: lb [[REG:%r[0-5]]], 0(%r3)
# CHECK: clr %r4, [[REG]]
# CHECK: jgl [[LABEL:\.L[^ ]*]]
# CHECK: lb [[REG:%r[0-5]]], 1(%r3)
# CHECK: clr %r4, [[REG]]
# CHECK: jgl [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 2(%r3)
# CHECK: clr %r4, [[REG]]
# CHECK: jgl [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 3(%r3)
# CHECK: clr %r4, [[REG]]
# CHECK: jgl [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 4(%r3)
# CHECK: clr %r4, [[REG]]
# CHECK: jgl [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 5(%r3)
# CHECK: clrjl %r4, [[REG]], [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 6(%r3)
# CHECK: clrjl %r4, [[REG]], [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 7(%r3)
# CHECK: clrjl %r4, [[REG]], [[LABEL]]
# ...main goes here...
# CHECK: lb [[REG:%r[0-5]]], 25(%r3)
# CHECK: clrjl %r4, [[REG]], [[LABEL:\.L[^ ]*]]
# CHECK: lb [[REG:%r[0-5]]], 26(%r3)
# CHECK: clrjl %r4, [[REG]], [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 27(%r3)
# CHECK: clrjl %r4, [[REG]], [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 28(%r3)
# CHECK: clrjl %r4, [[REG]], [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 29(%r3)
# CHECK: clr %r4, [[REG]]
# CHECK: jgl [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 30(%r3)
# CHECK: clr %r4, [[REG]]
# CHECK: jgl [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 31(%r3)
# CHECK: clr %r4, [[REG]]
# CHECK: jgl [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 32(%r3)
# CHECK: clr %r4, [[REG]]
# CHECK: jgl [[LABEL]]
branch_blocks = 8
main_size = 0xffcc
print '@global = global i32 0'
print 'define void @f1(i8 *%base, i8 *%stop, i32 %limit) {'
print 'entry:'
print ' br label %before0'
print ''
for i in xrange(branch_blocks):
next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
print 'before%d:' % i
print ' %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i)
print ' %%bcur%d = load i8 , i8 *%%bstop%d' % (i, i)
print ' %%bext%d = sext i8 %%bcur%d to i32' % (i, i)
print ' %%btest%d = icmp ult i32 %%limit, %%bext%d' % (i, i)
print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
print ''
print '%s:' % next
a, b = 1, 1
for i in xrange(0, main_size, 6):
a, b = b, a + b
offset = 4096 + b % 500000
value = a % 256
print ' %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset)
print ' store volatile i8 %d, i8 *%%ptr%d' % (value, i)
for i in xrange(branch_blocks):
print ' %%astop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i + 25)
print ' %%acur%d = load i8 , i8 *%%astop%d' % (i, i)
print ' %%aext%d = sext i8 %%acur%d to i32' % (i, i)
print ' %%atest%d = icmp ult i32 %%limit, %%aext%d' % (i, i)
print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
print ''
print 'after%d:' % i
print ' %dummy = load volatile i32, i32 *@global'
print ' ret void'
print '}'
| 3,566 | 31.135135 | 76 | py |
LowFat | LowFat-master/llvm-4.0.0.src/test/CodeGen/SystemZ/Large/branch-range-04.py | # Test 64-bit COMPARE AND BRANCH in cases where the sheer number of
# instructions causes some branches to be out of range.
# RUN: python %s | llc -mtriple=s390x-linux-gnu | FileCheck %s
# Construct:
#
# before0:
# conditional branch to after0
# ...
# beforeN:
# conditional branch to after0
# main:
# 0xffcc bytes, from MVIY instructions
# conditional branch to main
# after0:
# ...
# conditional branch to main
# afterN:
#
# Each conditional branch sequence occupies 12 bytes if it uses a short
# branch and 16 if it uses a long one. The ones before "main:" have to
# take the branch length into account, which is 6 for short branches,
# so the final (0x34 - 6) / 12 == 3 blocks can use short branches.
# The ones after "main:" do not, so the first 0x34 / 12 == 4 blocks
# can use short branches. The conservative algorithm we use makes
# one of the forward branches unnecessarily long, as noted in the
# check output below.
#
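# Editorial sanity check of the arithmetic above (an addition only):
assert (0x34 - 6) / 12 == 3 and 0x34 / 12 == 4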
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cgr %r4, [[REG]]
# CHECK: jge [[LABEL:\.L[^ ]*]]
# CHECK: lgb [[REG:%r[0-5]]], 1(%r3)
# CHECK: cgr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 2(%r3)
# CHECK: cgr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 3(%r3)
# CHECK: cgr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 4(%r3)
# CHECK: cgr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# ...as mentioned above, the next one could be a CGRJE instead...
# CHECK: lgb [[REG:%r[0-5]]], 5(%r3)
# CHECK: cgr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 6(%r3)
# CHECK: cgrje %r4, [[REG]], [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 7(%r3)
# CHECK: cgrje %r4, [[REG]], [[LABEL]]
# ...main goes here...
# CHECK: lgb [[REG:%r[0-5]]], 25(%r3)
# CHECK: cgrje %r4, [[REG]], [[LABEL:\.L[^ ]*]]
# CHECK: lgb [[REG:%r[0-5]]], 26(%r3)
# CHECK: cgrje %r4, [[REG]], [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 27(%r3)
# CHECK: cgrje %r4, [[REG]], [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 28(%r3)
# CHECK: cgrje %r4, [[REG]], [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 29(%r3)
# CHECK: cgr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 30(%r3)
# CHECK: cgr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 31(%r3)
# CHECK: cgr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 32(%r3)
# CHECK: cgr %r4, [[REG]]
# CHECK: jge [[LABEL]]
branch_blocks = 8
main_size = 0xffcc
print '@global = global i32 0'
print 'define void @f1(i8 *%base, i8 *%stop, i64 %limit) {'
print 'entry:'
print ' br label %before0'
print ''
for i in xrange(branch_blocks):
next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
print 'before%d:' % i
print ' %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i)
print ' %%bcur%d = load i8 , i8 *%%bstop%d' % (i, i)
print ' %%bext%d = sext i8 %%bcur%d to i64' % (i, i)
print ' %%btest%d = icmp eq i64 %%limit, %%bext%d' % (i, i)
print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
print ''
print '%s:' % next
a, b = 1, 1
for i in xrange(0, main_size, 6):
a, b = b, a + b
offset = 4096 + b % 500000
value = a % 256
print ' %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset)
print ' store volatile i8 %d, i8 *%%ptr%d' % (value, i)
for i in xrange(branch_blocks):
print ' %%astop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i + 25)
print ' %%acur%d = load i8 , i8 *%%astop%d' % (i, i)
print ' %%aext%d = sext i8 %%acur%d to i64' % (i, i)
print ' %%atest%d = icmp eq i64 %%limit, %%aext%d' % (i, i)
print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
print ''
print 'after%d:' % i
print ' %dummy = load volatile i32, i32 *@global'
print ' ret void'
print '}'
| 3,777 | 31.852174 | 76 | py |
LowFat | LowFat-master/llvm-4.0.0.src/test/CodeGen/SystemZ/Large/branch-range-02.py | # Test normal conditional branches in cases where block alignments cause
# some branches to be out of range.
# RUN: python %s | llc -mtriple=s390x-linux-gnu -align-all-blocks=8 | FileCheck %s
# Construct:
#
# b0:
# conditional branch to end
# ...
# b<N>:
# conditional branch to end
# b<N+1>:
# conditional branch to b0
# ...
# b<2*N>:
# conditional branch to b0
# end:
#
# with N == 256 + 4. The -align-all-blocks=8 option ensures that all blocks
# are 256 bytes in size. The first 4 blocks and the last 4 blocks are then
# out of range.
#
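# Editorial sanity check (an addition; it assumes the +-64KiB reach of the
# short relative branches discussed above): the span across all aligned
# blocks exceeds that reach, so the outermost branches must be relaxed.
assert (256 + 4) * 256 > 0x10000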
# CHECK: c %r4, 0(%r3)
# CHECK: jge [[LABEL:\.L[^ ]*]]
# CHECK: c %r4, 4(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 8(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 12(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 16(%r3)
# CHECK: je [[LABEL]]
# CHECK: c %r4, 20(%r3)
# CHECK: je [[LABEL]]
# CHECK: c %r4, 24(%r3)
# CHECK: je [[LABEL]]
# CHECK: c %r4, 28(%r3)
# CHECK: je [[LABEL]]
# ...lots of other blocks...
# CHECK: c %r4, 1004(%r3)
# CHECK: je [[LABEL:\.L[^ ]*]]
# CHECK: c %r4, 1008(%r3)
# CHECK: je [[LABEL]]
# CHECK: c %r4, 1012(%r3)
# CHECK: je [[LABEL]]
# CHECK: c %r4, 1016(%r3)
# CHECK: je [[LABEL]]
# CHECK: c %r4, 1020(%r3)
# CHECK: je [[LABEL]]
# CHECK: c %r4, 1024(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 1028(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 1032(%r3)
# CHECK: jge [[LABEL]]
# CHECK: c %r4, 1036(%r3)
# CHECK: jge [[LABEL]]
blocks = 256 + 4
print 'define void @f1(i8 *%base, i32 *%stop, i32 %limit) {'
print 'entry:'
print ' br label %b0'
print ''
a, b = 1, 1
for i in xrange(blocks):
a, b = b, a + b
value = a % 256
next = 'b%d' % (i + 1) if i + 1 < blocks else 'end'
other = 'end' if 2 * i < blocks else 'b0'
print 'b%d:' % i
print ' store volatile i8 %d, i8 *%%base' % value
print ' %%astop%d = getelementptr i32, i32 *%%stop, i64 %d' % (i, i)
print ' %%acur%d = load i32 , i32 *%%astop%d' % (i, i)
print ' %%atest%d = icmp eq i32 %%limit, %%acur%d' % (i, i)
print ' br i1 %%atest%d, label %%%s, label %%%s' % (i, other, next)
print ''
print '%s:' % next
print ' ret void'
print '}'
| 2,121 | 24.566265 | 82 | py |
LowFat | LowFat-master/llvm-4.0.0.src/test/CodeGen/SystemZ/Large/spill-02.py | # Test cases where we spill from one frame index to another, both of which
# are out of range of MVC, and both of which need emergency spill slots.
# RUN: python %s | llc -mtriple=s390x-linux-gnu | FileCheck %s
# CHECK: f1:
# CHECK: %fallthru
# CHECK-DAG: stg [[REG1:%r[0-9]+]], 8168(%r15)
# CHECK-DAG: stg [[REG2:%r[0-9]+]], 8176(%r15)
# CHECK-DAG: lay [[REG3:%r[0-9]+]], 8192(%r15)
# CHECK-DAG: lay [[REG4:%r[0-9]+]], 4096(%r15)
# CHECK: mvc 0(8,[[REG3]]), 4088([[REG4]])
# CHECK-DAG: lg [[REG1]], 8168(%r15)
# CHECK-DAG: lg [[REG2]], 8176(%r15)
# CHECK: %skip
# CHECK: br %r14
# Arrange for %foo's spill slot to be at 8184(%r15) and the alloca area to be at
# 8192(%r15). The two emergency spill slots live below that, so this requires
# the first 8168 bytes to be used for the call. 160 of these bytes are
# allocated for the ABI frame. There are also 5 argument registers, one of
# which is used as a base pointer.
args = (8168 - 160) / 8 + (5 - 1)
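# Editorial sanity check (an addition): the 160-byte ABI frame plus the
# stack-passed arguments (all but the 4 usable register slots) fill the
# first 8168 bytes exactly.
assert 160 + (args - (5 - 1)) * 8 == 8168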
print 'declare i64 *@foo(i64 *%s)' % (', i64' * args)
print 'declare void @bar(i64 *)'
print ''
print 'define i64 @f1(i64 %foo) {'
print 'entry:'
# Make the allocation big, so that it goes at the top of the frame.
print ' %array = alloca [1000 x i64]'
print ' %area = getelementptr [1000 x i64], [1000 x i64] *%array, i64 0, i64 0'
print ' %%base = call i64 *@foo(i64 *%%area%s)' % (', i64 0' * args)
print ''
# Make sure all GPRs are used. One is needed for the stack pointer and
# another for %base, so we need 14 live values.
count = 14
for i in range(count):
print ' %%ptr%d = getelementptr i64, i64 *%%base, i64 %d' % (i, i / 2)
print ' %%val%d = load volatile i64 , i64 *%%ptr%d' % (i, i)
print ''
# Encourage the register allocator to give preference to these %vals
# by using them several times.
for j in range(4):
for i in range(count):
print ' store volatile i64 %%val%d, i64 *%%ptr%d' % (i, i)
print ''
# Copy the incoming argument, which we expect to be spilled, to the frame
# index for the alloca area. Also throw in a volatile store, so that this
# block cannot be reordered with the surrounding code.
print ' %cond = icmp eq i64 %val0, %val1'
print ' br i1 %cond, label %skip, label %fallthru'
print ''
print 'fallthru:'
print ' store i64 %foo, i64 *%area'
print ' store volatile i64 %val0, i64 *%ptr0'
print ' br label %skip'
print ''
print 'skip:'
# Use each %val a few more times to emphasise the point, and to make sure
# that they are live across the store of %foo.
for j in range(4):
for i in range(count):
print ' store volatile i64 %%val%d, i64 *%%ptr%d' % (i, i)
print ''
print ' call void @bar(i64 *%area)'
print ' ret i64 0'
print '}'
| 2,688 | 35.337838 | 80 | py |
LowFat | LowFat-master/llvm-4.0.0.src/test/CodeGen/SystemZ/Large/branch-range-10.py | # Test 64-bit COMPARE LOGICAL AND BRANCH in cases where the sheer number of
# instructions causes some branches to be out of range.
# RUN: python %s | llc -mtriple=s390x-linux-gnu | FileCheck %s
# Construct:
#
# before0:
# conditional branch to after0
# ...
# beforeN:
# conditional branch to after0
# main:
# 0xffcc bytes, from MVIY instructions
# conditional branch to main
# after0:
# ...
# conditional branch to main
# afterN:
#
# Each conditional branch sequence occupies 12 bytes if it uses a short
# branch and 16 if it uses a long one. The ones before "main:" have to
# take the branch length into account, which is 6 for short branches,
# so the final (0x34 - 6) / 12 == 3 blocks can use short branches.
# The ones after "main:" do not, so the first 0x34 / 12 == 4 blocks
# can use short branches. The conservative algorithm we use makes
# one of the forward branches unnecessarily long, as noted in the
# check output below.
#
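# Editorial sanity check of the arithmetic above (an addition only):
assert (0x34 - 6) / 12 == 3 and 0x34 / 12 == 4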
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: clgr %r4, [[REG]]
# CHECK: jgl [[LABEL:\.L[^ ]*]]
# CHECK: lgb [[REG:%r[0-5]]], 1(%r3)
# CHECK: clgr %r4, [[REG]]
# CHECK: jgl [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 2(%r3)
# CHECK: clgr %r4, [[REG]]
# CHECK: jgl [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 3(%r3)
# CHECK: clgr %r4, [[REG]]
# CHECK: jgl [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 4(%r3)
# CHECK: clgr %r4, [[REG]]
# CHECK: jgl [[LABEL]]
# ...as mentioned above, the next one could be a CLGRJL instead...
# CHECK: lgb [[REG:%r[0-5]]], 5(%r3)
# CHECK: clgr %r4, [[REG]]
# CHECK: jgl [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 6(%r3)
# CHECK: clgrjl %r4, [[REG]], [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 7(%r3)
# CHECK: clgrjl %r4, [[REG]], [[LABEL]]
# ...main goes here...
# CHECK: lgb [[REG:%r[0-5]]], 25(%r3)
# CHECK: clgrjl %r4, [[REG]], [[LABEL:\.L[^ ]*]]
# CHECK: lgb [[REG:%r[0-5]]], 26(%r3)
# CHECK: clgrjl %r4, [[REG]], [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 27(%r3)
# CHECK: clgrjl %r4, [[REG]], [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 28(%r3)
# CHECK: clgrjl %r4, [[REG]], [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 29(%r3)
# CHECK: clgr %r4, [[REG]]
# CHECK: jgl [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 30(%r3)
# CHECK: clgr %r4, [[REG]]
# CHECK: jgl [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 31(%r3)
# CHECK: clgr %r4, [[REG]]
# CHECK: jgl [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 32(%r3)
# CHECK: clgr %r4, [[REG]]
# CHECK: jgl [[LABEL]]
branch_blocks = 8
main_size = 0xffcc
print '@global = global i32 0'
print 'define void @f1(i8 *%base, i8 *%stop, i64 %limit) {'
print 'entry:'
print ' br label %before0'
print ''
for i in xrange(branch_blocks):
next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
print 'before%d:' % i
print ' %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i)
print ' %%bcur%d = load i8 , i8 *%%bstop%d' % (i, i)
print ' %%bext%d = sext i8 %%bcur%d to i64' % (i, i)
print ' %%btest%d = icmp ult i64 %%limit, %%bext%d' % (i, i)
print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
print ''
print '%s:' % next
a, b = 1, 1
for i in xrange(0, main_size, 6):
a, b = b, a + b
offset = 4096 + b % 500000
value = a % 256
print ' %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset)
print ' store volatile i8 %d, i8 *%%ptr%d' % (value, i)
for i in xrange(branch_blocks):
print ' %%astop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i + 25)
print ' %%acur%d = load i8 , i8 *%%astop%d' % (i, i)
print ' %%aext%d = sext i8 %%acur%d to i64' % (i, i)
print ' %%atest%d = icmp ult i64 %%limit, %%aext%d' % (i, i)
print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
print ''
print 'after%d:' % i
print ' %dummy = load volatile i32, i32 *@global'
print ' ret void'
print '}'
| 3,804 | 32.086957 | 76 | py |
LowFat | LowFat-master/llvm-4.0.0.src/test/CodeGen/SystemZ/Large/branch-range-03.py | # Test 32-bit COMPARE AND BRANCH in cases where the sheer number of
# instructions causes some branches to be out of range.
# RUN: python %s | llc -mtriple=s390x-linux-gnu | FileCheck %s
# Construct:
#
# before0:
# conditional branch to after0
# ...
# beforeN:
# conditional branch to after0
# main:
# 0xffcc bytes, from MVIY instructions
# conditional branch to main
# after0:
# ...
# conditional branch to main
# afterN:
#
# Each conditional branch sequence occupies 12 bytes if it uses a short
# branch and 14 if it uses a long one. The ones before "main:" have to
# take the branch length into account, which is 6 for short branches,
# so the final (0x34 - 6) / 12 == 3 blocks can use short branches.
# The ones after "main:" do not, so the first 0x34 / 12 == 4 blocks
# can use short branches.
#
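# Editorial sanity check of the arithmetic above (an addition only):
assert (0x34 - 6) / 12 == 3 and 0x34 / 12 == 4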
# CHECK: lb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cr %r4, [[REG]]
# CHECK: jge [[LABEL:\.L[^ ]*]]
# CHECK: lb [[REG:%r[0-5]]], 1(%r3)
# CHECK: cr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 2(%r3)
# CHECK: cr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 3(%r3)
# CHECK: cr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 4(%r3)
# CHECK: cr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 5(%r3)
# CHECK: crje %r4, [[REG]], [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 6(%r3)
# CHECK: crje %r4, [[REG]], [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 7(%r3)
# CHECK: crje %r4, [[REG]], [[LABEL]]
# ...main goes here...
# CHECK: lb [[REG:%r[0-5]]], 25(%r3)
# CHECK: crje %r4, [[REG]], [[LABEL:\.L[^ ]*]]
# CHECK: lb [[REG:%r[0-5]]], 26(%r3)
# CHECK: crje %r4, [[REG]], [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 27(%r3)
# CHECK: crje %r4, [[REG]], [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 28(%r3)
# CHECK: crje %r4, [[REG]], [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 29(%r3)
# CHECK: cr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 30(%r3)
# CHECK: cr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 31(%r3)
# CHECK: cr %r4, [[REG]]
# CHECK: jge [[LABEL]]
# CHECK: lb [[REG:%r[0-5]]], 32(%r3)
# CHECK: cr %r4, [[REG]]
# CHECK: jge [[LABEL]]
branch_blocks = 8
main_size = 0xffcc
print '@global = global i32 0'
print 'define void @f1(i8 *%base, i8 *%stop, i32 %limit) {'
print 'entry:'
print ' br label %before0'
print ''
for i in xrange(branch_blocks):
next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
print 'before%d:' % i
print ' %%bstop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i)
print ' %%bcur%d = load i8 , i8 *%%bstop%d' % (i, i)
print ' %%bext%d = sext i8 %%bcur%d to i32' % (i, i)
print ' %%btest%d = icmp eq i32 %%limit, %%bext%d' % (i, i)
print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
print ''
print '%s:' % next
a, b = 1, 1
for i in xrange(0, main_size, 6):
a, b = b, a + b
offset = 4096 + b % 500000
value = a % 256
print ' %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset)
print ' store volatile i8 %d, i8 *%%ptr%d' % (value, i)
for i in xrange(branch_blocks):
print ' %%astop%d = getelementptr i8, i8 *%%stop, i64 %d' % (i, i + 25)
print ' %%acur%d = load i8 , i8 *%%astop%d' % (i, i)
print ' %%aext%d = sext i8 %%acur%d to i32' % (i, i)
print ' %%atest%d = icmp eq i32 %%limit, %%aext%d' % (i, i)
print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
print ''
print 'after%d:' % i
print ' %dummy = load volatile i32, i32 *@global'
print ' ret void'
print '}'
| 3,540 | 30.900901 | 76 | py |
LowFat | LowFat-master/llvm-4.0.0.src/test/CodeGen/SystemZ/Large/branch-range-06.py | # Test 64-bit COMPARE IMMEDIATE AND BRANCH in cases where the sheer number of
# instructions causes some branches to be out of range.
# RUN: python %s | llc -mtriple=s390x-linux-gnu | FileCheck %s
# Construct:
#
# before0:
# conditional branch to after0
# ...
# beforeN:
# conditional branch to after0
# main:
# 0xffcc bytes, from MVIY instructions
# conditional branch to main
# after0:
# ...
# conditional branch to main
# afterN:
#
# Each conditional branch sequence occupies 12 bytes if it uses a short
# branch and 16 if it uses a long one. The ones before "main:" have to
# take the branch length into account, which is 6 for short branches,
# so the final (0x34 - 6) / 12 == 3 blocks can use short branches.
# The ones after "main:" do not, so the first 0x34 / 12 == 4 blocks
# can use short branches. The conservative algorithm we use makes
# one of the forward branches unnecessarily long, as noted in the
# check output below.
#
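# Editorial sanity check of the arithmetic above (an addition only):
assert (0x34 - 6) / 12 == 3 and 0x34 / 12 == 4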
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cghi [[REG]], 50
# CHECK: jgl [[LABEL:\.L[^ ]*]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cghi [[REG]], 51
# CHECK: jgl [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cghi [[REG]], 52
# CHECK: jgl [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cghi [[REG]], 53
# CHECK: jgl [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cghi [[REG]], 54
# CHECK: jgl [[LABEL]]
# ...as mentioned above, the next one could be a CGIJL instead...
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cghi [[REG]], 55
# CHECK: jgl [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cgijl [[REG]], 56, [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cgijl [[REG]], 57, [[LABEL]]
# ...main goes here...
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cgijl [[REG]], 100, [[LABEL:\.L[^ ]*]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cgijl [[REG]], 101, [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cgijl [[REG]], 102, [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cgijl [[REG]], 103, [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cghi [[REG]], 104
# CHECK: jgl [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cghi [[REG]], 105
# CHECK: jgl [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cghi [[REG]], 106
# CHECK: jgl [[LABEL]]
# CHECK: lgb [[REG:%r[0-5]]], 0(%r3)
# CHECK: cghi [[REG]], 107
# CHECK: jgl [[LABEL]]
branch_blocks = 8
main_size = 0xffcc
print '@global = global i32 0'
print 'define void @f1(i8 *%base, i8 *%stop) {'
print 'entry:'
print ' br label %before0'
print ''
for i in xrange(branch_blocks):
next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
print 'before%d:' % i
print ' %%bcur%d = load i8 , i8 *%%stop' % i
print ' %%bext%d = sext i8 %%bcur%d to i64' % (i, i)
print ' %%btest%d = icmp slt i64 %%bext%d, %d' % (i, i, i + 50)
print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
print ''
print '%s:' % next
a, b = 1, 1
for i in xrange(0, main_size, 6):
a, b = b, a + b
offset = 4096 + b % 500000
value = a % 256
print ' %%ptr%d = getelementptr i8, i8 *%%base, i64 %d' % (i, offset)
print ' store volatile i8 %d, i8 *%%ptr%d' % (value, i)
for i in xrange(branch_blocks):
print ' %%acur%d = load i8 , i8 *%%stop' % i
print ' %%aext%d = sext i8 %%acur%d to i64' % (i, i)
print ' %%atest%d = icmp slt i64 %%aext%d, %d' % (i, i, i + 100)
print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
print ''
print 'after%d:' % i
print ' %dummy = load volatile i32, i32 *@global'
print ' ret void'
print '}'
| 3,613 | 30.982301 | 77 | py |
LowFat | LowFat-master/llvm-4.0.0.src/docs/conf.py | # -*- coding: utf-8 -*-
#
# LLVM documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from datetime import date
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'LLVM'
copyright = u'2003-%d, LLVM Project' % date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4'
# The full version, including alpha/beta/rc tags.
release = '4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%Y-%m-%d'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'llvm-theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = { "nosidebar": True }
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'index': 'indexsidebar.html'}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'LLVMdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'LLVM.tex', u'LLVM Documentation',
u'LLVM project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = []
# Automatically derive the list of man pages from the contents of the command
# guide subdirectory.
basedir = os.path.dirname(__file__)
man_page_authors = "Maintained by The LLVM Team (http://llvm.org/)."
command_guide_subpath = 'CommandGuide'
command_guide_path = os.path.join(basedir, command_guide_subpath)
for name in os.listdir(command_guide_path):
# Ignore non-ReST files and the index page.
if not name.endswith('.rst') or name in ('index.rst',):
continue
# Otherwise, automatically extract the description.
file_subpath = os.path.join(command_guide_subpath, name)
with open(os.path.join(command_guide_path, name)) as f:
title = f.readline().rstrip('\n')
header = f.readline().rstrip('\n')
if len(header) != len(title):
print >>sys.stderr, (
"error: invalid header in %r (does not match title)" % (
file_subpath,))
if ' - ' not in title:
print >>sys.stderr, (
("error: invalid title in %r "
"(expected '<name> - <description>')") % (
file_subpath,))
# Split the name out of the title.
name,description = title.split(' - ', 1)
man_pages.append((file_subpath.replace('.rst',''), name,
description, man_page_authors, 1))
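# For example (editorial note, assuming the usual title line in
# CommandGuide/FileCheck.rst): that file would contribute roughly
# ('CommandGuide/FileCheck', 'FileCheck',
#  'Flexible pattern matching file verifier', man_page_authors, 1).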
# If true, show URL addresses after external links.
#man_show_urls = False
# FIXME: Define intersphinx configuration.
intersphinx_mapping = {}
| 8,478 | 32.38189 | 80 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/update_test_checks.py | #!/usr/bin/env python2.7
"""A script to generate FileCheck statements for regression tests.
This script is a utility to update LLVM opt or llc test cases with new
FileCheck patterns. It can either update all of the tests in the file or
a single test function.
Example usage:
$ update_test_checks.py --tool=../bin/opt test/foo.ll
Workflow:
1. Make a compiler patch that requires updating some number of FileCheck lines
in regression test files.
2. Save the patch and revert it from your local work area.
3. Update the RUN-lines in the affected regression tests to look canonical.
Example: "; RUN: opt < %s -instcombine -S | FileCheck %s"
4. Refresh the FileCheck lines for either the entire file or select functions by
running this script.
5. Commit the fresh baseline of checks.
6. Apply your patch from step 1 and rebuild your local binaries.
7. Re-run this script on affected regression tests.
8. Check the diffs to ensure the script has done something reasonable.
9. Submit a patch including the regression test diffs for review.
A common pattern is to have the script insert complete checking of every
instruction. Then, edit it down to only check the relevant instructions.
The script is designed to make adding checks to a test case fast; it is *not*
designed to be authoritative about what constitutes a good test!
"""
import argparse
import itertools
import os # Used to advertise this file's name ("autogenerated_note").
import string
import subprocess
import sys
import tempfile
import re
ADVERT = '; NOTE: Assertions have been autogenerated by '
# RegEx: this is where the magic happens.
SCRUB_LEADING_WHITESPACE_RE = re.compile(r'^(\s+)')
SCRUB_WHITESPACE_RE = re.compile(r'(?!^(| \w))[ \t]+', flags=re.M)
SCRUB_TRAILING_WHITESPACE_RE = re.compile(r'[ \t]+$', flags=re.M)
SCRUB_X86_SHUFFLES_RE = (
re.compile(
r'^(\s*\w+) [^#\n]+#+ ((?:[xyz]mm\d+|mem)( \{%k\d+\}( \{z\})?)? = .*)$',
flags=re.M))
SCRUB_X86_SP_RE = re.compile(r'\d+\(%(esp|rsp)\)')
SCRUB_X86_RIP_RE = re.compile(r'[.\w]+\(%rip\)')
SCRUB_X86_LCP_RE = re.compile(r'\.LCPI[0-9]+_[0-9]+')
SCRUB_KILL_COMMENT_RE = re.compile(r'^ *#+ +kill:.*\n')
SCRUB_IR_COMMENT_RE = re.compile(r'\s*;.*')
RUN_LINE_RE = re.compile('^\s*;\s*RUN:\s*(.*)$')
IR_FUNCTION_RE = re.compile('^\s*define\s+(?:internal\s+)?[^@]*@([\w-]+)\s*\(')
LLC_FUNCTION_RE = re.compile(
r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@(?P=func)\n[^:]*?'
r'(?P<body>^##?[ \t]+[^:]+:.*?)\s*'
r'^\s*(?:[^:\n]+?:\s*\n\s*\.size|\.cfi_endproc|\.globl|\.comm|\.(?:sub)?section)',
flags=(re.M | re.S))
OPT_FUNCTION_RE = re.compile(
r'^\s*define\s+(?:internal\s+)?[^@]*@(?P<func>[\w-]+?)\s*\('
r'(\s+)?[^)]*[^{]*\{\n(?P<body>.*?)^\}$',
flags=(re.M | re.S))
CHECK_PREFIX_RE = re.compile('--check-prefix=(\S+)')
CHECK_RE = re.compile(r'^\s*;\s*([^:]+?)(?:-NEXT|-NOT|-DAG|-LABEL)?:')
# Match things that look like identifiers, but only if they are followed by
# spaces, commas, parens, or the end of the string.
IR_VALUE_RE = re.compile(r'(\s+)%(.+?)([,\s\(\)]|\Z)')
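# Illustrative example of IR_VALUE_RE (an editorial addition): the captured
# names below are exactly what genericize_check_lines() rewrites later.
assert [m.group(2) for m in
        IR_VALUE_RE.finditer(' %tmp = add i32 %a, %b')] == ['tmp', 'a', 'b']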
# Invoke the tool that is being tested.
def invoke_tool(args, cmd_args, ir):
with open(ir) as ir_file:
stdout = subprocess.check_output(args.tool_binary + ' ' + cmd_args,
shell=True, stdin=ir_file)
# Fix line endings to unix CR style.
stdout = stdout.replace('\r\n', '\n')
return stdout
# FIXME: Separate the x86-specific scrubbers, so this can be used for other targets.
def scrub_asm(asm):
# Detect shuffle asm comments and hide the operands in favor of the comments.
asm = SCRUB_X86_SHUFFLES_RE.sub(r'\1 {{.*#+}} \2', asm)
# Generically match the stack offset of a memory operand.
asm = SCRUB_X86_SP_RE.sub(r'{{[0-9]+}}(%\1)', asm)
# Generically match a RIP-relative memory operand.
asm = SCRUB_X86_RIP_RE.sub(r'{{.*}}(%rip)', asm)
# Generically match a LCP symbol.
asm = SCRUB_X86_LCP_RE.sub(r'{{\.LCPI.*}}', asm)
# Strip kill operands inserted into the asm.
asm = SCRUB_KILL_COMMENT_RE.sub('', asm)
return asm
def scrub_body(body, tool_basename):
# Scrub runs of whitespace out of the assembly, but leave the leading
# whitespace in place.
body = SCRUB_WHITESPACE_RE.sub(r' ', body)
# Expand the tabs used for indentation.
body = string.expandtabs(body, 2)
# Strip trailing whitespace.
body = SCRUB_TRAILING_WHITESPACE_RE.sub(r'', body)
if tool_basename == "llc":
body = scrub_asm(body)
return body
# Build up a dictionary of all the function bodies.
def build_function_body_dictionary(raw_tool_output, prefixes, func_dict, verbose, tool_basename):
if tool_basename == "llc":
func_regex = LLC_FUNCTION_RE
else:
func_regex = OPT_FUNCTION_RE
for m in func_regex.finditer(raw_tool_output):
if not m:
continue
func = m.group('func')
scrubbed_body = scrub_body(m.group('body'), tool_basename)
if func.startswith('stress'):
# We only use the last line of the function body for stress tests.
scrubbed_body = '\n'.join(scrubbed_body.splitlines()[-1:])
if verbose:
print >>sys.stderr, 'Processing function: ' + func
for l in scrubbed_body.splitlines():
print >>sys.stderr, ' ' + l
for prefix in prefixes:
if func in func_dict[prefix] and func_dict[prefix][func] != scrubbed_body:
if prefix == prefixes[-1]:
print >>sys.stderr, ('WARNING: Found conflicting asm under the '
'same prefix: %r!' % (prefix,))
else:
func_dict[prefix][func] = None
continue
func_dict[prefix][func] = scrubbed_body
# Create a FileCheck variable name based on an IR name.
def get_value_name(var):
if var.isdigit():
var = 'TMP' + var
var = var.replace('.', '_')
return var.upper()
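# For example (editorial addition): numeric names gain a TMP prefix and
# dots become underscores.
assert get_value_name('5') == 'TMP5' and get_value_name('x.y') == 'X_Y'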
# Create a FileCheck variable from regex.
def get_value_definition(var):
return '[[' + get_value_name(var) + ':%.*]]'
# Use a FileCheck variable.
def get_value_use(var):
return '[[' + get_value_name(var) + ']]'
# Replace IR value defs and uses with FileCheck variables.
def genericize_check_lines(lines):
# This gets called for each match that occurs in
# a line. We transform variables we haven't seen
# into defs, and variables we have seen into uses.
def transform_line_vars(match):
var = match.group(2)
if var in vars_seen:
rv = get_value_use(var)
else:
vars_seen.add(var)
rv = get_value_definition(var)
# re.sub replaces the entire regex match
# with whatever you return, so we have
# to make sure to hand it back everything
# including the commas and spaces.
return match.group(1) + rv + match.group(3)
vars_seen = set()
lines_with_def = []
for i, line in enumerate(lines):
# An IR variable named '%.' matches the FileCheck regex string.
line = line.replace('%.', '%dot')
# Ignore any comments, since the check lines will too.
scrubbed_line = SCRUB_IR_COMMENT_RE.sub(r'', line)
lines[i] = IR_VALUE_RE.sub(transform_line_vars, scrubbed_line)
return lines
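# Illustrative example (editorial addition): within a line, the first
# occurrence of a variable becomes a FileCheck definition and later
# occurrences become uses.
assert (genericize_check_lines([' %tmp = add i32 %tmp, 1']) ==
        [' [[TMP:%.*]] = add i32 [[TMP]], 1'])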
def add_checks(output_lines, prefix_list, func_dict, func_name, tool_basename):
# Select a label format based on the whether we're checking asm or IR.
if tool_basename == "llc":
check_label_format = "; %s-LABEL: %s:"
else:
check_label_format = "; %s-LABEL: @%s("
printed_prefixes = []
for checkprefixes, _ in prefix_list:
for checkprefix in checkprefixes:
if checkprefix in printed_prefixes:
break
if not func_dict[checkprefix][func_name]:
continue
# Add some space between different check prefixes, but not after the last
# check line (before the test code).
#if len(printed_prefixes) != 0:
# output_lines.append(';')
printed_prefixes.append(checkprefix)
output_lines.append(check_label_format % (checkprefix, func_name))
func_body = func_dict[checkprefix][func_name].splitlines()
# For IR output, change all defs to FileCheck variables, so we're immune
# to variable naming fashions.
if tool_basename == "opt":
func_body = genericize_check_lines(func_body)
# This could be selectively enabled with an optional invocation argument.
# Disabled for now: better to check everything. Be safe rather than sorry.
# Handle the first line of the function body as a special case because
# it's often just noise (a useless asm comment or entry label).
#if func_body[0].startswith("#") or func_body[0].startswith("entry:"):
# is_blank_line = True
#else:
# output_lines.append('; %s: %s' % (checkprefix, func_body[0]))
# is_blank_line = False
# For llc tests, there may be asm directives between the label and the
# first checked line (most likely that first checked line is "# BB#0").
if tool_basename == "opt":
is_blank_line = False
else:
      is_blank_line = True
for func_line in func_body:
if func_line.strip() == '':
is_blank_line = True
continue
# Do not waste time checking IR comments.
if tool_basename == "opt":
func_line = SCRUB_IR_COMMENT_RE.sub(r'', func_line)
# Skip blank lines instead of checking them.
      if is_blank_line:
output_lines.append('; %s: %s' % (checkprefix, func_line))
else:
output_lines.append('; %s-NEXT: %s' % (checkprefix, func_line))
is_blank_line = False
# Add space between different check prefixes and also before the first
# line of code in the test function.
output_lines.append(';')
break
return output_lines
def should_add_line_to_output(input_line, prefix_set):
# Skip any blank comment lines in the IR.
if input_line.strip() == ';':
return False
# Skip any blank lines in the IR.
#if input_line.strip() == '':
# return False
# And skip any CHECK lines. We're building our own.
m = CHECK_RE.match(input_line)
if m and m.group(1) in prefix_set:
return False
return True
def main():
from argparse import RawTextHelpFormatter
parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument('-v', '--verbose', action='store_true',
help='Show verbose output')
parser.add_argument('--tool-binary', default='llc',
help='The tool used to generate the test case')
parser.add_argument(
'--function', help='The function in the test file to update')
parser.add_argument('tests', nargs='+')
args = parser.parse_args()
autogenerated_note = (ADVERT + 'utils/' + os.path.basename(__file__))
tool_basename = os.path.basename(args.tool_binary)
  if tool_basename not in ("llc", "opt"):
print >>sys.stderr, 'ERROR: Unexpected tool name: ' + tool_basename
sys.exit(1)
for test in args.tests:
if args.verbose:
print >>sys.stderr, 'Scanning for RUN lines in test file: %s' % (test,)
with open(test) as f:
input_lines = [l.rstrip() for l in f]
raw_lines = [m.group(1)
for m in [RUN_LINE_RE.match(l) for l in input_lines] if m]
run_lines = [raw_lines[0]] if len(raw_lines) > 0 else []
for l in raw_lines[1:]:
if run_lines[-1].endswith("\\"):
run_lines[-1] = run_lines[-1].rstrip("\\") + " " + l
else:
run_lines.append(l)
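    # At this point a continued RUN line such as (illustrative)
    #   ; RUN: llc < %s -mtriple=x86_64-- \
    #   ; RUN:   | FileCheck %s
    # has been folded into the single logical line
    #   'llc < %s -mtriple=x86_64-- | FileCheck %s' (modulo whitespace).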
if args.verbose:
print >>sys.stderr, 'Found %d RUN lines:' % (len(run_lines),)
for l in run_lines:
print >>sys.stderr, ' RUN: ' + l
prefix_list = []
for l in run_lines:
(tool_cmd, filecheck_cmd) = tuple([cmd.strip() for cmd in l.split('|', 1)])
if not tool_cmd.startswith(tool_basename + ' '):
print >>sys.stderr, 'WARNING: Skipping non-%s RUN line: %s' % (tool_basename, l)
continue
if not filecheck_cmd.startswith('FileCheck '):
print >>sys.stderr, 'WARNING: Skipping non-FileChecked RUN line: ' + l
continue
tool_cmd_args = tool_cmd[len(tool_basename):].strip()
tool_cmd_args = tool_cmd_args.replace('< %s', '').replace('%s', '').strip()
check_prefixes = [m.group(1)
for m in CHECK_PREFIX_RE.finditer(filecheck_cmd)]
if not check_prefixes:
check_prefixes = ['CHECK']
      # FIXME: We should use multiple check prefixes to share common check
      # lines. For now, we just ignore all but the last.
prefix_list.append((check_prefixes, tool_cmd_args))
func_dict = {}
for prefixes, _ in prefix_list:
for prefix in prefixes:
func_dict.update({prefix: dict()})
for prefixes, tool_args in prefix_list:
if args.verbose:
print >>sys.stderr, 'Extracted tool cmd: ' + tool_basename + ' ' + tool_args
print >>sys.stderr, 'Extracted FileCheck prefixes: ' + str(prefixes)
raw_tool_output = invoke_tool(args, tool_args, test)
build_function_body_dictionary(raw_tool_output, prefixes, func_dict, args.verbose, tool_basename)
is_in_function = False
is_in_function_start = False
prefix_set = set([prefix for prefixes, _ in prefix_list for prefix in prefixes])
if args.verbose:
print >>sys.stderr, 'Rewriting FileCheck prefixes: %s' % (prefix_set,)
output_lines = []
output_lines.append(autogenerated_note)
for input_line in input_lines:
if is_in_function_start:
if input_line == '':
continue
if input_line.lstrip().startswith(';'):
m = CHECK_RE.match(input_line)
if not m or m.group(1) not in prefix_set:
output_lines.append(input_line)
continue
# Print out the various check lines here.
output_lines = add_checks(output_lines, prefix_list, func_dict, name, tool_basename)
is_in_function_start = False
if is_in_function:
        if should_add_line_to_output(input_line, prefix_set):
# This input line of the function body will go as-is into the output.
# Except make leading whitespace uniform: 2 spaces.
input_line = SCRUB_LEADING_WHITESPACE_RE.sub(r' ', input_line)
output_lines.append(input_line)
else:
continue
if input_line.strip() == '}':
is_in_function = False
continue
# Discard any previous script advertising.
if input_line.startswith(ADVERT):
continue
# If it's outside a function, it just gets copied to the output.
output_lines.append(input_line)
m = IR_FUNCTION_RE.match(input_line)
if not m:
continue
name = m.group(1)
if args.function is not None and name != args.function:
# When filtering on a specific function, skip all others.
continue
is_in_function = is_in_function_start = True
if args.verbose:
      print >>sys.stderr, 'Writing %d lines to %s...' % (len(output_lines), test)
with open(test, 'wb') as f:
f.writelines([l + '\n' for l in output_lines])
if __name__ == '__main__':
main()
| 15,046 | 35.879902 | 103 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/extract_symbols.py | #!/usr/bin/env python
"""A tool for extracting a list of symbols to export
When exporting symbols from a dll or exe we either need to mark the symbols in
the source code as __declspec(dllexport) or supply a list of symbols to the
linker. This program automates the latter by inspecting the symbol tables of a
list of link inputs and deciding which of those symbols need to be exported.
We can't just export all the defined symbols, as there's a limit of 65535
exported symbols and in clang we go way over that, particularly in a debug
build. Therefore a large part of the work is pruning symbols, either ones which
can't be imported, or ones which we think have definitions in public header
files (i.e. template instantiations) and so would get defined anyway by
whatever imports these symbols.
"""
from __future__ import print_function
import sys
import re
import os
import subprocess
import multiprocessing
import argparse
# Define functions which extract a list of symbols from a library using several
# different tools. We use subprocess.Popen and yield a symbol at a time instead
# of using subprocess.check_output and returning a list as, especially on
# Windows, waiting for the entire output to be ready can take a significant
# amount of time.
def dumpbin_get_symbols(lib):
process = subprocess.Popen(['dumpbin','/symbols',lib], bufsize=1,
stdout=subprocess.PIPE, stdin=subprocess.PIPE,
universal_newlines=True)
process.stdin.close()
for line in process.stdout:
# Look for external symbols that are defined in some section
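        # e.g. an illustrative dumpbin line that this matches:
        #   '008 00000000 SECT3  notype ()    External     | foo'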
match = re.match("^.+SECT.+External\s+\|\s+(\S+).*$", line)
if match:
yield match.group(1)
process.wait()
def nm_get_symbols(lib):
process = subprocess.Popen(['nm',lib], bufsize=1,
stdout=subprocess.PIPE, stdin=subprocess.PIPE,
universal_newlines=True)
process.stdin.close()
for line in process.stdout:
# Look for external symbols that are defined in some section
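        # e.g. an illustrative nm line that this matches:
        #   '0000000000000010 T _ZN4llvm3fooEv'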
match = re.match("^\S+\s+[BDGRSTVW]\s+(\S+)$", line)
if match:
yield match.group(1)
process.wait()
def readobj_get_symbols(lib):
process = subprocess.Popen(['llvm-readobj','-symbols',lib], bufsize=1,
stdout=subprocess.PIPE, stdin=subprocess.PIPE,
universal_newlines=True)
process.stdin.close()
for line in process.stdout:
# When looking through the output of llvm-readobj we expect to see Name,
# Section, then StorageClass, so record Name and Section when we see
# them and decide if this is a defined external symbol when we see
# StorageClass.
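        # An illustrative fragment of that output (other fields omitted):
        #   Name: foo
        #   Section: .text
        #   StorageClass: External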
match = re.search('Name: (\S+)', line)
if match:
name = match.group(1)
match = re.search('Section: (\S+)', line)
if match:
section = match.group(1)
match = re.search('StorageClass: (\S+)', line)
if match:
storageclass = match.group(1)
if section != 'IMAGE_SYM_ABSOLUTE' and \
section != 'IMAGE_SYM_UNDEFINED' and \
storageclass == 'External':
yield name
process.wait()
# Define functions which determine if the target is 32-bit Windows (as that's
# where calling convention name decoration happens).
def dumpbin_is_32bit_windows(lib):
# dumpbin /headers can output a huge amount of data (>100MB in a debug
# build) so we read only up to the 'machine' line then close the output.
process = subprocess.Popen(['dumpbin','/headers',lib], bufsize=1,
stdout=subprocess.PIPE, stdin=subprocess.PIPE,
universal_newlines=True)
process.stdin.close()
retval = False
for line in process.stdout:
match = re.match('.+machine \((\S+)\)', line)
if match:
retval = (match.group(1) == 'x86')
break
process.stdout.close()
process.wait()
return retval
def objdump_is_32bit_windows(lib):
output = subprocess.check_output(['objdump','-f',lib],
universal_newlines=True)
    for line in output.splitlines():
match = re.match('.+file format (\S+)', line)
if match:
return (match.group(1) == 'pe-i386')
return False
def readobj_is_32bit_windows(lib):
output = subprocess.check_output(['llvm-readobj','-file-headers',lib],
universal_newlines=True)
    for line in output.splitlines():
match = re.match('Format: (\S+)', line)
if match:
return (match.group(1) == 'COFF-i386')
return False
# MSVC mangles names to ?<identifier_mangling>@<type_mangling>. By examining the
# identifier/type mangling we can decide which symbols could possibly be
# required and which we can discard.
def should_keep_microsoft_symbol(symbol, calling_convention_decoration):
# Keep unmangled (i.e. extern "C") names
    if '?' not in symbol:
if calling_convention_decoration:
# Remove calling convention decoration from names
match = re.match('[_@]([^@]+)', symbol)
if match:
return match.group(1)
return symbol
# Function template instantiations start with ?$; keep the instantiations of
    # clang::Type::getAs, as some of them are explicit specializations that are
# defined in clang's lib/AST/Type.cpp; discard the rest as it's assumed that
# the definition is public
elif re.match('\?\?\$getAs@.+@Type@clang@@', symbol):
return symbol
elif symbol.startswith('??$'):
return None
# Deleting destructors start with ?_G or ?_E and can be discarded because
# link.exe gives you a warning telling you they can't be exported if you
# don't
elif symbol.startswith('??_G') or symbol.startswith('??_E'):
return None
# Constructors (?0) and destructors (?1) of templates (?$) are assumed to be
# defined in headers and not required to be kept
elif symbol.startswith('??0?$') or symbol.startswith('??1?$'):
return None
# An anonymous namespace is mangled as ?A(maybe hex number)@. Any symbol
# that mentions an anonymous namespace can be discarded, as the anonymous
# namespace doesn't exist outside of that translation unit.
elif re.search('\?A(0x\w+)?@', symbol):
return None
# Keep mangled llvm:: and clang:: function symbols. How we detect these is a
# bit of a mess and imprecise, but that avoids having to completely demangle
# the symbol name. The outermost namespace is at the end of the identifier
# mangling, and the identifier mangling is followed by the type mangling, so
# we look for (llvm|clang)@@ followed by something that looks like a
# function type mangling. To spot a function type we use (this is derived
# from clang/lib/AST/MicrosoftMangle.cpp):
# <function-type> ::= <function-class> <this-cvr-qualifiers>
# <calling-convention> <return-type>
# <argument-list> <throw-spec>
# <function-class> ::= [A-Z]
# <this-cvr-qualifiers> ::= [A-Z0-9_]*
# <calling-convention> ::= [A-JQ]
# <return-type> ::= .+
# <argument-list> ::= X (void)
# ::= .+@ (list of types)
# ::= .*Z (list of types, varargs)
# <throw-spec> ::= exceptions are not allowed
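    # For instance '?dump@Value@llvm@@QEBAXXZ' (a plausible mangling of
    # 'void llvm::Value::dump() const') would match this pattern and be kept.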
elif re.search('(llvm|clang)@@[A-Z][A-Z0-9_]*[A-JQ].+(X|.+@|.*Z)$', symbol):
return symbol
return None
# Itanium manglings are of the form _Z<identifier_mangling><type_mangling>. We
# demangle the identifier mangling to identify symbols that can be safely
# discarded.
def should_keep_itanium_symbol(symbol, calling_convention_decoration):
# Start by removing any calling convention decoration (which we expect to
# see on all symbols, even mangled C++ symbols)
if calling_convention_decoration and symbol.startswith('_'):
symbol = symbol[1:]
# Keep unmangled names
if not symbol.startswith('_') and not symbol.startswith('.'):
return symbol
# Discard manglings that aren't nested names
match = re.match('_Z(T[VTIS])?(N.+)', symbol)
if not match:
return None
# Demangle the name. If the name is too complex then we don't need to keep
    # it, but if the demangling fails then keep the symbol just in case.
try:
names, _ = parse_itanium_nested_name(match.group(2))
except TooComplexName:
return None
if not names:
return symbol
# Constructors and destructors of templates classes are assumed to be
# defined in headers and not required to be kept
if re.match('[CD][123]', names[-1][0]) and names[-2][1]:
return None
# Keep the instantiations of clang::Type::getAs, as some of them are
    # explicit specializations that are defined in clang's lib/AST/Type.cpp;
# discard any other function template instantiations as it's assumed that
# the definition is public
elif symbol.startswith('_ZNK5clang4Type5getAs'):
return symbol
elif names[-1][1]:
return None
# Keep llvm:: and clang:: names
elif names[0][0] == '4llvm' or names[0][0] == '5clang':
return symbol
# Discard everything else
else:
return None
# Certain kinds of complex manglings we assume cannot be part of a public
# interface, and we handle them by raising an exception.
class TooComplexName(Exception):
pass
# Parse an itanium mangled name from the start of a string and return a
# (name, rest of string) pair.
def parse_itanium_name(arg):
# Check for a normal name
match = re.match('(\d+)(.+)', arg)
if match:
n = int(match.group(1))
name = match.group(1)+match.group(2)[:n]
rest = match.group(2)[n:]
return name, rest
# Check for constructor/destructor names
match = re.match('([CD][123])(.+)', arg)
if match:
return match.group(1), match.group(2)
# Assume that a sequence of characters that doesn't end a nesting is an
# operator (this is very imprecise, but appears to be good enough)
match = re.match('([^E]+)(.+)', arg)
if match:
return match.group(1), match.group(2)
# Anything else: we can't handle it
return None, arg
# Parse an itanium mangled template argument list from the start of a string
# and throw it away, returning the rest of the string.
def skip_itanium_template(arg):
# A template argument list starts with I
assert arg.startswith('I'), arg
tmp = arg[1:]
while tmp:
# Check for names
match = re.match('(\d+)(.+)', tmp)
if match:
n = int(match.group(1))
tmp = match.group(2)[n:]
continue
# Check for substitutions
match = re.match('S[A-Z0-9]*_(.+)', tmp)
if match:
tmp = match.group(1)
# Start of a template
elif tmp.startswith('I'):
tmp = skip_itanium_template(tmp)
# Start of a nested name
elif tmp.startswith('N'):
_, tmp = parse_itanium_nested_name(tmp)
# Start of an expression: assume that it's too complicated
elif tmp.startswith('L') or tmp.startswith('X'):
raise TooComplexName
# End of the template
elif tmp.startswith('E'):
return tmp[1:]
# Something else: probably a type, skip it
else:
tmp = tmp[1:]
return None
# Parse an itanium mangled nested name and transform it into a list of pairs of
# (name, is_template), returning (list, rest of string).
def parse_itanium_nested_name(arg):
# A nested name starts with N
assert arg.startswith('N'), arg
ret = []
# Skip past the N, and possibly a substitution
match = re.match('NS[A-Z0-9]*_(.+)', arg)
if match:
tmp = match.group(1)
else:
tmp = arg[1:]
# Skip past CV-qualifiers and ref qualifiers
    match = re.match('[rVKRO]*(.+)', tmp)
if match:
tmp = match.group(1)
# Repeatedly parse names from the string until we reach the end of the
# nested name
while tmp:
# An E ends the nested name
if tmp.startswith('E'):
return ret, tmp[1:]
# Parse a name
name_part, tmp = parse_itanium_name(tmp)
if not name_part:
# If we failed then we don't know how to demangle this
return None, None
is_template = False
        # If this name is a template, record that, then skip the template
# arguments
if tmp.startswith('I'):
tmp = skip_itanium_template(tmp)
is_template = True
# Add the name to the list
ret.append((name_part, is_template))
# If we get here then something went wrong
return None, None
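# A worked example (illustrative): for '_ZN4llvm2cl3optIbEC1Ev', a plausible
# mangling of 'llvm::cl::opt<bool>::opt()', the nested name parses to
#   ([('4llvm', False), ('2cl', False), ('3opt', True), ('C1', False)], 'v')
# and should_keep_itanium_symbol then discards the symbol because it is a
# constructor of a template class.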
def extract_symbols(arg):
get_symbols, should_keep_symbol, calling_convention_decoration, lib = arg
symbols = dict()
for symbol in get_symbols(lib):
symbol = should_keep_symbol(symbol, calling_convention_decoration)
if symbol:
symbols[symbol] = 1 + symbols.setdefault(symbol,0)
return symbols
if __name__ == '__main__':
tool_exes = ['dumpbin','nm','objdump','llvm-readobj']
parser = argparse.ArgumentParser(
description='Extract symbols to export from libraries')
parser.add_argument('--mangling', choices=['itanium','microsoft'],
required=True, help='expected symbol mangling scheme')
parser.add_argument('--tools', choices=tool_exes, nargs='*',
help='tools to use to extract symbols and determine the'
' target')
parser.add_argument('libs', metavar='lib', type=str, nargs='+',
help='libraries to extract symbols from')
parser.add_argument('-o', metavar='file', type=str, help='output to file')
args = parser.parse_args()
# Determine the function to use to get the list of symbols from the inputs,
# and the function to use to determine if the target is 32-bit windows.
tools = { 'dumpbin' : (dumpbin_get_symbols, dumpbin_is_32bit_windows),
'nm' : (nm_get_symbols, None),
'objdump' : (None, objdump_is_32bit_windows),
'llvm-readobj' : (readobj_get_symbols, readobj_is_32bit_windows) }
get_symbols = None
is_32bit_windows = None
# If we have a tools argument then use that for the list of tools to check
if args.tools:
tool_exes = args.tools
# Find a tool to use by trying each in turn until we find one that exists
# (subprocess.call will throw OSError when the program does not exist)
get_symbols = None
for exe in tool_exes:
try:
# Close std streams as we don't want any output and we don't
# want the process to wait for something on stdin.
p = subprocess.Popen([exe], stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
universal_newlines=True)
p.stdout.close()
p.stderr.close()
p.stdin.close()
p.wait()
# Keep going until we have a tool to use for both get_symbols and
# is_32bit_windows
if not get_symbols:
get_symbols = tools[exe][0]
if not is_32bit_windows:
is_32bit_windows = tools[exe][1]
if get_symbols and is_32bit_windows:
break
except OSError:
continue
if not get_symbols:
print("Couldn't find a program to read symbols with", file=sys.stderr)
exit(1)
if not is_32bit_windows:
print("Couldn't find a program to determing the target", file=sys.stderr)
exit(1)
# How we determine which symbols to keep and which to discard depends on
# the mangling scheme
if args.mangling == 'microsoft':
should_keep_symbol = should_keep_microsoft_symbol
else:
should_keep_symbol = should_keep_itanium_symbol
# Get the list of libraries to extract symbols from
libs = list()
for lib in args.libs:
# When invoked by cmake the arguments are the cmake target names of the
# libraries, so we need to add .lib/.a to the end and maybe lib to the
# start to get the filename. Also allow objects.
suffixes = ['.lib','.a','.obj','.o']
if not any([lib.endswith(s) for s in suffixes]):
for s in suffixes:
if os.path.exists(lib+s):
lib = lib+s
break
if os.path.exists('lib'+lib+s):
lib = 'lib'+lib+s
break
if not any([lib.endswith(s) for s in suffixes]):
print("Don't know what to do with argument "+lib, file=sys.stderr)
exit(1)
libs.append(lib)
# Check if calling convention decoration is used by inspecting the first
# library in the list
calling_convention_decoration = is_32bit_windows(libs[0])
# Extract symbols from libraries in parallel. This is a huge time saver when
# doing a debug build, as there are hundreds of thousands of symbols in each
# library.
pool = multiprocessing.Pool()
try:
# Only one argument can be passed to the mapping function, and we can't
# use a lambda or local function definition as that doesn't work on
# windows, so create a list of tuples which duplicates the arguments
# that are the same in all calls.
vals = [(get_symbols, should_keep_symbol, calling_convention_decoration, x) for x in libs]
# Do an async map then wait for the result to make sure that
# KeyboardInterrupt gets caught correctly (see
# http://bugs.python.org/issue8296)
result = pool.map_async(extract_symbols, vals)
pool.close()
libs_symbols = result.get(3600)
except KeyboardInterrupt:
# On Ctrl-C terminate everything and exit
pool.terminate()
pool.join()
exit(1)
# Merge everything into a single dict
symbols = dict()
for this_lib_symbols in libs_symbols:
for k,v in list(this_lib_symbols.items()):
symbols[k] = v + symbols.setdefault(k,0)
# Count instances of member functions of template classes, and map the
# symbol name to the function+class. We do this under the assumption that if
# a member function of a template class is instantiated many times it's
# probably declared in a public header file.
template_function_count = dict()
template_function_mapping = dict()
template_function_count[""] = 0
for k in symbols:
name = None
if args.mangling == 'microsoft':
# Member functions of templates start with
# ?<fn_name>@?$<class_name>@, so we map to <fn_name>@?$<class_name>.
# As manglings go from the innermost scope to the outermost scope
# this means:
# * When we have a function member of a subclass of a template
# class then <fn_name> will actually contain the mangling of
# both the subclass and the function member. This is fine.
# * When we have a function member of a template subclass of a
# (possibly template) class then it's the innermost template
# subclass that becomes <class_name>. This should be OK so long
# as we don't have multiple classes with a template subclass of
# the same name.
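            # e.g. (illustrative) '?push_back@?$SmallVector@HI@llvm@@QEAAXH@Z'
            # would map to 'push_back@?$SmallVector'.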
match = re.search("^\?(\??\w+\@\?\$\w+)\@", k)
if match:
name = match.group(1)
else:
# Find member functions of templates by demangling the name and
# checking if the second-to-last name in the list is a template.
match = re.match('_Z(T[VTIS])?(N.+)', k)
if match:
try:
names, _ = parse_itanium_nested_name(match.group(2))
if names and names[-2][1]:
name = ''.join([x for x,_ in names])
except TooComplexName:
# Manglings that are too complex should already have been
# filtered out, but if we happen to somehow see one here
# just leave it as-is.
pass
if name:
old_count = template_function_count.setdefault(name,0)
template_function_count[name] = old_count + 1
template_function_mapping[k] = name
else:
template_function_mapping[k] = ""
# Print symbols which both:
# * Appear in exactly one input, as symbols defined in multiple
# objects/libraries are assumed to have public definitions.
# * Aren't instances of member functions of templates which have been
# instantiated 100 times or more, which are assumed to have public
# definitions. (100 is an arbitrary guess here.)
if args.o:
outfile = open(args.o,'w')
else:
outfile = sys.stdout
for k,v in list(symbols.items()):
template_count = template_function_count[template_function_mapping[k]]
if v == 1 and template_count < 100:
print(k, file=outfile)
| 21,499 | 41.574257 | 98 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lldbDataFormatters.py | """
LLDB Formatters for LLVM data types.
Load into LLDB with 'command script import /path/to/lldbDataFormatters.py'
"""
def __lldb_init_module(debugger, internal_dict):
debugger.HandleCommand('type category define -e llvm -l c++')
debugger.HandleCommand('type synthetic add -w llvm '
'-l lldbDataFormatters.SmallVectorSynthProvider '
'-x "^llvm::SmallVectorImpl<.+>$"')
debugger.HandleCommand('type synthetic add -w llvm '
'-l lldbDataFormatters.SmallVectorSynthProvider '
'-x "^llvm::SmallVector<.+,.+>$"')
debugger.HandleCommand('type synthetic add -w llvm '
'-l lldbDataFormatters.ArrayRefSynthProvider '
'-x "^llvm::ArrayRef<.+>$"')
debugger.HandleCommand('type summary add -w llvm '
'-F lldbDataFormatters.OptionalSummaryProvider '
'-x "^llvm::Optional<.+>$"')
# Pretty printer for llvm::SmallVector/llvm::SmallVectorImpl
class SmallVectorSynthProvider:
def __init__(self, valobj, dict):
        self.valobj = valobj
self.update() # initialize this provider
def num_children(self):
begin = self.begin.GetValueAsUnsigned(0)
end = self.end.GetValueAsUnsigned(0)
        # num_children must return an int; use floor division.
        return (end - begin) // self.type_size
def get_child_index(self, name):
try:
return int(name.lstrip('[').rstrip(']'))
        except ValueError:
            return -1
def get_child_at_index(self, index):
# Do bounds checking.
if index < 0:
return None
if index >= self.num_children():
            return None
offset = index * self.type_size
return self.begin.CreateChildAtOffset('['+str(index)+']',
offset, self.data_type)
def update(self):
self.begin = self.valobj.GetChildMemberWithName('BeginX')
self.end = self.valobj.GetChildMemberWithName('EndX')
the_type = self.valobj.GetType()
# If this is a reference type we have to dereference it to get to the
# template parameter.
if the_type.IsReferenceType():
the_type = the_type.GetDereferencedType()
self.data_type = the_type.GetTemplateArgumentType(0)
self.type_size = self.data_type.GetByteSize()
assert self.type_size != 0
class ArrayRefSynthProvider:
""" Provider for llvm::ArrayRef """
def __init__(self, valobj, dict):
        self.valobj = valobj
self.update() # initialize this provider
def num_children(self):
return self.length
def get_child_index(self, name):
try:
return int(name.lstrip('[').rstrip(']'))
        except ValueError:
            return -1
def get_child_at_index(self, index):
if index < 0 or index >= self.num_children():
            return None
offset = index * self.type_size
return self.data.CreateChildAtOffset('[' + str(index) + ']',
offset, self.data_type)
def update(self):
self.data = self.valobj.GetChildMemberWithName('Data')
length_obj = self.valobj.GetChildMemberWithName('Length')
self.length = length_obj.GetValueAsUnsigned(0)
self.data_type = self.data.GetType().GetPointeeType()
self.type_size = self.data_type.GetByteSize()
assert self.type_size != 0
def OptionalSummaryProvider(valobj, internal_dict):
if not valobj.GetChildMemberWithName('hasVal').GetValueAsUnsigned(0):
return 'None'
underlying_type = valobj.GetType().GetTemplateArgumentType(0)
storage = valobj.GetChildMemberWithName('storage')
return str(storage.Cast(underlying_type))
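# An illustrative interactive session (the variable name is hypothetical):
#   (lldb) command script import llvm/utils/lldbDataFormatters.py
#   (lldb) frame variable vec
# after which vec's elements are shown as children [0], [1], ... via the
# synthetic provider above.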
| 3,789 | 37.282828 | 77 | py |