## File: qemu-master/target/hexagon/gen_tcg_func_table.py
#!/usr/bin/env python3
##
## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sys
import re
import string
import hex_common
def main():
hex_common.read_semantics_file(sys.argv[1])
hex_common.read_attribs_file(sys.argv[2])
hex_common.calculate_attribs()
tagregs = hex_common.get_tagregs()
tagimms = hex_common.get_tagimms()
with open(sys.argv[3], 'w') as f:
f.write("#ifndef HEXAGON_FUNC_TABLE_H\n")
f.write("#define HEXAGON_FUNC_TABLE_H\n\n")
f.write("const SemanticInsn opcode_genptr[XX_LAST_OPCODE] = {\n")
for tag in hex_common.tags:
## Skip the priv instructions
if ( "A_PRIV" in hex_common.attribdict[tag] ) :
continue
## Skip the guest instructions
if ( "A_GUEST" in hex_common.attribdict[tag] ) :
continue
## Skip the diag instructions
if ( tag == "Y6_diag" ) :
continue
if ( tag == "Y6_diag0" ) :
continue
if ( tag == "Y6_diag1" ) :
continue
f.write(" [%s] = generate_%s,\n" % (tag, tag))
f.write("};\n\n")
f.write("#endif /* HEXAGON_FUNC_TABLE_H */\n")
if __name__ == "__main__":
main()
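## A sketch of the generated header, assuming A2_add is among the tags (the
## actual entries depend on the semantics file passed in):
##
##     const SemanticInsn opcode_genptr[XX_LAST_OPCODE] = {
##         [A2_add] = generate_A2_add,
##         ...
##     };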

## File: qemu-master/target/hexagon/gen_op_regs.py
#!/usr/bin/env python3
##
## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sys
import re
import string
import hex_common
##
## Generate the register and immediate operands for each instruction
##
def calculate_regid_reg(tag):
def letter_inc(x): return chr(ord(x)+1)
ordered_implregs = [ 'SP','FP','LR' ]
srcdst_lett = 'X'
src_lett = 'S'
dst_lett = 'D'
retstr = ""
mapdict = {}
for reg in ordered_implregs:
reg_rd = 0
reg_wr = 0
if ('A_IMPLICIT_WRITES_'+reg) in hex_common.attribdict[tag]: reg_wr = 1
if reg_rd and reg_wr:
retstr += srcdst_lett
mapdict[srcdst_lett] = reg
srcdst_lett = letter_inc(srcdst_lett)
elif reg_rd:
retstr += src_lett
mapdict[src_lett] = reg
src_lett = letter_inc(src_lett)
elif reg_wr:
retstr += dst_lett
mapdict[dst_lett] = reg
dst_lett = letter_inc(dst_lett)
return retstr,mapdict
def calculate_regid_letters(tag):
retstr,mapdict = calculate_regid_reg(tag)
return retstr
def strip_reg_prefix(x):
y=x.replace('UREG.','')
y=y.replace('MREG.','')
return y.replace('GREG.','')
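## For example (illustrative name), strip_reg_prefix('GREG.GP') returns 'GP';
## strings without a UREG./MREG./GREG. prefix pass through unchanged.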
def main():
hex_common.read_semantics_file(sys.argv[1])
hex_common.read_attribs_file(sys.argv[2])
tagregs = hex_common.get_tagregs()
tagimms = hex_common.get_tagimms()
with open(sys.argv[3], 'w') as f:
for tag in hex_common.tags:
regs = tagregs[tag]
rregs = []
wregs = []
regids = ""
for regtype,regid,toss,numregs in regs:
if hex_common.is_read(regid):
if regid[0] not in regids: regids += regid[0]
rregs.append(regtype+regid+numregs)
if hex_common.is_written(regid):
wregs.append(regtype+regid+numregs)
if regid[0] not in regids: regids += regid[0]
for attrib in hex_common.attribdict[tag]:
if hex_common.attribinfo[attrib]['rreg']:
rregs.append(strip_reg_prefix(hex_common.attribinfo[attrib]['rreg']))
if hex_common.attribinfo[attrib]['wreg']:
wregs.append(strip_reg_prefix(hex_common.attribinfo[attrib]['wreg']))
regids += calculate_regid_letters(tag)
f.write('REGINFO(%s,"%s",\t/*RD:*/\t"%s",\t/*WR:*/\t"%s")\n' % \
(tag,regids,",".join(rregs),",".join(wregs)))
for tag in hex_common.tags:
imms = tagimms[tag]
f.write( 'IMMINFO(%s' % tag)
if not imms:
f.write(''','u',0,0,'U',0,0''')
for sign,size,shamt in imms:
if sign == 'r': sign = 's'
if not shamt:
shamt = "0"
f.write(''','%s',%s,%s''' % (sign,size,shamt))
if len(imms) == 1:
if sign.isupper():
myu = 'u'
else:
myu = 'U'
f.write(''','%s',0,0''' % myu)
f.write(')\n')
if __name__ == "__main__":
main()

## File: qemu-master/target/hexagon/hex_common.py
#!/usr/bin/env python3
##
## Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sys
import re
import string
behdict = {} # tag ->behavior
semdict = {} # tag -> semantics
attribdict = {} # tag -> attributes
macros = {} # macro -> macro information...
attribinfo = {} # Register information and misc
tags = [] # list of all tags
overrides = {} # tags with helper overrides
idef_parser_enabled = {} # tags enabled for idef-parser
# We should do this as a hash for performance,
# but to keep order let's keep it as a list.
def uniquify(seq):
seen = set()
seen_add = seen.add
return [x for x in seq if x not in seen and not seen_add(x)]
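## Illustrative example -- first occurrence wins, order is preserved:
##
##     >>> uniquify(['Rd', 'Rs', 'Rd', 'Rt'])
##     ['Rd', 'Rs', 'Rt']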
regre = re.compile(
r"((?<!DUP)[MNORCPQXSGVZA])([stuvwxyzdefg]+)([.]?[LlHh]?)(\d+S?)")
immre = re.compile(r"[#]([rRsSuUm])(\d+)(?:[:](\d+))?")
reg_or_immre = \
re.compile(r"(((?<!DUP)[MNRCOPQXSGVZA])([stuvwxyzdefg]+)" + \
"([.]?[LlHh]?)(\d+S?))|([#]([rRsSuUm])(\d+)[:]?(\d+)?)")
relimmre = re.compile(r"[#]([rR])(\d+)(?:[:](\d+))?")
absimmre = re.compile(r"[#]([sSuUm])(\d+)(?:[:](\d+))?")
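## Illustrative matches, using A2_add-style operand syntax from the behavior
## strings (e.g. "Rd32=add(Rs32,Rt32)"):
##
##     >>> regre.findall("Rd32=add(Rs32,Rt32)")
##     [('R', 'd', '', '32'), ('R', 's', '', '32'), ('R', 't', '', '32')]
##     >>> immre.findall("Rd32=add(Rs32,#s16)")
##     [('s', '16', '')]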
finished_macros = set()
def expand_macro_attribs(macro,allmac_re):
if macro.key not in finished_macros:
# Get a list of all things that might be macros
l = allmac_re.findall(macro.beh)
for submacro in l:
if not submacro: continue
if submacro not in macros:
raise Exception("Couldn't find macro: <%s>" % submacro)
macro.attribs |= expand_macro_attribs(
macros[submacro], allmac_re)
finished_macros.add(macro.key)
return macro.attribs
# When qemu needs an attribute that isn't in the imported files,
# we'll add it here.
def add_qemu_macro_attrib(name, attrib):
macros[name].attribs.add(attrib)
immextre = re.compile(r'f(MUST_)?IMMEXT[(]([UuSsRr])')
def is_cond_jump(tag):
if tag == 'J2_rte':
return False
if ('A_HWLOOP0_END' in attribdict[tag] or
'A_HWLOOP1_END' in attribdict[tag]):
return False
return \
re.compile(r"(if.*fBRANCH)|(if.*fJUMPR)").search(semdict[tag]) != None
def is_cond_call(tag):
return re.compile(r"(if.*fCALL)").search(semdict[tag]) != None
def calculate_attribs():
add_qemu_macro_attrib('fREAD_PC', 'A_IMPLICIT_READS_PC')
add_qemu_macro_attrib('fTRAP', 'A_IMPLICIT_READS_PC')
add_qemu_macro_attrib('fWRITE_P0', 'A_WRITES_PRED_REG')
add_qemu_macro_attrib('fWRITE_P1', 'A_WRITES_PRED_REG')
add_qemu_macro_attrib('fWRITE_P2', 'A_WRITES_PRED_REG')
add_qemu_macro_attrib('fWRITE_P3', 'A_WRITES_PRED_REG')
add_qemu_macro_attrib('fSET_OVERFLOW', 'A_IMPLICIT_WRITES_USR')
add_qemu_macro_attrib('fSET_LPCFG', 'A_IMPLICIT_WRITES_USR')
add_qemu_macro_attrib('fLOAD', 'A_SCALAR_LOAD')
add_qemu_macro_attrib('fSTORE', 'A_SCALAR_STORE')
# Recurse down macros, find attributes from sub-macros
macroValues = list(macros.values())
allmacros_restr = "|".join(set([ m.re.pattern for m in macroValues ]))
allmacros_re = re.compile(allmacros_restr)
for macro in macroValues:
expand_macro_attribs(macro,allmacros_re)
# Append attributes to all instructions
for tag in tags:
for macname in allmacros_re.findall(semdict[tag]):
if not macname: continue
macro = macros[macname]
attribdict[tag] |= set(macro.attribs)
# Figure out which instructions write predicate registers
tagregs = get_tagregs()
for tag in tags:
regs = tagregs[tag]
for regtype, regid, toss, numregs in regs:
if regtype == "P" and is_written(regid):
attribdict[tag].add('A_WRITES_PRED_REG')
# Mark conditional jumps and calls
# Not all instructions are properly marked with A_CONDEXEC
for tag in tags:
if is_cond_jump(tag) or is_cond_call(tag):
attribdict[tag].add('A_CONDEXEC')
def SEMANTICS(tag, beh, sem):
#print tag,beh,sem
behdict[tag] = beh
semdict[tag] = sem
attribdict[tag] = set()
tags.append(tag) # dicts have no order, this is for order
def ATTRIBUTES(tag, attribstring):
attribstring = \
attribstring.replace("ATTRIBS","").replace("(","").replace(")","")
if not attribstring:
return
attribs = attribstring.split(",")
for attrib in attribs:
attribdict[tag].add(attrib.strip())
class Macro(object):
__slots__ = ['key','name', 'beh', 'attribs', 're']
def __init__(self, name, beh, attribs):
self.key = name
self.name = name
self.beh = beh
self.attribs = set(attribs)
self.re = re.compile("\\b" + name + "\\b")
def MACROATTRIB(macname,beh,attribstring):
attribstring = attribstring.replace("(","").replace(")","")
if attribstring:
attribs = attribstring.split(",")
else:
attribs = []
macros[macname] = Macro(macname,beh,attribs)
def compute_tag_regs(tag):
return uniquify(regre.findall(behdict[tag]))
def compute_tag_immediates(tag):
return uniquify(immre.findall(behdict[tag]))
##
## tagregs is the main data structure we'll use
## tagregs[tag] will contain the registers used by an instruction
## Within each entry, we'll use the regtype and regid fields
## regtype can be one of the following
## C control register
## N new register value
## P predicate register
## R GPR register
## M modifier register
## Q HVX predicate vector
## V HVX vector register
## O HVX new vector register
## regid can be one of the following
## d, e destination register
## dd destination register pair
## s, t, u, v, w source register
## ss, tt, uu, vv source register pair
## x, y read-write register
## xx, yy read-write register pair
##
def get_tagregs():
return dict(zip(tags, list(map(compute_tag_regs, tags))))
def get_tagimms():
return dict(zip(tags, list(map(compute_tag_immediates, tags))))
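## Illustrative entry, derived from the A2_add behavior "Rd32=add(Rs32,Rt32)":
##
##     tagregs['A2_add'] == [('R', 'd', '', '32'),
##                           ('R', 's', '', '32'),
##                           ('R', 't', '', '32')]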
def is_pair(regid):
return len(regid) == 2
def is_single(regid):
return len(regid) == 1
def is_written(regid):
return regid[0] in "dexy"
def is_writeonly(regid):
return regid[0] in "de"
def is_read(regid):
return regid[0] in "stuvwxy"
def is_readwrite(regid):
return regid[0] in "xy"
def is_scalar_reg(regtype):
return regtype in "RPC"
def is_hvx_reg(regtype):
return regtype in "VQ"
def is_old_val(regtype, regid, tag):
return regtype+regid+'V' in semdict[tag]
def is_new_val(regtype, regid, tag):
return regtype+regid+'N' in semdict[tag]
def need_slot(tag):
if (('A_CONDEXEC' in attribdict[tag] and
'A_JUMP' not in attribdict[tag]) or
'A_STORE' in attribdict[tag] or
'A_LOAD' in attribdict[tag]):
return 1
else:
return 0
def need_part1(tag):
return re.compile(r"fPART1").search(semdict[tag])
def need_ea(tag):
return re.compile(r"\bEA\b").search(semdict[tag])
def need_PC(tag):
return 'A_IMPLICIT_READS_PC' in attribdict[tag]
def helper_needs_next_PC(tag):
return 'A_CALL' in attribdict[tag]
def need_pkt_has_multi_cof(tag):
return 'A_COF' in attribdict[tag]
def need_condexec_reg(tag, regs):
if 'A_CONDEXEC' in attribdict[tag]:
for regtype, regid, toss, numregs in regs:
if is_writeonly(regid) and not is_hvx_reg(regtype):
return True
return False
def skip_qemu_helper(tag):
return tag in overrides.keys()
def is_tmp_result(tag):
return ('A_CVI_TMP' in attribdict[tag] or
'A_CVI_TMP_DST' in attribdict[tag])
def is_new_result(tag):
return ('A_CVI_NEW' in attribdict[tag])
def is_idef_parser_enabled(tag):
return tag in idef_parser_enabled
def imm_name(immlett):
return "%siV" % immlett
def read_semantics_file(name):
eval_line = ""
for line in open(name, 'rt').readlines():
if not line.startswith("#"):
eval_line += line
if line.endswith("\\\n"):
eval_line.rstrip("\\\n")
else:
eval(eval_line.strip())
eval_line = ""
def read_attribs_file(name):
attribre = re.compile(r'DEF_ATTRIB\(([A-Za-z0-9_]+), ([^,]*), ' +
r'"([A-Za-z0-9_\.]*)", "([A-Za-z0-9_\.]*)"\)')
for line in open(name, 'rt').readlines():
if not attribre.match(line):
continue
(attrib_base,descr,rreg,wreg) = attribre.findall(line)[0]
attrib_base = 'A_' + attrib_base
attribinfo[attrib_base] = {'rreg':rreg, 'wreg':wreg, 'descr':descr}
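## Each matched line carries (name, description, read-reg, write-reg), e.g.
## (illustrative):
##
##     DEF_ATTRIB(PRIV, "Not available in user mode", "", "")
##
## which is stored as attribinfo['A_PRIV'] -- note the added 'A_' prefix.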
def read_overrides_file(name):
overridere = re.compile("#define fGEN_TCG_([A-Za-z0-9_]+)\(.*")
for line in open(name, 'rt').readlines():
if not overridere.match(line):
continue
tag = overridere.findall(line)[0]
overrides[tag] = True
def read_idef_parser_enabled_file(name):
global idef_parser_enabled
with open(name, "r") as idef_parser_enabled_file:
lines = idef_parser_enabled_file.read().strip().split("\n")
idef_parser_enabled = set(lines)

## File: qemu-master/target/hexagon/gen_op_attribs.py
#!/usr/bin/env python3
##
## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sys
import re
import string
import hex_common
def main():
hex_common.read_semantics_file(sys.argv[1])
hex_common.read_attribs_file(sys.argv[2])
hex_common.calculate_attribs()
##
## Generate all the attributes associated with each instruction
##
with open(sys.argv[3], 'w') as f:
for tag in hex_common.tags:
f.write('OP_ATTRIB(%s,ATTRIBS(%s))\n' % \
(tag, ','.join(sorted(hex_common.attribdict[tag]))))
if __name__ == "__main__":
main()

## File: qemu-master/target/hexagon/gen_idef_parser_funcs.py
#!/usr/bin/env python3
##
## Copyright(c) 2019-2022 rev.ng Labs Srl. All Rights Reserved.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sys
import re
import string
from io import StringIO
import hex_common
##
## Generate code to be fed to the idef_parser
##
## Consider A2_add:
##
## Rd32=add(Rs32,Rt32), { RdV=RsV+RtV;}
##
## We produce:
##
## A2_add(RdV, in RsV, in RtV) {
## { RdV=RsV+RtV;}
## }
##
## A2_add represents the instruction tag. Then we have a list of TCGv
## that the code generated by the parser can expect in input. Some of
## them are inputs ("in" prefix), while some others are outputs.
##
def main():
hex_common.read_semantics_file(sys.argv[1])
hex_common.read_attribs_file(sys.argv[2])
hex_common.calculate_attribs()
tagregs = hex_common.get_tagregs()
tagimms = hex_common.get_tagimms()
with open(sys.argv[3], 'w') as f:
f.write('#include "macros.inc"\n\n')
for tag in hex_common.tags:
## Skip the priv instructions
if ( "A_PRIV" in hex_common.attribdict[tag] ) :
continue
## Skip the guest instructions
if ( "A_GUEST" in hex_common.attribdict[tag] ) :
continue
## Skip instructions that saturate in a ternary expression
if ( tag in {'S2_asr_r_r_sat', 'S2_asl_r_r_sat'} ) :
continue
## Skip instructions using switch
if ( tag in {'S4_vrcrotate_acc', 'S4_vrcrotate'} ) :
continue
## Skip trap instructions
if ( tag in {'J2_trap0', 'J2_trap1'} ) :
continue
## Skip 128-bit instructions
if ( tag in {'A7_croundd_ri', 'A7_croundd_rr'} ) :
continue
if ( tag in {'M7_wcmpyrw', 'M7_wcmpyrwc',
'M7_wcmpyiw', 'M7_wcmpyiwc',
'M7_wcmpyrw_rnd', 'M7_wcmpyrwc_rnd',
'M7_wcmpyiw_rnd', 'M7_wcmpyiwc_rnd'} ) :
continue
## Skip interleave/deinterleave instructions
if ( tag in {'S2_interleave', 'S2_deinterleave'} ) :
continue
## Skip instructions using bit reverse
if ( tag in {'S2_brev', 'S2_brevp', 'S2_ct0', 'S2_ct1',
'S2_ct0p', 'S2_ct1p', 'A4_tlbmatch'} ) :
continue
## Skip other unsupported instructions
if ( tag == 'S2_cabacdecbin' or tag == 'A5_ACS' ) :
continue
if ( tag.startswith('Y') ) :
continue
if ( tag.startswith('V6_') ) :
continue
if ( tag.startswith('F') ) :
continue
if ( tag.endswith('_locked') ) :
continue
if ( "A_COF" in hex_common.attribdict[tag] ) :
continue
regs = tagregs[tag]
imms = tagimms[tag]
arguments = []
for regtype,regid,toss,numregs in regs:
prefix = "in " if hex_common.is_read(regid) else ""
is_pair = hex_common.is_pair(regid)
is_single_old = (hex_common.is_single(regid)
and hex_common.is_old_val(regtype, regid, tag))
is_single_new = (hex_common.is_single(regid)
and hex_common.is_new_val(regtype, regid, tag))
if is_pair or is_single_old:
arguments.append("%s%s%sV" % (prefix, regtype, regid))
elif is_single_new:
arguments.append("%s%s%sN" % (prefix, regtype, regid))
else:
print("Bad register parse: ",regtype,regid,toss,numregs)
for immlett,bits,immshift in imms:
arguments.append(hex_common.imm_name(immlett))
f.write("%s(%s) {\n" % (tag, ", ".join(arguments)))
f.write(" ");
if hex_common.need_ea(tag):
f.write("size4u_t EA; ");
f.write("%s\n" % hex_common.semdict[tag])
f.write("}\n\n")
if __name__ == "__main__":
main()

## File: qemu-master/target/hexagon/gen_shortcode.py
#!/usr/bin/env python3
##
## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sys
import re
import string
import hex_common
def gen_shortcode(f, tag):
f.write('DEF_SHORTCODE(%s, %s)\n' % (tag, hex_common.semdict[tag]))
def main():
hex_common.read_semantics_file(sys.argv[1])
hex_common.read_attribs_file(sys.argv[2])
hex_common.calculate_attribs()
tagregs = hex_common.get_tagregs()
tagimms = hex_common.get_tagimms()
with open(sys.argv[3], 'w') as f:
f.write("#ifndef DEF_SHORTCODE\n")
f.write("#define DEF_SHORTCODE(TAG,SHORTCODE) /* Nothing */\n")
f.write("#endif\n")
for tag in hex_common.tags:
## Skip the priv instructions
if ( "A_PRIV" in hex_common.attribdict[tag] ) :
continue
## Skip the guest instructions
if ( "A_GUEST" in hex_common.attribdict[tag] ) :
continue
## Skip the diag instructions
if ( tag == "Y6_diag" ) :
continue
if ( tag == "Y6_diag0" ) :
continue
if ( tag == "Y6_diag1" ) :
continue
gen_shortcode(f, tag)
f.write("#undef DEF_SHORTCODE\n")
if __name__ == "__main__":
main()

## File: qemu-master/target/hexagon/gen_helper_protos.py
#!/usr/bin/env python3
##
## Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sys
import re
import string
import hex_common
##
## Helpers for gen_helper_prototype
##
def_helper_types = {
'N' : 's32',
'O' : 's32',
'P' : 's32',
'M' : 's32',
'C' : 's32',
'R' : 's32',
'V' : 'ptr',
'Q' : 'ptr'
}
def_helper_types_pair = {
'R' : 's64',
'C' : 's64',
'S' : 's64',
'G' : 's64',
'V' : 'ptr',
'Q' : 'ptr'
}
def gen_def_helper_opn(f, tag, regtype, regid, toss, numregs, i):
if (hex_common.is_pair(regid)):
f.write(", %s" % (def_helper_types_pair[regtype]))
elif (hex_common.is_single(regid)):
f.write(", %s" % (def_helper_types[regtype]))
else:
print("Bad register parse: ",regtype,regid,toss,numregs)
##
## Generate the DEF_HELPER prototype for an instruction
## For A2_add: Rd32=add(Rs32,Rt32)
## We produce:
## DEF_HELPER_3(A2_add, s32, env, s32, s32)
##
def gen_helper_prototype(f, tag, tagregs, tagimms):
regs = tagregs[tag]
imms = tagimms[tag]
numresults = 0
numscalarresults = 0
numscalarreadwrite = 0
for regtype,regid,toss,numregs in regs:
if (hex_common.is_written(regid)):
numresults += 1
if (hex_common.is_scalar_reg(regtype)):
numscalarresults += 1
if (hex_common.is_readwrite(regid)):
if (hex_common.is_scalar_reg(regtype)):
numscalarreadwrite += 1
if (numscalarresults > 1):
## The helper is bogus when there is more than one result
f.write('DEF_HELPER_1(%s, void, env)\n' % tag)
else:
## Figure out how many arguments the helper will take
if (numscalarresults == 0):
def_helper_size = len(regs)+len(imms)+numscalarreadwrite+1
if hex_common.need_pkt_has_multi_cof(tag): def_helper_size += 1
if hex_common.need_part1(tag): def_helper_size += 1
if hex_common.need_slot(tag): def_helper_size += 1
if hex_common.need_PC(tag): def_helper_size += 1
if hex_common.helper_needs_next_PC(tag): def_helper_size += 1
if hex_common.need_condexec_reg(tag, regs): def_helper_size += 1
f.write('DEF_HELPER_%s(%s' % (def_helper_size, tag))
## The return type is void
f.write(', void' )
else:
def_helper_size = len(regs)+len(imms)+numscalarreadwrite
if hex_common.need_pkt_has_multi_cof(tag): def_helper_size += 1
if hex_common.need_part1(tag): def_helper_size += 1
if hex_common.need_slot(tag): def_helper_size += 1
if hex_common.need_PC(tag): def_helper_size += 1
if hex_common.need_condexec_reg(tag, regs): def_helper_size += 1
if hex_common.helper_needs_next_PC(tag): def_helper_size += 1
f.write('DEF_HELPER_%s(%s' % (def_helper_size, tag))
## Generate the qemu DEF_HELPER type for each result
## Iterate over this list twice
## - Emit the scalar result
## - Emit the vector result
i=0
for regtype,regid,toss,numregs in regs:
if (hex_common.is_written(regid)):
if (not hex_common.is_hvx_reg(regtype)):
gen_def_helper_opn(f, tag, regtype, regid, toss, numregs, i)
i += 1
## Put the env between the outputs and inputs
f.write(', env' )
i += 1
# Second pass
for regtype,regid,toss,numregs in regs:
if (hex_common.is_written(regid)):
if (hex_common.is_hvx_reg(regtype)):
gen_def_helper_opn(f, tag, regtype, regid, toss, numregs, i)
i += 1
## For conditional instructions, we pass in the destination register
if 'A_CONDEXEC' in hex_common.attribdict[tag]:
for regtype, regid, toss, numregs in regs:
if (hex_common.is_writeonly(regid) and
not hex_common.is_hvx_reg(regtype)):
gen_def_helper_opn(f, tag, regtype, regid, toss, numregs, i)
i += 1
## Generate the qemu type for each input operand (regs and immediates)
for regtype,regid,toss,numregs in regs:
if (hex_common.is_read(regid)):
if (hex_common.is_hvx_reg(regtype) and
hex_common.is_readwrite(regid)):
continue
gen_def_helper_opn(f, tag, regtype, regid, toss, numregs, i)
i += 1
for immlett,bits,immshift in imms:
f.write(", s32")
## Add the arguments for the instruction pkt_has_multi_cof, slot and
## part1 (if needed)
if hex_common.need_pkt_has_multi_cof(tag): f.write(', i32')
if hex_common.need_PC(tag): f.write(', i32')
if hex_common.helper_needs_next_PC(tag): f.write(', i32')
if hex_common.need_slot(tag): f.write(', i32' )
if hex_common.need_part1(tag): f.write(' , i32' )
f.write(')\n')
def main():
hex_common.read_semantics_file(sys.argv[1])
hex_common.read_attribs_file(sys.argv[2])
hex_common.read_overrides_file(sys.argv[3])
hex_common.read_overrides_file(sys.argv[4])
## Whether or not idef-parser is enabled is
## determined by the number of arguments to
## this script:
##
## 5 args. -> not enabled,
## 6 args. -> idef-parser enabled.
##
## The 6th argument then holds a list of the successfully
## parsed instructions.
is_idef_parser_enabled = len(sys.argv) > 6
if is_idef_parser_enabled:
hex_common.read_idef_parser_enabled_file(sys.argv[5])
hex_common.calculate_attribs()
tagregs = hex_common.get_tagregs()
tagimms = hex_common.get_tagimms()
output_file = sys.argv[-1]
with open(output_file, 'w') as f:
for tag in hex_common.tags:
## Skip the priv instructions
if ( "A_PRIV" in hex_common.attribdict[tag] ) :
continue
## Skip the guest instructions
if ( "A_GUEST" in hex_common.attribdict[tag] ) :
continue
## Skip the diag instructions
if ( tag == "Y6_diag" ) :
continue
if ( tag == "Y6_diag0" ) :
continue
if ( tag == "Y6_diag1" ) :
continue
if ( hex_common.skip_qemu_helper(tag) ):
continue
if ( hex_common.is_idef_parser_enabled(tag) ):
continue
gen_helper_prototype(f, tag, tagregs, tagimms)
if __name__ == "__main__":
main()

## File: qemu-master/target/hexagon/gen_tcg_funcs.py
#!/usr/bin/env python3
##
## Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sys
import re
import string
import hex_common
##
## Helpers for gen_tcg_func
##
def gen_decl_ea_tcg(f, tag):
f.write(" TCGv EA G_GNUC_UNUSED = tcg_temp_new();\n")
def genptr_decl_pair_writable(f, tag, regtype, regid, regno):
regN="%s%sN" % (regtype,regid)
if (regtype == "R"):
f.write(" const int %s = insn->regno[%d];\n" % (regN, regno))
elif (regtype == "C"):
f.write(" const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \
(regN, regno))
else:
print("Bad register parse: ", regtype, regid)
f.write(" TCGv_i64 %s%sV = get_result_gpr_pair(ctx, %s);\n" % \
(regtype, regid, regN))
def genptr_decl_writable(f, tag, regtype, regid, regno):
regN="%s%sN" % (regtype,regid)
if (regtype == "R"):
f.write(" const int %s = insn->regno[%d];\n" % (regN, regno))
f.write(" TCGv %s%sV = get_result_gpr(ctx, %s);\n" % \
(regtype, regid, regN))
elif (regtype == "C"):
f.write(" const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \
(regN, regno))
f.write(" TCGv %s%sV = get_result_gpr(ctx, %s);\n" % \
(regtype, regid, regN))
elif (regtype == "P"):
f.write(" const int %s = insn->regno[%d];\n" % (regN, regno))
f.write(" TCGv %s%sV = tcg_temp_new();\n" % \
(regtype, regid))
else:
print("Bad register parse: ", regtype, regid)
def genptr_decl(f, tag, regtype, regid, regno):
regN="%s%sN" % (regtype,regid)
if (regtype == "R"):
if (regid in {"ss", "tt"}):
f.write(" TCGv_i64 %s%sV = tcg_temp_new_i64();\n" % \
(regtype, regid))
f.write(" const int %s = insn->regno[%d];\n" % \
(regN, regno))
elif (regid in {"dd", "ee", "xx", "yy"}):
genptr_decl_pair_writable(f, tag, regtype, regid, regno)
elif (regid in {"s", "t", "u", "v"}):
f.write(" TCGv %s%sV = hex_gpr[insn->regno[%d]];\n" % \
(regtype, regid, regno))
elif (regid in {"d", "e", "x", "y"}):
genptr_decl_writable(f, tag, regtype, regid, regno)
else:
print("Bad register parse: ", regtype, regid)
elif (regtype == "P"):
if (regid in {"s", "t", "u", "v"}):
f.write(" TCGv %s%sV = hex_pred[insn->regno[%d]];\n" % \
(regtype, regid, regno))
elif (regid in {"d", "e", "x"}):
genptr_decl_writable(f, tag, regtype, regid, regno)
else:
print("Bad register parse: ", regtype, regid)
elif (regtype == "C"):
if (regid == "ss"):
f.write(" TCGv_i64 %s%sV = tcg_temp_new_i64();\n" % \
(regtype, regid))
f.write(" const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \
(regN, regno))
elif (regid == "dd"):
genptr_decl_pair_writable(f, tag, regtype, regid, regno)
elif (regid == "s"):
f.write(" TCGv %s%sV = tcg_temp_new();\n" % \
(regtype, regid))
f.write(" const int %s%sN = insn->regno[%d] + HEX_REG_SA0;\n" % \
(regtype, regid, regno))
elif (regid == "d"):
genptr_decl_writable(f, tag, regtype, regid, regno)
else:
print("Bad register parse: ", regtype, regid)
elif (regtype == "M"):
if (regid == "u"):
f.write(" const int %s%sN = insn->regno[%d];\n"% \
(regtype, regid, regno))
f.write(" TCGv %s%sV = hex_gpr[%s%sN + HEX_REG_M0];\n" % \
(regtype, regid, regtype, regid))
else:
print("Bad register parse: ", regtype, regid)
elif (regtype == "V"):
if (regid in {"dd"}):
f.write(" const int %s%sN = insn->regno[%d];\n" %\
(regtype, regid, regno))
f.write(" const intptr_t %s%sV_off =\n" %\
(regtype, regid))
if (hex_common.is_tmp_result(tag)):
f.write(" ctx_tmp_vreg_off(ctx, %s%sN, 2, true);\n" % \
(regtype, regid))
else:
f.write(" ctx_future_vreg_off(ctx, %s%sN," % \
(regtype, regid))
f.write(" 2, true);\n")
if (not hex_common.skip_qemu_helper(tag)):
f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \
(regtype, regid))
f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \
(regtype, regid, regtype, regid))
elif (regid in {"uu", "vv", "xx"}):
f.write(" const int %s%sN = insn->regno[%d];\n" % \
(regtype, regid, regno))
f.write(" const intptr_t %s%sV_off =\n" % \
(regtype, regid))
f.write(" offsetof(CPUHexagonState, %s%sV);\n" % \
(regtype, regid))
if (not hex_common.skip_qemu_helper(tag)):
f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \
(regtype, regid))
f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \
(regtype, regid, regtype, regid))
elif (regid in {"s", "u", "v", "w"}):
f.write(" const int %s%sN = insn->regno[%d];\n" % \
(regtype, regid, regno))
f.write(" const intptr_t %s%sV_off =\n" % \
(regtype, regid))
f.write(" vreg_src_off(ctx, %s%sN);\n" % \
(regtype, regid))
if (not hex_common.skip_qemu_helper(tag)):
f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \
(regtype, regid))
elif (regid in {"d", "x", "y"}):
f.write(" const int %s%sN = insn->regno[%d];\n" % \
(regtype, regid, regno))
f.write(" const intptr_t %s%sV_off =\n" % \
(regtype, regid))
if (regid == "y"):
f.write(" offsetof(CPUHexagonState, vtmp);\n")
elif (hex_common.is_tmp_result(tag)):
f.write(" ctx_tmp_vreg_off(ctx, %s%sN, 1, true);\n" % \
(regtype, regid))
else:
f.write(" ctx_future_vreg_off(ctx, %s%sN," % \
(regtype, regid))
f.write(" 1, true);\n");
if (not hex_common.skip_qemu_helper(tag)):
f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \
(regtype, regid))
f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \
(regtype, regid, regtype, regid))
else:
print("Bad register parse: ", regtype, regid)
elif (regtype == "Q"):
if (regid in {"d", "e", "x"}):
f.write(" const int %s%sN = insn->regno[%d];\n" % \
(regtype, regid, regno))
f.write(" const intptr_t %s%sV_off =\n" % \
(regtype, regid))
f.write(" get_result_qreg(ctx, %s%sN);\n" % \
(regtype, regid))
if (not hex_common.skip_qemu_helper(tag)):
f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \
(regtype, regid))
f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \
(regtype, regid, regtype, regid))
elif (regid in {"s", "t", "u", "v"}):
f.write(" const int %s%sN = insn->regno[%d];\n" % \
(regtype, regid, regno))
f.write(" const intptr_t %s%sV_off =\n" %\
(regtype, regid))
f.write(" offsetof(CPUHexagonState, QRegs[%s%sN]);\n" % \
(regtype, regid))
if (not hex_common.skip_qemu_helper(tag)):
f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \
(regtype, regid))
else:
print("Bad register parse: ", regtype, regid)
else:
print("Bad register parse: ", regtype, regid)
def genptr_decl_new(f, tag, regtype, regid, regno):
if (regtype == "N"):
if (regid in {"s", "t"}):
f.write(" TCGv %s%sN = hex_new_value[insn->regno[%d]];\n" % \
(regtype, regid, regno))
else:
print("Bad register parse: ", regtype, regid)
elif (regtype == "P"):
if (regid in {"t", "u", "v"}):
f.write(" TCGv %s%sN = hex_new_pred_value[insn->regno[%d]];\n" % \
(regtype, regid, regno))
else:
print("Bad register parse: ", regtype, regid)
elif (regtype == "O"):
if (regid == "s"):
f.write(" const intptr_t %s%sN_num = insn->regno[%d];\n" % \
(regtype, regid, regno))
if (hex_common.skip_qemu_helper(tag)):
f.write(" const intptr_t %s%sN_off =\n" % \
(regtype, regid))
f.write(" ctx_future_vreg_off(ctx, %s%sN_num," % \
(regtype, regid))
f.write(" 1, true);\n")
else:
f.write(" TCGv %s%sN = tcg_constant_tl(%s%sN_num);\n" % \
(regtype, regid, regtype, regid))
else:
print("Bad register parse: ", regtype, regid)
else:
print("Bad register parse: ", regtype, regid)
def genptr_decl_opn(f, tag, regtype, regid, toss, numregs, i):
if (hex_common.is_pair(regid)):
genptr_decl(f, tag, regtype, regid, i)
elif (hex_common.is_single(regid)):
if hex_common.is_old_val(regtype, regid, tag):
genptr_decl(f,tag, regtype, regid, i)
elif hex_common.is_new_val(regtype, regid, tag):
genptr_decl_new(f, tag, regtype, regid, i)
else:
print("Bad register parse: ",regtype,regid,toss,numregs)
else:
print("Bad register parse: ",regtype,regid,toss,numregs)
def genptr_decl_imm(f,immlett):
if (immlett.isupper()):
i = 1
else:
i = 0
f.write(" int %s = insn->immed[%d];\n" % \
(hex_common.imm_name(immlett), i))
def genptr_src_read(f, tag, regtype, regid):
if (regtype == "R"):
if (regid in {"ss", "tt", "xx", "yy"}):
f.write(" tcg_gen_concat_i32_i64(%s%sV, hex_gpr[%s%sN],\n" % \
(regtype, regid, regtype, regid))
f.write(" hex_gpr[%s%sN + 1]);\n" % \
(regtype, regid))
elif (regid in {"x", "y"}):
## For read/write registers, we need to get the original value into
## the result TCGv. For conditional instructions, this is done in
## gen_start_packet. For unconditional instructions, we do it here.
if ('A_CONDEXEC' not in hex_common.attribdict[tag]):
f.write(" tcg_gen_mov_tl(%s%sV, hex_gpr[%s%sN]);\n" % \
(regtype, regid, regtype, regid))
elif (regid not in {"s", "t", "u", "v"}):
print("Bad register parse: ", regtype, regid)
elif (regtype == "P"):
if (regid == "x"):
f.write(" tcg_gen_mov_tl(%s%sV, hex_pred[%s%sN]);\n" % \
(regtype, regid, regtype, regid))
elif (regid not in {"s", "t", "u", "v"}):
print("Bad register parse: ", regtype, regid)
elif (regtype == "C"):
if (regid == "ss"):
f.write(" gen_read_ctrl_reg_pair(ctx, %s%sN, %s%sV);\n" % \
(regtype, regid, regtype, regid))
elif (regid == "s"):
f.write(" gen_read_ctrl_reg(ctx, %s%sN, %s%sV);\n" % \
(regtype, regid, regtype, regid))
else:
print("Bad register parse: ", regtype, regid)
elif (regtype == "M"):
if (regid != "u"):
print("Bad register parse: ", regtype, regid)
elif (regtype == "V"):
if (regid in {"uu", "vv", "xx"}):
f.write(" tcg_gen_gvec_mov(MO_64, %s%sV_off,\n" % \
(regtype, regid))
f.write(" vreg_src_off(ctx, %s%sN),\n" % \
(regtype, regid))
f.write(" sizeof(MMVector), sizeof(MMVector));\n")
f.write(" tcg_gen_gvec_mov(MO_64,\n")
f.write(" %s%sV_off + sizeof(MMVector),\n" % \
(regtype, regid))
f.write(" vreg_src_off(ctx, %s%sN ^ 1),\n" % \
(regtype, regid))
f.write(" sizeof(MMVector), sizeof(MMVector));\n")
elif (regid in {"s", "u", "v", "w"}):
if (not hex_common.skip_qemu_helper(tag)):
f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \
(regtype, regid, regtype, regid))
elif (regid in {"x", "y"}):
f.write(" tcg_gen_gvec_mov(MO_64, %s%sV_off,\n" % \
(regtype, regid))
f.write(" vreg_src_off(ctx, %s%sN),\n" % \
(regtype, regid))
f.write(" sizeof(MMVector), sizeof(MMVector));\n")
else:
print("Bad register parse: ", regtype, regid)
elif (regtype == "Q"):
if (regid in {"s", "t", "u", "v"}):
if (not hex_common.skip_qemu_helper(tag)):
f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \
(regtype, regid, regtype, regid))
elif (regid in {"x"}):
f.write(" tcg_gen_gvec_mov(MO_64, %s%sV_off,\n" % \
(regtype, regid))
f.write(" offsetof(CPUHexagonState, QRegs[%s%sN]),\n" % \
(regtype, regid))
f.write(" sizeof(MMQReg), sizeof(MMQReg));\n")
else:
print("Bad register parse: ", regtype, regid)
else:
print("Bad register parse: ", regtype, regid)
def genptr_src_read_new(f,regtype,regid):
if (regtype == "N"):
if (regid not in {"s", "t"}):
print("Bad register parse: ", regtype, regid)
elif (regtype == "P"):
if (regid not in {"t", "u", "v"}):
print("Bad register parse: ", regtype, regid)
elif (regtype == "O"):
if (regid != "s"):
print("Bad register parse: ", regtype, regid)
else:
print("Bad register parse: ", regtype, regid)
def genptr_src_read_opn(f,regtype,regid,tag):
if (hex_common.is_pair(regid)):
genptr_src_read(f, tag, regtype, regid)
elif (hex_common.is_single(regid)):
if hex_common.is_old_val(regtype, regid, tag):
genptr_src_read(f, tag, regtype, regid)
elif hex_common.is_new_val(regtype, regid, tag):
genptr_src_read_new(f,regtype,regid)
else:
print("Bad register parse: ",regtype,regid,toss,numregs)
else:
print("Bad register parse: ",regtype,regid,toss,numregs)
def gen_helper_call_opn(f, tag, regtype, regid, toss, numregs, i):
if (i > 0): f.write(", ")
if (hex_common.is_pair(regid)):
f.write("%s%sV" % (regtype,regid))
elif (hex_common.is_single(regid)):
if hex_common.is_old_val(regtype, regid, tag):
f.write("%s%sV" % (regtype,regid))
elif hex_common.is_new_val(regtype, regid, tag):
f.write("%s%sN" % (regtype,regid))
else:
print("Bad register parse: ",regtype,regid,toss,numregs)
else:
print("Bad register parse: ",regtype,regid,toss,numregs)
def gen_helper_decl_imm(f,immlett):
f.write(" TCGv tcgv_%s = tcg_constant_tl(%s);\n" % \
(hex_common.imm_name(immlett), hex_common.imm_name(immlett)))
def gen_helper_call_imm(f,immlett):
f.write(", tcgv_%s" % hex_common.imm_name(immlett))
def genptr_dst_write_pair(f, tag, regtype, regid):
f.write(" gen_log_reg_write_pair(%s%sN, %s%sV);\n" % \
(regtype, regid, regtype, regid))
def genptr_dst_write(f, tag, regtype, regid):
if (regtype == "R"):
if (regid in {"dd", "xx", "yy"}):
genptr_dst_write_pair(f, tag, regtype, regid)
elif (regid in {"d", "e", "x", "y"}):
f.write(" gen_log_reg_write(%s%sN, %s%sV);\n" % \
(regtype, regid, regtype, regid))
else:
print("Bad register parse: ", regtype, regid)
elif (regtype == "P"):
if (regid in {"d", "e", "x"}):
f.write(" gen_log_pred_write(ctx, %s%sN, %s%sV);\n" % \
(regtype, regid, regtype, regid))
else:
print("Bad register parse: ", regtype, regid)
elif (regtype == "C"):
if (regid == "dd"):
f.write(" gen_write_ctrl_reg_pair(ctx, %s%sN, %s%sV);\n" % \
(regtype, regid, regtype, regid))
elif (regid == "d"):
f.write(" gen_write_ctrl_reg(ctx, %s%sN, %s%sV);\n" % \
(regtype, regid, regtype, regid))
else:
print("Bad register parse: ", regtype, regid)
else:
print("Bad register parse: ", regtype, regid)
def genptr_dst_write_ext(f, tag, regtype, regid, newv="EXT_DFL"):
if (regtype == "V"):
if (regid in {"xx"}):
f.write(" gen_log_vreg_write_pair(ctx, %s%sV_off, %s%sN, " % \
(regtype, regid, regtype, regid))
f.write("%s);\n" % \
(newv))
elif (regid in {"y"}):
f.write(" gen_log_vreg_write(ctx, %s%sV_off, %s%sN, %s);\n" % \
(regtype, regid, regtype, regid, newv))
elif (regid not in {"dd", "d", "x"}):
print("Bad register parse: ", regtype, regid)
elif (regtype == "Q"):
if (regid not in {"d", "e", "x"}):
print("Bad register parse: ", regtype, regid)
else:
print("Bad register parse: ", regtype, regid)
def genptr_dst_write_opn(f,regtype, regid, tag):
if (hex_common.is_pair(regid)):
if (hex_common.is_hvx_reg(regtype)):
if (hex_common.is_tmp_result(tag)):
genptr_dst_write_ext(f, tag, regtype, regid, "EXT_TMP")
else:
genptr_dst_write_ext(f, tag, regtype, regid)
else:
genptr_dst_write(f, tag, regtype, regid)
elif (hex_common.is_single(regid)):
if (hex_common.is_hvx_reg(regtype)):
if (hex_common.is_new_result(tag)):
genptr_dst_write_ext(f, tag, regtype, regid, "EXT_NEW")
elif (hex_common.is_tmp_result(tag)):
genptr_dst_write_ext(f, tag, regtype, regid, "EXT_TMP")
else:
genptr_dst_write_ext(f, tag, regtype, regid, "EXT_DFL")
else:
genptr_dst_write(f, tag, regtype, regid)
else:
print("Bad register parse: ",regtype,regid,toss,numregs)
##
## Generate the TCG code to call the helper
## For A2_add: Rd32=add(Rs32,Rt32), { RdV=RsV+RtV;}
## We produce:
## static void generate_A2_add(DisasContext *ctx)
## {
## Insn *insn __attribute__((unused)) = ctx->insn;
## const int RdN = insn->regno[0];
## TCGv RdV = get_result_gpr(ctx, RdN);
## TCGv RsV = hex_gpr[insn->regno[1]];
## TCGv RtV = hex_gpr[insn->regno[2]];
## <GEN>
## gen_log_reg_write(RdN, RdV);
## }
##
## where <GEN> depends on hex_common.skip_qemu_helper(tag)
## if hex_common.skip_qemu_helper(tag) is True
## <GEN> is fGEN_TCG_A2_add({ RdV=RsV+RtV;});
## if hex_common.skip_qemu_helper(tag) is False
## <GEN> is gen_helper_A2_add(RdV, cpu_env, RsV, RtV);
##
def gen_tcg_func(f, tag, regs, imms):
f.write("static void generate_%s(DisasContext *ctx)\n" %tag)
f.write('{\n')
f.write(" Insn *insn __attribute__((unused)) = ctx->insn;\n")
if hex_common.need_ea(tag): gen_decl_ea_tcg(f, tag)
i=0
## Declare all the operands (regs and immediates)
for regtype,regid,toss,numregs in regs:
genptr_decl_opn(f, tag, regtype, regid, toss, numregs, i)
i += 1
for immlett,bits,immshift in imms:
genptr_decl_imm(f,immlett)
if 'A_PRIV' in hex_common.attribdict[tag]:
f.write(' fCHECKFORPRIV();\n')
if 'A_GUEST' in hex_common.attribdict[tag]:
f.write(' fCHECKFORGUEST();\n')
## Read all the inputs
for regtype,regid,toss,numregs in regs:
if (hex_common.is_read(regid)):
genptr_src_read_opn(f,regtype,regid,tag)
if hex_common.is_idef_parser_enabled(tag):
declared = []
## Handle registers
for regtype,regid,toss,numregs in regs:
if (hex_common.is_pair(regid)
or (hex_common.is_single(regid)
and hex_common.is_old_val(regtype, regid, tag))):
declared.append("%s%sV" % (regtype, regid))
if regtype == "M":
declared.append("%s%sN" % (regtype, regid))
elif hex_common.is_new_val(regtype, regid, tag):
declared.append("%s%sN" % (regtype,regid))
else:
print("Bad register parse: ",regtype,regid,toss,numregs)
## Handle immediates
for immlett,bits,immshift in imms:
declared.append(hex_common.imm_name(immlett))
arguments = ", ".join(["ctx", "ctx->insn", "ctx->pkt"] + declared)
f.write(" emit_%s(%s);\n" % (tag, arguments))
elif ( hex_common.skip_qemu_helper(tag) ):
f.write(" fGEN_TCG_%s(%s);\n" % (tag, hex_common.semdict[tag]))
else:
## Generate the call to the helper
for immlett,bits,immshift in imms:
gen_helper_decl_imm(f,immlett)
if hex_common.need_pkt_has_multi_cof(tag):
f.write(" TCGv pkt_has_multi_cof = ")
f.write("tcg_constant_tl(ctx->pkt->pkt_has_multi_cof);\n")
if hex_common.need_part1(tag):
f.write(" TCGv part1 = tcg_constant_tl(insn->part1);\n")
if hex_common.need_slot(tag):
f.write(" TCGv slot = tcg_constant_tl(insn->slot);\n")
if hex_common.need_PC(tag):
f.write(" TCGv PC = tcg_constant_tl(ctx->pkt->pc);\n")
if hex_common.helper_needs_next_PC(tag):
f.write(" TCGv next_PC = tcg_constant_tl(ctx->next_PC);\n")
f.write(" gen_helper_%s(" % (tag))
i=0
## If there is a scalar result, it is the return type
for regtype,regid,toss,numregs in regs:
if (hex_common.is_written(regid)):
if (hex_common.is_hvx_reg(regtype)):
continue
gen_helper_call_opn(f, tag, regtype, regid, toss, numregs, i)
i += 1
if (i > 0): f.write(", ")
f.write("cpu_env")
i=1
## For conditional instructions, we pass in the destination register
if 'A_CONDEXEC' in hex_common.attribdict[tag]:
for regtype, regid, toss, numregs in regs:
if (hex_common.is_writeonly(regid) and
not hex_common.is_hvx_reg(regtype)):
gen_helper_call_opn(f, tag, regtype, regid, toss, \
numregs, i)
i += 1
for regtype,regid,toss,numregs in regs:
if (hex_common.is_written(regid)):
if (not hex_common.is_hvx_reg(regtype)):
continue
gen_helper_call_opn(f, tag, regtype, regid, toss, numregs, i)
i += 1
for regtype,regid,toss,numregs in regs:
if (hex_common.is_read(regid)):
if (hex_common.is_hvx_reg(regtype) and
hex_common.is_readwrite(regid)):
continue
gen_helper_call_opn(f, tag, regtype, regid, toss, numregs, i)
i += 1
for immlett,bits,immshift in imms:
gen_helper_call_imm(f,immlett)
if hex_common.need_pkt_has_multi_cof(tag):
f.write(", pkt_has_multi_cof")
if hex_common.need_PC(tag): f.write(", PC")
if hex_common.helper_needs_next_PC(tag): f.write(", next_PC")
if hex_common.need_slot(tag): f.write(", slot")
if hex_common.need_part1(tag): f.write(", part1" )
f.write(");\n")
## Write all the outputs
for regtype,regid,toss,numregs in regs:
if (hex_common.is_written(regid)):
genptr_dst_write_opn(f,regtype, regid, tag)
f.write("}\n\n")
def gen_def_tcg_func(f, tag, tagregs, tagimms):
regs = tagregs[tag]
imms = tagimms[tag]
gen_tcg_func(f, tag, regs, imms)
def main():
hex_common.read_semantics_file(sys.argv[1])
hex_common.read_attribs_file(sys.argv[2])
hex_common.read_overrides_file(sys.argv[3])
hex_common.read_overrides_file(sys.argv[4])
hex_common.calculate_attribs()
## Whether or not idef-parser is enabled is
## determined by the number of arguments to
## this script:
##
## 5 args. -> not enabled,
## 6 args. -> idef-parser enabled.
##
## The 6th argument then holds a list of the successfully
## parsed instructions.
is_idef_parser_enabled = len(sys.argv) > 6
if is_idef_parser_enabled:
hex_common.read_idef_parser_enabled_file(sys.argv[5])
tagregs = hex_common.get_tagregs()
tagimms = hex_common.get_tagimms()
output_file = sys.argv[-1]
with open(output_file, 'w') as f:
f.write("#ifndef HEXAGON_TCG_FUNCS_H\n")
f.write("#define HEXAGON_TCG_FUNCS_H\n\n")
if is_idef_parser_enabled:
f.write("#include \"idef-generated-emitter.h.inc\"\n\n")
for tag in hex_common.tags:
## Skip the priv instructions
if ( "A_PRIV" in hex_common.attribdict[tag] ) :
continue
## Skip the guest instructions
if ( "A_GUEST" in hex_common.attribdict[tag] ) :
continue
## Skip the diag instructions
if ( tag == "Y6_diag" ) :
continue
if ( tag == "Y6_diag0" ) :
continue
if ( tag == "Y6_diag1" ) :
continue
gen_def_tcg_func(f, tag, tagregs, tagimms)
f.write("#endif /* HEXAGON_TCG_FUNCS_H */\n")
if __name__ == "__main__":
main()

## File: qemu-master/target/hexagon/gen_printinsn.py
#!/usr/bin/env python3
##
## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sys
import re
import string
import hex_common
##
## Generate data for printing each instruction (format string + operands)
##
def regprinter(m):
str = m.group(1)
str += ":".join(["%d"]*len(m.group(2)))
str += m.group(3)
if ('S' in m.group(1)) and (len(m.group(2)) == 1):
str += "/%s"
elif ('C' in m.group(1)) and (len(m.group(2)) == 1):
str += "/%s"
return str
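## Illustrative behavior: a "Rd32" token becomes the format "R%d", a pair
## token "Rdd32" becomes "R%d:%d", and single S/C registers gain a trailing
## "/%s" so the symbolic register name can be printed beside the number.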
def spacify(s):
# Regular expression that matches any operator that contains the '=' character:
opswithequal_re = '[-+^&|!<>=]?='
# Regular expression that matches any assignment operator.
assignment_re = '[-+^&|]?='
# Out of the operators that contain the = sign, if the operator is also an
# assignment, spaces will be added around it, unless it's enclosed within
# parentheses, or spaces are already present.
equals = re.compile(opswithequal_re)
assign = re.compile(assignment_re)
slen = len(s)
paren_count = {}
i = 0
pc = 0
while i < slen:
c = s[i]
if c == '(':
pc += 1
elif c == ')':
pc -= 1
paren_count[i] = pc
i += 1
# Iterate over all operators that contain the equal sign. If any
# match is also an assignment operator, add spaces around it if
# the parenthesis count is 0.
pos = 0
out = []
for m in equals.finditer(s):
ms = m.start()
me = m.end()
# t is the string that matched opswithequal_re.
t = m.string[ms:me]
out += s[pos:ms]
pos = me
if paren_count[ms] == 0:
# Check if the entire string t is an assignment.
am = assign.match(t)
if am and len(am.group(0)) == me-ms:
# Don't add spaces if they are already there.
if ms > 0 and s[ms-1] != ' ':
out.append(' ')
out += t
if me < slen and s[me] != ' ':
out.append(' ')
continue
# If this is not an assignment, just append it to the output
# string.
out += t
# Append the remaining part of the string.
out += s[pos:len(s)]
return ''.join(out)
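## Illustrative behavior: spacify("Rd=add(Rs,Rt)") returns "Rd = add(Rs,Rt)".
## The top-level "=" is a bare assignment, so spaces are added around it;
## operators such as "==" or "<=" fail the assignment check, and matches
## inside parentheses are always left untouched.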
def main():
hex_common.read_semantics_file(sys.argv[1])
hex_common.read_attribs_file(sys.argv[2])
immext_casere = re.compile(r'IMMEXT\(([A-Za-z])')
with open(sys.argv[3], 'w') as f:
for tag in hex_common.tags:
if not hex_common.behdict[tag]: continue
extendable_upper_imm = False
extendable_lower_imm = False
m = immext_casere.search(hex_common.semdict[tag])
if m:
if m.group(1).isupper():
extendable_upper_imm = True
else:
extendable_lower_imm = True
beh = hex_common.behdict[tag]
beh = hex_common.regre.sub(regprinter,beh)
beh = hex_common.absimmre.sub(r"#%s0x%x",beh)
beh = hex_common.relimmre.sub(r"PC+%s%d",beh)
beh = spacify(beh)
# Print out a literal "%s" at the end, used to match empty string
# so C won't complain at us
if ("A_VECX" in hex_common.attribdict[tag]):
macname = "DEF_VECX_PRINTINFO"
else: macname = "DEF_PRINTINFO"
f.write('%s(%s,"%s%%s"' % (macname,tag,beh))
regs_or_imms = \
hex_common.reg_or_immre.findall(hex_common.behdict[tag])
ri = 0
seenregs = {}
for allregs,a,b,c,d,allimm,immlett,bits,immshift in regs_or_imms:
if a:
#register
if b in seenregs:
regno = seenregs[b]
else:
regno = ri
if len(b) == 1:
f.write(', insn->regno[%d]' % regno)
if 'S' in a:
f.write(', sreg2str(insn->regno[%d])' % regno)
elif 'C' in a:
f.write(', creg2str(insn->regno[%d])' % regno)
elif len(b) == 2:
f.write(', insn->regno[%d] + 1, insn->regno[%d]' % \
(regno,regno))
else:
print("Put some stuff to handle quads here")
if b not in seenregs:
seenregs[b] = ri
ri += 1
else:
#immediate
if (immlett.isupper()):
if extendable_upper_imm:
if immlett in 'rR':
f.write(',insn->extension_valid?"##":""')
else:
f.write(',insn->extension_valid?"#":""')
else:
f.write(',""')
ii = 1
else:
if extendable_lower_imm:
if immlett in 'rR':
f.write(',insn->extension_valid?"##":""')
else:
f.write(',insn->extension_valid?"#":""')
else:
f.write(',""')
ii = 0
f.write(', insn->immed[%d]' % ii)
# append empty string so there is at least one more arg
f.write(',"")\n')
if __name__ == "__main__":
main()

## File: qemu-master/target/hexagon/gen_opcodes_def.py
#!/usr/bin/env python3
##
## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sys
import re
import string
import hex_common
def main():
hex_common.read_semantics_file(sys.argv[1])
##
## Generate a list of all the opcodes
##
with open(sys.argv[3], 'w') as f:
for tag in hex_common.tags:
f.write ( "OPCODE(%s),\n" % (tag) )
if __name__ == "__main__":
main()

## File: qemu-master/target/hexagon/dectree.py
#!/usr/bin/env python3
##
## Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import io
import re
import sys
import iset
encs = {tag : ''.join(reversed(iset.iset[tag]['enc'].replace(' ', '')))
for tag in iset.tags if iset.iset[tag]['enc'] != 'MISSING ENCODING'}
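# The reversal above makes string index i correspond to instruction bit i
# (LSB first): an 'enc' field of '0111 1101' would be stored as '10111110'.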
enc_classes = set([iset.iset[tag]['enc_class'] for tag in encs.keys()])
subinsn_enc_classes = \
set([enc_class for enc_class in enc_classes \
if enc_class.startswith('SUBINSN_')])
ext_enc_classes = \
set([enc_class for enc_class in enc_classes \
if enc_class not in ('NORMAL', '16BIT') and \
not enc_class.startswith('SUBINSN_')])
try:
subinsn_groupings = iset.subinsn_groupings
except AttributeError:
subinsn_groupings = {}
for (tag, subinsn_grouping) in subinsn_groupings.items():
encs[tag] = ''.join(reversed(subinsn_grouping['enc'].replace(' ', '')))
dectree_normal = {'leaves' : set()}
dectree_16bit = {'leaves' : set()}
dectree_subinsn_groupings = {'leaves' : set()}
dectree_subinsns = {name : {'leaves' : set()} for name in subinsn_enc_classes}
dectree_extensions = {name : {'leaves' : set()} for name in ext_enc_classes}
for tag in encs.keys():
if tag in subinsn_groupings:
dectree_subinsn_groupings['leaves'].add(tag)
continue
enc_class = iset.iset[tag]['enc_class']
if enc_class.startswith('SUBINSN_'):
if len(encs[tag]) != 32:
encs[tag] = encs[tag] + '0' * (32 - len(encs[tag]))
dectree_subinsns[enc_class]['leaves'].add(tag)
elif enc_class == '16BIT':
if len(encs[tag]) != 16:
raise Exception('Tag "{}" has enc_class "{}" and not an encoding ' +
'width of 16 bits!'.format(tag, enc_class))
dectree_16bit['leaves'].add(tag)
else:
if len(encs[tag]) != 32:
raise Exception('Tag "{}" has enc_class "{}" and not an encoding ' +
'width of 32 bits!'.format(tag, enc_class))
if enc_class == 'NORMAL':
dectree_normal['leaves'].add(tag)
else:
dectree_extensions[enc_class]['leaves'].add(tag)
faketags = set()
for (tag, enc) in iset.enc_ext_spaces.items():
faketags.add(tag)
encs[tag] = ''.join(reversed(enc.replace(' ', '')))
dectree_normal['leaves'].add(tag)
faketags |= set(subinsn_groupings.keys())
def every_bit_counts(bitset):
for i in range(1, len(next(iter(bitset)))):
if len(set([bits[:i] + bits[i+1:] for bits in bitset])) == len(bitset):
return False
return True
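# Illustration: every_bit_counts({'00', '01', '10', '11'}) is True, since
# dropping any tested bit position makes two members collide, while for
# {'00', '11'} the members stay distinct even with position 1 dropped, so
# the function returns False.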
def auto_separate(node):
tags = node['leaves']
if len(tags) <= 1:
return
enc_width = len(encs[next(iter(tags))])
opcode_bit_for_all = \
[all([encs[tag][i] in '01' \
for tag in tags]) for i in range(enc_width)]
opcode_bit_is_0_for_all = \
[opcode_bit_for_all[i] and all([encs[tag][i] == '0' \
for tag in tags]) for i in range(enc_width)]
opcode_bit_is_1_for_all = \
[opcode_bit_for_all[i] and all([encs[tag][i] == '1' \
for tag in tags]) for i in range(enc_width)]
differentiator_opcode_bit = \
[opcode_bit_for_all[i] and \
not (opcode_bit_is_0_for_all[i] or \
opcode_bit_is_1_for_all[i]) \
for i in range(enc_width)]
best_width = 0
for width in range(4, 0, -1):
for lsb in range(enc_width - width, -1, -1):
bitset = set([encs[tag][lsb:lsb+width] for tag in tags])
if all(differentiator_opcode_bit[lsb:lsb+width]) and \
(len(bitset) == len(tags) or every_bit_counts(bitset)):
best_width = width
best_lsb = lsb
caught_all_tags = len(bitset) == len(tags)
break
if best_width != 0:
break
if best_width == 0:
raise Exception('Could not find a way to differentiate the encodings ' +
'of the following tags:\n{}'.format('\n'.join(tags)))
if caught_all_tags:
for width in range(1, best_width):
for lsb in range(enc_width - width, -1, -1):
bitset = set([encs[tag][lsb:lsb+width] for tag in tags])
if all(differentiator_opcode_bit[lsb:lsb+width]) and \
len(bitset) == len(tags):
best_width = width
best_lsb = lsb
break
else:
continue
break
node['separator_lsb'] = best_lsb
node['separator_width'] = best_width
node['children'] = []
for value in range(2 ** best_width):
child = {}
bits = ''.join(reversed('{:0{}b}'.format(value, best_width)))
child['leaves'] = \
set([tag for tag in tags \
if encs[tag][best_lsb:best_lsb+best_width] == bits])
node['children'].append(child)
for child in node['children']:
auto_separate(child)
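
# Shape of the tree auto_separate() builds (field values below are
# hypothetical): every node with more than one leaf gains a separator
# field and one child per separator value, e.g.
#   {'leaves': {...}, 'separator_lsb': 21, 'separator_width': 3,
#    'children': [{'leaves': {...}}, ...]}   # 2 ** 3 children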
auto_separate(dectree_normal)
auto_separate(dectree_16bit)
if subinsn_groupings:
auto_separate(dectree_subinsn_groupings)
for dectree_subinsn in dectree_subinsns.values():
auto_separate(dectree_subinsn)
for dectree_ext in dectree_extensions.values():
auto_separate(dectree_ext)
for tag in faketags:
del encs[tag]
def table_name(parents, node):
path = parents + [node]
root = path[0]
tag = next(iter(node['leaves']))
if tag in subinsn_groupings:
enc_width = len(subinsn_groupings[tag]['enc'].replace(' ', ''))
else:
tag = next(iter(node['leaves'] - faketags))
enc_width = len(encs[tag])
determining_bits = ['_'] * enc_width
for (parent, child) in zip(path[:-1], path[1:]):
lsb = parent['separator_lsb']
width = parent['separator_width']
value = parent['children'].index(child)
determining_bits[lsb:lsb+width] = \
list(reversed('{:0{}b}'.format(value, width)))
if tag in subinsn_groupings:
name = 'DECODE_ROOT_EE'
else:
enc_class = iset.iset[tag]['enc_class']
if enc_class in ext_enc_classes:
name = 'DECODE_EXT_{}'.format(enc_class)
elif enc_class in subinsn_enc_classes:
name = 'DECODE_SUBINSN_{}'.format(enc_class)
else:
name = 'DECODE_ROOT_{}'.format(enc_width)
if node != root:
name += '_' + ''.join(reversed(determining_bits))
return name
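
# For illustration (bit values assumed): a child reached from the 32-bit
# root via separator value 0b101 is named 'DECODE_ROOT_32_' followed by a
# 32-character string holding '0'/'1' at the separator positions and '_'
# everywhere else.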
def print_node(f, node, parents):
if len(node['leaves']) <= 1:
return
name = table_name(parents, node)
lsb = node['separator_lsb']
width = node['separator_width']
print('DECODE_NEW_TABLE({},{},DECODE_SEPARATOR_BITS({},{}))'.\
format(name, 2 ** width, lsb, width), file=f)
for child in node['children']:
if len(child['leaves']) == 0:
print('INVALID()', file=f)
elif len(child['leaves']) == 1:
(tag,) = child['leaves']
if tag in subinsn_groupings:
class_a = subinsn_groupings[tag]['class_a']
class_b = subinsn_groupings[tag]['class_b']
enc = subinsn_groupings[tag]['enc'].replace(' ', '')
if 'RESERVED' in tag:
print('INVALID()', file=f)
else:
print('SUBINSNS({},{},{},"{}")'.\
format(tag, class_a, class_b, enc), file=f)
elif tag in iset.enc_ext_spaces:
enc = iset.enc_ext_spaces[tag].replace(' ', '')
print('EXTSPACE({},"{}")'.format(tag, enc), file=f)
else:
enc = ''.join(reversed(encs[tag]))
print('TERMINAL({},"{}")'.format(tag, enc), file=f)
else:
print('TABLE_LINK({})'.format(table_name(parents + [node], child)),
file=f)
print('DECODE_END_TABLE({},{},DECODE_SEPARATOR_BITS({},{}))'.\
format(name, 2 ** width, lsb, width), file=f)
print(file=f)
parents.append(node)
for child in node['children']:
print_node(f, child, parents)
parents.pop()
def print_tree(f, tree):
print_node(f, tree, [])
def print_match_info(f):
for tag in sorted(encs.keys(), key=iset.tags.index):
enc = ''.join(reversed(encs[tag]))
mask = int(re.sub(r'[^1]', r'0', enc.replace('0', '1')), 2)
match = int(re.sub(r'[^01]', r'0', enc), 2)
suffix = ''
print('DECODE{}_MATCH_INFO({},0x{:x}U,0x{:x}U)'.\
format(suffix, tag, mask, match), file=f)
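
# Example (encoding assumed): for enc = '0110iiii' the '0'/'1' positions
# are opcode bits, giving mask == 0xf0 and match == 0x60; operand bits
# ('i') contribute 0 to both.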
regre = re.compile(
r'((?<!DUP)[MNORCPQXSGVZA])([stuvwxyzdefg]+)([.]?[LlHh]?)(\d+S?)')
immre = re.compile(r'[#]([rRsSuUm])(\d+)(?:[:](\d+))?')
def ordered_unique(l):
return sorted(set(l), key=l.index)
implicit_registers = {
'SP' : 29,
'FP' : 30,
'LR' : 31
}
num_registers = {
'R' : 32,
'V' : 32
}
def print_op_info(f):
for tag in sorted(encs.keys(), key=iset.tags.index):
enc = encs[tag]
print(file=f)
print('DECODE_OPINFO({},'.format(tag), file=f)
regs = ordered_unique(regre.findall(iset.iset[tag]['syntax']))
imms = ordered_unique(immre.findall(iset.iset[tag]['syntax']))
regno = 0
for reg in regs:
reg_type = reg[0]
reg_letter = reg[1][0]
reg_num_choices = int(reg[3].rstrip('S'))
reg_mapping = reg[0] + ''.join(['_' for letter in reg[1]]) + reg[3]
reg_enc_fields = re.findall(reg_letter + '+', enc)
if len(reg_enc_fields) == 0:
raise Exception('Tag "{}" missing register field!'.format(tag))
if len(reg_enc_fields) > 1:
raise Exception('Tag "{}" has split register field!'.\
format(tag))
reg_enc_field = reg_enc_fields[0]
if 2 ** len(reg_enc_field) != reg_num_choices:
raise Exception('Tag "{}" has incorrect register field width!'.\
format(tag))
print(' DECODE_REG({},{},{})'.\
format(regno, len(reg_enc_field), enc.index(reg_enc_field)),
file=f)
if reg_type in num_registers and \
reg_num_choices != num_registers[reg_type]:
print(' DECODE_MAPPED_REG({},{})'.\
format(regno, reg_mapping), file=f)
regno += 1
def implicit_register_key(reg):
return implicit_registers[reg]
for reg in sorted(
set([r for r in (iset.iset[tag]['rregs'].split(',') + \
iset.iset[tag]['wregs'].split(',')) \
if r in implicit_registers]), key=implicit_register_key):
print(' DECODE_IMPL_REG({},{})'.\
format(regno, implicit_registers[reg]), file=f)
regno += 1
if imms and imms[0][0].isupper():
imms = reversed(imms)
for imm in imms:
if imm[0].isupper():
immno = 1
else:
immno = 0
imm_type = imm[0]
imm_width = int(imm[1])
imm_shift = imm[2]
if imm_shift:
imm_shift = int(imm_shift)
else:
imm_shift = 0
if imm_type.islower():
imm_letter = 'i'
else:
imm_letter = 'I'
remainder = imm_width
for m in reversed(list(re.finditer(imm_letter + '+', enc))):
remainder -= m.end() - m.start()
print(' DECODE_IMM({},{},{},{})'.\
format(immno, m.end() - m.start(), m.start(), remainder),
file=f)
            if remainder != 0:
                # imm is a tuple from re.findall(), so build the display
                # string rather than mutating it in place
                imm_str = imm[0] + imm[1] + (':' + imm[2] if imm[2] else '')
                raise Exception(('Tag "{}" has an incorrect number of '
                                 'encoding bits for immediate "{}"')
                                .format(tag, imm_str))
if imm_type.lower() in 'sr':
print(' DECODE_IMM_SXT({},{})'.\
format(immno, imm_width), file=f)
if imm_type.lower() == 'n':
print(' DECODE_IMM_NEG({},{})'.\
format(immno, imm_width), file=f)
if imm_shift:
print(' DECODE_IMM_SHIFT({},{})'.\
format(immno, imm_shift), file=f)
print(')', file=f)
if __name__ == '__main__':
with open(sys.argv[1], 'w') as f:
print_tree(f, dectree_normal)
print_tree(f, dectree_16bit)
if subinsn_groupings:
print_tree(f, dectree_subinsn_groupings)
for (name, dectree_subinsn) in sorted(dectree_subinsns.items()):
print_tree(f, dectree_subinsn)
for (name, dectree_ext) in sorted(dectree_extensions.items()):
print_tree(f, dectree_ext)
print_match_info(f)
print_op_info(f)
| 13,570 | 37.553977 | 80 | py |
qemu | qemu-master/scripts/shaderinclude.py | #!/usr/bin/env python3
#
# Copyright (C) 2023 Red Hat, Inc.
#
# SPDX-License-Identifier: GPL-2.0-or-later
import sys
import os
def main(args):
file_path = args[1]
basename = os.path.basename(file_path)
varname = basename.replace('-', '_').replace('.', '_')
with os.fdopen(sys.stdout.fileno(), "wt", closefd=False, newline='\n') as stdout:
with open(file_path, "r", encoding='utf-8') as file:
print(f'static GLchar {varname}_src[] =', file=stdout)
for line in file:
line = line.rstrip()
print(f' "{line}\\n"', file=stdout)
print(' "\\n";', file=stdout)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 712 | 25.407407 | 85 | py |
qemu | qemu-master/scripts/test-driver.py | #! /usr/bin/env python3
# Wrapper for tests that hides the output if they succeed.
# Used by "make check"
#
# Copyright (C) 2020 Red Hat, Inc.
#
# Author: Paolo Bonzini <[email protected]>
import subprocess
import sys
import os
import argparse
parser = argparse.ArgumentParser(description='Test driver for QEMU')
parser.add_argument('-C', metavar='DIR', dest='dir', default='.',
help='change to DIR before doing anything else')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
help='be more verbose')
parser.add_argument('test_args', nargs=argparse.REMAINDER)
args = parser.parse_args()
os.chdir(args.dir)
test_args = args.test_args
if test_args[0] == '--':
test_args = test_args[1:]
if args.verbose:
result = subprocess.run(test_args, stdout=None, stderr=None)
else:
result = subprocess.run(test_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if result.returncode:
sys.stdout.buffer.write(result.stdout)
sys.exit(result.returncode)
| 1,041 | 27.944444 | 88 | py |
qemu | qemu-master/scripts/meson-buildoptions.py | #! /usr/bin/env python3
# Generate configure command line options handling code, based on Meson's
# user build options introspection data
#
# Copyright (C) 2021 Red Hat, Inc.
#
# Author: Paolo Bonzini <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import json
import textwrap
import shlex
import sys
SKIP_OPTIONS = {
"default_devices",
"fuzzing_engine",
"qemu_suffix",
"smbd",
}
OPTION_NAMES = {
"b_coverage": "gcov",
"b_lto": "lto",
"malloc": "enable-malloc",
"pkgversion": "with-pkgversion",
"qemu_firmwarepath": "firmwarepath",
"trace_backends": "enable-trace-backends",
"trace_file": "with-trace-file",
}
BUILTIN_OPTIONS = {
"b_coverage",
"b_lto",
"datadir",
"includedir",
"libdir",
"libexecdir",
"localedir",
"localstatedir",
"mandir",
"strip",
"sysconfdir",
}
LINE_WIDTH = 76
# Convert the default value of an option to the string used in
# the help message
def get_help(opt):
if opt["name"] == "libdir":
return 'system default'
value = opt["value"]
if isinstance(value, list):
return ",".join(value)
if isinstance(value, bool):
return "enabled" if value else "disabled"
return str(value)
def wrap(left, text, indent):
spaces = " " * indent
if len(left) >= indent:
yield left
left = spaces
else:
left = (left + spaces)[0:indent]
yield from textwrap.wrap(
text, width=LINE_WIDTH, initial_indent=left, subsequent_indent=spaces
)
def sh_print(line=""):
print(' printf "%s\\n"', shlex.quote(line))
def help_line(left, opt, indent, long):
right = f'{opt["description"]}'
if long:
value = get_help(opt)
if value != "auto" and value != "":
right += f" [{value}]"
if "choices" in opt and long:
choices = "/".join(sorted(opt["choices"]))
right += f" (choices: {choices})"
for x in wrap(" " + left, right, indent):
sh_print(x)
# Return whether the option (a dictionary) can be used with
# arguments. Booleans can never be used with arguments;
# combos allow an argument only if they accept other values
# than "auto", "enabled", and "disabled".
def allow_arg(opt):
if opt["type"] == "boolean":
return False
if opt["type"] != "combo":
return True
return not (set(opt["choices"]) <= {"auto", "disabled", "enabled"})
# Return whether the option (a dictionary) can be used without
# arguments. Booleans can only be used without arguments;
# combos require an argument if they accept neither "enabled"
# nor "disabled"
def require_arg(opt):
if opt["type"] == "boolean":
return False
if opt["type"] != "combo":
return True
return not ({"enabled", "disabled"}.intersection(opt["choices"]))
def filter_options(json):
if ":" in json["name"]:
return False
if json["section"] == "user":
return json["name"] not in SKIP_OPTIONS
else:
return json["name"] in BUILTIN_OPTIONS
def load_options(json):
json = [x for x in json if filter_options(x)]
return sorted(json, key=lambda x: x["name"])
def cli_option(opt):
name = opt["name"]
if name in OPTION_NAMES:
return OPTION_NAMES[name]
return name.replace("_", "-")
def cli_help_key(opt):
key = cli_option(opt)
if require_arg(opt):
return key
if opt["type"] == "boolean" and opt["value"]:
return f"disable-{key}"
return f"enable-{key}"
def cli_metavar(opt):
if opt["type"] == "string":
return "VALUE"
if opt["type"] == "array":
return "CHOICES" if "choices" in opt else "VALUES"
return "CHOICE"
def print_help(options):
print("meson_options_help() {")
for opt in sorted(options, key=cli_help_key):
key = cli_help_key(opt)
# The first section includes options that have an arguments,
# and booleans (i.e., only one of enable/disable makes sense)
if require_arg(opt):
metavar = cli_metavar(opt)
left = f"--{key}={metavar}"
help_line(left, opt, 27, True)
elif opt["type"] == "boolean":
left = f"--{key}"
help_line(left, opt, 27, False)
elif allow_arg(opt):
if opt["type"] == "combo" and "enabled" in opt["choices"]:
left = f"--{key}[=CHOICE]"
else:
left = f"--{key}=CHOICE"
help_line(left, opt, 27, True)
sh_print()
sh_print("Optional features, enabled with --enable-FEATURE and")
sh_print("disabled with --disable-FEATURE, default is enabled if available")
sh_print("(unless built with --without-default-features):")
sh_print()
for opt in options:
key = opt["name"].replace("_", "-")
if opt["type"] != "boolean" and not allow_arg(opt):
help_line(key, opt, 18, False)
print("}")
def print_parse(options):
print("_meson_option_parse() {")
print(" case $1 in")
for opt in options:
key = cli_option(opt)
name = opt["name"]
if require_arg(opt):
if opt["type"] == "array" and not "choices" in opt:
print(f' --{key}=*) quote_sh "-D{name}=$(meson_option_build_array $2)" ;;')
else:
print(f' --{key}=*) quote_sh "-D{name}=$2" ;;')
elif opt["type"] == "boolean":
print(f' --enable-{key}) printf "%s" -D{name}=true ;;')
print(f' --disable-{key}) printf "%s" -D{name}=false ;;')
else:
if opt["type"] == "combo" and "enabled" in opt["choices"]:
print(f' --enable-{key}) printf "%s" -D{name}=enabled ;;')
if opt["type"] == "combo" and "disabled" in opt["choices"]:
print(f' --disable-{key}) printf "%s" -D{name}=disabled ;;')
if allow_arg(opt):
print(f' --enable-{key}=*) quote_sh "-D{name}=$2" ;;')
print(" *) return 1 ;;")
print(" esac")
print("}")
options = load_options(json.load(sys.stdin))
print("# This file is generated by meson-buildoptions.py, do not edit!")
print_help(options)
print_parse(options)
| 6,787 | 28.77193 | 94 | py |
qemu | qemu-master/scripts/cpu-x86-uarch-abi.py | #!/usr/bin/python3
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
# A script to generate a CSV file showing the x86_64 ABI
# compatibility levels for each CPU model.
#
from qemu.qmp.legacy import QEMUMonitorProtocol
import sys
if len(sys.argv) != 2:
print("syntax: %s QMP-SOCK\n\n" % __file__ +
"Where QMP-SOCK points to a QEMU process such as\n\n" +
" # qemu-system-x86_64 -qmp unix:/tmp/qmp,server,nowait " +
"-display none -accel kvm", file=sys.stderr)
sys.exit(1)
# Mandatory CPUID features for each microarch ABI level
levels = [
[ # x86-64 baseline
"cmov",
"cx8",
"fpu",
"fxsr",
"mmx",
"syscall",
"sse",
"sse2",
],
[ # x86-64-v2
"cx16",
"lahf-lm",
"popcnt",
"pni",
"sse4.1",
"sse4.2",
"ssse3",
],
[ # x86-64-v3
"avx",
"avx2",
"bmi1",
"bmi2",
"f16c",
"fma",
"abm",
"movbe",
],
[ # x86-64-v4
"avx512f",
"avx512bw",
"avx512cd",
"avx512dq",
"avx512vl",
],
]
# Assumes externally launched process such as
#
# qemu-system-x86_64 -qmp unix:/tmp/qmp,server,nowait -display none -accel kvm
#
# Note different results will be obtained with TCG, as
# TCG masks out certain features otherwise present in
# the CPU model definitions, as does KVM.
sock = sys.argv[1]
shell = QEMUMonitorProtocol(sock)
shell.connect()
models = shell.cmd("query-cpu-definitions")
# These QMP props don't correspond to CPUID features
# so ignore them
skip = [
"family",
"min-level",
"min-xlevel",
"vendor",
"model",
"model-id",
"stepping",
]
names = []
for model in models["return"]:
if "alias-of" in model:
continue
names.append(model["name"])
models = {}
for name in sorted(names):
cpu = shell.cmd("query-cpu-model-expansion",
{ "type": "static",
"model": { "name": name }})
got = {}
for (feature, present) in cpu["return"]["model"]["props"].items():
if present and feature not in skip:
got[feature] = True
if name in ["host", "max", "base"]:
continue
models[name] = {
# Dict of all present features in this CPU model
"features": got,
# Whether each x86-64 ABI level is satisfied
"levels": [False, False, False, False],
# Number of extra CPUID features compared to the x86-64 ABI level
"distance":[-1, -1, -1, -1],
# CPUID features present in model, but not in ABI level
"delta":[[], [], [], []],
# CPUID features in ABI level but not present in model
"missing": [[], [], [], []],
}
# Calculate whether the CPU models satisfy each ABI level
for name in models.keys():
for level in range(len(levels)):
got = set(models[name]["features"])
want = set(levels[level])
missing = want - got
match = True
if len(missing) > 0:
match = False
models[name]["levels"][level] = match
models[name]["missing"][level] = missing
# Cache list of CPU models satisfying each ABI level
abi_models = [
[],
[],
[],
[],
]
for name in models.keys():
for level in range(len(levels)):
if models[name]["levels"][level]:
abi_models[level].append(name)
for level in range(len(abi_models)):
# Find the union of features in all CPU models satisfying this ABI
allfeatures = {}
for name in abi_models[level]:
for feat in models[name]["features"]:
allfeatures[feat] = True
# Find the intersection of features in all CPU models satisfying this ABI
commonfeatures = []
for feat in allfeatures:
present = True
for name in models.keys():
if not models[name]["levels"][level]:
continue
if feat not in models[name]["features"]:
present = False
if present:
commonfeatures.append(feat)
# Determine how many extra features are present compared to the lowest
# common denominator
for name in models.keys():
if not models[name]["levels"][level]:
continue
delta = set(models[name]["features"].keys()) - set(commonfeatures)
models[name]["distance"][level] = len(delta)
models[name]["delta"][level] = delta
def print_uarch_abi_csv():
print("# Automatically generated from '%s'" % __file__)
print("Model,baseline,v2,v3,v4")
for name in models.keys():
print(name, end="")
for level in range(len(levels)):
if models[name]["levels"][level]:
print(",✅", end="")
else:
print(",", end="")
print()
print_uarch_abi_csv()
| 4,878 | 24.149485 | 80 | py |
qemu | qemu-master/scripts/qemu-stamp.py | #! /usr/bin/env python3
# Usage: scripts/qemu-stamp.py STRING1 STRING2... -- FILE1 FILE2...
import hashlib
import os
import sys
sha = hashlib.sha1()
is_file = False
for arg in sys.argv[1:]:
if arg == '--':
is_file = True
continue
if is_file:
with open(arg, 'rb') as f:
for chunk in iter(lambda: f.read(65536), b''):
sha.update(chunk)
else:
sha.update(os.fsencode(arg))
sha.update(b'\n')
# The hash can start with a digit, which the compiler doesn't
# like as a symbol. So prefix it with an underscore
print("_" + sha.hexdigest())
| 614 | 23.6 | 67 | py |
qemu | qemu-master/scripts/modinfo-collect.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import json
import shlex
import subprocess
def find_command(src, target, compile_commands):
for command in compile_commands:
if command['file'] != src:
continue
if target != '' and command['command'].find(target) == -1:
continue
return command['command']
return 'false'
def process_command(src, command):
skip = False
out = []
for item in shlex.split(command):
if skip:
skip = False
continue
if item == '-MF' or item == '-MQ' or item == '-o':
skip = True
continue
if item == '-c':
skip = True
continue
out.append(item)
out.append('-DQEMU_MODINFO')
out.append('-E')
out.append(src)
return out
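
# Illustrative transformation (compile command assumed):
#   cc -MQ foo.o -MF foo.o.d -o foo.o -c hw/foo.c
# becomes
#   cc -DQEMU_MODINFO -E hw/foo.c
# i.e. dependency/output arguments are dropped and the source is only
# preprocessed with QEMU_MODINFO defined.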
def main(args):
target = ''
if args[0] == '--target':
args.pop(0)
target = args.pop(0)
print("MODINFO_DEBUG target %s" % target)
arch = target[:-8] # cut '-softmmu'
print("MODINFO_START arch \"%s\" MODINFO_END" % arch)
with open('compile_commands.json') as f:
compile_commands = json.load(f)
for src in args:
if not src.endswith('.c'):
print("MODINFO_DEBUG skip %s" % src)
continue
print("MODINFO_DEBUG src %s" % src)
command = find_command(src, target, compile_commands)
cmdline = process_command(src, command)
print("MODINFO_DEBUG cmd", cmdline)
result = subprocess.run(cmdline, stdout = subprocess.PIPE,
universal_newlines = True)
if result.returncode != 0:
sys.exit(result.returncode)
for line in result.stdout.split('\n'):
if line.find('MODINFO') != -1:
print(line)
if __name__ == "__main__":
main(sys.argv[1:])
| 1,898 | 27.772727 | 66 | py |
qemu | qemu-master/scripts/symlink-install-tree.py | #!/usr/bin/env python3
from pathlib import PurePath
import errno
import json
import os
import subprocess
import sys
def destdir_join(d1: str, d2: str) -> str:
if not d1:
return d2
# c:\destdir + c:\prefix must produce c:\destdir\prefix
return str(PurePath(d1, *PurePath(d2).parts[1:]))
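
# e.g. destdir_join('/tmp/dest', '/usr/bin') -> '/tmp/dest/usr/bin';
# the root of the second path is dropped before joining.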
introspect = os.environ.get('MESONINTROSPECT')
out = subprocess.run([*introspect.split(' '), '--installed'],
stdout=subprocess.PIPE, check=True).stdout
for source, dest in json.loads(out).items():
bundle_dest = destdir_join('qemu-bundle', dest)
path = os.path.dirname(bundle_dest)
try:
os.makedirs(path, exist_ok=True)
except BaseException as e:
print(f'error making directory {path}', file=sys.stderr)
raise e
try:
os.symlink(source, bundle_dest)
except BaseException as e:
if not isinstance(e, OSError) or e.errno != errno.EEXIST:
print(f'error making symbolic link {dest}', file=sys.stderr)
raise e
| 1,020 | 29.939394 | 72 | py |
qemu | qemu-master/scripts/simpletrace.py | #!/usr/bin/env python3
#
# Pretty-printer for simple trace backend binary trace files
#
# Copyright IBM, Corp. 2010
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
#
# For help see docs/devel/tracing.rst
import struct
import inspect
from tracetool import read_events, Event
from tracetool.backend.simple import is_string
header_event_id = 0xffffffffffffffff
header_magic = 0xf2b177cb0aa429b4
dropped_event_id = 0xfffffffffffffffe
record_type_mapping = 0
record_type_event = 1
log_header_fmt = '=QQQ'
rec_header_fmt = '=QQII'
def read_header(fobj, hfmt):
'''Read a trace record header'''
hlen = struct.calcsize(hfmt)
hdr = fobj.read(hlen)
if len(hdr) != hlen:
return None
return struct.unpack(hfmt, hdr)
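
# e.g. read_header(f, log_header_fmt) on a well-formed trace yields the
# tuple (header_event_id, header_magic, log_version); None means the
# file ended before a full header could be read.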
def get_record(edict, idtoname, rechdr, fobj):
"""Deserialize a trace record from a file into a tuple
(name, timestamp, pid, arg1, ..., arg6)."""
if rechdr is None:
return None
if rechdr[0] != dropped_event_id:
event_id = rechdr[0]
name = idtoname[event_id]
rec = (name, rechdr[1], rechdr[3])
try:
event = edict[name]
except KeyError as e:
import sys
sys.stderr.write('%s event is logged but is not declared ' \
'in the trace events file, try using ' \
'trace-events-all instead.\n' % str(e))
sys.exit(1)
for type, name in event.args:
if is_string(type):
                (length,) = struct.unpack('=L', fobj.read(4))
                s = fobj.read(length)
rec = rec + (s,)
else:
(value,) = struct.unpack('=Q', fobj.read(8))
rec = rec + (value,)
else:
rec = ("dropped", rechdr[1], rechdr[3])
(value,) = struct.unpack('=Q', fobj.read(8))
rec = rec + (value,)
return rec
def get_mapping(fobj):
(event_id, ) = struct.unpack('=Q', fobj.read(8))
    (length, ) = struct.unpack('=L', fobj.read(4))
    name = fobj.read(length).decode()
return (event_id, name)
def read_record(edict, idtoname, fobj):
"""Deserialize a trace record from a file into a tuple (event_num, timestamp, pid, arg1, ..., arg6)."""
rechdr = read_header(fobj, rec_header_fmt)
return get_record(edict, idtoname, rechdr, fobj)
def read_trace_header(fobj):
"""Read and verify trace file header"""
header = read_header(fobj, log_header_fmt)
if header is None:
raise ValueError('Not a valid trace file!')
if header[0] != header_event_id:
raise ValueError('Not a valid trace file, header id %d != %d' %
(header[0], header_event_id))
if header[1] != header_magic:
raise ValueError('Not a valid trace file, header magic %d != %d' %
(header[1], header_magic))
log_version = header[2]
if log_version not in [0, 2, 3, 4]:
raise ValueError('Unknown version of tracelog format!')
if log_version != 4:
raise ValueError('Log format %d not supported with this QEMU release!'
% log_version)
def read_trace_records(edict, idtoname, fobj):
"""Deserialize trace records from a file, yielding record tuples (event_num, timestamp, pid, arg1, ..., arg6).
Note that `idtoname` is modified if the file contains mapping records.
Args:
edict (str -> Event): events dict, indexed by name
idtoname (int -> str): event names dict, indexed by event ID
fobj (file): input file
"""
while True:
t = fobj.read(8)
if len(t) == 0:
break
(rectype, ) = struct.unpack('=Q', t)
if rectype == record_type_mapping:
event_id, name = get_mapping(fobj)
idtoname[event_id] = name
else:
rec = read_record(edict, idtoname, fobj)
yield rec
class Analyzer(object):
"""A trace file analyzer which processes trace records.
An analyzer can be passed to run() or process(). The begin() method is
invoked, then each trace record is processed, and finally the end() method
is invoked.
If a method matching a trace event name exists, it is invoked to process
that trace record. Otherwise the catchall() method is invoked.
Example:
The following method handles the runstate_set(int new_state) trace event::
def runstate_set(self, new_state):
...
The method can also take a timestamp argument before the trace event
arguments::
def runstate_set(self, timestamp, new_state):
...
Timestamps have the uint64_t type and are in nanoseconds.
The pid can be included in addition to the timestamp and is useful when
dealing with traces from multiple processes::
def runstate_set(self, timestamp, pid, new_state):
...
"""
def begin(self):
"""Called at the start of the trace."""
pass
def catchall(self, event, rec):
"""Called if no specific method for processing a trace event has been found."""
pass
def end(self):
"""Called at the end of the trace."""
pass
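
# Minimal usage sketch (the trace event name here is hypothetical):
#
#     class NetAnalyzer(Analyzer):
#         def net_packet(self, timestamp, pid, size):
#             print('packet of %d bytes' % size)
#
#     run(NetAnalyzer())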
def process(events, log, analyzer, read_header=True):
"""Invoke an analyzer on each event in a log."""
if isinstance(events, str):
events = read_events(open(events, 'r'), events)
if isinstance(log, str):
log = open(log, 'rb')
if read_header:
read_trace_header(log)
frameinfo = inspect.getframeinfo(inspect.currentframe())
dropped_event = Event.build("Dropped_Event(uint64_t num_events_dropped)",
frameinfo.lineno + 1, frameinfo.filename)
edict = {"dropped": dropped_event}
idtoname = {dropped_event_id: "dropped"}
for event in events:
edict[event.name] = event
# If there is no header assume event ID mapping matches events list
if not read_header:
for event_id, event in enumerate(events):
idtoname[event_id] = event.name
def build_fn(analyzer, event):
if isinstance(event, str):
return analyzer.catchall
fn = getattr(analyzer, event.name, None)
if fn is None:
return analyzer.catchall
event_argcount = len(event.args)
        fn_argcount = len(inspect.getfullargspec(fn)[0]) - 1
if fn_argcount == event_argcount + 1:
# Include timestamp as first argument
return lambda _, rec: fn(*(rec[1:2] + rec[3:3 + event_argcount]))
elif fn_argcount == event_argcount + 2:
# Include timestamp and pid
return lambda _, rec: fn(*rec[1:3 + event_argcount])
else:
# Just arguments, no timestamp or pid
return lambda _, rec: fn(*rec[3:3 + event_argcount])
analyzer.begin()
fn_cache = {}
for rec in read_trace_records(edict, idtoname, log):
        event_name = rec[0]
        event = edict[event_name]
        if event_name not in fn_cache:
            fn_cache[event_name] = build_fn(analyzer, event)
        fn_cache[event_name](event, rec)
analyzer.end()
def run(analyzer):
"""Execute an analyzer on a trace file given on the command-line.
This function is useful as a driver for simple analysis scripts. More
advanced scripts will want to call process() instead."""
import sys
read_header = True
if len(sys.argv) == 4 and sys.argv[1] == '--no-header':
read_header = False
del sys.argv[1]
elif len(sys.argv) != 3:
sys.stderr.write('usage: %s [--no-header] <trace-events> ' \
'<trace-file>\n' % sys.argv[0])
sys.exit(1)
events = read_events(open(sys.argv[1], 'r'), sys.argv[1])
process(events, sys.argv[2], analyzer, read_header=read_header)
if __name__ == '__main__':
class Formatter(Analyzer):
def __init__(self):
self.last_timestamp = None
def catchall(self, event, rec):
timestamp = rec[1]
if self.last_timestamp is None:
self.last_timestamp = timestamp
delta_ns = timestamp - self.last_timestamp
self.last_timestamp = timestamp
fields = [event.name, '%0.3f' % (delta_ns / 1000.0),
'pid=%d' % rec[2]]
i = 3
for type, name in event.args:
if is_string(type):
fields.append('%s=%s' % (name, rec[i]))
else:
fields.append('%s=0x%x' % (name, rec[i]))
i += 1
print(' '.join(fields))
run(Formatter())
| 8,707 | 31.984848 | 114 | py |
qemu | qemu-master/scripts/analyze-migration.py | #!/usr/bin/env python3
#
# Migration Stream Analyzer
#
# Copyright (c) 2015 Alexander Graf <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.
import json
import os
import argparse
import collections
import struct
import sys
def mkdir_p(path):
try:
os.makedirs(path)
except OSError:
pass
class MigrationFile(object):
def __init__(self, filename):
self.filename = filename
self.file = open(self.filename, "rb")
def read64(self):
return int.from_bytes(self.file.read(8), byteorder='big', signed=True)
def read32(self):
return int.from_bytes(self.file.read(4), byteorder='big', signed=True)
def read16(self):
return int.from_bytes(self.file.read(2), byteorder='big', signed=True)
def read8(self):
return int.from_bytes(self.file.read(1), byteorder='big', signed=True)
def readstr(self, len = None):
return self.readvar(len).decode('utf-8')
def readvar(self, size = None):
if size is None:
size = self.read8()
if size == 0:
return ""
value = self.file.read(size)
if len(value) != size:
raise Exception("Unexpected end of %s at 0x%x" % (self.filename, self.file.tell()))
return value
def tell(self):
return self.file.tell()
# The VMSD description is at the end of the file, after EOF. Look for
# the last NULL byte, then for the beginning brace of JSON.
def read_migration_debug_json(self):
QEMU_VM_VMDESCRIPTION = 0x06
# Remember the offset in the file when we started
entrypos = self.file.tell()
# Read the last 10MB
self.file.seek(0, os.SEEK_END)
endpos = self.file.tell()
self.file.seek(max(-endpos, -10 * 1024 * 1024), os.SEEK_END)
datapos = self.file.tell()
data = self.file.read()
        # Reopen the file to get a fresh handle after reading to EOF
self.file = open(self.filename, "rb")
# Find the last NULL byte, then the first brace after that. This should
# be the beginning of our JSON data.
nulpos = data.rfind(b'\0')
jsonpos = data.find(b'{', nulpos)
# Check backwards from there and see whether we guessed right
self.file.seek(datapos + jsonpos - 5, 0)
if self.read8() != QEMU_VM_VMDESCRIPTION:
raise Exception("No Debug Migration device found")
jsonlen = self.read32()
# Seek back to where we were at the beginning
self.file.seek(entrypos, 0)
# explicit decode() needed for Python 3.5 compatibility
return data[jsonpos:jsonpos + jsonlen].decode("utf-8")
def close(self):
self.file.close()
class RamSection(object):
RAM_SAVE_FLAG_COMPRESS = 0x02
RAM_SAVE_FLAG_MEM_SIZE = 0x04
RAM_SAVE_FLAG_PAGE = 0x08
RAM_SAVE_FLAG_EOS = 0x10
RAM_SAVE_FLAG_CONTINUE = 0x20
RAM_SAVE_FLAG_XBZRLE = 0x40
RAM_SAVE_FLAG_HOOK = 0x80
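
    # Each RAM record starts with a 64-bit word: the bits below the target
    # page size hold the RAM_SAVE_FLAG_* values above and the remaining
    # upper bits hold the page-aligned address; e.g. with 4 KiB pages the
    # low 12 bits are flags.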
def __init__(self, file, version_id, ramargs, section_key):
if version_id != 4:
raise Exception("Unknown RAM version %d" % version_id)
self.file = file
self.section_key = section_key
self.TARGET_PAGE_SIZE = ramargs['page_size']
self.dump_memory = ramargs['dump_memory']
self.write_memory = ramargs['write_memory']
self.sizeinfo = collections.OrderedDict()
self.data = collections.OrderedDict()
self.data['section sizes'] = self.sizeinfo
self.name = ''
if self.write_memory:
self.files = { }
if self.dump_memory:
self.memory = collections.OrderedDict()
self.data['memory'] = self.memory
def __repr__(self):
return self.data.__repr__()
def __str__(self):
return self.data.__str__()
def getDict(self):
return self.data
def read(self):
# Read all RAM sections
while True:
addr = self.file.read64()
flags = addr & (self.TARGET_PAGE_SIZE - 1)
addr &= ~(self.TARGET_PAGE_SIZE - 1)
if flags & self.RAM_SAVE_FLAG_MEM_SIZE:
while True:
namelen = self.file.read8()
# We assume that no RAM chunk is big enough to ever
# hit the first byte of the address, so when we see
# a zero here we know it has to be an address, not the
# length of the next block.
if namelen == 0:
self.file.file.seek(-1, 1)
break
self.name = self.file.readstr(len = namelen)
                    size = self.file.read64()
                    self.sizeinfo[self.name] = '0x%016x' % size
                    if self.write_memory:
                        print(self.name)
                        mkdir_p('./' + os.path.dirname(self.name))
                        f = open('./' + self.name, "wb")
                        f.truncate(0)
                        f.truncate(size)
                        self.files[self.name] = f
flags &= ~self.RAM_SAVE_FLAG_MEM_SIZE
if flags & self.RAM_SAVE_FLAG_COMPRESS:
if flags & self.RAM_SAVE_FLAG_CONTINUE:
flags &= ~self.RAM_SAVE_FLAG_CONTINUE
else:
self.name = self.file.readstr()
fill_char = self.file.read8()
# The page in question is filled with fill_char now
if self.write_memory and fill_char != 0:
self.files[self.name].seek(addr, os.SEEK_SET)
                    # write bytes, not str: the memory files are opened
                    # in binary mode
                    self.files[self.name].write(
                        bytes([fill_char & 0xff]) * self.TARGET_PAGE_SIZE)
if self.dump_memory:
self.memory['%s (0x%016x)' % (self.name, addr)] = 'Filled with 0x%02x' % fill_char
flags &= ~self.RAM_SAVE_FLAG_COMPRESS
elif flags & self.RAM_SAVE_FLAG_PAGE:
if flags & self.RAM_SAVE_FLAG_CONTINUE:
flags &= ~self.RAM_SAVE_FLAG_CONTINUE
else:
self.name = self.file.readstr()
if self.write_memory or self.dump_memory:
data = self.file.readvar(size = self.TARGET_PAGE_SIZE)
else: # Just skip RAM data
self.file.file.seek(self.TARGET_PAGE_SIZE, 1)
if self.write_memory:
self.files[self.name].seek(addr, os.SEEK_SET)
self.files[self.name].write(data)
if self.dump_memory:
hexdata = " ".join("{0:02x}".format(ord(c)) for c in data)
self.memory['%s (0x%016x)' % (self.name, addr)] = hexdata
flags &= ~self.RAM_SAVE_FLAG_PAGE
elif flags & self.RAM_SAVE_FLAG_XBZRLE:
raise Exception("XBZRLE RAM compression is not supported yet")
elif flags & self.RAM_SAVE_FLAG_HOOK:
raise Exception("RAM hooks don't make sense with files")
# End of RAM section
if flags & self.RAM_SAVE_FLAG_EOS:
break
if flags != 0:
raise Exception("Unknown RAM flags: %x" % flags)
def __del__(self):
if self.write_memory:
for key in self.files:
self.files[key].close()
class HTABSection(object):
HASH_PTE_SIZE_64 = 16
def __init__(self, file, version_id, device, section_key):
if version_id != 1:
raise Exception("Unknown HTAB version %d" % version_id)
self.file = file
self.section_key = section_key
def read(self):
header = self.file.read32()
if (header == -1):
# "no HPT" encoding
return
if (header > 0):
# First section, just the hash shift
return
# Read until end marker
while True:
index = self.file.read32()
n_valid = self.file.read16()
n_invalid = self.file.read16()
if index == 0 and n_valid == 0 and n_invalid == 0:
break
self.file.readvar(n_valid * self.HASH_PTE_SIZE_64)
def getDict(self):
return ""
class ConfigurationSection(object):
def __init__(self, file):
self.file = file
def read(self):
name_len = self.file.read32()
name = self.file.readstr(len = name_len)
class VMSDFieldGeneric(object):
def __init__(self, desc, file):
self.file = file
self.desc = desc
self.data = ""
def __repr__(self):
return str(self.__str__())
def __str__(self):
return " ".join("{0:02x}".format(c) for c in self.data)
def getDict(self):
return self.__str__()
def read(self):
size = int(self.desc['size'])
self.data = self.file.readvar(size)
return self.data
class VMSDFieldInt(VMSDFieldGeneric):
def __init__(self, desc, file):
super(VMSDFieldInt, self).__init__(desc, file)
self.size = int(desc['size'])
self.format = '0x%%0%dx' % (self.size * 2)
self.sdtype = '>i%d' % self.size
self.udtype = '>u%d' % self.size
def __repr__(self):
if self.data < 0:
return ('%s (%d)' % ((self.format % self.udata), self.data))
else:
return self.format % self.data
def __str__(self):
return self.__repr__()
def getDict(self):
return self.__str__()
def read(self):
super(VMSDFieldInt, self).read()
self.sdata = int.from_bytes(self.data, byteorder='big', signed=True)
self.udata = int.from_bytes(self.data, byteorder='big', signed=False)
self.data = self.sdata
return self.data
class VMSDFieldUInt(VMSDFieldInt):
def __init__(self, desc, file):
super(VMSDFieldUInt, self).__init__(desc, file)
def read(self):
super(VMSDFieldUInt, self).read()
self.data = self.udata
return self.data
class VMSDFieldIntLE(VMSDFieldInt):
def __init__(self, desc, file):
super(VMSDFieldIntLE, self).__init__(desc, file)
self.dtype = '<i%d' % self.size
class VMSDFieldBool(VMSDFieldGeneric):
def __init__(self, desc, file):
super(VMSDFieldBool, self).__init__(desc, file)
def __repr__(self):
return self.data.__repr__()
def __str__(self):
return self.data.__str__()
def getDict(self):
return self.data
def read(self):
super(VMSDFieldBool, self).read()
if self.data[0] == 0:
self.data = False
else:
self.data = True
return self.data
class VMSDFieldStruct(VMSDFieldGeneric):
QEMU_VM_SUBSECTION = 0x05
def __init__(self, desc, file):
super(VMSDFieldStruct, self).__init__(desc, file)
self.data = collections.OrderedDict()
# When we see compressed array elements, unfold them here
new_fields = []
for field in self.desc['struct']['fields']:
if not 'array_len' in field:
new_fields.append(field)
continue
array_len = field.pop('array_len')
field['index'] = 0
new_fields.append(field)
for i in range(1, array_len):
c = field.copy()
c['index'] = i
new_fields.append(c)
self.desc['struct']['fields'] = new_fields
def __repr__(self):
return self.data.__repr__()
def __str__(self):
return self.data.__str__()
def read(self):
for field in self.desc['struct']['fields']:
try:
reader = vmsd_field_readers[field['type']]
except:
reader = VMSDFieldGeneric
field['data'] = reader(field, self.file)
field['data'].read()
if 'index' in field:
if field['name'] not in self.data:
self.data[field['name']] = []
a = self.data[field['name']]
if len(a) != int(field['index']):
raise Exception("internal index of data field unmatched (%d/%d)" % (len(a), int(field['index'])))
a.append(field['data'])
else:
self.data[field['name']] = field['data']
if 'subsections' in self.desc['struct']:
for subsection in self.desc['struct']['subsections']:
if self.file.read8() != self.QEMU_VM_SUBSECTION:
raise Exception("Subsection %s not found at offset %x" % ( subsection['vmsd_name'], self.file.tell()))
name = self.file.readstr()
version_id = self.file.read32()
self.data[name] = VMSDSection(self.file, version_id, subsection, (name, 0))
self.data[name].read()
def getDictItem(self, value):
# Strings would fall into the array category, treat
# them specially
if value.__class__ is ''.__class__:
return value
try:
return self.getDictOrderedDict(value)
except:
try:
return self.getDictArray(value)
except:
try:
return value.getDict()
except:
return value
def getDictArray(self, array):
r = []
for value in array:
r.append(self.getDictItem(value))
return r
def getDictOrderedDict(self, dict):
r = collections.OrderedDict()
for (key, value) in dict.items():
r[key] = self.getDictItem(value)
return r
def getDict(self):
return self.getDictOrderedDict(self.data)
vmsd_field_readers = {
"bool" : VMSDFieldBool,
"int8" : VMSDFieldInt,
"int16" : VMSDFieldInt,
"int32" : VMSDFieldInt,
"int32 equal" : VMSDFieldInt,
"int32 le" : VMSDFieldIntLE,
"int64" : VMSDFieldInt,
"uint8" : VMSDFieldUInt,
"uint16" : VMSDFieldUInt,
"uint32" : VMSDFieldUInt,
"uint32 equal" : VMSDFieldUInt,
"uint64" : VMSDFieldUInt,
"int64 equal" : VMSDFieldInt,
"uint8 equal" : VMSDFieldInt,
"uint16 equal" : VMSDFieldInt,
"float64" : VMSDFieldGeneric,
"timer" : VMSDFieldGeneric,
"buffer" : VMSDFieldGeneric,
"unused_buffer" : VMSDFieldGeneric,
"bitmap" : VMSDFieldGeneric,
"struct" : VMSDFieldStruct,
"unknown" : VMSDFieldGeneric,
}
class VMSDSection(VMSDFieldStruct):
def __init__(self, file, version_id, device, section_key):
self.file = file
self.data = ""
self.vmsd_name = ""
self.section_key = section_key
desc = device
if 'vmsd_name' in device:
self.vmsd_name = device['vmsd_name']
# A section really is nothing but a FieldStruct :)
super(VMSDSection, self).__init__({ 'struct' : desc }, file)
###############################################################################
class MigrationDump(object):
QEMU_VM_FILE_MAGIC = 0x5145564d
QEMU_VM_FILE_VERSION = 0x00000003
QEMU_VM_EOF = 0x00
QEMU_VM_SECTION_START = 0x01
QEMU_VM_SECTION_PART = 0x02
QEMU_VM_SECTION_END = 0x03
QEMU_VM_SECTION_FULL = 0x04
QEMU_VM_SUBSECTION = 0x05
QEMU_VM_VMDESCRIPTION = 0x06
QEMU_VM_CONFIGURATION = 0x07
QEMU_VM_SECTION_FOOTER= 0x7e
def __init__(self, filename):
self.section_classes = { ( 'ram', 0 ) : [ RamSection, None ],
( 'spapr/htab', 0) : ( HTABSection, None ) }
self.filename = filename
self.vmsd_desc = None
def read(self, desc_only = False, dump_memory = False, write_memory = False):
# Read in the whole file
file = MigrationFile(self.filename)
# File magic
data = file.read32()
if data != self.QEMU_VM_FILE_MAGIC:
raise Exception("Invalid file magic %x" % data)
# Version (has to be v3)
data = file.read32()
if data != self.QEMU_VM_FILE_VERSION:
raise Exception("Invalid version number %d" % data)
self.load_vmsd_json(file)
# Read sections
self.sections = collections.OrderedDict()
if desc_only:
return
ramargs = {}
ramargs['page_size'] = self.vmsd_desc['page_size']
ramargs['dump_memory'] = dump_memory
ramargs['write_memory'] = write_memory
self.section_classes[('ram',0)][1] = ramargs
while True:
section_type = file.read8()
if section_type == self.QEMU_VM_EOF:
break
elif section_type == self.QEMU_VM_CONFIGURATION:
section = ConfigurationSection(file)
section.read()
elif section_type == self.QEMU_VM_SECTION_START or section_type == self.QEMU_VM_SECTION_FULL:
section_id = file.read32()
name = file.readstr()
instance_id = file.read32()
version_id = file.read32()
section_key = (name, instance_id)
classdesc = self.section_classes[section_key]
section = classdesc[0](file, version_id, classdesc[1], section_key)
self.sections[section_id] = section
section.read()
elif section_type == self.QEMU_VM_SECTION_PART or section_type == self.QEMU_VM_SECTION_END:
section_id = file.read32()
self.sections[section_id].read()
elif section_type == self.QEMU_VM_SECTION_FOOTER:
read_section_id = file.read32()
if read_section_id != section_id:
raise Exception("Mismatched section footer: %x vs %x" % (read_section_id, section_id))
else:
raise Exception("Unknown section type: %d" % section_type)
file.close()
def load_vmsd_json(self, file):
vmsd_json = file.read_migration_debug_json()
self.vmsd_desc = json.loads(vmsd_json, object_pairs_hook=collections.OrderedDict)
for device in self.vmsd_desc['devices']:
key = (device['name'], device['instance_id'])
value = ( VMSDSection, device )
self.section_classes[key] = value
def getDict(self):
r = collections.OrderedDict()
for (key, value) in self.sections.items():
key = "%s (%d)" % ( value.section_key[0], key )
r[key] = value.getDict()
return r
###############################################################################
class JSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, VMSDFieldGeneric):
return str(o)
return json.JSONEncoder.default(self, o)
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", help='migration dump to read from', required=True)
parser.add_argument("-m", "--memory", help='dump RAM contents as well', action='store_true')
parser.add_argument("-d", "--dump", help='what to dump ("state" or "desc")', default='state')
parser.add_argument("-x", "--extract", help='extract contents into individual files', action='store_true')
args = parser.parse_args()
jsonenc = JSONEncoder(indent=4, separators=(',', ': '))
if args.extract:
dump = MigrationDump(args.file)
dump.read(desc_only = True)
print("desc.json")
f = open("desc.json", "w")
f.truncate()
f.write(jsonenc.encode(dump.vmsd_desc))
f.close()
dump.read(write_memory = True)
dict = dump.getDict()
print("state.json")
f = open("state.json", "w")
f.truncate()
f.write(jsonenc.encode(dict))
f.close()
elif args.dump == "state":
dump = MigrationDump(args.file)
dump.read(dump_memory = args.memory)
dict = dump.getDict()
print(jsonenc.encode(dict))
elif args.dump == "desc":
dump = MigrationDump(args.file)
dump.read(desc_only = True)
print(jsonenc.encode(dump.vmsd_desc))
else:
raise Exception("Please specify either -x, -d state or -d desc")
| 20,727 | 32.758958 | 122 | py |
qemu | qemu-master/scripts/analyse-locks-simpletrace.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Analyse lock events and compute statistics
#
# Author: Alex Bennée <[email protected]>
#
import simpletrace
import argparse
import numpy as np
class MutexAnalyser(simpletrace.Analyzer):
"A simpletrace Analyser for checking locks."
def __init__(self):
self.locks = 0
self.locked = 0
self.unlocks = 0
self.mutex_records = {}
def _get_mutex(self, mutex):
if not mutex in self.mutex_records:
self.mutex_records[mutex] = {"locks": 0,
"lock_time": 0,
"acquire_times": [],
"locked": 0,
"locked_time": 0,
"held_times": [],
"unlocked": 0}
return self.mutex_records[mutex]
def qemu_mutex_lock(self, timestamp, mutex, filename, line):
self.locks += 1
rec = self._get_mutex(mutex)
rec["locks"] += 1
rec["lock_time"] = timestamp[0]
rec["lock_loc"] = (filename, line)
def qemu_mutex_locked(self, timestamp, mutex, filename, line):
self.locked += 1
rec = self._get_mutex(mutex)
rec["locked"] += 1
rec["locked_time"] = timestamp[0]
acquire_time = rec["locked_time"] - rec["lock_time"]
rec["locked_loc"] = (filename, line)
rec["acquire_times"].append(acquire_time)
def qemu_mutex_unlock(self, timestamp, mutex, filename, line):
self.unlocks += 1
rec = self._get_mutex(mutex)
rec["unlocked"] += 1
held_time = timestamp[0] - rec["locked_time"]
rec["held_times"].append(held_time)
rec["unlock_loc"] = (filename, line)
def get_args():
"Grab options"
parser = argparse.ArgumentParser()
parser.add_argument("--output", "-o", type=str, help="Render plot to file")
parser.add_argument("events", type=str, help='trace file read from')
parser.add_argument("tracefile", type=str, help='trace file read from')
return parser.parse_args()
if __name__ == '__main__':
args = get_args()
# Gather data from the trace
analyser = MutexAnalyser()
simpletrace.process(args.events, args.tracefile, analyser)
print ("Total locks: %d, locked: %d, unlocked: %d" %
(analyser.locks, analyser.locked, analyser.unlocks))
# Now dump the individual lock stats
    for key, val in sorted(analyser.mutex_records.items(),
key=lambda k_v: k_v[1]["locks"]):
print ("Lock: %#x locks: %d, locked: %d, unlocked: %d" %
(key, val["locks"], val["locked"], val["unlocked"]))
acquire_times = np.array(val["acquire_times"])
if len(acquire_times) > 0:
print (" Acquire Time: min:%d median:%d avg:%.2f max:%d" %
(acquire_times.min(), np.median(acquire_times),
acquire_times.mean(), acquire_times.max()))
held_times = np.array(val["held_times"])
if len(held_times) > 0:
print (" Held Time: min:%d median:%d avg:%.2f max:%d" %
(held_times.min(), np.median(held_times),
held_times.mean(), held_times.max()))
# Check if any locks still held
if val["locks"] > val["locked"]:
print (" LOCK HELD (%s:%s)" % (val["locked_loc"]))
print (" BLOCKED (%s:%s)" % (val["lock_loc"]))
| 3,543 | 34.79798 | 79 | py |
qemu | qemu-master/scripts/qapi-gen.py | #!/usr/bin/env python3
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
"""
QAPI code generation execution shim.
This standalone script exists primarily to facilitate the running of the QAPI
code generator without needing to install the python module to the current
execution environment.
"""
import sys
from qapi import main
if __name__ == '__main__':
sys.exit(main.main())
| 463 | 22.2 | 77 | py |
qemu | qemu-master/scripts/mtest2make.py | #! /usr/bin/env python3
# Create Makefile targets to run tests, from Meson's test introspection data.
#
# Author: Paolo Bonzini <[email protected]>
from collections import defaultdict
import itertools
import json
import os
import shlex
import sys
class Suite(object):
def __init__(self):
self.deps = set()
self.speeds = ['quick']
def names(self, base):
return [base if speed == 'quick' else f'{base}-{speed}' for speed in self.speeds]
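
# e.g. a Suite with speeds ['quick', 'slow'] yields
# names('block') == ['block', 'block-slow'].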
print('''
SPEED = quick
.speed.quick = $(foreach s,$(sort $(filter-out %-slow %-thorough, $1)), --suite $s)
.speed.slow = $(foreach s,$(sort $(filter-out %-thorough, $1)), --suite $s)
.speed.thorough = $(foreach s,$(sort $1), --suite $s)
.mtestargs = --no-rebuild -t 0
ifneq ($(SPEED), quick)
.mtestargs += --setup $(SPEED)
endif
.mtestargs += $(subst -j,--num-processes , $(filter-out -j, $(lastword -j1 $(filter -j%, $(MAKEFLAGS)))))
.check.mtestargs = $(MTESTARGS) $(.mtestargs) $(if $(V),--verbose,--print-errorlogs)
.bench.mtestargs = $(MTESTARGS) $(.mtestargs) --benchmark --verbose''')
introspect = json.load(sys.stdin)
def process_tests(test, targets, suites):
executable = test['cmd'][0]
try:
executable = os.path.relpath(executable)
except:
pass
deps = (targets.get(x, []) for x in test['depends'])
deps = itertools.chain.from_iterable(deps)
deps = list(deps)
test_suites = test['suite'] or ['default']
for s in test_suites:
# The suite name in the introspection info is "PROJECT:SUITE"
s = s.split(':')[1]
if s == 'slow' or s == 'thorough':
continue
if s.endswith('-slow'):
s = s[:-5]
suites[s].speeds.append('slow')
if s.endswith('-thorough'):
s = s[:-9]
suites[s].speeds.append('thorough')
suites[s].deps.update(deps)
def emit_prolog(suites, prefix):
all_targets = ' '.join((f'{prefix}-{k}' for k in suites.keys()))
all_xml = ' '.join((f'{prefix}-report-{k}.junit.xml' for k in suites.keys()))
print()
print(f'all-{prefix}-targets = {all_targets}')
print(f'all-{prefix}-xml = {all_xml}')
print(f'.PHONY: {prefix} do-meson-{prefix} {prefix}-report.junit.xml $(all-{prefix}-targets) $(all-{prefix}-xml)')
print(f'ifeq ($(filter {prefix}, $(MAKECMDGOALS)),)')
print(f'.{prefix}.mtestargs += $(call .speed.$(SPEED), $(.{prefix}.mtest-suites))')
print(f'endif')
print(f'{prefix}-build: run-ninja')
print(f'{prefix} $(all-{prefix}-targets): do-meson-{prefix}')
print(f'do-meson-{prefix}: run-ninja; $(if $(MAKE.n),,+)$(MESON) test $(.{prefix}.mtestargs)')
print(f'{prefix}-report.junit.xml $(all-{prefix}-xml): {prefix}-report%.junit.xml: run-ninja')
print(f'\t$(MAKE) {prefix}$* MTESTARGS="$(MTESTARGS) --logbase {prefix}-report$*" && ln -f meson-logs/$@ .')
def emit_suite_deps(name, suite, prefix):
deps = ' '.join(suite.deps)
targets = [f'{prefix}-{name}', f'{prefix}-report-{name}.junit.xml', f'{prefix}', f'{prefix}-report.junit.xml',
f'{prefix}-build']
print()
print(f'.{prefix}-{name}.deps = {deps}')
for t in targets:
print(f'.ninja-goals.{t} += $(.{prefix}-{name}.deps)')
def emit_suite(name, suite, prefix):
emit_suite_deps(name, suite, prefix)
targets = f'{prefix}-{name} {prefix}-report-{name}.junit.xml {prefix} {prefix}-report.junit.xml'
print(f'ifneq ($(filter {targets}, $(MAKECMDGOALS)),)')
print(f'.{prefix}.mtest-suites += ' + ' '.join(suite.names(name)))
print(f'endif')
targets = {t['id']: [os.path.relpath(f) for f in t['filename']]
for t in introspect['targets']}
testsuites = defaultdict(Suite)
for test in introspect['tests']:
process_tests(test, targets, testsuites)
emit_prolog(testsuites, 'check')
for name, suite in testsuites.items():
emit_suite(name, suite, 'check')
benchsuites = defaultdict(Suite)
for test in introspect['benchmarks']:
process_tests(test, targets, benchsuites)
emit_prolog(benchsuites, 'bench')
for name, suite in benchsuites.items():
emit_suite(name, suite, 'bench')
| 4,116 | 35.114035 | 118 | py |
qemu | qemu-master/scripts/signrom.py | #!/usr/bin/env python3
#
# Option ROM signing utility
#
# Authors:
# Jan Kiszka <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
import sys
import struct
if len(sys.argv) < 3:
print('usage: signrom.py input output')
sys.exit(1)
fin = open(sys.argv[1], 'rb')
fout = open(sys.argv[2], 'wb')
magic = fin.read(2)
if magic != b'\x55\xaa':
sys.exit("%s: option ROM does not begin with magic 55 aa" % sys.argv[1])
size_byte = ord(fin.read(1))
fin.seek(0)
data = fin.read()
size = size_byte * 512
if len(data) > size:
sys.stderr.write('error: ROM is too large (%d > %d)\n' % (len(data), size))
sys.exit(1)
elif len(data) < size:
# Add padding if necessary, rounding the whole input to a multiple of
# 512 bytes according to the third byte of the input.
# size-1 because a final byte is added below to store the checksum.
data = data.ljust(size-1, b'\0')
else:
if ord(data[-1:]) != 0:
sys.stderr.write('WARNING: ROM includes nonzero checksum\n')
data = data[:size-1]
fout.write(data)
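# The final byte forces the byte-wise sum of the whole image to 0 mod 256:
# e.g. if the padded data sums to 0x...34, the stored checksum is 0xcc.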
checksum = 0
for b in data:
checksum = (checksum - b) & 255
fout.write(struct.pack('B', checksum))
fin.close()
fout.close()
| 1,272 | 22.574074 | 79 | py |
qemu | qemu-master/scripts/block-coroutine-wrapper.py | #! /usr/bin/env python3
"""Generate coroutine wrappers for block subsystem.
The program parses one or several concatenated c files from stdin,
searches for functions with the 'co_wrapper' specifier
and generates corresponding wrappers on stdout.
Usage: block-coroutine-wrapper.py generated-file.c FILE.[ch]...
Copyright (c) 2020 Virtuozzo International GmbH.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import re
from typing import Iterator
def gen_header():
copyright = re.sub('^.*Copyright', 'Copyright', __doc__, flags=re.DOTALL)
copyright = re.sub('^(?=.)', ' * ', copyright.strip(), flags=re.MULTILINE)
copyright = re.sub('^$', ' *', copyright, flags=re.MULTILINE)
return f"""\
/*
* File is generated by scripts/block-coroutine-wrapper.py
*
{copyright}
*/
#include "qemu/osdep.h"
#include "block/coroutines.h"
#include "block/block-gen.h"
#include "block/block_int.h"
#include "block/dirty-bitmap.h"
"""
class ParamDecl:
param_re = re.compile(r'(?P<decl>'
r'(?P<type>.*[ *])'
r'(?P<name>[a-z][a-z0-9_]*)'
r')')
def __init__(self, param_decl: str) -> None:
m = self.param_re.match(param_decl.strip())
if m is None:
raise ValueError(f'Wrong parameter declaration: "{param_decl}"')
self.decl = m.group('decl')
self.type = m.group('type')
self.name = m.group('name')
class FuncDecl:
def __init__(self, wrapper_type: str, return_type: str, name: str,
args: str, variant: str) -> None:
self.return_type = return_type.strip()
self.name = name.strip()
self.struct_name = snake_to_camel(self.name)
self.args = [ParamDecl(arg.strip()) for arg in args.split(',')]
self.create_only_co = 'mixed' not in variant
self.graph_rdlock = 'bdrv_rdlock' in variant
self.wrapper_type = wrapper_type
if wrapper_type == 'co':
subsystem, subname = self.name.split('_', 1)
self.target_name = f'{subsystem}_co_{subname}'
else:
assert wrapper_type == 'no_co'
subsystem, co_infix, subname = self.name.split('_', 2)
if co_infix != 'co':
raise ValueError(f"Invalid no_co function name: {self.name}")
if not self.create_only_co:
raise ValueError(f"no_co function can't be mixed: {self.name}")
if self.graph_rdlock:
raise ValueError(f"no_co function can't be rdlock: {self.name}")
self.target_name = f'{subsystem}_{subname}'
t = self.args[0].type
if t == 'BlockDriverState *':
ctx = 'bdrv_get_aio_context(bs)'
elif t == 'BdrvChild *':
ctx = 'bdrv_get_aio_context(child->bs)'
elif t == 'BlockBackend *':
ctx = 'blk_get_aio_context(blk)'
else:
ctx = 'qemu_get_aio_context()'
self.ctx = ctx
self.get_result = 's->ret = '
self.ret = 'return s.ret;'
self.co_ret = 'return '
self.return_field = self.return_type + " ret;"
if self.return_type == 'void':
self.get_result = ''
self.ret = ''
self.co_ret = ''
self.return_field = ''
def gen_list(self, format: str) -> str:
return ', '.join(format.format_map(arg.__dict__) for arg in self.args)
def gen_block(self, format: str) -> str:
return '\n'.join(format.format_map(arg.__dict__) for arg in self.args)
# Match wrappers declared with a co_wrapper mark
func_decl_re = re.compile(r'^(?P<return_type>[a-zA-Z][a-zA-Z0-9_]* [\*]?)'
r'(\s*coroutine_fn)?'
r'\s*(?P<wrapper_type>(no_)?co)_wrapper'
r'(?P<variant>(_[a-z][a-z0-9_]*)?)\s*'
r'(?P<wrapper_name>[a-z][a-z0-9_]*)'
r'\((?P<args>[^)]*)\);$', re.MULTILINE)
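# For example, the following illustrative declaration (not taken from any real
# header) matches with wrapper_type='co', variant='_mixed' and
# wrapper_name='bdrv_example_read', so the generated wrapper calls
# bdrv_co_example_read():
#
#     int co_wrapper_mixed bdrv_example_read(BlockDriverState *bs,
#                                            int64_t offset);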
def func_decl_iter(text: str) -> Iterator:
for m in func_decl_re.finditer(text):
yield FuncDecl(wrapper_type=m.group('wrapper_type'),
return_type=m.group('return_type'),
name=m.group('wrapper_name'),
args=m.group('args'),
variant=m.group('variant'))
def snake_to_camel(func_name: str) -> str:
"""
Convert underscore names like 'some_function_name' to camel-case like
'SomeFunctionName'
"""
words = func_name.split('_')
words = [w[0].upper() + w[1:] for w in words]
return ''.join(words)
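# For example (illustrative): snake_to_camel('bdrv_example_read') returns
# 'BdrvExampleRead'.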
def create_mixed_wrapper(func: FuncDecl) -> str:
"""
Checks if we are already in coroutine
"""
name = func.target_name
struct_name = func.struct_name
graph_assume_lock = 'assume_graph_lock();' if func.graph_rdlock else ''
return f"""\
{func.return_type} {func.name}({ func.gen_list('{decl}') })
{{
if (qemu_in_coroutine()) {{
{graph_assume_lock}
{func.co_ret}{name}({ func.gen_list('{name}') });
}} else {{
{struct_name} s = {{
.poll_state.ctx = {func.ctx},
.poll_state.in_progress = true,
{ func.gen_block(' .{name} = {name},') }
}};
s.poll_state.co = qemu_coroutine_create({name}_entry, &s);
bdrv_poll_co(&s.poll_state);
{func.ret}
}}
}}"""
def create_co_wrapper(func: FuncDecl) -> str:
"""
Assumes we are not in coroutine, and creates one
"""
name = func.target_name
struct_name = func.struct_name
return f"""\
{func.return_type} {func.name}({ func.gen_list('{decl}') })
{{
{struct_name} s = {{
.poll_state.ctx = {func.ctx},
.poll_state.in_progress = true,
{ func.gen_block(' .{name} = {name},') }
}};
assert(!qemu_in_coroutine());
s.poll_state.co = qemu_coroutine_create({name}_entry, &s);
bdrv_poll_co(&s.poll_state);
{func.ret}
}}"""
def gen_co_wrapper(func: FuncDecl) -> str:
    assert '_co_' not in func.name
assert func.wrapper_type == 'co'
name = func.target_name
struct_name = func.struct_name
graph_lock=''
graph_unlock=''
if func.graph_rdlock:
graph_lock=' bdrv_graph_co_rdlock();'
graph_unlock=' bdrv_graph_co_rdunlock();'
creation_function = create_mixed_wrapper
if func.create_only_co:
creation_function = create_co_wrapper
return f"""\
/*
* Wrappers for {name}
*/
typedef struct {struct_name} {{
BdrvPollCo poll_state;
{func.return_field}
{ func.gen_block(' {decl};') }
}} {struct_name};
static void coroutine_fn {name}_entry(void *opaque)
{{
{struct_name} *s = opaque;
{graph_lock}
{func.get_result}{name}({ func.gen_list('s->{name}') });
{graph_unlock}
s->poll_state.in_progress = false;
aio_wait_kick();
}}
{creation_function(func)}"""
def gen_no_co_wrapper(func: FuncDecl) -> str:
assert '_co_' in func.name
assert func.wrapper_type == 'no_co'
name = func.target_name
struct_name = func.struct_name
return f"""\
/*
* Wrappers for {name}
*/
typedef struct {struct_name} {{
Coroutine *co;
{func.return_field}
{ func.gen_block(' {decl};') }
}} {struct_name};
static void {name}_bh(void *opaque)
{{
{struct_name} *s = opaque;
{func.get_result}{name}({ func.gen_list('s->{name}') });
aio_co_wake(s->co);
}}
{func.return_type} coroutine_fn {func.name}({ func.gen_list('{decl}') })
{{
{struct_name} s = {{
.co = qemu_coroutine_self(),
{ func.gen_block(' .{name} = {name},') }
}};
assert(qemu_in_coroutine());
aio_bh_schedule_oneshot(qemu_get_aio_context(), {name}_bh, &s);
qemu_coroutine_yield();
{func.ret}
}}"""
def gen_wrappers(input_code: str) -> str:
res = ''
for func in func_decl_iter(input_code):
res += '\n\n\n'
if func.wrapper_type == 'co':
res += gen_co_wrapper(func)
else:
res += gen_no_co_wrapper(func)
return res
if __name__ == '__main__':
if len(sys.argv) < 3:
exit(f'Usage: {sys.argv[0]} OUT_FILE.c IN_FILE.[ch]...')
with open(sys.argv[1], 'w', encoding='utf-8') as f_out:
f_out.write(gen_header())
for fname in sys.argv[2:]:
with open(fname, encoding='utf-8') as f_in:
f_out.write(gen_wrappers(f_in.read()))
f_out.write('\n')
| 8,992 | 28.198052 | 80 | py |
qemu | qemu-master/scripts/tracetool.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Command-line wrapper for the tracetool machinery.
"""
__author__ = "Lluís Vilanova <[email protected]>"
__copyright__ = "Copyright 2012-2014, Lluís Vilanova <[email protected]>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
import sys
import getopt
from tracetool import error_write, out, out_open
import tracetool.backend
import tracetool.format
_SCRIPT = ""
def error_opt(msg=None):
if msg is not None:
error_write("Error: " + msg + "\n")
backend_descr = "\n".join([ " %-15s %s" % (n, d)
for n,d in tracetool.backend.get_list() ])
format_descr = "\n".join([ " %-15s %s" % (n, d)
for n,d in tracetool.format.get_list() ])
error_write("""\
Usage: %(script)s --format=<format> --backends=<backends> [<options>] <trace-events> ... <output>
Backends:
%(backends)s
Formats:
%(formats)s
Options:
--help This help message.
--list-backends Print list of available backends.
--check-backends Check if the given backend is valid.
--binary <path> Full path to QEMU binary.
--target-type <type> QEMU emulator target type ('system' or 'user').
--target-name <name> QEMU emulator target name.
--group <name> Name of the event group
--probe-prefix <prefix> Prefix for dtrace probe names
(default: qemu-<target-type>-<target-name>).\
""" % {
"script" : _SCRIPT,
"backends" : backend_descr,
"formats" : format_descr,
})
if msg is None:
sys.exit(0)
else:
sys.exit(1)
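# A typical invocation, as wired up by the build system (the file names and
# backend choice here are illustrative):
#
#     tracetool.py --format=h --backends=simple --group=root \
#         trace-events trace-root.h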
def main(args):
global _SCRIPT
_SCRIPT = args[0]
long_opts = ["backends=", "format=", "help", "list-backends",
"check-backends", "group="]
long_opts += ["binary=", "target-type=", "target-name=", "probe-prefix="]
try:
opts, args = getopt.getopt(args[1:], "", long_opts)
except getopt.GetoptError as err:
error_opt(str(err))
check_backends = False
arg_backends = []
arg_format = ""
arg_group = None
binary = None
target_type = None
target_name = None
probe_prefix = None
for opt, arg in opts:
if opt == "--help":
error_opt()
elif opt == "--backends":
arg_backends = arg.split(",")
elif opt == "--group":
arg_group = arg
elif opt == "--format":
arg_format = arg
elif opt == "--list-backends":
public_backends = tracetool.backend.get_list(only_public = True)
out(", ".join([ b for b,_ in public_backends ]))
sys.exit(0)
elif opt == "--check-backends":
check_backends = True
elif opt == "--binary":
binary = arg
elif opt == '--target-type':
target_type = arg
elif opt == '--target-name':
target_name = arg
elif opt == '--probe-prefix':
probe_prefix = arg
else:
error_opt("unhandled option: %s" % opt)
if len(arg_backends) == 0:
error_opt("no backends specified")
if check_backends:
for backend in arg_backends:
if not tracetool.backend.exists(backend):
sys.exit(1)
sys.exit(0)
if arg_group is None:
error_opt("group name is required")
if arg_format == "stap":
if binary is None:
error_opt("--binary is required for SystemTAP tapset generator")
if probe_prefix is None and target_type is None:
error_opt("--target-type is required for SystemTAP tapset generator")
if probe_prefix is None and target_name is None:
error_opt("--target-name is required for SystemTAP tapset generator")
if probe_prefix is None:
probe_prefix = ".".join(["qemu", target_type, target_name])
if len(args) < 2:
error_opt("missing trace-events and output filepaths")
events = []
for arg in args[:-1]:
with open(arg, "r") as fh:
events.extend(tracetool.read_events(fh, arg))
out_open(args[-1])
try:
tracetool.generate(events, arg_group, arg_format, arg_backends,
binary=binary, probe_prefix=probe_prefix)
except tracetool.TracetoolError as e:
error_opt(str(e))
if __name__ == "__main__":
main(sys.argv)
| 4,594 | 28.645161 | 97 | py |
qemu | qemu-master/scripts/dump-guest-memory.py | """
This python script adds a new gdb command, "dump-guest-memory". It
should be loaded with "source dump-guest-memory.py" at the (gdb)
prompt.
Copyright (C) 2013, Red Hat, Inc.
Authors:
Laszlo Ersek <[email protected]>
Janosch Frank <[email protected]>
This work is licensed under the terms of the GNU GPL, version 2 or later. See
the COPYING file in the top-level directory.
"""
import ctypes
import struct
import sys
try:
    UINTPTR_T = gdb.lookup_type("uintptr_t")
except Exception:
raise gdb.GdbError("Symbols must be loaded prior to sourcing dump-guest-memory.\n"
"Symbols may be loaded by 'attach'ing a QEMU process id or by "
"'load'ing a QEMU binary.")
TARGET_PAGE_SIZE = 0x1000
TARGET_PAGE_MASK = 0xFFFFFFFFFFFFF000
# Special value for e_phnum. This indicates that the real number of
# program headers is too large to fit into e_phnum. Instead the real
# value is in the field sh_info of section 0.
PN_XNUM = 0xFFFF
EV_CURRENT = 1
ELFCLASS32 = 1
ELFCLASS64 = 2
ELFDATA2LSB = 1
ELFDATA2MSB = 2
ET_CORE = 4
PT_LOAD = 1
PT_NOTE = 4
EM_386 = 3
EM_PPC = 20
EM_PPC64 = 21
EM_S390 = 22
EM_AARCH = 183
EM_X86_64 = 62
VMCOREINFO_FORMAT_ELF = 1
def le16_to_cpu(val):
return struct.unpack("<H", struct.pack("=H", val))[0]
def le32_to_cpu(val):
return struct.unpack("<I", struct.pack("=I", val))[0]
def le64_to_cpu(val):
return struct.unpack("<Q", struct.pack("=Q", val))[0]
class ELF(object):
"""Representation of a ELF file."""
def __init__(self, arch):
self.ehdr = None
self.notes = []
self.segments = []
self.notes_size = 0
self.endianness = None
self.elfclass = ELFCLASS64
if arch == 'aarch64-le':
self.endianness = ELFDATA2LSB
self.elfclass = ELFCLASS64
self.ehdr = get_arch_ehdr(self.endianness, self.elfclass)
self.ehdr.e_machine = EM_AARCH
elif arch == 'aarch64-be':
self.endianness = ELFDATA2MSB
self.ehdr = get_arch_ehdr(self.endianness, self.elfclass)
self.ehdr.e_machine = EM_AARCH
elif arch == 'X86_64':
self.endianness = ELFDATA2LSB
self.ehdr = get_arch_ehdr(self.endianness, self.elfclass)
self.ehdr.e_machine = EM_X86_64
elif arch == '386':
self.endianness = ELFDATA2LSB
self.elfclass = ELFCLASS32
self.ehdr = get_arch_ehdr(self.endianness, self.elfclass)
self.ehdr.e_machine = EM_386
elif arch == 's390':
self.endianness = ELFDATA2MSB
self.ehdr = get_arch_ehdr(self.endianness, self.elfclass)
self.ehdr.e_machine = EM_S390
elif arch == 'ppc64-le':
self.endianness = ELFDATA2LSB
self.ehdr = get_arch_ehdr(self.endianness, self.elfclass)
self.ehdr.e_machine = EM_PPC64
elif arch == 'ppc64-be':
self.endianness = ELFDATA2MSB
self.ehdr = get_arch_ehdr(self.endianness, self.elfclass)
self.ehdr.e_machine = EM_PPC64
else:
raise gdb.GdbError("No valid arch type specified.\n"
"Currently supported types:\n"
"aarch64-be, aarch64-le, X86_64, 386, s390, "
"ppc64-be, ppc64-le")
self.add_segment(PT_NOTE, 0, 0)
def add_note(self, n_name, n_desc, n_type):
"""Adds a note to the ELF."""
note = get_arch_note(self.endianness, len(n_name), len(n_desc))
note.n_namesz = len(n_name) + 1
note.n_descsz = len(n_desc)
note.n_name = n_name.encode()
note.n_type = n_type
# Desc needs to be 4 byte aligned (although the 64bit spec
# specifies 8 byte). When defining n_desc as uint32 it will be
# automatically aligned but we need the memmove to copy the
# string into it.
ctypes.memmove(note.n_desc, n_desc.encode(), len(n_desc))
self.notes.append(note)
self.segments[0].p_filesz += ctypes.sizeof(note)
self.segments[0].p_memsz += ctypes.sizeof(note)
def add_vmcoreinfo_note(self, vmcoreinfo):
"""Adds a vmcoreinfo note to the ELF dump."""
# compute the header size, and copy that many bytes from the note
header = get_arch_note(self.endianness, 0, 0)
ctypes.memmove(ctypes.pointer(header),
vmcoreinfo, ctypes.sizeof(header))
if header.n_descsz > 1 << 20:
print('warning: invalid vmcoreinfo size')
return
# now get the full note
note = get_arch_note(self.endianness,
header.n_namesz - 1, header.n_descsz)
ctypes.memmove(ctypes.pointer(note), vmcoreinfo, ctypes.sizeof(note))
self.notes.append(note)
self.segments[0].p_filesz += ctypes.sizeof(note)
self.segments[0].p_memsz += ctypes.sizeof(note)
def add_segment(self, p_type, p_paddr, p_size):
"""Adds a segment to the elf."""
phdr = get_arch_phdr(self.endianness, self.elfclass)
phdr.p_type = p_type
phdr.p_paddr = p_paddr
phdr.p_vaddr = p_paddr
phdr.p_filesz = p_size
phdr.p_memsz = p_size
self.segments.append(phdr)
self.ehdr.e_phnum += 1
def to_file(self, elf_file):
"""Writes all ELF structures to the passed file.
Structure:
Ehdr
Segment 0:PT_NOTE
Segment 1:PT_LOAD
Segment N:PT_LOAD
Note 0..N
Dump contents
"""
elf_file.write(self.ehdr)
off = ctypes.sizeof(self.ehdr) + \
len(self.segments) * ctypes.sizeof(self.segments[0])
for phdr in self.segments:
phdr.p_offset = off
elf_file.write(phdr)
off += phdr.p_filesz
for note in self.notes:
elf_file.write(note)
def get_arch_note(endianness, len_name, len_desc):
"""Returns a Note class with the specified endianness."""
if endianness == ELFDATA2LSB:
superclass = ctypes.LittleEndianStructure
else:
superclass = ctypes.BigEndianStructure
len_name = len_name + 1
class Note(superclass):
"""Represents an ELF note, includes the content."""
_fields_ = [("n_namesz", ctypes.c_uint32),
("n_descsz", ctypes.c_uint32),
("n_type", ctypes.c_uint32),
("n_name", ctypes.c_char * len_name),
("n_desc", ctypes.c_uint32 * ((len_desc + 3) // 4))]
return Note()
class Ident(ctypes.Structure):
"""Represents the ELF ident array in the ehdr structure."""
_fields_ = [('ei_mag0', ctypes.c_ubyte),
('ei_mag1', ctypes.c_ubyte),
('ei_mag2', ctypes.c_ubyte),
('ei_mag3', ctypes.c_ubyte),
('ei_class', ctypes.c_ubyte),
('ei_data', ctypes.c_ubyte),
('ei_version', ctypes.c_ubyte),
('ei_osabi', ctypes.c_ubyte),
('ei_abiversion', ctypes.c_ubyte),
('ei_pad', ctypes.c_ubyte * 7)]
def __init__(self, endianness, elfclass):
self.ei_mag0 = 0x7F
self.ei_mag1 = ord('E')
self.ei_mag2 = ord('L')
self.ei_mag3 = ord('F')
self.ei_class = elfclass
self.ei_data = endianness
self.ei_version = EV_CURRENT
def get_arch_ehdr(endianness, elfclass):
"""Returns a EHDR64 class with the specified endianness."""
if endianness == ELFDATA2LSB:
superclass = ctypes.LittleEndianStructure
else:
superclass = ctypes.BigEndianStructure
class EHDR64(superclass):
"""Represents the 64 bit ELF header struct."""
_fields_ = [('e_ident', Ident),
('e_type', ctypes.c_uint16),
('e_machine', ctypes.c_uint16),
('e_version', ctypes.c_uint32),
('e_entry', ctypes.c_uint64),
('e_phoff', ctypes.c_uint64),
('e_shoff', ctypes.c_uint64),
('e_flags', ctypes.c_uint32),
('e_ehsize', ctypes.c_uint16),
('e_phentsize', ctypes.c_uint16),
('e_phnum', ctypes.c_uint16),
('e_shentsize', ctypes.c_uint16),
('e_shnum', ctypes.c_uint16),
('e_shstrndx', ctypes.c_uint16)]
def __init__(self):
super(superclass, self).__init__()
self.e_ident = Ident(endianness, elfclass)
self.e_type = ET_CORE
self.e_version = EV_CURRENT
self.e_ehsize = ctypes.sizeof(self)
self.e_phoff = ctypes.sizeof(self)
self.e_phentsize = ctypes.sizeof(get_arch_phdr(endianness, elfclass))
self.e_phnum = 0
class EHDR32(superclass):
"""Represents the 32 bit ELF header struct."""
_fields_ = [('e_ident', Ident),
('e_type', ctypes.c_uint16),
('e_machine', ctypes.c_uint16),
('e_version', ctypes.c_uint32),
('e_entry', ctypes.c_uint32),
('e_phoff', ctypes.c_uint32),
('e_shoff', ctypes.c_uint32),
('e_flags', ctypes.c_uint32),
('e_ehsize', ctypes.c_uint16),
('e_phentsize', ctypes.c_uint16),
('e_phnum', ctypes.c_uint16),
('e_shentsize', ctypes.c_uint16),
('e_shnum', ctypes.c_uint16),
('e_shstrndx', ctypes.c_uint16)]
def __init__(self):
super(superclass, self).__init__()
self.e_ident = Ident(endianness, elfclass)
self.e_type = ET_CORE
self.e_version = EV_CURRENT
self.e_ehsize = ctypes.sizeof(self)
self.e_phoff = ctypes.sizeof(self)
self.e_phentsize = ctypes.sizeof(get_arch_phdr(endianness, elfclass))
self.e_phnum = 0
# End get_arch_ehdr
if elfclass == ELFCLASS64:
return EHDR64()
else:
return EHDR32()
def get_arch_phdr(endianness, elfclass):
"""Returns a 32 or 64 bit PHDR class with the specified endianness."""
if endianness == ELFDATA2LSB:
superclass = ctypes.LittleEndianStructure
else:
superclass = ctypes.BigEndianStructure
class PHDR64(superclass):
"""Represents the 64 bit ELF program header struct."""
_fields_ = [('p_type', ctypes.c_uint32),
('p_flags', ctypes.c_uint32),
('p_offset', ctypes.c_uint64),
('p_vaddr', ctypes.c_uint64),
('p_paddr', ctypes.c_uint64),
('p_filesz', ctypes.c_uint64),
('p_memsz', ctypes.c_uint64),
('p_align', ctypes.c_uint64)]
class PHDR32(superclass):
"""Represents the 32 bit ELF program header struct."""
_fields_ = [('p_type', ctypes.c_uint32),
('p_offset', ctypes.c_uint32),
('p_vaddr', ctypes.c_uint32),
('p_paddr', ctypes.c_uint32),
('p_filesz', ctypes.c_uint32),
('p_memsz', ctypes.c_uint32),
('p_flags', ctypes.c_uint32),
('p_align', ctypes.c_uint32)]
# End get_arch_phdr
if elfclass == ELFCLASS64:
return PHDR64()
else:
return PHDR32()
def int128_get64(val):
"""Returns low 64bit part of Int128 struct."""
try:
assert val["hi"] == 0
return val["lo"]
except gdb.error:
u64t = gdb.lookup_type('uint64_t').array(2)
u64 = val.cast(u64t)
if sys.byteorder == 'little':
assert u64[1] == 0
return u64[0]
else:
assert u64[0] == 0
return u64[1]
def qlist_foreach(head, field_str):
"""Generator for qlists."""
var_p = head["lh_first"]
while var_p != 0:
var = var_p.dereference()
var_p = var[field_str]["le_next"]
yield var
def qemu_map_ram_ptr(block, offset):
"""Returns qemu vaddr for given guest physical address."""
return block["host"] + offset
def memory_region_get_ram_ptr(memory_region):
if memory_region["alias"] != 0:
return (memory_region_get_ram_ptr(memory_region["alias"].dereference())
+ memory_region["alias_offset"])
return qemu_map_ram_ptr(memory_region["ram_block"], 0)
def get_guest_phys_blocks():
"""Returns a list of ram blocks.
Each block entry contains:
'target_start': guest block phys start address
'target_end': guest block phys end address
'host_addr': qemu vaddr of the block's start
"""
guest_phys_blocks = []
print("guest RAM blocks:")
print("target_start target_end host_addr message "
"count")
print("---------------- ---------------- ---------------- ------- "
"-----")
current_map_p = gdb.parse_and_eval("address_space_memory.current_map")
current_map = current_map_p.dereference()
    # Conversion to int is needed for Python 3 compatibility: range()
    # requires a real int and does not coerce the gdb.Value itself.
for cur in range(int(current_map["nr"])):
flat_range = (current_map["ranges"] + cur).dereference()
memory_region = flat_range["mr"].dereference()
# we only care about RAM
if (not memory_region["ram"] or
memory_region["ram_device"] or
memory_region["nonvolatile"]):
continue
section_size = int128_get64(flat_range["addr"]["size"])
target_start = int128_get64(flat_range["addr"]["start"])
target_end = target_start + section_size
host_addr = (memory_region_get_ram_ptr(memory_region)
+ flat_range["offset_in_region"])
predecessor = None
# find continuity in guest physical address space
if len(guest_phys_blocks) > 0:
predecessor = guest_phys_blocks[-1]
predecessor_size = (predecessor["target_end"] -
predecessor["target_start"])
# the memory API guarantees monotonically increasing
# traversal
assert predecessor["target_end"] <= target_start
# we want continuity in both guest-physical and
# host-virtual memory
if (predecessor["target_end"] < target_start or
predecessor["host_addr"] + predecessor_size != host_addr):
predecessor = None
if predecessor is None:
# isolated mapping, add it to the list
guest_phys_blocks.append({"target_start": target_start,
"target_end": target_end,
"host_addr": host_addr})
message = "added"
else:
# expand predecessor until @target_end; predecessor's
# start doesn't change
predecessor["target_end"] = target_end
message = "joined"
print("%016x %016x %016x %-7s %5u" %
(target_start, target_end, host_addr.cast(UINTPTR_T),
message, len(guest_phys_blocks)))
return guest_phys_blocks
# The leading docstring doesn't have idiomatic Python formatting. It is
# printed by gdb's "help" command (the first line is printed in the
# "help data" summary), and it should match how other help texts look in
# gdb.
class DumpGuestMemory(gdb.Command):
"""Extract guest vmcore from qemu process coredump.
The two required arguments are FILE and ARCH:
FILE identifies the target file to write the guest vmcore to.
ARCH specifies the architecture for which the core will be generated.
This GDB command reimplements the dump-guest-memory QMP command in
python, using the representation of guest memory as captured in the qemu
coredump. The qemu process that has been dumped must have had the
command line option "-machine dump-guest-core=on" which is the default.
For simplicity, the "paging", "begin" and "end" parameters of the QMP
command are not supported -- no attempt is made to get the guest's
internal paging structures (ie. paging=false is hard-wired), and guest
memory is always fully dumped.
Currently aarch64-be, aarch64-le, X86_64, 386, s390, ppc64-be,
ppc64-le guests are supported.
The CORE/NT_PRSTATUS and QEMU notes (that is, the VCPUs' statuses) are
not written to the vmcore. Preparing these would require context that is
only present in the KVM host kernel module when the guest is alive. A
fake ELF note is written instead, only to keep the ELF parser of "crash"
happy.
Dependent on how busted the qemu process was at the time of the
coredump, this command might produce unpredictable results. If qemu
deliberately called abort(), or it was dumped in response to a signal at
a halfway fortunate point, then its coredump should be in reasonable
shape and this command should mostly work."""
def __init__(self):
super(DumpGuestMemory, self).__init__("dump-guest-memory",
gdb.COMMAND_DATA,
gdb.COMPLETE_FILENAME)
self.elf = None
self.guest_phys_blocks = None
def dump_init(self, vmcore):
"""Prepares and writes ELF structures to core file."""
# Needed to make crash happy, data for more useful notes is
# not available in a qemu core.
self.elf.add_note("NONE", "EMPTY", 0)
# We should never reach PN_XNUM for paging=false dumps,
# there's just a handful of discontiguous ranges after
# merging.
# The constant is needed to account for the PT_NOTE segment.
phdr_num = len(self.guest_phys_blocks) + 1
assert phdr_num < PN_XNUM
for block in self.guest_phys_blocks:
block_size = block["target_end"] - block["target_start"]
self.elf.add_segment(PT_LOAD, block["target_start"], block_size)
self.elf.to_file(vmcore)
def dump_iterate(self, vmcore):
"""Writes guest core to file."""
qemu_core = gdb.inferiors()[0]
for block in self.guest_phys_blocks:
cur = block["host_addr"]
left = block["target_end"] - block["target_start"]
print("dumping range at %016x for length %016x" %
(cur.cast(UINTPTR_T), left))
while left > 0:
chunk_size = min(TARGET_PAGE_SIZE, left)
chunk = qemu_core.read_memory(cur, chunk_size)
vmcore.write(chunk)
cur += chunk_size
left -= chunk_size
def phys_memory_read(self, addr, size):
qemu_core = gdb.inferiors()[0]
for block in self.guest_phys_blocks:
if block["target_start"] <= addr \
and addr + size <= block["target_end"]:
haddr = block["host_addr"] + (addr - block["target_start"])
return qemu_core.read_memory(haddr, size)
return None
def add_vmcoreinfo(self):
if gdb.lookup_symbol("vmcoreinfo_realize")[0] is None:
return
vmci = 'vmcoreinfo_realize::vmcoreinfo_state'
if not gdb.parse_and_eval("%s" % vmci) \
or not gdb.parse_and_eval("(%s)->has_vmcoreinfo" % vmci):
return
fmt = gdb.parse_and_eval("(%s)->vmcoreinfo.guest_format" % vmci)
addr = gdb.parse_and_eval("(%s)->vmcoreinfo.paddr" % vmci)
size = gdb.parse_and_eval("(%s)->vmcoreinfo.size" % vmci)
fmt = le16_to_cpu(fmt)
addr = le64_to_cpu(addr)
size = le32_to_cpu(size)
if fmt != VMCOREINFO_FORMAT_ELF:
return
vmcoreinfo = self.phys_memory_read(addr, size)
if vmcoreinfo:
self.elf.add_vmcoreinfo_note(bytes(vmcoreinfo))
def invoke(self, args, from_tty):
"""Handles command invocation from gdb."""
# Unwittingly pressing the Enter key after the command should
# not dump the same multi-gig coredump to the same file.
self.dont_repeat()
argv = gdb.string_to_argv(args)
if len(argv) != 2:
raise gdb.GdbError("usage: dump-guest-memory FILE ARCH")
self.elf = ELF(argv[1])
self.guest_phys_blocks = get_guest_phys_blocks()
self.add_vmcoreinfo()
with open(argv[0], "wb") as vmcore:
self.dump_init(vmcore)
self.dump_iterate(vmcore)
DumpGuestMemory()
| 20,723 | 33.597663 | 86 | py |
qemu | qemu-master/scripts/render_block_graph.py | #!/usr/bin/env python3
#
# Render Qemu Block Graph
#
# Copyright (c) 2018 Virtuozzo International GmbH. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
import subprocess
import json
from graphviz import Digraph
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'python'))
from qemu.qmp import QMPError
from qemu.qmp.legacy import QEMUMonitorProtocol
def perm(arr):
s = 'w' if 'write' in arr else '_'
s += 'r' if 'consistent-read' in arr else '_'
s += 'u' if 'write-unchanged' in arr else '_'
s += 's' if 'resize' in arr else '_'
return s
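# For example (illustrative): perm(['write', 'resize']) returns 'w__s' and
# perm(['consistent-read']) returns '_r__'.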
def render_block_graph(qmp, filename, format='png'):
'''
Render graph in text (dot) representation into "@filename" and
representation in @format into "@filename.@format"
'''
bds_nodes = qmp.command('query-named-block-nodes')
bds_nodes = {n['node-name']: n for n in bds_nodes}
job_nodes = qmp.command('query-block-jobs')
job_nodes = {n['device']: n for n in job_nodes}
block_graph = qmp.command('x-debug-query-block-graph')
graph = Digraph(comment='Block Nodes Graph')
graph.format = format
    graph.node(r'permission symbols:\l'
               r' w - Write\l'
               r' r - consistent-Read\l'
               r' u - write - Unchanged\l'
               r' s - reSize\l'
               r'edge label scheme:\l'
               r' <child type>\l'
               r' <perm>\l'
               r' <shared_perm>\l', shape='none')
for n in block_graph['nodes']:
if n['type'] == 'block-driver':
info = bds_nodes[n['name']]
label = n['name'] + ' [' + info['drv'] + ']'
if info['drv'] == 'file':
label += '\n' + os.path.basename(info['file'])
shape = 'ellipse'
elif n['type'] == 'block-job':
info = job_nodes[n['name']]
label = info['type'] + ' job (' + n['name'] + ')'
shape = 'box'
else:
assert n['type'] == 'block-backend'
label = n['name'] if n['name'] else 'unnamed blk'
shape = 'box'
graph.node(str(n['id']), label, shape=shape)
for e in block_graph['edges']:
        label = r'%s\l%s\l%s\l' % (e['name'], perm(e['perm']),
                                   perm(e['shared-perm']))
graph.edge(str(e['parent']), str(e['child']), label=label)
graph.render(filename)
class LibvirtGuest():
def __init__(self, name):
self.name = name
def command(self, cmd):
# only supports qmp commands without parameters
m = {'execute': cmd}
ar = ['virsh', 'qemu-monitor-command', self.name, json.dumps(m)]
reply = json.loads(subprocess.check_output(ar))
if 'error' in reply:
raise QMPError(reply)
return reply['return']
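# The above is equivalent to running, e.g. (guest name and command are
# illustrative):
#
#     virsh qemu-monitor-command GUEST '{"execute": "query-block-jobs"}'
#
# which is why only argument-less QMP commands are supported.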
if __name__ == '__main__':
obj = sys.argv[1]
out = sys.argv[2]
if os.path.exists(obj):
# assume unix socket
qmp = QEMUMonitorProtocol(obj)
qmp.connect()
else:
# assume libvirt guest name
qmp = LibvirtGuest(obj)
render_block_graph(qmp, out)
| 3,767 | 29.634146 | 72 | py |
qemu | qemu-master/scripts/analyse-9p-simpletrace.py | #!/usr/bin/env python3
# Pretty print 9p simpletrace log
# Usage: ./analyse-9p-simpletrace <trace-events> <trace-pid>
#
# Author: Harsh Prateek Bora
import os
import simpletrace
symbol_9p = {
6 : 'TLERROR',
7 : 'RLERROR',
8 : 'TSTATFS',
9 : 'RSTATFS',
12 : 'TLOPEN',
13 : 'RLOPEN',
14 : 'TLCREATE',
15 : 'RLCREATE',
16 : 'TSYMLINK',
17 : 'RSYMLINK',
18 : 'TMKNOD',
19 : 'RMKNOD',
20 : 'TRENAME',
21 : 'RRENAME',
22 : 'TREADLINK',
23 : 'RREADLINK',
24 : 'TGETATTR',
25 : 'RGETATTR',
26 : 'TSETATTR',
27 : 'RSETATTR',
30 : 'TXATTRWALK',
31 : 'RXATTRWALK',
32 : 'TXATTRCREATE',
33 : 'RXATTRCREATE',
40 : 'TREADDIR',
41 : 'RREADDIR',
50 : 'TFSYNC',
51 : 'RFSYNC',
52 : 'TLOCK',
53 : 'RLOCK',
54 : 'TGETLOCK',
55 : 'RGETLOCK',
70 : 'TLINK',
71 : 'RLINK',
72 : 'TMKDIR',
73 : 'RMKDIR',
74 : 'TRENAMEAT',
75 : 'RRENAMEAT',
76 : 'TUNLINKAT',
77 : 'RUNLINKAT',
100 : 'TVERSION',
101 : 'RVERSION',
102 : 'TAUTH',
103 : 'RAUTH',
104 : 'TATTACH',
105 : 'RATTACH',
106 : 'TERROR',
107 : 'RERROR',
108 : 'TFLUSH',
109 : 'RFLUSH',
110 : 'TWALK',
111 : 'RWALK',
112 : 'TOPEN',
113 : 'ROPEN',
114 : 'TCREATE',
115 : 'RCREATE',
116 : 'TREAD',
117 : 'RREAD',
118 : 'TWRITE',
119 : 'RWRITE',
120 : 'TCLUNK',
121 : 'RCLUNK',
122 : 'TREMOVE',
123 : 'RREMOVE',
124 : 'TSTAT',
125 : 'RSTAT',
126 : 'TWSTAT',
127 : 'RWSTAT'
}
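# simpletrace.run() dispatches each trace record to the Analyzer method named
# after the event, so e.g. a v9fs_walk trace record ends up calling
# VirtFSRequestTracker.v9fs_walk() below.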
class VirtFSRequestTracker(simpletrace.Analyzer):
def begin(self):
print("Pretty printing 9p simpletrace log ...")
def v9fs_rerror(self, tag, id, err):
print("RERROR (tag =", tag, ", id =", symbol_9p[id], ", err = \"", os.strerror(err), "\")")
def v9fs_version(self, tag, id, msize, version):
print("TVERSION (tag =", tag, ", msize =", msize, ", version =", version, ")")
def v9fs_version_return(self, tag, id, msize, version):
print("RVERSION (tag =", tag, ", msize =", msize, ", version =", version, ")")
def v9fs_attach(self, tag, id, fid, afid, uname, aname):
print("TATTACH (tag =", tag, ", fid =", fid, ", afid =", afid, ", uname =", uname, ", aname =", aname, ")")
def v9fs_attach_return(self, tag, id, type, version, path):
print("RATTACH (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "})")
def v9fs_stat(self, tag, id, fid):
print("TSTAT (tag =", tag, ", fid =", fid, ")")
def v9fs_stat_return(self, tag, id, mode, atime, mtime, length):
print("RSTAT (tag =", tag, ", mode =", mode, ", atime =", atime, ", mtime =", mtime, ", length =", length, ")")
def v9fs_getattr(self, tag, id, fid, request_mask):
print("TGETATTR (tag =", tag, ", fid =", fid, ", request_mask =", hex(request_mask), ")")
def v9fs_getattr_return(self, tag, id, result_mask, mode, uid, gid):
print("RGETATTR (tag =", tag, ", result_mask =", hex(result_mask), ", mode =", oct(mode), ", uid =", uid, ", gid =", gid, ")")
def v9fs_walk(self, tag, id, fid, newfid, nwnames):
print("TWALK (tag =", tag, ", fid =", fid, ", newfid =", newfid, ", nwnames =", nwnames, ")")
def v9fs_walk_return(self, tag, id, nwnames, qids):
print("RWALK (tag =", tag, ", nwnames =", nwnames, ", qids =", hex(qids), ")")
def v9fs_open(self, tag, id, fid, mode):
print("TOPEN (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ")")
def v9fs_open_return(self, tag, id, type, version, path, iounit):
print("ROPEN (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")")
def v9fs_lcreate(self, tag, id, dfid, flags, mode, gid):
print("TLCREATE (tag =", tag, ", dfid =", dfid, ", flags =", oct(flags), ", mode =", oct(mode), ", gid =", gid, ")")
def v9fs_lcreate_return(self, tag, id, type, version, path, iounit):
print("RLCREATE (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")")
def v9fs_fsync(self, tag, id, fid, datasync):
print("TFSYNC (tag =", tag, ", fid =", fid, ", datasync =", datasync, ")")
def v9fs_clunk(self, tag, id, fid):
print("TCLUNK (tag =", tag, ", fid =", fid, ")")
def v9fs_read(self, tag, id, fid, off, max_count):
print("TREAD (tag =", tag, ", fid =", fid, ", off =", off, ", max_count =", max_count, ")")
def v9fs_read_return(self, tag, id, count, err):
print("RREAD (tag =", tag, ", count =", count, ", err =", err, ")")
def v9fs_readdir(self, tag, id, fid, offset, max_count):
print("TREADDIR (tag =", tag, ", fid =", fid, ", offset =", offset, ", max_count =", max_count, ")")
def v9fs_readdir_return(self, tag, id, count, retval):
print("RREADDIR (tag =", tag, ", count =", count, ", retval =", retval, ")")
def v9fs_write(self, tag, id, fid, off, count, cnt):
print("TWRITE (tag =", tag, ", fid =", fid, ", off =", off, ", count =", count, ", cnt =", cnt, ")")
def v9fs_write_return(self, tag, id, total, err):
print("RWRITE (tag =", tag, ", total =", total, ", err =", err, ")")
def v9fs_create(self, tag, id, fid, name, perm, mode):
print("TCREATE (tag =", tag, ", fid =", fid, ", perm =", oct(perm), ", name =", name, ", mode =", oct(mode), ")")
def v9fs_create_return(self, tag, id, type, version, path, iounit):
print("RCREATE (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")")
def v9fs_symlink(self, tag, id, fid, name, symname, gid):
print("TSYMLINK (tag =", tag, ", fid =", fid, ", name =", name, ", symname =", symname, ", gid =", gid, ")")
def v9fs_symlink_return(self, tag, id, type, version, path):
print("RSYMLINK (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "})")
def v9fs_flush(self, tag, id, flush_tag):
print("TFLUSH (tag =", tag, ", flush_tag =", flush_tag, ")")
def v9fs_link(self, tag, id, dfid, oldfid, name):
print("TLINK (tag =", tag, ", dfid =", dfid, ", oldfid =", oldfid, ", name =", name, ")")
def v9fs_remove(self, tag, id, fid):
print("TREMOVE (tag =", tag, ", fid =", fid, ")")
def v9fs_wstat(self, tag, id, fid, mode, atime, mtime):
print("TWSTAT (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ", atime =", atime, "mtime =", mtime, ")")
def v9fs_mknod(self, tag, id, fid, mode, major, minor):
print("TMKNOD (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ", major =", major, ", minor =", minor, ")")
def v9fs_lock(self, tag, id, fid, type, start, length):
print("TLOCK (tag =", tag, ", fid =", fid, "type =", type, ", start =", start, ", length =", length, ")")
def v9fs_lock_return(self, tag, id, status):
print("RLOCK (tag =", tag, ", status =", status, ")")
def v9fs_getlock(self, tag, id, fid, type, start, length):
print("TGETLOCK (tag =", tag, ", fid =", fid, "type =", type, ", start =", start, ", length =", length, ")")
def v9fs_getlock_return(self, tag, id, type, start, length, proc_id):
print("RGETLOCK (tag =", tag, "type =", type, ", start =", start, ", length =", length, ", proc_id =", proc_id, ")")
def v9fs_mkdir(self, tag, id, fid, name, mode, gid):
print("TMKDIR (tag =", tag, ", fid =", fid, ", name =", name, ", mode =", mode, ", gid =", gid, ")")
def v9fs_mkdir_return(self, tag, id, type, version, path, err):
print("RMKDIR (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, err =", err, ")")
def v9fs_xattrwalk(self, tag, id, fid, newfid, name):
print("TXATTRWALK (tag =", tag, ", fid =", fid, ", newfid =", newfid, ", xattr name =", name, ")")
def v9fs_xattrwalk_return(self, tag, id, size):
print("RXATTRWALK (tag =", tag, ", xattrsize =", size, ")")
def v9fs_xattrcreate(self, tag, id, fid, name, size, flags):
print("TXATTRCREATE (tag =", tag, ", fid =", fid, ", name =", name, ", xattrsize =", size, ", flags =", flags, ")")
def v9fs_readlink(self, tag, id, fid):
print("TREADLINK (tag =", tag, ", fid =", fid, ")")
def v9fs_readlink_return(self, tag, id, target):
print("RREADLINK (tag =", tag, ", target =", target, ")")
simpletrace.run(VirtFSRequestTracker())
| 9,103 | 41.542056 | 142 | py |
qemu | qemu-master/scripts/u2f-setup-gen.py | #!/usr/bin/env python3
#
# Libu2f-emu setup directory generator for USB U2F key emulation.
#
# Copyright (c) 2020 César Belley <[email protected]>
# Written by César Belley <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2
# or, at your option, any later version. See the COPYING file in
# the top-level directory.
import sys
import os
from random import randint
from typing import Tuple
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.serialization import Encoding, \
NoEncryption, PrivateFormat, PublicFormat
from OpenSSL import crypto
def write_setup_dir(dirpath: str, privkey_pem: bytes, cert_pem: bytes,
entropy: bytes, counter: int) -> None:
"""
Write the setup directory.
Args:
dirpath: The directory path.
        privkey_pem: The private key PEM.
cert_pem: The certificate PEM.
entropy: The 48 bytes of entropy.
counter: The counter value.
"""
# Directory
os.mkdir(dirpath)
# Private key
with open(f'{dirpath}/private-key.pem', 'bw') as f:
f.write(privkey_pem)
# Certificate
with open(f'{dirpath}/certificate.pem', 'bw') as f:
f.write(cert_pem)
# Entropy
with open(f'{dirpath}/entropy', 'wb') as f:
f.write(entropy)
# Counter
with open(f'{dirpath}/counter', 'w') as f:
f.write(f'{str(counter)}\n')
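# Resulting layout (the directory name is whatever the caller passed):
#
#     <setup_dir>/private-key.pem
#     <setup_dir>/certificate.pem
#     <setup_dir>/entropy          (48 random bytes)
#     <setup_dir>/counter          (decimal value, newline-terminated)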
def generate_ec_key_pair() -> Tuple[str, str]:
"""
Generate an ec key pair.
Returns:
The private and public key PEM.
"""
# Key generation
    privkey = ec.generate_private_key(ec.SECP256R1(), default_backend())
pubkey = privkey.public_key()
# PEM serialization
privkey_pem = privkey.private_bytes(encoding=Encoding.PEM,
format=PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=NoEncryption())
pubkey_pem = pubkey.public_bytes(encoding=Encoding.PEM,
format=PublicFormat.SubjectPublicKeyInfo)
return privkey_pem, pubkey_pem
def generate_certificate(privkey_pem: str, pubkey_pem: str) -> str:
"""
Generate a x509 certificate from a key pair.
Args:
privkey_pem: The private key PEM.
pubkey_pem: The public key PEM.
Returns:
The certificate PEM.
"""
# Convert key pair
privkey = crypto.load_privatekey(crypto.FILETYPE_PEM, privkey_pem)
pubkey = crypto.load_publickey(crypto.FILETYPE_PEM, pubkey_pem)
# New x509v3 certificate
cert = crypto.X509()
cert.set_version(0x2)
# Serial number
cert.set_serial_number(randint(1, 2 ** 64))
# Before / After
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(4 * (365 * 24 * 60 * 60))
# Public key
cert.set_pubkey(pubkey)
    # Subject name and issuer
cert.get_subject().CN = "U2F emulated"
cert.set_issuer(cert.get_subject())
# Extensions
cert.add_extensions([
crypto.X509Extension(b"subjectKeyIdentifier",
False, b"hash", subject=cert),
])
cert.add_extensions([
crypto.X509Extension(b"authorityKeyIdentifier",
False, b"keyid:always", issuer=cert),
])
cert.add_extensions([
crypto.X509Extension(b"basicConstraints", True, b"CA:TRUE")
])
# Signature
cert.sign(privkey, 'sha256')
return crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
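# Typical flow, mirroring generate_setup_dir() below (no assumptions beyond
# this file):
#
#     privkey_pem, pubkey_pem = generate_ec_key_pair()
#     cert_pem = generate_certificate(privkey_pem, pubkey_pem)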
def generate_setup_dir(dirpath: str) -> None:
"""
Generates the setup directory.
Args:
dirpath: The directory path.
"""
# Key pair
privkey_pem, pubkey_pem = generate_ec_key_pair()
# Certificate
certificate_pem = generate_certificate(privkey_pem, pubkey_pem)
# Entropy
entropy = os.urandom(48)
# Counter
counter = 0
# Write
write_setup_dir(dirpath, privkey_pem, certificate_pem, entropy, counter)
def main() -> None:
"""
Main function
"""
# Dir path
if len(sys.argv) != 2:
sys.stderr.write(f'Usage: {sys.argv[0]} <setup_dir>\n')
exit(2)
dirpath = sys.argv[1]
# Dir non existence
if os.path.exists(dirpath):
sys.stderr.write(f'Directory: {dirpath} already exists.\n')
exit(1)
generate_setup_dir(dirpath)
if __name__ == '__main__':
main()
| 4,481 | 25.210526 | 80 | py |
qemu | qemu-master/scripts/replay-dump.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Dump the contents of a recorded execution stream
#
# Copyright (c) 2017 Alex Bennée <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.
import argparse
import struct
from collections import namedtuple
# This mirrors some of the global replay state which some of the
# stream loading refers to. Some decoders may read the next event so
# we need handle that case. Calling reuse_event will ensure the next
# event is read from the cache rather than advancing the file.
class ReplayState(object):
def __init__(self):
self.event = -1
self.event_count = 0
self.already_read = False
self.current_checkpoint = 0
self.checkpoint = 0
def set_event(self, ev):
self.event = ev
self.event_count += 1
def get_event(self):
self.already_read = False
return self.event
def reuse_event(self, ev):
self.event = ev
self.already_read = True
def set_checkpoint(self):
self.checkpoint = self.event - self.checkpoint_start
def get_checkpoint(self):
return self.checkpoint
replay_state = ReplayState()
# Simple read functions that mirror replay-internal.c
# The file-stream is big-endian and manually written out a byte at a time.
def read_byte(fin):
"Read a single byte"
return struct.unpack('>B', fin.read(1))[0]
def read_event(fin):
"Read a single byte event, but save some state"
if replay_state.already_read:
return replay_state.get_event()
else:
replay_state.set_event(read_byte(fin))
return replay_state.event
def read_word(fin):
"Read a 16 bit word"
return struct.unpack('>H', fin.read(2))[0]
def read_dword(fin):
"Read a 32 bit word"
return struct.unpack('>I', fin.read(4))[0]
def read_qword(fin):
"Read a 64 bit word"
return struct.unpack('>Q', fin.read(8))[0]
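# The stream is big-endian regardless of the host byte order; for example
# (illustrative): struct.unpack('>H', b'\x12\x34')[0] == 0x1234.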
# Generic decoder structure
Decoder = namedtuple("Decoder", "eid name fn")
def call_decode(table, index, dumpfile):
"Search decode table for next step"
decoder = next((d for d in table if d.eid == index), None)
    if not decoder:
        print("Could not decode index: %d" % (index))
        print("Decode Table is:\n%s" % (table))
return False
else:
return decoder.fn(decoder.eid, decoder.name, dumpfile)
# Print event
def print_event(eid, name, string=None, event_count=None):
"Print event with count"
if not event_count:
event_count = replay_state.event_count
if string:
print("%d:%s(%d) %s" % (event_count, name, eid, string))
else:
print("%d:%s(%d)" % (event_count, name, eid))
# Decoders for each event type
def decode_unimp(eid, name, _unused_dumpfile):
"Unimplimented decoder, will trigger exit"
print("%s not handled - will now stop" % (name))
return False
# Checkpoint decoder
def swallow_async_qword(eid, name, dumpfile):
"Swallow a qword of data without looking at it"
step_id = read_qword(dumpfile)
print(" %s(%d) @ %d" % (name, eid, step_id))
return True
async_decode_table = [ Decoder(0, "REPLAY_ASYNC_EVENT_BH", swallow_async_qword),
Decoder(1, "REPLAY_ASYNC_INPUT", decode_unimp),
Decoder(2, "REPLAY_ASYNC_INPUT_SYNC", decode_unimp),
Decoder(3, "REPLAY_ASYNC_CHAR_READ", decode_unimp),
Decoder(4, "REPLAY_ASYNC_EVENT_BLOCK", decode_unimp),
Decoder(5, "REPLAY_ASYNC_EVENT_NET", decode_unimp),
]
# See replay_read_events/replay_read_event
def decode_async(eid, name, dumpfile):
"""Decode an ASYNC event"""
print_event(eid, name)
async_event_kind = read_byte(dumpfile)
async_event_checkpoint = read_byte(dumpfile)
if async_event_checkpoint != replay_state.current_checkpoint:
print(" mismatch between checkpoint %d and async data %d" % (
replay_state.current_checkpoint, async_event_checkpoint))
return True
return call_decode(async_decode_table, async_event_kind, dumpfile)
def decode_instruction(eid, name, dumpfile):
ins_diff = read_dword(dumpfile)
print_event(eid, name, "0x%x" % (ins_diff))
return True
def decode_audio_out(eid, name, dumpfile):
audio_data = read_dword(dumpfile)
print_event(eid, name, "%d" % (audio_data))
return True
def decode_checkpoint(eid, name, dumpfile):
"""Decode a checkpoint.
Checkpoints contain a series of async events with their own specific data.
"""
replay_state.set_checkpoint()
# save event count as we peek ahead
event_number = replay_state.event_count
next_event = read_event(dumpfile)
# if the next event is EVENT_ASYNC there are a bunch of
# async events to read, otherwise we are done
if next_event != 3:
print_event(eid, name, "no additional data", event_number)
else:
print_event(eid, name, "more data follows", event_number)
replay_state.reuse_event(next_event)
return True
def decode_checkpoint_init(eid, name, dumpfile):
print_event(eid, name)
return True
def decode_interrupt(eid, name, dumpfile):
print_event(eid, name)
return True
def decode_clock(eid, name, dumpfile):
clock_data = read_qword(dumpfile)
print_event(eid, name, "0x%x" % (clock_data))
return True
# pre-MTTCG merge
v5_event_table = [Decoder(0, "EVENT_INSTRUCTION", decode_instruction),
Decoder(1, "EVENT_INTERRUPT", decode_interrupt),
Decoder(2, "EVENT_EXCEPTION", decode_unimp),
Decoder(3, "EVENT_ASYNC", decode_async),
Decoder(4, "EVENT_SHUTDOWN", decode_unimp),
Decoder(5, "EVENT_CHAR_WRITE", decode_unimp),
Decoder(6, "EVENT_CHAR_READ_ALL", decode_unimp),
Decoder(7, "EVENT_CHAR_READ_ALL_ERROR", decode_unimp),
Decoder(8, "EVENT_CLOCK_HOST", decode_clock),
Decoder(9, "EVENT_CLOCK_VIRTUAL_RT", decode_clock),
Decoder(10, "EVENT_CP_CLOCK_WARP_START", decode_checkpoint),
Decoder(11, "EVENT_CP_CLOCK_WARP_ACCOUNT", decode_checkpoint),
Decoder(12, "EVENT_CP_RESET_REQUESTED", decode_checkpoint),
Decoder(13, "EVENT_CP_SUSPEND_REQUESTED", decode_checkpoint),
Decoder(14, "EVENT_CP_CLOCK_VIRTUAL", decode_checkpoint),
Decoder(15, "EVENT_CP_CLOCK_HOST", decode_checkpoint),
Decoder(16, "EVENT_CP_CLOCK_VIRTUAL_RT", decode_checkpoint),
Decoder(17, "EVENT_CP_INIT", decode_checkpoint_init),
Decoder(18, "EVENT_CP_RESET", decode_checkpoint),
]
# post-MTTCG merge, AUDIO support added
v6_event_table = [Decoder(0, "EVENT_INSTRUCTION", decode_instruction),
Decoder(1, "EVENT_INTERRUPT", decode_interrupt),
Decoder(2, "EVENT_EXCEPTION", decode_unimp),
Decoder(3, "EVENT_ASYNC", decode_async),
Decoder(4, "EVENT_SHUTDOWN", decode_unimp),
Decoder(5, "EVENT_CHAR_WRITE", decode_unimp),
Decoder(6, "EVENT_CHAR_READ_ALL", decode_unimp),
Decoder(7, "EVENT_CHAR_READ_ALL_ERROR", decode_unimp),
Decoder(8, "EVENT_AUDIO_OUT", decode_audio_out),
Decoder(9, "EVENT_AUDIO_IN", decode_unimp),
Decoder(10, "EVENT_CLOCK_HOST", decode_clock),
Decoder(11, "EVENT_CLOCK_VIRTUAL_RT", decode_clock),
Decoder(12, "EVENT_CP_CLOCK_WARP_START", decode_checkpoint),
Decoder(13, "EVENT_CP_CLOCK_WARP_ACCOUNT", decode_checkpoint),
Decoder(14, "EVENT_CP_RESET_REQUESTED", decode_checkpoint),
Decoder(15, "EVENT_CP_SUSPEND_REQUESTED", decode_checkpoint),
Decoder(16, "EVENT_CP_CLOCK_VIRTUAL", decode_checkpoint),
Decoder(17, "EVENT_CP_CLOCK_HOST", decode_checkpoint),
Decoder(18, "EVENT_CP_CLOCK_VIRTUAL_RT", decode_checkpoint),
Decoder(19, "EVENT_CP_INIT", decode_checkpoint_init),
Decoder(20, "EVENT_CP_RESET", decode_checkpoint),
]
# Shutdown cause added
v7_event_table = [Decoder(0, "EVENT_INSTRUCTION", decode_instruction),
Decoder(1, "EVENT_INTERRUPT", decode_interrupt),
Decoder(2, "EVENT_EXCEPTION", decode_unimp),
Decoder(3, "EVENT_ASYNC", decode_async),
Decoder(4, "EVENT_SHUTDOWN", decode_unimp),
Decoder(5, "EVENT_SHUTDOWN_HOST_ERR", decode_unimp),
Decoder(6, "EVENT_SHUTDOWN_HOST_QMP", decode_unimp),
Decoder(7, "EVENT_SHUTDOWN_HOST_SIGNAL", decode_unimp),
Decoder(8, "EVENT_SHUTDOWN_HOST_UI", decode_unimp),
Decoder(9, "EVENT_SHUTDOWN_GUEST_SHUTDOWN", decode_unimp),
Decoder(10, "EVENT_SHUTDOWN_GUEST_RESET", decode_unimp),
Decoder(11, "EVENT_SHUTDOWN_GUEST_PANIC", decode_unimp),
Decoder(12, "EVENT_SHUTDOWN___MAX", decode_unimp),
Decoder(13, "EVENT_CHAR_WRITE", decode_unimp),
Decoder(14, "EVENT_CHAR_READ_ALL", decode_unimp),
Decoder(15, "EVENT_CHAR_READ_ALL_ERROR", decode_unimp),
Decoder(16, "EVENT_AUDIO_OUT", decode_audio_out),
Decoder(17, "EVENT_AUDIO_IN", decode_unimp),
Decoder(18, "EVENT_CLOCK_HOST", decode_clock),
Decoder(19, "EVENT_CLOCK_VIRTUAL_RT", decode_clock),
Decoder(20, "EVENT_CP_CLOCK_WARP_START", decode_checkpoint),
Decoder(21, "EVENT_CP_CLOCK_WARP_ACCOUNT", decode_checkpoint),
Decoder(22, "EVENT_CP_RESET_REQUESTED", decode_checkpoint),
Decoder(23, "EVENT_CP_SUSPEND_REQUESTED", decode_checkpoint),
Decoder(24, "EVENT_CP_CLOCK_VIRTUAL", decode_checkpoint),
Decoder(25, "EVENT_CP_CLOCK_HOST", decode_checkpoint),
Decoder(26, "EVENT_CP_CLOCK_VIRTUAL_RT", decode_checkpoint),
Decoder(27, "EVENT_CP_INIT", decode_checkpoint_init),
Decoder(28, "EVENT_CP_RESET", decode_checkpoint),
]
def parse_arguments():
"Grab arguments for script"
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", help='record/replay dump to read from',
required=True)
return parser.parse_args()
def decode_file(filename):
"Decode a record/replay dump"
dumpfile = open(filename, "rb")
# read and throwaway the header
version = read_dword(dumpfile)
junk = read_qword(dumpfile)
print("HEADER: version 0x%x" % (version))
if version == 0xe02007:
event_decode_table = v7_event_table
replay_state.checkpoint_start = 12
elif version == 0xe02006:
event_decode_table = v6_event_table
replay_state.checkpoint_start = 12
else:
event_decode_table = v5_event_table
replay_state.checkpoint_start = 10
try:
decode_ok = True
while decode_ok:
event = read_event(dumpfile)
decode_ok = call_decode(event_decode_table, event, dumpfile)
finally:
dumpfile.close()
if __name__ == "__main__":
args = parse_arguments()
decode_file(args.file)
| 12,119 | 38.223301 | 80 | py |
qemu | qemu-master/scripts/decodetree.py | #!/usr/bin/env python3
# Copyright (c) 2018 Linaro Limited
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.
#
#
# Generate a decoding tree from a specification file.
# See the syntax and semantics in docs/devel/decodetree.rst.
#
import io
import os
import re
import sys
import getopt
insnwidth = 32
bitop_width = 32
insnmask = 0xffffffff
variablewidth = False
fields = {}
arguments = {}
formats = {}
allpatterns = []
anyextern = False
translate_prefix = 'trans'
translate_scope = 'static '
input_file = ''
output_file = None
output_fd = None
insntype = 'uint32_t'
decode_function = 'decode'
# An identifier for C.
re_C_ident = '[a-zA-Z][a-zA-Z0-9_]*'
# Identifiers for Arguments, Fields, Formats and Patterns.
re_arg_ident = '&[a-zA-Z0-9_]*'
re_fld_ident = '%[a-zA-Z0-9_]*'
re_fmt_ident = '@[a-zA-Z0-9_]*'
re_pat_ident = '[a-zA-Z0-9_]*'
def error_with_file(file, lineno, *args):
"""Print an error message from file:line and args and exit."""
global output_file
global output_fd
prefix = ''
if file:
prefix += f'{file}:'
if lineno:
prefix += f'{lineno}:'
if prefix:
prefix += ' '
print(prefix, end='error: ', file=sys.stderr)
print(*args, file=sys.stderr)
if output_file and output_fd:
output_fd.close()
os.remove(output_file)
exit(1)
# end error_with_file
def error(lineno, *args):
error_with_file(input_file, lineno, *args)
# end error
def output(*args):
global output_fd
for a in args:
output_fd.write(a)
def output_autogen():
output('/* This file is autogenerated by scripts/decodetree.py. */\n\n')
def str_indent(c):
"""Return a string with C spaces"""
return ' ' * c
def str_fields(fields):
"""Return a string uniquely identifying FIELDS"""
r = ''
for n in sorted(fields.keys()):
r += '_' + n
return r[1:]
def whex(val):
"""Return a hex string for val padded for insnwidth"""
global insnwidth
return f'0x{val:0{insnwidth // 4}x}'
def whexC(val):
"""Return a hex string for val padded for insnwidth,
and with the proper suffix for a C constant."""
suffix = ''
if val >= 0x100000000:
suffix = 'ull'
elif val >= 0x80000000:
suffix = 'u'
return whex(val) + suffix
def str_match_bits(bits, mask):
"""Return a string pretty-printing BITS/MASK"""
global insnwidth
i = 1 << (insnwidth - 1)
space = 0x01010100
r = ''
while i != 0:
if i & mask:
if i & bits:
r += '1'
else:
r += '0'
else:
r += '.'
if i & space:
r += ' '
i >>= 1
return r
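# For example, with the default 32-bit insnwidth (illustrative values):
#
#     str_match_bits(0x0000000f, 0x000000ff)
#         -> '........ ........ ........ 00001111'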
def is_pow2(x):
"""Return true iff X is equal to a power of 2."""
return (x & (x - 1)) == 0
def ctz(x):
"""Return the number of times 2 factors into X."""
assert x != 0
r = 0
while ((x >> r) & 1) == 0:
r += 1
return r
def is_contiguous(bits):
if bits == 0:
return -1
shift = ctz(bits)
if is_pow2((bits >> shift) + 1):
return shift
else:
return -1
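# For example (illustrative): is_pow2(8) is True, ctz(8) == 3, and
# is_contiguous(0b00111000) == 3 because the set bits form a single run
# starting at bit 3, while is_contiguous(0b1010) == -1.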
def eq_fields_for_args(flds_a, arg):
if len(flds_a) != len(arg.fields):
return False
# Only allow inference on default types
for t in arg.types:
if t != 'int':
return False
for k, a in flds_a.items():
if k not in arg.fields:
return False
return True
def eq_fields_for_fmts(flds_a, flds_b):
if len(flds_a) != len(flds_b):
return False
for k, a in flds_a.items():
if k not in flds_b:
return False
b = flds_b[k]
if a.__class__ != b.__class__ or a != b:
return False
return True
class Field:
"""Class representing a simple instruction field"""
def __init__(self, sign, pos, len):
self.sign = sign
self.pos = pos
self.len = len
self.mask = ((1 << len) - 1) << pos
def __str__(self):
if self.sign:
s = 's'
else:
s = ''
return str(self.pos) + ':' + s + str(self.len)
def str_extract(self):
global bitop_width
s = 's' if self.sign else ''
return f'{s}extract{bitop_width}(insn, {self.pos}, {self.len})'
def __eq__(self, other):
return self.sign == other.sign and self.mask == other.mask
def __ne__(self, other):
return not self.__eq__(other)
# end Field
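# For example (illustrative), a 5-bit unsigned field at bit position 10:
#
#     f = Field(0, 10, 5)
#     str(f)            # '10:5'
#     f.str_extract()   # 'extract32(insn, 10, 5)' with the default bitop_width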
class MultiField:
"""Class representing a compound instruction field"""
def __init__(self, subs, mask):
self.subs = subs
self.sign = subs[0].sign
self.mask = mask
def __str__(self):
return str(self.subs)
def str_extract(self):
global bitop_width
ret = '0'
pos = 0
for f in reversed(self.subs):
ext = f.str_extract()
if pos == 0:
ret = ext
else:
ret = f'deposit{bitop_width}({ret}, {pos}, {bitop_width - pos}, {ext})'
pos += f.len
return ret
def __ne__(self, other):
if len(self.subs) != len(other.subs):
return True
for a, b in zip(self.subs, other.subs):
if a.__class__ != b.__class__ or a != b:
return True
return False
def __eq__(self, other):
return not self.__ne__(other)
# end MultiField
class ConstField:
"""Class representing an argument field with constant value"""
def __init__(self, value):
self.value = value
self.mask = 0
self.sign = value < 0
def __str__(self):
return str(self.value)
def str_extract(self):
return str(self.value)
    def __eq__(self, other):
        return self.value == other.value
    def __ne__(self, other):
        return not self.__eq__(other)
# end ConstField
class FunctionField:
"""Class representing a field passed through a function"""
def __init__(self, func, base):
self.mask = base.mask
self.sign = base.sign
self.base = base
self.func = func
def __str__(self):
return self.func + '(' + str(self.base) + ')'
def str_extract(self):
return self.func + '(ctx, ' + self.base.str_extract() + ')'
def __eq__(self, other):
return self.func == other.func and self.base == other.base
def __ne__(self, other):
return not self.__eq__(other)
# end FunctionField
class ParameterField:
"""Class representing a pseudo-field read from a function"""
def __init__(self, func):
self.mask = 0
self.sign = 0
self.func = func
def __str__(self):
return self.func
def str_extract(self):
return self.func + '(ctx)'
def __eq__(self, other):
return self.func == other.func
def __ne__(self, other):
return not self.__eq__(other)
# end ParameterField
class Arguments:
"""Class representing the extracted fields of a format"""
def __init__(self, nm, flds, types, extern):
self.name = nm
self.extern = extern
self.fields = flds
self.types = types
def __str__(self):
return self.name + ' ' + str(self.fields)
def struct_name(self):
return 'arg_' + self.name
def output_def(self):
if not self.extern:
output('typedef struct {\n')
for (n, t) in zip(self.fields, self.types):
output(f' {t} {n};\n')
output('} ', self.struct_name(), ';\n\n')
# end Arguments
class General:
"""Common code between instruction formats and instruction patterns"""
def __init__(self, name, lineno, base, fixb, fixm, udfm, fldm, flds, w):
self.name = name
self.file = input_file
self.lineno = lineno
self.base = base
self.fixedbits = fixb
self.fixedmask = fixm
self.undefmask = udfm
self.fieldmask = fldm
self.fields = flds
self.width = w
def __str__(self):
return self.name + ' ' + str_match_bits(self.fixedbits, self.fixedmask)
def str1(self, i):
return str_indent(i) + self.__str__()
# end General
class Format(General):
"""Class representing an instruction format"""
def extract_name(self):
global decode_function
return decode_function + '_extract_' + self.name
def output_extract(self):
output('static void ', self.extract_name(), '(DisasContext *ctx, ',
self.base.struct_name(), ' *a, ', insntype, ' insn)\n{\n')
for n, f in self.fields.items():
output(' a->', n, ' = ', f.str_extract(), ';\n')
output('}\n\n')
# end Format
class Pattern(General):
"""Class representing an instruction pattern"""
def output_decl(self):
global translate_scope
global translate_prefix
output('typedef ', self.base.base.struct_name(),
' arg_', self.name, ';\n')
output(translate_scope, 'bool ', translate_prefix, '_', self.name,
'(DisasContext *ctx, arg_', self.name, ' *a);\n')
def output_code(self, i, extracted, outerbits, outermask):
global translate_prefix
ind = str_indent(i)
arg = self.base.base.name
output(ind, '/* ', self.file, ':', str(self.lineno), ' */\n')
if not extracted:
output(ind, self.base.extract_name(),
'(ctx, &u.f_', arg, ', insn);\n')
for n, f in self.fields.items():
output(ind, 'u.f_', arg, '.', n, ' = ', f.str_extract(), ';\n')
output(ind, 'if (', translate_prefix, '_', self.name,
'(ctx, &u.f_', arg, ')) return true;\n')
# Normal patterns do not have children.
def build_tree(self):
return
def prop_masks(self):
return
def prop_format(self):
return
def prop_width(self):
return
# end Pattern
class MultiPattern(General):
"""Class representing a set of instruction patterns"""
def __init__(self, lineno):
self.file = input_file
self.lineno = lineno
self.pats = []
self.base = None
self.fixedbits = 0
self.fixedmask = 0
self.undefmask = 0
self.width = None
def __str__(self):
r = 'group'
if self.fixedbits is not None:
r += ' ' + str_match_bits(self.fixedbits, self.fixedmask)
return r
def output_decl(self):
for p in self.pats:
p.output_decl()
def prop_masks(self):
global insnmask
fixedmask = insnmask
undefmask = insnmask
# Collect fixedmask/undefmask for all of the children.
for p in self.pats:
p.prop_masks()
fixedmask &= p.fixedmask
undefmask &= p.undefmask
# Widen fixedmask until all fixedbits match
repeat = True
fixedbits = 0
while repeat and fixedmask != 0:
fixedbits = None
for p in self.pats:
thisbits = p.fixedbits & fixedmask
if fixedbits is None:
fixedbits = thisbits
elif fixedbits != thisbits:
fixedmask &= ~(fixedbits ^ thisbits)
break
else:
repeat = False
self.fixedbits = fixedbits
self.fixedmask = fixedmask
self.undefmask = undefmask
def build_tree(self):
for p in self.pats:
p.build_tree()
def prop_format(self):
for p in self.pats:
p.build_tree()
def prop_width(self):
width = None
for p in self.pats:
p.prop_width()
if width is None:
width = p.width
elif width != p.width:
error_with_file(self.file, self.lineno,
'width mismatch in patterns within braces')
self.width = width
# end MultiPattern
class IncMultiPattern(MultiPattern):
"""Class representing an overlapping set of instruction patterns"""
def output_code(self, i, extracted, outerbits, outermask):
global translate_prefix
ind = str_indent(i)
for p in self.pats:
if outermask != p.fixedmask:
innermask = p.fixedmask & ~outermask
innerbits = p.fixedbits & ~outermask
output(ind, f'if ((insn & {whexC(innermask)}) == {whexC(innerbits)}) {{\n')
output(ind, f' /* {str_match_bits(p.fixedbits, p.fixedmask)} */\n')
p.output_code(i + 4, extracted, p.fixedbits, p.fixedmask)
output(ind, '}\n')
else:
p.output_code(i, extracted, p.fixedbits, p.fixedmask)
#end IncMultiPattern
class Tree:
"""Class representing a node in a decode tree"""
def __init__(self, fm, tm):
self.fixedmask = fm
self.thismask = tm
self.subs = []
self.base = None
def str1(self, i):
ind = str_indent(i)
r = ind + whex(self.fixedmask)
        if self.base:
            r += ' ' + self.base.name
r += ' [\n'
for (b, s) in self.subs:
r += ind + f' {whex(b)}:\n'
r += s.str1(i + 4) + '\n'
r += ind + ']'
return r
def __str__(self):
return self.str1(0)
def output_code(self, i, extracted, outerbits, outermask):
ind = str_indent(i)
# If we identified all nodes below have the same format,
# extract the fields now.
if not extracted and self.base:
output(ind, self.base.extract_name(),
'(ctx, &u.f_', self.base.base.name, ', insn);\n')
extracted = True
# Attempt to aid the compiler in producing compact switch statements.
# If the bits in the mask are contiguous, extract them.
sh = is_contiguous(self.thismask)
if sh > 0:
# Propagate SH down into the local functions.
def str_switch(b, sh=sh):
return f'(insn >> {sh}) & {b >> sh:#x}'
def str_case(b, sh=sh):
return hex(b >> sh)
else:
def str_switch(b):
return f'insn & {whexC(b)}'
def str_case(b):
return whexC(b)
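        # e.g. for thismask == 0xf0, is_contiguous() returns 4 and the
        # switch condition becomes "(insn >> 4) & 0xf" instead of
        # "insn & 0xf0", keeping the case values small.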
output(ind, 'switch (', str_switch(self.thismask), ') {\n')
for b, s in sorted(self.subs):
assert (self.thismask & ~s.fixedmask) == 0
innermask = outermask | self.thismask
innerbits = outerbits | b
output(ind, 'case ', str_case(b), ':\n')
output(ind, ' /* ',
str_match_bits(innerbits, innermask), ' */\n')
s.output_code(i + 4, extracted, innerbits, innermask)
output(ind, ' break;\n')
output(ind, '}\n')
# end Tree
class ExcMultiPattern(MultiPattern):
"""Class representing a non-overlapping set of instruction patterns"""
def output_code(self, i, extracted, outerbits, outermask):
# Defer everything to our decomposed Tree node
self.tree.output_code(i, extracted, outerbits, outermask)
@staticmethod
def __build_tree(pats, outerbits, outermask):
# Find the intersection of all remaining fixedmask.
innermask = ~outermask & insnmask
for i in pats:
innermask &= i.fixedmask
if innermask == 0:
# Edge condition: One pattern covers the entire insnmask
if len(pats) == 1:
t = Tree(outermask, innermask)
t.subs.append((0, pats[0]))
return t
text = 'overlapping patterns:'
for p in pats:
text += '\n' + p.file + ':' + str(p.lineno) + ': ' + str(p)
error_with_file(pats[0].file, pats[0].lineno, text)
fullmask = outermask | innermask
# Sort each element of pats into the bin selected by the mask.
bins = {}
for i in pats:
fb = i.fixedbits & innermask
if fb in bins:
bins[fb].append(i)
else:
bins[fb] = [i]
# We must recurse if any bin has more than one element or if
# the single element in the bin has not been fully matched.
t = Tree(fullmask, innermask)
for b, l in bins.items():
s = l[0]
if len(l) > 1 or s.fixedmask & ~fullmask != 0:
s = ExcMultiPattern.__build_tree(l, b | outerbits, fullmask)
t.subs.append((b, s))
return t
def build_tree(self):
super().prop_format()
self.tree = self.__build_tree(self.pats, self.fixedbits,
self.fixedmask)
@staticmethod
def __prop_format(tree):
"""Propagate Format objects into the decode tree"""
# Depth first search.
for (b, s) in tree.subs:
if isinstance(s, Tree):
ExcMultiPattern.__prop_format(s)
# If all entries in SUBS have the same format, then
# propagate that into the tree.
f = None
for (b, s) in tree.subs:
if f is None:
f = s.base
if f is None:
return
if f is not s.base:
return
tree.base = f
def prop_format(self):
super().prop_format()
self.__prop_format(self.tree)
# end ExcMultiPattern
def parse_field(lineno, name, toks):
"""Parse one instruction field from TOKS at LINENO"""
global fields
global insnwidth
# A "simple" field will have only one entry;
# a "multifield" will have several.
subs = []
width = 0
func = None
for t in toks:
if re.match('^!function=', t):
if func:
error(lineno, 'duplicate function')
func = t.split('=')
func = func[1]
continue
if re.fullmatch('[0-9]+:s[0-9]+', t):
# Signed field extract
subtoks = t.split(':s')
sign = True
elif re.fullmatch('[0-9]+:[0-9]+', t):
# Unsigned field extract
subtoks = t.split(':')
sign = False
else:
error(lineno, f'invalid field token "{t}"')
po = int(subtoks[0])
le = int(subtoks[1])
if po + le > insnwidth:
error(lineno, f'field {t} too large')
f = Field(sign, po, le)
subs.append(f)
width += le
if width > insnwidth:
error(lineno, 'field too large')
if len(subs) == 0:
if func:
f = ParameterField(func)
else:
error(lineno, 'field with no value')
else:
if len(subs) == 1:
f = subs[0]
else:
mask = 0
for s in subs:
if mask & s.mask:
error(lineno, 'field components overlap')
mask |= s.mask
f = MultiField(subs, mask)
if func:
f = FunctionField(func, f)
if name in fields:
error(lineno, 'duplicate field', name)
fields[name] = f
# end parse_field
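# Example field definitions accepted above (sketches in .decode syntax):
#   %disp   0:s12                       -- simple 12-bit signed field
#   %imm    16:5 1:2                    -- multifield of two extracts
#   %shimm  5:3 !function=expand_shimm  -- expand_shimm is a hypothetical
#                                          C helper applied to the value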
def parse_arguments(lineno, name, toks):
"""Parse one argument set from TOKS at LINENO"""
global arguments
global re_C_ident
global anyextern
flds = []
types = []
extern = False
for n in toks:
if re.fullmatch('!extern', n):
extern = True
anyextern = True
continue
if re.fullmatch(re_C_ident + ':' + re_C_ident, n):
(n, t) = n.split(':')
elif re.fullmatch(re_C_ident, n):
t = 'int'
else:
error(lineno, f'invalid argument set token "{n}"')
if n in flds:
error(lineno, f'duplicate argument "{n}"')
flds.append(n)
types.append(t)
if name in arguments:
error(lineno, 'duplicate argument set', name)
arguments[name] = Arguments(name, flds, types, extern)
# end parse_arguments
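# Example argument-set definitions (sketches):
#   &reg3   ra rb rc          -- three members of type 'int'
#   &mem    base ofs:int64_t  -- member with an explicit type
#   &extarg !extern f         -- struct arg_extarg is declared elsewhere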
def lookup_field(lineno, name):
global fields
if name in fields:
return fields[name]
error(lineno, 'undefined field', name)
def add_field(lineno, flds, new_name, f):
if new_name in flds:
error(lineno, 'duplicate field', new_name)
flds[new_name] = f
return flds
def add_field_byname(lineno, flds, new_name, old_name):
return add_field(lineno, flds, new_name, lookup_field(lineno, old_name))
def infer_argument_set(flds):
global arguments
global decode_function
for arg in arguments.values():
if eq_fields_for_args(flds, arg):
return arg
name = decode_function + str(len(arguments))
arg = Arguments(name, flds.keys(), ['int'] * len(flds), False)
arguments[name] = arg
return arg
def infer_format(arg, fieldmask, flds, width):
global arguments
global formats
global decode_function
const_flds = {}
var_flds = {}
for n, c in flds.items():
        if isinstance(c, ConstField):
const_flds[n] = c
else:
var_flds[n] = c
# Look for an existing format with the same argument set and fields
for fmt in formats.values():
if arg and fmt.base != arg:
continue
if fieldmask != fmt.fieldmask:
continue
if width != fmt.width:
continue
if not eq_fields_for_fmts(flds, fmt.fields):
continue
return (fmt, const_flds)
name = decode_function + '_Fmt_' + str(len(formats))
if not arg:
arg = infer_argument_set(flds)
fmt = Format(name, 0, arg, 0, 0, 0, fieldmask, var_flds, width)
formats[name] = fmt
return (fmt, const_flds)
# end infer_format
def parse_generic(lineno, parent_pat, name, toks):
"""Parse one instruction format from TOKS at LINENO"""
global fields
global arguments
global formats
global allpatterns
global re_arg_ident
global re_fld_ident
global re_fmt_ident
global re_C_ident
global insnwidth
global insnmask
global variablewidth
is_format = parent_pat is None
fixedmask = 0
fixedbits = 0
undefmask = 0
width = 0
flds = {}
arg = None
fmt = None
for t in toks:
# '&Foo' gives a format an explicit argument set.
if re.fullmatch(re_arg_ident, t):
tt = t[1:]
if arg:
error(lineno, 'multiple argument sets')
if tt in arguments:
arg = arguments[tt]
else:
error(lineno, 'undefined argument set', t)
continue
# '@Foo' gives a pattern an explicit format.
if re.fullmatch(re_fmt_ident, t):
tt = t[1:]
if fmt:
error(lineno, 'multiple formats')
if tt in formats:
fmt = formats[tt]
else:
error(lineno, 'undefined format', t)
continue
# '%Foo' imports a field.
if re.fullmatch(re_fld_ident, t):
tt = t[1:]
flds = add_field_byname(lineno, flds, tt, tt)
continue
# 'Foo=%Bar' imports a field with a different name.
if re.fullmatch(re_C_ident + '=' + re_fld_ident, t):
(fname, iname) = t.split('=%')
flds = add_field_byname(lineno, flds, fname, iname)
continue
# 'Foo=number' sets an argument field to a constant value
if re.fullmatch(re_C_ident + '=[+-]?[0-9]+', t):
(fname, value) = t.split('=')
value = int(value)
flds = add_field(lineno, flds, fname, ConstField(value))
continue
        # A pattern of 0s, 1s, dots and dashes indicates required zeros,
        # required ones, don't-cares ('.'), and undefined bits ('-').
if re.fullmatch('[01.-]+', t):
shift = len(t)
fms = t.replace('0', '1')
fms = fms.replace('.', '0')
fms = fms.replace('-', '0')
fbs = t.replace('.', '0')
fbs = fbs.replace('-', '0')
ubm = t.replace('1', '0')
ubm = ubm.replace('.', '0')
ubm = ubm.replace('-', '1')
fms = int(fms, 2)
fbs = int(fbs, 2)
ubm = int(ubm, 2)
fixedbits = (fixedbits << shift) | fbs
fixedmask = (fixedmask << shift) | fms
undefmask = (undefmask << shift) | ubm
# Otherwise, fieldname:fieldwidth
elif re.fullmatch(re_C_ident + ':s?[0-9]+', t):
(fname, flen) = t.split(':')
sign = False
if flen[0] == 's':
sign = True
flen = flen[1:]
shift = int(flen, 10)
if shift + width > insnwidth:
error(lineno, f'field {fname} exceeds insnwidth')
f = Field(sign, insnwidth - width - shift, shift)
flds = add_field(lineno, flds, fname, f)
fixedbits <<= shift
fixedmask <<= shift
undefmask <<= shift
else:
error(lineno, f'invalid token "{t}"')
width += shift
if variablewidth and width < insnwidth and width % 8 == 0:
shift = insnwidth - width
fixedbits <<= shift
fixedmask <<= shift
undefmask <<= shift
undefmask |= (1 << shift) - 1
# We should have filled in all of the bits of the instruction.
elif not (is_format and width == 0) and width != insnwidth:
error(lineno, f'definition has {width} bits')
# Do not check for fields overlapping fields; one valid usage
# is to be able to duplicate fields via import.
fieldmask = 0
for f in flds.values():
fieldmask |= f.mask
# Fix up what we've parsed to match either a format or a pattern.
if is_format:
# Formats cannot reference formats.
if fmt:
error(lineno, 'format referencing format')
# If an argument set is given, then there should be no fields
# without a place to store it.
if arg:
for f in flds.keys():
if f not in arg.fields:
error(lineno, f'field {f} not in argument set {arg.name}')
else:
arg = infer_argument_set(flds)
if name in formats:
error(lineno, 'duplicate format name', name)
fmt = Format(name, lineno, arg, fixedbits, fixedmask,
undefmask, fieldmask, flds, width)
formats[name] = fmt
else:
# Patterns can reference a format ...
if fmt:
# ... but not an argument simultaneously
if arg:
error(lineno, 'pattern specifies both format and argument set')
if fixedmask & fmt.fixedmask:
error(lineno, 'pattern fixed bits overlap format fixed bits')
if width != fmt.width:
error(lineno, 'pattern uses format of different width')
fieldmask |= fmt.fieldmask
fixedbits |= fmt.fixedbits
fixedmask |= fmt.fixedmask
undefmask |= fmt.undefmask
else:
(fmt, flds) = infer_format(arg, fieldmask, flds, width)
arg = fmt.base
for f in flds.keys():
if f not in arg.fields:
error(lineno, f'field {f} not in argument set {arg.name}')
if f in fmt.fields.keys():
error(lineno, f'field {f} set by format and pattern')
for f in arg.fields:
if f not in flds.keys() and f not in fmt.fields.keys():
error(lineno, f'field {f} not initialized')
pat = Pattern(name, lineno, fmt, fixedbits, fixedmask,
undefmask, fieldmask, flds, width)
parent_pat.pats.append(pat)
allpatterns.append(pat)
# Validate the masks that we have assembled.
if fieldmask & fixedmask:
error(lineno, 'fieldmask overlaps fixedmask ',
f'({whex(fieldmask)} & {whex(fixedmask)})')
if fieldmask & undefmask:
error(lineno, 'fieldmask overlaps undefmask ',
f'({whex(fieldmask)} & {whex(undefmask)})')
if fixedmask & undefmask:
error(lineno, 'fixedmask overlaps undefmask ',
f'({whex(fixedmask)} & {whex(undefmask)})')
if not is_format:
allbits = fieldmask | fixedmask | undefmask
if allbits != insnmask:
error(lineno, 'bits left unspecified ',
f'({whex(allbits ^ insnmask)})')
# end parse_general
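# Example of a format and a pattern referencing it, in the style of the
# decodetree documentation (a sketch for 32-bit instructions):
#   @opi     ...... ra:5 lit:8    1 ....... rc:5
#   addl_i   010000 ..... ..... .... 0000111 ..... @opi
# The pattern contributes the fixed opcode bits; the format contributes
# the field layout and (here) an inferred argument set.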
def parse_file(f, parent_pat):
"""Parse all of the patterns within a file"""
global re_arg_ident
global re_fld_ident
global re_fmt_ident
global re_pat_ident
# Read all of the lines of the file. Concatenate lines
# ending in backslash; discard empty lines and comments.
toks = []
lineno = 0
nesting = 0
nesting_pats = []
for line in f:
lineno += 1
# Expand and strip spaces, to find indent.
line = line.rstrip()
line = line.expandtabs()
len1 = len(line)
line = line.lstrip()
len2 = len(line)
# Discard comments
end = line.find('#')
if end >= 0:
line = line[:end]
t = line.split()
if len(toks) != 0:
# Next line after continuation
toks.extend(t)
else:
# Allow completely blank lines.
if len1 == 0:
continue
indent = len1 - len2
# Empty line due to comment.
if len(t) == 0:
# Indentation must be correct, even for comment lines.
if indent != nesting:
error(lineno, 'indentation ', indent, ' != ', nesting)
continue
start_lineno = lineno
toks = t
# Continuation?
if toks[-1] == '\\':
toks.pop()
continue
name = toks[0]
del toks[0]
# End nesting?
if name == '}' or name == ']':
if len(toks) != 0:
error(start_lineno, 'extra tokens after close brace')
# Make sure { } and [ ] nest properly.
if (name == '}') != isinstance(parent_pat, IncMultiPattern):
error(lineno, 'mismatched close brace')
try:
parent_pat = nesting_pats.pop()
except:
error(lineno, 'extra close brace')
nesting -= 2
if indent != nesting:
error(lineno, 'indentation ', indent, ' != ', nesting)
toks = []
continue
# Everything else should have current indentation.
if indent != nesting:
error(start_lineno, 'indentation ', indent, ' != ', nesting)
# Start nesting?
if name == '{' or name == '[':
if len(toks) != 0:
error(start_lineno, 'extra tokens after open brace')
if name == '{':
nested_pat = IncMultiPattern(start_lineno)
else:
nested_pat = ExcMultiPattern(start_lineno)
parent_pat.pats.append(nested_pat)
nesting_pats.append(parent_pat)
parent_pat = nested_pat
nesting += 2
toks = []
continue
# Determine the type of object needing to be parsed.
if re.fullmatch(re_fld_ident, name):
parse_field(start_lineno, name[1:], toks)
elif re.fullmatch(re_arg_ident, name):
parse_arguments(start_lineno, name[1:], toks)
elif re.fullmatch(re_fmt_ident, name):
parse_generic(start_lineno, None, name[1:], toks)
elif re.fullmatch(re_pat_ident, name):
parse_generic(start_lineno, parent_pat, name, toks)
else:
error(lineno, f'invalid token "{name}"')
toks = []
if nesting != 0:
error(lineno, 'missing close brace')
# end parse_file
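# Grouping example (a sketch): '{ }' encloses overlapping patterns tried
# in order, '[ ]' encloses non-overlapping patterns decoded by switch:
#   {
#     nop   0000 0000 0000 0000
#     movi  0000 rd:4 imm:8
#   }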
class SizeTree:
"""Class representing a node in a size decode tree"""
def __init__(self, m, w):
self.mask = m
self.subs = []
self.base = None
self.width = w
def str1(self, i):
ind = str_indent(i)
r = ind + whex(self.mask) + ' [\n'
for (b, s) in self.subs:
r += ind + f' {whex(b)}:\n'
r += s.str1(i + 4) + '\n'
r += ind + ']'
return r
def __str__(self):
return self.str1(0)
def output_code(self, i, extracted, outerbits, outermask):
ind = str_indent(i)
# If we need to load more bytes to test, do so now.
if extracted < self.width:
output(ind, f'insn = {decode_function}_load_bytes',
f'(ctx, insn, {extracted // 8}, {self.width // 8});\n')
extracted = self.width
# Attempt to aid the compiler in producing compact switch statements.
# If the bits in the mask are contiguous, extract them.
sh = is_contiguous(self.mask)
if sh > 0:
# Propagate SH down into the local functions.
def str_switch(b, sh=sh):
return f'(insn >> {sh}) & {b >> sh:#x}'
def str_case(b, sh=sh):
return hex(b >> sh)
else:
def str_switch(b):
return f'insn & {whexC(b)}'
def str_case(b):
return whexC(b)
output(ind, 'switch (', str_switch(self.mask), ') {\n')
for b, s in sorted(self.subs):
innermask = outermask | self.mask
innerbits = outerbits | b
output(ind, 'case ', str_case(b), ':\n')
output(ind, ' /* ',
str_match_bits(innerbits, innermask), ' */\n')
s.output_code(i + 4, extracted, innerbits, innermask)
output(ind, '}\n')
output(ind, 'return insn;\n')
# end SizeTree
class SizeLeaf:
"""Class representing a leaf node in a size decode tree"""
def __init__(self, m, w):
self.mask = m
self.width = w
def str1(self, i):
return str_indent(i) + whex(self.mask)
def __str__(self):
return self.str1(0)
def output_code(self, i, extracted, outerbits, outermask):
global decode_function
ind = str_indent(i)
# If we need to load more bytes, do so now.
if extracted < self.width:
output(ind, f'insn = {decode_function}_load_bytes',
f'(ctx, insn, {extracted // 8}, {self.width // 8});\n')
extracted = self.width
output(ind, 'return insn;\n')
# end SizeLeaf
def build_size_tree(pats, width, outerbits, outermask):
global insnwidth
# Collect the mask of bits that are fixed in this width
innermask = 0xff << (insnwidth - width)
innermask &= ~outermask
minwidth = None
onewidth = True
for i in pats:
innermask &= i.fixedmask
if minwidth is None:
minwidth = i.width
elif minwidth != i.width:
            onewidth = False
if minwidth < i.width:
minwidth = i.width
if onewidth:
return SizeLeaf(innermask, minwidth)
if innermask == 0:
if width < minwidth:
return build_size_tree(pats, width + 8, outerbits, outermask)
pnames = []
for p in pats:
pnames.append(p.name + ':' + p.file + ':' + str(p.lineno))
error_with_file(pats[0].file, pats[0].lineno,
f'overlapping patterns size {width}:', pnames)
bins = {}
for i in pats:
fb = i.fixedbits & innermask
if fb in bins:
bins[fb].append(i)
else:
bins[fb] = [i]
fullmask = outermask | innermask
lens = sorted(bins.keys())
if len(lens) == 1:
b = lens[0]
return build_size_tree(bins[b], width + 8, b | outerbits, fullmask)
r = SizeTree(innermask, width)
for b, l in bins.items():
s = build_size_tree(l, width, b | outerbits, fullmask)
r.subs.append((b, s))
return r
# end build_size_tree
def prop_size(tree):
"""Propagate minimum widths up the decode size tree"""
if isinstance(tree, SizeTree):
min = None
for (b, s) in tree.subs:
width = prop_size(s)
if min is None or min > width:
min = width
assert min >= tree.width
tree.width = min
else:
min = tree.width
return min
# end prop_size
def main():
global arguments
global formats
global allpatterns
global translate_scope
global translate_prefix
global output_fd
global output_file
global input_file
global insnwidth
global insntype
global insnmask
global decode_function
global bitop_width
global variablewidth
global anyextern
decode_scope = 'static '
long_opts = ['decode=', 'translate=', 'output=', 'insnwidth=',
'static-decode=', 'varinsnwidth=']
try:
(opts, args) = getopt.gnu_getopt(sys.argv[1:], 'o:vw:', long_opts)
except getopt.GetoptError as err:
error(0, err)
for o, a in opts:
if o in ('-o', '--output'):
output_file = a
elif o == '--decode':
decode_function = a
decode_scope = ''
elif o == '--static-decode':
decode_function = a
elif o == '--translate':
translate_prefix = a
translate_scope = ''
elif o in ('-w', '--insnwidth', '--varinsnwidth'):
if o == '--varinsnwidth':
variablewidth = True
insnwidth = int(a)
if insnwidth == 16:
insntype = 'uint16_t'
insnmask = 0xffff
elif insnwidth == 64:
insntype = 'uint64_t'
insnmask = 0xffffffffffffffff
bitop_width = 64
elif insnwidth != 32:
error(0, 'cannot handle insns of width', insnwidth)
else:
assert False, 'unhandled option'
if len(args) < 1:
error(0, 'missing input file')
toppat = ExcMultiPattern(0)
for filename in args:
input_file = filename
f = open(filename, 'rt', encoding='utf-8')
parse_file(f, toppat)
f.close()
# We do not want to compute masks for toppat, because those masks
# are used as a starting point for build_tree. For toppat, we must
# insist that decode begins from naught.
for i in toppat.pats:
i.prop_masks()
toppat.build_tree()
toppat.prop_format()
if variablewidth:
for i in toppat.pats:
i.prop_width()
stree = build_size_tree(toppat.pats, 8, 0, 0)
prop_size(stree)
if output_file:
output_fd = open(output_file, 'wt', encoding='utf-8')
else:
output_fd = io.TextIOWrapper(sys.stdout.buffer,
encoding=sys.stdout.encoding,
errors="ignore")
output_autogen()
for n in sorted(arguments.keys()):
f = arguments[n]
f.output_def()
# A single translate function can be invoked for different patterns.
# Make sure that the argument sets are the same, and declare the
# function only once.
#
# If we're sharing formats, we're likely also sharing trans_* functions,
# but we can't tell which ones. Prevent issues from the compiler by
# suppressing redundant declaration warnings.
if anyextern:
output("#pragma GCC diagnostic push\n",
"#pragma GCC diagnostic ignored \"-Wredundant-decls\"\n",
"#ifdef __clang__\n"
"# pragma GCC diagnostic ignored \"-Wtypedef-redefinition\"\n",
"#endif\n\n")
out_pats = {}
for i in allpatterns:
if i.name in out_pats:
p = out_pats[i.name]
if i.base.base != p.base.base:
error(0, i.name, ' has conflicting argument sets')
else:
i.output_decl()
out_pats[i.name] = i
output('\n')
if anyextern:
output("#pragma GCC diagnostic pop\n\n")
for n in sorted(formats.keys()):
f = formats[n]
f.output_extract()
output(decode_scope, 'bool ', decode_function,
'(DisasContext *ctx, ', insntype, ' insn)\n{\n')
i4 = str_indent(4)
if len(allpatterns) != 0:
output(i4, 'union {\n')
for n in sorted(arguments.keys()):
f = arguments[n]
output(i4, i4, f.struct_name(), ' f_', f.name, ';\n')
output(i4, '} u;\n\n')
toppat.output_code(4, False, 0, 0)
output(i4, 'return false;\n')
output('}\n')
if variablewidth:
output('\n', decode_scope, insntype, ' ', decode_function,
'_load(DisasContext *ctx)\n{\n',
' ', insntype, ' insn = 0;\n\n')
stree.output_code(4, 0, 0, 0)
output('}\n')
if output_file:
output_fd.close()
# end main
if __name__ == '__main__':
main()
| 41,623 | 28.209825 | 91 | py |
qemu | qemu-master/scripts/qemu-gdb.py | #!/usr/bin/env python3
#
# GDB debugging support
#
# Copyright 2012 Red Hat, Inc. and/or its affiliates
#
# Authors:
# Avi Kivity <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.
# Usage:
# At the (gdb) prompt, type "source scripts/qemu-gdb.py".
# "help qemu" should then list the supported QEMU debug support commands.
import gdb
import os, sys
# Annoyingly, gdb doesn't put the directory of scripts onto the
# module search path. Do it manually.
sys.path.append(os.path.dirname(__file__))
from qemugdb import aio, mtree, coroutine, tcg, timers
class QemuCommand(gdb.Command):
'''Prefix for QEMU debug support commands'''
def __init__(self):
gdb.Command.__init__(self, 'qemu', gdb.COMMAND_DATA,
gdb.COMPLETE_NONE, True)
QemuCommand()
coroutine.CoroutineCommand()
mtree.MtreeCommand()
aio.HandlersCommand()
tcg.TCGLockStatusCommand()
timers.TimersCommand()
coroutine.CoroutineSPFunction()
coroutine.CoroutinePCFunction()
coroutine.CoroutineBt()
# Default to silently passing through SIGUSR1, because QEMU sends it
# to itself a lot.
gdb.execute('handle SIGUSR1 pass noprint nostop')
| 1,237 | 24.791667 | 73 | py |
qemu | qemu-master/scripts/undefsym.py | #!/usr/bin/env python3
# Before a shared module's DSO is produced, a static library is built for it
# and passed to this script. The script generates -Wl,-u options to force
# the inclusion of symbol from libqemuutil.a if the shared modules need them,
# This is necessary because the modules may use functions not needed by the
# executable itself, which would cause the function to not be linked in.
# Then the DSO loading would fail because of the missing symbol.
import sys
import subprocess
def filter_lines_set(stdout, from_staticlib):
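    # nm -P prints one "name type [value size]" line per symbol; type 'U'
    # marks an undefined reference. Keep defined symbols when scanning the
    # static library, undefined ones when scanning the modules.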
linesSet = set()
for line in stdout.splitlines():
tokens = line.split(b' ')
if len(tokens) >= 2:
if from_staticlib and tokens[1] == b'U':
continue
if not from_staticlib and tokens[1] != b'U':
continue
new_line = b'-Wl,-u,' + tokens[0]
if not new_line in linesSet:
linesSet.add(new_line)
return linesSet
def main(args):
if len(args) <= 3:
sys.exit(0)
nm = args[1]
staticlib = args[2]
pc = subprocess.run([nm, "-P", "-g", staticlib], stdout=subprocess.PIPE)
if pc.returncode != 0:
sys.exit(1)
staticlib_syms = filter_lines_set(pc.stdout, True)
shared_modules = args[3:]
pc = subprocess.run([nm, "-P", "-g"] + shared_modules, stdout=subprocess.PIPE)
if pc.returncode != 0:
sys.exit(1)
modules_undef_syms = filter_lines_set(pc.stdout, False)
lines = sorted(staticlib_syms.intersection(modules_undef_syms))
sys.stdout.buffer.write(b'\n'.join(lines))
if __name__ == "__main__":
main(sys.argv)
| 1,637 | 32.428571 | 82 | py |
qemu | qemu-master/scripts/nsis.py | #!/usr/bin/env python3
#
# Copyright (C) 2020 Red Hat, Inc.
#
# SPDX-License-Identifier: GPL-2.0-or-later
import argparse
import glob
import os
import shutil
import subprocess
import tempfile
def signcode(path):
cmd = os.environ.get("SIGNCODE")
if not cmd:
return
subprocess.run([cmd, path])
def find_deps(exe_or_dll, search_path, analyzed_deps):
deps = [exe_or_dll]
output = subprocess.check_output(["objdump", "-p", exe_or_dll], text=True)
output = output.split("\n")
for line in output:
if not line.startswith("\tDLL Name: "):
continue
dep = line.split("DLL Name: ")[1].strip()
if dep in analyzed_deps:
continue
dll = os.path.join(search_path, dep)
if not os.path.exists(dll):
# assume it's a Windows provided dll, skip it
continue
analyzed_deps.add(dep)
# locate the dll dependencies recursively
rdeps = find_deps(dll, search_path, analyzed_deps)
deps.extend(rdeps)
return deps
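# objdump -p prints one "\tDLL Name: libfoo-0.dll" line per imported DLL
# (illustrative name); find_deps() above follows those imports recursively
# to compute the closure of DLLs that must ship with the installer.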
def main():
parser = argparse.ArgumentParser(description="QEMU NSIS build helper.")
parser.add_argument("outfile")
parser.add_argument("prefix")
parser.add_argument("srcdir")
parser.add_argument("dlldir")
parser.add_argument("cpu")
parser.add_argument("nsisargs", nargs="*")
args = parser.parse_args()
# canonicalize the Windows native prefix path
prefix = os.path.splitdrive(args.prefix)[1]
destdir = tempfile.mkdtemp()
try:
subprocess.run(["make", "install", "DESTDIR=" + destdir])
with open(
os.path.join(destdir + prefix, "system-emulations.nsh"), "w"
) as nsh, open(
os.path.join(destdir + prefix, "system-mui-text.nsh"), "w"
) as muinsh:
for exe in sorted(glob.glob(
os.path.join(destdir + prefix, "qemu-system-*.exe")
)):
exe = os.path.basename(exe)
arch = exe[12:-4]
nsh.write(
"""
Section "{0}" Section_{0}
SetOutPath "$INSTDIR"
File "${{BINDIR}}\\{1}"
SectionEnd
""".format(
arch, exe
)
)
if arch.endswith('w'):
desc = arch[:-1] + " emulation (GUI)."
else:
desc = arch + " emulation."
muinsh.write(
"""
!insertmacro MUI_DESCRIPTION_TEXT ${{Section_{0}}} "{1}"
""".format(arch, desc))
search_path = args.dlldir
print("Searching '%s' for the dependent dlls ..." % search_path)
dlldir = os.path.join(destdir + prefix, "dll")
os.mkdir(dlldir)
for exe in glob.glob(os.path.join(destdir + prefix, "*.exe")):
signcode(exe)
# find all dll dependencies
deps = set(find_deps(exe, search_path, set()))
deps.remove(exe)
# copy all dlls to the DLLDIR
for dep in deps:
dllfile = os.path.join(dlldir, os.path.basename(dep))
                if os.path.exists(dllfile):
continue
print("Copying '%s' to '%s'" % (dep, dllfile))
shutil.copy(dep, dllfile)
makensis = [
"makensis",
"-V2",
"-NOCD",
"-DSRCDIR=" + args.srcdir,
"-DBINDIR=" + destdir + prefix,
]
if args.cpu == "x86_64":
makensis += ["-DW64"]
makensis += ["-DDLLDIR=" + dlldir]
makensis += ["-DOUTFILE=" + args.outfile] + args.nsisargs
subprocess.run(makensis)
signcode(args.outfile)
finally:
shutil.rmtree(destdir)
if __name__ == "__main__":
main()
| 3,880 | 28.853846 | 78 | py |
qemu | qemu-master/scripts/minikconf.py | #!/usr/bin/env python3
#
# Mini-Kconfig parser
#
# Copyright (c) 2015 Red Hat Inc.
#
# Authors:
# Paolo Bonzini <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2
# or, at your option, any later version. See the COPYING file in
# the top-level directory.
import os
import sys
import re
import random
__all__ = [ 'KconfigDataError', 'KconfigParserError',
'KconfigData', 'KconfigParser' ,
'defconfig', 'allyesconfig', 'allnoconfig', 'randconfig' ]
def debug_print(*args):
#print('# ' + (' '.join(str(x) for x in args)))
pass
# -------------------------------------------
# KconfigData implements the Kconfig semantics. For now it can only
# detect undefined symbols, i.e. symbols that were referenced in
# assignments or dependencies but were not declared with "config FOO".
#
# Semantic actions are represented by methods called do_*. The do_var
# method returns the semantic value of a variable (which right now is
# just its name).
# -------------------------------------------
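# For example, given this input (a sketch):
#     config PCI
#         bool
#     config VIRTIO_PCI
#         depends on PCI
#         default y if PCI
#     CONFIG_PCI=y
# the assignment forces PCI on, the dependency is satisfied, and the
# conditional default then enables VIRTIO_PCI as well.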
class KconfigDataError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
allyesconfig = lambda x: True
allnoconfig = lambda x: False
defconfig = lambda x: x
randconfig = lambda x: random.randint(0, 1) == 1
class KconfigData:
class Expr:
def __and__(self, rhs):
return KconfigData.AND(self, rhs)
def __or__(self, rhs):
return KconfigData.OR(self, rhs)
def __invert__(self):
return KconfigData.NOT(self)
# Abstract methods
def add_edges_to(self, var):
pass
def evaluate(self):
assert False
class AND(Expr):
def __init__(self, lhs, rhs):
self.lhs = lhs
self.rhs = rhs
def __str__(self):
return "(%s && %s)" % (self.lhs, self.rhs)
def add_edges_to(self, var):
self.lhs.add_edges_to(var)
self.rhs.add_edges_to(var)
def evaluate(self):
return self.lhs.evaluate() and self.rhs.evaluate()
class OR(Expr):
def __init__(self, lhs, rhs):
self.lhs = lhs
self.rhs = rhs
def __str__(self):
return "(%s || %s)" % (self.lhs, self.rhs)
def add_edges_to(self, var):
self.lhs.add_edges_to(var)
self.rhs.add_edges_to(var)
def evaluate(self):
return self.lhs.evaluate() or self.rhs.evaluate()
class NOT(Expr):
def __init__(self, lhs):
self.lhs = lhs
def __str__(self):
return "!%s" % (self.lhs)
def add_edges_to(self, var):
self.lhs.add_edges_to(var)
def evaluate(self):
return not self.lhs.evaluate()
class Var(Expr):
def __init__(self, name):
self.name = name
self.value = None
self.outgoing = set()
self.clauses_for_var = list()
def __str__(self):
return self.name
def has_value(self):
return not (self.value is None)
def set_value(self, val, clause):
self.clauses_for_var.append(clause)
if self.has_value() and self.value != val:
print("The following clauses were found for " + self.name)
for i in self.clauses_for_var:
print(" " + str(i), file=sys.stderr)
raise KconfigDataError('contradiction between clauses when setting %s' % self)
debug_print("=> %s is now %s" % (self.name, val))
self.value = val
# depth first search of the dependency graph
def dfs(self, visited, f):
if self in visited:
return
visited.add(self)
for v in self.outgoing:
v.dfs(visited, f)
f(self)
def add_edges_to(self, var):
self.outgoing.add(var)
def evaluate(self):
if not self.has_value():
raise KconfigDataError('cycle found including %s' % self)
return self.value
class Clause:
def __init__(self, dest):
self.dest = dest
def priority(self):
return 0
def process(self):
pass
class AssignmentClause(Clause):
def __init__(self, dest, value):
KconfigData.Clause.__init__(self, dest)
self.value = value
def __str__(self):
return "CONFIG_%s=%s" % (self.dest, 'y' if self.value else 'n')
def process(self):
self.dest.set_value(self.value, self)
class DefaultClause(Clause):
def __init__(self, dest, value, cond=None):
KconfigData.Clause.__init__(self, dest)
self.value = value
self.cond = cond
if not (self.cond is None):
self.cond.add_edges_to(self.dest)
def __str__(self):
value = 'y' if self.value else 'n'
if self.cond is None:
return "config %s default %s" % (self.dest, value)
else:
return "config %s default %s if %s" % (self.dest, value, self.cond)
def priority(self):
# Defaults are processed just before leaving the variable
return -1
def process(self):
if not self.dest.has_value() and \
(self.cond is None or self.cond.evaluate()):
self.dest.set_value(self.value, self)
class DependsOnClause(Clause):
def __init__(self, dest, expr):
KconfigData.Clause.__init__(self, dest)
self.expr = expr
self.expr.add_edges_to(self.dest)
def __str__(self):
return "config %s depends on %s" % (self.dest, self.expr)
def process(self):
if not self.expr.evaluate():
self.dest.set_value(False, self)
class SelectClause(Clause):
def __init__(self, dest, cond):
KconfigData.Clause.__init__(self, dest)
self.cond = cond
self.cond.add_edges_to(self.dest)
def __str__(self):
return "select %s if %s" % (self.dest, self.cond)
def process(self):
if self.cond.evaluate():
self.dest.set_value(True, self)
def __init__(self, value_mangler=defconfig):
self.value_mangler = value_mangler
self.previously_included = []
self.incl_info = None
self.defined_vars = set()
self.referenced_vars = dict()
self.clauses = list()
# semantic analysis -------------
def check_undefined(self):
undef = False
for i in self.referenced_vars:
if not (i in self.defined_vars):
print("undefined symbol %s" % (i), file=sys.stderr)
undef = True
return undef
def compute_config(self):
if self.check_undefined():
raise KconfigDataError("there were undefined symbols")
return None
debug_print("Input:")
for clause in self.clauses:
debug_print(clause)
debug_print("\nDependency graph:")
for i in self.referenced_vars:
debug_print(i, "->", [str(x) for x in self.referenced_vars[i].outgoing])
# The reverse of the depth-first order is the topological sort
dfo = dict()
visited = set()
debug_print("\n")
def visit_fn(var):
debug_print(var, "has DFS number", len(dfo))
dfo[var] = len(dfo)
for name, v in self.referenced_vars.items():
self.do_default(v, False)
v.dfs(visited, visit_fn)
# Put higher DFS numbers and higher priorities first. This
# places the clauses in topological order and places defaults
# after assignments and dependencies.
self.clauses.sort(key=lambda x: (-dfo[x.dest], -x.priority()))
debug_print("\nSorted clauses:")
for clause in self.clauses:
debug_print(clause)
clause.process()
debug_print("")
values = dict()
for name, v in self.referenced_vars.items():
debug_print("Evaluating", name)
values[name] = v.evaluate()
return values
# semantic actions -------------
def do_declaration(self, var):
        if var.name in self.defined_vars:
            raise KconfigDataError('variable "' + var.name + '" defined twice')
self.defined_vars.add(var.name)
# var is a string with the variable's name.
def do_var(self, var):
if (var in self.referenced_vars):
return self.referenced_vars[var]
var_obj = self.referenced_vars[var] = KconfigData.Var(var)
return var_obj
def do_assignment(self, var, val):
self.clauses.append(KconfigData.AssignmentClause(var, val))
def do_default(self, var, val, cond=None):
val = self.value_mangler(val)
self.clauses.append(KconfigData.DefaultClause(var, val, cond))
def do_depends_on(self, var, expr):
self.clauses.append(KconfigData.DependsOnClause(var, expr))
def do_select(self, var, symbol, cond=None):
cond = (cond & var) if cond is not None else var
self.clauses.append(KconfigData.SelectClause(symbol, cond))
def do_imply(self, var, symbol, cond=None):
# "config X imply Y [if COND]" is the same as
# "config Y default y if X [&& COND]"
cond = (cond & var) if cond is not None else var
self.do_default(symbol, True, cond)
# -------------------------------------------
# KconfigParser implements a recursive descent parser for (simplified)
# Kconfig syntax.
# -------------------------------------------
# tokens table
TOKENS = {}
TOK_NONE = -1
TOK_LPAREN = 0; TOKENS[TOK_LPAREN] = '"("';
TOK_RPAREN = 1; TOKENS[TOK_RPAREN] = '")"';
TOK_EQUAL = 2; TOKENS[TOK_EQUAL] = '"="';
TOK_AND = 3; TOKENS[TOK_AND] = '"&&"';
TOK_OR = 4; TOKENS[TOK_OR] = '"||"';
TOK_NOT = 5; TOKENS[TOK_NOT] = '"!"';
TOK_DEPENDS = 6; TOKENS[TOK_DEPENDS] = '"depends"';
TOK_ON = 7; TOKENS[TOK_ON] = '"on"';
TOK_SELECT = 8; TOKENS[TOK_SELECT] = '"select"';
TOK_IMPLY = 9; TOKENS[TOK_IMPLY] = '"imply"';
TOK_CONFIG = 10; TOKENS[TOK_CONFIG] = '"config"';
TOK_DEFAULT = 11; TOKENS[TOK_DEFAULT] = '"default"';
TOK_Y = 12; TOKENS[TOK_Y] = '"y"';
TOK_N = 13; TOKENS[TOK_N] = '"n"';
TOK_SOURCE = 14; TOKENS[TOK_SOURCE] = '"source"';
TOK_BOOL = 15; TOKENS[TOK_BOOL] = '"bool"';
TOK_IF = 16; TOKENS[TOK_IF] = '"if"';
TOK_ID = 17; TOKENS[TOK_ID] = 'identifier';
TOK_EOF = 18; TOKENS[TOK_EOF] = 'end of file';
class KconfigParserError(Exception):
def __init__(self, parser, msg, tok=None):
self.loc = parser.location()
tok = tok or parser.tok
if tok != TOK_NONE:
location = TOKENS.get(tok, None) or ('"%s"' % tok)
msg = '%s before %s' % (msg, location)
self.msg = msg
def __str__(self):
return "%s: %s" % (self.loc, self.msg)
class KconfigParser:
@classmethod
def parse(self, fp, mode=None):
data = KconfigData(mode or KconfigParser.defconfig)
parser = KconfigParser(data)
parser.parse_file(fp)
return data
def __init__(self, data):
self.data = data
def parse_file(self, fp):
self.abs_fname = os.path.abspath(fp.name)
self.fname = fp.name
self.data.previously_included.append(self.abs_fname)
self.src = fp.read()
if self.src == '' or self.src[-1] != '\n':
self.src += '\n'
self.cursor = 0
self.line = 1
self.line_pos = 0
self.get_token()
self.parse_config()
def do_assignment(self, var, val):
if not var.startswith("CONFIG_"):
            raise KconfigDataError('assigned variable should start with CONFIG_')
var = self.data.do_var(var[7:])
self.data.do_assignment(var, val)
# file management -----
def error_path(self):
inf = self.data.incl_info
res = ""
while inf:
res = ("In file included from %s:%d:\n" % (inf['file'],
inf['line'])) + res
inf = inf['parent']
return res
def location(self):
col = 1
for ch in self.src[self.line_pos:self.pos]:
if ch == '\t':
col += 8 - ((col - 1) % 8)
else:
col += 1
return '%s%s:%d:%d' %(self.error_path(), self.fname, self.line, col)
def do_include(self, include):
incl_abs_fname = os.path.join(os.path.dirname(self.abs_fname),
include)
# catch inclusion cycle
inf = self.data.incl_info
while inf:
if incl_abs_fname == os.path.abspath(inf['file']):
raise KconfigParserError(self, "Inclusion loop for %s"
% include)
inf = inf['parent']
# skip multiple include of the same file
if incl_abs_fname in self.data.previously_included:
return
try:
fp = open(incl_abs_fname, 'rt', encoding='utf-8')
except IOError as e:
raise KconfigParserError(self,
'%s: %s' % (e.strerror, include))
inf = self.data.incl_info
self.data.incl_info = { 'file': self.fname, 'line': self.line,
'parent': inf }
KconfigParser(self.data).parse_file(fp)
self.data.incl_info = inf
# recursive descent parser -----
# y_or_n: Y | N
def parse_y_or_n(self):
if self.tok == TOK_Y:
self.get_token()
return True
if self.tok == TOK_N:
self.get_token()
return False
raise KconfigParserError(self, 'Expected "y" or "n"')
# var: ID
def parse_var(self):
if self.tok == TOK_ID:
val = self.val
self.get_token()
return self.data.do_var(val)
else:
raise KconfigParserError(self, 'Expected identifier')
# assignment_var: ID (starting with "CONFIG_")
def parse_assignment_var(self):
if self.tok == TOK_ID:
val = self.val
if not val.startswith("CONFIG_"):
raise KconfigParserError(self,
'Expected identifier starting with "CONFIG_"', TOK_NONE)
self.get_token()
return self.data.do_var(val[7:])
else:
raise KconfigParserError(self, 'Expected identifier')
# assignment: var EQUAL y_or_n
def parse_assignment(self):
var = self.parse_assignment_var()
if self.tok != TOK_EQUAL:
raise KconfigParserError(self, 'Expected "="')
self.get_token()
self.data.do_assignment(var, self.parse_y_or_n())
# primary: NOT primary
# | LPAREN expr RPAREN
# | var
def parse_primary(self):
if self.tok == TOK_NOT:
self.get_token()
val = ~self.parse_primary()
elif self.tok == TOK_LPAREN:
self.get_token()
val = self.parse_expr()
if self.tok != TOK_RPAREN:
raise KconfigParserError(self, 'Expected ")"')
self.get_token()
elif self.tok == TOK_ID:
val = self.parse_var()
else:
raise KconfigParserError(self, 'Expected "!" or "(" or identifier')
return val
# disj: primary (OR primary)*
def parse_disj(self):
lhs = self.parse_primary()
while self.tok == TOK_OR:
self.get_token()
lhs = lhs | self.parse_primary()
return lhs
# expr: disj (AND disj)*
def parse_expr(self):
lhs = self.parse_disj()
while self.tok == TOK_AND:
self.get_token()
lhs = lhs & self.parse_disj()
return lhs
# condition: IF expr
# | empty
def parse_condition(self):
if self.tok == TOK_IF:
self.get_token()
return self.parse_expr()
else:
return None
# property: DEFAULT y_or_n condition
# | DEPENDS ON expr
# | SELECT var condition
# | BOOL
def parse_property(self, var):
if self.tok == TOK_DEFAULT:
self.get_token()
val = self.parse_y_or_n()
cond = self.parse_condition()
self.data.do_default(var, val, cond)
elif self.tok == TOK_DEPENDS:
self.get_token()
if self.tok != TOK_ON:
raise KconfigParserError(self, 'Expected "on"')
self.get_token()
self.data.do_depends_on(var, self.parse_expr())
elif self.tok == TOK_SELECT:
self.get_token()
symbol = self.parse_var()
cond = self.parse_condition()
self.data.do_select(var, symbol, cond)
elif self.tok == TOK_IMPLY:
self.get_token()
symbol = self.parse_var()
cond = self.parse_condition()
self.data.do_imply(var, symbol, cond)
elif self.tok == TOK_BOOL:
self.get_token()
else:
raise KconfigParserError(self, 'Error in recursive descent?')
# properties: properties property
# | /* empty */
def parse_properties(self, var):
had_default = False
while self.tok == TOK_DEFAULT or self.tok == TOK_DEPENDS or \
self.tok == TOK_SELECT or self.tok == TOK_BOOL or \
self.tok == TOK_IMPLY:
self.parse_property(var)
# for nicer error message
if self.tok != TOK_SOURCE and self.tok != TOK_CONFIG and \
self.tok != TOK_ID and self.tok != TOK_EOF:
raise KconfigParserError(self, 'expected "source", "config", identifier, '
+ '"default", "depends on", "imply" or "select"')
# declaration: config var properties
def parse_declaration(self):
if self.tok == TOK_CONFIG:
self.get_token()
var = self.parse_var()
self.data.do_declaration(var)
self.parse_properties(var)
else:
raise KconfigParserError(self, 'Error in recursive descent?')
# clause: SOURCE
# | declaration
# | assignment
def parse_clause(self):
if self.tok == TOK_SOURCE:
val = self.val
self.get_token()
self.do_include(val)
elif self.tok == TOK_CONFIG:
self.parse_declaration()
elif self.tok == TOK_ID:
self.parse_assignment()
else:
raise KconfigParserError(self, 'expected "source", "config" or identifier')
# config: clause+ EOF
def parse_config(self):
while self.tok != TOK_EOF:
self.parse_clause()
return self.data
# scanner -----
def get_token(self):
while True:
self.tok = self.src[self.cursor]
self.pos = self.cursor
self.cursor += 1
self.val = None
self.tok = self.scan_token()
if self.tok is not None:
return
def check_keyword(self, rest):
if not self.src.startswith(rest, self.cursor):
return False
length = len(rest)
if self.src[self.cursor + length].isalnum() or self.src[self.cursor + length] == '_':
return False
self.cursor += length
return True
def scan_token(self):
if self.tok == '#':
self.cursor = self.src.find('\n', self.cursor)
return None
elif self.tok == '=':
return TOK_EQUAL
elif self.tok == '(':
return TOK_LPAREN
elif self.tok == ')':
return TOK_RPAREN
elif self.tok == '&' and self.src[self.pos+1] == '&':
self.cursor += 1
return TOK_AND
elif self.tok == '|' and self.src[self.pos+1] == '|':
self.cursor += 1
return TOK_OR
elif self.tok == '!':
return TOK_NOT
elif self.tok == 'd' and self.check_keyword("epends"):
return TOK_DEPENDS
elif self.tok == 'o' and self.check_keyword("n"):
return TOK_ON
elif self.tok == 's' and self.check_keyword("elect"):
return TOK_SELECT
elif self.tok == 'i' and self.check_keyword("mply"):
return TOK_IMPLY
elif self.tok == 'c' and self.check_keyword("onfig"):
return TOK_CONFIG
elif self.tok == 'd' and self.check_keyword("efault"):
return TOK_DEFAULT
elif self.tok == 'b' and self.check_keyword("ool"):
return TOK_BOOL
elif self.tok == 'i' and self.check_keyword("f"):
return TOK_IF
elif self.tok == 'y' and self.check_keyword(""):
return TOK_Y
elif self.tok == 'n' and self.check_keyword(""):
return TOK_N
elif (self.tok == 's' and self.check_keyword("ource")) or \
self.tok == 'i' and self.check_keyword("nclude"):
# source FILENAME
# include FILENAME
while self.src[self.cursor].isspace():
self.cursor += 1
start = self.cursor
self.cursor = self.src.find('\n', self.cursor)
self.val = self.src[start:self.cursor]
return TOK_SOURCE
elif self.tok.isalnum():
# identifier
while self.src[self.cursor].isalnum() or self.src[self.cursor] == '_':
self.cursor += 1
self.val = self.src[self.pos:self.cursor]
return TOK_ID
elif self.tok == '\n':
if self.cursor == len(self.src):
return TOK_EOF
self.line += 1
self.line_pos = self.cursor
elif not self.tok.isspace():
raise KconfigParserError(self, 'invalid input')
return None
if __name__ == '__main__':
argv = sys.argv
mode = defconfig
if len(sys.argv) > 1:
if argv[1] == '--defconfig':
del argv[1]
elif argv[1] == '--randconfig':
random.seed()
mode = randconfig
del argv[1]
elif argv[1] == '--allyesconfig':
mode = allyesconfig
del argv[1]
elif argv[1] == '--allnoconfig':
mode = allnoconfig
del argv[1]
if len(argv) == 1:
print ("%s: at least one argument is required" % argv[0], file=sys.stderr)
sys.exit(1)
if argv[1].startswith('-'):
print ("%s: invalid option %s" % (argv[0], argv[1]), file=sys.stderr)
sys.exit(1)
data = KconfigData(mode)
parser = KconfigParser(data)
external_vars = set()
for arg in argv[3:]:
m = re.match(r'^(CONFIG_[A-Z0-9_]+)=([yn]?)$', arg)
if m is not None:
name, value = m.groups()
parser.do_assignment(name, value == 'y')
external_vars.add(name[7:])
else:
fp = open(arg, 'rt', encoding='utf-8')
parser.parse_file(fp)
fp.close()
config = data.compute_config()
for key in sorted(config.keys()):
if key not in external_vars and config[key]:
print ('CONFIG_%s=y' % key)
deps = open(argv[2], 'wt', encoding='utf-8')
for fname in data.previously_included:
print ('%s: %s' % (argv[1], fname), file=deps)
deps.close()
| 23,656 | 32.226124 | 94 | py |
qemu | qemu-master/scripts/check_sparse.py | #! /usr/bin/env python3
# Invoke sparse based on the contents of compile_commands.json,
# also working around several deficiencies in cgcc's command line
# parsing
import json
import subprocess
import os
import sys
import shlex
def cmdline_for_sparse(sparse, cmdline):
# Do not include the C compiler executable
skip = True
arg = False
out = sparse + ['-no-compile']
for x in cmdline:
if arg:
out.append(x)
arg = False
continue
if skip:
skip = False
continue
# prevent sparse from treating output files as inputs
if x == '-MF' or x == '-MQ' or x == '-o':
skip = True
continue
# cgcc ignores -no-compile if it sees -M or -MM?
if x.startswith('-M'):
continue
# sparse does not understand these!
if x == '-iquote' or x == '-isystem':
x = '-I'
if x == '-I':
arg = True
out.append(x)
return out
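# For example (a sketch): ['cc', '-MF', 'x.d', '-iquote', 'dir', '-c',
# 'f.c', '-o', 'f.o'] becomes sparse + ['-no-compile', '-I', 'dir', '-c',
# 'f.c']: the compiler name, dependency flags and output file are dropped.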
root_path = os.getenv('MESON_BUILD_ROOT')
def build_path(s):
return s if not root_path else os.path.join(root_path, s)
ccjson_path = build_path(sys.argv[1])
with open(ccjson_path, 'r') as fd:
compile_commands = json.load(fd)
sparse = sys.argv[2:]
sparse_env = os.environ.copy()
for cmd in compile_commands:
cmdline = shlex.split(cmd['command'])
cmd = cmdline_for_sparse(sparse, cmdline)
print('REAL_CC=%s' % shlex.quote(cmdline[0]),
' '.join((shlex.quote(x) for x in cmd)))
sparse_env['REAL_CC'] = cmdline[0]
r = subprocess.run(cmd, env=sparse_env, cwd=root_path)
if r.returncode != 0:
sys.exit(r.returncode)
| 1,679 | 27 | 65 | py |
qemu | qemu-master/scripts/probe-gdb-support.py | #!/usr/bin/env python3
# coding: utf-8
#
# Probe gdb for supported architectures.
#
# This is required to support testing of the gdbstub as its hard to
# handle errors gracefully during the test. Instead this script when
# passed a GDB binary will probe its architecture support and return a
# string of supported arches, stripped of guff.
#
# Copyright 2023 Linaro Ltd
#
# Author: Alex Bennée <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
#
# SPDX-License-Identifier: GPL-2.0-or-later
import argparse
import re
from subprocess import check_output, STDOUT
# mappings from gdb arch to QEMU target
mappings = {
"alpha" : "alpha",
"aarch64" : ["aarch64", "aarch64_be"],
"armv7": "arm",
"armv8-a" : ["aarch64", "aarch64_be"],
"avr" : "avr",
"cris" : "cris",
# no hexagon in upstream gdb
"hppa1.0" : "hppa",
"i386" : "i386",
"i386:x86-64" : "x86_64",
"Loongarch64" : "loongarch64",
"m68k" : "m68k",
"MicroBlaze" : "microblaze",
"mips:isa64" : ["mips64", "mips64el"],
"nios2" : "nios2",
"or1k" : "or1k",
"powerpc:common" : "ppc",
"powerpc:common64" : ["ppc64", "ppc64le"],
"riscv:rv32" : "riscv32",
"riscv:rv64" : "riscv64",
"s390:64-bit" : "s390x",
"sh4" : ["sh4", "sh4eb"],
"sparc": "sparc",
"sparc:v8plus": "sparc32plus",
"sparc:v9a" : "sparc64",
# no tricore in upstream gdb
"xtensa" : ["xtensa", "xtensaeb"]
}
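# "set architecture" with no argument makes gdb list every target it was
# built with, e.g. (sample output, abbreviated):
#   Valid arguments are i386, i386:x86-64, aarch64, riscv:rv64, ...
# do_probe() parses that line and maps each name through the table above.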
def do_probe(gdb):
gdb_out = check_output([gdb,
"-ex", "set architecture",
"-ex", "quit"], stderr=STDOUT)
m = re.search(r"Valid arguments are (.*)",
gdb_out.decode("utf-8"))
valid_arches = set()
    if m and m.group(1):
for arch in m.group(1).split(", "):
if arch in mappings:
mapping = mappings[arch]
if isinstance(mapping, str):
valid_arches.add(mapping)
else:
for entry in mapping:
valid_arches.add(entry)
return valid_arches
def main() -> None:
parser = argparse.ArgumentParser(description='Probe GDB Architectures')
parser.add_argument('gdb', help='Path to GDB binary.')
args = parser.parse_args()
supported = do_probe(args.gdb)
print(" ".join(supported))
if __name__ == '__main__':
main()
| 2,472 | 26.786517 | 75 | py |
qemu | qemu-master/scripts/userfaultfd-wrlat.py | #!/usr/bin/python3
#
# userfaultfd-wrlat Summarize userfaultfd write fault latencies.
# Events are continuously accumulated for the
# run, while latency distribution histogram is
# dumped each 'interval' seconds.
#
# For Linux, uses BCC, eBPF.
#
# USAGE: userfaultfd-lat [interval [count]]
#
# Copyright Virtuozzo GmbH, 2020
#
# Authors:
# Andrey Gruzdev <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.
from __future__ import print_function
from bcc import BPF
from ctypes import c_ushort, c_int, c_ulonglong
from time import sleep
from sys import argv
def usage():
print("USAGE: %s [interval [count]]" % argv[0])
exit()
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/mm.h>
BPF_HASH(ev_start, u32, u64);
BPF_HISTOGRAM(ev_delta_hist, u64);
/* Trace UFFD page fault start event. */
static void do_event_start()
{
/* Using "(u32)" to drop group ID which is upper 32 bits */
u32 tid = (u32) bpf_get_current_pid_tgid();
u64 ts = bpf_ktime_get_ns();
ev_start.update(&tid, &ts);
}
/* Trace UFFD page fault end event. */
static void do_event_end()
{
/* Using "(u32)" to drop group ID which is upper 32 bits */
u32 tid = (u32) bpf_get_current_pid_tgid();
u64 ts = bpf_ktime_get_ns();
u64 *tsp;
tsp = ev_start.lookup(&tid);
if (tsp) {
u64 delta = ts - (*tsp);
/* Transform time delta to milliseconds */
ev_delta_hist.increment(bpf_log2l(delta / 1000000));
ev_start.delete(&tid);
}
}
/* KPROBE for handle_userfault(). */
int probe_handle_userfault(struct pt_regs *ctx, struct vm_fault *vmf,
unsigned long reason)
{
/* Trace only UFFD write faults. */
if (reason & VM_UFFD_WP) {
do_event_start();
}
return 0;
}
/* KRETPROBE for handle_userfault(). */
int retprobe_handle_userfault(struct pt_regs *ctx)
{
do_event_end();
return 0;
}
"""
# arguments
interval = 10
count = -1
if len(argv) > 1:
try:
interval = int(argv[1])
if interval == 0:
raise
if len(argv) > 2:
count = int(argv[2])
except: # also catches -h, --help
usage()
# load BPF program
b = BPF(text=bpf_text)
# attach KRPOBEs
b.attach_kprobe(event="handle_userfault", fn_name="probe_handle_userfault")
b.attach_kretprobe(event="handle_userfault", fn_name="retprobe_handle_userfault")
# header
print("Tracing UFFD-WP write fault latency... Hit Ctrl-C to end.")
# output
loop = 0
do_exit = 0
while (1):
if count > 0:
loop += 1
if loop > count:
exit()
try:
sleep(interval)
except KeyboardInterrupt:
        do_exit = 1
print()
b["ev_delta_hist"].print_log2_hist("msecs")
if do_exit:
exit()
| 2,938 | 22.894309 | 81 | py |
qemu | qemu-master/scripts/vmstate-static-checker.py | #!/usr/bin/env python3
#
# Compares vmstate information stored in JSON format, obtained from
# the -dump-vmstate QEMU command.
#
# Copyright 2014 Amit Shah <[email protected]>
# Copyright 2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, see <http://www.gnu.org/licenses/>.
import argparse
import json
import sys
# Count the number of errors found
taint = 0
def bump_taint():
global taint
# Ensure we don't wrap around or reset to 0 -- the shell only has
# an 8-bit return value.
if taint < 255:
taint = taint + 1
def check_fields_match(name, s_field, d_field):
if s_field == d_field:
return True
# Some fields changed names between qemu versions. This list
# is used to allow such changes in each section / description.
changed_names = {
'apic': ['timer', 'timer_expiry'],
'e1000': ['dev', 'parent_obj'],
'ehci': ['dev', 'pcidev'],
'I440FX': ['dev', 'parent_obj'],
'ich9_ahci': ['card', 'parent_obj'],
'ich9-ahci': ['ahci', 'ich9_ahci'],
'ioh3420': ['PCIDevice', 'PCIEDevice'],
'ioh-3240-express-root-port': ['port.br.dev',
'parent_obj.parent_obj.parent_obj',
'port.br.dev.exp.aer_log',
'parent_obj.parent_obj.parent_obj.exp.aer_log'],
'cirrus_vga': ['hw_cursor_x', 'vga.hw_cursor_x',
'hw_cursor_y', 'vga.hw_cursor_y'],
'lsiscsi': ['dev', 'parent_obj'],
'mch': ['d', 'parent_obj'],
'pci_bridge': ['bridge.dev', 'parent_obj', 'bridge.dev.shpc', 'shpc'],
'pcnet': ['pci_dev', 'parent_obj'],
'PIIX3': ['pci_irq_levels', 'pci_irq_levels_vmstate'],
'piix4_pm': ['dev', 'parent_obj', 'pci0_status',
'acpi_pci_hotplug.acpi_pcihp_pci_status[0x0]',
'pm1a.sts', 'ar.pm1.evt.sts', 'pm1a.en', 'ar.pm1.evt.en',
'pm1_cnt.cnt', 'ar.pm1.cnt.cnt',
'tmr.timer', 'ar.tmr.timer',
'tmr.overflow_time', 'ar.tmr.overflow_time',
'gpe', 'ar.gpe'],
'rtl8139': ['dev', 'parent_obj'],
'qxl': ['num_surfaces', 'ssd.num_surfaces'],
'usb-ccid': ['abProtocolDataStructure', 'abProtocolDataStructure.data'],
'usb-host': ['dev', 'parent_obj'],
'usb-mouse': ['usb-ptr-queue', 'HIDPointerEventQueue'],
'usb-tablet': ['usb-ptr-queue', 'HIDPointerEventQueue'],
'vmware_vga': ['card', 'parent_obj'],
'vmware_vga_internal': ['depth', 'new_depth'],
'xhci': ['pci_dev', 'parent_obj'],
'x3130-upstream': ['PCIDevice', 'PCIEDevice'],
'xio3130-express-downstream-port': ['port.br.dev',
'parent_obj.parent_obj.parent_obj',
'port.br.dev.exp.aer_log',
'parent_obj.parent_obj.parent_obj.exp.aer_log'],
'xio3130-downstream': ['PCIDevice', 'PCIEDevice'],
'xio3130-express-upstream-port': ['br.dev', 'parent_obj.parent_obj',
'br.dev.exp.aer_log',
'parent_obj.parent_obj.exp.aer_log'],
'spapr_pci': ['dma_liobn[0]', 'mig_liobn',
'mem_win_addr', 'mig_mem_win_addr',
'mem_win_size', 'mig_mem_win_size',
'io_win_addr', 'mig_io_win_addr',
'io_win_size', 'mig_io_win_size'],
}
if not name in changed_names:
return False
if s_field in changed_names[name] and d_field in changed_names[name]:
return True
return False
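# Illustrative checks (a sketch exercising the rename table above, not part
# of the original checker): identical names always match, renamed fields
# match only within their own section.
def _example_check_fields_match():
    assert check_fields_match('e1000', 'dev', 'parent_obj')
    assert check_fields_match('apic', 'timer', 'timer_expiry')
    assert not check_fields_match('e1000', 'timer', 'timer_expiry')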
def get_changed_sec_name(sec):
# Section names can change -- see commit 292b1634 for an example.
changes = {
"ICH9 LPC": "ICH9-LPC",
"e1000-82540em": "e1000",
}
for item in changes:
if item == sec:
return changes[item]
if changes[item] == sec:
return item
return ""
def exists_in_substruct(fields, item):
# Some QEMU versions moved a few fields inside a substruct. This
# kept the on-wire format the same. This function checks if
# something got shifted inside a substruct. For example, the
# change in commit 1f42d22233b4f3d1a2933ff30e8d6a6d9ee2d08f
if not "Description" in fields:
return False
if not "Fields" in fields["Description"]:
return False
substruct_fields = fields["Description"]["Fields"]
if substruct_fields == []:
return False
return check_fields_match(fields["Description"]["name"],
substruct_fields[0]["field"], item)
def check_fields(src_fields, dest_fields, desc, sec):
# This function checks for all the fields in a section. If some
# fields got embedded into a substruct, this function will also
# attempt to check inside the substruct.
d_iter = iter(dest_fields)
s_iter = iter(src_fields)
# Using these lists as stacks to store previous value of s_iter
# and d_iter, so that when time comes to exit out of a substruct,
# we can go back one level up and continue from where we left off.
s_iter_list = []
d_iter_list = []
advance_src = True
advance_dest = True
unused_count = 0
while True:
if advance_src:
try:
s_item = next(s_iter)
except StopIteration:
if s_iter_list == []:
break
s_iter = s_iter_list.pop()
continue
else:
if unused_count == 0:
# We want to avoid advancing just once -- when entering a
# dest substruct, or when exiting one.
advance_src = True
if advance_dest:
try:
d_item = next(d_iter)
except StopIteration:
if d_iter_list == []:
# We were not in a substruct
print("Section \"" + sec + "\",", end=' ')
print("Description " + "\"" + desc + "\":", end=' ')
print("expected field \"" + s_item["field"] + "\",", end=' ')
print("while dest has no further fields")
bump_taint()
break
d_iter = d_iter_list.pop()
advance_src = False
continue
else:
if unused_count == 0:
advance_dest = True
if unused_count != 0:
            if not advance_dest:
unused_count = unused_count - s_item["size"]
if unused_count == 0:
advance_dest = True
continue
if unused_count < 0:
print("Section \"" + sec + "\",", end=' ')
print("Description \"" + desc + "\":", end=' ')
print("unused size mismatch near \"", end=' ')
print(s_item["field"] + "\"")
bump_taint()
break
continue
            if not advance_src:
unused_count = unused_count - d_item["size"]
if unused_count == 0:
advance_src = True
continue
if unused_count < 0:
print("Section \"" + sec + "\",", end=' ')
print("Description \"" + desc + "\":", end=' ')
print("unused size mismatch near \"", end=' ')
print(d_item["field"] + "\"")
bump_taint()
break
continue
if not check_fields_match(desc, s_item["field"], d_item["field"]):
# Some fields were put in substructs, keeping the
# on-wire format the same, but breaking static tools
# like this one.
# First, check if dest has a new substruct.
if exists_in_substruct(d_item, s_item["field"]):
# listiterators don't have a prev() function, so we
# have to store our current location, descend into the
# substruct, and ensure we come out as if nothing
# happened when the substruct is over.
#
# Essentially we're opening the substructs that got
# added which didn't change the wire format.
d_iter_list.append(d_iter)
substruct_fields = d_item["Description"]["Fields"]
d_iter = iter(substruct_fields)
advance_src = False
continue
# Next, check if src has substruct that dest removed
# (can happen in backward migration: 2.0 -> 1.5)
if exists_in_substruct(s_item, d_item["field"]):
s_iter_list.append(s_iter)
substruct_fields = s_item["Description"]["Fields"]
s_iter = iter(substruct_fields)
advance_dest = False
continue
if s_item["field"] == "unused" or d_item["field"] == "unused":
if s_item["size"] == d_item["size"]:
continue
if d_item["field"] == "unused":
advance_dest = False
unused_count = d_item["size"] - s_item["size"]
continue
if s_item["field"] == "unused":
advance_src = False
unused_count = s_item["size"] - d_item["size"]
continue
print("Section \"" + sec + "\",", end=' ')
print("Description \"" + desc + "\":", end=' ')
print("expected field \"" + s_item["field"] + "\",", end=' ')
print("got \"" + d_item["field"] + "\"; skipping rest")
bump_taint()
break
check_version(s_item, d_item, sec, desc)
if not "Description" in s_item:
# Check size of this field only if it's not a VMSTRUCT entry
check_size(s_item, d_item, sec, desc, s_item["field"])
check_description_in_list(s_item, d_item, sec, desc)
def check_subsections(src_sub, dest_sub, desc, sec):
for s_item in src_sub:
found = False
for d_item in dest_sub:
if s_item["name"] != d_item["name"]:
continue
found = True
check_descriptions(s_item, d_item, sec)
if not found:
print("Section \"" + sec + "\", Description \"" + desc + "\":", end=' ')
print("Subsection \"" + s_item["name"] + "\" not found")
bump_taint()
def check_description_in_list(s_item, d_item, sec, desc):
if not "Description" in s_item:
return
if not "Description" in d_item:
print("Section \"" + sec + "\", Description \"" + desc + "\",", end=' ')
print("Field \"" + s_item["field"] + "\": missing description")
bump_taint()
return
check_descriptions(s_item["Description"], d_item["Description"], sec)
def check_descriptions(src_desc, dest_desc, sec):
check_version(src_desc, dest_desc, sec, src_desc["name"])
if not check_fields_match(sec, src_desc["name"], dest_desc["name"]):
print("Section \"" + sec + "\":", end=' ')
print("Description \"" + src_desc["name"] + "\"", end=' ')
print("missing, got \"" + dest_desc["name"] + "\" instead; skipping")
bump_taint()
return
for f in src_desc:
if not f in dest_desc:
print("Section \"" + sec + "\"", end=' ')
print("Description \"" + src_desc["name"] + "\":", end=' ')
print("Entry \"" + f + "\" missing")
bump_taint()
continue
if f == 'Fields':
check_fields(src_desc[f], dest_desc[f], src_desc["name"], sec)
if f == 'Subsections':
check_subsections(src_desc[f], dest_desc[f], src_desc["name"], sec)
def check_version(s, d, sec, desc=None):
if s["version_id"] > d["version_id"]:
print("Section \"" + sec + "\"", end=' ')
if desc:
print("Description \"" + desc + "\":", end=' ')
print("version error:", s["version_id"], ">", d["version_id"])
bump_taint()
if not "minimum_version_id" in d:
return
if s["version_id"] < d["minimum_version_id"]:
print("Section \"" + sec + "\"", end=' ')
if desc:
print("Description \"" + desc + "\":", end=' ')
print("minimum version error:", s["version_id"], "<", end=' ')
print(d["minimum_version_id"])
bump_taint()
def check_size(s, d, sec, desc=None, field=None):
if s["size"] != d["size"]:
print("Section \"" + sec + "\"", end=' ')
if desc:
print("Description \"" + desc + "\"", end=' ')
if field:
print("Field \"" + field + "\"", end=' ')
print("size mismatch:", s["size"], ",", d["size"])
bump_taint()
def check_machine_type(s, d):
if s["Name"] != d["Name"]:
print("Warning: checking incompatible machine types:", end=' ')
print("\"" + s["Name"] + "\", \"" + d["Name"] + "\"")
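# Illustrative shape of the -dump-vmstate JSON this script walks (a trimmed,
# hand-written sketch, not an authoritative schema): top-level sections keyed
# by name, each carrying version fields and an optional "Description" tree
# whose "Fields" entries have at least "field", "version_id" and "size".
_EXAMPLE_DUMP = {
    "vmschkmachine": {"Name": "pc-i440fx-2.0"},
    "e1000": {
        "version_id": 2,
        "minimum_version_id": 1,
        "Description": {
            "name": "e1000",
            "version_id": 2,
            "minimum_version_id": 1,
            "Fields": [
                {"field": "parent_obj", "version_id": 0, "size": 0},
            ],
        },
    },
}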
def main():
    help_text = ("Parse JSON-formatted vmstate dumps from QEMU in files "
                 "SRC and DEST. Checks whether migration from SRC to DEST "
                 "QEMU versions would break based on the VMSTATE information "
                 "contained within the JSON outputs. The JSON output is "
                 "created from a QEMU invocation with the -dump-vmstate "
                 "parameter and a filename argument to it. Other parameters "
                 "to QEMU do not matter, except the -M (machine type) "
                 "parameter.")
parser = argparse.ArgumentParser(description=help_text)
parser.add_argument('-s', '--src', type=argparse.FileType('r'),
required=True,
help='json dump from src qemu')
parser.add_argument('-d', '--dest', type=argparse.FileType('r'),
required=True,
help='json dump from dest qemu')
parser.add_argument('--reverse', required=False, default=False,
action='store_true',
help='reverse the direction')
args = parser.parse_args()
src_data = json.load(args.src)
dest_data = json.load(args.dest)
args.src.close()
args.dest.close()
if args.reverse:
temp = src_data
src_data = dest_data
dest_data = temp
for sec in src_data:
dest_sec = sec
if not dest_sec in dest_data:
# Either the section name got changed, or the section
# doesn't exist in dest.
dest_sec = get_changed_sec_name(sec)
if not dest_sec in dest_data:
print("Section \"" + sec + "\" does not exist in dest")
bump_taint()
continue
s = src_data[sec]
d = dest_data[dest_sec]
if sec == "vmschkmachine":
check_machine_type(s, d)
continue
check_version(s, d, sec)
for entry in s:
if not entry in d:
print("Section \"" + sec + "\": Entry \"" + entry + "\"", end=' ')
print("missing")
bump_taint()
continue
if entry == "Description":
check_descriptions(s[entry], d[entry], sec)
return taint
if __name__ == '__main__':
sys.exit(main())
| 16,102 | 36.275463 | 418 | py |
qemu | qemu-master/scripts/modinfo-generate.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
def print_array(name, values):
if len(values) == 0:
return
    items = ", ".join(values)
    print("    .%s = ((const char*[]){ %s, NULL })," % (name, items))
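# e.g. (illustrative): print_array('deps', ['"a"', '"b"']) prints exactly
#     .deps = ((const char*[]){ "a", "b", NULL }),
# and an empty value list prints nothing.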
def parse_line(line):
kind = ""
data = ""
get_kind = False
get_data = False
for item in line.split():
if item == "MODINFO_START":
get_kind = True
continue
if item.startswith("MODINFO_END"):
get_data = False
continue
if get_kind:
kind = item
get_kind = False
get_data = True
continue
if get_data:
data += " " + item
continue
return (kind, data)
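# Illustrative call (the sample line is an assumption about the .modinfo
# text format, shown only to document parse_line()):
def _example_parse_line():
    kind, data = parse_line('MODINFO_START dep "hw-display-qxl" MODINFO_END')
    assert kind == 'dep'
    assert data.strip() == '"hw-display-qxl"'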
def generate(name, lines, enabled):
arch = ""
objs = []
deps = []
opts = []
for line in lines:
if line.find("MODINFO_START") != -1:
(kind, data) = parse_line(line)
if kind == 'obj':
objs.append(data)
elif kind == 'dep':
deps.append(data)
elif kind == 'opts':
opts.append(data)
elif kind == 'arch':
                arch = data
elif kind == 'kconfig':
# don't add a module which dependency is not enabled
# in kconfig
if data.strip() not in enabled:
print(" /* module {} isn't enabled in Kconfig. */"
.format(data.strip()))
print("/* },{ */")
return None
else:
print("unknown:", kind)
exit(1)
print(" .name = \"%s\"," % name)
if arch != "":
print(" .arch = %s," % arch)
print_array("objs", objs)
print_array("deps", deps)
print_array("opts", opts)
print("},{")
return {dep.strip('" ') for dep in deps}
def print_pre():
print("/* generated by scripts/modinfo-generate.py */")
print("#include \"qemu/osdep.h\"")
print("#include \"qemu/module.h\"")
print("const QemuModinfo qemu_modinfo[] = {{")
def print_post():
print(" /* end of list */")
print("}};")
def main(args):
if len(args) < 3 or args[0] != '--devices':
print('Expected: modinfo-generate.py --devices '
'config-device.mak [modinfo files]', file=sys.stderr)
exit(1)
# get all devices enabled in kconfig, from *-config-device.mak
enabled = set()
with open(args[1]) as file:
for line in file.readlines():
config = line.split('=')
if config[1].rstrip() == 'y':
enabled.add(config[0][7:]) # remove CONFIG_
deps = set()
modules = set()
print_pre()
for modinfo in args[2:]:
with open(modinfo) as f:
lines = f.readlines()
print(" /* %s */" % modinfo)
(basename, _) = os.path.splitext(modinfo)
moddeps = generate(basename, lines, enabled)
if moddeps is not None:
modules.add(basename)
deps.update(moddeps)
print_post()
error = False
for dep in deps.difference(modules):
print("Dependency {} cannot be satisfied".format(dep),
file=sys.stderr)
error = True
if error:
exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
| 3,382 | 26.958678 | 73 | py |
qemu | qemu-master/scripts/qemugdb/mtree.py | #
# GDB debugging support
#
# Copyright 2012 Red Hat, Inc. and/or its affiliates
#
# Authors:
# Avi Kivity <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.
# 'qemu mtree' -- display the memory hierarchy
import gdb
def isnull(ptr):
return ptr == gdb.Value(0).cast(ptr.type)
def int128(p):
'''Read an Int128 type to a python integer.
QEMU can be built with native Int128 support so we need to detect
if the value is a structure or the native type.
'''
if p.type.code == gdb.TYPE_CODE_STRUCT:
return int(p['lo']) + (int(p['hi']) << 64)
else:
return int(("%s" % p), 16)
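# Illustrative decode (a sketch of the struct branch above; no gdb needed):
# hi=1, lo=2 represents (1 << 64) + 2 == 18446744073709551618.
def _int128_from_parts(lo, hi):
    return lo + (hi << 64)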
class MtreeCommand(gdb.Command):
'''Display the memory tree hierarchy'''
def __init__(self):
gdb.Command.__init__(self, 'qemu mtree', gdb.COMMAND_DATA,
gdb.COMPLETE_NONE)
self.queue = []
def invoke(self, arg, from_tty):
self.seen = set()
self.queue_root('address_space_memory')
self.queue_root('address_space_io')
self.process_queue()
def queue_root(self, varname):
ptr = gdb.parse_and_eval(varname)['root']
self.queue.append(ptr)
def process_queue(self):
while self.queue:
ptr = self.queue.pop(0)
if int(ptr) in self.seen:
continue
self.print_item(ptr)
def print_item(self, ptr, offset = gdb.Value(0), level = 0):
self.seen.add(int(ptr))
addr = ptr['addr']
addr += offset
size = int128(ptr['size'])
alias = ptr['alias']
klass = ''
if not isnull(alias):
klass = ' (alias)'
elif not isnull(ptr['ops']):
klass = ' (I/O)'
elif bool(ptr['ram']):
klass = ' (RAM)'
gdb.write('%s%016x-%016x %s%s (@ %s)\n'
% (' ' * level,
int(addr),
int(addr + (size - 1)),
ptr['name'].string(),
klass,
ptr,
),
gdb.STDOUT)
if not isnull(alias):
gdb.write('%s alias: %s@%016x (@ %s)\n' %
(' ' * level,
alias['name'].string(),
int(ptr['alias_offset']),
alias,
),
gdb.STDOUT)
self.queue.append(alias)
subregion = ptr['subregions']['tqh_first']
level += 1
while not isnull(subregion):
self.print_item(subregion, addr, level)
subregion = subregion['subregions_link']['tqe_next']
| 2,741 | 30.883721 | 69 | py |
qemu | qemu-master/scripts/qemugdb/tcg.py | # -*- coding: utf-8 -*-
#
# GDB debugging support, TCG status
#
# Copyright 2016 Linaro Ltd
#
# Authors:
# Alex Bennée <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.
# 'qemu tcg-lock-status' -- display the TCG lock status across threads
import gdb
class TCGLockStatusCommand(gdb.Command):
'''Display TCG Execution Status'''
def __init__(self):
gdb.Command.__init__(self, 'qemu tcg-lock-status', gdb.COMMAND_DATA,
gdb.COMPLETE_NONE)
def invoke(self, arg, from_tty):
gdb.write("Thread, BQL (iothread_mutex), Replay, Blocked?\n")
for thread in gdb.inferiors()[0].threads():
thread.switch()
iothread = gdb.parse_and_eval("iothread_locked")
replay = gdb.parse_and_eval("replay_locked")
frame = gdb.selected_frame()
if frame.name() == "__lll_lock_wait":
frame.older().select()
mutex = gdb.parse_and_eval("mutex")
owner = gdb.parse_and_eval("mutex->__data.__owner")
blocked = ("__lll_lock_wait waiting on %s from %d" %
(mutex, owner))
else:
blocked = "not blocked"
gdb.write("%d/%d, %s, %s, %s\n" % (thread.num, thread.ptid[1],
iothread, replay, blocked))
| 1,468 | 33.162791 | 76 | py |
qemu | qemu-master/scripts/qemugdb/timers.py | # -*- coding: utf-8 -*-
# GDB debugging support
#
# Copyright 2017 Linaro Ltd
#
# Author: Alex Bennée <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
#
# SPDX-License-Identifier: GPL-2.0-or-later
# 'qemu timers' -- display the current timerlists
import gdb
class TimersCommand(gdb.Command):
'''Display the current QEMU timers'''
def __init__(self):
'Register the class as a gdb command'
gdb.Command.__init__(self, 'qemu timers', gdb.COMMAND_DATA,
gdb.COMPLETE_NONE)
def dump_timers(self, timer):
"Follow a timer and recursively dump each one in the list."
# timer should be of type QemuTimer
gdb.write(" timer %s/%s (cb:%s,opq:%s)\n" % (
timer['expire_time'],
timer['scale'],
timer['cb'],
timer['opaque']))
if int(timer['next']) > 0:
self.dump_timers(timer['next'])
def process_timerlist(self, tlist, ttype):
gdb.write("Processing %s timers\n" % (ttype))
gdb.write(" clock %s is enabled:%s, last:%s\n" % (
tlist['clock']['type'],
tlist['clock']['enabled'],
tlist['clock']['last']))
if int(tlist['active_timers']) > 0:
self.dump_timers(tlist['active_timers'])
def invoke(self, arg, from_tty):
'Run the command'
main_timers = gdb.parse_and_eval("main_loop_tlg")
        # This will break if QEMUClockType in timer.h is redefined
self.process_timerlist(main_timers['tl'][0], "Realtime")
self.process_timerlist(main_timers['tl'][1], "Virtual")
self.process_timerlist(main_timers['tl'][2], "Host")
self.process_timerlist(main_timers['tl'][3], "Virtual RT")
| 1,849 | 31.45614 | 75 | py |
qemu | qemu-master/scripts/qemugdb/__init__.py | #
# GDB debugging support
#
# Copyright (c) 2015 Linaro Ltd
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see
# <http://www.gnu.org/licenses/gpl-2.0.html>
#
# We don't need to do anything in our init file currently.
"""
Support routines for debugging QEMU under GDB
"""
__license__ = "GPL version 2 or (at your option) any later version"
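# Usage sketch (illustrative): these helpers are meant to be sourced from a
# gdb session attached to QEMU; the loader script name below is an
# assumption, it is not defined in this package:
#   (gdb) source scripts/qemu-gdb.py
#   (gdb) qemu mtree
#   (gdb) qemu coroutine <coroutine-pointer>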
| 897 | 31.071429 | 70 | py |
qemu | qemu-master/scripts/qemugdb/coroutine.py | #
# GDB debugging support
#
# Copyright 2012 Red Hat, Inc. and/or its affiliates
#
# Authors:
# Avi Kivity <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2
# or later. See the COPYING file in the top-level directory.
import gdb
VOID_PTR = gdb.lookup_type('void').pointer()
def get_fs_base():
'''Fetch %fs base value using arch_prctl(ARCH_GET_FS). This is
pthread_self().'''
# %rsp - 120 is scratch space according to the SystemV ABI
old = gdb.parse_and_eval('*(uint64_t*)($rsp - 120)')
gdb.execute('call (int)arch_prctl(0x1003, $rsp - 120)', False, True)
fs_base = gdb.parse_and_eval('*(uint64_t*)($rsp - 120)')
gdb.execute('set *(uint64_t*)($rsp - 120) = %s' % old, False, True)
return fs_base
def pthread_self():
'''Fetch pthread_self() from the glibc start_thread function.'''
f = gdb.newest_frame()
while f.name() != 'start_thread':
f = f.older()
if f is None:
return get_fs_base()
try:
return f.read_var("arg")
except ValueError:
return get_fs_base()
def get_glibc_pointer_guard():
'''Fetch glibc pointer guard value'''
fs_base = pthread_self()
return gdb.parse_and_eval('*(uint64_t*)((uint64_t)%s + 0x30)' % fs_base)
def glibc_ptr_demangle(val, pointer_guard):
'''Undo effect of glibc's PTR_MANGLE()'''
return gdb.parse_and_eval('(((uint64_t)%s >> 0x11) | ((uint64_t)%s << (64 - 0x11))) ^ (uint64_t)%s' % (val, val, pointer_guard))
def get_jmpbuf_regs(jmpbuf):
JB_RBX = 0
JB_RBP = 1
JB_R12 = 2
JB_R13 = 3
JB_R14 = 4
JB_R15 = 5
JB_RSP = 6
JB_PC = 7
pointer_guard = get_glibc_pointer_guard()
return {'rbx': jmpbuf[JB_RBX],
'rbp': glibc_ptr_demangle(jmpbuf[JB_RBP], pointer_guard),
'rsp': glibc_ptr_demangle(jmpbuf[JB_RSP], pointer_guard),
'r12': jmpbuf[JB_R12],
'r13': jmpbuf[JB_R13],
'r14': jmpbuf[JB_R14],
'r15': jmpbuf[JB_R15],
'rip': glibc_ptr_demangle(jmpbuf[JB_PC], pointer_guard) }
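# Illustrative round trip (plain Python, no gdb; the guard value is made up):
# glibc's PTR_MANGLE XORs a pointer with the per-thread guard and rotates
# left by 0x11 bits; glibc_ptr_demangle() above inverts that with a 64-bit
# rotate-right followed by the same XOR.
def _example_ptr_mangle_roundtrip():
    mask = (1 << 64) - 1
    guard = 0x123456789ABCDEF0  # hypothetical pointer guard
    ptr = 0x00007F0000001234
    x = ptr ^ guard
    mangled = ((x << 0x11) | (x >> (64 - 0x11))) & mask
    demangled = (((mangled >> 0x11) | (mangled << (64 - 0x11))) & mask) ^ guard
    assert demangled == ptr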
def bt_jmpbuf(jmpbuf):
'''Backtrace a jmpbuf'''
regs = get_jmpbuf_regs(jmpbuf)
old = dict()
# remember current stack frame and select the topmost
# so that register modifications don't wreck it
selected_frame = gdb.selected_frame()
gdb.newest_frame().select()
for i in regs:
old[i] = gdb.parse_and_eval('(uint64_t)$%s' % i)
for i in regs:
gdb.execute('set $%s = %s' % (i, regs[i]))
gdb.execute('bt')
for i in regs:
gdb.execute('set $%s = %s' % (i, old[i]))
selected_frame.select()
def co_cast(co):
return co.cast(gdb.lookup_type('CoroutineUContext').pointer())
def coroutine_to_jmpbuf(co):
coroutine_pointer = co_cast(co)
return coroutine_pointer['env']['__jmpbuf']
class CoroutineCommand(gdb.Command):
'''Display coroutine backtrace'''
def __init__(self):
gdb.Command.__init__(self, 'qemu coroutine', gdb.COMMAND_DATA,
gdb.COMPLETE_NONE)
def invoke(self, arg, from_tty):
argv = gdb.string_to_argv(arg)
if len(argv) != 1:
gdb.write('usage: qemu coroutine <coroutine-pointer>\n')
return
bt_jmpbuf(coroutine_to_jmpbuf(gdb.parse_and_eval(argv[0])))
class CoroutineBt(gdb.Command):
'''Display backtrace including coroutine switches'''
def __init__(self):
gdb.Command.__init__(self, 'qemu bt', gdb.COMMAND_STACK,
gdb.COMPLETE_NONE)
def invoke(self, arg, from_tty):
gdb.execute("bt")
if gdb.parse_and_eval("qemu_in_coroutine()") == False:
return
co_ptr = gdb.parse_and_eval("qemu_coroutine_self()")
while True:
co = co_cast(co_ptr)
co_ptr = co["base"]["caller"]
if co_ptr == 0:
break
gdb.write("Coroutine at " + str(co_ptr) + ":\n")
bt_jmpbuf(coroutine_to_jmpbuf(co_ptr))
class CoroutineSPFunction(gdb.Function):
def __init__(self):
gdb.Function.__init__(self, 'qemu_coroutine_sp')
def invoke(self, addr):
return get_jmpbuf_regs(coroutine_to_jmpbuf(addr))['rsp'].cast(VOID_PTR)
class CoroutinePCFunction(gdb.Function):
def __init__(self):
gdb.Function.__init__(self, 'qemu_coroutine_pc')
def invoke(self, addr):
return get_jmpbuf_regs(coroutine_to_jmpbuf(addr))['rip'].cast(VOID_PTR)
| 4,464 | 28.966443 | 132 | py |
qemu | qemu-master/scripts/qemugdb/aio.py | #
# GDB debugging support: aio/iohandler debug
#
# Copyright (c) 2015 Red Hat, Inc.
#
# Author: Dr. David Alan Gilbert <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.
#
import gdb
from qemugdb import coroutine
def isnull(ptr):
return ptr == gdb.Value(0).cast(ptr.type)
def dump_aiocontext(context, verbose):
'''Display a dump and backtrace for an aiocontext'''
cur = context['aio_handlers']['lh_first']
# Get pointers to functions we're going to process specially
sym_fd_coroutine_enter = gdb.parse_and_eval('fd_coroutine_enter')
while not isnull(cur):
entry = cur.dereference()
gdb.write('----\n%s\n' % entry)
if verbose and cur['io_read'] == sym_fd_coroutine_enter:
coptr = (cur['opaque'].cast(gdb.lookup_type('FDYieldUntilData').pointer()))['co']
coptr = coptr.cast(gdb.lookup_type('CoroutineUContext').pointer())
coroutine.bt_jmpbuf(coptr['env']['__jmpbuf'])
cur = cur['node']['le_next'];
gdb.write('----\n')
class HandlersCommand(gdb.Command):
'''Display aio handlers'''
def __init__(self):
gdb.Command.__init__(self, 'qemu handlers', gdb.COMMAND_DATA,
gdb.COMPLETE_NONE)
def invoke(self, arg, from_tty):
verbose = False
argv = gdb.string_to_argv(arg)
if len(argv) > 0 and argv[0] == '--verbose':
verbose = True
argv.pop(0)
if len(argv) > 1:
gdb.write('usage: qemu handlers [--verbose] [handler]\n')
return
if len(argv) == 1:
handlers_name = argv[0]
else:
handlers_name = 'qemu_aio_context'
dump_aiocontext(gdb.parse_and_eval(handlers_name), verbose)
| 1,843 | 30.793103 | 93 | py |
qemu | qemu-master/scripts/modules/module_block.py | #!/usr/bin/env python3
#
# Module information generator
#
# Copyright Red Hat, Inc. 2015 - 2016
#
# Authors:
# Marc Mari <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
import sys
import os
def get_string_struct(line):
data = line.split()
# data[0] -> struct element name
# data[1] -> =
# data[2] -> value
return data[2].replace('"', '')[:-1]
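# Illustrative call (a sketch mirroring the BlockDriver field lines this
# script scans): the trailing comma and the quotes are stripped.
def _example_get_string_struct():
    assert get_string_struct('    .format_name = "qcow2",') == 'qcow2'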
def add_module(fheader, library, format_name, protocol_name):
lines = []
lines.append('.library_name = "' + library + '",')
if format_name != "":
lines.append('.format_name = "' + format_name + '",')
if protocol_name != "":
lines.append('.protocol_name = "' + protocol_name + '",')
text = '\n '.join(lines)
fheader.write('\n {\n ' + text + '\n },')
def process_file(fheader, filename):
# This parser assumes the coding style rules are being followed
with open(filename, "r") as cfile:
found_start = False
library, _ = os.path.splitext(os.path.basename(filename))
for line in cfile:
if found_start:
line = line.replace('\n', '')
if line.find(".format_name") != -1:
format_name = get_string_struct(line)
elif line.find(".protocol_name") != -1:
protocol_name = get_string_struct(line)
elif line == "};":
add_module(fheader, library, format_name, protocol_name)
found_start = False
elif line.find("static BlockDriver") != -1:
found_start = True
format_name = ""
protocol_name = ""
def print_top(fheader):
fheader.write('''/* AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* QEMU Block Module Infrastructure
*
* Authors:
* Marc Mari <[email protected]>
*/
''')
fheader.write('''#ifndef QEMU_MODULE_BLOCK_H
#define QEMU_MODULE_BLOCK_H
static const struct {
const char *format_name;
const char *protocol_name;
const char *library_name;
} block_driver_modules[] = {''')
def print_bottom(fheader):
fheader.write('''
};
#endif
''')
if __name__ == '__main__':
# First argument: output file
# All other arguments: modules source files (.c)
output_file = sys.argv[1]
with open(output_file, 'w') as fheader:
print_top(fheader)
for filename in sys.argv[2:]:
if os.path.isfile(filename):
process_file(fheader, filename)
else:
print("File " + filename + " does not exist.", file=sys.stderr)
sys.exit(1)
print_bottom(fheader)
sys.exit(0)
| 2,751 | 26.52 | 79 | py |
qemu | qemu-master/scripts/codeconverter/converter.py | #!/usr/bin/env python3
# QEMU library
#
# Copyright (C) 2020 Red Hat Inc.
#
# Authors:
# Eduardo Habkost <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
#
import sys
import argparse
import os
import os.path
import re
from typing import *
from codeconverter.patching import FileInfo, match_class_dict, FileList
import codeconverter.qom_macros
from codeconverter.qom_type_info import TI_FIELDS, type_infos, TypeInfoVar
import logging
logger = logging.getLogger(__name__)
DBG = logger.debug
INFO = logger.info
WARN = logger.warning
def process_all_files(parser: argparse.ArgumentParser, args: argparse.Namespace) -> None:
DBG("filenames: %r", args.filenames)
files = FileList()
files.extend(FileInfo(files, fn, args.force) for fn in args.filenames)
for f in files:
DBG('opening %s', f.filename)
f.load()
if args.table:
fields = ['filename', 'variable_name'] + TI_FIELDS
print('\t'.join(fields))
for f in files:
for t in f.matches_of_type(TypeInfoVar):
assert isinstance(t, TypeInfoVar)
values = [f.filename, t.name] + \
[t.get_raw_initializer_value(f)
for f in TI_FIELDS]
DBG('values: %r', values)
assert all('\t' not in v for v in values)
values = [v.replace('\n', ' ').replace('"', '') for v in values]
print('\t'.join(values))
return
match_classes = match_class_dict()
if not args.patterns:
parser.error("--pattern is required")
classes = [p for arg in args.patterns
for p in re.split(r'[\s,]', arg)
if p.strip()]
for c in classes:
if c not in match_classes \
or not match_classes[c].regexp:
print("Invalid pattern name: %s" % (c), file=sys.stderr)
print("Valid patterns:", file=sys.stderr)
print(PATTERN_HELP, file=sys.stderr)
sys.exit(1)
DBG("classes: %r", classes)
files.patch_content(max_passes=args.passes, class_names=classes)
for f in files:
#alltypes.extend(f.type_infos)
#full_types.extend(f.full_types())
if not args.dry_run:
if args.inplace:
f.patch_inplace()
if args.diff:
f.show_diff()
if not args.diff and not args.inplace:
f.write_to_file(sys.stdout)
sys.stdout.flush()
PATTERN_HELP = ('\n'.join(" %s: %s" % (n, str(c.__doc__).strip())
for (n,c) in sorted(match_class_dict().items())
if c.has_replacement_rule()))
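# Usage sketch (illustrative; flags as declared in main() below, the file
# paths are placeholders):
#   converter.py --pattern TypeInfoVar --diff hw/arm/pxa2xx.c
#   converter.py --pattern TypeInfoVar --table hw/arm/*.c > typeinfo.tsv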
def main() -> None:
p = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
p.add_argument('filenames', nargs='+')
p.add_argument('--passes', type=int, default=1,
help="Number of passes (0 means unlimited)")
p.add_argument('--pattern', required=True, action='append',
default=[], dest='patterns',
help="Pattern to scan for")
p.add_argument('--inplace', '-i', action='store_true',
help="Patch file in place")
p.add_argument('--dry-run', action='store_true',
help="Don't patch files or print patching results")
p.add_argument('--force', '-f', action='store_true',
help="Perform changes even if not completely safe")
p.add_argument('--diff', action='store_true',
help="Print diff output on stdout")
p.add_argument('--debug', '-d', action='store_true',
help="Enable debugging")
p.add_argument('--verbose', '-v', action='store_true',
help="Verbose logging on stderr")
p.add_argument('--table', action='store_true',
help="Print CSV table of type information")
p.add_argument_group("Valid pattern names",
PATTERN_HELP)
args = p.parse_args()
loglevel = (logging.DEBUG if args.debug
else logging.INFO if args.verbose
else logging.WARN)
logging.basicConfig(format='%(levelname)s: %(message)s', level=loglevel)
DBG("args: %r", args)
process_all_files(p, args)
if __name__ == '__main__':
main() | 4,378 | 34.601626 | 89 | py |
qemu | qemu-master/scripts/codeconverter/codeconverter/patching.py | # Copyright (C) 2020 Red Hat Inc.
#
# Authors:
# Eduardo Habkost <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
from typing import IO, Match, NamedTuple, Optional, Literal, Iterable, Type, Dict, List, Any, TypeVar, NewType, Tuple, Union
from pathlib import Path
from itertools import chain
from tempfile import NamedTemporaryFile
import os
import re
import subprocess
from io import StringIO
import logging
logger = logging.getLogger(__name__)
DBG = logger.debug
INFO = logger.info
WARN = logger.warning
ERROR = logger.error
from .utils import *
T = TypeVar('T')
class Patch(NamedTuple):
# start inside file.original_content
start: int
# end position inside file.original_content
end: int
# replacement string for file.original_content[start:end]
replacement: str
IdentifierType = Literal['type', 'symbol', 'include', 'constant']
class RequiredIdentifier(NamedTuple):
type: IdentifierType
name: str
class FileMatch:
"""Base class for regex matches
Subclasses just need to set the `regexp` class attribute
"""
regexp: Optional[str] = None
def __init__(self, f: 'FileInfo', m: Match) -> None:
self.file: 'FileInfo' = f
self.match: Match[str] = m
@property
def name(self) -> str:
if 'name' not in self.match.groupdict():
return '[no name]'
return self.group('name')
@classmethod
def compiled_re(klass):
return re.compile(klass.regexp, re.MULTILINE)
def start(self) -> int:
return self.match.start()
def end(self) -> int:
return self.match.end()
def line_col(self) -> LineAndColumn:
return self.file.line_col(self.start())
def group(self, group: Union[int, str]) -> str:
return self.match.group(group)
def getgroup(self, group: str) -> Optional[str]:
if group not in self.match.groupdict():
return None
return self.match.group(group)
def log(self, level, fmt, *args) -> None:
pos = self.line_col()
logger.log(level, '%s:%d:%d: '+fmt, self.file.filename, pos.line, pos.col, *args)
def debug(self, fmt, *args) -> None:
self.log(logging.DEBUG, fmt, *args)
def info(self, fmt, *args) -> None:
self.log(logging.INFO, fmt, *args)
def warn(self, fmt, *args) -> None:
self.log(logging.WARNING, fmt, *args)
def error(self, fmt, *args) -> None:
self.log(logging.ERROR, fmt, *args)
def sub(self, original: str, replacement: str) -> str:
"""Replace content
XXX: this won't use the match position, but will just
replace all strings that look like the original match.
This should be enough for all the patterns used in this
script.
"""
return original.replace(self.group(0), replacement)
def sanity_check(self) -> None:
"""Sanity check match, and print warnings if necessary"""
pass
def replacement(self) -> Optional[str]:
"""Return replacement text for pattern, to use new code conventions"""
return None
def make_patch(self, replacement: str) -> 'Patch':
"""Make patch replacing the content of this match"""
return Patch(self.start(), self.end(), replacement)
def make_subpatch(self, start: int, end: int, replacement: str) -> 'Patch':
return Patch(self.start() + start, self.start() + end, replacement)
def make_removal_patch(self) -> 'Patch':
"""Make patch removing contents of match completely"""
return self.make_patch('')
def append(self, s: str) -> 'Patch':
"""Make patch appending string after this match"""
return Patch(self.end(), self.end(), s)
def prepend(self, s: str) -> 'Patch':
"""Make patch prepending string before this match"""
return Patch(self.start(), self.start(), s)
def gen_patches(self) -> Iterable['Patch']:
"""Patch source code contents to use new code patterns"""
replacement = self.replacement()
if replacement is not None:
yield self.make_patch(replacement)
@classmethod
def has_replacement_rule(klass) -> bool:
return (klass.gen_patches is not FileMatch.gen_patches
or klass.replacement is not FileMatch.replacement)
def contains(self, other: 'FileMatch') -> bool:
return other.start() >= self.start() and other.end() <= self.end()
def __repr__(self) -> str:
start = self.file.line_col(self.start())
end = self.file.line_col(self.end() - 1)
return '<%s %s at %d:%d-%d:%d: %r>' % (self.__class__.__name__,
self.name,
start.line, start.col,
end.line, end.col, self.group(0)[:100])
def required_identifiers(self) -> Iterable[RequiredIdentifier]:
"""Can be implemented by subclasses to keep track of identifier references
This method will be used by the code that moves declarations around the file,
to make sure we find the right spot for them.
"""
raise NotImplementedError()
def provided_identifiers(self) -> Iterable[RequiredIdentifier]:
"""Can be implemented by subclasses to keep track of identifier references
This method will be used by the code that moves declarations around the file,
to make sure we find the right spot for them.
"""
raise NotImplementedError()
@classmethod
def finditer(klass, content: str, pos=0, endpos=-1) -> Iterable[Match]:
"""Helper for re.finditer()"""
if endpos >= 0:
content = content[:endpos]
return klass.compiled_re().finditer(content, pos)
@classmethod
def domatch(klass, content: str, pos=0, endpos=-1) -> Optional[Match]:
"""Helper for re.match()"""
if endpos >= 0:
content = content[:endpos]
return klass.compiled_re().match(content, pos)
def group_finditer(self, klass: Type['FileMatch'], group: Union[str, int]) -> Iterable['FileMatch']:
assert self.file.original_content
return (klass(self.file, m)
for m in klass.finditer(self.file.original_content,
self.match.start(group),
self.match.end(group)))
def try_group_match(self, klass: Type['FileMatch'], group: Union[str, int]) -> Optional['FileMatch']:
assert self.file.original_content
m = klass.domatch(self.file.original_content,
self.match.start(group),
self.match.end(group))
if not m:
return None
else:
return klass(self.file, m)
def group_match(self, group: Union[str, int]) -> 'FileMatch':
m = self.try_group_match(FullMatch, group)
assert m
return m
@property
def allfiles(self) -> 'FileList':
return self.file.allfiles
class FullMatch(FileMatch):
"""Regexp that will match all contents of string
Useful when used with group_match()
"""
regexp = r'(?s).*' # (?s) is re.DOTALL
def all_subclasses(c: Type[FileMatch]) -> Iterable[Type[FileMatch]]:
for sc in c.__subclasses__():
yield sc
yield from all_subclasses(sc)
def match_class_dict() -> Dict[str, Type[FileMatch]]:
d = dict((t.__name__, t) for t in all_subclasses(FileMatch))
return d
def names(matches: Iterable[FileMatch]) -> Iterable[str]:
return [m.name for m in matches]
class PatchingError(Exception):
pass
class OverLappingPatchesError(PatchingError):
pass
def apply_patches(s: str, patches: Iterable[Patch]) -> str:
"""Apply a sequence of patches to string
>>> apply_patches('abcdefg', [Patch(2,2,'xxx'), Patch(0, 1, 'yy')])
'yybxxxcdefg'
"""
r = StringIO()
last = 0
def patch_sort_key(item: Tuple[int, Patch]) -> Tuple[int, int, int]:
"""Patches are sorted by byte position,
patches at the same byte position are applied in the order
they were generated.
"""
i,p = item
return (p.start, p.end, i)
for i,p in sorted(enumerate(patches), key=patch_sort_key):
DBG("Applying patch at position %d (%s) - %d (%s): %r",
p.start, line_col(s, p.start),
p.end, line_col(s, p.end),
p.replacement)
if last > p.start:
raise OverLappingPatchesError("Overlapping patch at position %d (%s), last patch at %d (%s)" % \
(p.start, line_col(s, p.start), last, line_col(s, last)))
r.write(s[last:p.start])
r.write(p.replacement)
last = p.end
r.write(s[last:])
return r.getvalue()
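# Illustrative failure mode (a sketch): overlapping patches are rejected
# rather than silently producing corrupt output.
def _example_overlapping_patches() -> None:
    try:
        apply_patches('abcdef', [Patch(0, 3, 'x'), Patch(2, 4, 'y')])
    except OverLappingPatchesError:
        pass
    else:
        raise AssertionError('expected OverLappingPatchesError')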
class RegexpScanner:
def __init__(self) -> None:
self.match_index: Dict[Type[Any], List[FileMatch]] = {}
self.match_name_index: Dict[Tuple[Type[Any], str, str], Optional[FileMatch]] = {}
def _matches_of_type(self, klass: Type[Any]) -> Iterable[FileMatch]:
raise NotImplementedError()
def matches_of_type(self, t: Type[T]) -> List[T]:
if t not in self.match_index:
self.match_index[t] = list(self._matches_of_type(t))
return self.match_index[t] # type: ignore
def find_matches(self, t: Type[T], name: str, group: str='name') -> List[T]:
indexkey = (t, name, group)
if indexkey in self.match_name_index:
return self.match_name_index[indexkey] # type: ignore
r: List[T] = []
for m in self.matches_of_type(t):
assert isinstance(m, FileMatch)
if m.getgroup(group) == name:
r.append(m) # type: ignore
self.match_name_index[indexkey] = r # type: ignore
return r
def find_match(self, t: Type[T], name: str, group: str='name') -> Optional[T]:
l = self.find_matches(t, name, group)
if not l:
return None
if len(l) > 1:
            logger.warning("multiple matches found for %r (%s=%r)", t, group, name)
return None
return l[0]
def reset_index(self) -> None:
self.match_index.clear()
self.match_name_index.clear()
class FileInfo(RegexpScanner):
filename: Path
original_content: Optional[str] = None
def __init__(self, files: 'FileList', filename: os.PathLike, force:bool=False) -> None:
super().__init__()
self.allfiles = files
self.filename = Path(filename)
self.patches: List[Patch] = []
self.force = force
def __repr__(self) -> str:
return f'<FileInfo {repr(self.filename)}>'
def filename_matches(self, name: str) -> bool:
nameparts = Path(name).parts
return self.filename.parts[-len(nameparts):] == nameparts
def line_col(self, start: int) -> LineAndColumn:
"""Return line and column for a match object inside original_content"""
return line_col(self.original_content, start)
def _matches_of_type(self, klass: Type[Any]) -> List[FileMatch]:
"""Build FileMatch objects for each match of regexp"""
if not hasattr(klass, 'regexp') or klass.regexp is None:
return []
assert hasattr(klass, 'regexp')
DBG("%s: scanning for %s", self.filename, klass.__name__)
DBG("regexp: %s", klass.regexp)
matches = [klass(self, m) for m in klass.finditer(self.original_content)]
DBG('%s: %d matches found for %s: %s', self.filename, len(matches),
klass.__name__,' '.join(names(matches)))
return matches
def find_match(self, t: Type[T], name: str, group: str='name') -> Optional[T]:
for m in self.matches_of_type(t):
assert isinstance(m, FileMatch)
if m.getgroup(group) == name:
return m # type: ignore
return None
def reset_content(self, s:str):
self.original_content = s
self.patches.clear()
self.reset_index()
self.allfiles.reset_index()
def load(self) -> None:
if self.original_content is not None:
return
with open(self.filename, 'rt') as f:
self.reset_content(f.read())
@property
def all_matches(self) -> Iterable[FileMatch]:
lists = list(self.match_index.values())
return (m for l in lists
for m in l)
def gen_patches(self, matches: List[FileMatch]) -> None:
for m in matches:
DBG("Generating patches for %r", m)
for i,p in enumerate(m.gen_patches()):
DBG("patch %d generated by %r:", i, m)
DBG("replace contents at %s-%s with %r",
self.line_col(p.start), self.line_col(p.end), p.replacement)
self.patches.append(p)
def scan_for_matches(self, class_names: Optional[List[str]]=None) -> Iterable[FileMatch]:
DBG("class names: %r", class_names)
class_dict = match_class_dict()
if class_names is None:
DBG("default class names")
class_names = list(name for name,klass in class_dict.items()
if klass.has_replacement_rule())
DBG("class_names: %r", class_names)
for cn in class_names:
matches = self.matches_of_type(class_dict[cn])
DBG('%d matches found for %s: %s',
len(matches), cn, ' '.join(names(matches)))
yield from matches
def apply_patches(self) -> None:
"""Replace self.original_content after applying patches from self.patches"""
self.reset_content(self.get_patched_content())
def get_patched_content(self) -> str:
assert self.original_content is not None
return apply_patches(self.original_content, self.patches)
def write_to_file(self, f: IO[str]) -> None:
f.write(self.get_patched_content())
def write_to_filename(self, filename: os.PathLike) -> None:
with open(filename, 'wt') as of:
self.write_to_file(of)
def patch_inplace(self) -> None:
newfile = self.filename.with_suffix('.changed')
self.write_to_filename(newfile)
os.rename(newfile, self.filename)
def show_diff(self) -> None:
with NamedTemporaryFile('wt') as f:
self.write_to_file(f)
f.flush()
subprocess.call(['diff', '-u', self.filename, f.name])
def ref(self):
return TypeInfoReference
class FileList(RegexpScanner):
def __init__(self):
super().__init__()
self.files: List[FileInfo] = []
def extend(self, *args, **kwargs):
self.files.extend(*args, **kwargs)
def __iter__(self):
return iter(self.files)
def _matches_of_type(self, klass: Type[Any]) -> Iterable[FileMatch]:
return chain(*(f._matches_of_type(klass) for f in self.files))
def find_file(self, name: str) -> Optional[FileInfo]:
"""Get file with path ending with @name"""
for f in self.files:
if f.filename_matches(name):
return f
else:
return None
def one_pass(self, class_names: List[str]) -> int:
total_patches = 0
for f in self.files:
INFO("Scanning file %s", f.filename)
matches = list(f.scan_for_matches(class_names))
INFO("Generating patches for file %s", f.filename)
f.gen_patches(matches)
total_patches += len(f.patches)
if total_patches:
for f in self.files:
try:
f.apply_patches()
except PatchingError:
logger.exception("%s: failed to patch file", f.filename)
return total_patches
def patch_content(self, max_passes, class_names: List[str]) -> None:
"""Multi-pass content patching loop
We run multiple passes because there are rules that will
delete init functions once they become empty.
"""
passes = 0
total_patches = 0
DBG("max_passes: %r", max_passes)
while not max_passes or max_passes <= 0 or passes < max_passes:
passes += 1
INFO("Running pass: %d", passes)
count = self.one_pass(class_names)
DBG("patch content: pass %d: %d patches generated", passes, count)
total_patches += count
DBG("%d patches applied total in %d passes", total_patches, passes)
| 16,604 | 34.556745 | 124 | py |
qemu | qemu-master/scripts/codeconverter/codeconverter/test_patching.py | # Copyright (C) 2020 Red Hat Inc.
#
# Authors:
# Eduardo Habkost <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
from tempfile import NamedTemporaryFile
from .patching import FileInfo, FileMatch, Patch, FileList
from .regexps import *
class BasicPattern(FileMatch):
regexp = '[abc]{3}'
@property
def name(self):
return self.group(0)
def replacement(self) -> str:
# replace match with the middle character repeated 5 times
return self.group(0)[1].upper()*5
def test_pattern_patching():
of = NamedTemporaryFile('wt')
of.writelines(['one line\n',
'this pattern will be patched: defbbahij\n',
'third line\n',
'another pattern: jihaabfed'])
of.flush()
files = FileList()
f = FileInfo(files, of.name)
f.load()
matches = f.matches_of_type(BasicPattern)
assert len(matches) == 2
p2 = matches[1]
# manually add patch, to see if .append() works:
f.patches.append(p2.append('XXX'))
# apply all patches:
f.gen_patches(matches)
patched = f.get_patched_content()
assert patched == ('one line\n'+
'this pattern will be patched: defBBBBBhij\n'+
'third line\n'+
'another pattern: jihAAAAAXXXfed')
class Function(FileMatch):
regexp = S(r'BEGIN\s+', NAMED('name', RE_IDENTIFIER), r'\n',
r'(.*\n)*?END\n')
class Statement(FileMatch):
regexp = S(r'^\s*', NAMED('name', RE_IDENTIFIER), r'\(\)\n')
def test_container_match():
of = NamedTemporaryFile('wt')
of.writelines(['statement1()\n',
'statement2()\n',
'BEGIN function1\n',
' statement3()\n',
' statement4()\n',
'END\n',
'BEGIN function2\n',
' statement5()\n',
' statement6()\n',
'END\n',
'statement7()\n'])
of.flush()
files = FileList()
f = FileInfo(files, of.name)
f.load()
assert len(f.matches_of_type(Function)) == 2
print(' '.join(m.name for m in f.matches_of_type(Statement)))
assert len(f.matches_of_type(Statement)) == 7
f1 = f.find_match(Function, 'function1')
f2 = f.find_match(Function, 'function2')
st1 = f.find_match(Statement, 'statement1')
st2 = f.find_match(Statement, 'statement2')
st3 = f.find_match(Statement, 'statement3')
st4 = f.find_match(Statement, 'statement4')
st5 = f.find_match(Statement, 'statement5')
st6 = f.find_match(Statement, 'statement6')
st7 = f.find_match(Statement, 'statement7')
assert not f1.contains(st1)
assert not f1.contains(st2)
assert not f1.contains(st2)
assert f1.contains(st3)
assert f1.contains(st4)
assert not f1.contains(st5)
assert not f1.contains(st6)
assert not f1.contains(st7)
assert not f2.contains(st1)
assert not f2.contains(st2)
assert not f2.contains(st2)
assert not f2.contains(st3)
assert not f2.contains(st4)
assert f2.contains(st5)
assert f2.contains(st6)
assert not f2.contains(st7)
| 3,268 | 30.133333 | 71 | py |
qemu | qemu-master/scripts/codeconverter/codeconverter/test_regexps.py | # Copyright (C) 2020 Red Hat Inc.
#
# Authors:
# Eduardo Habkost <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
from .regexps import *
from .qom_macros import *
from .qom_type_info import *
def test_res() -> None:
def fullmatch(regexp, s):
return re.fullmatch(regexp, s, re.MULTILINE)
assert fullmatch(RE_IDENTIFIER, 'sizeof')
assert fullmatch(RE_IDENTIFIER, 'X86CPU')
assert fullmatch(RE_FUN_CALL, 'sizeof(X86CPU)')
assert fullmatch(RE_IDENTIFIER, 'X86_CPU_TYPE_NAME')
assert fullmatch(RE_SIMPLE_VALUE, '"base"')
print(RE_FUN_CALL)
assert fullmatch(RE_FUN_CALL, 'X86_CPU_TYPE_NAME("base")')
print(RE_TI_FIELD_INIT)
assert fullmatch(RE_TI_FIELD_INIT, '.name = X86_CPU_TYPE_NAME("base"),\n')
assert fullmatch(RE_MACRO_CONCAT, 'TYPE_ASPEED_GPIO "-ast2600"')
assert fullmatch(RE_EXPRESSION, 'TYPE_ASPEED_GPIO "-ast2600"')
print(RE_MACRO_DEFINE)
assert re.search(RE_MACRO_DEFINE, r'''
#define OFFSET_CHECK(c) \
do { \
if (!(c)) { \
goto bad_offset; \
} \
} while (0)
''', re.MULTILINE)
print(RE_CHECK_MACRO)
print(CPP_SPACE)
assert not re.match(RE_CHECK_MACRO, r'''
#define OFFSET_CHECK(c) \
do { \
if (!(c)) { \
goto bad_offset; \
} \
} while (0)''', re.MULTILINE)
print(RE_CHECK_MACRO)
assert fullmatch(RE_CHECK_MACRO, r'''#define PCI_DEVICE(obj) \
OBJECT_CHECK(PCIDevice, (obj), TYPE_PCI_DEVICE)
''')
assert fullmatch(RE_CHECK_MACRO, r'''#define COLLIE_MACHINE(obj) \
OBJECT_CHECK(CollieMachineState, obj, TYPE_COLLIE_MACHINE)
''')
print(RE_TYPEINFO_START)
assert re.search(RE_TYPEINFO_START, r'''
cc->open = qmp_chardev_open_file;
}
static const TypeInfo char_file_type_info = {
.name = TYPE_CHARDEV_FILE,
#ifdef _WIN32
.parent = TYPE_CHARDEV_WIN,
''', re.MULTILINE)
assert re.search(RE_TYPEINFO_START, r'''
TypeInfo ti = {
.name = armsse_variants[i].name,
.parent = TYPE_ARMSSE,
.class_init = armsse_class_init,
.class_data = (void *)&armsse_variants[i],
};''', re.MULTILINE)
print(RE_ARRAY_ITEM)
assert fullmatch(RE_ARRAY_ITEM, '{ TYPE_HOTPLUG_HANDLER },')
assert fullmatch(RE_ARRAY_ITEM, '{ TYPE_ACPI_DEVICE_IF },')
assert fullmatch(RE_ARRAY_ITEM, '{ }')
assert fullmatch(RE_ARRAY_CAST, '(InterfaceInfo[])')
assert fullmatch(RE_ARRAY, '''(InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ TYPE_ACPI_DEVICE_IF },
{ }
}''')
print(RE_COMMENT)
assert fullmatch(RE_COMMENT, r'''/* multi-line
* comment
*/''')
print(RE_TI_FIELDS)
assert fullmatch(RE_TI_FIELDS,
r'''/* could be TYPE_SYS_BUS_DEVICE (or LPC etc) */
.parent = TYPE_DEVICE,
''')
assert fullmatch(RE_TI_FIELDS, r'''.name = TYPE_TPM_CRB,
/* could be TYPE_SYS_BUS_DEVICE (or LPC etc) */
.parent = TYPE_DEVICE,
.instance_size = sizeof(CRBState),
.class_init = tpm_crb_class_init,
.interfaces = (InterfaceInfo[]) {
{ TYPE_TPM_IF },
{ }
}
''')
assert fullmatch(RE_TI_FIELDS + SP + RE_COMMENTS,
r'''.name = TYPE_PALM_MISC_GPIO,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(PalmMiscGPIOState),
.instance_init = palm_misc_gpio_init,
/*
* No class init required: device has no internal state so does not
* need to set up reset or vmstate, and has no realize method.
*/''')
print(TypeInfoVar.regexp)
    test_empty = ('static const TypeInfo x86_base_cpu_type_info = {\n'
                  '};\n')
assert fullmatch(TypeInfoVar.regexp, test_empty)
test_simple = r'''
static const TypeInfo x86_base_cpu_type_info = {
.name = X86_CPU_TYPE_NAME("base"),
.parent = TYPE_X86_CPU,
.class_init = x86_cpu_base_class_init,
};
'''
assert re.search(TypeInfoVar.regexp, test_simple, re.MULTILINE)
test_interfaces = r'''
static const TypeInfo acpi_ged_info = {
.name = TYPE_ACPI_GED,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(AcpiGedState),
.instance_init = acpi_ged_initfn,
.class_init = acpi_ged_class_init,
.interfaces = (InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ TYPE_ACPI_DEVICE_IF },
{ }
}
};
'''
assert re.search(TypeInfoVar.regexp, test_interfaces, re.MULTILINE)
test_comments = r'''
static const TypeInfo palm_misc_gpio_info = {
.name = TYPE_PALM_MISC_GPIO,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(PalmMiscGPIOState),
.instance_init = palm_misc_gpio_init,
/*
* No class init required: device has no internal state so does not
* need to set up reset or vmstate, and has no realize method.
*/
};
'''
assert re.search(TypeInfoVar.regexp, test_comments, re.MULTILINE)
test_comments = r'''
static const TypeInfo tpm_crb_info = {
.name = TYPE_TPM_CRB,
/* could be TYPE_SYS_BUS_DEVICE (or LPC etc) */
.parent = TYPE_DEVICE,
.instance_size = sizeof(CRBState),
.class_init = tpm_crb_class_init,
.interfaces = (InterfaceInfo[]) {
{ TYPE_TPM_IF },
{ }
}
};
'''
assert re.search(TypeInfoVar.regexp, test_comments, re.MULTILINE)
def test_struct_re():
print('---')
print(RE_STRUCT_TYPEDEF)
assert re.search(RE_STRUCT_TYPEDEF, r'''
typedef struct TCGState {
AccelState parent_obj;
bool mttcg_enabled;
unsigned long tb_size;
} TCGState;
''', re.MULTILINE)
assert re.search(RE_STRUCT_TYPEDEF, r'''
typedef struct {
ISADevice parent_obj;
QEMUSoundCard card;
uint32_t freq;
uint32_t port;
int ticking[2];
int enabled;
int active;
int bufpos;
#ifdef DEBUG
int64_t exp[2];
#endif
int16_t *mixbuf;
uint64_t dexp[2];
SWVoiceOut *voice;
int left, pos, samples;
QEMUAudioTimeStamp ats;
FM_OPL *opl;
PortioList port_list;
} AdlibState;
''', re.MULTILINE)
false_positive = r'''
typedef struct dma_pagetable_entry {
int32_t frame;
int32_t owner;
} A B C D E;
struct foo {
int x;
} some_variable;
'''
assert not re.search(RE_STRUCT_TYPEDEF, false_positive, re.MULTILINE)
def test_initial_includes():
print(InitialIncludes.regexp)
c = '''
#ifndef HW_FLASH_H
#define HW_FLASH_H
/* NOR flash devices */
#include "qom/object.h"
#include "exec/hwaddr.h"
/* pflash_cfi01.c */
'''
print(repr(list(m.groupdict() for m in InitialIncludes.finditer(c))))
m = InitialIncludes.domatch(c)
assert m
print(repr(m.group(0)))
assert m.group(0).endswith('#include "exec/hwaddr.h"\n')
c = '''#ifndef QEMU_VIRTIO_9P_H
#define QEMU_VIRTIO_9P_H
#include "standard-headers/linux/virtio_9p.h"
#include "hw/virtio/virtio.h"
#include "9p.h"
'''
print(repr(list(m.groupdict() for m in InitialIncludes.finditer(c))))
m = InitialIncludes.domatch(c)
assert m
print(repr(m.group(0)))
assert m.group(0).endswith('#include "9p.h"\n')
c = '''#include "qom/object.h"
/*
* QEMU ES1370 emulation
...
*/
/* #define DEBUG_ES1370 */
/* #define VERBOSE_ES1370 */
#define SILENT_ES1370
#include "qemu/osdep.h"
#include "hw/audio/soundhw.h"
#include "audio/audio.h"
#include "hw/pci/pci.h"
#include "migration/vmstate.h"
#include "qemu/module.h"
#include "sysemu/dma.h"
/* Missing stuff:
SCTRL_P[12](END|ST)INC
'''
print(repr(list(m.groupdict() for m in InitialIncludes.finditer(c))))
m = InitialIncludes.domatch(c)
assert m
print(repr(m.group(0)))
assert m.group(0).endswith('#include "sysemu/dma.h"\n')
| 8,344 | 28.487633 | 79 | py |
qemu | qemu-master/scripts/codeconverter/codeconverter/utils.py | # Copyright (C) 2020 Red Hat Inc.
#
# Authors:
# Eduardo Habkost <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
from typing import *
import logging
logger = logging.getLogger(__name__)
DBG = logger.debug
INFO = logger.info
WARN = logger.warning
T = TypeVar('T')
def opt_compare(a: T, b: T) -> bool:
"""Compare two values, ignoring mismatches if one of them is None"""
return (a is None) or (b is None) or (a == b)
def merge(a: T, b: T) -> T:
"""Merge two values if they matched using opt_compare()"""
assert opt_compare(a, b)
if a is None:
return b
else:
return a
def test_comp_merge():
assert opt_compare(None, 1) == True
assert opt_compare(2, None) == True
assert opt_compare(1, 1) == True
assert opt_compare(1, 2) == False
assert merge(None, None) is None
assert merge(None, 10) == 10
assert merge(10, None) == 10
assert merge(10, 10) == 10
LineNumber = NewType('LineNumber', int)
ColumnNumber = NewType('ColumnNumber', int)
class LineAndColumn(NamedTuple):
line: int
col: int
def __str__(self):
return '%d:%d' % (self.line, self.col)
def line_col(s, position: int) -> LineAndColumn:
"""Return line and column for a char position in string
Character position starts in 0, but lines and columns start in 1.
"""
before = s[:position]
lines = before.split('\n')
line = len(lines)
col = len(lines[-1]) + 1
return LineAndColumn(line, col)
def test_line_col():
assert line_col('abc\ndefg\nhijkl', 0) == (1, 1)
assert line_col('abc\ndefg\nhijkl', 2) == (1, 3)
assert line_col('abc\ndefg\nhijkl', 3) == (1, 4)
assert line_col('abc\ndefg\nhijkl', 4) == (2, 1)
assert line_col('abc\ndefg\nhijkl', 10) == (3, 2)
def not_optional(arg: Optional[T]) -> T:
assert arg is not None
return arg
__all__ = ['not_optional', 'opt_compare', 'merge', 'line_col', 'LineAndColumn'] | 2,020 | 27.069444 | 79 | py |
qemu | qemu-master/scripts/codeconverter/codeconverter/qom_macros.py | # Copyright (C) 2020 Red Hat Inc.
#
# Authors:
# Eduardo Habkost <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
import re
from itertools import chain
from typing import *
from .regexps import *
from .patching import *
from .utils import *
import logging
logger = logging.getLogger(__name__)
DBG = logger.debug
INFO = logger.info
WARN = logger.warning
# simple expressions:
RE_CONSTANT = OR(RE_STRING, RE_NUMBER)
class DefineDirective(FileMatch):
"""Match any #define directive"""
regexp = S(r'^[ \t]*#[ \t]*define', CPP_SPACE, NAMED('name', RE_IDENTIFIER), r'\b')
class ExpressionDefine(FileMatch):
"""Simple #define preprocessor directive for an expression"""
regexp = S(r'^[ \t]*#[ \t]*define', CPP_SPACE, NAMED('name', RE_IDENTIFIER),
CPP_SPACE, NAMED('value', RE_EXPRESSION), r'[ \t]*\n')
def provided_identifiers(self) -> Iterable[RequiredIdentifier]:
yield RequiredIdentifier('constant', self.group('name'))
class ConstantDefine(ExpressionDefine):
"""Simple #define preprocessor directive for a number or string constant"""
regexp = S(r'^[ \t]*#[ \t]*define', CPP_SPACE, NAMED('name', RE_IDENTIFIER),
CPP_SPACE, NAMED('value', RE_CONSTANT), r'[ \t]*\n')
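# Illustrative example (hypothetical name): a line such as
#   #define TYPE_MY_DEVICE "my-device"
# is matched by ConstantDefine with name='TYPE_MY_DEVICE' and
# value='"my-device"'.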
class TypeIdentifiers(NamedTuple):
"""Type names found in type declarations"""
# TYPE_MYDEVICE
typename: Optional[str]
# MYDEVICE
uppercase: Optional[str] = None
# MyDevice
instancetype: Optional[str] = None
# MyDeviceClass
classtype: Optional[str] = None
# my_device
lowercase: Optional[str] = None
def allfields(self):
return tuple(getattr(self, f) for f in self._fields)
def merge(self, other: 'TypeIdentifiers') -> Optional['TypeIdentifiers']:
"""Check if identifiers match, return new identifier with complete list"""
if any(not opt_compare(a, b) for a,b in zip(self, other)):
return None
return TypeIdentifiers(*(merge(a, b) for a,b in zip(self, other)))
def __str__(self) -> str:
values = ((f, getattr(self, f)) for f in self._fields)
s = ', '.join('%s=%s' % (f,v) for f,v in values if v is not None)
return f'{s}'
def check_consistency(self) -> List[str]:
"""Check if identifiers are consistent with each other,
return list of problems (or empty list if everything seems consistent)
"""
r = []
if self.typename is None:
r.append("typename (TYPE_MYDEVICE) is unavailable")
if self.uppercase is None:
r.append("uppercase name is unavailable")
if (self.instancetype is not None
and self.classtype is not None
and self.classtype != f'{self.instancetype}Class'):
r.append("class typedef %s doesn't match instance typedef %s" %
(self.classtype, self.instancetype))
if (self.uppercase is not None
and self.typename is not None
and f'TYPE_{self.uppercase}' != self.typename):
r.append("uppercase name (%s) doesn't match type name (%s)" %
(self.uppercase, self.typename))
return r
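# Illustrative sketch (hypothetical names): a fully consistent identifier set
# passes every check:
#   TypeIdentifiers(typename='TYPE_MY_DEVICE', uppercase='MY_DEVICE',
#                   instancetype='MyDevice', classtype='MyDeviceClass',
#                   lowercase='my_device').check_consistency() == []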
class TypedefMatch(FileMatch):
"""typedef declaration"""
def provided_identifiers(self) -> Iterable[RequiredIdentifier]:
yield RequiredIdentifier('type', self.group('name'))
class SimpleTypedefMatch(TypedefMatch):
"""Simple typedef declaration
(no replacement rules)"""
regexp = S(r'^[ \t]*typedef', SP,
NAMED('typedef_type', RE_TYPE), SP,
NAMED('name', RE_IDENTIFIER), r'\s*;[ \t]*\n')
RE_MACRO_DEFINE = S(r'^[ \t]*#\s*define\s+', NAMED('name', RE_IDENTIFIER),
r'\s*\(\s*', RE_IDENTIFIER, r'\s*\)', CPP_SPACE)
RE_STRUCT_ATTRIBUTE = r'QEMU_PACKED'
# This doesn't parse the struct definitions completely, it just assumes
# the closing brackets are going to be in an unindented line:
RE_FULL_STRUCT = S('struct', SP, M(RE_IDENTIFIER, n='?', name='structname'), SP,
NAMED('body', r'{\n',
# acceptable inside the struct body:
# - lines starting with space or tab
# - empty lines
# - preprocessor directives
# - comments
OR(r'[ \t][^\n]*\n',
r'#[^\n]*\n',
r'\n',
S(r'[ \t]*', RE_COMMENT, r'[ \t]*\n'),
repeat='*?'),
r'}', M(RE_STRUCT_ATTRIBUTE, SP, n='*')))
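# Illustrative example (hypothetical struct): RE_FULL_STRUCT matches
# definitions shaped like
#   struct MyDevice {
#       DeviceState parent_obj;
#       int counter;
#   }
# (optionally followed by QEMU_PACKED); the 'body' group keeps the braces.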
RE_STRUCT_TYPEDEF = S(r'^[ \t]*typedef', SP, RE_FULL_STRUCT, SP,
NAMED('name', RE_IDENTIFIER), r'\s*;[ \t]*\n')
class FullStructTypedefMatch(TypedefMatch):
"""typedef struct [SomeStruct] { ...} SomeType
Will be replaced by separate struct declaration + typedef
"""
regexp = RE_STRUCT_TYPEDEF
def make_structname(self) -> str:
"""Make struct name for struct+typedef split"""
name = self.group('structname')
if not name:
name = self.name
return name
def strip_typedef(self) -> Patch:
"""generate patch that will strip typedef from the struct declartion
The caller is responsible for readding the typedef somewhere else.
"""
name = self.make_structname()
body = self.group('body')
return self.make_patch(f'struct {name} {body};\n')
def make_simple_typedef(self) -> str:
structname = self.make_structname()
name = self.name
return f'typedef struct {structname} {name};\n'
def move_typedef(self, position) -> Iterator[Patch]:
"""Generate patches to move typedef elsewhere"""
yield self.strip_typedef()
yield Patch(position, position, self.make_simple_typedef())
def split_typedef(self) -> Iterator[Patch]:
"""Split into struct definition + typedef in-place"""
yield self.strip_typedef()
yield self.append(self.make_simple_typedef())
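# Illustrative effect of split_typedef() (hypothetical declaration):
#   typedef struct MyDevice {
#       DeviceState parent_obj;
#   } MyDevice;
# becomes
#   struct MyDevice {
#       DeviceState parent_obj;
#   };
#   typedef struct MyDevice MyDevice;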
class StructTypedefSplit(FullStructTypedefMatch):
"""split struct+typedef declaration"""
def gen_patches(self) -> Iterator[Patch]:
if self.group('structname'):
yield from self.split_typedef()
class DuplicatedTypedefs(SimpleTypedefMatch):
"""Delete ALL duplicate typedefs (unsafe)"""
def gen_patches(self) -> Iterable[Patch]:
other_td = [td for td in chain(self.file.matches_of_type(SimpleTypedefMatch),
self.file.matches_of_type(FullStructTypedefMatch))
if td.name == self.name]
DBG("other_td: %r", other_td)
if any(td.start() < self.start() for td in other_td):
# patch only if handling the first typedef
return
for td in other_td:
if isinstance(td, SimpleTypedefMatch):
DBG("other td: %r", td.match.groupdict())
if td.group('typedef_type') != self.group('typedef_type'):
yield td.make_removal_patch()
elif isinstance(td, FullStructTypedefMatch):
DBG("other td: %r", td.match.groupdict())
if self.group('typedef_type') == 'struct '+td.group('structname'):
yield td.strip_typedef()
class QOMDuplicatedTypedefs(DuplicatedTypedefs):
"""Delete duplicate typedefs if used by QOM type"""
def gen_patches(self) -> Iterable[Patch]:
qom_macros = [TypeCheckMacro, DeclareInstanceChecker, DeclareClassCheckers, DeclareObjCheckers]
qom_matches = chain(*(self.file.matches_of_type(t) for t in qom_macros))
in_use = any(RequiredIdentifier('type', self.name) in m.required_identifiers()
for m in qom_matches)
if in_use:
yield from DuplicatedTypedefs.gen_patches(self)
class QOMStructTypedefSplit(FullStructTypedefMatch):
"""split struct+typedef declaration if used by QOM type"""
def gen_patches(self) -> Iterator[Patch]:
qom_macros = [TypeCheckMacro, DeclareInstanceChecker, DeclareClassCheckers, DeclareObjCheckers]
qom_matches = chain(*(self.file.matches_of_type(t) for t in qom_macros))
in_use = any(RequiredIdentifier('type', self.name) in m.required_identifiers()
for m in qom_matches)
if in_use:
yield from self.split_typedef()
def typedefs(file: FileInfo) -> Iterable[TypedefMatch]:
return (cast(TypedefMatch, m)
for m in chain(file.matches_of_type(SimpleTypedefMatch),
file.matches_of_type(FullStructTypedefMatch)))
def find_typedef(f: FileInfo, name: Optional[str]) -> Optional[TypedefMatch]:
if not name:
return None
for td in typedefs(f):
if td.name == name:
return td
return None
CHECKER_MACROS = ['OBJECT_CHECK', 'OBJECT_CLASS_CHECK', 'OBJECT_GET_CLASS']
CheckerMacroName = Literal['OBJECT_CHECK', 'OBJECT_CLASS_CHECK', 'OBJECT_GET_CLASS']
RE_CHECK_MACRO = \
S(RE_MACRO_DEFINE,
OR(*CHECKER_MACROS, name='checker'),
M(r'\s*\(\s*', OR(NAMED('typedefname', RE_IDENTIFIER), RE_TYPE, name='c_type'), r'\s*,', CPP_SPACE,
OPTIONAL_PARS(RE_IDENTIFIER), r',', CPP_SPACE,
NAMED('qom_typename', RE_IDENTIFIER), r'\s*\)\n',
n='?', name='check_args'))
EXPECTED_CHECKER_SUFFIXES: List[Tuple[CheckerMacroName, str]] = [
('OBJECT_GET_CLASS', '_GET_CLASS'),
('OBJECT_CLASS_CHECK', '_CLASS'),
]
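# Illustrative example (hypothetical device): RE_CHECK_MACRO matches checker
# definitions such as
#   #define MY_DEVICE(obj) OBJECT_CHECK(MyDevice, (obj), TYPE_MY_DEVICE)
# capturing checker='OBJECT_CHECK', c_type='MyDevice' and
# qom_typename='TYPE_MY_DEVICE'.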
class TypeCheckMacro(FileMatch):
"""OBJECT_CHECK/OBJECT_CLASS_CHECK/OBJECT_GET_CLASS macro definitions
Will be replaced by DECLARE_*_CHECKERS macro
"""
regexp = RE_CHECK_MACRO
@property
def checker(self) -> CheckerMacroName:
"""Name of checker macro being used"""
return self.group('checker') # type: ignore
@property
def typedefname(self) -> Optional[str]:
return self.group('typedefname')
def find_typedef(self) -> Optional[TypedefMatch]:
return find_typedef(self.file, self.typedefname)
def sanity_check(self) -> None:
DBG("groups: %r", self.match.groups())
if not self.group('check_args'):
self.warn("type check macro not parsed completely: %s", self.name)
return
DBG("type identifiers: %r", self.type_identifiers)
if self.typedefname and self.find_typedef() is None:
self.warn("typedef used by %s not found", self.name)
def find_matching_macros(self) -> List['TypeCheckMacro']:
"""Find other check macros that generate the same macro names
The returned list will always be sorted.
"""
my_ids = self.type_identifiers
assert my_ids
return [m for m in self.file.matches_of_type(TypeCheckMacro)
if m.type_identifiers is not None
and my_ids.uppercase is not None
and (my_ids.uppercase == m.type_identifiers.uppercase
or my_ids.typename == m.type_identifiers.typename)]
def merge_ids(self, matches: List['TypeCheckMacro']) -> Optional[TypeIdentifiers]:
"""Try to merge info about type identifiers from all matches in a list"""
if not matches:
return None
r = matches[0].type_identifiers
if r is None:
return None
for m in matches[1:]:
assert m.type_identifiers
new = r.merge(m.type_identifiers)
if new is None:
self.warn("macro %s identifiers (%s) don't match macro %s (%s)",
matches[0].name, r, m.name, m.type_identifiers)
return None
r = new
return r
def required_identifiers(self) -> Iterable[RequiredIdentifier]:
yield RequiredIdentifier('include', '"qom/object.h"')
if self.type_identifiers is None:
return
# to make sure typedefs will be moved above all related macros,
# return dependencies from all of them, not just this match
for m in self.find_matching_macros():
yield RequiredIdentifier('type', m.group('c_type'))
yield RequiredIdentifier('constant', m.group('qom_typename'))
@property
def type_identifiers(self) -> Optional[TypeIdentifiers]:
"""Extract type identifier information from match"""
typename = self.group('qom_typename')
c_type = self.group('c_type')
if not typename or not c_type:
return None
typedef = self.group('typedefname')
classtype = None
instancetype = None
uppercase = None
expected_suffix = dict(EXPECTED_CHECKER_SUFFIXES).get(self.checker)
# here the available data depends on the checker macro being called:
# - we need to remove the suffix from the macro name
# - depending on the macro type, we know the class type name, or
# the instance type name
if self.checker in ('OBJECT_GET_CLASS', 'OBJECT_CLASS_CHECK'):
classtype = c_type
elif self.checker == 'OBJECT_CHECK':
instancetype = c_type
uppercase = self.name
else:
assert False
if expected_suffix and self.name.endswith(expected_suffix):
uppercase = self.name[:-len(expected_suffix)]
return TypeIdentifiers(typename=typename, classtype=classtype,
instancetype=instancetype, uppercase=uppercase)
def gen_patches(self) -> Iterable[Patch]:
# the implementation is a bit tricky because we need to group
# macros dealing with the same type into a single declaration
if self.type_identifiers is None:
self.warn("couldn't extract type information from macro %s", self.name)
return
if self.name == 'INTERFACE_CLASS':
# INTERFACE_CLASS is special and won't be patched
return
for checker,suffix in EXPECTED_CHECKER_SUFFIXES:
if self.name.endswith(suffix):
if self.checker != checker:
self.warn("macro %s is using macro %s instead of %s", self.name, self.checker, checker)
return
break
matches = self.find_matching_macros()
DBG("found %d matching macros: %s", len(matches), ' '.join(m.name for m in matches))
# we will generate patches only when processing the first macro:
if matches[0].start != self.start:
DBG("skipping %s (will patch when handling %s)", self.name, matches[0].name)
return
ids = self.merge_ids(matches)
if ids is None:
DBG("type identifier mismatch, won't patch %s", self.name)
return
if not ids.uppercase:
self.warn("macro %s doesn't follow the expected name pattern", self.name)
return
if not ids.typename:
self.warn("macro %s: couldn't extract type name", self.name)
return
#issues = ids.check_consistency()
#if issues:
# for i in issues:
# self.warn("inconsistent identifiers: %s", i)
names = [n for n in (ids.instancetype, ids.classtype, ids.uppercase, ids.typename)
if n is not None]
if len(set(names)) != len(names):
self.warn("duplicate names used by macro: %r", ids)
return
assert ids.classtype or ids.instancetype
assert ids.typename
assert ids.uppercase
if ids.classtype and ids.instancetype:
new_decl = (f'DECLARE_OBJ_CHECKERS({ids.instancetype}, {ids.classtype},\n'
f' {ids.uppercase}, {ids.typename})\n')
elif ids.classtype:
new_decl = (f'DECLARE_CLASS_CHECKERS({ids.classtype}, {ids.uppercase},\n'
f' {ids.typename})\n')
elif ids.instancetype:
new_decl = (f'DECLARE_INSTANCE_CHECKER({ids.instancetype}, {ids.uppercase},\n'
f' {ids.typename})\n')
else:
assert False
# we need to ensure the typedefs are already available
issues = []
for t in [ids.instancetype, ids.classtype]:
if not t:
continue
if re.fullmatch(RE_STRUCT_TYPE, t):
self.info("type %s is not a typedef", t)
continue
td = find_typedef(self.file, t)
#if not td and self.allfiles.find_file('include/qemu/typedefs.h'):
#
if not td:
# it is OK if the typedef is in typedefs.h
f = self.allfiles.find_file('include/qemu/typedefs.h')
if f and find_typedef(f, t):
self.info("typedef %s found in typedefs.h", t)
continue
issues.append("couldn't find typedef %s" % (t))
elif td.start() > self.start():
issues.append("typedef %s need to be moved earlier in the file" % (td.name))
for issue in issues:
self.warn(issue)
if issues and not self.file.force:
return
# delete all matching macros and add new declaration:
for m in matches:
yield m.make_patch('')
for issue in issues:
yield self.prepend("/* FIXME: %s */\n" % (issue))
yield self.append(new_decl)
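# Sketch of the resulting transformation (hypothetical type): when a file
# defines MY_DEVICE, MY_DEVICE_CLASS and MY_DEVICE_GET_CLASS through
# OBJECT_CHECK, OBJECT_CLASS_CHECK and OBJECT_GET_CLASS, the three #defines
# are deleted and replaced with a single:
#   DECLARE_OBJ_CHECKERS(MyDevice, MyDeviceClass,
#                        MY_DEVICE, TYPE_MY_DEVICE)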
class InterfaceCheckMacro(FileMatch):
"""Type checking macro using INTERFACE_CHECK
Will be replaced by DECLARE_INTERFACE_CHECKER
"""
regexp = S(RE_MACRO_DEFINE,
'INTERFACE_CHECK',
r'\s*\(\s*', OR(NAMED('instancetype', RE_IDENTIFIER), RE_TYPE, name='c_type'),
r'\s*,', CPP_SPACE,
OPTIONAL_PARS(RE_IDENTIFIER), r',', CPP_SPACE,
NAMED('qom_typename', RE_IDENTIFIER), r'\s*\)\n')
def required_identifiers(self) -> Iterable[RequiredIdentifier]:
yield RequiredIdentifier('include', '"qom/object.h"')
yield RequiredIdentifier('type', self.group('instancetype'))
yield RequiredIdentifier('constant', self.group('qom_typename'))
def gen_patches(self) -> Iterable[Patch]:
if self.file.filename_matches('qom/object.h'):
self.debug("skipping object.h")
return
typename = self.group('qom_typename')
uppercase = self.name
instancetype = self.group('instancetype')
c = f"DECLARE_INTERFACE_CHECKER({instancetype}, {uppercase},\n"+\
f" {typename})\n"
yield self.make_patch(c)
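# Illustrative rewrite (hypothetical interface): a definition like
#   #define MY_IFACE(obj) INTERFACE_CHECK(MyIface, (obj), TYPE_MY_IFACE)
# becomes
#   DECLARE_INTERFACE_CHECKER(MyIface, MY_IFACE,
#                             TYPE_MY_IFACE)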
class TypeDeclaration(FileMatch):
"""Parent class to all type declarations"""
@property
def instancetype(self) -> Optional[str]:
return self.getgroup('instancetype')
@property
def classtype(self) -> Optional[str]:
return self.getgroup('classtype')
@property
def typename(self) -> Optional[str]:
return self.getgroup('typename')
class TypeCheckerDeclaration(TypeDeclaration):
"""Parent class to all type checker declarations"""
@property
def typename(self) -> str:
return self.group('typename')
@property
def uppercase(self) -> str:
return self.group('uppercase')
class DeclareInstanceChecker(TypeCheckerDeclaration):
"""DECLARE_INSTANCE_CHECKER use"""
#TODO: replace lonely DECLARE_INSTANCE_CHECKER with DECLARE_OBJ_CHECKERS
# if all types are found.
# This will require looking up the correct class type in the TypeInfo
# structs in another file
regexp = S(r'^[ \t]*DECLARE_INSTANCE_CHECKER\s*\(\s*',
NAMED('instancetype', RE_TYPE), r'\s*,\s*',
NAMED('uppercase', RE_IDENTIFIER), r'\s*,\s*',
OR(RE_IDENTIFIER, RE_STRING, RE_MACRO_CONCAT, RE_FUN_CALL, name='typename'), SP,
r'\)[ \t]*;?[ \t]*\n')
def required_identifiers(self) -> Iterable[RequiredIdentifier]:
yield RequiredIdentifier('include', '"qom/object.h"')
yield RequiredIdentifier('constant', self.group('typename'))
yield RequiredIdentifier('type', self.group('instancetype'))
class DeclareInterfaceChecker(TypeCheckerDeclaration):
"""DECLARE_INTERFACE_CHECKER use"""
regexp = S(r'^[ \t]*DECLARE_INTERFACE_CHECKER\s*\(\s*',
NAMED('instancetype', RE_TYPE), r'\s*,\s*',
NAMED('uppercase', RE_IDENTIFIER), r'\s*,\s*',
OR(RE_IDENTIFIER, RE_STRING, RE_MACRO_CONCAT, RE_FUN_CALL, name='typename'), SP,
r'\)[ \t]*;?[ \t]*\n')
def required_identifiers(self) -> Iterable[RequiredIdentifier]:
yield RequiredIdentifier('include', '"qom/object.h"')
yield RequiredIdentifier('constant', self.group('typename'))
yield RequiredIdentifier('type', self.group('instancetype'))
class DeclareInstanceType(TypeDeclaration):
"""DECLARE_INSTANCE_TYPE use"""
regexp = S(r'^[ \t]*DECLARE_INSTANCE_TYPE\s*\(\s*',
NAMED('uppercase', RE_IDENTIFIER), r'\s*,\s*',
NAMED('instancetype', RE_TYPE), SP,
r'\)[ \t]*;?[ \t]*\n')
def required_identifiers(self) -> Iterable[RequiredIdentifier]:
yield RequiredIdentifier('include', '"qom/object.h"')
yield RequiredIdentifier('type', self.group('instancetype'))
class DeclareClassType(TypeDeclaration):
"""DECLARE_CLASS_TYPE use"""
regexp = S(r'^[ \t]*DECLARE_CLASS_TYPE\s*\(\s*',
NAMED('uppercase', RE_IDENTIFIER), r'\s*,\s*',
NAMED('classtype', RE_TYPE), SP,
r'\)[ \t]*;?[ \t]*\n')
def required_identifiers(self) -> Iterable[RequiredIdentifier]:
yield RequiredIdentifier('include', '"qom/object.h"')
yield RequiredIdentifier('type', self.group('classtype'))
class DeclareClassCheckers(TypeCheckerDeclaration):
"""DECLARE_CLASS_CHECKER use"""
regexp = S(r'^[ \t]*DECLARE_CLASS_CHECKERS\s*\(\s*',
NAMED('classtype', RE_TYPE), r'\s*,\s*',
NAMED('uppercase', RE_IDENTIFIER), r'\s*,\s*',
OR(RE_IDENTIFIER, RE_STRING, RE_MACRO_CONCAT, RE_FUN_CALL, name='typename'), SP,
r'\)[ \t]*;?[ \t]*\n')
def required_identifiers(self) -> Iterable[RequiredIdentifier]:
yield RequiredIdentifier('include', '"qom/object.h"')
yield RequiredIdentifier('constant', self.group('typename'))
yield RequiredIdentifier('type', self.group('classtype'))
class DeclareObjCheckers(TypeCheckerDeclaration):
"""DECLARE_OBJ_CHECKERS use"""
#TODO: detect when OBJECT_DECLARE_SIMPLE_TYPE can be used
regexp = S(r'^[ \t]*DECLARE_OBJ_CHECKERS\s*\(\s*',
NAMED('instancetype', RE_TYPE), r'\s*,\s*',
NAMED('classtype', RE_TYPE), r'\s*,\s*',
NAMED('uppercase', RE_IDENTIFIER), r'\s*,\s*',
OR(RE_IDENTIFIER, RE_STRING, RE_MACRO_CONCAT, RE_FUN_CALL, name='typename'), SP,
r'\)[ \t]*;?[ \t]*\n')
def required_identifiers(self) -> Iterable[RequiredIdentifier]:
yield RequiredIdentifier('include', '"qom/object.h"')
yield RequiredIdentifier('constant', self.group('typename'))
yield RequiredIdentifier('type', self.group('classtype'))
yield RequiredIdentifier('type', self.group('instancetype'))
class TypeDeclarationFixup(FileMatch):
"""Common base class for code that will look at a set of type declarations"""
regexp = RE_FILE_BEGIN
def gen_patches(self) -> Iterable[Patch]:
if self.file.filename_matches('qom/object.h'):
self.debug("skipping object.h")
return
# group checkers by uppercase name:
decl_types: List[Type[TypeDeclaration]] = [DeclareInstanceChecker, DeclareInstanceType,
DeclareClassCheckers, DeclareClassType,
DeclareObjCheckers]
checker_dict: Dict[str, List[TypeDeclaration]] = {}
for t in decl_types:
for m in self.file.matches_of_type(t):
checker_dict.setdefault(m.group('uppercase'), []).append(m)
self.debug("checker_dict: %r", checker_dict)
for uppercase,checkers in checker_dict.items():
fields = ('instancetype', 'classtype', 'uppercase', 'typename')
fvalues = dict((field, set(getattr(m, field) for m in checkers
if getattr(m, field, None) is not None))
for field in fields)
for field,values in fvalues.items():
if len(values) > 1:
for c in checkers:
c.warn("%s mismatch (%s)", field, ' '.join(values))
return
field_dict = dict((f, v.pop() if v else None) for f,v in fvalues.items())
yield from self.gen_patches_for_type(uppercase, checkers, field_dict)
def find_conflicts(self, uppercase: str, checkers: List[TypeDeclaration]) -> bool:
"""Look for conflicting declarations that would make it unsafe to add new ones"""
conflicting: List[FileMatch] = []
# conflicts in the same file:
conflicting.extend(chain(self.file.find_matches(DefineDirective, uppercase),
self.file.find_matches(DeclareInterfaceChecker, uppercase, 'uppercase'),
self.file.find_matches(DeclareClassType, uppercase, 'uppercase'),
self.file.find_matches(DeclareInstanceType, uppercase, 'uppercase')))
# conflicts in another file:
conflicting.extend(o for o in chain(self.allfiles.find_matches(DeclareInstanceChecker, uppercase, 'uppercase'),
self.allfiles.find_matches(DeclareClassCheckers, uppercase, 'uppercase'),
self.allfiles.find_matches(DeclareInterfaceChecker, uppercase, 'uppercase'),
self.allfiles.find_matches(DefineDirective, uppercase))
if o is not None and o.file != self.file
# if both are .c files, there's no conflict at all:
and not (o.file.filename.suffix == '.c' and
self.file.filename.suffix == '.c'))
if conflicting:
for c in checkers:
c.warn("skipping due to conflicting %s macro", uppercase)
for o in conflicting:
if o is None:
continue
o.warn("conflicting %s macro is here", uppercase)
return True
return False
def gen_patches_for_type(self, uppercase: str,
checkers: List[TypeDeclaration],
fields: Dict[str, Optional[str]]) -> Iterable[Patch]:
"""Should be reimplemented by subclasses"""
return
yield
class DeclareVoidTypes(TypeDeclarationFixup):
"""Add DECLARE_*_TYPE(..., void) when there's no declared type"""
regexp = RE_FILE_BEGIN
def gen_patches_for_type(self, uppercase: str,
checkers: List[TypeDeclaration],
fields: Dict[str, Optional[str]]) -> Iterable[Patch]:
if self.find_conflicts(uppercase, checkers):
return
#_,last_checker = max((m.start(), m) for m in checkers)
_,first_checker = min((m.start(), m) for m in checkers)
if not any(m.instancetype for m in checkers):
yield first_checker.prepend(f'DECLARE_INSTANCE_TYPE({uppercase}, void)\n')
if not any(m.classtype for m in checkers):
yield first_checker.prepend(f'DECLARE_CLASS_TYPE({uppercase}, void)\n')
#if not all(len(v) == 1 for v in fvalues.values()):
# return
#
#final_values = dict((field, values.pop())
# for field,values in fvalues.items())
#s = (f"DECLARE_OBJ_CHECKERS({final_values['instancetype']}, {final_values['classtype']},\n"+
# f" {final_values['uppercase']}, {final_values['typename']})\n")
#for c in checkers:
# yield c.make_removal_patch()
#yield last_checker.append(s)
class AddDeclareTypeName(TypeDeclarationFixup):
"""Add DECLARE_TYPE_NAME declarations if necessary"""
def gen_patches_for_type(self, uppercase: str,
checkers: List[TypeDeclaration],
fields: Dict[str, Optional[str]]) -> Iterable[Patch]:
typename = fields.get('typename')
if typename is None:
self.warn("typename unavailable")
return
if typename == f'TYPE_{uppercase}':
self.info("already using TYPE_%s as type name", uppercase)
return
if self.file.find_match(DeclareTypeName, uppercase, 'uppercase'):
self.info("type name for %s already declared", uppercase)
return
_,first_checker = min((m.start(), m) for m in checkers)
s = f'DECLARE_TYPE_NAME({uppercase}, {typename})\n'
yield first_checker.prepend(s)
class TrivialClassStruct(FileMatch):
"""Trivial class struct"""
regexp = S(r'^[ \t]*struct\s*', NAMED('name', RE_IDENTIFIER),
r'\s*{\s*', NAMED('parent_struct', RE_IDENTIFIER), r'\s*parent(_class)?\s*;\s*};\n')
class DeclareTypeName(FileMatch):
"""DECLARE_TYPE_NAME usage"""
regexp = S(r'^[ \t]*DECLARE_TYPE_NAME\s*\(',
NAMED('uppercase', RE_IDENTIFIER), r'\s*,\s*',
OR(RE_IDENTIFIER, RE_STRING, RE_MACRO_CONCAT, RE_FUN_CALL, name='typename'),
r'\s*\);?[ \t]*\n')
class ObjectDeclareType(TypeCheckerDeclaration):
"""OBJECT_DECLARE_TYPE usage
Will be replaced with OBJECT_DECLARE_SIMPLE_TYPE if possible
"""
regexp = S(r'^[ \t]*OBJECT_DECLARE_TYPE\s*\(',
NAMED('instancetype', RE_TYPE), r'\s*,\s*',
NAMED('classtype', RE_TYPE), r'\s*,\s*',
NAMED('uppercase', RE_IDENTIFIER), SP,
r'\)[ \t]*;?[ \t]*\n')
def gen_patches(self):
DBG("groups: %r", self.match.groupdict())
trivial_struct = self.file.find_match(TrivialClassStruct, self.group('classtype'))
if trivial_struct:
d = self.match.groupdict().copy()
            d['parent_struct'] = trivial_struct.group("parent_struct")
            # the regexp above does not capture a lowercase name; derive one
            # from the uppercase name for the %(lowercase)s expansion below:
            d['lowercase'] = d['uppercase'].lower()
yield trivial_struct.make_removal_patch()
c = ("OBJECT_DECLARE_SIMPLE_TYPE(%(instancetype)s, %(lowercase)s,\n"
" %(uppercase)s, %(parent_struct)s)\n" % d)
yield self.make_patch(c)
class ObjectDeclareSimpleType(TypeCheckerDeclaration):
"""OBJECT_DECLARE_SIMPLE_TYPE usage"""
regexp = S(r'^[ \t]*OBJECT_DECLARE_SIMPLE_TYPE\s*\(',
NAMED('instancetype', RE_TYPE), r'\s*,\s*',
NAMED('uppercase', RE_IDENTIFIER), SP,
r'\)[ \t]*;?[ \t]*\n')
class OldStyleObjectDeclareSimpleType(TypeCheckerDeclaration):
"""OBJECT_DECLARE_SIMPLE_TYPE usage (old API)"""
regexp = S(r'^[ \t]*OBJECT_DECLARE_SIMPLE_TYPE\s*\(',
NAMED('instancetype', RE_TYPE), r'\s*,\s*',
NAMED('lowercase', RE_IDENTIFIER), r'\s*,\s*',
NAMED('uppercase', RE_IDENTIFIER), r'\s*,\s*',
NAMED('parent_classtype', RE_TYPE), SP,
r'\)[ \t]*;?[ \t]*\n')
@property
def classtype(self) -> Optional[str]:
instancetype = self.instancetype
assert instancetype
return f"{instancetype}Class"
def find_typename_uppercase(files: FileList, typename: str) -> Optional[str]:
"""Try to find what's the right MODULE_OBJ_NAME for a given type name"""
decl = files.find_match(DeclareTypeName, name=typename, group='typename')
if decl:
return decl.group('uppercase')
if typename.startswith('TYPE_'):
return typename[len('TYPE_'):]
return None
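# e.g. (illustrative) find_typename_uppercase(files, 'TYPE_MY_DEVICE') returns
# the uppercase name from a matching DECLARE_TYPE_NAME declaration when one
# exists, and otherwise falls back to stripping the 'TYPE_' prefix,
# yielding 'MY_DEVICE'.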
def find_type_checkers(files:FileList, name:str, group:str='uppercase') -> Iterable[TypeCheckerDeclaration]:
"""Find usage of DECLARE*CHECKER macro"""
c: Type[TypeCheckerDeclaration]
for c in (DeclareInstanceChecker, DeclareClassCheckers, DeclareObjCheckers, ObjectDeclareType, ObjectDeclareSimpleType):
yield from files.find_matches(c, name=name, group=group)
class Include(FileMatch):
"""#include directive"""
regexp = RE_INCLUDE
def provided_identifiers(self) -> Iterable[RequiredIdentifier]:
yield RequiredIdentifier('include', self.group('includepath'))
class InitialIncludes(FileMatch):
"""Initial #include block"""
regexp = S(RE_FILE_BEGIN,
M(SP, RE_COMMENTS,
r'^[ \t]*#[ \t]*ifndef[ \t]+', RE_IDENTIFIER, r'[ \t]*\n',
n='?', name='ifndef_block'),
M(SP, RE_COMMENTS,
OR(RE_INCLUDE, RE_SIMPLEDEFINE),
n='*', name='includes'))
class SymbolUserList(NamedTuple):
definitions: List[FileMatch]
users: List[FileMatch]
class MoveSymbols(FileMatch):
"""Handle missing symbols
- Move typedefs and defines when necessary
- Add missing #include lines when necessary
"""
regexp = RE_FILE_BEGIN
def gen_patches(self) -> Iterator[Patch]:
if self.file.filename_matches('qom/object.h'):
self.debug("skipping object.h")
return
index: Dict[RequiredIdentifier, SymbolUserList] = {}
definition_classes = [SimpleTypedefMatch, FullStructTypedefMatch, ConstantDefine, Include]
user_classes = [TypeCheckMacro, DeclareObjCheckers, DeclareInstanceChecker, DeclareClassCheckers, InterfaceCheckMacro]
# first we scan for all symbol definitions and usage:
for dc in definition_classes:
defs = self.file.matches_of_type(dc)
for d in defs:
DBG("scanning %r", d)
for i in d.provided_identifiers():
index.setdefault(i, SymbolUserList([], [])).definitions.append(d)
DBG("index: %r", list(index.keys()))
for uc in user_classes:
users = self.file.matches_of_type(uc)
for u in users:
for i in u.required_identifiers():
index.setdefault(i, SymbolUserList([], [])).users.append(u)
# validate all symbols:
for i,ul in index.items():
if not ul.users:
# unused symbol
continue
# symbol not defined
if len(ul.definitions) == 0:
if i.type == 'include':
includes, = self.file.matches_of_type(InitialIncludes)
#FIXME: don't do this if we're already inside qom/object.h
yield includes.append(f'#include {i.name}\n')
                else:
                    for user in ul.users:
                        user.warn("definition of %s %s not found in file", i.type, i.name)
continue
# symbol defined twice:
if len(ul.definitions) > 1:
ul.definitions[1].warn("%s defined twice", i.name)
ul.definitions[0].warn("previously defined here")
continue
# symbol defined. check if all users are after its definition:
assert len(ul.definitions) == 1
definition = ul.definitions[0]
DBG("handling repositioning of %r", definition)
earliest = min(ul.users, key=lambda u: u.start())
if earliest.start() > definition.start():
DBG("%r is OK", definition)
continue
DBG("%r needs to be moved", definition)
if isinstance(definition, SimpleTypedefMatch) \
or isinstance(definition, ConstantDefine):
# simple typedef or define can be moved directly:
yield definition.make_removal_patch()
yield earliest.prepend(definition.group(0))
elif isinstance(definition, FullStructTypedefMatch) \
and definition.group('structname'):
# full struct typedef is more complex: we need to remove
# the typedef
yield from definition.move_typedef(earliest.start())
else:
definition.warn("definition of %s %s needs to be moved earlier in the file", i.type, i.name)
earliest.warn("definition of %s %s is used here", i.type, i.name)
class EmptyPreprocessorConditional(FileMatch):
"""Delete empty preprocessor conditionals"""
regexp = r'^[ \t]*#(if|ifdef)[ \t].*\n+[ \t]*#endif[ \t]*\n'
def gen_patches(self) -> Iterable[Patch]:
yield self.make_removal_patch()
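# Illustrative: an empty conditional left behind by earlier patches, e.g.
#   #ifdef CONFIG_FOO
#   #endif
# is matched and removed entirely.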
| 36,970 | 41.889791 | 126 | py |
qemu | qemu-master/scripts/codeconverter/codeconverter/qom_type_info.py | # Copyright (C) 2020 Red Hat Inc.
#
# Authors:
# Eduardo Habkost <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
import re
from .regexps import *
from .patching import *
from .utils import *
from .qom_macros import *
TI_FIELDS = [ 'name', 'parent', 'abstract', 'interfaces',
'instance_size', 'instance_init', 'instance_post_init', 'instance_finalize',
'class_size', 'class_init', 'class_base_init', 'class_data']
RE_TI_FIELD_NAME = OR(*TI_FIELDS)
RE_TI_FIELD_INIT = S(r'[ \t]*', NAMED('comments', RE_COMMENTS),
r'\.', NAMED('field', RE_TI_FIELD_NAME), r'\s*=\s*',
NAMED('value', RE_EXPRESSION), r'[ \t]*,?[ \t]*\n')
RE_TI_FIELDS = M(RE_TI_FIELD_INIT)
RE_TYPEINFO_START = S(r'^[ \t]*', M(r'(static|const)\s+', name='modifiers'), r'TypeInfo\s+',
NAMED('name', RE_IDENTIFIER), r'\s*=\s*{[ \t]*\n')
ParsedArray = List[str]
ParsedInitializerValue = Union[str, ParsedArray]
class InitializerValue(NamedTuple):
raw: str
parsed: Optional[ParsedInitializerValue]
match: Optional[Match]
class ArrayItem(FileMatch):
regexp = RE_ARRAY_ITEM
class ArrayInitializer(FileMatch):
regexp = RE_ARRAY
def parsed(self) -> ParsedArray:
#DBG('parse_array: %r', m.group(0))
return [m.group('arrayitem') for m in self.group_finditer(ArrayItem, 'arrayitems')]
class FieldInitializer(FileMatch):
regexp = RE_TI_FIELD_INIT
@property
def raw(self) -> str:
return self.group('value')
@property
def parsed(self) -> ParsedInitializerValue:
parsed: ParsedInitializerValue = self.raw
#DBG("parse_initializer_value: %r", s)
array = self.try_group_match(ArrayInitializer, 'value')
if array:
assert isinstance(array, ArrayInitializer)
return array.parsed()
return parsed
TypeInfoInitializers = Dict[str, FieldInitializer]
class TypeDefinition(FileMatch):
"""
Common base class for type definitions (TypeInfo variables or OBJECT_DEFINE* macros)
"""
@property
def instancetype(self) -> Optional[str]:
return self.group('instancetype')
@property
def classtype(self) -> Optional[str]:
return self.group('classtype')
@property
def uppercase(self) -> Optional[str]:
return self.group('uppercase')
@property
def parent_uppercase(self) -> str:
return self.group('parent_uppercase')
@property
def initializers(self) -> Optional[TypeInfoInitializers]:
        if getattr(self, '_initializers', None):
self._initializers: TypeInfoInitializers
return self._initializers
fields = self.group('fields')
if fields is None:
return None
d = dict((fm.group('field'), fm)
for fm in self.group_finditer(FieldInitializer, 'fields'))
self._initializers = d # type: ignore
return self._initializers
class TypeInfoVar(TypeDefinition):
"""TypeInfo variable declaration with initializer"""
regexp = S(NAMED('begin', RE_TYPEINFO_START),
M(NAMED('fields', RE_TI_FIELDS),
NAMED('endcomments', SP, RE_COMMENTS),
NAMED('end', r'};?\n'),
n='?', name='fullspec'))
def is_static(self) -> bool:
return 'static' in self.group('modifiers')
def is_const(self) -> bool:
return 'const' in self.group('modifiers')
def is_full(self) -> bool:
return bool(self.group('fullspec'))
def get_initializers(self) -> TypeInfoInitializers:
"""Helper for code that needs to deal with missing initializer info"""
if self.initializers is None:
return {}
return self.initializers
def get_raw_initializer_value(self, field: str, default: str = '') -> str:
initializers = self.get_initializers()
if field in initializers:
return initializers[field].raw
else:
return default
@property
def typename(self) -> Optional[str]:
return self.get_raw_initializer_value('name')
@property
def uppercase(self) -> Optional[str]:
typename = self.typename
if not typename:
return None
if not typename.startswith('TYPE_'):
return None
return typename[len('TYPE_'):]
@property
def classtype(self) -> Optional[str]:
class_size = self.get_raw_initializer_value('class_size')
if not class_size:
return None
m = re.fullmatch(RE_SIZEOF, class_size)
if not m:
return None
return m.group('sizeoftype')
@property
def instancetype(self) -> Optional[str]:
instance_size = self.get_raw_initializer_value('instance_size')
if not instance_size:
return None
m = re.fullmatch(RE_SIZEOF, instance_size)
if not m:
return None
return m.group('sizeoftype')
#def extract_identifiers(self) -> Optional[TypeIdentifiers]:
# """Try to extract identifiers from names being used"""
# DBG("extracting idenfiers from %s", self.name)
#uppercase = None
#if typename and re.fullmatch(RE_IDENTIFIER, typename) and typename.startswith("TYPE_"):
# uppercase = typename[len('TYPE_'):]
#lowercase = None
#funcs = set()
#prefixes = set()
#for field,suffix in [('instance_init', '_init'),
# ('instance_finalize', '_finalize'),
# ('class_init', '_class_init')]:
# if field not in values:
# continue
# func = values[field].raw
# funcs.add(func)
# if func.endswith(suffix):
# prefixes.add(func[:-len(suffix)])
# else:
# self.warn("function name %s doesn't have expected %s suffix",
# func, suffix)
#if len(prefixes) == 1:
# lowercase = prefixes.pop()
#elif len(prefixes) > 1:
# self.warn("inconsistent function names: %s", ' '.join(funcs))
#.parent = TYPE_##PARENT_MODULE_OBJ_NAME, \
#return TypeIdentifiers(typename=typename,
# uppercase=uppercase, lowercase=lowercase,
# instancetype=instancetype, classtype=classtype)
def append_field(self, field: str, value: str) -> Patch:
"""Generate patch appending a field initializer"""
content = f' .{field} = {value},\n'
fm = self.group_match('fields')
assert fm
return fm.append(content)
def patch_field(self, field: str, replacement: str) -> Patch:
"""Generate patch replacing a field initializer"""
initializers = self.initializers
assert initializers
value = initializers.get(field)
assert value
return value.make_patch(replacement)
def remove_field(self, field: str) -> Iterable[Patch]:
initializers = self.initializers
assert initializers
if field in initializers:
yield self.patch_field(field, '')
def remove_fields(self, *fields: str) -> Iterable[Patch]:
for f in fields:
yield from self.remove_field(f)
def patch_field_value(self, field: str, replacement: str) -> Patch:
"""Replace just the value of a field initializer"""
initializers = self.initializers
assert initializers
value = initializers.get(field)
assert value
vm = value.group_match('value')
assert vm
return vm.make_patch(replacement)
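# Illustrative sketch (hypothetical variable): for a declaration such as
#   static const TypeInfo my_device_info = {
#       .name = TYPE_MY_DEVICE,
#       .parent = TYPE_DEVICE,
#   };
# the match reports typename 'TYPE_MY_DEVICE', and
# append_field('instance_size', 'sizeof(MyDevice)') yields a patch adding
#   .instance_size = sizeof(MyDevice),
# before the closing brace.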
class RemoveRedundantClassSize(TypeInfoVar):
"""Remove class_size when using OBJECT_DECLARE_SIMPLE_TYPE"""
def gen_patches(self) -> Iterable[Patch]:
initializers = self.initializers
if initializers is None:
return
if 'class_size' not in initializers:
return
self.debug("Handling %s", self.name)
m = re.fullmatch(RE_SIZEOF, initializers['class_size'].raw)
if not m:
self.warn("%s class_size is not sizeof?", self.name)
return
classtype = m.group('sizeoftype')
if not classtype.endswith('Class'):
self.warn("%s class size type (%s) is not *Class?", self.name, classtype)
return
self.debug("classtype is %s", classtype)
instancetype = classtype[:-len('Class')]
self.debug("intanceypte is %s", instancetype)
self.debug("searching for simpletype declaration using %s as InstanceType", instancetype)
decl = self.allfiles.find_match(OldStyleObjectDeclareSimpleType,
instancetype, 'instancetype')
if not decl:
self.debug("No simpletype declaration found for %s", instancetype)
return
self.debug("Found simple type declaration")
decl.debug("declaration is here")
yield from self.remove_field('class_size')
class RemoveDeclareSimpleTypeArg(OldStyleObjectDeclareSimpleType):
"""Remove class_size when using OBJECT_DECLARE_SIMPLE_TYPE"""
def gen_patches(self) -> Iterable[Patch]:
c = (f'OBJECT_DECLARE_SIMPLE_TYPE({self.group("instancetype")}, {self.group("lowercase")},\n'
f' {self.group("uppercase")})\n')
yield self.make_patch(c)
class UseDeclareTypeExtended(TypeInfoVar):
"""Replace TypeInfo variable with OBJECT_DEFINE_TYPE_EXTENDED"""
def gen_patches(self) -> Iterable[Patch]:
# this will just ensure the caches for find_match() and matches_for_type()
# will be loaded in advance:
find_type_checkers(self.allfiles, 'xxxxxxxxxxxxxxxxx')
if not self.is_static():
self.info("Skipping non-static TypeInfo variable")
return
type_info_macro = self.file.find_match(TypeInfoMacro, self.name)
if not type_info_macro:
self.warn("TYPE_INFO(%s) line not found", self.name)
return
values = self.initializers
if values is None:
return
if 'name' not in values:
self.warn("name not set in TypeInfo variable %s", self.name)
return
typename = values['name'].raw
if 'parent' not in values:
self.warn("parent not set in TypeInfo variable %s", self.name)
return
parent_typename = values['parent'].raw
instancetype = None
if 'instance_size' in values:
m = re.fullmatch(RE_SIZEOF, values['instance_size'].raw)
if m:
instancetype = m.group('sizeoftype')
else:
self.warn("can't extract instance type in TypeInfo variable %s", self.name)
self.warn("instance_size is set to: %r", values['instance_size'].raw)
return
classtype = None
if 'class_size' in values:
m = re.fullmatch(RE_SIZEOF, values['class_size'].raw)
if m:
classtype = m.group('sizeoftype')
else:
self.warn("can't extract class type in TypeInfo variable %s", self.name)
self.warn("class_size is set to: %r", values['class_size'].raw)
return
#for t in (typename, parent_typename):
# if not re.fullmatch(RE_IDENTIFIER, t):
# self.info("type name is not a macro/constant")
# if instancetype or classtype:
# self.warn("macro/constant type name is required for instance/class type")
# if not self.file.force:
# return
# Now, the challenge is to find out the right MODULE_OBJ_NAME for the
# type and for the parent type
self.info("TypeInfo variable for %s is here", typename)
uppercase = find_typename_uppercase(self.allfiles, typename)
if not uppercase:
self.info("Can't find right uppercase name for %s", typename)
if instancetype or classtype:
self.warn("Can't find right uppercase name for %s", typename)
self.warn("This will make type validation difficult in the future")
return
parent_uppercase = find_typename_uppercase(self.allfiles, parent_typename)
if not parent_uppercase:
self.info("Can't find right uppercase name for parent type (%s)", parent_typename)
if instancetype or classtype:
self.warn("Can't find right uppercase name for parent type (%s)", parent_typename)
self.warn("This will make type validation difficult in the future")
return
ok = True
#checkers: List[TypeCheckerDeclaration] = list(find_type_checkers(self.allfiles, uppercase))
#for c in checkers:
# c.info("instance type checker declaration (%s) is here", c.group('uppercase'))
#if not checkers:
# self.info("No type checkers declared for %s", uppercase)
# if instancetype or classtype:
# self.warn("Can't find where type checkers for %s (%s) are declared. We will need them to validate sizes of %s",
# typename, uppercase, self.name)
if not instancetype:
instancetype = 'void'
if not classtype:
classtype = 'void'
#checker_instancetypes = set(c.instancetype for c in checkers
# if c.instancetype is not None)
#if len(checker_instancetypes) > 1:
# self.warn("ambiguous set of type checkers")
# for c in checkers:
# c.warn("instancetype is %s here", c.instancetype)
# ok = False
#elif len(checker_instancetypes) == 1:
# checker_instancetype = checker_instancetypes.pop()
# DBG("checker instance type: %r", checker_instancetype)
# if instancetype != checker_instancetype:
# self.warn("type at instance_size is %r. Should instance_size be set to sizeof(%s) ?",
# instancetype, checker_instancetype)
# ok = False
#else:
# if instancetype != 'void':
# self.warn("instance type checker for %s (%s) not found", typename, instancetype)
# ok = False
#checker_classtypes = set(c.classtype for c in checkers
# if c.classtype is not None)
#if len(checker_classtypes) > 1:
# self.warn("ambiguous set of type checkers")
# for c in checkers:
# c.warn("classtype is %s here", c.classtype)
# ok = False
#elif len(checker_classtypes) == 1:
# checker_classtype = checker_classtypes.pop()
# DBG("checker class type: %r", checker_classtype)
# if classtype != checker_classtype:
# self.warn("type at class_size is %r. Should class_size be set to sizeof(%s) ?",
# classtype, checker_classtype)
# ok = False
#else:
# if classtype != 'void':
# self.warn("class type checker for %s (%s) not found", typename, classtype)
# ok = False
#if not ok:
# for c in checkers:
# c.warn("Type checker declaration for %s (%s) is here",
# typename, type(c).__name__)
# return
#if parent_decl is None:
# self.warn("Can't find where parent type %s is declared", parent_typename)
#yield self.prepend(f'DECLARE_TYPE_NAME({uppercase}, {typename})\n')
#if not instancetype:
# yield self.prepend(f'DECLARE_INSTANCE_TYPE({uppercase}, void)\n')
#if not classtype:
# yield self.prepend(f'DECLARE_CLASS_TYPE({uppercase}, void)\n')
self.info("%s can be patched!", self.name)
replaced_fields = ['name', 'parent', 'instance_size', 'class_size']
begin = self.group_match('begin')
newbegin = f'OBJECT_DEFINE_TYPE_EXTENDED({self.name},\n'
newbegin += f' {instancetype}, {classtype},\n'
newbegin += f' {uppercase}, {parent_uppercase}'
if set(values.keys()) - set(replaced_fields):
newbegin += ',\n'
yield begin.make_patch(newbegin)
yield from self.remove_fields(*replaced_fields)
end = self.group_match('end')
yield end.make_patch(')\n')
yield type_info_macro.make_removal_patch()
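# Sketch of the rewrite above (hypothetical type, no class_size set):
#   static const TypeInfo my_device_info = {
#       .name = TYPE_MY_DEVICE,
#       .parent = TYPE_DEVICE,
#       .instance_size = sizeof(MyDevice),
#       .instance_init = my_device_init,
#   };
#   TYPE_INFO(my_device_info)
# becomes
#   OBJECT_DEFINE_TYPE_EXTENDED(my_device_info,
#                               MyDevice, void,
#                               MY_DEVICE, DEVICE,
#       .instance_init = my_device_init,
#   )
# with the TYPE_INFO(...) line removed.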
class ObjectDefineTypeExtended(TypeDefinition):
"""OBJECT_DEFINE_TYPE_EXTENDED usage"""
regexp = S(r'^[ \t]*OBJECT_DEFINE_TYPE_EXTENDED\s*\(\s*',
NAMED('name', RE_IDENTIFIER), r'\s*,\s*',
NAMED('instancetype', RE_IDENTIFIER), r'\s*,\s*',
NAMED('classtype', RE_IDENTIFIER), r'\s*,\s*',
NAMED('uppercase', RE_IDENTIFIER), r'\s*,\s*',
NAMED('parent_uppercase', RE_IDENTIFIER),
M(r',\s*\n',
NAMED('fields', RE_TI_FIELDS),
n='?'),
r'\s*\);?\n?')
class ObjectDefineType(TypeDefinition):
"""OBJECT_DEFINE_TYPE usage"""
regexp = S(r'^[ \t]*OBJECT_DEFINE_TYPE\s*\(\s*',
NAMED('lowercase', RE_IDENTIFIER), r'\s*,\s*',
NAMED('uppercase', RE_IDENTIFIER), r'\s*,\s*',
NAMED('parent_uppercase', RE_IDENTIFIER),
M(r',\s*\n',
NAMED('fields', RE_TI_FIELDS),
n='?'),
r'\s*\);?\n?')
def find_type_definitions(files: FileList, uppercase: str) -> Iterable[TypeDefinition]:
types: List[Type[TypeDefinition]] = [TypeInfoVar, ObjectDefineType, ObjectDefineTypeExtended]
for t in types:
for m in files.matches_of_type(t):
m.debug("uppercase: %s", m.uppercase)
yield from (m for t in types
for m in files.matches_of_type(t)
if m.uppercase == uppercase)
class AddDeclareVoidClassType(TypeDeclarationFixup):
"""Will add DECLARE_CLASS_TYPE(..., void) if possible"""
def gen_patches_for_type(self, uppercase: str,
checkers: List[TypeDeclaration],
fields: Dict[str, Optional[str]]) -> Iterable[Patch]:
defs = list(find_type_definitions(self.allfiles, uppercase))
if len(defs) > 1:
self.warn("multiple definitions for %s", uppercase)
for d in defs:
d.warn("definition found here")
return
elif len(defs) == 0:
self.warn("type definition for %s not found", uppercase)
return
d = defs[0]
        if d.classtype is not None:
            d.info("definition for %s already has a classtype, skipping", uppercase)
return
class_type_checkers = [c for c in checkers
if c.classtype is not None]
if class_type_checkers:
for c in class_type_checkers:
c.warn("class type checker for %s is present here", uppercase)
return
_,last_checker = max((m.start(), m) for m in checkers)
s = f'DECLARE_CLASS_TYPE({uppercase}, void)\n'
yield last_checker.append(s)
class AddDeclareVoidInstanceType(FileMatch):
"""Will add DECLARE_INSTANCE_TYPE(..., void) if possible"""
regexp = S(r'^[ \t]*#[ \t]*define', CPP_SPACE,
NAMED('name', r'TYPE_[a-zA-Z0-9_]+\b'),
CPP_SPACE, r'.*\n')
def gen_patches(self) -> Iterable[Patch]:
assert self.name.startswith('TYPE_')
uppercase = self.name[len('TYPE_'):]
defs = list(find_type_definitions(self.allfiles, uppercase))
if len(defs) > 1:
self.warn("multiple definitions for %s", uppercase)
for d in defs:
d.warn("definition found here")
return
elif len(defs) == 0:
self.warn("type definition for %s not found", uppercase)
return
d = defs[0]
instancetype = d.instancetype
if instancetype is not None and instancetype != 'void':
return
instance_checkers = [c for c in find_type_checkers(self.allfiles, uppercase)
if c.instancetype]
if instance_checkers:
d.warn("instance type checker for %s already declared", uppercase)
for c in instance_checkers:
c.warn("instance checker for %s is here", uppercase)
return
s = f'DECLARE_INSTANCE_TYPE({uppercase}, void)\n'
yield self.append(s)
class AddObjectDeclareType(DeclareObjCheckers):
"""Will add OBJECT_DECLARE_TYPE(...) if possible"""
def gen_patches(self) -> Iterable[Patch]:
uppercase = self.uppercase
typename = self.group('typename')
instancetype = self.group('instancetype')
classtype = self.group('classtype')
if typename != f'TYPE_{uppercase}':
self.warn("type name mismatch: %s vs %s", typename, uppercase)
return
typedefs = [(t,self.allfiles.find_matches(SimpleTypedefMatch, t))
for t in (instancetype, classtype)]
for t,tds in typedefs:
if not tds:
self.warn("typedef %s not found", t)
return
for td in tds:
td_type = td.group('typedef_type')
if td_type != f'struct {t}':
self.warn("typedef mismatch: %s is defined as %s", t, td_type)
td.warn("typedef is here")
return
# look for reuse of same struct type
other_instance_checkers = [c for c in find_type_checkers(self.allfiles, instancetype, 'instancetype')
if c.uppercase != uppercase]
if other_instance_checkers:
self.warn("typedef %s is being reused", instancetype)
for ic in other_instance_checkers:
ic.warn("%s is reused here", instancetype)
if not self.file.force:
return
decl_types: List[Type[TypeDeclaration]] = [DeclareClassCheckers, DeclareObjCheckers]
class_decls = [m for t in decl_types
for m in self.allfiles.find_matches(t, uppercase, 'uppercase')]
defs = list(find_type_definitions(self.allfiles, uppercase))
if len(defs) > 1:
self.warn("multiple definitions for %s", uppercase)
for d in defs:
d.warn("definition found here")
if not self.file.force:
return
elif len(defs) == 0:
self.warn("type definition for %s not found", uppercase)
if not self.file.force:
return
else:
d = defs[0]
if d.instancetype != instancetype:
self.warn("mismatching instance type for %s (%s)", uppercase, instancetype)
d.warn("instance type declared here (%s)", d.instancetype)
if not self.file.force:
return
if d.classtype != classtype:
self.warn("mismatching class type for %s (%s)", uppercase, classtype)
d.warn("class type declared here (%s)", d.classtype)
if not self.file.force:
return
assert self.file.original_content
for t,tds in typedefs:
assert tds
for td in tds:
if td.file is not self.file:
continue
# delete typedefs that are truly redundant:
# 1) defined after DECLARE_OBJ_CHECKERS
if td.start() > self.start():
yield td.make_removal_patch()
# 2) defined before DECLARE_OBJ_CHECKERS, but unused
elif not re.search(r'\b'+t+r'\b', self.file.original_content[td.end():self.start()]):
yield td.make_removal_patch()
c = (f'OBJECT_DECLARE_TYPE({instancetype}, {classtype}, {uppercase})\n')
yield self.make_patch(c)
class AddObjectDeclareSimpleType(DeclareInstanceChecker):
"""Will add OBJECT_DECLARE_SIMPLE_TYPE(...) if possible"""
def gen_patches(self) -> Iterable[Patch]:
uppercase = self.uppercase
typename = self.group('typename')
instancetype = self.group('instancetype')
if typename != f'TYPE_{uppercase}':
self.warn("type name mismatch: %s vs %s", typename, uppercase)
return
typedefs = [(t,self.allfiles.find_matches(SimpleTypedefMatch, t))
for t in (instancetype,)]
for t,tds in typedefs:
if not tds:
self.warn("typedef %s not found", t)
return
for td in tds:
td_type = td.group('typedef_type')
if td_type != f'struct {t}':
self.warn("typedef mismatch: %s is defined as %s", t, td_type)
td.warn("typedef is here")
return
# look for reuse of same struct type
other_instance_checkers = [c for c in find_type_checkers(self.allfiles, instancetype, 'instancetype')
if c.uppercase != uppercase]
if other_instance_checkers:
self.warn("typedef %s is being reused", instancetype)
for ic in other_instance_checkers:
ic.warn("%s is reused here", instancetype)
if not self.file.force:
return
decl_types: List[Type[TypeDeclaration]] = [DeclareClassCheckers, DeclareObjCheckers]
class_decls = [m for t in decl_types
for m in self.allfiles.find_matches(t, uppercase, 'uppercase')]
if class_decls:
self.warn("class type declared for %s", uppercase)
for cd in class_decls:
cd.warn("class declaration found here")
return
defs = list(find_type_definitions(self.allfiles, uppercase))
if len(defs) > 1:
self.warn("multiple definitions for %s", uppercase)
for d in defs:
d.warn("definition found here")
if not self.file.force:
return
elif len(defs) == 0:
self.warn("type definition for %s not found", uppercase)
if not self.file.force:
return
else:
d = defs[0]
if d.instancetype != instancetype:
self.warn("mismatching instance type for %s (%s)", uppercase, instancetype)
d.warn("instance type declared here (%s)", d.instancetype)
if not self.file.force:
return
if d.classtype:
self.warn("class type set for %s", uppercase)
d.warn("class type declared here")
if not self.file.force:
return
assert self.file.original_content
for t,tds in typedefs:
assert tds
for td in tds:
if td.file is not self.file:
continue
# delete typedefs that are truly redundant:
                # 1) defined after DECLARE_INSTANCE_CHECKER
if td.start() > self.start():
yield td.make_removal_patch()
                # 2) defined before DECLARE_INSTANCE_CHECKER, but unused
elif not re.search(r'\b'+t+r'\b', self.file.original_content[td.end():self.start()]):
yield td.make_removal_patch()
c = (f'OBJECT_DECLARE_SIMPLE_TYPE({instancetype}, {uppercase})\n')
yield self.make_patch(c)
class TypeInfoStringName(TypeInfoVar):
"""Replace hardcoded type names with TYPE_ constant"""
def gen_patches(self) -> Iterable[Patch]:
values = self.initializers
if values is None:
return
if 'name' not in values:
self.warn("name not set in TypeInfo variable %s", self.name)
return
typename = values['name'].raw
if re.fullmatch(RE_IDENTIFIER, typename):
return
self.warn("name %s is not an identifier", typename)
#all_defines = [m for m in self.allfiles.matches_of_type(ExpressionDefine)]
#self.debug("all_defines: %r", all_defines)
constants = [m for m in self.allfiles.matches_of_type(ExpressionDefine)
if m.group('value').strip() == typename.strip()]
if not constants:
self.warn("No macro for %s found", typename)
return
if len(constants) > 1:
self.warn("I don't know which macro to use: %r", constants)
return
yield self.patch_field_value('name', constants[0].name)
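# Illustrative: when a TypeInfo uses a hardcoded string such as
#   .name = "my-device",
# and some file contains
#   #define TYPE_MY_DEVICE "my-device"
# the initializer value is rewritten to TYPE_MY_DEVICE.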
class RedundantTypeSizes(TypeInfoVar):
"""Remove redundant instance_size/class_size from TypeInfo vars"""
def gen_patches(self) -> Iterable[Patch]:
values = self.initializers
if values is None:
return
if 'name' not in values:
self.warn("name not set in TypeInfo variable %s", self.name)
return
typename = values['name'].raw
if 'parent' not in values:
self.warn("parent not set in TypeInfo variable %s", self.name)
return
parent_typename = values['parent'].raw
if 'instance_size' not in values and 'class_size' not in values:
self.debug("no need to validate %s", self.name)
return
instance_decls = find_type_checkers(self.allfiles, typename)
if instance_decls:
self.debug("won't touch TypeInfo var that has type checkers")
return
parent = find_type_info(self.allfiles, parent_typename)
if not parent:
self.warn("Can't find TypeInfo for %s", parent_typename)
return
if 'instance_size' in values and parent.get_raw_initializer_value('instance_size') != values['instance_size'].raw:
self.info("instance_size mismatch")
parent.info("parent type declared here")
return
if 'class_size' in values and parent.get_raw_initializer_value('class_size') != values['class_size'].raw:
self.info("class_size mismatch")
parent.info("parent type declared here")
return
self.debug("will patch variable %s", self.name)
if 'instance_size' in values:
self.debug("deleting instance_size")
yield self.patch_field('instance_size', '')
if 'class_size' in values:
self.debug("deleting class_size")
yield self.patch_field('class_size', '')
#class TypeInfoVarInitFuncs(TypeInfoVar):
# """TypeInfo variable
# Will create missing init functions
# """
# def gen_patches(self) -> Iterable[Patch]:
# values = self.initializers
# if values is None:
# self.warn("type not parsed completely: %s", self.name)
# return
#
# macro = self.file.find_match(TypeInfoVar, self.name)
# if macro is None:
# self.warn("No TYPE_INFO macro for %s", self.name)
# return
#
# ids = self.extract_identifiers()
# if ids is None:
# return
#
# DBG("identifiers extracted: %r", ids)
# fields = set(values.keys())
# if ids.lowercase:
# if 'instance_init' not in fields:
# yield self.prepend(('static void %s_init(Object *obj)\n'
# '{\n'
# '}\n\n') % (ids.lowercase))
# yield self.append_field('instance_init', ids.lowercase+'_init')
#
# if 'instance_finalize' not in fields:
# yield self.prepend(('static void %s_finalize(Object *obj)\n'
# '{\n'
# '}\n\n') % (ids.lowercase))
# yield self.append_field('instance_finalize', ids.lowercase+'_finalize')
#
#
# if 'class_init' not in fields:
# yield self.prepend(('static void %s_class_init(ObjectClass *oc, void *data)\n'
# '{\n'
# '}\n\n') % (ids.lowercase))
# yield self.append_field('class_init', ids.lowercase+'_class_init')
class TypeInitMacro(FileMatch):
"""Use of type_init(...) macro"""
regexp = S(r'^[ \t]*type_init\s*\(\s*', NAMED('name', RE_IDENTIFIER), r'\s*\);?[ \t]*\n')
class DeleteEmptyTypeInitFunc(TypeInitMacro):
"""Delete empty function declared using type_init(...)"""
def gen_patches(self) -> Iterable[Patch]:
fn = self.file.find_match(StaticVoidFunction, self.name)
DBG("function for %s: %s", self.name, fn)
if fn and fn.body == '':
yield fn.make_patch('')
yield self.make_patch('')
class StaticVoidFunction(FileMatch):
"""simple static void function
(no replacement rules)
"""
#NOTE: just like RE_FULL_STRUCT, this doesn't parse any of the body contents
    # of the function. It will just look for "}" at the beginning of a line
regexp = S(r'static\s+void\s+', NAMED('name', RE_IDENTIFIER), r'\s*\(\s*void\s*\)\n',
r'{\n',
NAMED('body',
# acceptable inside the function body:
# - lines starting with space or tab
# - empty lines
# - preprocessor directives
OR(r'[ \t][^\n]*\n',
r'#[^\n]*\n',
r'\n',
repeat='*')),
r'};?\n')
@property
def body(self) -> str:
return self.group('body')
def has_preprocessor_directive(self) -> bool:
return bool(re.search(r'^[ \t]*#', self.body, re.MULTILINE))
def find_containing_func(m: FileMatch) -> Optional['StaticVoidFunction']:
"""Return function containing this match"""
for fn in m.file.matches_of_type(StaticVoidFunction):
if fn.contains(m):
return fn
return None
class TypeRegisterStaticCall(FileMatch):
"""type_register_static() call
Will be replaced by TYPE_INFO() macro
"""
regexp = S(r'^[ \t]*', NAMED('func_name', 'type_register_static'),
r'\s*\(&\s*', NAMED('name', RE_IDENTIFIER), r'\s*\);[ \t]*\n')
class UseTypeInfo(TypeRegisterStaticCall):
"""Replace type_register_static() call with TYPE_INFO declaration"""
def gen_patches(self) -> Iterable[Patch]:
fn = find_containing_func(self)
if fn:
DBG("%r is inside %r", self, fn)
type_init = self.file.find_match(TypeInitMacro, fn.name)
if type_init is None:
self.warn("can't find type_init(%s) line", fn.name)
if not self.file.force:
return
else:
self.warn("can't identify the function where type_register_static(&%s) is called", self.name)
if not self.file.force:
return
#if fn.has_preprocessor_directive() and not self.file.force:
# self.warn("function %s has preprocessor directives, this requires --force", fn.name)
# return
var = self.file.find_match(TypeInfoVar, self.name)
if var is None:
self.warn("can't find TypeInfo var declaration for %s", self.name)
return
if not var.is_full():
self.warn("variable declaration %s wasn't parsed fully", var.name)
if not self.file.force:
return
if fn and fn.contains(var):
self.warn("TypeInfo %s variable is inside a function", self.name)
if not self.file.force:
return
# delete type_register_static() call:
yield self.make_patch('')
# append TYPE_REGISTER(...) after variable declaration:
yield var.append(f'TYPE_INFO({self.name})\n')
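# Illustrative effect (hypothetical names): in
#   static void my_device_register_types(void)
#   {
#       type_register_static(&my_device_info);
#   }
#   type_init(my_device_register_types)
# the type_register_static() call is deleted and TYPE_INFO(my_device_info)
# is appended right after the my_device_info variable declaration; the
# now-empty function and its type_init() line are then removable by
# DeleteEmptyTypeInitFunc.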
class TypeRegisterCall(FileMatch):
"""type_register_static() call"""
regexp = S(r'^[ \t]*', NAMED('func_name', 'type_register'),
r'\s*\(&\s*', NAMED('name', RE_IDENTIFIER), r'\s*\);[ \t]*\n')
class MakeTypeRegisterStatic(TypeRegisterCall):
"""Make type_register() call static if variable is static const"""
def gen_patches(self):
var = self.file.find_match(TypeInfoVar, self.name)
if var is None:
self.warn("can't find TypeInfo var declaration for %s", self.name)
return
if var.is_static() and var.is_const():
yield self.group_match('func_name').make_patch('type_register_static')
class MakeTypeRegisterNotStatic(TypeRegisterStaticCall):
"""Make type_register() call static if variable is static const"""
def gen_patches(self):
var = self.file.find_match(TypeInfoVar, self.name)
if var is None:
self.warn("can't find TypeInfo var declaration for %s", self.name)
return
if not var.is_static() or not var.is_const():
yield self.group_match('func_name').make_patch('type_register')
class TypeInfoMacro(FileMatch):
"""TYPE_INFO macro usage"""
regexp = S(r'^[ \t]*TYPE_INFO\s*\(\s*', NAMED('name', RE_IDENTIFIER), r'\s*\)[ \t]*;?[ \t]*\n')
def find_type_info(files: RegexpScanner, name: str) -> Optional[TypeInfoVar]:
ti = [ti for ti in files.matches_of_type(TypeInfoVar)
if ti.get_raw_initializer_value('name') == name]
DBG("type info vars: %r", ti)
if len(ti) > 1:
DBG("multiple TypeInfo vars found for %s", name)
return None
if len(ti) == 0:
DBG("no TypeInfo var found for %s", name)
return None
return ti[0]
class CreateClassStruct(DeclareInstanceChecker):
"""Replace DECLARE_INSTANCE_CHECKER with OBJECT_DECLARE_SIMPLE_TYPE"""
def gen_patches(self) -> Iterable[Patch]:
typename = self.group('typename')
DBG("looking for TypeInfo variable for %s", typename)
var = find_type_info(self.allfiles, typename)
if var is None:
self.warn("no TypeInfo var found for %s", typename)
return
assert var.initializers
if 'class_size' in var.initializers:
self.warn("class size already set for TypeInfo %s", var.name)
return
classtype = self.group('instancetype')+'Class'
return
yield
#TODO: need to find out what's the parent class type...
#yield var.append_field('class_size', f'sizeof({classtype})')
#c = (f'OBJECT_DECLARE_SIMPLE_TYPE({instancetype}, {lowercase},\n'
# f' MODULE_OBJ_NAME, ParentClassType)\n')
#yield self.make_patch(c)
def type_infos(file: FileInfo) -> Iterable[TypeInfoVar]:
return file.matches_of_type(TypeInfoVar)
def full_types(file: FileInfo) -> Iterable[TypeInfoVar]:
return [t for t in type_infos(file) if t.is_full()]
def partial_types(file: FileInfo) -> Iterable[TypeInfoVar]:
return [t for t in type_infos(file) if not t.is_full()]
| 39,283 | 39.498969 | 129 | py |
qemu | qemu-master/scripts/codeconverter/codeconverter/__init__.py | 0 | 0 | 0 | py |
|
qemu | qemu-master/scripts/codeconverter/codeconverter/regexps.py | # Copyright (C) 2020 Red Hat Inc.
#
# Authors:
# Eduardo Habkost <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
"""Helpers for creation of regular expressions"""
import re
import logging
logger = logging.getLogger(__name__)
DBG = logger.debug
INFO = logger.info
WARN = logger.warning
def S(*regexps) -> str:
"""Just a shortcut to concatenate multiple regexps more easily"""
return ''.join(regexps)
def P(*regexps, name=None, capture=False, repeat='') -> str:
"""Just add parenthesis around regexp(s), with optional name or repeat suffix"""
s = S(*regexps)
if name:
return f'(?P<{name}>{s}){repeat}'
elif capture:
return f'({s}){repeat}'
else:
return f'(?:{s}){repeat}'
def NAMED(name, *regexps) -> str:
"""Make named group using <P<name>...) syntax
>>> NAMED('mygroup', 'xyz', 'abc')
'(?P<mygroup>xyzabc)'
"""
return P(*regexps, name=name)
def OR(*regexps, **kwargs) -> str:
"""Build (a|b|c) regexp"""
return P('|'.join(regexps), **kwargs)
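# Illustrative self-check for OR(), in the spirit of test_optional_pars()
# below; not part of the original module.
def test_or():
    r = OR('abc', 'def', name='word') + '$'
    m = re.match(r, 'abc')
    assert m and m.group('word') == 'abc'
    assert re.match(r, 'def')
    assert not re.match(r, 'abcdef')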
def M(*regexps, n='*', name=None) -> str:
"""Add repetition qualifier to regexp(s)
>>> M('a', 'b')
'(?:ab)*'
>>> M('a' , 'b', n='+')
'(?:ab)+'
>>> M('a' , 'b', n='{2,3}', name='name')
'(?P<name>(?:ab){2,3})'
"""
r = P(*regexps, repeat=n)
if name:
r = NAMED(name, r)
return r
# helper to make parentheses optional around regexp
OPTIONAL_PARS = lambda R: OR(S(r'\(\s*', R, r'\s*\)'), R)
def test_optional_pars():
r = OPTIONAL_PARS('abc')+'$'
assert re.match(r, 'abc')
assert re.match(r, '(abc)')
assert not re.match(r, '(abcd)')
assert not re.match(r, '(abc')
assert not re.match(r, 'abc)')
# this disables the MULTILINE flag, so it will match at the
# beginning of the file:
RE_FILE_BEGIN = r'(?-m:^)'
# C primitives:
SP = r'\s*'
RE_COMMENT = r'//[^\n]*$|/\*([^*]|\*[^/])*\*/'
RE_COMMENTS = M(RE_COMMENT + SP)
RE_IDENTIFIER = r'[a-zA-Z_][a-zA-Z0-9_]*(?![a-zA-Z0-9])'
RE_STRING = r'\"([^\"\\]|\\[a-z\"])*\"'
RE_NUMBER = r'0x[0-9a-fA-F]+|[0-9]+'
# space or escaped newlines:
CPP_SPACE = OR(r'\s', r'\\\n', repeat='+')
RE_PATH = '[a-zA-Z0-9/_.-]+'
RE_INCLUDEPATH = OR(S(r'\"', RE_PATH, r'\"'),
S(r'<', RE_PATH, r'>'))
RE_INCLUDE = S(r'^[ \t]*#[ \t]*include[ \t]+', NAMED('includepath', RE_INCLUDEPATH), r'[ \t]*\n')
RE_SIMPLEDEFINE = S(r'^[ \t]*#[ \t]*define[ \t]+', RE_IDENTIFIER, r'[ \t]*\n')
RE_STRUCT_TYPE = S(r'struct\s+', RE_IDENTIFIER)
RE_TYPE = OR(RE_IDENTIFIER, RE_STRUCT_TYPE)
RE_MACRO_CONCAT = M(S(OR(RE_IDENTIFIER, RE_STRING), SP), n='{2,}')
RE_SIMPLE_VALUE = OR(RE_IDENTIFIER, RE_STRING, RE_NUMBER)
RE_FUN_CALL = S(RE_IDENTIFIER, r'\s*\(\s*', RE_SIMPLE_VALUE, r'\s*\)')
RE_SIZEOF = S(r'sizeof\s*\(\s*', NAMED('sizeoftype', RE_TYPE), r'\s*\)')
RE_ADDRESS = S(r'&\s*', RE_IDENTIFIER)
RE_ARRAY_ITEM = S(r'{\s*', NAMED('arrayitem', M(RE_SIMPLE_VALUE, n='?')), r'\s*}\s*,?')
RE_ARRAY_CAST = S(r'\(\s*', RE_IDENTIFIER, r'\s*\[\s*\]\)')
RE_ARRAY_ITEMS = M(S(RE_ARRAY_ITEM, SP))
RE_ARRAY = S(M(RE_ARRAY_CAST, n='?'), r'\s*{\s*',
NAMED('arrayitems', RE_ARRAY_ITEMS),
r'}')
# NOTE: this covers a very small subset of valid expressions
RE_EXPRESSION = OR(RE_SIZEOF, RE_FUN_CALL, RE_MACRO_CONCAT, RE_SIMPLE_VALUE,
RE_ARRAY, RE_ADDRESS)
| 3,419 | 27.739496 | 97 | py |
qemu | qemu-master/scripts/coverage/compare_gcov_json.py | #!/usr/bin/env python3
#
# Compare output of two gcovr JSON reports and report differences. To
# generate the required output first:
# - create two build dirs with --enable-gcov
# - run set of tests in each
# - run make coverage-html in each
# - run gcovr --json --exclude-unreachable-branches \
# --print-summary -o coverage.json --root ../../ . *.p
#
# Author: Alex Bennée <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
import argparse
import json
import sys
from pathlib import Path
def create_parser():
parser = argparse.ArgumentParser(
prog='compare_gcov_json',
description='analyse the differences in coverage between two runs')
parser.add_argument('-a', type=Path, default=None,
help=('First file to check'))
parser.add_argument('-b', type=Path, default=None,
help=('Second file to check'))
parser.add_argument('--verbose', action='store_true', default=False,
                        help=('Print per-file covered/total line counts '
                              'while loading the reports'))
return parser
# See https://gcovr.com/en/stable/output/json.html#json-format-reference
def load_json(json_file_path: Path, verbose = False) -> dict[str, set[int]]:
with open(json_file_path) as f:
data = json.load(f)
root_dir = json_file_path.absolute().parent
covered_lines = dict()
for filecov in data["files"]:
file_path = Path(filecov["file"])
# account for generated files - map into src tree
resolved_path = Path(file_path).absolute()
if resolved_path.is_relative_to(root_dir):
file_path = resolved_path.relative_to(root_dir)
# print(f"remapped {resolved_path} to {file_path}")
lines = filecov["lines"]
executed_lines = set(
linecov["line_number"]
for linecov in filecov["lines"]
if linecov["count"] != 0 and not linecov["gcovr/noncode"]
)
# if this file has any coverage add it to the system
if len(executed_lines) > 0:
if verbose:
print(f"file {file_path} {len(executed_lines)}/{len(lines)}")
covered_lines[str(file_path)] = executed_lines
return covered_lines
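# The returned mapping looks like (illustrative paths and line numbers):
#   {'hw/core/machine.c': {10, 11, 42}, 'util/cutils.c': {7, 9}}
# i.e. source path -> set of line numbers executed at least once.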
def find_missing_files(first, second):
    """
    Return a list of files not covered in the second set
    """
    return [f for f in sorted(first) if f not in second]
def main():
"""
Script entry point
"""
parser = create_parser()
args = parser.parse_args()
if not args.a or not args.b:
print("We need two files to compare")
sys.exit(1)
first_coverage = load_json(args.a, args.verbose)
second_coverage = load_json(args.b, args.verbose)
first_missing = find_missing_files(first_coverage,
second_coverage)
second_missing = find_missing_files(second_coverage,
first_coverage)
a_name = args.a.parent.name
b_name = args.b.parent.name
print(f"{b_name} missing coverage in {len(first_missing)} files")
for f in first_missing:
print(f" {f}")
print(f"{a_name} missing coverage in {len(second_missing)} files")
for f in second_missing:
print(f" {f}")
if __name__ == '__main__':
main()
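# Example run (illustrative build-directory names):
#   ./compare_gcov_json.py -a build-a/coverage.json -b build-b/coverage.json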
| 3,540 | 28.508333 | 77 | py |
qemu | qemu-master/scripts/simplebench/bench_prealloc.py | #!/usr/bin/env python3
#
# Benchmark preallocate filter
#
# Copyright (c) 2020 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import subprocess
import re
import json
import simplebench
from results_to_text import results_to_text
def qemu_img_bench(args):
p = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
universal_newlines=True)
if p.returncode == 0:
try:
m = re.search(r'Run completed in (\d+.\d+) seconds.', p.stdout)
return {'seconds': float(m.group(1))}
except Exception:
return {'error': f'failed to parse qemu-img output: {p.stdout}'}
else:
return {'error': f'qemu-img failed: {p.returncode}: {p.stdout}'}
def bench_func(env, case):
fname = f"{case['dir']}/prealloc-test.qcow2"
try:
os.remove(fname)
except OSError:
pass
subprocess.run([env['qemu-img-binary'], 'create', '-f', 'qcow2', fname,
'16G'], stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, check=True)
args = [env['qemu-img-binary'], 'bench', '-c', str(case['count']),
'-d', '64', '-s', case['block-size'], '-t', 'none', '-n', '-w']
if env['prealloc']:
args += ['--image-opts',
'driver=qcow2,file.driver=preallocate,file.file.driver=file,'
f'file.file.filename={fname}']
else:
args += ['-f', 'qcow2', fname]
return qemu_img_bench(args)
def auto_count_bench_func(env, case):
case['count'] = 100
while True:
res = bench_func(env, case)
if 'error' in res:
return res
if res['seconds'] >= 1:
break
case['count'] *= 10
if res['seconds'] < 5:
case['count'] = round(case['count'] * 5 / res['seconds'])
res = bench_func(env, case)
if 'error' in res:
return res
res['iops'] = case['count'] / res['seconds']
return res
if __name__ == '__main__':
if len(sys.argv) < 2:
print(f'USAGE: {sys.argv[0]} <qemu-img binary> '
'DISK_NAME:DIR_PATH ...')
exit(1)
qemu_img = sys.argv[1]
envs = [
{
'id': 'no-prealloc',
'qemu-img-binary': qemu_img,
'prealloc': False
},
{
'id': 'prealloc',
'qemu-img-binary': qemu_img,
'prealloc': True
}
]
aligned_cases = []
unaligned_cases = []
for disk in sys.argv[2:]:
name, path = disk.split(':')
aligned_cases.append({
'id': f'{name}, aligned sequential 16k',
'block-size': '16k',
'dir': path
})
unaligned_cases.append({
'id': f'{name}, unaligned sequential 64k',
'block-size': '16k',
'dir': path
})
result = simplebench.bench(auto_count_bench_func, envs,
aligned_cases + unaligned_cases, count=5)
print(results_to_text(result))
with open('results.json', 'w') as f:
json.dump(result, f, indent=4)
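# Example invocation (illustrative binary path and disk labels):
#   ./bench_prealloc.py ../../build/qemu-img ssd:/ssd/tmp hdd:/mnt/hdd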
| 3,755 | 27.240602 | 78 | py |
qemu | qemu-master/scripts/simplebench/bench-backup.py | #!/usr/bin/env python3
#
# Bench backup block-job
#
# Copyright (c) 2020 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import argparse
import json
import simplebench
from results_to_text import results_to_text
from bench_block_job import bench_block_copy, drv_file, drv_nbd, drv_qcow2
def bench_func(env, case):
""" Handle one "cell" of benchmarking table. """
cmd_options = env['cmd-options'] if 'cmd-options' in env else {}
return bench_block_copy(env['qemu-binary'], env['cmd'],
cmd_options,
case['source'], case['target'])
def bench(args):
test_cases = []
# paths with colon not supported, so we just split by ':'
dirs = dict(d.split(':') for d in args.dir)
nbd_drv = None
if args.nbd:
nbd = args.nbd.split(':')
host = nbd[0]
port = '10809' if len(nbd) == 1 else nbd[1]
nbd_drv = drv_nbd(host, port)
for t in args.test:
src, dst = t.split(':')
if src == 'nbd' and dst == 'nbd':
raise ValueError("Can't use 'nbd' label for both src and dst")
if (src == 'nbd' or dst == 'nbd') and not nbd_drv:
raise ValueError("'nbd' label used but --nbd is not given")
if src == 'nbd':
source = nbd_drv
elif args.qcow2_sources:
source = drv_qcow2(drv_file(dirs[src] + '/test-source.qcow2'))
else:
source = drv_file(dirs[src] + '/test-source')
if dst == 'nbd':
test_cases.append({'id': t, 'source': source, 'target': nbd_drv})
continue
if args.target_cache == 'both':
target_caches = ['direct', 'cached']
else:
target_caches = [args.target_cache]
for c in target_caches:
o_direct = c == 'direct'
fname = dirs[dst] + '/test-target'
if args.compressed:
fname += '.qcow2'
target = drv_file(fname, o_direct=o_direct)
if args.compressed:
target = drv_qcow2(target)
test_id = t
if args.target_cache == 'both':
test_id += f'({c})'
test_cases.append({'id': test_id, 'source': source,
'target': target})
test_envs = []
bin_paths = {}
for i, q in enumerate(args.env):
opts = q.split(',')
label_path = opts[0]
opts = opts[1:]
if ':' in label_path:
# path with colon inside is not supported
label, path = label_path.split(':')
bin_paths[label] = path
elif label_path in bin_paths:
label = label_path
path = bin_paths[label]
else:
path = label_path
label = f'q{i}'
bin_paths[label] = path
x_perf = {}
is_mirror = False
for opt in opts:
if opt == 'mirror':
is_mirror = True
elif opt == 'copy-range=on':
x_perf['use-copy-range'] = True
elif opt == 'copy-range=off':
x_perf['use-copy-range'] = False
elif opt.startswith('max-workers='):
x_perf['max-workers'] = int(opt.split('=')[1])
backup_options = {}
if x_perf:
backup_options['x-perf'] = x_perf
if args.compressed:
backup_options['compress'] = True
if is_mirror:
assert not x_perf
test_envs.append({
'id': f'mirror({label})',
'cmd': 'blockdev-mirror',
'qemu-binary': path
})
else:
test_envs.append({
'id': f'backup({label})\n' + '\n'.join(opts),
'cmd': 'blockdev-backup',
'cmd-options': backup_options,
'qemu-binary': path
})
result = simplebench.bench(bench_func, test_envs, test_cases,
count=args.count, initial_run=args.initial_run,
drop_caches=args.drop_caches)
with open('results.json', 'w') as f:
json.dump(result, f, indent=4)
print(results_to_text(result))
class ExtendAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
items = getattr(namespace, self.dest) or []
items.extend(values)
setattr(namespace, self.dest, items)
if __name__ == '__main__':
p = argparse.ArgumentParser('Backup benchmark', epilog='''
ENV format
(LABEL:PATH|LABEL|PATH)[,max-workers=N][,use-copy-range=(on|off)][,mirror]
LABEL short name for the binary
PATH path to the binary
max-workers set x-perf.max-workers of backup job
use-copy-range set x-perf.use-copy-range of backup job
mirror use mirror job instead of backup''',
formatter_class=argparse.RawTextHelpFormatter)
p.add_argument('--env', nargs='+', help='''\
Qemu binaries with labels and options, see below
"ENV format" section''',
action=ExtendAction)
p.add_argument('--dir', nargs='+', help='''\
Directories, each containing "test-source" and/or
"test-target" files, raw images to used in
benchmarking. File path with label, like
label:/path/to/directory''',
action=ExtendAction)
p.add_argument('--nbd', help='''\
host:port for remote NBD image, (or just host, for
default port 10809). Use it in tests, label is "nbd"
(but you cannot create test nbd:nbd).''')
p.add_argument('--test', nargs='+', help='''\
Tests, in form source-dir-label:target-dir-label''',
action=ExtendAction)
p.add_argument('--compressed', help='''\
Use compressed backup. It automatically means
creating a qcow2 target with
lazy_refcounts for each test run''', action='store_true')
p.add_argument('--qcow2-sources', help='''\
Use test-source.qcow2 images as sources instead of
test-source raw images''', action='store_true')
p.add_argument('--target-cache', help='''\
Setup cache for target nodes. Options:
direct: default, use O_DIRECT and aio=native
cached: use system cache (Qemu default) and aio=threads (Qemu default)
both: generate two test cases for each src:dst pair''',
default='direct', choices=('direct', 'cached', 'both'))
p.add_argument('--count', type=int, default=3, help='''\
Number of test runs per table cell''')
# BooleanOptionalAction helps to support --no-initial-run option
p.add_argument('--initial-run', action=argparse.BooleanOptionalAction,
help='''\
Do additional initial run per cell which doesn't count in result,
default true''')
p.add_argument('--drop-caches', action='store_true', help='''\
Do "sync; echo 3 > /proc/sys/vm/drop_caches" before each test run''')
bench(p.parse_args())
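# Example invocation (illustrative paths, labels and test pairs; adjust to
# your build and storage layout):
#   ./bench-backup.py \
#       --env old:/old-build/qemu-system-x86_64 new:/new-build/qemu-system-x86_64 \
#       --dir ssd:/ssd/tmp hdd:/mnt/hdd \
#       --test ssd:ssd ssd:hdd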
| 8,046 | 34.139738 | 78 | py |
qemu | qemu-master/scripts/simplebench/simplebench.py | #!/usr/bin/env python
#
# Simple benchmarking framework
#
# Copyright (c) 2019 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import statistics
import subprocess
import time
def do_drop_caches():
subprocess.run('sync; echo 3 > /proc/sys/vm/drop_caches', shell=True,
check=True)
def bench_one(test_func, test_env, test_case, count=5, initial_run=True,
slow_limit=100, drop_caches=False):
"""Benchmark one test-case
test_func -- benchmarking function with prototype
test_func(env, case), which takes test_env and test_case
arguments and on success returns dict with 'seconds' or
'iops' (or both) fields, specifying the benchmark result.
If both 'iops' and 'seconds' provided, the 'iops' is
considered the main, and 'seconds' is just an additional
info. On failure test_func should return {'error': str}.
Returned dict may contain any other additional fields.
test_env -- test environment - opaque first argument for test_func
test_case -- test case - opaque second argument for test_func
count -- how many times to call test_func, to calculate average
    initial_run  -- do an initial run of test_func, which doesn't count
                    toward the result
    slow_limit   -- stop after a run that exceeds slow_limit seconds
                    (the initial run is not measured)
drop_caches -- drop caches before each run
Returns dict with the following fields:
'runs': list of test_func results
'dimension': dimension of results, may be 'seconds' or 'iops'
'average': average value (iops or seconds) per run (exists only if at
least one run succeeded)
'stdev': standard deviation of results
(exists only if at least one run succeeded)
'n-failed': number of failed runs (exists only if at least one run
failed)
"""
if initial_run:
print(' #initial run:')
do_drop_caches()
print(' ', test_func(test_env, test_case))
runs = []
for i in range(count):
t = time.time()
print(' #run {}'.format(i+1))
do_drop_caches()
res = test_func(test_env, test_case)
print(' ', res)
runs.append(res)
if time.time() - t > slow_limit:
print(' - run is too slow, stop here')
break
count = len(runs)
result = {'runs': runs}
succeeded = [r for r in runs if ('seconds' in r or 'iops' in r)]
if succeeded:
if 'iops' in succeeded[0]:
assert all('iops' in r for r in succeeded)
dim = 'iops'
else:
assert all('seconds' in r for r in succeeded)
assert all('iops' not in r for r in succeeded)
dim = 'seconds'
result['dimension'] = dim
result['average'] = statistics.mean(r[dim] for r in succeeded)
if len(succeeded) == 1:
result['stdev'] = 0
else:
result['stdev'] = statistics.stdev(r[dim] for r in succeeded)
if len(succeeded) < count:
result['n-failed'] = count - len(succeeded)
return result
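# Minimal usage sketch (illustrative; 'my_test' is a made-up callback):
#
#   def my_test(env, case):
#       ...                        # run one measurement here
#       return {'seconds': 1.23}   # or {'iops': ...} / {'error': '...'}
#
#   res = bench_one(my_test, {'id': 'env0'}, {'id': 'case0'}, count=3)
#   # res -> {'runs': [...], 'dimension': 'seconds',
#   #         'average': ..., 'stdev': ...}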
def bench(test_func, test_envs, test_cases, *args, **vargs):
"""Fill benchmark table
test_func -- benchmarking function, see bench_one for description
test_envs -- list of test environments, see bench_one
test_cases -- list of test cases, see bench_one
args, vargs -- additional arguments for bench_one
Returns dict with the following fields:
'envs': test_envs
'cases': test_cases
'tab': filled 2D array, where cell [i][j] is bench_one result for
test_cases[i] for test_envs[j] (i.e., rows are test cases and
columns are test environments)
"""
tab = {}
results = {
'envs': test_envs,
'cases': test_cases,
'tab': tab
}
n = 1
n_tests = len(test_envs) * len(test_cases)
for env in test_envs:
for case in test_cases:
print('Testing {}/{}: {} :: {}'.format(n, n_tests,
env['id'], case['id']))
if case['id'] not in tab:
tab[case['id']] = {}
tab[case['id']][env['id']] = bench_one(test_func, env, case,
*args, **vargs)
n += 1
print('Done')
return results
| 5,159 | 35.595745 | 78 | py |
qemu | qemu-master/scripts/simplebench/bench-example.py | #!/usr/bin/env python3
#
# Benchmark example
#
# Copyright (c) 2019 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import simplebench
from results_to_text import results_to_text
from bench_block_job import bench_block_copy, drv_file, drv_nbd
def bench_func(env, case):
""" Handle one "cell" of benchmarking table. """
return bench_block_copy(env['qemu_binary'], env['cmd'], {},
case['source'], case['target'])
# You may set the following five variables to correct values, to turn this
# example into a real benchmark.
ssd_source = '/path-to-raw-source-image-at-ssd'
ssd_target = '/path-to-raw-target-image-at-ssd'
hdd_target = '/path-to-raw-source-image-at-hdd'
nbd_ip = 'nbd-ip-addr'
nbd_port = 'nbd-port-number'
# Test-cases are "rows" in benchmark resulting table, 'id' is a caption for
# the row, other fields are handled by bench_func.
test_cases = [
{
'id': 'ssd -> ssd',
'source': drv_file(ssd_source),
'target': drv_file(ssd_target)
},
{
'id': 'ssd -> hdd',
'source': drv_file(ssd_source),
'target': drv_file(hdd_target)
},
{
'id': 'ssd -> nbd',
'source': drv_file(ssd_source),
'target': drv_nbd(nbd_ip, nbd_port)
},
]
# Test-envs are "columns" in benchmark resulting table, 'id is a caption for
# the column, other fields are handled by bench_func.
test_envs = [
{
'id': 'backup-1',
'cmd': 'blockdev-backup',
'qemu_binary': '/path-to-qemu-binary-1'
},
{
'id': 'backup-2',
'cmd': 'blockdev-backup',
'qemu_binary': '/path-to-qemu-binary-2'
},
{
'id': 'mirror',
'cmd': 'blockdev-mirror',
'qemu_binary': '/path-to-qemu-binary-1'
}
]
result = simplebench.bench(bench_func, test_envs, test_cases, count=3)
print(results_to_text(result))
| 2,493 | 29.414634 | 76 | py |
qemu | qemu-master/scripts/simplebench/results_to_text.py | #!/usr/bin/env python3
#
# Simple benchmarking framework
#
# Copyright (c) 2019 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import math
import tabulate
# We want leading whitespace for difference row cells (see below)
tabulate.PRESERVE_WHITESPACE = True
def format_value(x, stdev):
stdev_pr = stdev / x * 100
if stdev_pr < 1.5:
# don't care too much
return f'{x:.2g}'
else:
return f'{x:.2g} ± {math.ceil(stdev_pr)}%'
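# For instance (illustrative numbers):
#   format_value(10.0, 0.1)  ->  '10'         (stdev ~1% of mean, dropped)
#   format_value(10.0, 1.0)  ->  '10 ± 10%'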
def result_to_text(result):
"""Return text representation of bench_one() returned dict."""
if 'average' in result:
s = format_value(result['average'], result['stdev'])
if 'n-failed' in result:
s += '\n({} failed)'.format(result['n-failed'])
return s
else:
return 'FAILED'
def results_dimension(results):
dim = None
for case in results['cases']:
for env in results['envs']:
res = results['tab'][case['id']][env['id']]
if dim is None:
dim = res['dimension']
else:
assert dim == res['dimension']
assert dim in ('iops', 'seconds')
return dim
def results_to_text(results):
"""Return text representation of bench() returned dict."""
n_columns = len(results['envs'])
named_columns = n_columns > 2
dim = results_dimension(results)
tab = []
if named_columns:
# Environment columns are named A, B, ...
tab.append([''] + [chr(ord('A') + i) for i in range(n_columns)])
tab.append([''] + [c['id'] for c in results['envs']])
for case in results['cases']:
row = [case['id']]
case_results = results['tab'][case['id']]
for env in results['envs']:
res = case_results[env['id']]
row.append(result_to_text(res))
tab.append(row)
# Add row of difference between columns. For each column starting from
# B we calculate difference with all previous columns.
row = ['', ''] # case name and first column
for i in range(1, n_columns):
cell = ''
env = results['envs'][i]
res = case_results[env['id']]
if 'average' not in res:
# Failed result
row.append(cell)
continue
for j in range(0, i):
env_j = results['envs'][j]
res_j = case_results[env_j['id']]
cell += ' '
if 'average' not in res_j:
# Failed result
cell += '--'
continue
col_j = tab[0][j + 1] if named_columns else ''
diff_pr = round((res['average'] - res_j['average']) /
res_j['average'] * 100)
cell += f' {col_j}{diff_pr:+}%'
row.append(cell)
tab.append(row)
return f'All results are in {dim}\n\n' + tabulate.tabulate(tab)
if __name__ == '__main__':
import sys
import json
if len(sys.argv) < 2:
print(f'USAGE: {sys.argv[0]} results.json')
exit(1)
with open(sys.argv[1]) as f:
print(results_to_text(json.load(f)))
| 3,807 | 28.984252 | 78 | py |
qemu | qemu-master/scripts/simplebench/img_bench_templater.py | #!/usr/bin/env python3
#
# Process img-bench test templates
#
# Copyright (c) 2021 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import subprocess
import re
import json
import simplebench
from results_to_text import results_to_text
from table_templater import Templater
def bench_func(env, case):
test = templater.gen(env['data'], case['data'])
p = subprocess.run(test, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, universal_newlines=True)
if p.returncode == 0:
try:
m = re.search(r'Run completed in (\d+.\d+) seconds.', p.stdout)
return {'seconds': float(m.group(1))}
except Exception:
return {'error': f'failed to parse qemu-img output: {p.stdout}'}
else:
return {'error': f'qemu-img failed: {p.returncode}: {p.stdout}'}
if __name__ == '__main__':
if len(sys.argv) > 1:
print("""
Usage: img_bench_templater.py < path/to/test-template.sh
This script generates performance tests from a test template (example below),
runs them, and displays the results in a table. The template is read from
stdin. It must be written in bash and end with a `qemu-img bench` invocation
(whose result is parsed to get the test instance’s result).
Use the following syntax in the template to create the various different test
instances:
column templating: {var1|var2|...} - the test will use different values in
different columns. You may use several {} constructions in the test, in which
case the product of all choice sets is used.
row templating: [var1|var2|...] - the same thing, to define rows (test-cases)
Test template example:
Assume you want to compare two qemu-img binaries, called qemu-img-old and
qemu-img-new in your build directory in two test-cases with 4K writes and 64K
writes. The template may look like this:
qemu_img=/path/to/qemu/build/qemu-img-{old|new}
$qemu_img create -f qcow2 /ssd/x.qcow2 1G
$qemu_img bench -c 100 -d 8 [-s 4K|-s 64K] -w -t none -n /ssd/x.qcow2
When passing this to stdin of img_bench_templater.py, the resulting comparison
table will contain two columns (for two binaries) and two rows (for two
test-cases).
In addition to displaying the results, script also stores results in JSON
format into results.json file in current directory.
""")
sys.exit()
templater = Templater(sys.stdin.read())
envs = [{'id': ' / '.join(x), 'data': x} for x in templater.columns]
cases = [{'id': ' / '.join(x), 'data': x} for x in templater.rows]
result = simplebench.bench(bench_func, envs, cases, count=5,
initial_run=False)
print(results_to_text(result))
with open('results.json', 'w') as f:
json.dump(result, f, indent=4)
| 3,386 | 34.28125 | 78 | py |
qemu | qemu-master/scripts/simplebench/bench_write_req.py | #!/usr/bin/env python3
#
# Test to compare performance of write requests for two qemu-img binary files.
#
# The idea of the test comes from intention to check the benefit of c8bb23cbdbe
# "qcow2: skip writing zero buffers to empty COW areas".
#
# Copyright (c) 2020 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import subprocess
import simplebench
from results_to_text import results_to_text
def bench_func(env, case):
""" Handle one "cell" of benchmarking table. """
return bench_write_req(env['qemu_img'], env['image_name'],
case['block_size'], case['block_offset'],
case['cluster_size'])
def qemu_img_pipe(*args):
'''Run qemu-img and return its output'''
subp = subprocess.Popen(list(args),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
exitcode = subp.wait()
if exitcode < 0:
sys.stderr.write('qemu-img received signal %i: %s\n'
% (-exitcode, ' '.join(list(args))))
return subp.communicate()[0]
def bench_write_req(qemu_img, image_name, block_size, block_offset,
cluster_size):
"""Benchmark write requests
The function creates a QCOW2 image with the given path/name. Then it runs
    the 'qemu-img bench' command and makes a series of write requests on the
image clusters. Finally, it returns the total time of the write operations
on the disk.
qemu_img -- path to qemu_img executable file
image_name -- QCOW2 image name to create
block_size -- size of a block to write to clusters
block_offset -- offset of the block in clusters
cluster_size -- size of the image cluster
Returns {'seconds': int} on success and {'error': str} on failure.
Return value is compatible with simplebench lib.
"""
if not os.path.isfile(qemu_img):
print(f'File not found: {qemu_img}')
sys.exit(1)
image_dir = os.path.dirname(os.path.abspath(image_name))
if not os.path.isdir(image_dir):
print(f'Path not found: {image_name}')
sys.exit(1)
image_size = 1024 * 1024 * 1024
args_create = [qemu_img, 'create', '-f', 'qcow2', '-o',
f'cluster_size={cluster_size}',
image_name, str(image_size)]
count = int(image_size / cluster_size) - 1
step = str(cluster_size)
args_bench = [qemu_img, 'bench', '-w', '-n', '-t', 'none', '-c',
str(count), '-s', f'{block_size}', '-o', str(block_offset),
'-S', step, '-f', 'qcow2', image_name]
try:
qemu_img_pipe(*args_create)
except OSError as e:
os.remove(image_name)
return {'error': 'qemu_img create failed: ' + str(e)}
try:
ret = qemu_img_pipe(*args_bench)
except OSError as e:
os.remove(image_name)
return {'error': 'qemu_img bench failed: ' + str(e)}
os.remove(image_name)
if 'seconds' in ret:
ret_list = ret.split()
index = ret_list.index('seconds.')
return {'seconds': float(ret_list[index-1])}
else:
return {'error': 'qemu_img bench failed: ' + ret}
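# 'qemu-img bench' reports e.g. "Run completed in 13.182 seconds." (illustrative
# output); the parsing above takes the token right before the word 'seconds.'.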
if __name__ == '__main__':
if len(sys.argv) < 4:
program = os.path.basename(sys.argv[0])
print(f'USAGE: {program} <path to qemu-img binary file> '
'<path to another qemu-img to compare performance with> '
'<full or relative name for QCOW2 image to create>')
exit(1)
# Test-cases are "rows" in benchmark resulting table, 'id' is a caption
# for the row, other fields are handled by bench_func.
test_cases = [
{
'id': '<cluster front>',
'block_size': 4096,
'block_offset': 0,
'cluster_size': 1048576
},
{
'id': '<cluster middle>',
'block_size': 4096,
'block_offset': 524288,
'cluster_size': 1048576
},
{
'id': '<cross cluster>',
'block_size': 1048576,
'block_offset': 4096,
'cluster_size': 1048576
},
{
'id': '<cluster 64K>',
'block_size': 4096,
'block_offset': 0,
'cluster_size': 65536
},
]
# Test-envs are "columns" in benchmark resulting table, 'id is a caption
# for the column, other fields are handled by bench_func.
# Set the paths below to desired values
test_envs = [
{
'id': '<qemu-img binary 1>',
'qemu_img': f'{sys.argv[1]}',
'image_name': f'{sys.argv[3]}'
},
{
'id': '<qemu-img binary 2>',
'qemu_img': f'{sys.argv[2]}',
'image_name': f'{sys.argv[3]}'
},
]
result = simplebench.bench(bench_func, test_envs, test_cases, count=3,
initial_run=False)
print(results_to_text(result))
| 5,664 | 31.936047 | 79 | py |
qemu | qemu-master/scripts/simplebench/table_templater.py | # Parser for test templates
#
# Copyright (c) 2021 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import itertools
from lark import Lark
grammar = """
start: ( text | column_switch | row_switch )+
column_switch: "{" text ["|" text]+ "}"
row_switch: "[" text ["|" text]+ "]"
text: /[^|{}\[\]]+/
"""
parser = Lark(grammar)
class Templater:
def __init__(self, template):
self.tree = parser.parse(template)
c_switches = []
r_switches = []
for x in self.tree.children:
if x.data == 'column_switch':
c_switches.append([el.children[0].value for el in x.children])
elif x.data == 'row_switch':
r_switches.append([el.children[0].value for el in x.children])
self.columns = list(itertools.product(*c_switches))
self.rows = list(itertools.product(*r_switches))
def gen(self, column, row):
i = 0
j = 0
result = []
for x in self.tree.children:
if x.data == 'text':
result.append(x.children[0].value)
elif x.data == 'column_switch':
result.append(column[i])
i += 1
elif x.data == 'row_switch':
result.append(row[j])
j += 1
return ''.join(result)
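# Usage sketch (illustrative template):
#   t = Templater('qemu-img-{old|new} bench [-s 4K|-s 64K]')
#   t.columns                     -> [('old',), ('new',)]
#   t.rows                        -> [('-s 4K',), ('-s 64K',)]
#   t.gen(('new',), ('-s 64K',))  -> 'qemu-img-new bench -s 64K'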
| 1,931 | 29.666667 | 78 | py |
qemu | qemu-master/scripts/simplebench/bench_block_job.py | #!/usr/bin/env python3
#
# Benchmark block jobs
#
# Copyright (c) 2019 Virtuozzo International GmbH.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import subprocess
import socket
import json
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python'))
from qemu.machine import QEMUMachine
from qemu.qmp import ConnectError
def bench_block_job(cmd, cmd_args, qemu_args):
"""Benchmark block-job
cmd -- qmp command to run block-job (like blockdev-backup)
cmd_args -- dict of qmp command arguments
qemu_args -- list of Qemu command line arguments, including path to Qemu
binary
    Returns {'seconds': int} on success and {'error': str} on failure; the dict
    may contain an additional 'vm-log' field. Return value is compatible with
simplebench lib.
"""
vm = QEMUMachine(qemu_args[0], args=qemu_args[1:])
try:
vm.launch()
except OSError as e:
return {'error': 'popen failed: ' + str(e)}
except (ConnectError, socket.timeout):
return {'error': 'qemu failed: ' + str(vm.get_log())}
try:
res = vm.qmp(cmd, **cmd_args)
if res != {'return': {}}:
vm.shutdown()
return {'error': '"{}" command failed: {}'.format(cmd, str(res))}
e = vm.event_wait('JOB_STATUS_CHANGE')
assert e['data']['status'] == 'created'
start_ms = e['timestamp']['seconds'] * 1000000 + \
e['timestamp']['microseconds']
e = vm.events_wait((('BLOCK_JOB_READY', None),
('BLOCK_JOB_COMPLETED', None),
('BLOCK_JOB_FAILED', None)), timeout=True)
if e['event'] not in ('BLOCK_JOB_READY', 'BLOCK_JOB_COMPLETED'):
vm.shutdown()
return {'error': 'block-job failed: ' + str(e),
'vm-log': vm.get_log()}
if 'error' in e['data']:
vm.shutdown()
return {'error': 'block-job failed: ' + e['data']['error'],
'vm-log': vm.get_log()}
end_ms = e['timestamp']['seconds'] * 1000000 + \
e['timestamp']['microseconds']
finally:
vm.shutdown()
return {'seconds': (end_ms - start_ms) / 1000000.0}
def get_image_size(path):
out = subprocess.run(['qemu-img', 'info', '--out=json', path],
stdout=subprocess.PIPE, check=True).stdout
return json.loads(out)['virtual-size']
def get_blockdev_size(obj):
img = obj['filename'] if 'filename' in obj else obj['file']['filename']
return get_image_size(img)
# Bench backup or mirror
def bench_block_copy(qemu_binary, cmd, cmd_options, source, target):
"""Helper to run bench_block_job() for mirror or backup"""
assert cmd in ('blockdev-backup', 'blockdev-mirror')
if target['driver'] == 'qcow2':
try:
os.remove(target['file']['filename'])
except OSError:
pass
subprocess.run(['qemu-img', 'create', '-f', 'qcow2',
target['file']['filename'],
str(get_blockdev_size(source))],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, check=True)
source['node-name'] = 'source'
target['node-name'] = 'target'
cmd_options['job-id'] = 'job0'
cmd_options['device'] = 'source'
cmd_options['target'] = 'target'
cmd_options['sync'] = 'full'
return bench_block_job(cmd, cmd_options,
[qemu_binary,
'-blockdev', json.dumps(source),
'-blockdev', json.dumps(target)])
def drv_file(filename, o_direct=True):
node = {'driver': 'file', 'filename': filename}
if o_direct:
node['cache'] = {'direct': True}
node['aio'] = 'native'
return node
def drv_nbd(host, port):
return {'driver': 'nbd',
'server': {'type': 'inet', 'host': host, 'port': port}}
def drv_qcow2(file):
return {'driver': 'qcow2', 'file': file}
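# The drv_*() helpers build dicts for Qemu's -blockdev option, e.g.
# (illustrative filename):
#   drv_qcow2(drv_file('/ssd/x.qcow2')) ->
#     {'driver': 'qcow2',
#      'file': {'driver': 'file', 'filename': '/ssd/x.qcow2',
#               'cache': {'direct': True}, 'aio': 'native'}}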
if __name__ == '__main__':
import sys
if len(sys.argv) < 4:
print('USAGE: {} <qmp block-job command name> '
'<json string of arguments for the command> '
'<qemu binary path and arguments>'.format(sys.argv[0]))
exit(1)
res = bench_block_job(sys.argv[1], json.loads(sys.argv[2]), sys.argv[3:])
if 'seconds' in res:
print('{:.2f}'.format(res['seconds']))
else:
print(res)
| 5,089 | 31.012579 | 79 | py |
qemu | qemu-master/scripts/tracetool/vcpu.py | # -*- coding: utf-8 -*-
"""
Generic management for the 'vcpu' property.
"""
__author__ = "Lluís Vilanova <[email protected]>"
__copyright__ = "Copyright 2016, Lluís Vilanova <[email protected]>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
from tracetool import Arguments, try_import
def transform_event(event):
"""Transform event to comply with the 'vcpu' property (if present)."""
if "vcpu" in event.properties:
event.args = Arguments([("void *", "__cpu"), event.args])
fmt = "\"cpu=%p \""
event.fmt = fmt + event.fmt
return event
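# E.g. (illustrative trace-events line): an event declared as
#     vcpu foo(int x) "x=%d"
# gains an implicit ('void *', '__cpu') first argument and its format becomes
# the C string concatenation "cpu=%p " "x=%d".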
def transform_args(format, event, *args, **kwargs):
"""Transforms the arguments to suit the specified format.
    The format module must implement the 'vcpu_transform_args' function, which
    receives the implicit arguments added by the 'vcpu' property and must
    return suitable arguments for the given format.
The function is only called for events with the 'vcpu' property.
Parameters
==========
format : str
Format module name.
event : Event
args, kwargs
Passed to 'vcpu_transform_args'.
Returns
=======
Arguments
The transformed arguments, including the non-implicit ones.
"""
if "vcpu" in event.properties:
ok, func = try_import("tracetool.format." + format,
"vcpu_transform_args")
assert ok
assert func
return Arguments([func(event.args[:1], *args, **kwargs),
event.args[1:]])
else:
return event.args
| 1,663 | 26.733333 | 77 | py |
qemu | qemu-master/scripts/tracetool/__init__.py | # -*- coding: utf-8 -*-
"""
Machinery for generating tracing-related intermediate files.
"""
__author__ = "Lluís Vilanova <[email protected]>"
__copyright__ = "Copyright 2012-2017, Lluís Vilanova <[email protected]>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
import re
import sys
import weakref
import tracetool.format
import tracetool.backend
def error_write(*lines):
"""Write a set of error lines."""
sys.stderr.writelines("\n".join(lines) + "\n")
def error(*lines):
"""Write a set of error lines and exit."""
error_write(*lines)
sys.exit(1)
out_lineno = 1
out_filename = '<none>'
out_fobj = sys.stdout
def out_open(filename):
global out_filename, out_fobj
out_filename = filename
out_fobj = open(filename, 'wt')
def out(*lines, **kwargs):
"""Write a set of output lines.
You can use kwargs as a shorthand for mapping variables when formatting all
the strings in lines.
The 'out_lineno' kwarg is automatically added to reflect the current output
file line number. The 'out_next_lineno' kwarg is also automatically added
with the next output line number. The 'out_filename' kwarg is automatically
added with the output filename.
"""
global out_lineno
output = []
for l in lines:
kwargs['out_lineno'] = out_lineno
kwargs['out_next_lineno'] = out_lineno + 1
kwargs['out_filename'] = out_filename
output.append(l % kwargs)
out_lineno += 1
out_fobj.writelines("\n".join(output) + "\n")
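# E.g. (illustrative):
#   out('static void %(name)s(void);', name='foo')
# writes 'static void foo(void);' and advances out_lineno by one.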
# We only want to allow standard C types or fixed sized
# integer types. We don't want QEMU specific types
# as we can't assume trace backends can resolve all the
# typedefs
ALLOWED_TYPES = [
"int",
"long",
"short",
"char",
"bool",
"unsigned",
"signed",
"int8_t",
"uint8_t",
"int16_t",
"uint16_t",
"int32_t",
"uint32_t",
"int64_t",
"uint64_t",
"void",
"size_t",
"ssize_t",
"uintptr_t",
"ptrdiff_t",
]
def validate_type(name):
bits = name.split(" ")
for bit in bits:
bit = re.sub("\*", "", bit)
if bit == "":
continue
if bit == "const":
continue
if bit not in ALLOWED_TYPES:
raise ValueError("Argument type '%s' is not allowed. "
"Only standard C types and fixed size integer "
"types should be used. struct, union, and "
"other complex pointer types should be "
"declared as 'void *'" % name)
class Arguments:
"""Event arguments description."""
def __init__(self, args):
"""
Parameters
----------
args :
List of (type, name) tuples or Arguments objects.
"""
self._args = []
for arg in args:
if isinstance(arg, Arguments):
self._args.extend(arg._args)
else:
self._args.append(arg)
def copy(self):
"""Create a new copy."""
return Arguments(list(self._args))
@staticmethod
def build(arg_str):
"""Build and Arguments instance from an argument string.
Parameters
----------
arg_str : str
String describing the event arguments.
"""
res = []
for arg in arg_str.split(","):
arg = arg.strip()
if not arg:
raise ValueError("Empty argument (did you forget to use 'void'?)")
if arg == 'void':
continue
if '*' in arg:
arg_type, identifier = arg.rsplit('*', 1)
arg_type += '*'
identifier = identifier.strip()
else:
arg_type, identifier = arg.rsplit(None, 1)
validate_type(arg_type)
res.append((arg_type, identifier))
return Arguments(res)
def __getitem__(self, index):
if isinstance(index, slice):
return Arguments(self._args[index])
else:
return self._args[index]
def __iter__(self):
"""Iterate over the (type, name) pairs."""
return iter(self._args)
def __len__(self):
"""Number of arguments."""
return len(self._args)
def __str__(self):
"""String suitable for declaring function arguments."""
if len(self._args) == 0:
return "void"
else:
return ", ".join([ " ".join([t, n]) for t,n in self._args ])
def __repr__(self):
"""Evaluable string representation for this object."""
return "Arguments(\"%s\")" % str(self)
def names(self):
"""List of argument names."""
return [ name for _, name in self._args ]
def types(self):
"""List of argument types."""
return [ type_ for type_, _ in self._args ]
def casted(self):
"""List of argument names casted to their type."""
return ["(%s)%s" % (type_, name) for type_, name in self._args]
class Event(object):
"""Event description.
Attributes
----------
name : str
The event name.
fmt : str
The event format string.
properties : set(str)
Properties of the event.
args : Arguments
The event arguments.
lineno : int
The line number in the input file.
filename : str
The path to the input file.
"""
_CRE = re.compile("((?P<props>[\w\s]+)\s+)?"
"(?P<name>\w+)"
"\((?P<args>[^)]*)\)"
"\s*"
"(?:(?:(?P<fmt_trans>\".+),)?\s*(?P<fmt>\".+))?"
"\s*")
_VALID_PROPS = set(["disable", "vcpu"])
def __init__(self, name, props, fmt, args, lineno, filename, orig=None,
event_trans=None, event_exec=None):
"""
Parameters
----------
name : string
Event name.
props : list of str
Property names.
fmt : str, list of str
Event printing format string(s).
args : Arguments
Event arguments.
lineno : int
The line number in the input file.
filename : str
The path to the input file.
orig : Event or None
Original Event before transformation/generation.
event_trans : Event or None
Generated translation-time event ("tcg" property).
event_exec : Event or None
Generated execution-time event ("tcg" property).
"""
self.name = name
self.properties = props
self.fmt = fmt
self.args = args
self.lineno = int(lineno)
self.filename = str(filename)
self.event_trans = event_trans
self.event_exec = event_exec
if len(args) > 10:
raise ValueError("Event '%s' has more than maximum permitted "
"argument count" % name)
if orig is None:
self.original = weakref.ref(self)
else:
self.original = orig
unknown_props = set(self.properties) - self._VALID_PROPS
if len(unknown_props) > 0:
raise ValueError("Unknown properties: %s"
% ", ".join(unknown_props))
assert isinstance(self.fmt, str) or len(self.fmt) == 2
def copy(self):
"""Create a new copy."""
return Event(self.name, list(self.properties), self.fmt,
self.args.copy(), self.lineno, self.filename,
self, self.event_trans, self.event_exec)
@staticmethod
def build(line_str, lineno, filename):
"""Build an Event instance from a string.
Parameters
----------
line_str : str
Line describing the event.
lineno : int
Line number in input file.
filename : str
Path to input file.
"""
m = Event._CRE.match(line_str)
assert m is not None
groups = m.groupdict('')
name = groups["name"]
props = groups["props"].split()
fmt = groups["fmt"]
fmt_trans = groups["fmt_trans"]
if fmt.find("%m") != -1 or fmt_trans.find("%m") != -1:
raise ValueError("Event format '%m' is forbidden, pass the error "
"as an explicit trace argument")
if fmt.endswith(r'\n"'):
raise ValueError("Event format must not end with a newline "
"character")
if len(fmt_trans) > 0:
fmt = [fmt_trans, fmt]
args = Arguments.build(groups["args"])
event = Event(name, props, fmt, args, lineno, filename)
# add implicit arguments when using the 'vcpu' property
import tracetool.vcpu
event = tracetool.vcpu.transform_event(event)
return event
def __repr__(self):
"""Evaluable string representation for this object."""
if isinstance(self.fmt, str):
fmt = self.fmt
else:
fmt = "%s, %s" % (self.fmt[0], self.fmt[1])
return "Event('%s %s(%s) %s')" % (" ".join(self.properties),
self.name,
self.args,
fmt)
# Star matching on PRI is dangerous as one might have multiple
# arguments with that format, hence the non-greedy version of it.
_FMT = re.compile("(%[\d\.]*\w+|%.*?PRI\S+)")
def formats(self):
"""List conversion specifiers in the argument print format string."""
assert not isinstance(self.fmt, list)
return self._FMT.findall(self.fmt)
QEMU_TRACE = "trace_%(name)s"
QEMU_TRACE_NOCHECK = "_nocheck__" + QEMU_TRACE
QEMU_TRACE_TCG = QEMU_TRACE + "_tcg"
QEMU_DSTATE = "_TRACE_%(NAME)s_DSTATE"
QEMU_BACKEND_DSTATE = "TRACE_%(NAME)s_BACKEND_DSTATE"
QEMU_EVENT = "_TRACE_%(NAME)s_EVENT"
def api(self, fmt=None):
if fmt is None:
fmt = Event.QEMU_TRACE
return fmt % {"name": self.name, "NAME": self.name.upper()}
def read_events(fobj, fname):
"""Generate the output for the given (format, backends) pair.
Parameters
----------
fobj : file
Event description file.
fname : str
Name of event file
Returns a list of Event objects
"""
events = []
for lineno, line in enumerate(fobj, 1):
if line[-1] != '\n':
raise ValueError("%s does not end with a new line" % fname)
if not line.strip():
continue
if line.lstrip().startswith('#'):
continue
try:
event = Event.build(line, lineno, fname)
except ValueError as e:
arg0 = 'Error at %s:%d: %s' % (fname, lineno, e.args[0])
e.args = (arg0,) + e.args[1:]
raise
events.append(event)
return events
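# A typical input line handled by read_events() looks like (illustrative):
#     qemu_vfree(void *ptr) "ptr %p"
# producing one Event named 'qemu_vfree' with a single 'void *' argument and
# fmt set to the string '"ptr %p"' (the quotes are part of the value).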
class TracetoolError (Exception):
"""Exception for calls to generate."""
pass
def try_import(mod_name, attr_name=None, attr_default=None):
"""Try to import a module and get an attribute from it.
Parameters
----------
mod_name : str
Module name.
attr_name : str, optional
Name of an attribute in the module.
attr_default : optional
Default value if the attribute does not exist in the module.
Returns
-------
A pair indicating whether the module could be imported and the module or
object or attribute value.
"""
try:
module = __import__(mod_name, globals(), locals(), ["__package__"])
if attr_name is None:
return True, module
return True, getattr(module, str(attr_name), attr_default)
except ImportError:
return False, None
def generate(events, group, format, backends,
binary=None, probe_prefix=None):
"""Generate the output for the given (format, backends) pair.
Parameters
----------
events : list
list of Event objects to generate for
group: str
Name of the tracing group
format : str
Output format name.
backends : list
Output backend names.
binary : str or None
See tracetool.backend.dtrace.BINARY.
probe_prefix : str or None
See tracetool.backend.dtrace.PROBEPREFIX.
"""
# fix strange python error (UnboundLocalError tracetool)
import tracetool
format = str(format)
if len(format) == 0:
raise TracetoolError("format not set")
if not tracetool.format.exists(format):
raise TracetoolError("unknown format: %s" % format)
if len(backends) == 0:
raise TracetoolError("no backends specified")
for backend in backends:
if not tracetool.backend.exists(backend):
raise TracetoolError("unknown backend: %s" % backend)
backend = tracetool.backend.Wrapper(backends, format)
import tracetool.backend.dtrace
tracetool.backend.dtrace.BINARY = binary
tracetool.backend.dtrace.PROBEPREFIX = probe_prefix
tracetool.format.generate(events, format, backend, group)
| 13,361 | 28.431718 | 82 | py |
qemu | qemu-master/scripts/tracetool/backend/dtrace.py | # -*- coding: utf-8 -*-
"""
DTrace/SystemTAP backend.
"""
__author__ = "Lluís Vilanova <[email protected]>"
__copyright__ = "Copyright 2012-2017, Lluís Vilanova <[email protected]>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
from tracetool import out
PUBLIC = True
PROBEPREFIX = None
def probeprefix():
if PROBEPREFIX is None:
raise ValueError("you must set PROBEPREFIX")
return PROBEPREFIX
BINARY = None
def binary():
if BINARY is None:
raise ValueError("you must set BINARY")
return BINARY
def generate_h_begin(events, group):
if group == "root":
header = "trace-dtrace-root.h"
else:
header = "trace-dtrace-%s.h" % group
# Workaround for ust backend, which also includes <sys/sdt.h> and may
# require SDT_USE_VARIADIC to be defined. If dtrace includes <sys/sdt.h>
# first without defining SDT_USE_VARIADIC then ust breaks because the
# STAP_PROBEV() macro is not defined.
out('#ifndef SDT_USE_VARIADIC')
out('#define SDT_USE_VARIADIC 1')
out('#endif')
out('#include "%s"' % header,
'')
out('#undef SDT_USE_VARIADIC')
# SystemTap defines <provider>_<name>_ENABLED() but other DTrace
# implementations might not.
for e in events:
out('#ifndef QEMU_%(uppername)s_ENABLED',
'#define QEMU_%(uppername)s_ENABLED() true',
'#endif',
uppername=e.name.upper())
def generate_h(event, group):
out(' QEMU_%(uppername)s(%(argnames)s);',
uppername=event.name.upper(),
argnames=", ".join(event.args.names()))
def generate_h_backend_dstate(event, group):
out(' QEMU_%(uppername)s_ENABLED() || \\',
uppername=event.name.upper())
| 1,840 | 24.219178 | 76 | py |
qemu | qemu-master/scripts/tracetool/backend/simple.py | # -*- coding: utf-8 -*-
"""
Simple built-in backend.
"""
__author__ = "Lluís Vilanova <[email protected]>"
__copyright__ = "Copyright 2012-2017, Lluís Vilanova <[email protected]>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
from tracetool import out
PUBLIC = True
def is_string(arg):
strtype = ('const char*', 'char*', 'const char *', 'char *')
arg_strip = arg.lstrip()
if arg_strip.startswith(strtype) and arg_strip.count('*') == 1:
return True
else:
return False
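# e.g. is_string('const char *')  -> True
#      is_string('char **')       -> False   (double pointer)
#      is_string('int')           -> False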
def generate_h_begin(events, group):
for event in events:
out('void _simple_%(api)s(%(args)s);',
api=event.api(),
args=event.args)
out('')
def generate_h(event, group):
out(' _simple_%(api)s(%(args)s);',
api=event.api(),
args=", ".join(event.args.names()))
def generate_h_backend_dstate(event, group):
out(' trace_event_get_state_dynamic_by_id(%(event_id)s) || \\',
event_id="TRACE_" + event.name.upper())
def generate_c_begin(events, group):
out('#include "qemu/osdep.h"',
'#include "trace/control.h"',
'#include "trace/simple.h"',
'')
def generate_c(event, group):
out('void _simple_%(api)s(%(args)s)',
'{',
' TraceBufferRecord rec;',
api=event.api(),
args=event.args)
sizes = []
for type_, name in event.args:
if is_string(type_):
out(' size_t arg%(name)s_len = %(name)s ? MIN(strlen(%(name)s), MAX_TRACE_STRLEN) : 0;',
name=name)
strsizeinfo = "4 + arg%s_len" % name
sizes.append(strsizeinfo)
else:
sizes.append("8")
sizestr = " + ".join(sizes)
if len(event.args) == 0:
sizestr = '0'
event_id = 'TRACE_' + event.name.upper()
if "vcpu" in event.properties:
        # already checked in the generic format code
cond = "true"
else:
cond = "trace_event_get_state(%s)" % event_id
out('',
' if (!%(cond)s) {',
' return;',
' }',
'',
' if (trace_record_start(&rec, %(event_obj)s.id, %(size_str)s)) {',
' return; /* Trace Buffer Full, Event Dropped ! */',
' }',
cond=cond,
event_obj=event.api(event.QEMU_EVENT),
size_str=sizestr)
if len(event.args) > 0:
for type_, name in event.args:
# string
if is_string(type_):
out(' trace_record_write_str(&rec, %(name)s, arg%(name)s_len);',
name=name)
# pointer var (not string)
elif type_.endswith('*'):
out(' trace_record_write_u64(&rec, (uintptr_t)(uint64_t *)%(name)s);',
name=name)
# primitive data type
else:
out(' trace_record_write_u64(&rec, (uint64_t)%(name)s);',
name=name)
out(' trace_record_finish(&rec);',
'}',
'')
| 3,106 | 26.741071 | 103 | py |
qemu | qemu-master/scripts/tracetool/backend/log.py | # -*- coding: utf-8 -*-
"""
Stderr built-in backend.
"""
__author__ = "Lluís Vilanova <[email protected]>"
__copyright__ = "Copyright 2012-2017, Lluís Vilanova <[email protected]>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
from tracetool import out
PUBLIC = True
def generate_h_begin(events, group):
out('#include "qemu/log-for-trace.h"',
'#include "qemu/error-report.h"',
'')
def generate_h(event, group):
argnames = ", ".join(event.args.names())
if len(event.args) > 0:
argnames = ", " + argnames
if "vcpu" in event.properties:
        # already checked in the generic format code
cond = "true"
else:
cond = "trace_event_get_state(%s)" % ("TRACE_" + event.name.upper())
out(' if (%(cond)s && qemu_loglevel_mask(LOG_TRACE)) {',
' if (message_with_timestamp) {',
' struct timeval _now;',
' gettimeofday(&_now, NULL);',
'#line %(event_lineno)d "%(event_filename)s"',
' qemu_log("%%d@%%zu.%%06zu:%(name)s " %(fmt)s "\\n",',
' qemu_get_thread_id(),',
' (size_t)_now.tv_sec, (size_t)_now.tv_usec',
' %(argnames)s);',
'#line %(out_next_lineno)d "%(out_filename)s"',
' } else {',
'#line %(event_lineno)d "%(event_filename)s"',
' qemu_log("%(name)s " %(fmt)s "\\n"%(argnames)s);',
'#line %(out_next_lineno)d "%(out_filename)s"',
' }',
' }',
cond=cond,
event_lineno=event.lineno,
event_filename=event.filename,
name=event.name,
fmt=event.fmt.rstrip("\n"),
argnames=argnames)
def generate_h_backend_dstate(event, group):
out(' trace_event_get_state_dynamic_by_id(%(event_id)s) || \\',
event_id="TRACE_" + event.name.upper())
| 2,021 | 30.107692 | 76 | py |
qemu | qemu-master/scripts/tracetool/backend/ftrace.py | # -*- coding: utf-8 -*-
"""
Ftrace built-in backend.
"""
__author__ = "Eiichi Tsukata <[email protected]>"
__copyright__ = "Copyright (C) 2013 Hitachi, Ltd."
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
from tracetool import out
PUBLIC = True
def generate_h_begin(events, group):
out('#include "trace/ftrace.h"',
'')
def generate_h(event, group):
argnames = ", ".join(event.args.names())
if len(event.args) > 0:
argnames = ", " + argnames
out(' {',
' char ftrace_buf[MAX_TRACE_STRLEN];',
' int unused __attribute__ ((unused));',
' int trlen;',
' if (trace_event_get_state(%(event_id)s)) {',
'#line %(event_lineno)d "%(event_filename)s"',
' trlen = snprintf(ftrace_buf, MAX_TRACE_STRLEN,',
' "%(name)s " %(fmt)s "\\n" %(argnames)s);',
'#line %(out_next_lineno)d "%(out_filename)s"',
' trlen = MIN(trlen, MAX_TRACE_STRLEN - 1);',
' unused = write(trace_marker_fd, ftrace_buf, trlen);',
' }',
' }',
name=event.name,
args=event.args,
event_id="TRACE_" + event.name.upper(),
event_lineno=event.lineno,
event_filename=event.filename,
fmt=event.fmt.rstrip("\n"),
argnames=argnames)
def generate_h_backend_dstate(event, group):
out(' trace_event_get_state_dynamic_by_id(%(event_id)s) || \\',
event_id="TRACE_" + event.name.upper())
| 1,656 | 28.589286 | 80 | py |
qemu | qemu-master/scripts/tracetool/backend/ust.py | # -*- coding: utf-8 -*-
"""
LTTng User Space Tracing backend.
"""
__author__ = "Lluís Vilanova <[email protected]>"
__copyright__ = "Copyright 2012-2017, Lluís Vilanova <[email protected]>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
from tracetool import out
PUBLIC = True
def generate_h_begin(events, group):
header = 'trace-ust-' + group + '.h'
out('#include <lttng/tracepoint.h>',
'#include "%s"' % header,
'',
'/* tracepoint_enabled() was introduced in LTTng UST 2.7 */',
'#ifndef tracepoint_enabled',
'#define tracepoint_enabled(a, b) true',
'#endif',
'')
def generate_h(event, group):
argnames = ", ".join(event.args.names())
if len(event.args) > 0:
argnames = ", " + argnames
out(' tracepoint(qemu, %(name)s%(tp_args)s);',
name=event.name,
tp_args=argnames)
def generate_h_backend_dstate(event, group):
out(' tracepoint_enabled(qemu, %(name)s) || \\',
name=event.name)
| 1,119 | 23.347826 | 76 | py |
qemu | qemu-master/scripts/tracetool/backend/__init__.py | # -*- coding: utf-8 -*-
"""
Backend management.
Creating new backends
---------------------
A new backend named 'foo-bar' corresponds to Python module
'tracetool/backend/foo_bar.py'.
A backend module should provide a docstring, whose first non-empty line will be
considered its short description.
All backends must generate their contents through the 'tracetool.out' routine.
Backend attributes
------------------
========= ====================================================================
Attribute Description
========= ====================================================================
PUBLIC If exists and is set to 'True', the backend is considered "public".
========= ====================================================================
Backend functions
-----------------
All the following functions are optional, and no output will be generated if
they do not exist.
=============================== ==============================================
Function Description
=============================== ==============================================
generate_<format>_begin(events) Generate backend- and format-specific file
header contents.
generate_<format>_end(events) Generate backend- and format-specific file
footer contents.
generate_<format>(event) Generate backend- and format-specific contents
for the given event.
=============================== ==============================================
"""
__author__ = "Lluís Vilanova <[email protected]>"
__copyright__ = "Copyright 2012-2014, Lluís Vilanova <[email protected]>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
import os
import tracetool
def get_list(only_public=False):
"""Get a list of (name, description) pairs."""
res = [("nop", "Tracing disabled.")]
modnames = []
for filename in os.listdir(tracetool.backend.__path__[0]):
if filename.endswith('.py') and filename != '__init__.py':
modnames.append(filename.rsplit('.', 1)[0])
for modname in sorted(modnames):
module = tracetool.try_import("tracetool.backend." + modname)
# just in case; should never fail unless non-module files are put there
if not module[0]:
continue
module = module[1]
public = getattr(module, "PUBLIC", False)
if only_public and not public:
continue
doc = module.__doc__
if doc is None:
doc = ""
doc = doc.strip().split("\n")[0]
name = modname.replace("_", "-")
res.append((name, doc))
return res
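# Minimal usage sketch (never called): list the public backends this tree
# ships; the column layout is illustrative only.
def _example_list_backends():
    for name, desc in get_list(only_public=True):
        print("%-10s %s" % (name, desc))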
def exists(name):
"""Return whether the given backend exists."""
if len(name) == 0:
return False
if name == "nop":
return True
name = name.replace("-", "_")
return tracetool.try_import("tracetool.backend." + name)[1]
class Wrapper:
def __init__(self, backends, format):
self._backends = [backend.replace("-", "_") for backend in backends]
self._format = format.replace("-", "_")
for backend in self._backends:
assert exists(backend)
assert tracetool.format.exists(self._format)
def _run_function(self, name, *args, **kwargs):
for backend in self._backends:
func = tracetool.try_import("tracetool.backend." + backend,
name % self._format, None)[1]
if func is not None:
func(*args, **kwargs)
def generate_begin(self, events, group):
self._run_function("generate_%s_begin", events, group)
def generate(self, event, group):
self._run_function("generate_%s", event, group)
def generate_backend_dstate(self, event, group):
self._run_function("generate_%s_backend_dstate", event, group)
def generate_end(self, events, group):
self._run_function("generate_%s_end", events, group)
| 4,088 | 31.452381 | 79 | py |
qemu | qemu-master/scripts/tracetool/backend/syslog.py | # -*- coding: utf-8 -*-
"""
Syslog built-in backend.
"""
__author__ = "Paul Durrant <[email protected]>"
__copyright__ = "Copyright 2016, Citrix Systems Inc."
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
from tracetool import out
PUBLIC = True
def generate_h_begin(events, group):
out('#include <syslog.h>',
'')
def generate_h(event, group):
argnames = ", ".join(event.args.names())
if len(event.args) > 0:
argnames = ", " + argnames
if "vcpu" in event.properties:
        # already checked in the generic format code
cond = "true"
else:
cond = "trace_event_get_state(%s)" % ("TRACE_" + event.name.upper())
out(' if (%(cond)s) {',
'#line %(event_lineno)d "%(event_filename)s"',
' syslog(LOG_INFO, "%(name)s " %(fmt)s %(argnames)s);',
'#line %(out_next_lineno)d "%(out_filename)s"',
' }',
cond=cond,
event_lineno=event.lineno,
event_filename=event.filename,
name=event.name,
fmt=event.fmt.rstrip("\n"),
argnames=argnames)
def generate_h_backend_dstate(event, group):
out(' trace_event_get_state_dynamic_by_id(%(event_id)s) || \\',
event_id="TRACE_" + event.name.upper())
| 1,360 | 24.679245 | 76 | py |
qemu | qemu-master/scripts/tracetool/format/d.py | # -*- coding: utf-8 -*-
"""
trace/generated-tracers.dtrace (DTrace only).
"""
__author__ = "Lluís Vilanova <[email protected]>"
__copyright__ = "Copyright 2012-2014, Lluís Vilanova <[email protected]>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
from tracetool import out
from sys import platform
# Reserved keywords from
# https://wikis.oracle.com/display/DTrace/Types,+Operators+and+Expressions
RESERVED_WORDS = (
'auto', 'goto', 'sizeof', 'break', 'if', 'static', 'case', 'import',
'string', 'char', 'inline', 'stringof', 'const', 'int', 'struct',
'continue', 'long', 'switch', 'counter', 'offsetof', 'this',
'default', 'probe', 'translator', 'do', 'provider', 'typedef',
'double', 'register', 'union', 'else', 'restrict', 'unsigned',
'enum', 'return', 'void', 'extern', 'self', 'volatile', 'float',
'short', 'while', 'for', 'signed', 'xlate',
)
def generate(events, backend, group):
events = [e for e in events
if "disable" not in e.properties]
# SystemTap's dtrace(1) warns about empty "provider qemu {}" but is happy
# with an empty file. Avoid the warning.
# But dtrace on macOS can't deal with empty files.
if not events and platform != "darwin":
return
out('/* This file is autogenerated by tracetool, do not edit. */'
'',
'provider qemu {')
for e in events:
args = []
for type_, name in e.args:
if platform == "darwin":
# macOS dtrace accepts only C99 _Bool
if type_ == 'bool':
type_ = '_Bool'
if type_ == 'bool *':
type_ = '_Bool *'
# It converts int8_t * in probe points to char * in header
# files and introduces [-Wpointer-sign] warning.
# Avoid it by changing probe type to signed char * beforehand.
if type_ == 'int8_t *':
type_ = 'signed char *'
# SystemTap dtrace(1) emits a warning when long long is used
type_ = type_.replace('unsigned long long', 'uint64_t')
type_ = type_.replace('signed long long', 'int64_t')
type_ = type_.replace('long long', 'int64_t')
if name in RESERVED_WORDS:
name += '_'
args.append(type_ + ' ' + name)
# Define prototype for probe arguments
out('',
'probe %(name)s(%(args)s);',
name=e.name,
args=','.join(args))
out('',
'};')
| 2,647 | 32.948718 | 78 | py |
qemu | qemu-master/scripts/tracetool/format/simpletrace_stap.py | # -*- coding: utf-8 -*-
"""
Generate .stp file that outputs simpletrace binary traces (DTrace with SystemTAP only).
"""
__author__ = "Stefan Hajnoczi <redhat.com>"
__copyright__ = "Copyright (C) 2014, Red Hat, Inc."
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
from tracetool import out
from tracetool.backend.dtrace import probeprefix
from tracetool.backend.simple import is_string
from tracetool.format.stap import stap_escape
def global_var_name(name):
return probeprefix().replace(".", "_") + "_" + name
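# Illustrative sketch only (never called): with PROBEPREFIX set to the
# assumed value "qemu.user", global_var_name("foo") yields "qemu_user_foo".
def _example_global_var_name():
    import tracetool.backend.dtrace as dtrace
    dtrace.PROBEPREFIX = "qemu.user"  # hypothetical probe prefix
    return global_var_name("foo")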
def generate(events, backend, group):
out('/* This file is autogenerated by tracetool, do not edit. */',
'')
for event_id, e in enumerate(events):
if 'disable' in e.properties:
continue
out('probe %(probeprefix)s.simpletrace.%(name)s = %(probeprefix)s.%(name)s ?',
'{',
probeprefix=probeprefix(),
name=e.name)
# Calculate record size
sizes = ['24'] # sizeof(TraceRecord)
for type_, name in e.args:
name = stap_escape(name)
if is_string(type_):
out(' try {',
' arg%(name)s_str = %(name)s ? user_string_n(%(name)s, 512) : "<null>"',
' } catch {}',
' arg%(name)s_len = strlen(arg%(name)s_str)',
name=name)
sizes.append('4 + arg%s_len' % name)
else:
sizes.append('8')
sizestr = ' + '.join(sizes)
# Generate format string and value pairs for record header and arguments
fields = [('8b', str(event_id)),
('8b', 'gettimeofday_ns()'),
('4b', sizestr),
('4b', 'pid()')]
for type_, name in e.args:
name = stap_escape(name)
if is_string(type_):
fields.extend([('4b', 'arg%s_len' % name),
('.*s', 'arg%s_len, arg%s_str' % (name, name))])
else:
fields.append(('8b', name))
# Emit the entire record in a single SystemTap printf()
fmt_str = '%'.join(fmt for fmt, _ in fields)
arg_str = ', '.join(arg for _, arg in fields)
out(' printf("%%8b%%%(fmt_str)s", 1, %(arg_str)s)',
fmt_str=fmt_str, arg_str=arg_str)
out('}')
out()
| 2,467 | 32.808219 | 99 | py |
qemu | qemu-master/scripts/tracetool/format/ust_events_h.py | # -*- coding: utf-8 -*-
"""
trace/generated-ust-provider.h
"""
__author__ = "Mohamad Gebai <[email protected]>"
__copyright__ = "Copyright 2012, Mohamad Gebai <[email protected]>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
from tracetool import out
def generate(events, backend, group):
events = [e for e in events
if "disabled" not in e.properties]
if group == "all":
include = "trace-ust-all.h"
else:
include = "trace-ust.h"
out('/* This file is autogenerated by tracetool, do not edit. */',
'',
'#undef TRACEPOINT_PROVIDER',
'#define TRACEPOINT_PROVIDER qemu',
'',
'#undef TRACEPOINT_INCLUDE',
'#define TRACEPOINT_INCLUDE "./%s"' % include,
'',
'#if !defined (TRACE_%s_GENERATED_UST_H) || \\' % group.upper(),
' defined(TRACEPOINT_HEADER_MULTI_READ)',
'#define TRACE_%s_GENERATED_UST_H' % group.upper(),
'',
'#include <lttng/tracepoint.h>',
'',
'/*',
' * LTTng ust 2.0 does not allow you to use TP_ARGS(void) for tracepoints',
' * requiring no arguments. We define these macros introduced in more recent'
' * versions of LTTng ust as a workaround',
' */',
'#ifndef _TP_EXPROTO1',
'#define _TP_EXPROTO1(a) void',
'#endif',
'#ifndef _TP_EXDATA_PROTO1',
'#define _TP_EXDATA_PROTO1(a) void *__tp_data',
'#endif',
'#ifndef _TP_EXDATA_VAR1',
'#define _TP_EXDATA_VAR1(a) __tp_data',
'#endif',
'#ifndef _TP_EXVAR1',
'#define _TP_EXVAR1(a)',
'#endif',
'')
for e in events:
if len(e.args) > 0:
out('TRACEPOINT_EVENT(',
' qemu,',
' %(name)s,',
' TP_ARGS(%(args)s),',
' TP_FIELDS(',
name=e.name,
args=", ".join(", ".join(i) for i in e.args))
types = e.args.types()
names = e.args.names()
fmts = e.formats()
for t,n,f in zip(types, names, fmts):
if ('char *' in t) or ('char*' in t):
out(' ctf_string(' + n + ', ' + n + ')')
elif ("%p" in f) or ("x" in f) or ("PRIx" in f):
out(' ctf_integer_hex('+ t + ', ' + n + ', ' + n + ')')
elif ("ptr" in t) or ("*" in t):
out(' ctf_integer_hex('+ t + ', ' + n + ', ' + n + ')')
elif ('int' in t) or ('long' in t) or ('unsigned' in t) \
or ('size_t' in t) or ('bool' in t):
out(' ctf_integer(' + t + ', ' + n + ', ' + n + ')')
elif ('double' in t) or ('float' in t):
out(' ctf_float(' + t + ', ' + n + ', ' + n + ')')
elif ('void *' in t) or ('void*' in t):
out(' ctf_integer_hex(unsigned long, ' + n + ', ' + n + ')')
out(' )',
')',
'')
else:
out('TRACEPOINT_EVENT(',
' qemu,',
' %(name)s,',
' TP_ARGS(void),',
' TP_FIELDS()',
')',
'',
name=e.name)
out('#endif /* TRACE_%s_GENERATED_UST_H */' % group.upper(),
'',
'/* This part must be outside ifdef protection */',
'#include <lttng/tracepoint-event.h>')
| 3,670 | 33.632075 | 86 | py |
qemu | qemu-master/scripts/tracetool/format/stap.py | # -*- coding: utf-8 -*-
"""
Generate .stp file (DTrace with SystemTAP only).
"""
__author__ = "Lluís Vilanova <[email protected]>"
__copyright__ = "Copyright 2012-2014, Lluís Vilanova <[email protected]>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
from tracetool import out
from tracetool.backend.dtrace import binary, probeprefix
# Technically 'self' is not used by systemtap yet, but
# they recommended we keep it in the reserved list anyway
RESERVED_WORDS = (
'break', 'catch', 'continue', 'delete', 'else', 'for',
'foreach', 'function', 'global', 'if', 'in', 'limit',
'long', 'next', 'probe', 'return', 'self', 'string',
'try', 'while'
)
def stap_escape(identifier):
# Append underscore to reserved keywords
if identifier in RESERVED_WORDS:
return identifier + '_'
return identifier
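# Doctest-style sketch (never called): identifiers colliding with
# SystemTap keywords gain a trailing underscore, anything else passes
# through unchanged.
def _example_stap_escape():
    assert stap_escape("next") == "next_"  # reserved word is mangled
    assert stap_escape("addr") == "addr"   # ordinary name is untouched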
def generate(events, backend, group):
events = [e for e in events
if "disable" not in e.properties]
out('/* This file is autogenerated by tracetool, do not edit. */',
'')
for e in events:
# Define prototype for probe arguments
out('probe %(probeprefix)s.%(name)s = process("%(binary)s").mark("%(name)s")',
'{',
probeprefix=probeprefix(),
name=e.name,
binary=binary())
i = 1
if len(e.args) > 0:
for name in e.args.names():
name = stap_escape(name)
out(' %s = $arg%d;' % (name, i))
i += 1
out('}')
out()
| 1,643 | 25.95082 | 86 | py |
qemu | qemu-master/scripts/tracetool/format/log_stap.py | # -*- coding: utf-8 -*-
"""
Generate .stp file that printfs log messages (DTrace with SystemTAP only).
"""
__author__ = "Daniel P. Berrange <[email protected]>"
__copyright__ = "Copyright (C) 2014-2019, Red Hat, Inc."
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Daniel Berrange"
__email__ = "[email protected]"
import re
from tracetool import out
from tracetool.backend.dtrace import binary, probeprefix
from tracetool.backend.simple import is_string
from tracetool.format.stap import stap_escape
def global_var_name(name):
return probeprefix().replace(".", "_") + "_" + name
STATE_SKIP = 0
STATE_LITERAL = 1
STATE_MACRO = 2
def c_macro_to_format(macro):
if macro.startswith("PRI"):
return macro[3]
raise Exception("Unhandled macro '%s'" % macro)
def c_fmt_to_stap(fmt):
    state = STATE_SKIP
    bits = []
    literal = ""
    macro = ""
    escape = 0
for i in range(len(fmt)):
if fmt[i] == '\\':
if escape:
escape = 0
else:
escape = 1
if state != STATE_LITERAL:
raise Exception("Unexpected escape outside string literal")
literal = literal + fmt[i]
elif fmt[i] == '"' and not escape:
if state == STATE_LITERAL:
state = STATE_SKIP
bits.append(literal)
literal = ""
else:
if state == STATE_MACRO:
bits.append(c_macro_to_format(macro))
macro = ""
state = STATE_LITERAL
elif fmt[i] == ' ' or fmt[i] == '\t':
if state == STATE_MACRO:
bits.append(c_macro_to_format(macro))
macro = ""
state = STATE_SKIP
elif state == STATE_LITERAL:
literal = literal + fmt[i]
else:
escape = 0
if state == STATE_SKIP:
state = STATE_MACRO
if state == STATE_LITERAL:
literal = literal + fmt[i]
else:
macro = macro + fmt[i]
if state == STATE_MACRO:
bits.append(c_macro_to_format(macro))
elif state == STATE_LITERAL:
bits.append(literal)
# All variables in systemtap are 64-bit in size
# The "%l" integer size qualifier is thus redundant
# and "%ll" is not valid at all. Similarly the size_t
# based "%z" size qualifier is not valid. We just
# strip all size qualifiers for sanity.
    fmt = re.sub(r"%(\d*)(l+|z)(x|u|d)", r"%\1\3", "".join(bits))
return fmt
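# Hedged sketch of the conversion above (never called); the format literal
# is an invented example in the style of a trace-events format string.
def _example_c_fmt_to_stap():
    # PRIx64 collapses to "%x" and the size qualifier in "%zu" is
    # stripped, since every systemtap integer is 64-bit anyway.
    got = c_fmt_to_stap('"addr 0x%" PRIx64 " size %zu"')
    assert got == 'addr 0x%x size %u'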
def generate(events, backend, group):
out('/* This file is autogenerated by tracetool, do not edit. */',
'')
for event_id, e in enumerate(events):
if 'disable' in e.properties:
continue
out('probe %(probeprefix)s.log.%(name)s = %(probeprefix)s.%(name)s ?',
'{',
probeprefix=probeprefix(),
name=e.name)
# Get references to userspace strings
for type_, name in e.args:
name = stap_escape(name)
if is_string(type_):
out(' try {',
' arg%(name)s_str = %(name)s ? ' +
'user_string_n(%(name)s, 512) : "<null>"',
' } catch {}',
name=name)
# Determine systemtap's view of variable names
fields = ["pid()", "gettimeofday_ns()"]
for type_, name in e.args:
name = stap_escape(name)
if is_string(type_):
fields.append("arg" + name + "_str")
else:
fields.append(name)
# Emit the entire record in a single SystemTap printf()
arg_str = ', '.join(arg for arg in fields)
fmt_str = "%d@%d " + e.name + " " + c_fmt_to_stap(e.fmt) + "\\n"
out(' printf("%(fmt_str)s", %(arg_str)s)',
fmt_str=fmt_str, arg_str=arg_str)
out('}')
out()
| 4,025 | 29.969231 | 78 | py |
qemu | qemu-master/scripts/tracetool/format/__init__.py | # -*- coding: utf-8 -*-
"""
Format management.
Creating new formats
--------------------
A new format named 'foo-bar' corresponds to Python module
'tracetool/format/foo_bar.py'.
A format module should provide a docstring, whose first non-empty line will be
considered its short description.
All formats must generate their contents through the 'tracetool.out' routine.
Format functions
----------------
======== ==================================================================
Function Description
======== ==================================================================
generate Called to generate a format-specific file.
======== ==================================================================
"""
__author__ = "Lluís Vilanova <[email protected]>"
__copyright__ = "Copyright 2012-2014, Lluís Vilanova <[email protected]>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
import os
import tracetool
def get_list():
"""Get a list of (name, description) pairs."""
res = []
modnames = []
for filename in os.listdir(tracetool.format.__path__[0]):
if filename.endswith('.py') and filename != '__init__.py':
modnames.append(filename.rsplit('.', 1)[0])
for modname in sorted(modnames):
module = tracetool.try_import("tracetool.format." + modname)
# just in case; should never fail unless non-module files are put there
if not module[0]:
continue
module = module[1]
doc = module.__doc__
if doc is None:
doc = ""
doc = doc.strip().split("\n")[0]
name = modname.replace("_", "-")
res.append((name, doc))
return res
def exists(name):
"""Return whether the given format exists."""
if len(name) == 0:
return False
name = name.replace("-", "_")
return tracetool.try_import("tracetool.format." + name)[1]
def generate(events, format, backend, group):
if not exists(format):
raise ValueError("unknown format: %s" % format)
format = format.replace("-", "_")
func = tracetool.try_import("tracetool.format." + format,
"generate")[1]
if func is None:
raise AttributeError("format has no 'generate': %s" % format)
func(events, backend, group)
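# Minimal end-to-end sketch (never called): "simple" and "h" are backend
# and format names known to exist in this tree, and `events` would come
# from the tracetool frontend in a real run.
def _example_generate(events):
    import tracetool.backend
    backend = tracetool.backend.Wrapper(["simple"], "h")
    generate(events, "h", backend, "root")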
| 2,392 | 27.152941 | 79 | py |
qemu | qemu-master/scripts/tracetool/format/c.py | # -*- coding: utf-8 -*-
"""
trace/generated-tracers.c
"""
__author__ = "Lluís Vilanova <[email protected]>"
__copyright__ = "Copyright 2012-2014, Lluís Vilanova <[email protected]>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
from tracetool import out
def generate(events, backend, group):
active_events = [e for e in events
if "disable" not in e.properties]
header = "trace-" + group + ".h"
out('/* This file is autogenerated by tracetool, do not edit. */',
'',
'#include "qemu/osdep.h"',
'#include "qemu/module.h"',
'#include "%s"' % header,
'')
for e in events:
out('uint16_t %s;' % e.api(e.QEMU_DSTATE))
for e in events:
if "vcpu" in e.properties:
vcpu_id = 0
else:
vcpu_id = "TRACE_VCPU_EVENT_NONE"
out('TraceEvent %(event)s = {',
' .id = 0,',
' .vcpu_id = %(vcpu_id)s,',
' .name = \"%(name)s\",',
' .sstate = %(sstate)s,',
' .dstate = &%(dstate)s ',
'};',
event = e.api(e.QEMU_EVENT),
vcpu_id = vcpu_id,
name = e.name,
sstate = "TRACE_%s_ENABLED" % e.name.upper(),
dstate = e.api(e.QEMU_DSTATE))
out('TraceEvent *%(group)s_trace_events[] = {',
group = group.lower())
for e in events:
out(' &%(event)s,', event = e.api(e.QEMU_EVENT))
out(' NULL,',
'};',
'')
out('static void trace_%(group)s_register_events(void)',
'{',
' trace_event_register_group(%(group)s_trace_events);',
'}',
'trace_init(trace_%(group)s_register_events)',
group = group.lower())
backend.generate_begin(active_events, group)
for event in active_events:
backend.generate(event, group)
backend.generate_end(active_events, group)
| 2,028 | 26.794521 | 76 | py |
qemu | qemu-master/scripts/tracetool/format/h.py | # -*- coding: utf-8 -*-
"""
trace/generated-tracers.h
"""
__author__ = "Lluís Vilanova <[email protected]>"
__copyright__ = "Copyright 2012-2017, Lluís Vilanova <[email protected]>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
from tracetool import out
def generate(events, backend, group):
if group == "root":
header = "trace/control-vcpu.h"
else:
header = "trace/control.h"
out('/* This file is autogenerated by tracetool, do not edit. */',
'',
'#ifndef TRACE_%s_GENERATED_TRACERS_H' % group.upper(),
'#define TRACE_%s_GENERATED_TRACERS_H' % group.upper(),
'',
'#include "%s"' % header,
'')
for e in events:
out('extern TraceEvent %(event)s;',
event = e.api(e.QEMU_EVENT))
for e in events:
out('extern uint16_t %s;' % e.api(e.QEMU_DSTATE))
# static state
for e in events:
if 'disable' in e.properties:
enabled = 0
else:
enabled = 1
if "tcg-exec" in e.properties:
# a single define for the two "sub-events"
out('#define TRACE_%(name)s_ENABLED %(enabled)d',
name=e.original.name.upper(),
enabled=enabled)
out('#define TRACE_%s_ENABLED %d' % (e.name.upper(), enabled))
backend.generate_begin(events, group)
for e in events:
# tracer-specific dstate
out('',
'#define %(api)s() ( \\',
api=e.api(e.QEMU_BACKEND_DSTATE))
if "disable" not in e.properties:
backend.generate_backend_dstate(e, group)
out(' false)')
# tracer without checks
out('',
'static inline void %(api)s(%(args)s)',
'{',
api=e.api(e.QEMU_TRACE_NOCHECK),
args=e.args)
if "disable" not in e.properties:
backend.generate(e, group)
out('}')
# tracer wrapper with checks (per-vCPU tracing)
if "vcpu" in e.properties:
trace_cpu = next(iter(e.args))[1]
cond = "trace_event_get_vcpu_state(%(cpu)s,"\
" TRACE_%(id)s)"\
% dict(
cpu=trace_cpu,
id=e.name.upper())
else:
cond = "true"
out('',
'static inline void %(api)s(%(args)s)',
'{',
' if (%(cond)s) {',
' %(api_nocheck)s(%(names)s);',
' }',
'}',
api=e.api(),
api_nocheck=e.api(e.QEMU_TRACE_NOCHECK),
args=e.args,
names=", ".join(e.args.names()),
cond=cond)
backend.generate_end(events, group)
out('#endif /* TRACE_%s_GENERATED_TRACERS_H */' % group.upper())
| 2,902 | 26.913462 | 76 | py |
qemu | qemu-master/scripts/tracetool/format/ust_events_c.py | # -*- coding: utf-8 -*-
"""
trace/generated-ust.c
"""
__author__ = "Mohamad Gebai <[email protected]>"
__copyright__ = "Copyright 2012, Mohamad Gebai <[email protected]>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
from tracetool import out
def generate(events, backend, group):
events = [e for e in events
if "disabled" not in e.properties]
out('/* This file is autogenerated by tracetool, do not edit. */',
'',
'#include "qemu/osdep.h"',
'',
'#define TRACEPOINT_DEFINE',
'#define TRACEPOINT_CREATE_PROBES',
'',
'/* If gcc version 4.7 or older is used, LTTng ust gives a warning when compiling with',
' -Wredundant-decls.',
' */',
'#pragma GCC diagnostic ignored "-Wredundant-decls"',
'',
'#include "trace-ust-all.h"')
| 968 | 26.685714 | 96 | py |
qemu | qemu-master/scripts/performance/topN_callgrind.py | #!/usr/bin/env python3
# Print the top N most executed functions in QEMU using callgrind.
# Syntax:
# topN_callgrind.py [-h] [-n] <number of displayed top functions> -- \
#          <qemu executable> [<qemu executable options>] \
#          <target executable> [<target executable options>]
#
# [-h] - Print the script arguments help message.
# [-n] - Specify the number of top functions to print.
# - If this flag is not specified, the tool defaults to 25.
#
# Example of usage:
# topN_callgrind.py -n 20 -- qemu-arm coulomb_double-arm
#
# This file is a part of the project "TCG Continuous Benchmarking".
#
# Copyright (C) 2020 Ahmed Karaman <[email protected]>
# Copyright (C) 2020 Aleksandar Markovic <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import argparse
import os
import subprocess
import sys
# Parse the command line arguments
parser = argparse.ArgumentParser(
usage='topN_callgrind.py [-h] [-n] <number of displayed top functions> -- '
'<qemu executable> [<qemu executable options>] '
'<target executable> [<target executable options>]')
parser.add_argument('-n', dest='top', type=int, default=25,
help='Specify the number of top functions to print.')
parser.add_argument('command', type=str, nargs='+', help=argparse.SUPPRESS)
args = parser.parse_args()
# Extract the needed variables from the args
command = args.command
top = args.top
# Ensure that valgrind is installed
check_valgrind_presence = subprocess.run(["which", "valgrind"],
stdout=subprocess.DEVNULL)
if check_valgrind_presence.returncode:
sys.exit("Please install valgrind before running the script!")
# Run callgrind
callgrind = subprocess.run((
["valgrind", "--tool=callgrind", "--callgrind-out-file=/tmp/callgrind.data"]
+ command),
stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE)
if callgrind.returncode:
sys.exit(callgrind.stderr.decode("utf-8"))
# Save callgrind_annotate output to /tmp/callgrind_annotate.out
with open("/tmp/callgrind_annotate.out", "w") as output:
callgrind_annotate = subprocess.run(["callgrind_annotate",
"/tmp/callgrind.data"],
stdout=output,
stderr=subprocess.PIPE)
if callgrind_annotate.returncode:
os.unlink('/tmp/callgrind.data')
output.close()
os.unlink('/tmp/callgrind_annotate.out')
sys.exit(callgrind_annotate.stderr.decode("utf-8"))
# Read the callgrind_annotate output to callgrind_data[]
callgrind_data = []
with open('/tmp/callgrind_annotate.out', 'r') as data:
callgrind_data = data.readlines()
# Line number with the total number of instructions
total_instructions_line_number = 20
# Get the total number of instructions
total_instructions_line_data = callgrind_data[total_instructions_line_number]
total_number_of_instructions = total_instructions_line_data.split(' ')[0]
total_number_of_instructions = int(
total_number_of_instructions.replace(',', ''))
# Line number with the top function
first_func_line = 25
# Number of functions recorded by callgrind, last two lines are always empty
number_of_functions = len(callgrind_data) - first_func_line - 2
# Limit the number of top functions to "top"
number_of_top_functions = (top if number_of_functions >
top else number_of_functions)
# Store the data of the top functions in top_functions[]
top_functions = callgrind_data[first_func_line:
first_func_line + number_of_top_functions]
# Print table header
print('{:>4} {:>10} {:<30} {}\n{} {} {} {}'.format('No.',
'Percentage',
'Function Name',
'Source File',
'-' * 4,
'-' * 10,
'-' * 30,
'-' * 30,
))
# Print top N functions
for (index, function) in enumerate(top_functions, start=1):
function_data = function.split()
# Calculate function percentage
function_instructions = float(function_data[0].replace(',', ''))
function_percentage = (function_instructions /
total_number_of_instructions)*100
# Get function name and source files path
function_source_file, function_name = function_data[1].split(':')
# Print extracted data
print('{:>4} {:>9.3f}% {:<30} {}'.format(index,
round(function_percentage, 3),
function_name,
function_source_file))
# Remove intermediate files
os.unlink('/tmp/callgrind.data')
os.unlink('/tmp/callgrind_annotate.out')
| 5,761 | 39.865248 | 80 | py |
qemu | qemu-master/scripts/performance/topN_perf.py | #!/usr/bin/env python3
# Print the top N most executed functions in QEMU using perf.
# Syntax:
# topN_perf.py [-h] [-n] <number of displayed top functions> -- \
#          <qemu executable> [<qemu executable options>] \
#          <target executable> [<target executable options>]
#
# [-h] - Print the script arguments help message.
# [-n] - Specify the number of top functions to print.
# - If this flag is not specified, the tool defaults to 25.
#
# Example of usage:
# topN_perf.py -n 20 -- qemu-arm coulomb_double-arm
#
# This file is a part of the project "TCG Continuous Benchmarking".
#
# Copyright (C) 2020 Ahmed Karaman <[email protected]>
# Copyright (C) 2020 Aleksandar Markovic <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import argparse
import os
import subprocess
import sys
# Parse the command line arguments
parser = argparse.ArgumentParser(
    usage='topN_perf.py [-h] [-n] <number of displayed top functions> -- '
'<qemu executable> [<qemu executable options>] '
'<target executable> [<target executable options>]')
parser.add_argument('-n', dest='top', type=int, default=25,
help='Specify the number of top functions to print.')
parser.add_argument('command', type=str, nargs='+', help=argparse.SUPPRESS)
args = parser.parse_args()
# Extract the needed variables from the args
command = args.command
top = args.top
# Ensure that perf is installed
check_perf_presence = subprocess.run(["which", "perf"],
stdout=subprocess.DEVNULL)
if check_perf_presence.returncode:
sys.exit("Please install perf before running the script!")
# Ensure the user has privilege to run perf
check_perf_executability = subprocess.run(["perf", "stat", "ls", "/"],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if check_perf_executability.returncode:
sys.exit(
"""
Error:
You may not have permission to collect stats.
Consider tweaking /proc/sys/kernel/perf_event_paranoid,
which controls use of the performance events system by
unprivileged users (without CAP_SYS_ADMIN).
-1: Allow use of (almost) all events by all users
Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK
0: Disallow ftrace function tracepoint by users without CAP_SYS_ADMIN
Disallow raw tracepoint access by users without CAP_SYS_ADMIN
1: Disallow CPU event access by users without CAP_SYS_ADMIN
2: Disallow kernel profiling by users without CAP_SYS_ADMIN
To make this setting permanent, edit /etc/sysctl.conf too, e.g.:
kernel.perf_event_paranoid = -1
* Alternatively, you can run this script under sudo privileges.
"""
)
# Run perf record
perf_record = subprocess.run((["perf", "record", "--output=/tmp/perf.data"] +
command),
stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE)
if perf_record.returncode:
os.unlink('/tmp/perf.data')
sys.exit(perf_record.stderr.decode("utf-8"))
# Save perf report output to /tmp/perf_report.out
with open("/tmp/perf_report.out", "w") as output:
perf_report = subprocess.run(
["perf", "report", "--input=/tmp/perf.data", "--stdio"],
stdout=output,
stderr=subprocess.PIPE)
if perf_report.returncode:
os.unlink('/tmp/perf.data')
output.close()
os.unlink('/tmp/perf_report.out')
sys.exit(perf_report.stderr.decode("utf-8"))
# Read the reported data to functions[]
functions = []
with open("/tmp/perf_report.out", "r") as data:
# Only read lines that are not comments (comments start with #)
# Only read lines that are not empty
functions = [line for line in data.readlines() if line and line[0]
!= '#' and line[0] != "\n"]
# Limit the number of top functions to "top"
number_of_top_functions = top if len(functions) > top else len(functions)
# Store the data of the top functions in top_functions[]
top_functions = functions[:number_of_top_functions]
# Print table header
print('{:>4} {:>10} {:<30} {}\n{} {} {} {}'.format('No.',
'Percentage',
'Name',
'Invoked by',
'-' * 4,
'-' * 10,
'-' * 30,
'-' * 25))
# Print top N functions
for (index, function) in enumerate(top_functions, start=1):
function_data = function.split()
function_percentage = function_data[0]
function_name = function_data[-1]
function_invoker = ' '.join(function_data[2:-2])
print('{:>4} {:>10} {:<30} {}'.format(index,
function_percentage,
function_name,
function_invoker))
# Remove intermediate files
os.unlink('/tmp/perf.data')
os.unlink('/tmp/perf_report.out')
| 5,888 | 38.26 | 77 | py |
qemu | qemu-master/scripts/performance/dissect.py | #!/usr/bin/env python3
# Print the percentage of instructions spent in each phase of QEMU
# execution.
#
# Syntax:
# dissect.py [-h] -- <qemu executable> [<qemu executable options>] \
# <target executable> [<target executable options>]
#
# [-h] - Print the script arguments help message.
#
# Example of usage:
# dissect.py -- qemu-arm coulomb_double-arm
#
# This file is a part of the project "TCG Continuous Benchmarking".
#
# Copyright (C) 2020 Ahmed Karaman <[email protected]>
# Copyright (C) 2020 Aleksandar Markovic <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import argparse
import os
import subprocess
import sys
import tempfile
def get_JIT_line(callgrind_data):
"""
Search for the first instance of the JIT call in
    the callgrind_annotate output when run using --tree=caller
This is equivalent to the self number of instructions of JIT.
Parameters:
callgrind_data (list): callgrind_annotate output
Returns:
(int): Line number
"""
line = -1
for i in range(len(callgrind_data)):
if callgrind_data[i].strip('\n') and \
callgrind_data[i].split()[-1] == "[???]":
line = i
break
if line == -1:
sys.exit("Couldn't locate the JIT call ... Exiting.")
return line
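# Hedged sketch of the match performed above (never called); the two
# annotate lines are invented, only the trailing "[???]" marker matters.
def _example_get_JIT_line():
    data = ["1,000 ( 1.0%) main [/usr/bin/qemu-arm]\n",
            "9,000 ( 9.0%) ???:0x00000000 [???]\n"]
    return get_JIT_line(data)  # -> 1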
def main():
# Parse the command line arguments
parser = argparse.ArgumentParser(
usage='dissect.py [-h] -- '
'<qemu executable> [<qemu executable options>] '
'<target executable> [<target executable options>]')
parser.add_argument('command', type=str, nargs='+', help=argparse.SUPPRESS)
args = parser.parse_args()
# Extract the needed variables from the args
command = args.command
    # Ensure that valgrind is installed
check_valgrind = subprocess.run(
["which", "valgrind"], stdout=subprocess.DEVNULL)
if check_valgrind.returncode:
sys.exit("Please install valgrind before running the script.")
# Save all intermediate files in a temporary directory
with tempfile.TemporaryDirectory() as tmpdirname:
# callgrind output file path
data_path = os.path.join(tmpdirname, "callgrind.data")
# callgrind_annotate output file path
annotate_out_path = os.path.join(tmpdirname, "callgrind_annotate.out")
# Run callgrind
callgrind = subprocess.run((["valgrind",
"--tool=callgrind",
"--callgrind-out-file=" + data_path]
+ command),
stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE)
if callgrind.returncode:
sys.exit(callgrind.stderr.decode("utf-8"))
# Save callgrind_annotate output
with open(annotate_out_path, "w") as output:
callgrind_annotate = subprocess.run(
["callgrind_annotate", data_path, "--tree=caller"],
stdout=output,
stderr=subprocess.PIPE)
if callgrind_annotate.returncode:
sys.exit(callgrind_annotate.stderr.decode("utf-8"))
# Read the callgrind_annotate output to callgrind_data[]
callgrind_data = []
with open(annotate_out_path, 'r') as data:
callgrind_data = data.readlines()
# Line number with the total number of instructions
total_instructions_line_number = 20
# Get the total number of instructions
total_instructions_line_data = \
callgrind_data[total_instructions_line_number]
total_instructions = total_instructions_line_data.split()[0]
total_instructions = int(total_instructions.replace(',', ''))
# Line number with the JIT self number of instructions
JIT_self_instructions_line_number = get_JIT_line(callgrind_data)
# Get the JIT self number of instructions
JIT_self_instructions_line_data = \
callgrind_data[JIT_self_instructions_line_number]
JIT_self_instructions = JIT_self_instructions_line_data.split()[0]
JIT_self_instructions = int(JIT_self_instructions.replace(',', ''))
# Line number with the JIT self + inclusive number of instructions
# It's the line above the first JIT call when running with --tree=caller
JIT_total_instructions_line_number = JIT_self_instructions_line_number-1
# Get the JIT self + inclusive number of instructions
JIT_total_instructions_line_data = \
callgrind_data[JIT_total_instructions_line_number]
JIT_total_instructions = JIT_total_instructions_line_data.split()[0]
JIT_total_instructions = int(JIT_total_instructions.replace(',', ''))
# Calculate number of instructions in helpers and code generation
helpers_instructions = JIT_total_instructions-JIT_self_instructions
code_generation_instructions = total_instructions-JIT_total_instructions
# Print results (Insert commas in large numbers)
# Print total number of instructions
print('{:<20}{:>20}\n'.
format("Total Instructions:",
format(total_instructions, ',')))
# Print code generation instructions and percentage
print('{:<20}{:>20}\t{:>6.3f}%'.
format("Code Generation:",
format(code_generation_instructions, ","),
(code_generation_instructions / total_instructions) * 100))
# Print JIT instructions and percentage
print('{:<20}{:>20}\t{:>6.3f}%'.
format("JIT Execution:",
format(JIT_self_instructions, ","),
(JIT_self_instructions / total_instructions) * 100))
# Print helpers instructions and percentage
print('{:<20}{:>20}\t{:>6.3f}%'.
format("Helpers:",
format(helpers_instructions, ","),
(helpers_instructions/total_instructions)*100))
if __name__ == "__main__":
main()
| 6,718 | 39.233533 | 80 | py |
qemu | qemu-master/scripts/qapi/main.py | # This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
"""
QAPI Generator
This is the main entry point for generating C code from the QAPI schema.
"""
import argparse
import sys
from typing import Optional
from .commands import gen_commands
from .common import must_match
from .error import QAPIError
from .events import gen_events
from .introspect import gen_introspect
from .schema import QAPISchema
from .types import gen_types
from .visit import gen_visit
def invalid_prefix_char(prefix: str) -> Optional[str]:
match = must_match(r'([A-Za-z_.-][A-Za-z0-9_.-]*)?', prefix)
if match.end() != len(prefix):
return prefix[match.end()]
return None
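# Doctest-style sketch (never called): a well-formed prefix yields None,
# otherwise the first offending character is returned (inputs invented).
def _example_invalid_prefix_char():
    assert invalid_prefix_char('qemu_') is None
    assert invalid_prefix_char('3qemu') == '3'   # digits may not lead
    assert invalid_prefix_char('te$t') == '$'    # '$' is never allowed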
def generate(schema_file: str,
output_dir: str,
prefix: str,
unmask: bool = False,
builtins: bool = False,
gen_tracing: bool = False) -> None:
"""
Generate C code for the given schema into the target directory.
:param schema_file: The primary QAPI schema file.
:param output_dir: The output directory to store generated code.
:param prefix: Optional C-code prefix for symbol names.
:param unmask: Expose non-ABI names through introspection?
:param builtins: Generate code for built-in types?
:raise QAPIError: On failures.
"""
assert invalid_prefix_char(prefix) is None
schema = QAPISchema(schema_file)
gen_types(schema, output_dir, prefix, builtins)
gen_visit(schema, output_dir, prefix, builtins)
gen_commands(schema, output_dir, prefix, gen_tracing)
gen_events(schema, output_dir, prefix)
gen_introspect(schema, output_dir, prefix, unmask)
def main() -> int:
"""
    qapi-gen executable entry point.
Expects arguments via sys.argv, see --help for details.
:return: int, 0 on success, 1 on failure.
"""
parser = argparse.ArgumentParser(
description='Generate code from a QAPI schema')
parser.add_argument('-b', '--builtins', action='store_true',
help="generate code for built-in types")
parser.add_argument('-o', '--output-dir', action='store',
default='',
help="write output to directory OUTPUT_DIR")
parser.add_argument('-p', '--prefix', action='store',
default='',
help="prefix for symbols")
parser.add_argument('-u', '--unmask-non-abi-names', action='store_true',
dest='unmask',
help="expose non-ABI names in introspection")
# Option --suppress-tracing exists so we can avoid solving build system
# problems. TODO Drop it when we no longer need it.
parser.add_argument('--suppress-tracing', action='store_true',
help="suppress adding trace events to qmp marshals")
parser.add_argument('schema', action='store')
args = parser.parse_args()
funny_char = invalid_prefix_char(args.prefix)
if funny_char:
msg = f"funny character '{funny_char}' in argument of --prefix"
print(f"{sys.argv[0]}: {msg}", file=sys.stderr)
return 1
try:
generate(args.schema,
output_dir=args.output_dir,
prefix=args.prefix,
unmask=args.unmask,
builtins=args.builtins,
gen_tracing=not args.suppress_tracing)
except QAPIError as err:
print(f"{sys.argv[0]}: {str(err)}", file=sys.stderr)
return 1
return 0
| 3,575 | 33.384615 | 76 | py |
qemu | qemu-master/scripts/qapi/error.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017-2019 Red Hat Inc.
#
# Authors:
# Markus Armbruster <[email protected]>
# Marc-André Lureau <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
"""
QAPI error classes
Common error classes used throughout the package. Additional errors may
be defined in other modules. At present, `QAPIParseError` is defined in
parser.py.
"""
from typing import Optional
from .source import QAPISourceInfo
class QAPIError(Exception):
"""Base class for all exceptions from the QAPI package."""
class QAPISourceError(QAPIError):
"""Error class for all exceptions identifying a source location."""
def __init__(self,
info: Optional[QAPISourceInfo],
msg: str,
col: Optional[int] = None):
super().__init__()
self.info = info
self.msg = msg
self.col = col
def __str__(self) -> str:
assert self.info is not None
loc = str(self.info)
if self.col is not None:
assert self.info.line is not None
loc += ':%s' % self.col
return loc + ': ' + self.msg
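# Hedged sketch of the rendered message (never called); the file name,
# column and message are invented, and the output has the shape
# "<fname>:<line>:<col>: <msg>".
def _example_source_error():
    info = QAPISourceInfo('schema.json', None)
    return str(QAPISourceError(info, "duplicate key 'data'", col=15))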
class QAPISemError(QAPISourceError):
"""Error class for semantic QAPI errors."""
| 1,320 | 24.901961 | 72 | py |
qemu | qemu-master/scripts/qapi/parser.py | # -*- coding: utf-8 -*-
#
# QAPI schema parser
#
# Copyright IBM, Corp. 2011
# Copyright (c) 2013-2019 Red Hat Inc.
#
# Authors:
# Anthony Liguori <[email protected]>
# Markus Armbruster <[email protected]>
# Marc-André Lureau <[email protected]>
# Kevin Wolf <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
from collections import OrderedDict
import os
import re
from typing import (
TYPE_CHECKING,
Dict,
List,
Mapping,
Optional,
Set,
Union,
)
from .common import must_match
from .error import QAPISemError, QAPISourceError
from .source import QAPISourceInfo
if TYPE_CHECKING:
# pylint: disable=cyclic-import
# TODO: Remove cycle. [schema -> expr -> parser -> schema]
from .schema import QAPISchemaFeature, QAPISchemaMember
# Return value alias for get_expr().
_ExprValue = Union[List[object], Dict[str, object], str, bool]
class QAPIExpression(Dict[str, object]):
# pylint: disable=too-few-public-methods
def __init__(self,
data: Mapping[str, object],
info: QAPISourceInfo,
doc: Optional['QAPIDoc'] = None):
super().__init__(data)
self.info = info
self.doc: Optional['QAPIDoc'] = doc
class QAPIParseError(QAPISourceError):
"""Error class for all QAPI schema parsing errors."""
def __init__(self, parser: 'QAPISchemaParser', msg: str):
col = 1
for ch in parser.src[parser.line_pos:parser.pos]:
if ch == '\t':
col = (col + 7) % 8 + 1
else:
col += 1
super().__init__(parser.info, msg, col)
class QAPISchemaParser:
"""
Parse QAPI schema source.
Parse a JSON-esque schema file and process directives. See
qapi-code-gen.txt section "Schema Syntax" for the exact syntax.
Grammatical validation is handled later by `expr.check_exprs()`.
:param fname: Source file name.
:param previously_included:
The absolute names of previously included source files,
if being invoked from another parser.
:param incl_info:
`QAPISourceInfo` belonging to the parent module.
``None`` implies this is the root module.
:ivar exprs: Resulting parsed expressions.
:ivar docs: Resulting parsed documentation blocks.
:raise OSError: For problems reading the root schema document.
:raise QAPIError: For errors in the schema source.
"""
def __init__(self,
fname: str,
previously_included: Optional[Set[str]] = None,
incl_info: Optional[QAPISourceInfo] = None):
self._fname = fname
self._included = previously_included or set()
self._included.add(os.path.abspath(self._fname))
self.src = ''
# Lexer state (see `accept` for details):
self.info = QAPISourceInfo(self._fname, incl_info)
self.tok: Union[None, str] = None
self.pos = 0
self.cursor = 0
self.val: Optional[Union[bool, str]] = None
self.line_pos = 0
# Parser output:
self.exprs: List[QAPIExpression] = []
self.docs: List[QAPIDoc] = []
# Showtime!
self._parse()
def _parse(self) -> None:
"""
Parse the QAPI schema document.
:return: None. Results are stored in ``.exprs`` and ``.docs``.
"""
cur_doc = None
# May raise OSError; allow the caller to handle it.
with open(self._fname, 'r', encoding='utf-8') as fp:
self.src = fp.read()
if self.src == '' or self.src[-1] != '\n':
self.src += '\n'
# Prime the lexer:
self.accept()
# Parse until done:
while self.tok is not None:
info = self.info
if self.tok == '#':
self.reject_expr_doc(cur_doc)
for cur_doc in self.get_doc(info):
self.docs.append(cur_doc)
continue
expr = self.get_expr()
if not isinstance(expr, dict):
raise QAPISemError(
info, "top-level expression must be an object")
if 'include' in expr:
self.reject_expr_doc(cur_doc)
if len(expr) != 1:
raise QAPISemError(info, "invalid 'include' directive")
include = expr['include']
if not isinstance(include, str):
raise QAPISemError(info,
"value of 'include' must be a string")
incl_fname = os.path.join(os.path.dirname(self._fname),
include)
self._add_expr(OrderedDict({'include': incl_fname}), info)
exprs_include = self._include(include, info, incl_fname,
self._included)
if exprs_include:
self.exprs.extend(exprs_include.exprs)
self.docs.extend(exprs_include.docs)
elif "pragma" in expr:
self.reject_expr_doc(cur_doc)
if len(expr) != 1:
raise QAPISemError(info, "invalid 'pragma' directive")
pragma = expr['pragma']
if not isinstance(pragma, dict):
raise QAPISemError(
info, "value of 'pragma' must be an object")
for name, value in pragma.items():
self._pragma(name, value, info)
else:
if cur_doc and not cur_doc.symbol:
raise QAPISemError(
cur_doc.info, "definition documentation required")
self._add_expr(expr, info, cur_doc)
cur_doc = None
self.reject_expr_doc(cur_doc)
def _add_expr(self, expr: Mapping[str, object],
info: QAPISourceInfo,
doc: Optional['QAPIDoc'] = None) -> None:
self.exprs.append(QAPIExpression(expr, info, doc))
@staticmethod
def reject_expr_doc(doc: Optional['QAPIDoc']) -> None:
if doc and doc.symbol:
raise QAPISemError(
doc.info,
"documentation for '%s' is not followed by the definition"
% doc.symbol)
@staticmethod
def _include(include: str,
info: QAPISourceInfo,
incl_fname: str,
previously_included: Set[str]
) -> Optional['QAPISchemaParser']:
incl_abs_fname = os.path.abspath(incl_fname)
# catch inclusion cycle
inf: Optional[QAPISourceInfo] = info
while inf:
if incl_abs_fname == os.path.abspath(inf.fname):
raise QAPISemError(info, "inclusion loop for %s" % include)
inf = inf.parent
# skip multiple include of the same file
if incl_abs_fname in previously_included:
return None
try:
return QAPISchemaParser(incl_fname, previously_included, info)
except OSError as err:
raise QAPISemError(
info,
f"can't read include file '{incl_fname}': {err.strerror}"
) from err
@staticmethod
def _pragma(name: str, value: object, info: QAPISourceInfo) -> None:
def check_list_str(name: str, value: object) -> List[str]:
if (not isinstance(value, list) or
any(not isinstance(elt, str) for elt in value)):
raise QAPISemError(
info,
"pragma %s must be a list of strings" % name)
return value
pragma = info.pragma
if name == 'doc-required':
if not isinstance(value, bool):
raise QAPISemError(info,
"pragma 'doc-required' must be boolean")
pragma.doc_required = value
elif name == 'command-name-exceptions':
pragma.command_name_exceptions = check_list_str(name, value)
elif name == 'command-returns-exceptions':
pragma.command_returns_exceptions = check_list_str(name, value)
elif name == 'member-name-exceptions':
pragma.member_name_exceptions = check_list_str(name, value)
else:
raise QAPISemError(info, "unknown pragma '%s'" % name)
def accept(self, skip_comment: bool = True) -> None:
"""
Read and store the next token.
:param skip_comment:
When false, return COMMENT tokens ("#").
This is used when reading documentation blocks.
:return:
None. Several instance attributes are updated instead:
- ``.tok`` represents the token type. See below for values.
- ``.info`` describes the token's source location.
- ``.val`` is the token's value, if any. See below.
- ``.pos`` is the buffer index of the first character of
the token.
* Single-character tokens:
These are "{", "}", ":", ",", "[", and "]".
``.tok`` holds the single character and ``.val`` is None.
* Multi-character tokens:
* COMMENT:
This token is not normally returned by the lexer, but it can
be when ``skip_comment`` is False. ``.tok`` is "#", and
``.val`` is a string including all chars until end-of-line,
including the "#" itself.
* STRING:
``.tok`` is "'", the single quote. ``.val`` contains the
string, excluding the surrounding quotes.
* TRUE and FALSE:
``.tok`` is either "t" or "f", ``.val`` will be the
corresponding bool value.
* EOF:
``.tok`` and ``.val`` will both be None at EOF.
"""
while True:
self.tok = self.src[self.cursor]
self.pos = self.cursor
self.cursor += 1
self.val = None
if self.tok == '#':
if self.src[self.cursor] == '#':
# Start of doc comment
skip_comment = False
self.cursor = self.src.find('\n', self.cursor)
if not skip_comment:
self.val = self.src[self.pos:self.cursor]
return
elif self.tok in '{}:,[]':
return
elif self.tok == "'":
# Note: we accept only printable ASCII
string = ''
esc = False
while True:
ch = self.src[self.cursor]
self.cursor += 1
if ch == '\n':
raise QAPIParseError(self, "missing terminating \"'\"")
if esc:
# Note: we recognize only \\ because we have
# no use for funny characters in strings
if ch != '\\':
raise QAPIParseError(self,
"unknown escape \\%s" % ch)
esc = False
elif ch == '\\':
esc = True
continue
elif ch == "'":
self.val = string
return
if ord(ch) < 32 or ord(ch) >= 127:
raise QAPIParseError(
self, "funny character in string")
string += ch
elif self.src.startswith('true', self.pos):
self.val = True
self.cursor += 3
return
elif self.src.startswith('false', self.pos):
self.val = False
self.cursor += 4
return
elif self.tok == '\n':
if self.cursor == len(self.src):
self.tok = None
return
self.info = self.info.next_line()
self.line_pos = self.cursor
elif not self.tok.isspace():
# Show up to next structural, whitespace or quote
# character
match = must_match('[^[\\]{}:,\\s\'"]+',
self.src[self.cursor-1:])
raise QAPIParseError(self, "stray '%s'" % match.group(0))
def get_members(self) -> Dict[str, object]:
expr: Dict[str, object] = OrderedDict()
if self.tok == '}':
self.accept()
return expr
if self.tok != "'":
raise QAPIParseError(self, "expected string or '}'")
while True:
key = self.val
assert isinstance(key, str) # Guaranteed by tok == "'"
self.accept()
if self.tok != ':':
raise QAPIParseError(self, "expected ':'")
self.accept()
if key in expr:
raise QAPIParseError(self, "duplicate key '%s'" % key)
expr[key] = self.get_expr()
if self.tok == '}':
self.accept()
return expr
if self.tok != ',':
raise QAPIParseError(self, "expected ',' or '}'")
self.accept()
if self.tok != "'":
raise QAPIParseError(self, "expected string")
def get_values(self) -> List[object]:
expr: List[object] = []
if self.tok == ']':
self.accept()
return expr
if self.tok not in tuple("{['tf"):
raise QAPIParseError(
self, "expected '{', '[', ']', string, or boolean")
while True:
expr.append(self.get_expr())
if self.tok == ']':
self.accept()
return expr
if self.tok != ',':
raise QAPIParseError(self, "expected ',' or ']'")
self.accept()
def get_expr(self) -> _ExprValue:
expr: _ExprValue
if self.tok == '{':
self.accept()
expr = self.get_members()
elif self.tok == '[':
self.accept()
expr = self.get_values()
elif self.tok in tuple("'tf"):
assert isinstance(self.val, (str, bool))
expr = self.val
self.accept()
else:
raise QAPIParseError(
self, "expected '{', '[', string, or boolean")
return expr
def get_doc(self, info: QAPISourceInfo) -> List['QAPIDoc']:
if self.val != '##':
raise QAPIParseError(
self, "junk after '##' at start of documentation comment")
docs = []
cur_doc = QAPIDoc(self, info)
self.accept(False)
while self.tok == '#':
assert isinstance(self.val, str)
if self.val.startswith('##'):
# End of doc comment
if self.val != '##':
raise QAPIParseError(
self,
"junk after '##' at end of documentation comment")
cur_doc.end_comment()
docs.append(cur_doc)
self.accept()
return docs
if self.val.startswith('# ='):
if cur_doc.symbol:
raise QAPIParseError(
self,
"unexpected '=' markup in definition documentation")
if cur_doc.body.text:
cur_doc.end_comment()
docs.append(cur_doc)
cur_doc = QAPIDoc(self, info)
cur_doc.append(self.val)
self.accept(False)
raise QAPIParseError(self, "documentation comment must end with '##'")
class QAPIDoc:
"""
A documentation comment block, either definition or free-form
Definition documentation blocks consist of
* a body section: one line naming the definition, followed by an
overview (any number of lines)
* argument sections: a description of each argument (for commands
and events) or member (for structs, unions and alternates)
* features sections: a description of each feature flag
* additional (non-argument) sections, possibly tagged
Free-form documentation blocks consist only of a body section.
"""
class Section:
# pylint: disable=too-few-public-methods
def __init__(self, parser: QAPISchemaParser,
name: Optional[str] = None, indent: int = 0):
# parser, for error messages about indentation
self._parser = parser
# optional section name (argument/member or section name)
self.name = name
self.text = ''
# the expected indent level of the text of this section
self._indent = indent
def append(self, line: str) -> None:
# Strip leading spaces corresponding to the expected indent level
# Blank lines are always OK.
if line:
indent = must_match(r'\s*', line).end()
if indent < self._indent:
raise QAPIParseError(
self._parser,
"unexpected de-indent (expected at least %d spaces)" %
self._indent)
line = line[self._indent:]
self.text += line.rstrip() + '\n'
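        # For example, with _indent == 4, the line '    foo' is stored
        # as 'foo\n'; a non-blank line indented by fewer than four
        # spaces triggers the de-indent error above (illustrative).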
class ArgSection(Section):
def __init__(self, parser: QAPISchemaParser,
name: str, indent: int = 0):
super().__init__(parser, name, indent)
self.member: Optional['QAPISchemaMember'] = None
def connect(self, member: 'QAPISchemaMember') -> None:
self.member = member
class NullSection(Section):
"""
Immutable dummy section for use at the end of a doc block.
"""
# pylint: disable=too-few-public-methods
def append(self, line: str) -> None:
assert False, "Text appended after end_comment() called."
def __init__(self, parser: QAPISchemaParser, info: QAPISourceInfo):
# self._parser is used to report errors with QAPIParseError. The
# resulting error position depends on the state of the parser.
# It happens to be the beginning of the comment. More or less
        # serviceable, but action at a distance.
self._parser = parser
self.info = info
self.symbol: Optional[str] = None
self.body = QAPIDoc.Section(parser)
# dicts mapping parameter/feature names to their ArgSection
self.args: Dict[str, QAPIDoc.ArgSection] = OrderedDict()
self.features: Dict[str, QAPIDoc.ArgSection] = OrderedDict()
self.sections: List[QAPIDoc.Section] = []
# the current section
self._section = self.body
self._append_line = self._append_body_line
def has_section(self, name: str) -> bool:
"""Return True if we have a section with this name."""
for i in self.sections:
if i.name == name:
return True
return False
def append(self, line: str) -> None:
"""
Parse a comment line and add it to the documentation.
The way that the line is dealt with depends on which part of
the documentation we're parsing right now:
* The body section: ._append_line is ._append_body_line
* An argument section: ._append_line is ._append_args_line
* A features section: ._append_line is ._append_features_line
* An additional section: ._append_line is ._append_various_line
"""
line = line[1:]
if not line:
self._append_freeform(line)
return
if line[0] != ' ':
raise QAPIParseError(self._parser, "missing space after #")
line = line[1:]
self._append_line(line)
def end_comment(self) -> None:
self._switch_section(QAPIDoc.NullSection(self._parser))
@staticmethod
def _is_section_tag(name: str) -> bool:
return name in ('Returns:', 'Since:',
# those are often singular or plural
'Note:', 'Notes:',
'Example:', 'Examples:',
'TODO:')
def _append_body_line(self, line: str) -> None:
"""
Process a line of documentation text in the body section.
        If this is a symbol line and it is the section's first line, this
is a definition documentation block for that symbol.
If it's a definition documentation block, another symbol line
begins the argument section for the argument named by it, and
a section tag begins an additional section. Start that
section and append the line to it.
Else, append the line to the current section.
"""
name = line.split(' ', 1)[0]
        # FIXME not nice: things like '#  @foo:' (extra space) and
        # '# @foo : ' (space before the colon) aren't recognized, and
        # get silently treated as ordinary text
if not self.symbol and not self.body.text and line.startswith('@'):
if not line.endswith(':'):
raise QAPIParseError(self._parser, "line should end with ':'")
self.symbol = line[1:-1]
# Invalid names are not checked here, but the name provided MUST
# match the following definition, which *is* validated in expr.py.
if not self.symbol:
raise QAPIParseError(
self._parser, "name required after '@'")
elif self.symbol:
# This is a definition documentation block
if name.startswith('@') and name.endswith(':'):
self._append_line = self._append_args_line
self._append_args_line(line)
elif line == 'Features:':
self._append_line = self._append_features_line
elif self._is_section_tag(name):
self._append_line = self._append_various_line
self._append_various_line(line)
else:
self._append_freeform(line)
else:
# This is a free-form documentation block
self._append_freeform(line)
def _append_args_line(self, line: str) -> None:
"""
Process a line of documentation text in an argument section.
        A symbol line begins the next argument section; a section tag
        or a non-indented line after a blank line begins an additional
        section.  Start that section and append the line to it.
Else, append the line to the current section.
"""
name = line.split(' ', 1)[0]
if name.startswith('@') and name.endswith(':'):
# If line is "@arg: first line of description", find
# the index of 'f', which is the indent we expect for any
# following lines. We then remove the leading "@arg:"
# from line and replace it with spaces so that 'f' has the
# same index as it did in the original line and can be
# handled the same way we will handle following lines.
indent = must_match(r'@\S*:\s*', line).end()
line = line[indent:]
if not line:
# Line was just the "@arg:" header; following lines
# are not indented
indent = 0
else:
line = ' ' * indent + line
self._start_args_section(name[1:-1], indent)
elif self._is_section_tag(name):
self._append_line = self._append_various_line
self._append_various_line(line)
return
elif (self._section.text.endswith('\n\n')
and line and not line[0].isspace()):
if line == 'Features:':
self._append_line = self._append_features_line
else:
self._start_section()
self._append_line = self._append_various_line
self._append_various_line(line)
return
self._append_freeform(line)
def _append_features_line(self, line: str) -> None:
name = line.split(' ', 1)[0]
if name.startswith('@') and name.endswith(':'):
# If line is "@arg: first line of description", find
# the index of 'f', which is the indent we expect for any
# following lines. We then remove the leading "@arg:"
# from line and replace it with spaces so that 'f' has the
# same index as it did in the original line and can be
# handled the same way we will handle following lines.
indent = must_match(r'@\S*:\s*', line).end()
line = line[indent:]
if not line:
# Line was just the "@arg:" header; following lines
# are not indented
indent = 0
else:
line = ' ' * indent + line
self._start_features_section(name[1:-1], indent)
elif self._is_section_tag(name):
self._append_line = self._append_various_line
self._append_various_line(line)
return
elif (self._section.text.endswith('\n\n')
and line and not line[0].isspace()):
self._start_section()
self._append_line = self._append_various_line
self._append_various_line(line)
return
self._append_freeform(line)
def _append_various_line(self, line: str) -> None:
"""
Process a line of documentation text in an additional section.
A symbol line is an error.
A section tag begins an additional section. Start that
section and append the line to it.
Else, append the line to the current section.
"""
name = line.split(' ', 1)[0]
if name.startswith('@') and name.endswith(':'):
raise QAPIParseError(self._parser,
"'%s' can't follow '%s' section"
% (name, self.sections[0].name))
if self._is_section_tag(name):
# If line is "Section: first line of description", find
# the index of 'f', which is the indent we expect for any
# following lines. We then remove the leading "Section:"
# from line and replace it with spaces so that 'f' has the
# same index as it did in the original line and can be
# handled the same way we will handle following lines.
indent = must_match(r'\S*:\s*', line).end()
line = line[indent:]
if not line:
# Line was just the "Section:" header; following lines
# are not indented
indent = 0
else:
line = ' ' * indent + line
self._start_section(name[:-1], indent)
self._append_freeform(line)
def _start_symbol_section(
self,
symbols_dict: Dict[str, 'QAPIDoc.ArgSection'],
name: str,
indent: int) -> None:
# FIXME invalid names other than the empty string aren't flagged
if not name:
raise QAPIParseError(self._parser, "invalid parameter name")
if name in symbols_dict:
raise QAPIParseError(self._parser,
"'%s' parameter name duplicated" % name)
assert not self.sections
new_section = QAPIDoc.ArgSection(self._parser, name, indent)
self._switch_section(new_section)
symbols_dict[name] = new_section
def _start_args_section(self, name: str, indent: int) -> None:
self._start_symbol_section(self.args, name, indent)
def _start_features_section(self, name: str, indent: int) -> None:
self._start_symbol_section(self.features, name, indent)
def _start_section(self, name: Optional[str] = None,
indent: int = 0) -> None:
if name in ('Returns', 'Since') and self.has_section(name):
raise QAPIParseError(self._parser,
"duplicated '%s' section" % name)
new_section = QAPIDoc.Section(self._parser, name, indent)
self._switch_section(new_section)
self.sections.append(new_section)
def _switch_section(self, new_section: 'QAPIDoc.Section') -> None:
text = self._section.text = self._section.text.strip()
# Only the 'body' section is allowed to have an empty body.
# All other sections, including anonymous ones, must have text.
if self._section != self.body and not text:
# We do not create anonymous sections unless there is
# something to put in them; this is a parser bug.
assert self._section.name
raise QAPIParseError(
self._parser,
"empty doc section '%s'" % self._section.name)
self._section = new_section
def _append_freeform(self, line: str) -> None:
match = re.match(r'(@\S+:)', line)
if match:
raise QAPIParseError(self._parser,
"'%s' not allowed in free-form documentation"
% match.group(1))
self._section.append(line)
def connect_member(self, member: 'QAPISchemaMember') -> None:
if member.name not in self.args:
            # Undocumented; TODO: outlaw this
self.args[member.name] = QAPIDoc.ArgSection(self._parser,
member.name)
self.args[member.name].connect(member)
def connect_feature(self, feature: 'QAPISchemaFeature') -> None:
if feature.name not in self.features:
raise QAPISemError(feature.info,
"feature '%s' lacks documentation"
% feature.name)
self.features[feature.name].connect(feature)
def check_expr(self, expr: QAPIExpression) -> None:
if self.has_section('Returns') and 'command' not in expr:
raise QAPISemError(self.info,
"'Returns:' is only valid for commands")
def check(self) -> None:
def check_args_section(
args: Dict[str, QAPIDoc.ArgSection], what: str
) -> None:
bogus = [name for name, section in args.items()
if not section.member]
if bogus:
raise QAPISemError(
self.info,
"documented %s%s '%s' %s not exist" % (
what,
"s" if len(bogus) > 1 else "",
"', '".join(bogus),
"do" if len(bogus) > 1 else "does"
))
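        # e.g. "documented member 'foo' does not exist" or, for several
        # names, "documented members 'foo', 'bar' do not exist"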
check_args_section(self.args, 'member')
check_args_section(self.features, 'feature')
| 31,095 | 37.107843 | 79 | py |
qemu | qemu-master/scripts/qapi/events.py | """
QAPI event generator
Copyright (c) 2014 Wenchao Xia
Copyright (c) 2015-2018 Red Hat Inc.
Authors:
Wenchao Xia <[email protected]>
Markus Armbruster <[email protected]>
This work is licensed under the terms of the GNU GPL, version 2.
See the COPYING file in the top-level directory.
"""
from typing import List, Optional
from .common import c_enum_const, c_name, mcgen
from .gen import QAPISchemaModularCVisitor, build_params, ifcontext
from .schema import (
QAPISchema,
QAPISchemaEnumMember,
QAPISchemaFeature,
QAPISchemaIfCond,
QAPISchemaObjectType,
)
from .source import QAPISourceInfo
from .types import gen_enum, gen_enum_lookup
def build_event_send_proto(name: str,
arg_type: Optional[QAPISchemaObjectType],
boxed: bool) -> str:
return 'void qapi_event_send_%(c_name)s(%(param)s)' % {
'c_name': c_name(name.lower()),
'param': build_params(arg_type, boxed)}
def gen_event_send_decl(name: str,
arg_type: Optional[QAPISchemaObjectType],
boxed: bool) -> str:
return mcgen('''
%(proto)s;
''',
proto=build_event_send_proto(name, arg_type, boxed))
def gen_param_var(typ: QAPISchemaObjectType) -> str:
"""
Generate a struct variable holding the event parameters.
Initialize it with the function arguments defined in `gen_event_send`.
"""
assert not typ.variants
ret = mcgen('''
%(c_name)s param = {
''',
c_name=typ.c_name())
sep = ' '
for memb in typ.members:
ret += sep
sep = ', '
if memb.need_has():
ret += 'has_' + c_name(memb.name) + sep
if memb.type.name == 'str':
# Cast away const added in build_params()
ret += '(char *)'
ret += c_name(memb.name)
ret += mcgen('''
};
''')
if not typ.is_implicit():
ret += mcgen('''
%(c_name)s *arg = ¶m;
''',
c_name=typ.c_name())
return ret
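# Illustrative shape of gen_param_var() output for a type with members
# 'device' (str) and an optional 'force' (bool) -- hypothetical names:
#
#     q_obj_EXAMPLE_arg param = {
#         (char *)device, has_force, force
#     };
#
# For non-implicit types, a "T *arg = &param;" line follows.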
def gen_event_send(name: str,
arg_type: Optional[QAPISchemaObjectType],
features: List[QAPISchemaFeature],
boxed: bool,
event_enum_name: str,
event_emit: str) -> str:
# FIXME: Our declaration of local variables (and of 'errp' in the
# parameter list) can collide with exploded members of the event's
# data type passed in as parameters. If this collision ever hits in
# practice, we can rename our local variables with a leading _ prefix,
# or split the code into a wrapper function that creates a boxed
# 'param' object then calls another to do the real work.
have_args = boxed or (arg_type and not arg_type.is_empty())
ret = mcgen('''
%(proto)s
{
QDict *qmp;
''',
proto=build_event_send_proto(name, arg_type, boxed))
if have_args:
assert arg_type is not None
ret += mcgen('''
QObject *obj;
Visitor *v;
''')
if not boxed:
ret += gen_param_var(arg_type)
for f in features:
if f.is_special():
ret += mcgen('''
if (compat_policy.%(feat)s_output == COMPAT_POLICY_OUTPUT_HIDE) {
return;
}
''',
feat=f.name)
ret += mcgen('''
qmp = qmp_event_build_dict("%(name)s");
''',
name=name)
if have_args:
assert arg_type is not None
ret += mcgen('''
v = qobject_output_visitor_new_qmp(&obj);
''')
if not arg_type.is_implicit():
ret += mcgen('''
visit_type_%(c_name)s(v, "%(name)s", &arg, &error_abort);
''',
name=name, c_name=arg_type.c_name())
else:
ret += mcgen('''
visit_start_struct(v, "%(name)s", NULL, 0, &error_abort);
visit_type_%(c_name)s_members(v, ¶m, &error_abort);
visit_check_struct(v, &error_abort);
visit_end_struct(v, NULL);
''',
name=name, c_name=arg_type.c_name())
ret += mcgen('''
visit_complete(v, &obj);
if (qdict_size(qobject_to(QDict, obj))) {
qdict_put_obj(qmp, "data", obj);
} else {
qobject_unref(obj);
}
''')
ret += mcgen('''
%(event_emit)s(%(c_enum)s, qmp);
''',
event_emit=event_emit,
c_enum=c_enum_const(event_enum_name, name))
if have_args:
ret += mcgen('''
visit_free(v);
''')
ret += mcgen('''
qobject_unref(qmp);
}
''')
return ret
class QAPISchemaGenEventVisitor(QAPISchemaModularCVisitor):
def __init__(self, prefix: str):
super().__init__(
prefix, 'qapi-events',
' * Schema-defined QAPI/QMP events', None, __doc__)
self._event_enum_name = c_name(prefix + 'QAPIEvent', protect=False)
self._event_enum_members: List[QAPISchemaEnumMember] = []
self._event_emit_name = c_name(prefix + 'qapi_event_emit')
def _begin_user_module(self, name: str) -> None:
events = self._module_basename('qapi-events', name)
types = self._module_basename('qapi-types', name)
visit = self._module_basename('qapi-visit', name)
self._genc.add(mcgen('''
#include "qemu/osdep.h"
#include "%(prefix)sqapi-emit-events.h"
#include "%(events)s.h"
#include "%(visit)s.h"
#include "qapi/compat-policy.h"
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp-event.h"
''',
events=events, visit=visit,
prefix=self._prefix))
self._genh.add(mcgen('''
#include "qapi/util.h"
#include "%(types)s.h"
''',
types=types))
def visit_end(self) -> None:
self._add_module('./emit', ' * QAPI Events emission')
self._genc.preamble_add(mcgen('''
#include "qemu/osdep.h"
#include "%(prefix)sqapi-emit-events.h"
''',
prefix=self._prefix))
self._genh.preamble_add(mcgen('''
#include "qapi/util.h"
'''))
self._genh.add(gen_enum(self._event_enum_name,
self._event_enum_members))
self._genc.add(gen_enum_lookup(self._event_enum_name,
self._event_enum_members))
self._genh.add(mcgen('''
void %(event_emit)s(%(event_enum)s event, QDict *qdict);
''',
event_emit=self._event_emit_name,
event_enum=self._event_enum_name))
def visit_event(self,
name: str,
info: Optional[QAPISourceInfo],
ifcond: QAPISchemaIfCond,
features: List[QAPISchemaFeature],
arg_type: Optional[QAPISchemaObjectType],
boxed: bool) -> None:
with ifcontext(ifcond, self._genh, self._genc):
self._genh.add(gen_event_send_decl(name, arg_type, boxed))
self._genc.add(gen_event_send(name, arg_type, features, boxed,
self._event_enum_name,
self._event_emit_name))
# Note: we generate the enum member regardless of @ifcond, to
# keep the enumeration usable in target-independent code.
self._event_enum_members.append(QAPISchemaEnumMember(name, None))
def gen_events(schema: QAPISchema,
output_dir: str,
prefix: str) -> None:
vis = QAPISchemaGenEventVisitor(prefix)
schema.visit(vis)
vis.write(output_dir)
| 7,573 | 29.055556 | 75 | py |
qemu | qemu-master/scripts/qapi/visit.py | """
QAPI visitor generator
Copyright IBM, Corp. 2011
Copyright (C) 2014-2018 Red Hat, Inc.
Authors:
Anthony Liguori <[email protected]>
Michael Roth <[email protected]>
Markus Armbruster <[email protected]>
This work is licensed under the terms of the GNU GPL, version 2.
See the COPYING file in the top-level directory.
"""
from typing import List, Optional
from .common import (
c_enum_const,
c_name,
indent,
mcgen,
)
from .gen import (
QAPISchemaModularCVisitor,
gen_special_features,
ifcontext,
)
from .schema import (
QAPISchema,
QAPISchemaEnumMember,
QAPISchemaEnumType,
QAPISchemaFeature,
QAPISchemaIfCond,
QAPISchemaObjectType,
QAPISchemaObjectTypeMember,
QAPISchemaType,
QAPISchemaVariants,
)
from .source import QAPISourceInfo
def gen_visit_decl(name: str, scalar: bool = False) -> str:
c_type = c_name(name) + ' *'
if not scalar:
c_type += '*'
return mcgen('''
bool visit_type_%(c_name)s(Visitor *v, const char *name,
%(c_type)sobj, Error **errp);
''',
c_name=c_name(name), c_type=c_type)
def gen_visit_members_decl(name: str) -> str:
return mcgen('''
bool visit_type_%(c_name)s_members(Visitor *v, %(c_name)s *obj, Error **errp);
''',
c_name=c_name(name))
def gen_visit_object_members(name: str,
base: Optional[QAPISchemaObjectType],
members: List[QAPISchemaObjectTypeMember],
variants: Optional[QAPISchemaVariants]) -> str:
ret = mcgen('''
bool visit_type_%(c_name)s_members(Visitor *v, %(c_name)s *obj, Error **errp)
{
''',
c_name=c_name(name))
sep = ''
for memb in members:
if memb.optional and not memb.need_has():
ret += mcgen('''
bool has_%(c_name)s = !!obj->%(c_name)s;
''',
c_name=c_name(memb.name))
sep = '\n'
ret += sep
if base:
ret += mcgen('''
if (!visit_type_%(c_type)s_members(v, (%(c_type)s *)obj, errp)) {
return false;
}
''',
c_type=base.c_name())
for memb in members:
ret += memb.ifcond.gen_if()
if memb.optional:
has = 'has_' + c_name(memb.name)
if memb.need_has():
has = 'obj->' + has
ret += mcgen('''
if (visit_optional(v, "%(name)s", &%(has)s)) {
''',
name=memb.name, has=has)
indent.increase()
special_features = gen_special_features(memb.features)
if special_features != '0':
ret += mcgen('''
if (visit_policy_reject(v, "%(name)s", %(special_features)s, errp)) {
return false;
}
if (!visit_policy_skip(v, "%(name)s", %(special_features)s)) {
''',
name=memb.name, special_features=special_features)
indent.increase()
ret += mcgen('''
if (!visit_type_%(c_type)s(v, "%(name)s", &obj->%(c_name)s, errp)) {
return false;
}
''',
c_type=memb.type.c_name(), name=memb.name,
c_name=c_name(memb.name))
if special_features != '0':
indent.decrease()
ret += mcgen('''
}
''')
if memb.optional:
indent.decrease()
ret += mcgen('''
}
''')
ret += memb.ifcond.gen_endif()
if variants:
tag_member = variants.tag_member
assert isinstance(tag_member.type, QAPISchemaEnumType)
ret += mcgen('''
switch (obj->%(c_name)s) {
''',
c_name=c_name(tag_member.name))
for var in variants.variants:
case_str = c_enum_const(tag_member.type.name, var.name,
tag_member.type.prefix)
ret += var.ifcond.gen_if()
if var.type.name == 'q_empty':
# valid variant and nothing to do
ret += mcgen('''
case %(case)s:
break;
''',
case=case_str)
else:
ret += mcgen('''
case %(case)s:
return visit_type_%(c_type)s_members(v, &obj->u.%(c_name)s, errp);
''',
case=case_str,
c_type=var.type.c_name(), c_name=c_name(var.name))
ret += var.ifcond.gen_endif()
ret += mcgen('''
default:
abort();
}
''')
ret += mcgen('''
return true;
}
''')
return ret
def gen_visit_list(name: str, element_type: QAPISchemaType) -> str:
return mcgen('''
bool visit_type_%(c_name)s(Visitor *v, const char *name,
%(c_name)s **obj, Error **errp)
{
bool ok = false;
%(c_name)s *tail;
size_t size = sizeof(**obj);
if (!visit_start_list(v, name, (GenericList **)obj, size, errp)) {
return false;
}
for (tail = *obj; tail;
tail = (%(c_name)s *)visit_next_list(v, (GenericList *)tail, size)) {
if (!visit_type_%(c_elt_type)s(v, NULL, &tail->value, errp)) {
goto out_obj;
}
}
ok = visit_check_list(v, errp);
out_obj:
visit_end_list(v, (void **)obj);
if (!ok && visit_is_input(v)) {
qapi_free_%(c_name)s(*obj);
*obj = NULL;
}
return ok;
}
''',
c_name=c_name(name), c_elt_type=element_type.c_name())
def gen_visit_enum(name: str) -> str:
return mcgen('''
bool visit_type_%(c_name)s(Visitor *v, const char *name,
%(c_name)s *obj, Error **errp)
{
int value = *obj;
bool ok = visit_type_enum(v, name, &value, &%(c_name)s_lookup, errp);
*obj = value;
return ok;
}
''',
c_name=c_name(name))
def gen_visit_alternate(name: str, variants: QAPISchemaVariants) -> str:
ret = mcgen('''
bool visit_type_%(c_name)s(Visitor *v, const char *name,
%(c_name)s **obj, Error **errp)
{
bool ok = false;
if (!visit_start_alternate(v, name, (GenericAlternate **)obj,
sizeof(**obj), errp)) {
return false;
}
if (!*obj) {
/* incomplete */
assert(visit_is_dealloc(v));
ok = true;
goto out_obj;
}
switch ((*obj)->type) {
''',
c_name=c_name(name))
for var in variants.variants:
ret += var.ifcond.gen_if()
ret += mcgen('''
case %(case)s:
''',
case=var.type.alternate_qtype())
if isinstance(var.type, QAPISchemaObjectType):
ret += mcgen('''
if (!visit_start_struct(v, name, NULL, 0, errp)) {
break;
}
if (visit_type_%(c_type)s_members(v, &(*obj)->u.%(c_name)s, errp)) {
ok = visit_check_struct(v, errp);
}
visit_end_struct(v, NULL);
''',
c_type=var.type.c_name(),
c_name=c_name(var.name))
else:
ret += mcgen('''
ok = visit_type_%(c_type)s(v, name, &(*obj)->u.%(c_name)s, errp);
''',
c_type=var.type.c_name(),
c_name=c_name(var.name))
ret += mcgen('''
break;
''')
ret += var.ifcond.gen_endif()
ret += mcgen('''
case QTYPE_NONE:
abort();
default:
assert(visit_is_input(v));
error_setg(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null",
"%(name)s");
/* Avoid passing invalid *obj to qapi_free_%(c_name)s() */
g_free(*obj);
*obj = NULL;
}
out_obj:
visit_end_alternate(v, (void **)obj);
if (!ok && visit_is_input(v)) {
qapi_free_%(c_name)s(*obj);
*obj = NULL;
}
return ok;
}
''',
name=name, c_name=c_name(name))
return ret
def gen_visit_object(name: str) -> str:
return mcgen('''
bool visit_type_%(c_name)s(Visitor *v, const char *name,
%(c_name)s **obj, Error **errp)
{
bool ok = false;
if (!visit_start_struct(v, name, (void **)obj, sizeof(%(c_name)s), errp)) {
return false;
}
if (!*obj) {
/* incomplete */
assert(visit_is_dealloc(v));
ok = true;
goto out_obj;
}
if (!visit_type_%(c_name)s_members(v, *obj, errp)) {
goto out_obj;
}
ok = visit_check_struct(v, errp);
out_obj:
visit_end_struct(v, (void **)obj);
if (!ok && visit_is_input(v)) {
qapi_free_%(c_name)s(*obj);
*obj = NULL;
}
return ok;
}
''',
c_name=c_name(name))
class QAPISchemaGenVisitVisitor(QAPISchemaModularCVisitor):
def __init__(self, prefix: str):
super().__init__(
prefix, 'qapi-visit', ' * Schema-defined QAPI visitors',
' * Built-in QAPI visitors', __doc__)
def _begin_builtin_module(self) -> None:
self._genc.preamble_add(mcgen('''
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/qapi-builtin-visit.h"
'''))
self._genh.preamble_add(mcgen('''
#include "qapi/visitor.h"
#include "qapi/qapi-builtin-types.h"
'''))
def _begin_user_module(self, name: str) -> None:
types = self._module_basename('qapi-types', name)
visit = self._module_basename('qapi-visit', name)
self._genc.preamble_add(mcgen('''
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "%(visit)s.h"
''',
visit=visit))
self._genh.preamble_add(mcgen('''
#include "qapi/qapi-builtin-visit.h"
#include "%(types)s.h"
''',
types=types))
def visit_enum_type(self,
name: str,
info: Optional[QAPISourceInfo],
ifcond: QAPISchemaIfCond,
features: List[QAPISchemaFeature],
members: List[QAPISchemaEnumMember],
prefix: Optional[str]) -> None:
with ifcontext(ifcond, self._genh, self._genc):
self._genh.add(gen_visit_decl(name, scalar=True))
self._genc.add(gen_visit_enum(name))
def visit_array_type(self,
name: str,
info: Optional[QAPISourceInfo],
ifcond: QAPISchemaIfCond,
element_type: QAPISchemaType) -> None:
with ifcontext(ifcond, self._genh, self._genc):
self._genh.add(gen_visit_decl(name))
self._genc.add(gen_visit_list(name, element_type))
def visit_object_type(self,
name: str,
info: Optional[QAPISourceInfo],
ifcond: QAPISchemaIfCond,
features: List[QAPISchemaFeature],
base: Optional[QAPISchemaObjectType],
members: List[QAPISchemaObjectTypeMember],
variants: Optional[QAPISchemaVariants]) -> None:
# Nothing to do for the special empty builtin
if name == 'q_empty':
return
with ifcontext(ifcond, self._genh, self._genc):
self._genh.add(gen_visit_members_decl(name))
self._genc.add(gen_visit_object_members(name, base,
members, variants))
        # TODO Worth changing the visitor signature, so we could use it
        # directly rather than repeat type.is_implicit()?
if not name.startswith('q_'):
# only explicit types need an allocating visit
self._genh.add(gen_visit_decl(name))
self._genc.add(gen_visit_object(name))
def visit_alternate_type(self,
name: str,
info: Optional[QAPISourceInfo],
ifcond: QAPISchemaIfCond,
features: List[QAPISchemaFeature],
variants: QAPISchemaVariants) -> None:
with ifcontext(ifcond, self._genh, self._genc):
self._genh.add(gen_visit_decl(name))
self._genc.add(gen_visit_alternate(name, variants))
def gen_visit(schema: QAPISchema,
output_dir: str,
prefix: str,
opt_builtins: bool) -> None:
vis = QAPISchemaGenVisitVisitor(prefix)
schema.visit(vis)
vis.write(output_dir, opt_builtins)
| 12,442 | 28.07243 | 79 | py |
qemu | qemu-master/scripts/qapi/commands.py | """
QAPI command marshaller generator
Copyright IBM, Corp. 2011
Copyright (C) 2014-2018 Red Hat, Inc.
Authors:
Anthony Liguori <[email protected]>
Michael Roth <[email protected]>
Markus Armbruster <[email protected]>
This work is licensed under the terms of the GNU GPL, version 2.
See the COPYING file in the top-level directory.
"""
from typing import (
Dict,
List,
Optional,
Set,
)
from .common import c_name, mcgen
from .gen import (
QAPIGenC,
QAPISchemaModularCVisitor,
build_params,
gen_special_features,
ifcontext,
)
from .schema import (
QAPISchema,
QAPISchemaFeature,
QAPISchemaIfCond,
QAPISchemaObjectType,
QAPISchemaType,
)
from .source import QAPISourceInfo
def gen_command_decl(name: str,
arg_type: Optional[QAPISchemaObjectType],
boxed: bool,
ret_type: Optional[QAPISchemaType]) -> str:
return mcgen('''
%(c_type)s qmp_%(c_name)s(%(params)s);
''',
c_type=(ret_type and ret_type.c_type()) or 'void',
c_name=c_name(name),
params=build_params(arg_type, boxed, 'Error **errp'))
def gen_call(name: str,
arg_type: Optional[QAPISchemaObjectType],
boxed: bool,
ret_type: Optional[QAPISchemaType],
gen_tracing: bool) -> str:
ret = ''
argstr = ''
if boxed:
assert arg_type
argstr = '&arg, '
elif arg_type:
assert not arg_type.variants
for memb in arg_type.members:
if memb.need_has():
argstr += 'arg.has_%s, ' % c_name(memb.name)
argstr += 'arg.%s, ' % c_name(memb.name)
lhs = ''
if ret_type:
lhs = 'retval = '
name = c_name(name)
upper = name.upper()
if gen_tracing:
ret += mcgen('''
if (trace_event_get_state_backends(TRACE_QMP_ENTER_%(upper)s)) {
g_autoptr(GString) req_json = qobject_to_json(QOBJECT(args));
trace_qmp_enter_%(name)s(req_json->str);
}
''',
upper=upper, name=name)
ret += mcgen('''
%(lhs)sqmp_%(name)s(%(args)s&err);
''',
name=name, args=argstr, lhs=lhs)
ret += mcgen('''
if (err) {
''')
if gen_tracing:
ret += mcgen('''
trace_qmp_exit_%(name)s(error_get_pretty(err), false);
''',
name=name)
ret += mcgen('''
error_propagate(errp, err);
goto out;
}
''')
if ret_type:
ret += mcgen('''
qmp_marshal_output_%(c_name)s(retval, ret, errp);
''',
c_name=ret_type.c_name())
if gen_tracing:
if ret_type:
ret += mcgen('''
if (trace_event_get_state_backends(TRACE_QMP_EXIT_%(upper)s)) {
g_autoptr(GString) ret_json = qobject_to_json(*ret);
trace_qmp_exit_%(name)s(ret_json->str, true);
}
''',
upper=upper, name=name)
else:
ret += mcgen('''
trace_qmp_exit_%(name)s("{}", true);
''',
name=name)
return ret
def gen_marshal_output(ret_type: QAPISchemaType) -> str:
return mcgen('''
static void qmp_marshal_output_%(c_name)s(%(c_type)s ret_in,
QObject **ret_out, Error **errp)
{
Visitor *v;
v = qobject_output_visitor_new_qmp(ret_out);
if (visit_type_%(c_name)s(v, "unused", &ret_in, errp)) {
visit_complete(v, ret_out);
}
visit_free(v);
v = qapi_dealloc_visitor_new();
visit_type_%(c_name)s(v, "unused", &ret_in, NULL);
visit_free(v);
}
''',
c_type=ret_type.c_type(), c_name=ret_type.c_name())
def build_marshal_proto(name: str) -> str:
return ('void qmp_marshal_%s(QDict *args, QObject **ret, Error **errp)'
% c_name(name))
def gen_marshal_decl(name: str) -> str:
return mcgen('''
%(proto)s;
''',
proto=build_marshal_proto(name))
def gen_trace(name: str) -> str:
return mcgen('''
qmp_enter_%(name)s(const char *json) "%%s"
qmp_exit_%(name)s(const char *result, bool succeeded) "%%s %%d"
''',
name=c_name(name))
def gen_marshal(name: str,
arg_type: Optional[QAPISchemaObjectType],
boxed: bool,
ret_type: Optional[QAPISchemaType],
gen_tracing: bool) -> str:
have_args = boxed or (arg_type and not arg_type.is_empty())
if have_args:
assert arg_type is not None
arg_type_c_name = arg_type.c_name()
ret = mcgen('''
%(proto)s
{
Error *err = NULL;
bool ok = false;
Visitor *v;
''',
proto=build_marshal_proto(name))
if ret_type:
ret += mcgen('''
%(c_type)s retval;
''',
c_type=ret_type.c_type())
if have_args:
ret += mcgen('''
%(c_name)s arg = {0};
''',
c_name=arg_type_c_name)
ret += mcgen('''
v = qobject_input_visitor_new_qmp(QOBJECT(args));
if (!visit_start_struct(v, NULL, NULL, 0, errp)) {
goto out;
}
''')
if have_args:
ret += mcgen('''
if (visit_type_%(c_arg_type)s_members(v, &arg, errp)) {
ok = visit_check_struct(v, errp);
}
''',
c_arg_type=arg_type_c_name)
else:
ret += mcgen('''
ok = visit_check_struct(v, errp);
''')
ret += mcgen('''
visit_end_struct(v, NULL);
if (!ok) {
goto out;
}
''')
ret += gen_call(name, arg_type, boxed, ret_type, gen_tracing)
ret += mcgen('''
out:
visit_free(v);
''')
ret += mcgen('''
v = qapi_dealloc_visitor_new();
visit_start_struct(v, NULL, NULL, 0, NULL);
''')
if have_args:
ret += mcgen('''
visit_type_%(c_arg_type)s_members(v, &arg, NULL);
''',
c_arg_type=arg_type_c_name)
ret += mcgen('''
visit_end_struct(v, NULL);
visit_free(v);
''')
ret += mcgen('''
}
''')
return ret
def gen_register_command(name: str,
features: List[QAPISchemaFeature],
success_response: bool,
allow_oob: bool,
allow_preconfig: bool,
coroutine: bool) -> str:
options = []
if not success_response:
options += ['QCO_NO_SUCCESS_RESP']
if allow_oob:
options += ['QCO_ALLOW_OOB']
if allow_preconfig:
options += ['QCO_ALLOW_PRECONFIG']
if coroutine:
options += ['QCO_COROUTINE']
ret = mcgen('''
qmp_register_command(cmds, "%(name)s",
qmp_marshal_%(c_name)s, %(opts)s, %(feats)s);
''',
name=name, c_name=c_name(name),
opts=' | '.join(options) or 0,
feats=gen_special_features(features))
return ret
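# Sketch of the generated registration for a hypothetical out-of-band
# command 'example-cmd' with no special features:
#
#     qmp_register_command(cmds, "example-cmd",
#                          qmp_marshal_example_cmd, QCO_ALLOW_OOB, 0);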
class QAPISchemaGenCommandVisitor(QAPISchemaModularCVisitor):
def __init__(self, prefix: str, gen_tracing: bool):
super().__init__(
prefix, 'qapi-commands',
' * Schema-defined QAPI/QMP commands', None, __doc__,
gen_tracing=gen_tracing)
self._visited_ret_types: Dict[QAPIGenC, Set[QAPISchemaType]] = {}
self._gen_tracing = gen_tracing
def _begin_user_module(self, name: str) -> None:
self._visited_ret_types[self._genc] = set()
commands = self._module_basename('qapi-commands', name)
types = self._module_basename('qapi-types', name)
visit = self._module_basename('qapi-visit', name)
self._genc.add(mcgen('''
#include "qemu/osdep.h"
#include "qapi/compat-policy.h"
#include "qapi/visitor.h"
#include "qapi/qmp/qdict.h"
#include "qapi/dealloc-visitor.h"
#include "qapi/error.h"
#include "%(visit)s.h"
#include "%(commands)s.h"
''',
commands=commands, visit=visit))
if self._gen_tracing and commands != 'qapi-commands':
self._genc.add(mcgen('''
#include "qapi/qmp/qjson.h"
#include "trace/trace-%(nm)s_trace_events.h"
''',
nm=c_name(commands, protect=False)))
# We use c_name(commands, protect=False) to turn '-' into '_', to
# match .underscorify() in trace/meson.build
self._genh.add(mcgen('''
#include "%(types)s.h"
''',
types=types))
def visit_begin(self, schema: QAPISchema) -> None:
self._add_module('./init', ' * QAPI Commands initialization')
self._genh.add(mcgen('''
#include "qapi/qmp/dispatch.h"
void %(c_prefix)sqmp_init_marshal(QmpCommandList *cmds);
''',
c_prefix=c_name(self._prefix, protect=False)))
self._genc.add(mcgen('''
#include "qemu/osdep.h"
#include "%(prefix)sqapi-commands.h"
#include "%(prefix)sqapi-init-commands.h"
void %(c_prefix)sqmp_init_marshal(QmpCommandList *cmds)
{
QTAILQ_INIT(cmds);
''',
prefix=self._prefix,
c_prefix=c_name(self._prefix, protect=False)))
def visit_end(self) -> None:
with self._temp_module('./init'):
self._genc.add(mcgen('''
}
'''))
def visit_command(self,
name: str,
info: Optional[QAPISourceInfo],
ifcond: QAPISchemaIfCond,
features: List[QAPISchemaFeature],
arg_type: Optional[QAPISchemaObjectType],
ret_type: Optional[QAPISchemaType],
gen: bool,
success_response: bool,
boxed: bool,
allow_oob: bool,
allow_preconfig: bool,
coroutine: bool) -> None:
if not gen:
return
# FIXME: If T is a user-defined type, the user is responsible
# for making this work, i.e. to make T's condition the
# conjunction of the T-returning commands' conditions. If T
# is a built-in type, this isn't possible: the
# qmp_marshal_output_T() will be generated unconditionally.
if ret_type and ret_type not in self._visited_ret_types[self._genc]:
self._visited_ret_types[self._genc].add(ret_type)
with ifcontext(ret_type.ifcond,
self._genh, self._genc):
self._genc.add(gen_marshal_output(ret_type))
with ifcontext(ifcond, self._genh, self._genc):
self._genh.add(gen_command_decl(name, arg_type, boxed, ret_type))
self._genh.add(gen_marshal_decl(name))
self._genc.add(gen_marshal(name, arg_type, boxed, ret_type,
self._gen_tracing))
if self._gen_tracing:
self._gen_trace_events.add(gen_trace(name))
with self._temp_module('./init'):
with ifcontext(ifcond, self._genh, self._genc):
self._genc.add(gen_register_command(
name, features, success_response, allow_oob,
allow_preconfig, coroutine))
def gen_commands(schema: QAPISchema,
output_dir: str,
prefix: str,
gen_tracing: bool) -> None:
vis = QAPISchemaGenCommandVisitor(prefix, gen_tracing)
schema.visit(vis)
vis.write(output_dir)
| 11,314 | 26.597561 | 77 | py |
qemu | qemu-master/scripts/qapi/gen.py | # -*- coding: utf-8 -*-
#
# QAPI code generation
#
# Copyright (c) 2015-2019 Red Hat Inc.
#
# Authors:
# Markus Armbruster <[email protected]>
# Marc-André Lureau <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
from contextlib import contextmanager
import os
import re
from typing import (
Dict,
Iterator,
Optional,
Sequence,
Tuple,
)
from .common import (
c_fname,
c_name,
guardend,
guardstart,
mcgen,
)
from .schema import (
QAPISchemaFeature,
QAPISchemaIfCond,
QAPISchemaModule,
QAPISchemaObjectType,
QAPISchemaVisitor,
)
from .source import QAPISourceInfo
def gen_special_features(features: Sequence[QAPISchemaFeature]) -> str:
special_features = [f"1u << QAPI_{feat.name.upper()}"
for feat in features if feat.is_special()]
return ' | '.join(special_features) or '0'
class QAPIGen:
def __init__(self, fname: str):
self.fname = fname
self._preamble = ''
self._body = ''
def preamble_add(self, text: str) -> None:
self._preamble += text
def add(self, text: str) -> None:
self._body += text
def get_content(self) -> str:
return self._top() + self._preamble + self._body + self._bottom()
def _top(self) -> str:
# pylint: disable=no-self-use
return ''
def _bottom(self) -> str:
# pylint: disable=no-self-use
return ''
def write(self, output_dir: str) -> None:
# Include paths starting with ../ are used to reuse modules of the main
# schema in specialised schemas. Don't overwrite the files that are
# already generated for the main schema.
if self.fname.startswith('../'):
return
pathname = os.path.join(output_dir, self.fname)
odir = os.path.dirname(pathname)
if odir:
os.makedirs(odir, exist_ok=True)
        # use os.open for O_CREAT to create and read a non-existent file
fd = os.open(pathname, os.O_RDWR | os.O_CREAT, 0o666)
with os.fdopen(fd, 'r+', encoding='utf-8') as fp:
text = self.get_content()
oldtext = fp.read(len(text) + 1)
if text != oldtext:
fp.seek(0)
fp.truncate(0)
fp.write(text)
def _wrap_ifcond(ifcond: QAPISchemaIfCond, before: str, after: str) -> str:
if before == after:
return after # suppress empty #if ... #endif
assert after.startswith(before)
out = before
added = after[len(before):]
if added[0] == '\n':
out += '\n'
added = added[1:]
out += ifcond.gen_if()
out += added
out += ifcond.gen_endif()
return out
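# Illustrative effect: when 'after' extends 'before' and ifcond stands
# for CONFIG_FOO, only the added text is wrapped (a sketch; the exact
# directives come from QAPISchemaIfCond.gen_if()/gen_endif()):
#
#     #if defined(CONFIG_FOO)
#     ...text added between 'before' and 'after'...
#     #endif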
def build_params(arg_type: Optional[QAPISchemaObjectType],
boxed: bool,
extra: Optional[str] = None) -> str:
ret = ''
sep = ''
if boxed:
assert arg_type
ret += '%s arg' % arg_type.c_param_type()
sep = ', '
elif arg_type:
assert not arg_type.variants
for memb in arg_type.members:
ret += sep
sep = ', '
if memb.need_has():
ret += 'bool has_%s, ' % c_name(memb.name)
ret += '%s %s' % (memb.type.c_param_type(),
c_name(memb.name))
if extra:
ret += sep + extra
return ret if ret else 'void'
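# Illustrative result for an unboxed argument type with members
# 'device' (str) and optional 'force' (bool), extra='Error **errp':
#
#     const char *device, bool has_force, bool force, Error **errp
#
# (a sketch; actual C types come from memb.type.c_param_type())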
class QAPIGenCCode(QAPIGen):
def __init__(self, fname: str):
super().__init__(fname)
self._start_if: Optional[Tuple[QAPISchemaIfCond, str, str]] = None
def start_if(self, ifcond: QAPISchemaIfCond) -> None:
assert self._start_if is None
self._start_if = (ifcond, self._body, self._preamble)
def end_if(self) -> None:
assert self._start_if is not None
self._body = _wrap_ifcond(self._start_if[0],
self._start_if[1], self._body)
self._preamble = _wrap_ifcond(self._start_if[0],
self._start_if[2], self._preamble)
self._start_if = None
def get_content(self) -> str:
assert self._start_if is None
return super().get_content()
class QAPIGenC(QAPIGenCCode):
def __init__(self, fname: str, blurb: str, pydoc: str):
super().__init__(fname)
self._blurb = blurb
self._copyright = '\n * '.join(re.findall(r'^Copyright .*', pydoc,
re.MULTILINE))
def _top(self) -> str:
return mcgen('''
/* AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
%(blurb)s
*
* %(copyright)s
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*/
''',
blurb=self._blurb, copyright=self._copyright)
def _bottom(self) -> str:
return mcgen('''
/* Dummy declaration to prevent empty .o file */
char qapi_dummy_%(name)s;
''',
name=c_fname(self.fname))
class QAPIGenH(QAPIGenC):
def _top(self) -> str:
return super()._top() + guardstart(self.fname)
def _bottom(self) -> str:
return guardend(self.fname)
class QAPIGenTrace(QAPIGen):
def _top(self) -> str:
return super()._top() + '# AUTOMATICALLY GENERATED, DO NOT MODIFY\n\n'
@contextmanager
def ifcontext(ifcond: QAPISchemaIfCond, *args: QAPIGenCCode) -> Iterator[None]:
"""
A with-statement context manager that wraps with `start_if()` / `end_if()`.
:param ifcond: A sequence of conditionals, passed to `start_if()`.
:param args: any number of `QAPIGenCCode`.
Example::
with ifcontext(ifcond, self._genh, self._genc):
modify self._genh and self._genc ...
Is equivalent to calling::
self._genh.start_if(ifcond)
self._genc.start_if(ifcond)
modify self._genh and self._genc ...
self._genh.end_if()
self._genc.end_if()
"""
for arg in args:
arg.start_if(ifcond)
yield
for arg in args:
arg.end_if()
class QAPISchemaMonolithicCVisitor(QAPISchemaVisitor):
def __init__(self,
prefix: str,
what: str,
blurb: str,
pydoc: str):
self._prefix = prefix
self._what = what
self._genc = QAPIGenC(self._prefix + self._what + '.c',
blurb, pydoc)
self._genh = QAPIGenH(self._prefix + self._what + '.h',
blurb, pydoc)
def write(self, output_dir: str) -> None:
self._genc.write(output_dir)
self._genh.write(output_dir)
class QAPISchemaModularCVisitor(QAPISchemaVisitor):
def __init__(self,
prefix: str,
what: str,
user_blurb: str,
builtin_blurb: Optional[str],
pydoc: str,
gen_tracing: bool = False):
self._prefix = prefix
self._what = what
self._user_blurb = user_blurb
self._builtin_blurb = builtin_blurb
self._pydoc = pydoc
self._current_module: Optional[str] = None
self._module: Dict[str, Tuple[QAPIGenC, QAPIGenH,
Optional[QAPIGenTrace]]] = {}
self._main_module: Optional[str] = None
self._gen_tracing = gen_tracing
@property
def _genc(self) -> QAPIGenC:
assert self._current_module is not None
return self._module[self._current_module][0]
@property
def _genh(self) -> QAPIGenH:
assert self._current_module is not None
return self._module[self._current_module][1]
@property
def _gen_trace_events(self) -> QAPIGenTrace:
assert self._gen_tracing
assert self._current_module is not None
gent = self._module[self._current_module][2]
assert gent is not None
return gent
@staticmethod
def _module_dirname(name: str) -> str:
if QAPISchemaModule.is_user_module(name):
return os.path.dirname(name)
return ''
def _module_basename(self, what: str, name: str) -> str:
ret = '' if QAPISchemaModule.is_builtin_module(name) else self._prefix
if QAPISchemaModule.is_user_module(name):
basename = os.path.basename(name)
ret += what
if name != self._main_module:
ret += '-' + os.path.splitext(basename)[0]
else:
assert QAPISchemaModule.is_system_module(name)
ret += re.sub(r'-', '-' + name[2:] + '-', what)
return ret
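    # Illustrative results, assuming an empty prefix:
    #   _module_basename('qapi-types', './builtin') -> 'qapi-builtin-types'
    #   _module_basename('qapi-types', 'block-core.json')
    #     -> 'qapi-types-block-core' (or plain 'qapi-types' for the
    #        main module)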
def _module_filename(self, what: str, name: str) -> str:
return os.path.join(self._module_dirname(name),
self._module_basename(what, name))
def _add_module(self, name: str, blurb: str) -> None:
if QAPISchemaModule.is_user_module(name):
if self._main_module is None:
self._main_module = name
basename = self._module_filename(self._what, name)
genc = QAPIGenC(basename + '.c', blurb, self._pydoc)
genh = QAPIGenH(basename + '.h', blurb, self._pydoc)
gent: Optional[QAPIGenTrace] = None
if self._gen_tracing:
gent = QAPIGenTrace(basename + '.trace-events')
self._module[name] = (genc, genh, gent)
self._current_module = name
@contextmanager
def _temp_module(self, name: str) -> Iterator[None]:
old_module = self._current_module
self._current_module = name
yield
self._current_module = old_module
def write(self, output_dir: str, opt_builtins: bool = False) -> None:
for name, (genc, genh, gent) in self._module.items():
if QAPISchemaModule.is_builtin_module(name) and not opt_builtins:
continue
genc.write(output_dir)
genh.write(output_dir)
if gent is not None:
gent.write(output_dir)
def _begin_builtin_module(self) -> None:
pass
def _begin_user_module(self, name: str) -> None:
pass
def visit_module(self, name: str) -> None:
if QAPISchemaModule.is_builtin_module(name):
if self._builtin_blurb:
self._add_module(name, self._builtin_blurb)
self._begin_builtin_module()
else:
# The built-in module has not been created. No code may
# be generated.
self._current_module = None
else:
assert QAPISchemaModule.is_user_module(name)
self._add_module(name, self._user_blurb)
self._begin_user_module(name)
def visit_include(self, name: str, info: Optional[QAPISourceInfo]) -> None:
relname = os.path.relpath(self._module_filename(self._what, name),
os.path.dirname(self._genh.fname))
self._genh.preamble_add(mcgen('''
#include "%(relname)s.h"
''',
relname=relname))
| 11,131 | 29.666667 | 79 | py |
qemu | qemu-master/scripts/qapi/types.py | """
QAPI types generator
Copyright IBM, Corp. 2011
Copyright (c) 2013-2018 Red Hat Inc.
Authors:
Anthony Liguori <[email protected]>
Michael Roth <[email protected]>
Markus Armbruster <[email protected]>
This work is licensed under the terms of the GNU GPL, version 2.
See the COPYING file in the top-level directory.
"""
from typing import List, Optional
from .common import c_enum_const, c_name, mcgen
from .gen import (
QAPISchemaModularCVisitor,
gen_special_features,
ifcontext,
)
from .schema import (
QAPISchema,
QAPISchemaEnumMember,
QAPISchemaFeature,
QAPISchemaIfCond,
QAPISchemaObjectType,
QAPISchemaObjectTypeMember,
QAPISchemaType,
QAPISchemaVariants,
)
from .source import QAPISourceInfo
# variants must be emitted before their container; track what has already
# been output
objects_seen = set()
def gen_enum_lookup(name: str,
members: List[QAPISchemaEnumMember],
prefix: Optional[str] = None) -> str:
max_index = c_enum_const(name, '_MAX', prefix)
feats = ''
ret = mcgen('''
const QEnumLookup %(c_name)s_lookup = {
.array = (const char *const[]) {
''',
c_name=c_name(name))
for memb in members:
ret += memb.ifcond.gen_if()
index = c_enum_const(name, memb.name, prefix)
ret += mcgen('''
[%(index)s] = "%(name)s",
''',
index=index, name=memb.name)
ret += memb.ifcond.gen_endif()
special_features = gen_special_features(memb.features)
if special_features != '0':
feats += mcgen('''
[%(index)s] = %(special_features)s,
''',
index=index, special_features=special_features)
if feats:
ret += mcgen('''
},
.special_features = (const unsigned char[%(max_index)s]) {
''',
max_index=max_index)
ret += feats
ret += mcgen('''
},
.size = %(max_index)s
};
''',
max_index=max_index)
return ret
def gen_enum(name: str,
members: List[QAPISchemaEnumMember],
prefix: Optional[str] = None) -> str:
# append automatically generated _MAX value
enum_members = members + [QAPISchemaEnumMember('_MAX', None)]
ret = mcgen('''
typedef enum %(c_name)s {
''',
c_name=c_name(name))
for memb in enum_members:
ret += memb.ifcond.gen_if()
ret += mcgen('''
%(c_enum)s,
''',
c_enum=c_enum_const(name, memb.name, prefix))
ret += memb.ifcond.gen_endif()
ret += mcgen('''
} %(c_name)s;
''',
c_name=c_name(name))
ret += mcgen('''
#define %(c_name)s_str(val) \\
qapi_enum_lookup(&%(c_name)s_lookup, (val))
extern const QEnumLookup %(c_name)s_lookup;
''',
c_name=c_name(name))
return ret
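# Sketch of the generated C for a hypothetical enum 'Status' with
# members 'ok' and 'bad' (no prefix override):
#
#     typedef enum Status {
#         STATUS_OK,
#         STATUS_BAD,
#         STATUS__MAX,
#     } Status;
#
# followed by the Status_str() macro and the Status_lookup declaration.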
def gen_fwd_object_or_array(name: str) -> str:
return mcgen('''
typedef struct %(c_name)s %(c_name)s;
''',
c_name=c_name(name))
def gen_array(name: str, element_type: QAPISchemaType) -> str:
return mcgen('''
struct %(c_name)s {
%(c_name)s *next;
%(c_type)s value;
};
''',
c_name=c_name(name), c_type=element_type.c_type())
def gen_struct_members(members: List[QAPISchemaObjectTypeMember]) -> str:
ret = ''
for memb in members:
ret += memb.ifcond.gen_if()
if memb.need_has():
ret += mcgen('''
bool has_%(c_name)s;
''',
c_name=c_name(memb.name))
ret += mcgen('''
%(c_type)s %(c_name)s;
''',
c_type=memb.type.c_type(), c_name=c_name(memb.name))
ret += memb.ifcond.gen_endif()
return ret
def gen_object(name: str, ifcond: QAPISchemaIfCond,
base: Optional[QAPISchemaObjectType],
members: List[QAPISchemaObjectTypeMember],
variants: Optional[QAPISchemaVariants]) -> str:
if name in objects_seen:
return ''
objects_seen.add(name)
ret = ''
for var in variants.variants if variants else ():
obj = var.type
if not isinstance(obj, QAPISchemaObjectType):
continue
ret += gen_object(obj.name, obj.ifcond, obj.base,
obj.local_members, obj.variants)
ret += mcgen('''
''')
ret += ifcond.gen_if()
ret += mcgen('''
struct %(c_name)s {
''',
c_name=c_name(name))
if base:
if not base.is_implicit():
ret += mcgen('''
/* Members inherited from %(c_name)s: */
''',
c_name=base.c_name())
ret += gen_struct_members(base.members)
if not base.is_implicit():
ret += mcgen('''
/* Own members: */
''')
ret += gen_struct_members(members)
if variants:
ret += gen_variants(variants)
# Make sure that all structs have at least one member; this avoids
# potential issues with attempting to malloc space for zero-length
# structs in C, and also incompatibility with C++ (where an empty
# struct is size 1).
if (not base or base.is_empty()) and not members and not variants:
ret += mcgen('''
char qapi_dummy_for_empty_struct;
''')
ret += mcgen('''
};
''')
ret += ifcond.gen_endif()
return ret
def gen_upcast(name: str, base: QAPISchemaObjectType) -> str:
# C makes const-correctness ugly. We have to cast away const to let
# this function work for both const and non-const obj.
return mcgen('''
static inline %(base)s *qapi_%(c_name)s_base(const %(c_name)s *obj)
{
return (%(base)s *)obj;
}
''',
c_name=c_name(name), base=base.c_name())
def gen_variants(variants: QAPISchemaVariants) -> str:
ret = mcgen('''
union { /* union tag is @%(c_name)s */
''',
c_name=c_name(variants.tag_member.name))
for var in variants.variants:
if var.type.name == 'q_empty':
continue
ret += var.ifcond.gen_if()
ret += mcgen('''
%(c_type)s %(c_name)s;
''',
c_type=var.type.c_unboxed_type(),
c_name=c_name(var.name))
ret += var.ifcond.gen_endif()
ret += mcgen('''
} u;
''')
return ret
def gen_type_cleanup_decl(name: str) -> str:
ret = mcgen('''
void qapi_free_%(c_name)s(%(c_name)s *obj);
G_DEFINE_AUTOPTR_CLEANUP_FUNC(%(c_name)s, qapi_free_%(c_name)s)
''',
c_name=c_name(name))
return ret
def gen_type_cleanup(name: str) -> str:
ret = mcgen('''
void qapi_free_%(c_name)s(%(c_name)s *obj)
{
Visitor *v;
if (!obj) {
return;
}
v = qapi_dealloc_visitor_new();
visit_type_%(c_name)s(v, NULL, &obj, NULL);
visit_free(v);
}
''',
c_name=c_name(name))
return ret
class QAPISchemaGenTypeVisitor(QAPISchemaModularCVisitor):
def __init__(self, prefix: str):
super().__init__(
prefix, 'qapi-types', ' * Schema-defined QAPI types',
' * Built-in QAPI types', __doc__)
def _begin_builtin_module(self) -> None:
self._genc.preamble_add(mcgen('''
#include "qemu/osdep.h"
#include "qapi/dealloc-visitor.h"
#include "qapi/qapi-builtin-types.h"
#include "qapi/qapi-builtin-visit.h"
'''))
self._genh.preamble_add(mcgen('''
#include "qapi/util.h"
'''))
def _begin_user_module(self, name: str) -> None:
types = self._module_basename('qapi-types', name)
visit = self._module_basename('qapi-visit', name)
self._genc.preamble_add(mcgen('''
#include "qemu/osdep.h"
#include "qapi/dealloc-visitor.h"
#include "%(types)s.h"
#include "%(visit)s.h"
''',
types=types, visit=visit))
self._genh.preamble_add(mcgen('''
#include "qapi/qapi-builtin-types.h"
'''))
def visit_begin(self, schema: QAPISchema) -> None:
# gen_object() is recursive, ensure it doesn't visit the empty type
objects_seen.add(schema.the_empty_object_type.name)
def _gen_type_cleanup(self, name: str) -> None:
self._genh.add(gen_type_cleanup_decl(name))
self._genc.add(gen_type_cleanup(name))
def visit_enum_type(self,
name: str,
info: Optional[QAPISourceInfo],
ifcond: QAPISchemaIfCond,
features: List[QAPISchemaFeature],
members: List[QAPISchemaEnumMember],
prefix: Optional[str]) -> None:
with ifcontext(ifcond, self._genh, self._genc):
self._genh.preamble_add(gen_enum(name, members, prefix))
self._genc.add(gen_enum_lookup(name, members, prefix))
def visit_array_type(self,
name: str,
info: Optional[QAPISourceInfo],
ifcond: QAPISchemaIfCond,
element_type: QAPISchemaType) -> None:
with ifcontext(ifcond, self._genh, self._genc):
self._genh.preamble_add(gen_fwd_object_or_array(name))
self._genh.add(gen_array(name, element_type))
self._gen_type_cleanup(name)
def visit_object_type(self,
name: str,
info: Optional[QAPISourceInfo],
ifcond: QAPISchemaIfCond,
features: List[QAPISchemaFeature],
base: Optional[QAPISchemaObjectType],
members: List[QAPISchemaObjectTypeMember],
variants: Optional[QAPISchemaVariants]) -> None:
# Nothing to do for the special empty builtin
if name == 'q_empty':
return
with ifcontext(ifcond, self._genh):
self._genh.preamble_add(gen_fwd_object_or_array(name))
self._genh.add(gen_object(name, ifcond, base, members, variants))
with ifcontext(ifcond, self._genh, self._genc):
if base and not base.is_implicit():
self._genh.add(gen_upcast(name, base))
        # TODO Worth changing the visitor signature, so we could use it
        # directly rather than repeat type.is_implicit()?
if not name.startswith('q_'):
# implicit types won't be directly allocated/freed
self._gen_type_cleanup(name)
def visit_alternate_type(self,
name: str,
info: Optional[QAPISourceInfo],
ifcond: QAPISchemaIfCond,
features: List[QAPISchemaFeature],
variants: QAPISchemaVariants) -> None:
with ifcontext(ifcond, self._genh):
self._genh.preamble_add(gen_fwd_object_or_array(name))
self._genh.add(gen_object(name, ifcond, None,
[variants.tag_member], variants))
with ifcontext(ifcond, self._genh, self._genc):
self._gen_type_cleanup(name)
def gen_types(schema: QAPISchema,
output_dir: str,
prefix: str,
opt_builtins: bool) -> None:
vis = QAPISchemaGenTypeVisitor(prefix)
schema.visit(vis)
vis.write(output_dir, opt_builtins)
| 11,257 | 28.015464 | 75 | py |
qemu | qemu-master/scripts/qapi/source.py | #
# QAPI frontend source file info
#
# Copyright (c) 2019 Red Hat Inc.
#
# Authors:
# Markus Armbruster <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
import copy
from typing import List, Optional, TypeVar
class QAPISchemaPragma:
# Replace with @dataclass in Python 3.7+
# pylint: disable=too-few-public-methods
def __init__(self) -> None:
# Are documentation comments required?
self.doc_required = False
# Commands whose names may use '_'
self.command_name_exceptions: List[str] = []
# Commands allowed to return a non-dictionary
self.command_returns_exceptions: List[str] = []
# Types whose member names may violate case conventions
self.member_name_exceptions: List[str] = []
class QAPISourceInfo:
T = TypeVar('T', bound='QAPISourceInfo')
def __init__(self, fname: str, parent: Optional['QAPISourceInfo']):
self.fname = fname
self.line = 1
self.parent = parent
self.pragma: QAPISchemaPragma = (
parent.pragma if parent else QAPISchemaPragma()
)
self.defn_meta: Optional[str] = None
self.defn_name: Optional[str] = None
def set_defn(self, meta: str, name: str) -> None:
self.defn_meta = meta
self.defn_name = name
def next_line(self: T) -> T:
info = copy.copy(self)
info.line += 1
return info
def loc(self) -> str:
return f"{self.fname}:{self.line}"
def in_defn(self) -> str:
if self.defn_name:
return "%s: In %s '%s':\n" % (self.fname,
self.defn_meta, self.defn_name)
return ''
def include_path(self) -> str:
ret = ''
parent = self.parent
while parent:
ret = 'In file included from %s:\n' % parent.loc() + ret
parent = parent.parent
return ret
def __str__(self) -> str:
return self.include_path() + self.in_defn() + self.loc()
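# A worked sketch (hypothetical file names): chained QAPISourceInfo
# objects compose the include path, definition context, and location
# into one error prefix:
#
#     outer = QAPISourceInfo('main.json', None)
#     inner = QAPISourceInfo('sub.json', outer).next_line()
#     inner.set_defn('command', 'query-foo')
#     str(inner)
#     -> "In file included from main.json:1:\n"
#        "sub.json: In command 'query-foo':\n"
#        "sub.json:2"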
| 2,095 | 28.111111 | 73 | py |
qemu | qemu-master/scripts/qapi/common.py | #
# QAPI helper library
#
# Copyright IBM, Corp. 2011
# Copyright (c) 2013-2018 Red Hat Inc.
#
# Authors:
# Anthony Liguori <[email protected]>
# Markus Armbruster <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
import re
from typing import (
Any,
Dict,
Match,
Optional,
Sequence,
Union,
)
#: Magic string that gets removed along with all space to its right.
EATSPACE = '\033EATSPACE.'
POINTER_SUFFIX = ' *' + EATSPACE
def camel_to_upper(value: str) -> str:
"""
Converts CamelCase to CAMEL_CASE.
Examples::
ENUMName -> ENUM_NAME
EnumName1 -> ENUM_NAME1
ENUM_NAME -> ENUM_NAME
ENUM_NAME1 -> ENUM_NAME1
ENUM_Name2 -> ENUM_NAME2
ENUM24_Name -> ENUM24_NAME
"""
c_fun_str = c_name(value, False)
if value.isupper():
return c_fun_str
new_name = ''
length = len(c_fun_str)
for i in range(length):
char = c_fun_str[i]
        # When char is uppercase and not preceded by '_', do more checks
if char.isupper() and (i > 0) and c_fun_str[i - 1] != '_':
if i < length - 1 and c_fun_str[i + 1].islower():
new_name += '_'
elif c_fun_str[i - 1].isdigit():
new_name += '_'
new_name += char
return new_name.lstrip('_').upper()
def c_enum_const(type_name: str,
const_name: str,
prefix: Optional[str] = None) -> str:
"""
Generate a C enumeration constant name.
:param type_name: The name of the enumeration.
:param const_name: The name of this constant.
:param prefix: Optional, prefix that overrides the type_name.
"""
if prefix is not None:
type_name = prefix
return camel_to_upper(type_name) + '_' + c_name(const_name, False).upper()
def c_name(name: str, protect: bool = True) -> str:
"""
Map ``name`` to a valid C identifier.
Used for converting 'name' from a 'name':'type' qapi definition
into a generated struct member, as well as converting type names
into substrings of a generated C function name.
'__a.b_c' -> '__a_b_c', 'x-foo' -> 'x_foo'
protect=True: 'int' -> 'q_int'; protect=False: 'int' -> 'int'
:param name: The name to map.
:param protect: If true, avoid returning certain ticklish identifiers
(like C keywords) by prepending ``q_``.
"""
# ANSI X3J11/88-090, 3.1.1
c89_words = set(['auto', 'break', 'case', 'char', 'const', 'continue',
'default', 'do', 'double', 'else', 'enum', 'extern',
'float', 'for', 'goto', 'if', 'int', 'long', 'register',
'return', 'short', 'signed', 'sizeof', 'static',
'struct', 'switch', 'typedef', 'union', 'unsigned',
'void', 'volatile', 'while'])
# ISO/IEC 9899:1999, 6.4.1
c99_words = set(['inline', 'restrict', '_Bool', '_Complex', '_Imaginary'])
# ISO/IEC 9899:2011, 6.4.1
c11_words = set(['_Alignas', '_Alignof', '_Atomic', '_Generic',
'_Noreturn', '_Static_assert', '_Thread_local'])
# GCC http://gcc.gnu.org/onlinedocs/gcc-4.7.1/gcc/C-Extensions.html
# excluding _.*
gcc_words = set(['asm', 'typeof'])
# C++ ISO/IEC 14882:2003 2.11
cpp_words = set(['bool', 'catch', 'class', 'const_cast', 'delete',
'dynamic_cast', 'explicit', 'false', 'friend', 'mutable',
'namespace', 'new', 'operator', 'private', 'protected',
'public', 'reinterpret_cast', 'static_cast', 'template',
'this', 'throw', 'true', 'try', 'typeid', 'typename',
'using', 'virtual', 'wchar_t',
# alternative representations
'and', 'and_eq', 'bitand', 'bitor', 'compl', 'not',
'not_eq', 'or', 'or_eq', 'xor', 'xor_eq'])
# namespace pollution:
polluted_words = set(['unix', 'errno', 'mips', 'sparc', 'i386', 'linux'])
name = re.sub(r'[^A-Za-z0-9_]', '_', name)
if protect and (name in (c89_words | c99_words | c11_words | gcc_words
| cpp_words | polluted_words)
or name[0].isdigit()):
return 'q_' + name
return name
class Indentation:
"""
Indentation level management.
:param initial: Initial number of spaces, default 0.
"""
def __init__(self, initial: int = 0) -> None:
self._level = initial
def __repr__(self) -> str:
return "{}({:d})".format(type(self).__name__, self._level)
def __str__(self) -> str:
"""Return the current indentation as a string of spaces."""
return ' ' * self._level
def increase(self, amount: int = 4) -> None:
"""Increase the indentation level by ``amount``, default 4."""
self._level += amount
def decrease(self, amount: int = 4) -> None:
"""Decrease the indentation level by ``amount``, default 4."""
assert amount <= self._level
self._level -= amount
#: Global, current indent level for code generation.
indent = Indentation()
def cgen(code: str, **kwds: object) -> str:
"""
Generate ``code`` with ``kwds`` interpolated.
Obey `indent`, and strip `EATSPACE`.
"""
raw = code % kwds
pfx = str(indent)
if pfx:
raw = re.sub(r'^(?!(#|$))', pfx, raw, flags=re.MULTILINE)
return re.sub(re.escape(EATSPACE) + r' *', '', raw)
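# For example (a sketch; 'Foo' is a hypothetical C type name): with the
# global indent raised by one level, cgen() prefixes non-preprocessor
# lines and strips the EATSPACE marker left by POINTER_SUFFIX:
#
#     indent.increase()
#     cgen('%(c_type)sname;\n', c_type='Foo' + POINTER_SUFFIX)
#     -> '    Foo *name;\n'
#     indent.decrease()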
def mcgen(code: str, **kwds: object) -> str:
if code[0] == '\n':
code = code[1:]
return cgen(code, **kwds)
def c_fname(filename: str) -> str:
return re.sub(r'[^A-Za-z0-9_]', '_', filename)
def guardstart(name: str) -> str:
return mcgen('''
#ifndef %(name)s
#define %(name)s
''',
name=c_fname(name).upper())
def guardend(name: str) -> str:
return mcgen('''
#endif /* %(name)s */
''',
name=c_fname(name).upper())
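# For example, guardstart('qapi-types') and guardend('qapi-types')
# bracket a generated header with:
#
#     #ifndef QAPI_TYPES
#     #define QAPI_TYPES
#     ...
#     #endif /* QAPI_TYPES */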
def gen_ifcond(ifcond: Optional[Union[str, Dict[str, Any]]],
cond_fmt: str, not_fmt: str,
all_operator: str, any_operator: str) -> str:
def do_gen(ifcond: Union[str, Dict[str, Any]],
need_parens: bool) -> str:
if isinstance(ifcond, str):
return cond_fmt % ifcond
assert isinstance(ifcond, dict) and len(ifcond) == 1
if 'not' in ifcond:
return not_fmt % do_gen(ifcond['not'], True)
if 'all' in ifcond:
gen = gen_infix(all_operator, ifcond['all'])
else:
gen = gen_infix(any_operator, ifcond['any'])
if need_parens:
gen = '(' + gen + ')'
return gen
def gen_infix(operator: str, operands: Sequence[Any]) -> str:
return operator.join([do_gen(o, True) for o in operands])
if not ifcond:
return ''
return do_gen(ifcond, False)
def cgen_ifcond(ifcond: Optional[Union[str, Dict[str, Any]]]) -> str:
return gen_ifcond(ifcond, 'defined(%s)', '!%s', ' && ', ' || ')
def docgen_ifcond(ifcond: Optional[Union[str, Dict[str, Any]]]) -> str:
# TODO Doc generated for conditions needs polish
return gen_ifcond(ifcond, '%s', 'not %s', ' and ', ' or ')
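# Illustrative examples (hypothetical config symbols):
#
#     cgen_ifcond({'all': ['CONFIG_FOO', 'CONFIG_BAR']})
#     -> 'defined(CONFIG_FOO) && defined(CONFIG_BAR)'
#     cgen_ifcond({'not': 'CONFIG_FOO'})
#     -> '!defined(CONFIG_FOO)'
#     docgen_ifcond({'any': ['CONFIG_FOO', 'CONFIG_BAR']})
#     -> 'CONFIG_FOO or CONFIG_BAR'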
def gen_if(cond: str) -> str:
if not cond:
return ''
return mcgen('''
#if %(cond)s
''', cond=cond)
def gen_endif(cond: str) -> str:
if not cond:
return ''
return mcgen('''
#endif /* %(cond)s */
''', cond=cond)
def must_match(pattern: str, string: str) -> Match[str]:
match = re.match(pattern, string)
assert match is not None
return match
| 7,683 | 29.492063 | 78 | py |
qemu | qemu-master/scripts/qapi/introspect.py | """
QAPI introspection generator
Copyright (C) 2015-2021 Red Hat, Inc.
Authors:
Markus Armbruster <[email protected]>
John Snow <[email protected]>
This work is licensed under the terms of the GNU GPL, version 2.
See the COPYING file in the top-level directory.
"""
from typing import (
Any,
Dict,
Generic,
List,
Optional,
Sequence,
TypeVar,
Union,
)
from .common import c_name, mcgen
from .gen import QAPISchemaMonolithicCVisitor
from .schema import (
QAPISchema,
QAPISchemaArrayType,
QAPISchemaBuiltinType,
QAPISchemaEntity,
QAPISchemaEnumMember,
QAPISchemaFeature,
QAPISchemaIfCond,
QAPISchemaObjectType,
QAPISchemaObjectTypeMember,
QAPISchemaType,
QAPISchemaVariant,
QAPISchemaVariants,
)
from .source import QAPISourceInfo
# This module constructs a tree data structure that is used to
# generate the introspection information for QEMU. It is shaped
# like a JSON value.
#
# A complexity over JSON is that our values may or may not be annotated.
#
# Un-annotated values may be:
# Scalar: str, bool, None.
# Non-scalar: List, Dict
# _value = Union[str, bool, None, Dict[str, JSONValue], List[JSONValue]]
#
# With optional annotations, the type of all values is:
# JSONValue = Union[_Value, Annotated[_Value]]
#
# Sadly, mypy does not support recursive types, so the _Stub alias is used to
# mark the imprecision in the type model where we'd otherwise use JSONValue.
_Stub = Any
_Scalar = Union[str, bool, None]
_NonScalar = Union[Dict[str, _Stub], List[_Stub]]
_Value = Union[_Scalar, _NonScalar]
JSONValue = Union[_Value, 'Annotated[_Value]']
# These types are based on structures defined in QEMU's schema, so we
# lack precise types for them here. Python 3.6 does not offer
# TypedDict constructs, so they are broadly typed here as simple
# Python Dicts.
SchemaInfo = Dict[str, object]
SchemaInfoEnumMember = Dict[str, object]
SchemaInfoObject = Dict[str, object]
SchemaInfoObjectVariant = Dict[str, object]
SchemaInfoObjectMember = Dict[str, object]
SchemaInfoCommand = Dict[str, object]
_ValueT = TypeVar('_ValueT', bound=_Value)
class Annotated(Generic[_ValueT]):
"""
Annotated generally contains a SchemaInfo-like type (as a dict),
    but it is also used to wrap comments/ifconds around scalar leaf values,
for the benefit of features and enums.
"""
# TODO: Remove after Python 3.7 adds @dataclass:
# pylint: disable=too-few-public-methods
def __init__(self, value: _ValueT, ifcond: QAPISchemaIfCond,
comment: Optional[str] = None):
self.value = value
self.comment: Optional[str] = comment
self.ifcond = ifcond
def _tree_to_qlit(obj: JSONValue,
level: int = 0,
dict_value: bool = False) -> str:
"""
Convert the type tree into a QLIT C string, recursively.
:param obj: The value to convert.
This value may not be Annotated when dict_value is True.
:param level: The indentation level for this particular value.
:param dict_value: True when the value being processed belongs to a
                       dict key, which suppresses the output indent.
"""
def indent(level: int) -> str:
return level * 4 * ' '
if isinstance(obj, Annotated):
# NB: _tree_to_qlit is called recursively on the values of a
# key:value pair; those values can't be decorated with
# comments or conditionals.
msg = "dict values cannot have attached comments or if-conditionals."
assert not dict_value, msg
ret = ''
if obj.comment:
ret += indent(level) + f"/* {obj.comment} */\n"
if obj.ifcond.is_present():
ret += obj.ifcond.gen_if()
ret += _tree_to_qlit(obj.value, level)
if obj.ifcond.is_present():
ret += '\n' + obj.ifcond.gen_endif()
return ret
ret = ''
if not dict_value:
ret += indent(level)
# Scalars:
if obj is None:
ret += 'QLIT_QNULL'
elif isinstance(obj, str):
ret += f"QLIT_QSTR({to_c_string(obj)})"
elif isinstance(obj, bool):
ret += f"QLIT_QBOOL({str(obj).lower()})"
# Non-scalars:
elif isinstance(obj, list):
ret += 'QLIT_QLIST(((QLitObject[]) {\n'
for value in obj:
ret += _tree_to_qlit(value, level + 1).strip('\n') + '\n'
ret += indent(level + 1) + '{}\n'
ret += indent(level) + '}))'
elif isinstance(obj, dict):
ret += 'QLIT_QDICT(((QLitDictEntry[]) {\n'
for key, value in sorted(obj.items()):
ret += indent(level + 1) + "{{ {:s}, {:s} }},\n".format(
to_c_string(key),
_tree_to_qlit(value, level + 1, dict_value=True)
)
ret += indent(level + 1) + '{}\n'
ret += indent(level) + '}))'
else:
raise NotImplementedError(
f"type '{type(obj).__name__}' not implemented"
)
if level > 0:
ret += ','
return ret
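# Illustrative examples of the generated QLIT C expressions:
#
#     _tree_to_qlit(True)   -> 'QLIT_QBOOL(true)'
#     _tree_to_qlit('foo')  -> 'QLIT_QSTR("foo")'
#     _tree_to_qlit([None]) -> 'QLIT_QLIST(((QLitObject[]) {\n'
#                              '    QLIT_QNULL,\n'
#                              '    {}\n'
#                              '}))'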
def to_c_string(string: str) -> str:
return '"' + string.replace('\\', r'\\').replace('"', r'\"') + '"'
class QAPISchemaGenIntrospectVisitor(QAPISchemaMonolithicCVisitor):
def __init__(self, prefix: str, unmask: bool):
super().__init__(
prefix, 'qapi-introspect',
' * QAPI/QMP schema introspection', __doc__)
self._unmask = unmask
self._schema: Optional[QAPISchema] = None
self._trees: List[Annotated[SchemaInfo]] = []
self._used_types: List[QAPISchemaType] = []
self._name_map: Dict[str, str] = {}
self._genc.add(mcgen('''
#include "qemu/osdep.h"
#include "%(prefix)sqapi-introspect.h"
''',
prefix=prefix))
def visit_begin(self, schema: QAPISchema) -> None:
self._schema = schema
def visit_end(self) -> None:
# visit the types that are actually used
for typ in self._used_types:
typ.visit(self)
# generate C
name = c_name(self._prefix, protect=False) + 'qmp_schema_qlit'
self._genh.add(mcgen('''
#include "qapi/qmp/qlit.h"
extern const QLitObject %(c_name)s;
''',
c_name=c_name(name)))
self._genc.add(mcgen('''
const QLitObject %(c_name)s = %(c_string)s;
''',
c_name=c_name(name),
c_string=_tree_to_qlit(self._trees)))
self._schema = None
self._trees = []
self._used_types = []
self._name_map = {}
def visit_needed(self, entity: QAPISchemaEntity) -> bool:
# Ignore types on first pass; visit_end() will pick up used types
return not isinstance(entity, QAPISchemaType)
def _name(self, name: str) -> str:
if self._unmask:
return name
if name not in self._name_map:
self._name_map[name] = '%d' % len(self._name_map)
return self._name_map[name]
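    # Note: with masking enabled (the default), _name() maps successive
    # distinct names to '0', '1', '2', ...; a repeated name keeps its
    # first mapping, so masked names stay stable within one run.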
def _use_type(self, typ: QAPISchemaType) -> str:
assert self._schema is not None
# Map the various integer types to plain int
if typ.json_type() == 'int':
typ = self._schema.lookup_type('int')
elif (isinstance(typ, QAPISchemaArrayType) and
typ.element_type.json_type() == 'int'):
typ = self._schema.lookup_type('intList')
# Add type to work queue if new
if typ not in self._used_types:
self._used_types.append(typ)
# Clients should examine commands and events, not types. Hide
# type names as integers to reduce the temptation. Also, it
# saves a few characters on the wire.
if isinstance(typ, QAPISchemaBuiltinType):
return typ.name
if isinstance(typ, QAPISchemaArrayType):
return '[' + self._use_type(typ.element_type) + ']'
return self._name(typ.name)
@staticmethod
def _gen_features(features: Sequence[QAPISchemaFeature]
) -> List[Annotated[str]]:
return [Annotated(f.name, f.ifcond) for f in features]
def _gen_tree(self, name: str, mtype: str, obj: Dict[str, object],
ifcond: QAPISchemaIfCond = QAPISchemaIfCond(),
features: Sequence[QAPISchemaFeature] = ()) -> None:
"""
Build and append a SchemaInfo object to self._trees.
:param name: The SchemaInfo's name.
:param mtype: The SchemaInfo's meta-type.
:param obj: Additional SchemaInfo members, as appropriate for
the meta-type.
:param ifcond: Conditionals to apply to the SchemaInfo.
:param features: The SchemaInfo's features.
Will be omitted from the output if empty.
"""
comment: Optional[str] = None
if mtype not in ('command', 'event', 'builtin', 'array'):
if not self._unmask:
# Output a comment to make it easy to map masked names
# back to the source when reading the generated output.
comment = f'"{self._name(name)}" = {name}'
name = self._name(name)
obj['name'] = name
obj['meta-type'] = mtype
if features:
obj['features'] = self._gen_features(features)
self._trees.append(Annotated(obj, ifcond, comment))
def _gen_enum_member(self, member: QAPISchemaEnumMember
) -> Annotated[SchemaInfoEnumMember]:
obj: SchemaInfoEnumMember = {
'name': member.name,
}
if member.features:
obj['features'] = self._gen_features(member.features)
return Annotated(obj, member.ifcond)
def _gen_object_member(self, member: QAPISchemaObjectTypeMember
) -> Annotated[SchemaInfoObjectMember]:
obj: SchemaInfoObjectMember = {
'name': member.name,
'type': self._use_type(member.type)
}
if member.optional:
obj['default'] = None
if member.features:
obj['features'] = self._gen_features(member.features)
return Annotated(obj, member.ifcond)
def _gen_variant(self, variant: QAPISchemaVariant
) -> Annotated[SchemaInfoObjectVariant]:
obj: SchemaInfoObjectVariant = {
'case': variant.name,
'type': self._use_type(variant.type)
}
return Annotated(obj, variant.ifcond)
def visit_builtin_type(self, name: str, info: Optional[QAPISourceInfo],
json_type: str) -> None:
self._gen_tree(name, 'builtin', {'json-type': json_type})
def visit_enum_type(self, name: str, info: Optional[QAPISourceInfo],
ifcond: QAPISchemaIfCond,
features: List[QAPISchemaFeature],
members: List[QAPISchemaEnumMember],
prefix: Optional[str]) -> None:
self._gen_tree(
name, 'enum',
{'members': [self._gen_enum_member(m) for m in members],
'values': [Annotated(m.name, m.ifcond) for m in members]},
ifcond, features
)
def visit_array_type(self, name: str, info: Optional[QAPISourceInfo],
ifcond: QAPISchemaIfCond,
element_type: QAPISchemaType) -> None:
element = self._use_type(element_type)
self._gen_tree('[' + element + ']', 'array', {'element-type': element},
ifcond)
def visit_object_type_flat(self, name: str, info: Optional[QAPISourceInfo],
ifcond: QAPISchemaIfCond,
features: List[QAPISchemaFeature],
members: List[QAPISchemaObjectTypeMember],
variants: Optional[QAPISchemaVariants]) -> None:
obj: SchemaInfoObject = {
'members': [self._gen_object_member(m) for m in members]
}
if variants:
obj['tag'] = variants.tag_member.name
obj['variants'] = [self._gen_variant(v) for v in variants.variants]
self._gen_tree(name, 'object', obj, ifcond, features)
def visit_alternate_type(self, name: str, info: Optional[QAPISourceInfo],
ifcond: QAPISchemaIfCond,
features: List[QAPISchemaFeature],
variants: QAPISchemaVariants) -> None:
self._gen_tree(
name, 'alternate',
{'members': [Annotated({'type': self._use_type(m.type)},
m.ifcond)
for m in variants.variants]},
ifcond, features
)
def visit_command(self, name: str, info: Optional[QAPISourceInfo],
ifcond: QAPISchemaIfCond,
features: List[QAPISchemaFeature],
arg_type: Optional[QAPISchemaObjectType],
ret_type: Optional[QAPISchemaType], gen: bool,
success_response: bool, boxed: bool, allow_oob: bool,
allow_preconfig: bool, coroutine: bool) -> None:
assert self._schema is not None
arg_type = arg_type or self._schema.the_empty_object_type
ret_type = ret_type or self._schema.the_empty_object_type
obj: SchemaInfoCommand = {
'arg-type': self._use_type(arg_type),
'ret-type': self._use_type(ret_type)
}
if allow_oob:
obj['allow-oob'] = allow_oob
self._gen_tree(name, 'command', obj, ifcond, features)
def visit_event(self, name: str, info: Optional[QAPISourceInfo],
ifcond: QAPISchemaIfCond,
features: List[QAPISchemaFeature],
arg_type: Optional[QAPISchemaObjectType],
boxed: bool) -> None:
assert self._schema is not None
arg_type = arg_type or self._schema.the_empty_object_type
self._gen_tree(name, 'event', {'arg-type': self._use_type(arg_type)},
ifcond, features)
def gen_introspect(schema: QAPISchema, output_dir: str, prefix: str,
opt_unmask: bool) -> None:
vis = QAPISchemaGenIntrospectVisitor(prefix, opt_unmask)
schema.visit(vis)
vis.write(output_dir)
| 14,377 | 35.772379 | 79 | py |
qemu | qemu-master/scripts/qapi/expr.py | # -*- coding: utf-8 -*-
#
# Copyright IBM, Corp. 2011
# Copyright (c) 2013-2021 Red Hat Inc.
#
# Authors:
# Anthony Liguori <[email protected]>
# Markus Armbruster <[email protected]>
# Eric Blake <[email protected]>
# Marc-André Lureau <[email protected]>
# John Snow <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
"""
Normalize and validate (context-free) QAPI schema expression structures.
`QAPISchemaParser` parses a QAPI schema into abstract syntax trees
consisting of dict, list, str, bool, and int nodes. This module ensures
that these nested structures have the correct type(s) and key(s) where
appropriate for the QAPI context-free grammar.
The QAPI schema expression language allows for certain syntactic sugar;
this module also handles the normalization process of these nested
structures.
See `check_exprs` for the main entry point.
See `schema.QAPISchema` for processing into native Python data
structures and contextual semantic validation.
"""
import re
from typing import (
Dict,
Iterable,
List,
Optional,
Union,
cast,
)
from .common import c_name
from .error import QAPISemError
from .parser import QAPIExpression
from .source import QAPISourceInfo
# See check_name_str(), below.
valid_name = re.compile(r'(__[a-z0-9.-]+_)?'
r'(x-)?'
r'([a-z][a-z0-9_-]*)$', re.IGNORECASE)
def check_name_is_str(name: object,
info: QAPISourceInfo,
source: str) -> None:
"""
Ensure that ``name`` is a ``str``.
:raise QAPISemError: When ``name`` fails validation.
"""
if not isinstance(name, str):
raise QAPISemError(info, "%s requires a string name" % source)
def check_name_str(name: str, info: QAPISourceInfo, source: str) -> str:
"""
Ensure that ``name`` is a valid QAPI name.
A valid name consists of ASCII letters, digits, ``-``, and ``_``,
starting with a letter. It may be prefixed by a downstream prefix
of the form __RFQDN_, or the experimental prefix ``x-``. If both
    prefixes are present, the __RFQDN_ prefix goes first.
A valid name cannot start with ``q_``, which is reserved.
:param name: Name to check.
:param info: QAPI schema source file information.
:param source: Error string describing what ``name`` belongs to.
:raise QAPISemError: When ``name`` fails validation.
:return: The stem of the valid name, with no prefixes.
"""
# Reserve the entire 'q_' namespace for c_name(), and for 'q_empty'
# and 'q_obj_*' implicit type names.
match = valid_name.match(name)
if not match or c_name(name, False).startswith('q_'):
raise QAPISemError(info, "%s has an invalid name" % source)
return match.group(3)
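# Illustrative examples (hypothetical info/source arguments):
#
#     check_name_str('x-foo', info, source)              -> 'foo'
#     check_name_str('__com.example_cmd', info, source)  -> 'cmd'
#     check_name_str('q_int', info, source)   raises QAPISemError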
def check_name_upper(name: str, info: QAPISourceInfo, source: str) -> None:
"""
Ensure that ``name`` is a valid event name.
This means it must be a valid QAPI name as checked by
`check_name_str()`, but where the stem prohibits lowercase
characters and ``-``.
:param name: Name to check.
:param info: QAPI schema source file information.
:param source: Error string describing what ``name`` belongs to.
:raise QAPISemError: When ``name`` fails validation.
"""
stem = check_name_str(name, info, source)
if re.search(r'[a-z-]', stem):
raise QAPISemError(
info, "name of %s must not use lowercase or '-'" % source)
def check_name_lower(name: str, info: QAPISourceInfo, source: str,
permit_upper: bool = False,
permit_underscore: bool = False) -> None:
"""
Ensure that ``name`` is a valid command or member name.
This means it must be a valid QAPI name as checked by
`check_name_str()`, but where the stem prohibits uppercase
characters and ``_``.
:param name: Name to check.
:param info: QAPI schema source file information.
:param source: Error string describing what ``name`` belongs to.
:param permit_upper: Additionally permit uppercase.
:param permit_underscore: Additionally permit ``_``.
:raise QAPISemError: When ``name`` fails validation.
"""
stem = check_name_str(name, info, source)
if ((not permit_upper and re.search(r'[A-Z]', stem))
or (not permit_underscore and '_' in stem)):
raise QAPISemError(
info, "name of %s must not use uppercase or '_'" % source)
def check_name_camel(name: str, info: QAPISourceInfo, source: str) -> None:
"""
Ensure that ``name`` is a valid user-defined type name.
This means it must be a valid QAPI name as checked by
`check_name_str()`, but where the stem must be in CamelCase.
:param name: Name to check.
:param info: QAPI schema source file information.
:param source: Error string describing what ``name`` belongs to.
:raise QAPISemError: When ``name`` fails validation.
"""
stem = check_name_str(name, info, source)
if not re.match(r'[A-Z][A-Za-z0-9]*[a-z][A-Za-z0-9]*$', stem):
raise QAPISemError(info, "name of %s must use CamelCase" % source)
def check_defn_name_str(name: str, info: QAPISourceInfo, meta: str) -> None:
"""
Ensure that ``name`` is a valid definition name.
Based on the value of ``meta``, this means that:
- 'event' names adhere to `check_name_upper()`.
- 'command' names adhere to `check_name_lower()`.
- Else, meta is a type, and must pass `check_name_camel()`.
These names must not end with ``List``.
:param name: Name to check.
:param info: QAPI schema source file information.
:param meta: Meta-type name of the QAPI expression.
:raise QAPISemError: When ``name`` fails validation.
"""
if meta == 'event':
check_name_upper(name, info, meta)
elif meta == 'command':
check_name_lower(
name, info, meta,
permit_underscore=name in info.pragma.command_name_exceptions)
else:
check_name_camel(name, info, meta)
if name.endswith('List'):
raise QAPISemError(
info, "%s name should not end in 'List'" % meta)
def check_keys(value: Dict[str, object],
info: QAPISourceInfo,
source: str,
required: List[str],
optional: List[str]) -> None:
"""
Ensure that a dict has a specific set of keys.
:param value: The dict to check.
:param info: QAPI schema source file information.
:param source: Error string describing this ``value``.
:param required: Keys that *must* be present.
:param optional: Keys that *may* be present.
:raise QAPISemError: When unknown keys are present.
"""
def pprint(elems: Iterable[str]) -> str:
return ', '.join("'" + e + "'" for e in sorted(elems))
missing = set(required) - set(value)
if missing:
raise QAPISemError(
info,
"%s misses key%s %s"
% (source, 's' if len(missing) > 1 else '',
pprint(missing)))
allowed = set(required) | set(optional)
unknown = set(value) - allowed
if unknown:
raise QAPISemError(
info,
"%s has unknown key%s %s\nValid keys are %s."
% (source, 's' if len(unknown) > 1 else '',
pprint(unknown), pprint(allowed)))
def check_flags(expr: QAPIExpression) -> None:
"""
Ensure flag members (if present) have valid values.
:param expr: The expression to validate.
:raise QAPISemError:
When certain flags have an invalid value, or when
incompatible flags are present.
"""
for key in ('gen', 'success-response'):
if key in expr and expr[key] is not False:
raise QAPISemError(
expr.info, "flag '%s' may only use false value" % key)
for key in ('boxed', 'allow-oob', 'allow-preconfig', 'coroutine'):
if key in expr and expr[key] is not True:
raise QAPISemError(
expr.info, "flag '%s' may only use true value" % key)
if 'allow-oob' in expr and 'coroutine' in expr:
# This is not necessarily a fundamental incompatibility, but
# we don't have a use case and the desired semantics isn't
# obvious. The simplest solution is to forbid it until we get
# a use case for it.
raise QAPISemError(
expr.info, "flags 'allow-oob' and 'coroutine' are incompatible")
def check_if(expr: Dict[str, object],
info: QAPISourceInfo, source: str) -> None:
"""
Validate the ``if`` member of an object.
The ``if`` member may be either a ``str`` or a dict.
:param expr: The expression containing the ``if`` member to validate.
:param info: QAPI schema source file information.
:param source: Error string describing ``expr``.
:raise QAPISemError:
When the "if" member fails validation, or when there are no
non-empty conditions.
:return: None
"""
def _check_if(cond: Union[str, object]) -> None:
if isinstance(cond, str):
if not re.fullmatch(r'[A-Z][A-Z0-9_]*', cond):
raise QAPISemError(
info,
"'if' condition '%s' of %s is not a valid identifier"
% (cond, source))
return
if not isinstance(cond, dict):
raise QAPISemError(
info,
"'if' condition of %s must be a string or an object" % source)
check_keys(cond, info, "'if' condition of %s" % source, [],
["all", "any", "not"])
if len(cond) != 1:
raise QAPISemError(
info,
"'if' condition of %s has conflicting keys" % source)
if 'not' in cond:
_check_if(cond['not'])
elif 'all' in cond:
_check_infix('all', cond['all'])
else:
_check_infix('any', cond['any'])
def _check_infix(operator: str, operands: object) -> None:
if not isinstance(operands, list):
raise QAPISemError(
info,
"'%s' condition of %s must be an array"
% (operator, source))
if not operands:
raise QAPISemError(
info, "'if' condition [] of %s is useless" % source)
for operand in operands:
_check_if(operand)
ifcond = expr.get('if')
if ifcond is None:
return
_check_if(ifcond)
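# Illustrative 'if' forms accepted above (hypothetical config symbols):
#
#     'CONFIG_FOO'
#     {'not': 'CONFIG_BAR'}
#     {'all': ['CONFIG_FOO', {'any': ['CONFIG_BAR', 'CONFIG_BAZ']}]}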
def normalize_members(members: object) -> None:
"""
Normalize a "members" value.
If ``members`` is a dict, for every value in that dict, if that
value is not itself already a dict, normalize it to
``{'type': value}``.
:forms:
:sugared: ``Dict[str, Union[str, TypeRef]]``
:canonical: ``Dict[str, TypeRef]``
:param members: The members value to normalize.
:return: None, ``members`` is normalized in-place as needed.
"""
if isinstance(members, dict):
for key, arg in members.items():
if isinstance(arg, dict):
continue
members[key] = {'type': arg}
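# For example, the sugared members dict
#     {'name': 'str', 'value': {'type': 'int', 'if': 'CONFIG_FOO'}}
# is normalized in place to
#     {'name': {'type': 'str'}, 'value': {'type': 'int', 'if': 'CONFIG_FOO'}}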
def check_type(value: Optional[object],
info: QAPISourceInfo,
source: str,
allow_array: bool = False,
allow_dict: Union[bool, str] = False) -> None:
"""
Normalize and validate the QAPI type of ``value``.
Python types of ``str`` or ``None`` are always allowed.
:param value: The value to check.
:param info: QAPI schema source file information.
:param source: Error string describing this ``value``.
:param allow_array:
Allow a ``List[str]`` of length 1, which indicates an array of
the type named by the list element.
:param allow_dict:
Allow a dict. Its members can be struct type members or union
branches. When the value of ``allow_dict`` is in pragma
``member-name-exceptions``, the dict's keys may violate the
member naming rules. The dict members are normalized in place.
:raise QAPISemError: When ``value`` fails validation.
:return: None, ``value`` is normalized in-place as needed.
"""
if value is None:
return
# Type name
if isinstance(value, str):
return
# Array type
if isinstance(value, list):
if not allow_array:
raise QAPISemError(info, "%s cannot be an array" % source)
if len(value) != 1 or not isinstance(value[0], str):
raise QAPISemError(info,
"%s: array type must contain single type name" %
source)
return
# Anonymous type
if not allow_dict:
raise QAPISemError(info, "%s should be a type name" % source)
if not isinstance(value, dict):
raise QAPISemError(info,
"%s should be an object or type name" % source)
permissive = False
if isinstance(allow_dict, str):
permissive = allow_dict in info.pragma.member_name_exceptions
# value is a dictionary, check that each member is okay
for (key, arg) in value.items():
key_source = "%s member '%s'" % (source, key)
if key.startswith('*'):
key = key[1:]
check_name_lower(key, info, key_source,
permit_upper=permissive,
permit_underscore=permissive)
if c_name(key, False) == 'u' or c_name(key, False).startswith('has_'):
raise QAPISemError(info, "%s uses reserved name" % key_source)
check_keys(arg, info, key_source, ['type'], ['if', 'features'])
check_if(arg, info, key_source)
check_features(arg.get('features'), info)
check_type(arg['type'], info, key_source, allow_array=True)
def check_features(features: Optional[object],
info: QAPISourceInfo) -> None:
"""
Normalize and validate the ``features`` member.
``features`` may be a ``list`` of either ``str`` or ``dict``.
Any ``str`` element will be normalized to ``{'name': element}``.
:forms:
:sugared: ``List[Union[str, Feature]]``
:canonical: ``List[Feature]``
:param features: The features member value to validate.
:param info: QAPI schema source file information.
:raise QAPISemError: When ``features`` fails validation.
:return: None, ``features`` is normalized in-place as needed.
"""
if features is None:
return
if not isinstance(features, list):
raise QAPISemError(info, "'features' must be an array")
features[:] = [f if isinstance(f, dict) else {'name': f}
for f in features]
for feat in features:
source = "'features' member"
assert isinstance(feat, dict)
check_keys(feat, info, source, ['name'], ['if'])
check_name_is_str(feat['name'], info, source)
source = "%s '%s'" % (source, feat['name'])
check_name_lower(feat['name'], info, source)
check_if(feat, info, source)
def check_enum(expr: QAPIExpression) -> None:
"""
Normalize and validate this expression as an ``enum`` definition.
:param expr: The expression to validate.
:raise QAPISemError: When ``expr`` is not a valid ``enum``.
:return: None, ``expr`` is normalized in-place as needed.
"""
name = expr['enum']
members = expr['data']
prefix = expr.get('prefix')
info = expr.info
if not isinstance(members, list):
raise QAPISemError(info, "'data' must be an array")
if prefix is not None and not isinstance(prefix, str):
raise QAPISemError(info, "'prefix' must be a string")
permissive = name in info.pragma.member_name_exceptions
members[:] = [m if isinstance(m, dict) else {'name': m}
for m in members]
for member in members:
source = "'data' member"
check_keys(member, info, source, ['name'], ['if', 'features'])
member_name = member['name']
check_name_is_str(member_name, info, source)
source = "%s '%s'" % (source, member_name)
# Enum members may start with a digit
if member_name[0].isdigit():
member_name = 'd' + member_name # Hack: hide the digit
check_name_lower(member_name, info, source,
permit_upper=permissive,
permit_underscore=permissive)
check_if(member, info, source)
check_features(member.get('features'), info)
def check_struct(expr: QAPIExpression) -> None:
"""
Normalize and validate this expression as a ``struct`` definition.
:param expr: The expression to validate.
:raise QAPISemError: When ``expr`` is not a valid ``struct``.
:return: None, ``expr`` is normalized in-place as needed.
"""
name = cast(str, expr['struct']) # Checked in check_exprs
members = expr['data']
check_type(members, expr.info, "'data'", allow_dict=name)
check_type(expr.get('base'), expr.info, "'base'")
def check_union(expr: QAPIExpression) -> None:
"""
Normalize and validate this expression as a ``union`` definition.
:param expr: The expression to validate.
:raise QAPISemError: when ``expr`` is not a valid ``union``.
:return: None, ``expr`` is normalized in-place as needed.
"""
name = cast(str, expr['union']) # Checked in check_exprs
base = expr['base']
discriminator = expr['discriminator']
members = expr['data']
info = expr.info
check_type(base, info, "'base'", allow_dict=name)
check_name_is_str(discriminator, info, "'discriminator'")
if not isinstance(members, dict):
raise QAPISemError(info, "'data' must be an object")
for (key, value) in members.items():
source = "'data' member '%s'" % key
check_keys(value, info, source, ['type'], ['if'])
check_if(value, info, source)
check_type(value['type'], info, source, allow_array=not base)
def check_alternate(expr: QAPIExpression) -> None:
"""
Normalize and validate this expression as an ``alternate`` definition.
:param expr: The expression to validate.
:raise QAPISemError: When ``expr`` is not a valid ``alternate``.
:return: None, ``expr`` is normalized in-place as needed.
"""
members = expr['data']
info = expr.info
if not members:
raise QAPISemError(info, "'data' must not be empty")
if not isinstance(members, dict):
raise QAPISemError(info, "'data' must be an object")
for (key, value) in members.items():
source = "'data' member '%s'" % key
check_name_lower(key, info, source)
check_keys(value, info, source, ['type'], ['if'])
check_if(value, info, source)
check_type(value['type'], info, source, allow_array=True)
def check_command(expr: QAPIExpression) -> None:
"""
Normalize and validate this expression as a ``command`` definition.
:param expr: The expression to validate.
:raise QAPISemError: When ``expr`` is not a valid ``command``.
:return: None, ``expr`` is normalized in-place as needed.
"""
args = expr.get('data')
rets = expr.get('returns')
boxed = expr.get('boxed', False)
if boxed and args is None:
raise QAPISemError(expr.info, "'boxed': true requires 'data'")
check_type(args, expr.info, "'data'", allow_dict=not boxed)
check_type(rets, expr.info, "'returns'", allow_array=True)
def check_event(expr: QAPIExpression) -> None:
"""
Normalize and validate this expression as an ``event`` definition.
:param expr: The expression to validate.
:raise QAPISemError: When ``expr`` is not a valid ``event``.
:return: None, ``expr`` is normalized in-place as needed.
"""
args = expr.get('data')
boxed = expr.get('boxed', False)
if boxed and args is None:
raise QAPISemError(expr.info, "'boxed': true requires 'data'")
check_type(args, expr.info, "'data'", allow_dict=not boxed)
def check_exprs(exprs: List[QAPIExpression]) -> List[QAPIExpression]:
"""
Validate and normalize a list of parsed QAPI schema expressions.
This function accepts a list of expressions and metadata as returned
by the parser. It destructively normalizes the expressions in-place.
:param exprs: The list of expressions to normalize and validate.
:raise QAPISemError: When any expression fails validation.
:return: The same list of expressions (now modified).
"""
for expr in exprs:
info = expr.info
doc = expr.doc
if 'include' in expr:
continue
metas = expr.keys() & {'enum', 'struct', 'union', 'alternate',
'command', 'event'}
if len(metas) != 1:
raise QAPISemError(
info,
"expression must have exactly one key"
" 'enum', 'struct', 'union', 'alternate',"
" 'command', 'event'")
meta = metas.pop()
check_name_is_str(expr[meta], info, "'%s'" % meta)
name = cast(str, expr[meta])
info.set_defn(meta, name)
check_defn_name_str(name, info, meta)
if doc:
if doc.symbol != name:
raise QAPISemError(
info, "documentation comment is for '%s'" % doc.symbol)
doc.check_expr(expr)
elif info.pragma.doc_required:
raise QAPISemError(info,
"documentation comment required")
if meta == 'enum':
check_keys(expr, info, meta,
['enum', 'data'], ['if', 'features', 'prefix'])
check_enum(expr)
elif meta == 'union':
check_keys(expr, info, meta,
['union', 'base', 'discriminator', 'data'],
['if', 'features'])
normalize_members(expr.get('base'))
normalize_members(expr['data'])
check_union(expr)
elif meta == 'alternate':
check_keys(expr, info, meta,
['alternate', 'data'], ['if', 'features'])
normalize_members(expr['data'])
check_alternate(expr)
elif meta == 'struct':
check_keys(expr, info, meta,
['struct', 'data'], ['base', 'if', 'features'])
normalize_members(expr['data'])
check_struct(expr)
elif meta == 'command':
check_keys(expr, info, meta,
['command'],
['data', 'returns', 'boxed', 'if', 'features',
'gen', 'success-response', 'allow-oob',
'allow-preconfig', 'coroutine'])
normalize_members(expr.get('data'))
check_command(expr)
elif meta == 'event':
check_keys(expr, info, meta,
['event'], ['data', 'boxed', 'if', 'features'])
normalize_members(expr.get('data'))
check_event(expr)
else:
assert False, 'unexpected meta type'
check_if(expr, info, meta)
check_features(expr.get('features'), info)
check_flags(expr)
return exprs
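# A minimal sketch of an expression that passes the checks above
# (assuming a well-formed QAPIExpression with .info attached):
#
#     {'enum': 'Status', 'data': ['good', 'bad']}
#
# check_enum() then normalizes 'data' in place to
# [{'name': 'good'}, {'name': 'bad'}].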
| 23,359 | 33.813711 | 79 | py |