repo
stringlengths
2
99
file
stringlengths
13
225
code
stringlengths
0
18.3M
file_length
int64
0
18.3M
avg_line_length
float64
0
1.36M
max_line_length
int64
0
4.26M
extension_type
stringclasses
1 value
qemu
qemu-master/scripts/qapi/__init__.py
0
0
0
py
qemu
qemu-master/scripts/qapi/schema.py
# -*- coding: utf-8 -*- # # QAPI schema internal representation # # Copyright (c) 2015-2019 Red Hat Inc. # # Authors: # Markus Armbruster <[email protected]> # Eric Blake <[email protected]> # Marc-André Lureau <[email protected]> # # This work is licensed under the terms of the GNU GPL, version 2. # See the COPYING file in the top-level directory. # TODO catching name collisions in generated code would be nice from collections import OrderedDict import os import re from typing import List, Optional from .common import ( POINTER_SUFFIX, c_name, cgen_ifcond, docgen_ifcond, gen_endif, gen_if, ) from .error import QAPIError, QAPISemError, QAPISourceError from .expr import check_exprs from .parser import QAPIExpression, QAPISchemaParser class QAPISchemaIfCond: def __init__(self, ifcond=None): self.ifcond = ifcond def _cgen(self): return cgen_ifcond(self.ifcond) def gen_if(self): return gen_if(self._cgen()) def gen_endif(self): return gen_endif(self._cgen()) def docgen(self): return docgen_ifcond(self.ifcond) def is_present(self): return bool(self.ifcond) class QAPISchemaEntity: meta: Optional[str] = None def __init__(self, name: str, info, doc, ifcond=None, features=None): assert name is None or isinstance(name, str) for f in features or []: assert isinstance(f, QAPISchemaFeature) f.set_defined_in(name) self.name = name self._module = None # For explicitly defined entities, info points to the (explicit) # definition. For builtins (and their arrays), info is None. # For implicitly defined entities, info points to a place that # triggered the implicit definition (there may be more than one # such place). 
self.info = info self.doc = doc self._ifcond = ifcond or QAPISchemaIfCond() self.features = features or [] self._checked = False def c_name(self): return c_name(self.name) def check(self, schema): assert not self._checked seen = {} for f in self.features: f.check_clash(self.info, seen) self._checked = True def connect_doc(self, doc=None): doc = doc or self.doc if doc: for f in self.features: doc.connect_feature(f) def check_doc(self): if self.doc: self.doc.check() def _set_module(self, schema, info): assert self._checked fname = info.fname if info else QAPISchemaModule.BUILTIN_MODULE_NAME self._module = schema.module_by_fname(fname) self._module.add_entity(self) def set_module(self, schema): self._set_module(schema, self.info) @property def ifcond(self): assert self._checked return self._ifcond def is_implicit(self): return not self.info def visit(self, visitor): assert self._checked def describe(self): assert self.meta return "%s '%s'" % (self.meta, self.name) class QAPISchemaVisitor: def visit_begin(self, schema): pass def visit_end(self): pass def visit_module(self, name): pass def visit_needed(self, entity): # Default to visiting everything return True def visit_include(self, name, info): pass def visit_builtin_type(self, name, info, json_type): pass def visit_enum_type(self, name, info, ifcond, features, members, prefix): pass def visit_array_type(self, name, info, ifcond, element_type): pass def visit_object_type(self, name, info, ifcond, features, base, members, variants): pass def visit_object_type_flat(self, name, info, ifcond, features, members, variants): pass def visit_alternate_type(self, name, info, ifcond, features, variants): pass def visit_command(self, name, info, ifcond, features, arg_type, ret_type, gen, success_response, boxed, allow_oob, allow_preconfig, coroutine): pass def visit_event(self, name, info, ifcond, features, arg_type, boxed): pass class QAPISchemaModule: BUILTIN_MODULE_NAME = './builtin' def __init__(self, name): self.name = name 
self._entity_list = [] @staticmethod def is_system_module(name: str) -> bool: """ System modules are internally defined modules. Their names start with the "./" prefix. """ return name.startswith('./') @classmethod def is_user_module(cls, name: str) -> bool: """ User modules are those defined by the user in qapi JSON files. They do not start with the "./" prefix. """ return not cls.is_system_module(name) @classmethod def is_builtin_module(cls, name: str) -> bool: """ The built-in module is a single System module for the built-in types. It is always "./builtin". """ return name == cls.BUILTIN_MODULE_NAME def add_entity(self, ent): self._entity_list.append(ent) def visit(self, visitor): visitor.visit_module(self.name) for entity in self._entity_list: if visitor.visit_needed(entity): entity.visit(visitor) class QAPISchemaInclude(QAPISchemaEntity): def __init__(self, sub_module, info): super().__init__(None, info, None) self._sub_module = sub_module def visit(self, visitor): super().visit(visitor) visitor.visit_include(self._sub_module.name, self.info) class QAPISchemaType(QAPISchemaEntity): # Return the C type for common use. # For the types we commonly box, this is a pointer type. def c_type(self): pass # Return the C type to be used in a parameter list. def c_param_type(self): return self.c_type() # Return the C type to be used where we suppress boxing. def c_unboxed_type(self): return self.c_type() def json_type(self): pass def alternate_qtype(self): json2qtype = { 'null': 'QTYPE_QNULL', 'string': 'QTYPE_QSTRING', 'number': 'QTYPE_QNUM', 'int': 'QTYPE_QNUM', 'boolean': 'QTYPE_QBOOL', 'array': 'QTYPE_QLIST', 'object': 'QTYPE_QDICT' } return json2qtype.get(self.json_type()) def doc_type(self): if self.is_implicit(): return None return self.name def need_has_if_optional(self): # When FOO is a pointer, has_FOO == !!FOO, i.e. has_FOO is redundant. # Except for arrays; see QAPISchemaArrayType.need_has_if_optional(). 
return not self.c_type().endswith(POINTER_SUFFIX) def check(self, schema): QAPISchemaEntity.check(self, schema) for feat in self.features: if feat.is_special(): raise QAPISemError( self.info, f"feature '{feat.name}' is not supported for types") def describe(self): assert self.meta return "%s type '%s'" % (self.meta, self.name) class QAPISchemaBuiltinType(QAPISchemaType): meta = 'built-in' def __init__(self, name, json_type, c_type): super().__init__(name, None, None) assert not c_type or isinstance(c_type, str) assert json_type in ('string', 'number', 'int', 'boolean', 'null', 'value') self._json_type_name = json_type self._c_type_name = c_type def c_name(self): return self.name def c_type(self): return self._c_type_name def c_param_type(self): if self.name == 'str': return 'const ' + self._c_type_name return self._c_type_name def json_type(self): return self._json_type_name def doc_type(self): return self.json_type() def visit(self, visitor): super().visit(visitor) visitor.visit_builtin_type(self.name, self.info, self.json_type()) class QAPISchemaEnumType(QAPISchemaType): meta = 'enum' def __init__(self, name, info, doc, ifcond, features, members, prefix): super().__init__(name, info, doc, ifcond, features) for m in members: assert isinstance(m, QAPISchemaEnumMember) m.set_defined_in(name) assert prefix is None or isinstance(prefix, str) self.members = members self.prefix = prefix def check(self, schema): super().check(schema) seen = {} for m in self.members: m.check_clash(self.info, seen) def connect_doc(self, doc=None): super().connect_doc(doc) doc = doc or self.doc for m in self.members: m.connect_doc(doc) def is_implicit(self): # See QAPISchema._def_predefineds() return self.name == 'QType' def c_type(self): return c_name(self.name) def member_names(self): return [m.name for m in self.members] def json_type(self): return 'string' def visit(self, visitor): super().visit(visitor) visitor.visit_enum_type( self.name, self.info, self.ifcond, self.features, 
self.members, self.prefix) class QAPISchemaArrayType(QAPISchemaType): meta = 'array' def __init__(self, name, info, element_type): super().__init__(name, info, None) assert isinstance(element_type, str) self._element_type_name = element_type self.element_type = None def need_has_if_optional(self): # When FOO is an array, we still need has_FOO to distinguish # absent (!has_FOO) from present and empty (has_FOO && !FOO). return True def check(self, schema): super().check(schema) self.element_type = schema.resolve_type( self._element_type_name, self.info, self.info and self.info.defn_meta) assert not isinstance(self.element_type, QAPISchemaArrayType) def set_module(self, schema): self._set_module(schema, self.element_type.info) @property def ifcond(self): assert self._checked return self.element_type.ifcond def is_implicit(self): return True def c_type(self): return c_name(self.name) + POINTER_SUFFIX def json_type(self): return 'array' def doc_type(self): elt_doc_type = self.element_type.doc_type() if not elt_doc_type: return None return 'array of ' + elt_doc_type def visit(self, visitor): super().visit(visitor) visitor.visit_array_type(self.name, self.info, self.ifcond, self.element_type) def describe(self): assert self.meta return "%s type ['%s']" % (self.meta, self._element_type_name) class QAPISchemaObjectType(QAPISchemaType): def __init__(self, name, info, doc, ifcond, features, base, local_members, variants): # struct has local_members, optional base, and no variants # union has base, variants, and no local_members super().__init__(name, info, doc, ifcond, features) self.meta = 'union' if variants else 'struct' assert base is None or isinstance(base, str) for m in local_members: assert isinstance(m, QAPISchemaObjectTypeMember) m.set_defined_in(name) if variants is not None: assert isinstance(variants, QAPISchemaVariants) variants.set_defined_in(name) self._base_name = base self.base = None self.local_members = local_members self.variants = variants self.members = 
None def check(self, schema): # This calls another type T's .check() exactly when the C # struct emitted by gen_object() contains that T's C struct # (pointers don't count). if self.members is not None: # A previous .check() completed: nothing to do return if self._checked: # Recursed: C struct contains itself raise QAPISemError(self.info, "object %s contains itself" % self.name) super().check(schema) assert self._checked and self.members is None seen = OrderedDict() if self._base_name: self.base = schema.resolve_type(self._base_name, self.info, "'base'") if (not isinstance(self.base, QAPISchemaObjectType) or self.base.variants): raise QAPISemError( self.info, "'base' requires a struct type, %s isn't" % self.base.describe()) self.base.check(schema) self.base.check_clash(self.info, seen) for m in self.local_members: m.check(schema) m.check_clash(self.info, seen) members = seen.values() if self.variants: self.variants.check(schema, seen) self.variants.check_clash(self.info, seen) self.members = members # mark completed # Check that the members of this type do not cause duplicate JSON members, # and update seen to track the members seen so far. 
Report any errors # on behalf of info, which is not necessarily self.info def check_clash(self, info, seen): assert self._checked assert not self.variants # not implemented for m in self.members: m.check_clash(info, seen) def connect_doc(self, doc=None): super().connect_doc(doc) doc = doc or self.doc if self.base and self.base.is_implicit(): self.base.connect_doc(doc) for m in self.local_members: m.connect_doc(doc) def is_implicit(self): # See QAPISchema._make_implicit_object_type(), as well as # _def_predefineds() return self.name.startswith('q_') def is_empty(self): assert self.members is not None return not self.members and not self.variants def c_name(self): assert self.name != 'q_empty' return super().c_name() def c_type(self): assert not self.is_implicit() return c_name(self.name) + POINTER_SUFFIX def c_unboxed_type(self): return c_name(self.name) def json_type(self): return 'object' def visit(self, visitor): super().visit(visitor) visitor.visit_object_type( self.name, self.info, self.ifcond, self.features, self.base, self.local_members, self.variants) visitor.visit_object_type_flat( self.name, self.info, self.ifcond, self.features, self.members, self.variants) class QAPISchemaAlternateType(QAPISchemaType): meta = 'alternate' def __init__(self, name, info, doc, ifcond, features, variants): super().__init__(name, info, doc, ifcond, features) assert isinstance(variants, QAPISchemaVariants) assert variants.tag_member variants.set_defined_in(name) variants.tag_member.set_defined_in(self.name) self.variants = variants def check(self, schema): super().check(schema) self.variants.tag_member.check(schema) # Not calling self.variants.check_clash(), because there's nothing # to clash with self.variants.check(schema, {}) # Alternate branch names have no relation to the tag enum values; # so we have to check for potential name collisions ourselves. 
seen = {} types_seen = {} for v in self.variants.variants: v.check_clash(self.info, seen) qtype = v.type.alternate_qtype() if not qtype: raise QAPISemError( self.info, "%s cannot use %s" % (v.describe(self.info), v.type.describe())) conflicting = set([qtype]) if qtype == 'QTYPE_QSTRING': if isinstance(v.type, QAPISchemaEnumType): for m in v.type.members: if m.name in ['on', 'off']: conflicting.add('QTYPE_QBOOL') if re.match(r'[-+0-9.]', m.name): # lazy, could be tightened conflicting.add('QTYPE_QNUM') else: conflicting.add('QTYPE_QNUM') conflicting.add('QTYPE_QBOOL') for qt in conflicting: if qt in types_seen: raise QAPISemError( self.info, "%s can't be distinguished from '%s'" % (v.describe(self.info), types_seen[qt])) types_seen[qt] = v.name def connect_doc(self, doc=None): super().connect_doc(doc) doc = doc or self.doc for v in self.variants.variants: v.connect_doc(doc) def c_type(self): return c_name(self.name) + POINTER_SUFFIX def json_type(self): return 'value' def visit(self, visitor): super().visit(visitor) visitor.visit_alternate_type( self.name, self.info, self.ifcond, self.features, self.variants) class QAPISchemaVariants: def __init__(self, tag_name, info, tag_member, variants): # Unions pass tag_name but not tag_member. # Alternates pass tag_member but not tag_name. # After check(), tag_member is always set. 
assert bool(tag_member) != bool(tag_name) assert (isinstance(tag_name, str) or isinstance(tag_member, QAPISchemaObjectTypeMember)) for v in variants: assert isinstance(v, QAPISchemaVariant) self._tag_name = tag_name self.info = info self.tag_member = tag_member self.variants = variants def set_defined_in(self, name): for v in self.variants: v.set_defined_in(name) def check(self, schema, seen): if self._tag_name: # union self.tag_member = seen.get(c_name(self._tag_name)) base = "'base'" # Pointing to the base type when not implicit would be # nice, but we don't know it here if not self.tag_member or self._tag_name != self.tag_member.name: raise QAPISemError( self.info, "discriminator '%s' is not a member of %s" % (self._tag_name, base)) # Here we do: base_type = schema.lookup_type(self.tag_member.defined_in) assert base_type if not base_type.is_implicit(): base = "base type '%s'" % self.tag_member.defined_in if not isinstance(self.tag_member.type, QAPISchemaEnumType): raise QAPISemError( self.info, "discriminator member '%s' of %s must be of enum type" % (self._tag_name, base)) if self.tag_member.optional: raise QAPISemError( self.info, "discriminator member '%s' of %s must not be optional" % (self._tag_name, base)) if self.tag_member.ifcond.is_present(): raise QAPISemError( self.info, "discriminator member '%s' of %s must not be conditional" % (self._tag_name, base)) else: # alternate assert isinstance(self.tag_member.type, QAPISchemaEnumType) assert not self.tag_member.optional assert not self.tag_member.ifcond.is_present() if self._tag_name: # union # branches that are not explicitly covered get an empty type cases = {v.name for v in self.variants} for m in self.tag_member.type.members: if m.name not in cases: v = QAPISchemaVariant(m.name, self.info, 'q_empty', m.ifcond) v.set_defined_in(self.tag_member.defined_in) self.variants.append(v) if not self.variants: raise QAPISemError(self.info, "union has no branches") for v in self.variants: v.check(schema) # Union 
names must match enum values; alternate names are # checked separately. Use 'seen' to tell the two apart. if seen: if v.name not in self.tag_member.type.member_names(): raise QAPISemError( self.info, "branch '%s' is not a value of %s" % (v.name, self.tag_member.type.describe())) if (not isinstance(v.type, QAPISchemaObjectType) or v.type.variants): raise QAPISemError( self.info, "%s cannot use %s" % (v.describe(self.info), v.type.describe())) v.type.check(schema) def check_clash(self, info, seen): for v in self.variants: # Reset seen map for each variant, since qapi names from one # branch do not affect another branch v.type.check_clash(info, dict(seen)) class QAPISchemaMember: """ Represents object members, enum members and features """ role = 'member' def __init__(self, name, info, ifcond=None): assert isinstance(name, str) self.name = name self.info = info self.ifcond = ifcond or QAPISchemaIfCond() self.defined_in = None def set_defined_in(self, name): assert not self.defined_in self.defined_in = name def check_clash(self, info, seen): cname = c_name(self.name) if cname in seen: raise QAPISemError( info, "%s collides with %s" % (self.describe(info), seen[cname].describe(info))) seen[cname] = self def connect_doc(self, doc): if doc: doc.connect_member(self) def describe(self, info): role = self.role defined_in = self.defined_in assert defined_in if defined_in.startswith('q_obj_'): # See QAPISchema._make_implicit_object_type() - reverse the # mapping there to create a nice human-readable description defined_in = defined_in[6:] if defined_in.endswith('-arg'): # Implicit type created for a command's dict 'data' assert role == 'member' role = 'parameter' elif defined_in.endswith('-base'): # Implicit type created for a union's dict 'base' role = 'base ' + role else: assert False elif defined_in != info.defn_name: return "%s '%s' of type '%s'" % (role, self.name, defined_in) return "%s '%s'" % (role, self.name) class QAPISchemaEnumMember(QAPISchemaMember): role = 
'value' def __init__(self, name, info, ifcond=None, features=None): super().__init__(name, info, ifcond) for f in features or []: assert isinstance(f, QAPISchemaFeature) f.set_defined_in(name) self.features = features or [] def connect_doc(self, doc): super().connect_doc(doc) if doc: for f in self.features: doc.connect_feature(f) class QAPISchemaFeature(QAPISchemaMember): role = 'feature' def is_special(self): return self.name in ('deprecated', 'unstable') class QAPISchemaObjectTypeMember(QAPISchemaMember): def __init__(self, name, info, typ, optional, ifcond=None, features=None): super().__init__(name, info, ifcond) assert isinstance(typ, str) assert isinstance(optional, bool) for f in features or []: assert isinstance(f, QAPISchemaFeature) f.set_defined_in(name) self._type_name = typ self.type = None self.optional = optional self.features = features or [] def need_has(self): assert self.type return self.optional and self.type.need_has_if_optional() def check(self, schema): assert self.defined_in self.type = schema.resolve_type(self._type_name, self.info, self.describe) seen = {} for f in self.features: f.check_clash(self.info, seen) def connect_doc(self, doc): super().connect_doc(doc) if doc: for f in self.features: doc.connect_feature(f) class QAPISchemaVariant(QAPISchemaObjectTypeMember): role = 'branch' def __init__(self, name, info, typ, ifcond=None): super().__init__(name, info, typ, False, ifcond) class QAPISchemaCommand(QAPISchemaEntity): meta = 'command' def __init__(self, name, info, doc, ifcond, features, arg_type, ret_type, gen, success_response, boxed, allow_oob, allow_preconfig, coroutine): super().__init__(name, info, doc, ifcond, features) assert not arg_type or isinstance(arg_type, str) assert not ret_type or isinstance(ret_type, str) self._arg_type_name = arg_type self.arg_type = None self._ret_type_name = ret_type self.ret_type = None self.gen = gen self.success_response = success_response self.boxed = boxed self.allow_oob = allow_oob 
self.allow_preconfig = allow_preconfig self.coroutine = coroutine def check(self, schema): super().check(schema) if self._arg_type_name: self.arg_type = schema.resolve_type( self._arg_type_name, self.info, "command's 'data'") if not isinstance(self.arg_type, QAPISchemaObjectType): raise QAPISemError( self.info, "command's 'data' cannot take %s" % self.arg_type.describe()) if self.arg_type.variants and not self.boxed: raise QAPISemError( self.info, "command's 'data' can take %s only with 'boxed': true" % self.arg_type.describe()) if self._ret_type_name: self.ret_type = schema.resolve_type( self._ret_type_name, self.info, "command's 'returns'") if self.name not in self.info.pragma.command_returns_exceptions: typ = self.ret_type if isinstance(typ, QAPISchemaArrayType): typ = self.ret_type.element_type assert typ if not isinstance(typ, QAPISchemaObjectType): raise QAPISemError( self.info, "command's 'returns' cannot take %s" % self.ret_type.describe()) def connect_doc(self, doc=None): super().connect_doc(doc) doc = doc or self.doc if doc: if self.arg_type and self.arg_type.is_implicit(): self.arg_type.connect_doc(doc) def visit(self, visitor): super().visit(visitor) visitor.visit_command( self.name, self.info, self.ifcond, self.features, self.arg_type, self.ret_type, self.gen, self.success_response, self.boxed, self.allow_oob, self.allow_preconfig, self.coroutine) class QAPISchemaEvent(QAPISchemaEntity): meta = 'event' def __init__(self, name, info, doc, ifcond, features, arg_type, boxed): super().__init__(name, info, doc, ifcond, features) assert not arg_type or isinstance(arg_type, str) self._arg_type_name = arg_type self.arg_type = None self.boxed = boxed def check(self, schema): super().check(schema) if self._arg_type_name: self.arg_type = schema.resolve_type( self._arg_type_name, self.info, "event's 'data'") if not isinstance(self.arg_type, QAPISchemaObjectType): raise QAPISemError( self.info, "event's 'data' cannot take %s" % self.arg_type.describe()) if 
self.arg_type.variants and not self.boxed: raise QAPISemError( self.info, "event's 'data' can take %s only with 'boxed': true" % self.arg_type.describe()) def connect_doc(self, doc=None): super().connect_doc(doc) doc = doc or self.doc if doc: if self.arg_type and self.arg_type.is_implicit(): self.arg_type.connect_doc(doc) def visit(self, visitor): super().visit(visitor) visitor.visit_event( self.name, self.info, self.ifcond, self.features, self.arg_type, self.boxed) class QAPISchema: def __init__(self, fname): self.fname = fname try: parser = QAPISchemaParser(fname) except OSError as err: raise QAPIError( f"can't read schema file '{fname}': {err.strerror}" ) from err exprs = check_exprs(parser.exprs) self.docs = parser.docs self._entity_list = [] self._entity_dict = {} self._module_dict = OrderedDict() self._schema_dir = os.path.dirname(fname) self._make_module(QAPISchemaModule.BUILTIN_MODULE_NAME) self._make_module(fname) self._predefining = True self._def_predefineds() self._predefining = False self._def_exprs(exprs) self.check() def _def_entity(self, ent): # Only the predefined types are allowed to not have info assert ent.info or self._predefining self._entity_list.append(ent) if ent.name is None: return # TODO reject names that differ only in '_' vs. '.' vs. '-', # because they're liable to clash in generated C. 
other_ent = self._entity_dict.get(ent.name) if other_ent: if other_ent.info: where = QAPISourceError(other_ent.info, "previous definition") raise QAPISemError( ent.info, "'%s' is already defined\n%s" % (ent.name, where)) raise QAPISemError( ent.info, "%s is already defined" % other_ent.describe()) self._entity_dict[ent.name] = ent def lookup_entity(self, name, typ=None): ent = self._entity_dict.get(name) if typ and not isinstance(ent, typ): return None return ent def lookup_type(self, name): return self.lookup_entity(name, QAPISchemaType) def resolve_type(self, name, info, what): typ = self.lookup_type(name) if not typ: if callable(what): what = what(info) raise QAPISemError( info, "%s uses unknown type '%s'" % (what, name)) return typ def _module_name(self, fname: str) -> str: if QAPISchemaModule.is_system_module(fname): return fname return os.path.relpath(fname, self._schema_dir) def _make_module(self, fname): name = self._module_name(fname) if name not in self._module_dict: self._module_dict[name] = QAPISchemaModule(name) return self._module_dict[name] def module_by_fname(self, fname): name = self._module_name(fname) return self._module_dict[name] def _def_include(self, expr: QAPIExpression): include = expr['include'] assert expr.doc is None self._def_entity( QAPISchemaInclude(self._make_module(include), expr.info)) def _def_builtin_type(self, name, json_type, c_type): self._def_entity(QAPISchemaBuiltinType(name, json_type, c_type)) # Instantiating only the arrays that are actually used would # be nice, but we can't as long as their generated code # (qapi-builtin-types.[ch]) may be shared by some other # schema. 
self._make_array_type(name, None) def _def_predefineds(self): for t in [('str', 'string', 'char' + POINTER_SUFFIX), ('number', 'number', 'double'), ('int', 'int', 'int64_t'), ('int8', 'int', 'int8_t'), ('int16', 'int', 'int16_t'), ('int32', 'int', 'int32_t'), ('int64', 'int', 'int64_t'), ('uint8', 'int', 'uint8_t'), ('uint16', 'int', 'uint16_t'), ('uint32', 'int', 'uint32_t'), ('uint64', 'int', 'uint64_t'), ('size', 'int', 'uint64_t'), ('bool', 'boolean', 'bool'), ('any', 'value', 'QObject' + POINTER_SUFFIX), ('null', 'null', 'QNull' + POINTER_SUFFIX)]: self._def_builtin_type(*t) self.the_empty_object_type = QAPISchemaObjectType( 'q_empty', None, None, None, None, None, [], None) self._def_entity(self.the_empty_object_type) qtypes = ['none', 'qnull', 'qnum', 'qstring', 'qdict', 'qlist', 'qbool'] qtype_values = self._make_enum_members( [{'name': n} for n in qtypes], None) self._def_entity(QAPISchemaEnumType('QType', None, None, None, None, qtype_values, 'QTYPE')) def _make_features(self, features, info): if features is None: return [] return [QAPISchemaFeature(f['name'], info, QAPISchemaIfCond(f.get('if'))) for f in features] def _make_enum_member(self, name, ifcond, features, info): return QAPISchemaEnumMember(name, info, QAPISchemaIfCond(ifcond), self._make_features(features, info)) def _make_enum_members(self, values, info): return [self._make_enum_member(v['name'], v.get('if'), v.get('features'), info) for v in values] def _make_array_type(self, element_type, info): name = element_type + 'List' # reserved by check_defn_name_str() if not self.lookup_type(name): self._def_entity(QAPISchemaArrayType(name, info, element_type)) return name def _make_implicit_object_type(self, name, info, ifcond, role, members): if not members: return None # See also QAPISchemaObjectTypeMember.describe() name = 'q_obj_%s-%s' % (name, role) typ = self.lookup_entity(name, QAPISchemaObjectType) if typ: # The implicit object type has multiple users. 
This can # only be a duplicate definition, which will be flagged # later. pass else: self._def_entity(QAPISchemaObjectType( name, info, None, ifcond, None, None, members, None)) return name def _def_enum_type(self, expr: QAPIExpression): name = expr['enum'] data = expr['data'] prefix = expr.get('prefix') ifcond = QAPISchemaIfCond(expr.get('if')) info = expr.info features = self._make_features(expr.get('features'), info) self._def_entity(QAPISchemaEnumType( name, info, expr.doc, ifcond, features, self._make_enum_members(data, info), prefix)) def _make_member(self, name, typ, ifcond, features, info): optional = False if name.startswith('*'): name = name[1:] optional = True if isinstance(typ, list): assert len(typ) == 1 typ = self._make_array_type(typ[0], info) return QAPISchemaObjectTypeMember(name, info, typ, optional, ifcond, self._make_features(features, info)) def _make_members(self, data, info): return [self._make_member(key, value['type'], QAPISchemaIfCond(value.get('if')), value.get('features'), info) for (key, value) in data.items()] def _def_struct_type(self, expr: QAPIExpression): name = expr['struct'] base = expr.get('base') data = expr['data'] info = expr.info ifcond = QAPISchemaIfCond(expr.get('if')) features = self._make_features(expr.get('features'), info) self._def_entity(QAPISchemaObjectType( name, info, expr.doc, ifcond, features, base, self._make_members(data, info), None)) def _make_variant(self, case, typ, ifcond, info): if isinstance(typ, list): assert len(typ) == 1 typ = self._make_array_type(typ[0], info) return QAPISchemaVariant(case, info, typ, ifcond) def _def_union_type(self, expr: QAPIExpression): name = expr['union'] base = expr['base'] tag_name = expr['discriminator'] data = expr['data'] assert isinstance(data, dict) info = expr.info ifcond = QAPISchemaIfCond(expr.get('if')) features = self._make_features(expr.get('features'), info) if isinstance(base, dict): base = self._make_implicit_object_type( name, info, ifcond, 'base', 
self._make_members(base, info)) variants = [ self._make_variant(key, value['type'], QAPISchemaIfCond(value.get('if')), info) for (key, value) in data.items()] members: List[QAPISchemaObjectTypeMember] = [] self._def_entity( QAPISchemaObjectType(name, info, expr.doc, ifcond, features, base, members, QAPISchemaVariants( tag_name, info, None, variants))) def _def_alternate_type(self, expr: QAPIExpression): name = expr['alternate'] data = expr['data'] assert isinstance(data, dict) ifcond = QAPISchemaIfCond(expr.get('if')) info = expr.info features = self._make_features(expr.get('features'), info) variants = [ self._make_variant(key, value['type'], QAPISchemaIfCond(value.get('if')), info) for (key, value) in data.items()] tag_member = QAPISchemaObjectTypeMember('type', info, 'QType', False) self._def_entity( QAPISchemaAlternateType( name, info, expr.doc, ifcond, features, QAPISchemaVariants(None, info, tag_member, variants))) def _def_command(self, expr: QAPIExpression): name = expr['command'] data = expr.get('data') rets = expr.get('returns') gen = expr.get('gen', True) success_response = expr.get('success-response', True) boxed = expr.get('boxed', False) allow_oob = expr.get('allow-oob', False) allow_preconfig = expr.get('allow-preconfig', False) coroutine = expr.get('coroutine', False) ifcond = QAPISchemaIfCond(expr.get('if')) info = expr.info features = self._make_features(expr.get('features'), info) if isinstance(data, OrderedDict): data = self._make_implicit_object_type( name, info, ifcond, 'arg', self._make_members(data, info)) if isinstance(rets, list): assert len(rets) == 1 rets = self._make_array_type(rets[0], info) self._def_entity(QAPISchemaCommand(name, info, expr.doc, ifcond, features, data, rets, gen, success_response, boxed, allow_oob, allow_preconfig, coroutine)) def _def_event(self, expr: QAPIExpression): name = expr['event'] data = expr.get('data') boxed = expr.get('boxed', False) ifcond = QAPISchemaIfCond(expr.get('if')) info = expr.info features = 
self._make_features(expr.get('features'), info) if isinstance(data, OrderedDict): data = self._make_implicit_object_type( name, info, ifcond, 'arg', self._make_members(data, info)) self._def_entity(QAPISchemaEvent(name, info, expr.doc, ifcond, features, data, boxed)) def _def_exprs(self, exprs): for expr in exprs: if 'enum' in expr: self._def_enum_type(expr) elif 'struct' in expr: self._def_struct_type(expr) elif 'union' in expr: self._def_union_type(expr) elif 'alternate' in expr: self._def_alternate_type(expr) elif 'command' in expr: self._def_command(expr) elif 'event' in expr: self._def_event(expr) elif 'include' in expr: self._def_include(expr) else: assert False def check(self): for ent in self._entity_list: ent.check(self) ent.connect_doc() ent.check_doc() for ent in self._entity_list: ent.set_module(self) def visit(self, visitor): visitor.visit_begin(self) for mod in self._module_dict.values(): mod.visit(visitor) visitor.visit_end()
42,332
33.98595
78
py
qemu
qemu-master/scripts/oss-fuzz/reorder_fuzzer_qtest_trace.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Use this to convert qtest log info from a generic fuzzer input into a qtest trace that you can feed into a standard qemu-system process. Example usage: QEMU_FUZZ_ARGS="-machine q35,accel=qtest" QEMU_FUZZ_OBJECTS="*" \ ./i386-softmmu/qemu-fuzz-i386 --fuzz-target=generic-pci-fuzz # .. Finds some crash QTEST_LOG=1 FUZZ_SERIALIZE_QTEST=1 \ QEMU_FUZZ_ARGS="-machine q35,accel=qtest" QEMU_FUZZ_OBJECTS="*" \ ./i386-softmmu/qemu-fuzz-i386 --fuzz-target=generic-pci-fuzz /path/to/crash 2> qtest_log_output scripts/oss-fuzz/reorder_fuzzer_qtest_trace.py qtest_log_output > qtest_trace ./i386-softmmu/qemu-fuzz-i386 -machine q35,accel=qtest \ -qtest stdio < qtest_trace ### Details ### Some fuzzer make use of hooks that allow us to populate some memory range, just before a DMA read from that range. This means that the fuzzer can produce activity that looks like: [start] read from mmio addr [end] read from mmio addr [start] write to pio addr [start] fill a DMA buffer just in time [end] fill a DMA buffer just in time [start] fill a DMA buffer just in time [end] fill a DMA buffer just in time [end] write to pio addr [start] read from mmio addr [end] read from mmio addr We annotate these "nested" DMA writes, so with QTEST_LOG=1 the QTest trace might look something like: [R +0.028431] readw 0x10000 [R +0.028434] outl 0xc000 0xbeef # Triggers a DMA read from 0xbeef and 0xbf00 [DMA][R +0.034639] write 0xbeef 0x2 0xAAAA [DMA][R +0.034639] write 0xbf00 0x2 0xBBBB [R +0.028431] readw 0xfc000 This script would reorder the above trace so it becomes: readw 0x10000 write 0xbeef 0x2 0xAAAA write 0xbf00 0x2 0xBBBB outl 0xc000 0xbeef readw 0xfc000 I.e. by the time, 0xc000 tries to read from DMA, those DMA buffers have already been set up, removing the need for the DMA hooks. 
We can simply provide this reordered trace via -qtest stdio to reproduce the input Note: this won't work for traces where the device tries to read from the same DMA region twice in between MMIO/PIO commands. E.g: [R +0.028434] outl 0xc000 0xbeef [DMA][R +0.034639] write 0xbeef 0x2 0xAAAA [DMA][R +0.034639] write 0xbeef 0x2 0xBBBB The fuzzer will annotate suspected double-fetches with [DOUBLE-FETCH]. This script looks for these tags and warns the users that the resulting trace might not reproduce the bug. """ import sys __author__ = "Alexander Bulekov <[email protected]>" __copyright__ = "Copyright (C) 2020, Red Hat, Inc." __license__ = "GPL version 2 or (at your option) any later version" __maintainer__ = "Alexander Bulekov" __email__ = "[email protected]" def usage(): sys.exit("Usage: {} /path/to/qtest_log_output".format((sys.argv[0]))) def main(filename): with open(filename, "r") as f: trace = f.readlines() # Leave only lines that look like logged qtest commands trace[:] = [x.strip() for x in trace if "[R +" in x or "[S +" in x and "CLOSED" not in x] for i in range(len(trace)): if i+1 < len(trace): if "[DMA]" in trace[i+1]: if "[DOUBLE-FETCH]" in trace[i+1]: sys.stderr.write("Warning: Likely double fetch on line" "{}.\n There will likely be problems " "reproducing behavior with the " "resulting qtest trace\n\n".format(i+1)) trace[i], trace[i+1] = trace[i+1], trace[i] for line in trace: print(line.split("]")[-1].strip()) if __name__ == '__main__': if len(sys.argv) == 1: usage() main(sys.argv[1])
3,736
34.932692
79
py
qemu
qemu-master/scripts/oss-fuzz/output_reproducer.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Convert plain qtest traces to C or Bash reproducers Use this to help build bug-reports or create in-tree reproducers for bugs. Note: This will not format C code for you. Pipe the output through clang-format -style="{BasedOnStyle: llvm, IndentWidth: 4, ColumnLimit: 90}" or similar """ import sys import os import argparse import textwrap from datetime import date __author__ = "Alexander Bulekov <[email protected]>" __copyright__ = "Copyright (C) 2021, Red Hat, Inc." __license__ = "GPL version 2 or (at your option) any later version" __maintainer__ = "Alexander Bulekov" __email__ = "[email protected]" def c_header(owner): return """/* * Autogenerated Fuzzer Test Case * * Copyright (c) {date} {owner} * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. */ #include "qemu/osdep.h" #include "libqtest.h" """.format(date=date.today().year, owner=owner) def c_comment(s): """ Return a multi-line C comment. 
Assume the text is already wrapped """ return "/*\n * " + "\n * ".join(s.splitlines()) + "\n*/" def print_c_function(s): print("/* ") for l in s.splitlines(): print(" * {}".format(l)) def bash_reproducer(path, args, trace): result = '\\\n'.join(textwrap.wrap("cat << EOF | {} {}".format(path, args), 72, break_on_hyphens=False, drop_whitespace=False)) for l in trace.splitlines(): result += "\n" + '\\\n'.join(textwrap.wrap(l,72,drop_whitespace=False)) result += "\nEOF" return result def c_reproducer(name, args, trace): result = [] result.append("""static void {}(void)\n{{""".format(name)) # libqtest will add its own qtest args, so get rid of them args = args.replace("-accel qtest","") args = args.replace(",accel=qtest","") args = args.replace("-machine accel=qtest","") args = args.replace("-qtest stdio","") result.append("""QTestState *s = qtest_init("{}");""".format(args)) for l in trace.splitlines(): param = l.split() cmd = param[0] if cmd == "write": buf = param[3][2:] #Get the 0x... buffer and trim the "0x" assert len(buf)%2 == 0 bufbytes = [buf[i:i+2] for i in range(0, len(buf), 2)] bufstring = '\\x'+'\\x'.join(bufbytes) addr = param[1] size = param[2] result.append("""qtest_bufwrite(s, {}, "{}", {});""".format( addr, bufstring, size)) elif cmd.startswith("in") or cmd.startswith("read"): result.append("qtest_{}(s, {});".format( cmd, param[1])) elif cmd.startswith("out") or cmd.startswith("write"): result.append("qtest_{}(s, {}, {});".format( cmd, param[1], param[2])) elif cmd == "clock_step": if len(param) ==1: result.append("qtest_clock_step_next(s);") else: result.append("qtest_clock_step(s, {});".format(param[1])) result.append("qtest_quit(s);\n}") return "\n".join(result) def c_main(name, arch): return """int main(int argc, char **argv) {{ const char *arch = qtest_get_arch(); g_test_init(&argc, &argv, NULL); if (strcmp(arch, "{arch}") == 0) {{ qtest_add_func("fuzz/{name}",{name}); }} return g_test_run(); }}""".format(name=name, arch=arch) def main(): parser = 
argparse.ArgumentParser() group = parser.add_mutually_exclusive_group() group.add_argument("-bash", help="Only output a copy-pastable bash command", action="store_true") group.add_argument("-c", help="Only output a c function", action="store_true") parser.add_argument('-owner', help="If generating complete C source code, \ this specifies the Copyright owner", nargs='?', default="<name of author>") parser.add_argument("-no_comment", help="Don't include a bash reproducer \ as a comment in the C reproducers", action="store_true") parser.add_argument('-name', help="The name of the c function", nargs='?', default="test_fuzz") parser.add_argument('input_trace', help="input QTest command sequence \ (stdin by default)", nargs='?', type=argparse.FileType('r'), default=sys.stdin) args = parser.parse_args() qemu_path = os.getenv("QEMU_PATH") qemu_args = os.getenv("QEMU_ARGS") if not qemu_args or not qemu_path: print("Please set QEMU_PATH and QEMU_ARGS environment variables") sys.exit(1) bash_args = qemu_args if " -qtest stdio" not in qemu_args: bash_args += " -qtest stdio" arch = qemu_path.split("-")[-1] trace = args.input_trace.read().strip() if args.bash : print(bash_reproducer(qemu_path, bash_args, trace)) else: output = "" if not args.c: output += c_header(args.owner) + "\n" if not args.no_comment: output += c_comment(bash_reproducer(qemu_path, bash_args, trace)) output += c_reproducer(args.name, qemu_args, trace) if not args.c: output += c_main(args.name, arch) print(output) if __name__ == '__main__': main()
5,532
33.36646
80
py
qemu
qemu-master/scripts/oss-fuzz/minimize_qtest_trace.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ This takes a crashing qtest trace and tries to remove superflous operations """ import sys import os import subprocess import time import struct QEMU_ARGS = None QEMU_PATH = None TIMEOUT = 5 CRASH_TOKEN = None # Minimization levels M1 = False # try removing IO commands iteratively M2 = False # try setting bits in operand of write/out to zero write_suffix_lookup = {"b": (1, "B"), "w": (2, "H"), "l": (4, "L"), "q": (8, "Q")} def usage(): sys.exit("""\ Usage: QEMU_PATH="/path/to/qemu" QEMU_ARGS="args" {} [Options] input_trace output_trace By default, will try to use the second-to-last line in the output to identify whether the crash occred. Optionally, manually set a string that idenitifes the crash by setting CRASH_TOKEN= Options: -M1: enable a loop around the remove minimizer, which may help decrease some timing dependant instructions. Off by default. -M2: try setting bits in operand of write/out to zero. Off by default. """.format((sys.argv[0]))) deduplication_note = """\n\ Note: While trimming the input, sometimes the mutated trace triggers a different type crash but indicates the same bug. Under this situation, our minimizer is incapable of recognizing and stopped from removing it. In the future, we may use a more sophisticated crash case deduplication method. 
\n""" def check_if_trace_crashes(trace, path): with open(path, "w") as tracefile: tracefile.write("".join(trace)) rc = subprocess.Popen("timeout -s 9 {timeout}s {qemu_path} {qemu_args} 2>&1\ < {trace_path}".format(timeout=TIMEOUT, qemu_path=QEMU_PATH, qemu_args=QEMU_ARGS, trace_path=path), shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, encoding="utf-8") global CRASH_TOKEN if CRASH_TOKEN is None: try: outs, _ = rc.communicate(timeout=5) CRASH_TOKEN = " ".join(outs.splitlines()[-2].split()[0:3]) except subprocess.TimeoutExpired: print("subprocess.TimeoutExpired") return False print("Identifying Crashes by this string: {}".format(CRASH_TOKEN)) global deduplication_note print(deduplication_note) return True for line in iter(rc.stdout.readline, ""): if "CLOSED" in line: return False if CRASH_TOKEN in line: return True print("\nWarning:") print(" There is no 'CLOSED'or CRASH_TOKEN in the stdout of subprocess.") print(" Usually this indicates a different type of crash.\n") return False # If previous write commands write the same length of data at the same # interval, we view it as a hint. def split_write_hint(newtrace, i): HINT_LEN = 3 # > 2 if i <=(HINT_LEN-1): return None #find previous continuous write traces k = 0 l = i-1 writes = [] while (k != HINT_LEN and l >= 0): if newtrace[l].startswith("write "): writes.append(newtrace[l]) k += 1 l -= 1 elif newtrace[l] == "": l -= 1 else: return None if k != HINT_LEN: return None length = int(writes[0].split()[2], 16) for j in range(1, HINT_LEN): if length != int(writes[j].split()[2], 16): return None step = int(writes[0].split()[1], 16) - int(writes[1].split()[1], 16) for j in range(1, HINT_LEN-1): if step != int(writes[j].split()[1], 16) - \ int(writes[j+1].split()[1], 16): return None return (int(writes[0].split()[1], 16)+step, length) def remove_lines(newtrace, outpath): remove_step = 1 i = 0 while i < len(newtrace): # 1.) Try to remove lines completely and reproduce the crash. # If it works, we're done. 
if (i+remove_step) >= len(newtrace): remove_step = 1 prior = newtrace[i:i+remove_step] for j in range(i, i+remove_step): newtrace[j] = "" print("Removing {lines} ...\n".format(lines=prior)) if check_if_trace_crashes(newtrace, outpath): i += remove_step # Double the number of lines to remove for next round remove_step *= 2 continue # Failed to remove multiple IOs, fast recovery if remove_step > 1: for j in range(i, i+remove_step): newtrace[j] = prior[j-i] remove_step = 1 continue newtrace[i] = prior[0] # remove_step = 1 # 2.) Try to replace write{bwlq} commands with a write addr, len # command. Since this can require swapping endianness, try both LE and # BE options. We do this, so we can "trim" the writes in (3) if (newtrace[i].startswith("write") and not newtrace[i].startswith("write ")): suffix = newtrace[i].split()[0][-1] assert(suffix in write_suffix_lookup) addr = int(newtrace[i].split()[1], 16) value = int(newtrace[i].split()[2], 16) for endianness in ['<', '>']: data = struct.pack("{end}{size}".format(end=endianness, size=write_suffix_lookup[suffix][1]), value) newtrace[i] = "write {addr} {size} 0x{data}\n".format( addr=hex(addr), size=hex(write_suffix_lookup[suffix][0]), data=data.hex()) if(check_if_trace_crashes(newtrace, outpath)): break else: newtrace[i] = prior[0] # 3.) If it is a qtest write command: write addr len data, try to split # it into two separate write commands. If splitting the data operand # from length/2^n bytes to the left does not work, try to move the pivot # to the right side, then add one to n, until length/2^n == 0. The idea # is to prune unneccessary bytes from long writes, while accommodating # arbitrary MemoryRegion access sizes and alignments. # This algorithm will fail under some rare situations. 
# e.g., xxxxxxxxxuxxxxxx (u is the unnecessary byte) if newtrace[i].startswith("write "): addr = int(newtrace[i].split()[1], 16) length = int(newtrace[i].split()[2], 16) data = newtrace[i].split()[3][2:] if length > 1: # Can we get a hint from previous writes? hint = split_write_hint(newtrace, i) if hint is not None: hint_addr = hint[0] hint_len = hint[1] if hint_addr >= addr and hint_addr+hint_len <= addr+length: newtrace[i] = "write {addr} {size} 0x{data}\n".format( addr=hex(hint_addr), size=hex(hint_len), data=data[(hint_addr-addr)*2:\ (hint_addr-addr)*2+hint_len*2]) if check_if_trace_crashes(newtrace, outpath): # next round i += 1 continue newtrace[i] = prior[0] # Try splitting it using a binary approach leftlength = int(length/2) rightlength = length - leftlength newtrace.insert(i+1, "") power = 1 while leftlength > 0: newtrace[i] = "write {addr} {size} 0x{data}\n".format( addr=hex(addr), size=hex(leftlength), data=data[:leftlength*2]) newtrace[i+1] = "write {addr} {size} 0x{data}\n".format( addr=hex(addr+leftlength), size=hex(rightlength), data=data[leftlength*2:]) if check_if_trace_crashes(newtrace, outpath): break # move the pivot to right side if leftlength < rightlength: rightlength, leftlength = leftlength, rightlength continue power += 1 leftlength = int(length/pow(2, power)) rightlength = length - leftlength if check_if_trace_crashes(newtrace, outpath): i -= 1 else: newtrace[i] = prior[0] del newtrace[i+1] i += 1 def clear_bits(newtrace, outpath): # try setting bits in operands of out/write to zero i = 0 while i < len(newtrace): if (not newtrace[i].startswith("write ") and not newtrace[i].startswith("out")): i += 1 continue # write ADDR SIZE DATA # outx ADDR VALUE print("\nzero setting bits: {}".format(newtrace[i])) prefix = " ".join(newtrace[i].split()[:-1]) data = newtrace[i].split()[-1] data_bin = bin(int(data, 16)) data_bin_list = list(data_bin) for j in range(2, len(data_bin_list)): prior = newtrace[i] if (data_bin_list[j] == '1'): 
data_bin_list[j] = '0' data_try = hex(int("".join(data_bin_list), 2)) # It seems qtest only accepts padded hex-values. if len(data_try) % 2 == 1: data_try = data_try[:2] + "0" + data_try[2:] newtrace[i] = "{prefix} {data_try}\n".format( prefix=prefix, data_try=data_try) if not check_if_trace_crashes(newtrace, outpath): data_bin_list[j] = '1' newtrace[i] = prior i += 1 def minimize_trace(inpath, outpath): global TIMEOUT with open(inpath) as f: trace = f.readlines() start = time.time() if not check_if_trace_crashes(trace, outpath): sys.exit("The input qtest trace didn't cause a crash...") end = time.time() print("Crashed in {} seconds".format(end-start)) TIMEOUT = (end-start)*5 print("Setting the timeout for {} seconds".format(TIMEOUT)) newtrace = trace[:] global M1, M2 # remove lines old_len = len(newtrace) + 1 while(old_len > len(newtrace)): old_len = len(newtrace) print("trace lenth = ", old_len) remove_lines(newtrace, outpath) if not M1 and not M2: break newtrace = list(filter(lambda s: s != "", newtrace)) assert(check_if_trace_crashes(newtrace, outpath)) # set bits to zero if M2: clear_bits(newtrace, outpath) assert(check_if_trace_crashes(newtrace, outpath)) if __name__ == '__main__': if len(sys.argv) < 3: usage() if "-M1" in sys.argv: M1 = True if "-M2" in sys.argv: M2 = True QEMU_PATH = os.getenv("QEMU_PATH") QEMU_ARGS = os.getenv("QEMU_ARGS") if QEMU_PATH is None or QEMU_ARGS is None: usage() # if "accel" not in QEMU_ARGS: # QEMU_ARGS += " -accel qtest" CRASH_TOKEN = os.getenv("CRASH_TOKEN") QEMU_ARGS += " -qtest stdio -monitor none -serial none " minimize_trace(sys.argv[-2], sys.argv[-1])
11,613
34.845679
80
py
qemu
qemu-master/tests/qemu-iotests/findtests.py
# TestFinder class, define set of tests to run. # # Copyright (c) 2020-2021 Virtuozzo International GmbH # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import os import glob import re from collections import defaultdict from contextlib import contextmanager from typing import Optional, List, Iterator, Set @contextmanager def chdir(path: Optional[str] = None) -> Iterator[None]: if path is None: yield return saved_dir = os.getcwd() os.chdir(path) try: yield finally: os.chdir(saved_dir) class TestFinder: def __init__(self, test_dir: Optional[str] = None) -> None: self.groups = defaultdict(set) with chdir(test_dir): self.all_tests = glob.glob('[0-9][0-9][0-9]') self.all_tests += [f for f in glob.iglob('tests/*') if not f.endswith('.out') and os.path.isfile(f + '.out')] for t in self.all_tests: with open(t, encoding="utf-8") as f: for line in f: if line.startswith('# group: '): for g in line.split()[2:]: self.groups[g].add(t) break def add_group_file(self, fname: str) -> None: with open(fname, encoding="utf-8") as f: for line in f: line = line.strip() if (not line) or line[0] == '#': continue words = line.split() test_file = self.parse_test_name(words[0]) groups = words[1:] for g in groups: self.groups[g].add(test_file) def parse_test_name(self, name: str) -> str: if '/' in name: raise ValueError('Paths are unsupported for test selection, ' f'requiring "{name}" is wrong') if 
re.fullmatch(r'\d+', name): # Numbered tests are old naming convention. We should convert them # to three-digit-length, like 1 --> 001. name = f'{int(name):03}' else: # Named tests all should be in tests/ subdirectory name = os.path.join('tests', name) if name not in self.all_tests: raise ValueError(f'Test "{name}" is not found') return name def find_tests(self, groups: Optional[List[str]] = None, exclude_groups: Optional[List[str]] = None, tests: Optional[List[str]] = None, start_from: Optional[str] = None) -> List[str]: """Find tests Algorithm: 1. a. if some @groups specified a.1 Take all tests from @groups a.2 Drop tests, which are in at least one of @exclude_groups or in 'disabled' group (if 'disabled' is not listed in @groups) a.3 Add tests from @tests (don't exclude anything from them) b. else, if some @tests specified: b.1 exclude_groups must be not specified, so just take @tests c. else (only @exclude_groups list is non-empty): c.1 Take all tests c.2 Drop tests, which are in at least one of @exclude_groups or in 'disabled' group 2. sort 3. If start_from specified, drop tests from first one to @start_from (not inclusive) """ if groups is None: groups = [] if exclude_groups is None: exclude_groups = [] if tests is None: tests = [] res: Set[str] = set() if groups: # Some groups specified. exclude_groups supported, additionally # selecting some individual tests supported as well. res.update(*(self.groups[g] for g in groups)) elif tests: # Some individual tests specified, but no groups. In this case # we don't support exclude_groups. if exclude_groups: raise ValueError("Can't exclude from individually specified " "tests.") else: # No tests no groups: start from all tests, exclude_groups # supported. res.update(self.all_tests) if 'disabled' not in groups and 'disabled' not in exclude_groups: # Don't want to modify function argument, so create new list. 
exclude_groups = exclude_groups + ['disabled'] res = res.difference(*(self.groups[g] for g in exclude_groups)) # We want to add @tests. But for compatibility with old test names, # we should convert any number < 100 to number padded by # leading zeroes, like 1 -> 001 and 23 -> 023. for t in tests: res.add(self.parse_test_name(t)) sequence = sorted(res) if start_from is not None: del sequence[:sequence.index(self.parse_test_name(start_from))] return sequence
5,621
34.1375
79
py
qemu
qemu-master/tests/qemu-iotests/testenv.py
# TestEnv class to manage test environment variables. # # Copyright (c) 2020-2021 Virtuozzo International GmbH # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import os import sys import tempfile from pathlib import Path import shutil import collections import random import subprocess import glob from typing import List, Dict, Any, Optional, ContextManager DEF_GDB_OPTIONS = 'localhost:12345' def isxfile(path: str) -> bool: return os.path.isfile(path) and os.access(path, os.X_OK) def get_default_machine(qemu_prog: str) -> str: outp = subprocess.run([qemu_prog, '-machine', 'help'], check=True, universal_newlines=True, stdout=subprocess.PIPE).stdout machines = outp.split('\n') try: default_machine = next(m for m in machines if m.endswith(' (default)')) except StopIteration: return '' default_machine = default_machine.split(' ', 1)[0] alias_suf = ' (alias of {})'.format(default_machine) alias = next((m for m in machines if m.endswith(alias_suf)), None) if alias is not None: default_machine = alias.split(' ', 1)[0] return default_machine class TestEnv(ContextManager['TestEnv']): """ Manage system environment for running tests The following variables are supported/provided. They are represented by lower-cased TestEnv attributes. """ # We store environment variables as instance attributes, and there are a # lot of them. 
Silence pylint: # pylint: disable=too-many-instance-attributes env_variables = ['PYTHONPATH', 'TEST_DIR', 'SOCK_DIR', 'SAMPLE_IMG_DIR', 'PYTHON', 'QEMU_PROG', 'QEMU_IMG_PROG', 'QEMU_IO_PROG', 'QEMU_NBD_PROG', 'QSD_PROG', 'QEMU_OPTIONS', 'QEMU_IMG_OPTIONS', 'QEMU_IO_OPTIONS', 'QEMU_IO_OPTIONS_NO_FMT', 'QEMU_NBD_OPTIONS', 'IMGOPTS', 'IMGFMT', 'IMGPROTO', 'AIOMODE', 'CACHEMODE', 'VALGRIND_QEMU', 'CACHEMODE_IS_DEFAULT', 'IMGFMT_GENERIC', 'IMGOPTSSYNTAX', 'IMGKEYSECRET', 'QEMU_DEFAULT_MACHINE', 'MALLOC_PERTURB_', 'GDB_OPTIONS', 'PRINT_QEMU'] def prepare_subprocess(self, args: List[str]) -> Dict[str, str]: if self.debug: args.append('-d') with open(args[0], encoding="utf-8") as f: try: if f.readline().rstrip() == '#!/usr/bin/env python3': args.insert(0, self.python) except UnicodeDecodeError: # binary test? for future. pass os_env = os.environ.copy() os_env.update(self.get_env()) return os_env def get_env(self) -> Dict[str, str]: env = {} for v in self.env_variables: val = getattr(self, v.lower(), None) if val is not None: env[v] = val return env def init_directories(self) -> None: """Init directory variables: PYTHONPATH TEST_DIR SOCK_DIR SAMPLE_IMG_DIR """ # Path where qemu goodies live in this source tree. 
qemu_srctree_path = Path(__file__, '../../../python').resolve() self.pythonpath = os.pathsep.join(filter(None, ( self.source_iotests, str(qemu_srctree_path), os.getenv('PYTHONPATH'), ))) self.test_dir = os.getenv('TEST_DIR', os.path.join(os.getcwd(), 'scratch')) Path(self.test_dir).mkdir(parents=True, exist_ok=True) try: self.sock_dir = os.environ['SOCK_DIR'] self.tmp_sock_dir = False Path(self.sock_dir).mkdir(parents=True, exist_ok=True) except KeyError: self.sock_dir = tempfile.mkdtemp() self.tmp_sock_dir = True self.sample_img_dir = os.getenv('SAMPLE_IMG_DIR', os.path.join(self.source_iotests, 'sample_images')) def init_binaries(self) -> None: """Init binary path variables: PYTHON (for bash tests) QEMU_PROG, QEMU_IMG_PROG, QEMU_IO_PROG, QEMU_NBD_PROG, QSD_PROG """ self.python = sys.executable def root(*names: str) -> str: return os.path.join(self.build_root, *names) arch = os.uname().machine if 'ppc64' in arch: arch = 'ppc64' self.qemu_prog = os.getenv('QEMU_PROG', root(f'qemu-system-{arch}')) if not os.path.exists(self.qemu_prog): pattern = root('qemu-system-*') try: progs = sorted(glob.iglob(pattern)) self.qemu_prog = next(p for p in progs if isxfile(p)) except StopIteration: sys.exit("Not found any Qemu executable binary by pattern " f"'{pattern}'") self.qemu_img_prog = os.getenv('QEMU_IMG_PROG', root('qemu-img')) self.qemu_io_prog = os.getenv('QEMU_IO_PROG', root('qemu-io')) self.qemu_nbd_prog = os.getenv('QEMU_NBD_PROG', root('qemu-nbd')) self.qsd_prog = os.getenv('QSD_PROG', root('storage-daemon', 'qemu-storage-daemon')) for b in [self.qemu_img_prog, self.qemu_io_prog, self.qemu_nbd_prog, self.qemu_prog, self.qsd_prog]: if not os.path.exists(b): sys.exit('No such file: ' + b) if not isxfile(b): sys.exit('Not executable: ' + b) def __init__(self, source_dir: str, build_dir: str, imgfmt: str, imgproto: str, aiomode: str, cachemode: Optional[str] = None, imgopts: Optional[str] = None, misalign: bool = False, debug: bool = False, valgrind: bool = False, gdb: 
bool = False, qprint: bool = False, dry_run: bool = False) -> None: self.imgfmt = imgfmt self.imgproto = imgproto self.aiomode = aiomode self.imgopts = imgopts self.misalign = misalign self.debug = debug if qprint: self.print_qemu = 'y' if gdb: self.gdb_options = os.getenv('GDB_OPTIONS', DEF_GDB_OPTIONS) if not self.gdb_options: # cover the case 'export GDB_OPTIONS=' self.gdb_options = DEF_GDB_OPTIONS elif 'GDB_OPTIONS' in os.environ: # to not propagate it in prepare_subprocess() del os.environ['GDB_OPTIONS'] if valgrind: self.valgrind_qemu = 'y' if cachemode is None: self.cachemode_is_default = 'true' self.cachemode = 'writeback' else: self.cachemode_is_default = 'false' self.cachemode = cachemode # Initialize generic paths: build_root, build_iotests, source_iotests, # which are needed to initialize some environment variables. They are # used by init_*() functions as well. self.source_iotests = source_dir self.build_iotests = build_dir self.build_root = os.path.join(self.build_iotests, '..', '..') self.init_directories() if dry_run: return self.init_binaries() self.malloc_perturb_ = os.getenv('MALLOC_PERTURB_', str(random.randrange(1, 255))) # QEMU_OPTIONS self.qemu_options = '-nodefaults -display none -accel qtest' machine_map = ( ('arm', 'virt'), ('aarch64', 'virt'), ('avr', 'mega2560'), ('m68k', 'virt'), ('riscv32', 'virt'), ('riscv64', 'virt'), ('rx', 'gdbsim-r5f562n8'), ('tricore', 'tricore_testboard') ) for suffix, machine in machine_map: if self.qemu_prog.endswith(f'qemu-system-{suffix}'): self.qemu_options += f' -machine {machine}' # QEMU_DEFAULT_MACHINE self.qemu_default_machine = get_default_machine(self.qemu_prog) self.qemu_img_options = os.getenv('QEMU_IMG_OPTIONS') self.qemu_nbd_options = os.getenv('QEMU_NBD_OPTIONS') is_generic = self.imgfmt not in ['bochs', 'cloop', 'dmg'] self.imgfmt_generic = 'true' if is_generic else 'false' self.qemu_io_options = f'--cache {self.cachemode} --aio {self.aiomode}' if self.misalign: self.qemu_io_options += ' 
--misalign' self.qemu_io_options_no_fmt = self.qemu_io_options if self.imgfmt == 'luks': self.imgoptssyntax = 'true' self.imgkeysecret = '123456' if not self.imgopts: self.imgopts = 'iter-time=10' elif 'iter-time=' not in self.imgopts: self.imgopts += ',iter-time=10' else: self.imgoptssyntax = 'false' self.qemu_io_options += ' -f ' + self.imgfmt if self.imgfmt == 'vmdk': if not self.imgopts: self.imgopts = 'zeroed_grain=on' elif 'zeroed_grain=' not in self.imgopts: self.imgopts += ',zeroed_grain=on' def close(self) -> None: if self.tmp_sock_dir: shutil.rmtree(self.sock_dir) def __enter__(self) -> 'TestEnv': return self def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: self.close() def print_env(self, prefix: str = '') -> None: template = """\ {prefix}QEMU -- "{QEMU_PROG}" {QEMU_OPTIONS} {prefix}QEMU_IMG -- "{QEMU_IMG_PROG}" {QEMU_IMG_OPTIONS} {prefix}QEMU_IO -- "{QEMU_IO_PROG}" {QEMU_IO_OPTIONS} {prefix}QEMU_NBD -- "{QEMU_NBD_PROG}" {QEMU_NBD_OPTIONS} {prefix}IMGFMT -- {IMGFMT}{imgopts} {prefix}IMGPROTO -- {IMGPROTO} {prefix}PLATFORM -- {platform} {prefix}TEST_DIR -- {TEST_DIR} {prefix}SOCK_DIR -- {SOCK_DIR} {prefix}GDB_OPTIONS -- {GDB_OPTIONS} {prefix}VALGRIND_QEMU -- {VALGRIND_QEMU} {prefix}PRINT_QEMU_OUTPUT -- {PRINT_QEMU} {prefix}""" args = collections.defaultdict(str, self.get_env()) if 'IMGOPTS' in args: args['imgopts'] = f" ({args['IMGOPTS']})" u = os.uname() args['platform'] = f'{u.sysname}/{u.machine} {u.nodename} {u.release}' args['prefix'] = prefix print(template.format_map(args))
11,181
34.611465
79
py
qemu
qemu-master/tests/qemu-iotests/testrunner.py
# Class for actually running tests. # # Copyright (c) 2020-2021 Virtuozzo International GmbH # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import os from pathlib import Path import datetime import time import difflib import subprocess import contextlib import json import shutil import sys from multiprocessing import Pool from typing import List, Optional, Any, Sequence, Dict, \ ContextManager from testenv import TestEnv def silent_unlink(path: Path) -> None: try: path.unlink() except OSError: pass def file_diff(file1: str, file2: str) -> List[str]: with open(file1, encoding="utf-8") as f1, \ open(file2, encoding="utf-8") as f2: # We want to ignore spaces at line ends. There are a lot of mess about # it in iotests. # TODO: fix all tests to not produce extra spaces, fix all .out files # and use strict diff here! seq1 = [line.rstrip() for line in f1] seq2 = [line.rstrip() for line in f2] res = [line.rstrip() for line in difflib.unified_diff(seq1, seq2, file1, file2)] return res class LastElapsedTime(ContextManager['LastElapsedTime']): """ Cache for elapsed time for tests, to show it during new test run It is safe to use get() at any time. To use update(), you must either use it inside with-block or use save() after update(). 
""" def __init__(self, cache_file: str, env: TestEnv) -> None: self.env = env self.cache_file = cache_file self.cache: Dict[str, Dict[str, Dict[str, float]]] try: with open(cache_file, encoding="utf-8") as f: self.cache = json.load(f) except (OSError, ValueError): self.cache = {} def get(self, test: str, default: Optional[float] = None) -> Optional[float]: if test not in self.cache: return default if self.env.imgproto not in self.cache[test]: return default return self.cache[test][self.env.imgproto].get(self.env.imgfmt, default) def update(self, test: str, elapsed: float) -> None: d = self.cache.setdefault(test, {}) d.setdefault(self.env.imgproto, {})[self.env.imgfmt] = elapsed def save(self) -> None: with open(self.cache_file, 'w', encoding="utf-8") as f: json.dump(self.cache, f) def __enter__(self) -> 'LastElapsedTime': return self def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: self.save() class TestResult: def __init__(self, status: str, description: str = '', elapsed: Optional[float] = None, diff: Sequence[str] = (), casenotrun: str = '', interrupted: bool = False) -> None: self.status = status self.description = description self.elapsed = elapsed self.diff = diff self.casenotrun = casenotrun self.interrupted = interrupted class TestRunner(ContextManager['TestRunner']): shared_self = None @staticmethod def proc_run_test(test: str, test_field_width: int) -> TestResult: # We are in a subprocess, we can't change the runner object! runner = TestRunner.shared_self assert runner is not None return runner.run_test(test, test_field_width, mp=True) def run_tests_pool(self, tests: List[str], test_field_width: int, jobs: int) -> List[TestResult]: # passing self directly to Pool.starmap() just doesn't work, because # it's a context manager. 
assert TestRunner.shared_self is None TestRunner.shared_self = self with Pool(jobs) as p: results = p.starmap(self.proc_run_test, zip(tests, [test_field_width] * len(tests))) TestRunner.shared_self = None return results def __init__(self, env: TestEnv, tap: bool = False, color: str = 'auto') -> None: self.env = env self.tap = tap self.last_elapsed = LastElapsedTime('.last-elapsed-cache', env) assert color in ('auto', 'on', 'off') self.color = (color == 'on') or (color == 'auto' and sys.stdout.isatty()) self._stack: contextlib.ExitStack def __enter__(self) -> 'TestRunner': self._stack = contextlib.ExitStack() self._stack.enter_context(self.env) self._stack.enter_context(self.last_elapsed) return self def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: self._stack.close() def test_print_one_line(self, test: str, test_field_width: int, starttime: str, endtime: Optional[str] = None, status: str = '...', lasttime: Optional[float] = None, thistime: Optional[float] = None, description: str = '', end: str = '\n') -> None: """ Print short test info before/after test run """ test = os.path.basename(test) if test_field_width is None: test_field_width = 8 if self.tap: if status == 'pass': print(f'ok {self.env.imgfmt} {test}') elif status == 'fail': print(f'not ok {self.env.imgfmt} {test}') elif status == 'not run': print(f'ok {self.env.imgfmt} {test} # SKIP') return if lasttime: lasttime_s = f' (last: {lasttime:.1f}s)' else: lasttime_s = '' if thistime: thistime_s = f'{thistime:.1f}s' else: thistime_s = '...' 
if endtime: endtime = f'[{endtime}]' else: endtime = '' if self.color: if status == 'pass': col = '\033[32m' elif status == 'fail': col = '\033[1m\033[31m' elif status == 'not run': col = '\033[33m' else: col = '' col_end = '\033[0m' else: col = '' col_end = '' print(f'{test:{test_field_width}} {col}{status:10}{col_end} ' f'[{starttime}] {endtime:13}{thistime_s:5} {lasttime_s:14} ' f'{description}', end=end) def find_reference(self, test: str) -> str: if self.env.cachemode == 'none': ref = f'{test}.out.nocache' if os.path.isfile(ref): return ref ref = f'{test}.out.{self.env.imgfmt}' if os.path.isfile(ref): return ref ref = f'{test}.{self.env.qemu_default_machine}.out' if os.path.isfile(ref): return ref return f'{test}.out' def do_run_test(self, test: str) -> TestResult: """ Run one test :param test: test file path Note: this method may be called from subprocess, so it does not change ``self`` object in any way! """ f_test = Path(test) f_reference = Path(self.find_reference(test)) if not f_test.exists(): return TestResult(status='fail', description=f'No such test file: {f_test}') if not os.access(str(f_test), os.X_OK): sys.exit(f'Not executable: {f_test}') if not f_reference.exists(): return TestResult(status='not run', description='No qualified output ' f'(expected {f_reference})') args = [str(f_test.resolve())] env = self.env.prepare_subprocess(args) # Split test directories, so that tests running in parallel don't # break each other. 
for d in ['TEST_DIR', 'SOCK_DIR']: env[d] = os.path.join( env[d], f"{self.env.imgfmt}-{self.env.imgproto}-{f_test.name}") Path(env[d]).mkdir(parents=True, exist_ok=True) test_dir = env['TEST_DIR'] f_bad = Path(test_dir, f_test.name + '.out.bad') f_notrun = Path(test_dir, f_test.name + '.notrun') f_casenotrun = Path(test_dir, f_test.name + '.casenotrun') for p in (f_notrun, f_casenotrun): silent_unlink(p) t0 = time.time() with f_bad.open('w', encoding="utf-8") as f: with subprocess.Popen(args, cwd=str(f_test.parent), env=env, stdin=subprocess.DEVNULL, stdout=f, stderr=subprocess.STDOUT) as proc: try: proc.wait() except KeyboardInterrupt: proc.terminate() proc.wait() return TestResult(status='not run', description='Interrupted by user', interrupted=True) ret = proc.returncode elapsed = round(time.time() - t0, 1) if ret != 0: return TestResult(status='fail', elapsed=elapsed, description=f'failed, exit status {ret}', diff=file_diff(str(f_reference), str(f_bad))) if f_notrun.exists(): return TestResult( status='not run', description=f_notrun.read_text(encoding='utf-8').strip()) casenotrun = '' if f_casenotrun.exists(): casenotrun = f_casenotrun.read_text(encoding='utf-8') diff = file_diff(str(f_reference), str(f_bad)) if diff: if os.environ.get("QEMU_IOTESTS_REGEN", None) is not None: shutil.copyfile(str(f_bad), str(f_reference)) print("########################################") print("##### REFERENCE FILE UPDATED #####") print("########################################") return TestResult(status='fail', elapsed=elapsed, description=f'output mismatch (see {f_bad})', diff=diff, casenotrun=casenotrun) else: f_bad.unlink() return TestResult(status='pass', elapsed=elapsed, casenotrun=casenotrun) def run_test(self, test: str, test_field_width: int, mp: bool = False) -> TestResult: """ Run one test and print short status :param test: test file path :param test_field_width: width for first field of status format :param mp: if true, we are in a multiprocessing environment, don't 
try to rewrite things in stdout Note: this method may be called from subprocess, so it does not change ``self`` object in any way! """ last_el = self.last_elapsed.get(test) start = datetime.datetime.now().strftime('%H:%M:%S') if not self.tap: self.test_print_one_line(test=test, test_field_width=test_field_width, status = 'started' if mp else '...', starttime=start, lasttime=last_el, end = '\n' if mp else '\r') else: testname = os.path.basename(test) print(f'# running {self.env.imgfmt} {testname}') res = self.do_run_test(test) end = datetime.datetime.now().strftime('%H:%M:%S') self.test_print_one_line(test=test, test_field_width=test_field_width, status=res.status, starttime=start, endtime=end, lasttime=last_el, thistime=res.elapsed, description=res.description) if res.casenotrun: if self.tap: print('#' + res.casenotrun.replace('\n', '\n#')) else: print(res.casenotrun) sys.stdout.flush() return res def run_tests(self, tests: List[str], jobs: int = 1) -> bool: n_run = 0 failed = [] notrun = [] casenotrun = [] if self.tap: print('TAP version 13') self.env.print_env('# ') print('1..%d' % len(tests)) else: self.env.print_env() test_field_width = max(len(os.path.basename(t)) for t in tests) + 2 if jobs > 1: results = self.run_tests_pool(tests, test_field_width, jobs) for i, t in enumerate(tests): name = os.path.basename(t) if jobs > 1: res = results[i] else: res = self.run_test(t, test_field_width) assert res.status in ('pass', 'fail', 'not run') if res.casenotrun: casenotrun.append(t) if res.status != 'not run': n_run += 1 if res.status == 'fail': failed.append(name) if res.diff: if self.tap: print('\n'.join(res.diff), file=sys.stderr) else: print('\n'.join(res.diff)) elif res.status == 'not run': notrun.append(name) elif res.status == 'pass': assert res.elapsed is not None self.last_elapsed.update(t, res.elapsed) sys.stdout.flush() if res.interrupted: break if not self.tap: if notrun: print('Not run:', ' '.join(notrun)) if casenotrun: print('Some cases not run in:', ' 
'.join(casenotrun)) if failed: print('Failures:', ' '.join(failed)) print(f'Failed {len(failed)} of {n_run} iotests') else: print(f'Passed all {n_run} iotests') return not failed
14,877
33.360277
79
py
qemu
qemu-master/tests/qemu-iotests/nbd-fault-injector.py
#!/usr/bin/env python3 # NBD server - fault injection utility # # Configuration file syntax: # [inject-error "disconnect-neg1"] # event=neg1 # io=readwrite # when=before # # Note that Python's ConfigParser squashes together all sections with the same # name, so give each [inject-error] a unique name. # # inject-error options: # event - name of the trigger event # "neg1" - first part of negotiation struct # "export" - export struct # "neg2" - second part of negotiation struct # "request" - NBD request struct # "reply" - NBD reply struct # "data" - request/reply data # io - I/O direction that triggers this rule: # "read", "write", or "readwrite" # default: readwrite # when - after how many bytes to inject the fault # -1 - inject error after I/O # 0 - inject error before I/O # integer - inject error after integer bytes # "before" - alias for 0 # "after" - alias for -1 # default: before # # Currently the only error injection action is to terminate the server process. # This resets the TCP connection and thus forces the client to handle # unexpected connection termination. # # Other error injection actions could be added in the future. # # Copyright Red Hat, Inc. 2014 # # Authors: # Stefan Hajnoczi <[email protected]> # # This work is licensed under the terms of the GNU GPL, version 2 or later. # See the COPYING file in the top-level directory. 
import sys
import socket
import struct
import collections
import configparser

# Size advertised to NBD clients.  Nothing backs it: reads are answered with
# zero bytes and write payloads are drained and discarded (handle_connection).
FAKE_DISK_SIZE = 8 * 1024 * 1024 * 1024 # 8 GB

# Protocol constants
NBD_CMD_READ = 0
NBD_CMD_WRITE = 1
NBD_CMD_DISC = 2
NBD_REQUEST_MAGIC = 0x25609513
NBD_SIMPLE_REPLY_MAGIC = 0x67446698
NBD_PASSWD = 0x4e42444d41474943
NBD_OPTS_MAGIC = 0x49484156454F5054
NBD_CLIENT_MAGIC = 0x0000420281861253
NBD_OPT_EXPORT_NAME = 1 << 0

# Protocol structs (big-endian; 'x' format chars are zero padding bytes)
neg_classic_struct = struct.Struct('>QQQI124x')
neg1_struct = struct.Struct('>QQH')
export_tuple = collections.namedtuple('Export', 'reserved magic opt len')
export_struct = struct.Struct('>IQII')
neg2_struct = struct.Struct('>QH124x')
request_tuple = collections.namedtuple('Request',
                                       'magic type handle from_ len')
request_struct = struct.Struct('>IIQQI')
reply_struct = struct.Struct('>IIQ')


def err(msg):
    '''Print msg to stderr and terminate the process with exit status 1.'''
    sys.stderr.write(msg + '\n')
    sys.exit(1)


def recvall(sock, bufsize):
    '''Read exactly bufsize bytes from sock, looping over short reads.

    Raises Exception if the peer disconnects before all bytes arrive.
    '''
    received = 0
    chunks = []
    while received < bufsize:
        chunk = sock.recv(bufsize - received)
        if len(chunk) == 0:
            # Peer closed the connection mid-transfer
            raise Exception('unexpected disconnect')
        chunks.append(chunk)
        received += len(chunk)
    return b''.join(chunks)


class Rule(object):
    '''One fault-injection rule parsed from an [inject-error] config section.'''

    def __init__(self, name, event, io, when):
        self.name = name    # config section name, used in log output
        self.event = event  # protocol event that triggers this rule
        self.io = io        # 'read', 'write', or 'readwrite'
        self.when = when    # 0 = before I/O, -1 = after I/O, N = after N bytes

    def match(self, event, io):
        '''Return True if this rule fires for the given event and direction.'''
        if event != self.event:
            return False
        if io != self.io and self.io != 'readwrite':
            return False
        return True


class FaultInjectionSocket(object):
    '''Socket wrapper that applies fault rules around every send/recv.'''

    def __init__(self, sock, rules):
        self.sock = sock
        self.rules = rules

    def check(self, event, io, bufsize=None):
        '''Apply matching rules; may exit the process or truncate the I/O.

        Called with bufsize before an I/O and with bufsize=None after it.
        Returns the (possibly reduced) number of bytes to transfer.
        '''
        for rule in self.rules:
            if rule.match(event, io):
                if rule.when == 0 or bufsize is None:
                    # 'before' rule, or any rule seen at the post-I/O check:
                    # terminate to reset the TCP connection on the client
                    print('Closing connection on rule match %s' % rule.name)
                    self.sock.close()
                    sys.stdout.flush()
                    sys.exit(0)
                if rule.when != -1:
                    # Positive 'when': truncate the transfer at that offset
                    return rule.when
        return bufsize

    def send(self, buf, event):
        bufsize = self.check(event, 'write', bufsize=len(buf))
        self.sock.sendall(buf[:bufsize])
        self.check(event, 'write')  # fire any 'after' rules

    def recv(self, bufsize, event):
        bufsize = self.check(event, 'read', bufsize=bufsize)
        data = recvall(self.sock, bufsize)
        self.check(event, 'read')  # fire any 'after' rules
        return data

    def close(self):
        self.sock.close()


def negotiate_classic(conn):
    '''Send the oldstyle (pre-option-haggling) negotiation blob.'''
    buf = neg_classic_struct.pack(NBD_PASSWD, NBD_CLIENT_MAGIC,
                                  FAKE_DISK_SIZE, 0)
    conn.send(buf, event='neg-classic')


def negotiate_export(conn):
    '''Perform newstyle negotiation expecting one NBD_OPT_EXPORT_NAME.'''
    # Send negotiation part 1
    buf = neg1_struct.pack(NBD_PASSWD, NBD_OPTS_MAGIC, 0)
    conn.send(buf, event='neg1')

    # Receive export option
    buf = conn.recv(export_struct.size, event='export')
    export = export_tuple._make(export_struct.unpack(buf))
    assert export.magic == NBD_OPTS_MAGIC
    assert export.opt == NBD_OPT_EXPORT_NAME
    # Export name is read so 'export-name' rules can fire, but its value
    # is otherwise ignored
    name = conn.recv(export.len, event='export-name')

    # Send negotiation part 2
    buf = neg2_struct.pack(FAKE_DISK_SIZE, 0)
    conn.send(buf, event='neg2')


def negotiate(conn, use_export):
    '''Negotiate export with client'''
    if use_export:
        negotiate_export(conn)
    else:
        negotiate_classic(conn)


def read_request(conn):
    '''Parse NBD request from client'''
    buf = conn.recv(request_struct.size, event='request')
    req = request_tuple._make(request_struct.unpack(buf))
    assert req.magic == NBD_REQUEST_MAGIC
    return req


def write_reply(conn, error, handle):
    '''Send a simple reply header (no payload) for the given handle.'''
    buf = reply_struct.pack(NBD_SIMPLE_REPLY_MAGIC, error, handle)
    conn.send(buf, event='reply')


def handle_connection(conn, use_export):
    '''Serve one client until NBD_CMD_DISC or an unrecognized command.'''
    negotiate(conn, use_export)
    while True:
        req = read_request(conn)
        if req.type == NBD_CMD_READ:
            write_reply(conn, 0, req.handle)
            conn.send(b'\0' * req.len, event='data')  # all-zero fake data
        elif req.type == NBD_CMD_WRITE:
            _ = conn.recv(req.len, event='data')  # drain and discard payload
            write_reply(conn, 0, req.handle)
        elif req.type == NBD_CMD_DISC:
            break
        else:
            print('unrecognized command type %#02x' % req.type)
            break
    conn.close()


def run_server(sock, rules, use_export):
    # Serve clients one at a time, forever; the process only exits via a
    # rule match in FaultInjectionSocket.check() or an external signal
    while True:
        conn, _ = sock.accept()
        handle_connection(FaultInjectionSocket(conn, rules), use_export)


def parse_inject_error(name, options):
    '''Build a Rule from one [inject-error] section's option dict.

    Calls err() (which exits the process) on missing or invalid options.
    '''
    if 'event' not in options:
        err('missing \"event\" option in %s' % name)
    event = options['event']
    if event not in ('neg-classic', 'neg1', 'export', 'neg2',
                     'request', 'reply', 'data'):
        err('invalid \"event\" option value \"%s\" in %s' % (event, name))
    io = options.get('io', 'readwrite')
    if io not in ('read', 'write', 'readwrite'):
        err('invalid \"io\" option value \"%s\" in %s' % (io, name))
    when = options.get('when', 'before')
    try:
        when = int(when)
    except ValueError:
        # Not a plain number: accept the 'before'/'after' aliases
        if when == 'before':
            when = 0
        elif when == 'after':
            when = -1
        else:
            err('invalid \"when\" option value \"%s\" in %s' % (when, name))
    return Rule(name, event, io, when)


def parse_config(config):
    '''Convert a parsed config file into a list of Rule objects.'''
    rules = []
    for name in config.sections():
        if name.startswith('inject-error'):
            options = dict(config.items(name))
            rules.append(parse_inject_error(name, options))
        else:
            err('invalid config section name: %s' % name)
    return rules


def load_rules(filename):
    '''Read the config file at filename and return its fault rules.'''
    config = configparser.RawConfigParser()
    with open(filename, 'rt') as f:
        config.read_file(f, filename)
    return parse_config(config)


def open_socket(path):
    '''Open a TCP or UNIX domain listen socket'''
    if ':' in path:
        host, port = path.split(':', 1)
        sock = socket.socket()
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind((host, int(port)))
        # If given port was 0 the final port number is now available
        path = '%s:%d' % sock.getsockname()
    else:
        sock = socket.socket(socket.AF_UNIX)
        sock.bind(path)
    sock.listen(0)
    print('Listening on %s' % path)
    sys.stdout.flush()  # another process may be waiting, show message now
    return sock


def usage(args):
    '''Print command-line usage to stderr and exit with status 1.'''
    # NOTE(review): 'an fault' below is a grammar typo in user-facing text;
    # left unchanged here since this pass only adds documentation
    sys.stderr.write('usage: %s [--classic-negotiation] <tcp-port>|<unix-path> <config-file>\n' % args[0])
    sys.stderr.write('Run an fault injector NBD server with rules defined in a config file.\n')
    sys.exit(1)


def main(args):
    # Accept: prog ADDR CONFIG  or  prog --classic-negotiation ADDR CONFIG
    if len(args) != 3 and len(args) != 4:
        usage(args)
    use_export = True
    if args[1] == '--classic-negotiation':
        use_export = False
    elif len(args) == 4:
        usage(args)

    # Positional arguments shift by one when the flag is present
    sock = open_socket(args[1 if use_export else 2])
    rules = load_rules(args[2 if use_export else 3])
    run_server(sock, rules, use_export)
    return 0

if __name__ == '__main__':
    sys.exit(main(sys.argv))
8,638
30.878229
106
py
qemu
qemu-master/tests/qemu-iotests/qcow2_format.py
# Library for manipulations with qcow2 image # # Copyright (c) 2020 Virtuozzo International GmbH. # Copyright (C) 2012 Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import struct import string import json class ComplexEncoder(json.JSONEncoder): def default(self, obj): if hasattr(obj, 'to_json'): return obj.to_json() else: return json.JSONEncoder.default(self, obj) class Qcow2Field: def __init__(self, value): self.value = value def __str__(self): return str(self.value) class Flags64(Qcow2Field): def __str__(self): bits = [] for bit in range(64): if self.value & (1 << bit): bits.append(bit) return str(bits) class BitmapFlags(Qcow2Field): flags = { 0x1: 'in-use', 0x2: 'auto' } def __str__(self): bits = [] for bit in range(64): flag = self.value & (1 << bit) if flag: bits.append(self.flags.get(flag, f'bit-{bit}')) return f'{self.value:#x} ({bits})' class Enum(Qcow2Field): def __str__(self): return f'{self.value:#x} ({self.mapping.get(self.value, "<unknown>")})' class Qcow2StructMeta(type): # Mapping from c types to python struct format ctypes = { 'u8': 'B', 'u16': 'H', 'u32': 'I', 'u64': 'Q' } def __init__(self, name, bases, attrs): if 'fields' in attrs: self.fmt = '>' + ''.join(self.ctypes[f[0]] for f in self.fields) class Qcow2Struct(metaclass=Qcow2StructMeta): """Qcow2Struct: base class for qcow2 data structures Successors should define fields class variable, which is: list 
of tuples, each of three elements: - c-type (one of 'u8', 'u16', 'u32', 'u64') - format (format_spec to use with .format() when dump or 'mask' to dump bitmasks) - field name """ def __init__(self, fd=None, offset=None, data=None): """ Two variants: 1. Specify data. fd and offset must be None. 2. Specify fd and offset, data must be None. offset may be omitted in this case, than current position of fd is used. """ if data is None: assert fd is not None buf_size = struct.calcsize(self.fmt) if offset is not None: fd.seek(offset) data = fd.read(buf_size) else: assert fd is None and offset is None values = struct.unpack(self.fmt, data) self.__dict__ = dict((field[2], values[i]) for i, field in enumerate(self.fields)) def dump(self, is_json=False): if is_json: print(json.dumps(self.to_json(), indent=4, cls=ComplexEncoder)) return for f in self.fields: value = self.__dict__[f[2]] if isinstance(f[1], str): value_str = f[1].format(value) else: value_str = str(f[1](value)) print('{:<25} {}'.format(f[2], value_str)) def to_json(self): return dict((f[2], self.__dict__[f[2]]) for f in self.fields) class Qcow2BitmapExt(Qcow2Struct): fields = ( ('u32', '{}', 'nb_bitmaps'), ('u32', '{}', 'reserved32'), ('u64', '{:#x}', 'bitmap_directory_size'), ('u64', '{:#x}', 'bitmap_directory_offset') ) def __init__(self, fd, cluster_size): super().__init__(fd=fd) tail = struct.calcsize(self.fmt) % 8 if tail: fd.seek(8 - tail, 1) position = fd.tell() self.cluster_size = cluster_size self.read_bitmap_directory(fd) fd.seek(position) def read_bitmap_directory(self, fd): fd.seek(self.bitmap_directory_offset) self.bitmap_directory = \ [Qcow2BitmapDirEntry(fd, cluster_size=self.cluster_size) for _ in range(self.nb_bitmaps)] def dump(self): super().dump() for entry in self.bitmap_directory: print() entry.dump() def to_json(self): fields_dict = super().to_json() fields_dict['bitmap_directory'] = self.bitmap_directory return fields_dict class Qcow2BitmapDirEntry(Qcow2Struct): fields = ( ('u64', '{:#x}', 
'bitmap_table_offset'), ('u32', '{}', 'bitmap_table_size'), ('u32', BitmapFlags, 'flags'), ('u8', '{}', 'type'), ('u8', '{}', 'granularity_bits'), ('u16', '{}', 'name_size'), ('u32', '{}', 'extra_data_size') ) def __init__(self, fd, cluster_size): super().__init__(fd=fd) self.cluster_size = cluster_size # Seek relative to the current position in the file fd.seek(self.extra_data_size, 1) bitmap_name = fd.read(self.name_size) self.name = bitmap_name.decode('ascii') # Move position to the end of the entry in the directory entry_raw_size = self.bitmap_dir_entry_raw_size() padding = ((entry_raw_size + 7) & ~7) - entry_raw_size fd.seek(padding, 1) self.bitmap_table = Qcow2BitmapTable(fd=fd, offset=self.bitmap_table_offset, nb_entries=self.bitmap_table_size, cluster_size=self.cluster_size) def bitmap_dir_entry_raw_size(self): return struct.calcsize(self.fmt) + self.name_size + \ self.extra_data_size def dump(self): print(f'{"Bitmap name":<25} {self.name}') super(Qcow2BitmapDirEntry, self).dump() self.bitmap_table.dump() def to_json(self): # Put the name ahead of the dict return { 'name': self.name, **super().to_json(), 'bitmap_table': self.bitmap_table } class Qcow2BitmapTableEntry(Qcow2Struct): fields = ( ('u64', '{}', 'entry'), ) BME_TABLE_ENTRY_RESERVED_MASK = 0xff000000000001fe BME_TABLE_ENTRY_OFFSET_MASK = 0x00fffffffffffe00 BME_TABLE_ENTRY_FLAG_ALL_ONES = 1 def __init__(self, fd): super().__init__(fd=fd) self.reserved = self.entry & self.BME_TABLE_ENTRY_RESERVED_MASK self.offset = self.entry & self.BME_TABLE_ENTRY_OFFSET_MASK if self.offset: if self.entry & self.BME_TABLE_ENTRY_FLAG_ALL_ONES: self.type = 'invalid' else: self.type = 'serialized' elif self.entry & self.BME_TABLE_ENTRY_FLAG_ALL_ONES: self.type = 'all-ones' else: self.type = 'all-zeroes' def to_json(self): return {'type': self.type, 'offset': self.offset, 'reserved': self.reserved} class Qcow2BitmapTable: def __init__(self, fd, offset, nb_entries, cluster_size): self.cluster_size = cluster_size position 
= fd.tell() fd.seek(offset) self.entries = [Qcow2BitmapTableEntry(fd) for _ in range(nb_entries)] fd.seek(position) def dump(self): bitmap_table = enumerate(self.entries) print(f'{"Bitmap table":<14} {"type":<15} {"size":<12} {"offset"}') for i, entry in bitmap_table: if entry.type == 'serialized': size = self.cluster_size else: size = 0 print(f'{i:<14} {entry.type:<15} {size:<12} {entry.offset}') def to_json(self): return self.entries QCOW2_EXT_MAGIC_BITMAPS = 0x23852875 class QcowHeaderExtension(Qcow2Struct): class Magic(Enum): mapping = { 0xe2792aca: 'Backing format', 0x6803f857: 'Feature table', 0x0537be77: 'Crypto header', QCOW2_EXT_MAGIC_BITMAPS: 'Bitmaps', 0x44415441: 'Data file' } def to_json(self): return self.mapping.get(self.value, "<unknown>") fields = ( ('u32', Magic, 'magic'), ('u32', '{}', 'length') # length bytes of data follows # then padding to next multiply of 8 ) def __init__(self, magic=None, length=None, data=None, fd=None, cluster_size=None): """ Support both loading from fd and creation from user data. For fd-based creation current position in a file will be used to read the data. The cluster_size value may be obtained by dependent structures. This should be somehow refactored and functionality should be moved to superclass (to allow creation of any qcow2 struct), but then, fields of variable length (data here) should be supported in base class somehow. Note also, that we probably want to parse different extensions. Should they be subclasses of this class, or how to do it better? Should it be something like QAPI union with discriminator field (magic here). So, it's a TODO. We'll see how to properly refactor this when we have more qcow2 structures. 
""" if fd is None: assert all(v is not None for v in (magic, length, data)) self.magic = magic self.length = length if length % 8 != 0: padding = 8 - (length % 8) data += b'\0' * padding self.data = data else: assert all(v is None for v in (magic, length, data)) super().__init__(fd=fd) if self.magic == QCOW2_EXT_MAGIC_BITMAPS: self.obj = Qcow2BitmapExt(fd=fd, cluster_size=cluster_size) self.data = None else: padded = (self.length + 7) & ~7 self.data = fd.read(padded) assert self.data is not None self.obj = None if self.data is not None: data_str = self.data[:self.length] if all(c in string.printable.encode( 'ascii') for c in data_str): data_str = f"'{ data_str.decode('ascii') }'" else: data_str = '<binary>' self.data_str = data_str def dump(self): super().dump() if self.obj is None: print(f'{"data":<25} {self.data_str}') else: self.obj.dump() def to_json(self): # Put the name ahead of the dict res = {'name': self.Magic(self.magic), **super().to_json()} if self.obj is not None: res['data'] = self.obj else: res['data_str'] = self.data_str return res @classmethod def create(cls, magic, data): return QcowHeaderExtension(magic, len(data), data) class QcowHeader(Qcow2Struct): fields = ( # Version 2 header fields ('u32', '{:#x}', 'magic'), ('u32', '{}', 'version'), ('u64', '{:#x}', 'backing_file_offset'), ('u32', '{:#x}', 'backing_file_size'), ('u32', '{}', 'cluster_bits'), ('u64', '{}', 'size'), ('u32', '{}', 'crypt_method'), ('u32', '{}', 'l1_size'), ('u64', '{:#x}', 'l1_table_offset'), ('u64', '{:#x}', 'refcount_table_offset'), ('u32', '{}', 'refcount_table_clusters'), ('u32', '{}', 'nb_snapshots'), ('u64', '{:#x}', 'snapshot_offset'), # Version 3 header fields ('u64', Flags64, 'incompatible_features'), ('u64', Flags64, 'compatible_features'), ('u64', Flags64, 'autoclear_features'), ('u32', '{}', 'refcount_order'), ('u32', '{}', 'header_length'), ) def __init__(self, fd): super().__init__(fd=fd, offset=0) self.set_defaults() self.cluster_size = 1 << self.cluster_bits 
fd.seek(self.header_length) self.load_extensions(fd) if self.backing_file_offset: fd.seek(self.backing_file_offset) self.backing_file = fd.read(self.backing_file_size) else: self.backing_file = None def set_defaults(self): if self.version == 2: self.incompatible_features = 0 self.compatible_features = 0 self.autoclear_features = 0 self.refcount_order = 4 self.header_length = 72 def load_extensions(self, fd): self.extensions = [] if self.backing_file_offset != 0: end = min(self.cluster_size, self.backing_file_offset) else: end = self.cluster_size while fd.tell() < end: ext = QcowHeaderExtension(fd=fd, cluster_size=self.cluster_size) if ext.magic == 0: break else: self.extensions.append(ext) def update_extensions(self, fd): fd.seek(self.header_length) extensions = self.extensions extensions.append(QcowHeaderExtension(0, 0, b'')) for ex in extensions: buf = struct.pack('>II', ex.magic, ex.length) fd.write(buf) fd.write(ex.data) if self.backing_file is not None: self.backing_file_offset = fd.tell() fd.write(self.backing_file) if fd.tell() > self.cluster_size: raise Exception('I think I just broke the image...') def update(self, fd): header_bytes = self.header_length self.update_extensions(fd) fd.seek(0) header = tuple(self.__dict__[f] for t, p, f in QcowHeader.fields) buf = struct.pack(QcowHeader.fmt, *header) buf = buf[0:header_bytes-1] fd.write(buf) def dump_extensions(self, is_json=False): if is_json: print(json.dumps(self.extensions, indent=4, cls=ComplexEncoder)) return for ex in self.extensions: print('Header extension:') ex.dump() print()
14,526
29.974414
79
py
qemu
qemu-master/tests/qemu-iotests/qcow2.py
#!/usr/bin/env python3
#
# Manipulations with qcow2 image
#
# Copyright (C) 2012 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import sys

from qcow2_format import (
    QcowHeader,
    QcowHeaderExtension
)


# Set to True by the '__main__' argument handling when '-j' is given;
# the dump commands read it to choose JSON vs. plain-text output.
is_json = False


def cmd_dump_header(fd):
    """Print the qcow2 header followed by all header extensions."""
    h = QcowHeader(fd)
    h.dump(is_json)
    print()
    h.dump_extensions(is_json)


def cmd_dump_header_exts(fd):
    """Print only the qcow2 header extensions."""
    h = QcowHeader(fd)
    h.dump_extensions(is_json)


def cmd_set_header(fd, name, value):
    """Set header field 'name' to the integer 'value' and rewrite the header.

    'value' accepts any base understood by int(x, 0), e.g. '0x10' or '16'.
    Exits with status 1 on an unknown field or a non-numeric value.
    """
    try:
        value = int(value, 0)
    except ValueError:
        print("'%s' is not a valid number" % value)
        sys.exit(1)

    # Third element of each QcowHeader.fields tuple is the field name.
    fields = (field[2] for field in QcowHeader.fields)
    if name not in fields:
        print("'%s' is not a known header field" % name)
        sys.exit(1)

    h = QcowHeader(fd)
    h.__dict__[name] = value
    h.update(fd)


def cmd_add_header_ext(fd, magic, data):
    """Append a header extension with the given magic and ASCII data."""
    try:
        magic = int(magic, 0)
    except ValueError:
        print("'%s' is not a valid magic number" % magic)
        sys.exit(1)

    h = QcowHeader(fd)
    h.extensions.append(QcowHeaderExtension.create(magic,
                                                   data.encode('ascii')))
    h.update(fd)


def cmd_add_header_ext_stdio(fd, magic):
    """Like cmd_add_header_ext, but read the extension data from stdin."""
    data = sys.stdin.read()

    cmd_add_header_ext(fd, magic, data)


def cmd_del_header_ext(fd, magic):
    """Delete every header extension whose magic matches 'magic'."""
    try:
        magic = int(magic, 0)
    except ValueError:
        print("'%s' is not a valid magic number" % magic)
        sys.exit(1)

    h = QcowHeader(fd)
    # Rebuild the list instead of calling remove() while iterating:
    # mutating the list inside 'for ex in h.extensions' makes the
    # iterator skip the element that follows each removal, so only
    # every other matching (duplicate) extension would be deleted.
    remaining = [ex for ex in h.extensions if ex.magic != magic]
    if len(remaining) == len(h.extensions):
        print("No such header extension")
        return

    h.extensions = remaining
    h.update(fd)


def cmd_set_feature_bit(fd, group, bit):
    """Set bit number 'bit' in the feature bitmap selected by 'group'.

    'group' is one of 'incompatible', 'compatible' or 'autoclear'.
    """
    try:
        bit = int(bit, 0)
        if bit < 0 or bit >= 64:
            raise ValueError
    except ValueError:
        print("'%s' is not a valid bit number in range [0, 64)" % bit)
        sys.exit(1)

    h = QcowHeader(fd)
    if group == 'incompatible':
        h.incompatible_features |= 1 << bit
    elif group == 'compatible':
        h.compatible_features |= 1 << bit
    elif group == 'autoclear':
        h.autoclear_features |= 1 << bit
    else:
        print("'%s' is not a valid group, try "
              "'incompatible', 'compatible', or 'autoclear'" % group)
        sys.exit(1)

    h.update(fd)


# Command table: (name, handler, number of arguments, description).
cmds = [
    ['dump-header', cmd_dump_header, 0,
     'Dump image header and header extensions'],
    ['dump-header-exts', cmd_dump_header_exts, 0,
     'Dump image header extensions'],
    ['set-header', cmd_set_header, 2, 'Set a field in the header'],
    ['add-header-ext', cmd_add_header_ext, 2, 'Add a header extension'],
    ['add-header-ext-stdio', cmd_add_header_ext_stdio, 1,
     'Add a header extension, data from stdin'],
    ['del-header-ext', cmd_del_header_ext, 1, 'Delete a header extension'],
    ['set-feature-bit', cmd_set_feature_bit, 2, 'Set a feature bit'],
]


def main(filename, cmd, args):
    """Look up 'cmd' in the cmds table and run it against 'filename'."""
    with open(filename, "r+b") as fd:
        for name, handler, num_args, desc in cmds:
            if name != cmd:
                continue
            elif len(args) != num_args:
                usage()
                return
            else:
                handler(fd, *args)
                return
        print("Unknown command '%s'" % cmd)


def usage():
    print("Usage: %s <file> <cmd> [<arg>, ...] [<key>, ...]" % sys.argv[0])
    print("")
    print("Supported commands:")
    for name, handler, num_args, desc in cmds:
        print("    %-20s - %s" % (name, desc))
    print("")
    print("Supported keys:")
    print("    %-20s - %s" % ('-j', 'Dump in JSON format'))


if __name__ == '__main__':
    if len(sys.argv) < 3:
        usage()
        sys.exit(1)

    is_json = '-j' in sys.argv
    if is_json:
        sys.argv.remove('-j')

    main(sys.argv[1], sys.argv[2], sys.argv[3:])
4,555
24.740113
75
py
qemu
qemu-master/tests/qemu-iotests/qed.py
#!/usr/bin/env python3
#
# Tool to manipulate QED image files
#
# Copyright (C) 2010 IBM, Corp.
#
# Authors:
#  Stefan Hajnoczi <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.

import sys
import struct
import random
import optparse

# This can be used as a module.  The struct helpers are exported too so
# that importers can build/parse QED metadata without reimplementing the
# on-disk formats (the list is a superset of the previous exports).
__all__ = ['QED_F_NEED_CHECK', 'QED',
           'header_fmt', 'header_size', 'field_names',
           'table_elem_fmt', 'table_elem_size',
           'unpack_header', 'pack_header',
           'unpack_table_elem', 'pack_table_elem']

QED_F_NEED_CHECK = 0x02

# Little-endian on-disk header layout and its field names, in order.
header_fmt = '<IIIIQQQQQII'
header_size = struct.calcsize(header_fmt)
field_names = ['magic', 'cluster_size', 'table_size', 'header_size',
               'features', 'compat_features', 'autoclear_features',
               'l1_table_offset', 'image_size', 'backing_filename_offset',
               'backing_filename_size']
# L1/L2 table elements are little-endian 64-bit offsets.
table_elem_fmt = '<Q'
table_elem_size = struct.calcsize(table_elem_fmt)


def err(msg):
    """Print an error message to stderr and exit with status 1."""
    sys.stderr.write(msg + '\n')
    sys.exit(1)


def unpack_header(s):
    """Unpack on-disk header bytes into a field-name -> value dict."""
    fields = struct.unpack(header_fmt, s)
    return dict((field_names[idx], val) for idx, val in enumerate(fields))


def pack_header(header):
    """Pack a field-name -> value dict back into on-disk header bytes."""
    fields = tuple(header[x] for x in field_names)
    return struct.pack(header_fmt, *fields)


def unpack_table_elem(s):
    """Decode a single 64-bit table element from bytes."""
    return struct.unpack(table_elem_fmt, s)[0]


def pack_table_elem(elem):
    """Encode a single 64-bit table element to bytes."""
    return struct.pack(table_elem_fmt, elem)


class QED(object):
    """Read/write access to the metadata of an open QED image.

    'f' must be a binary file-like object supporting seek/read/write.
    The header and L1 table are loaded eagerly on construction.
    """

    def __init__(self, f):
        self.f = f

        self.f.seek(0, 2)
        self.filesize = f.tell()

        self.load_header()
        self.load_l1_table()

    def raw_pread(self, offset, size):
        """Read 'size' bytes at absolute 'offset'."""
        self.f.seek(offset)
        return self.f.read(size)

    def raw_pwrite(self, offset, data):
        """Write 'data' at absolute 'offset'."""
        self.f.seek(offset)
        return self.f.write(data)

    def load_header(self):
        self.header = unpack_header(self.raw_pread(0, header_size))

    def store_header(self):
        self.raw_pwrite(0, pack_header(self.header))

    def read_table(self, offset):
        """Read a full L1/L2 table at 'offset' as a list of ints."""
        size = self.header['table_size'] * self.header['cluster_size']
        s = self.raw_pread(offset, size)
        # Fix: 'xrange' does not exist on Python 3 (the shebang is
        # python3), so this raised NameError; use range().
        table = [unpack_table_elem(s[i:i + table_elem_size])
                 for i in range(0, size, table_elem_size)]
        return table

    def load_l1_table(self):
        self.l1_table = self.read_table(self.header['l1_table_offset'])
        self.table_nelems = self.header['table_size'] * \
            self.header['cluster_size'] // table_elem_size

    def write_table(self, offset, table):
        """Write a full L1/L2 table (list of ints) at 'offset'."""
        # Fix: the elements are bytes, so join with b''; on Python 3 a
        # str ''.join over bytes raises TypeError.
        s = b''.join(pack_table_elem(x) for x in table)
        self.raw_pwrite(offset, s)


def random_table_item(table):
    """Return a random (index, offset) pair among allocated entries."""
    vals = [(index, offset) for index, offset in enumerate(table)
            if offset != 0]
    if not vals:
        err('cannot pick random item because table is empty')
    return random.choice(vals)


def corrupt_table_duplicate(table):
    '''Corrupt a table by introducing a duplicate offset'''
    victim_idx, victim_val = random_table_item(table)
    unique_vals = set(table)
    if len(unique_vals) == 1:
        err('no duplication corruption possible in table')
    # Pick any other value already present so the offset is duplicated.
    dup_val = random.choice(list(unique_vals.difference([victim_val])))
    table[victim_idx] = dup_val


def corrupt_table_invalidate(qed, table):
    '''Corrupt a table by introducing an invalid offset'''
    index, _ = random_table_item(table)
    # Any offset beyond the end of the file is invalid.
    table[index] = qed.filesize + random.randint(0, 100 * 1024 * 1024 *
                                                 1024 * 1024)


def cmd_show(qed, *args):
    '''show [header|l1|l2 <offset>]- Show header or l1/l2 tables'''
    if not args or args[0] == 'header':
        print(qed.header)
    elif args[0] == 'l1':
        print(qed.l1_table)
    elif len(args) == 2 and args[0] == 'l2':
        offset = int(args[1])
        print(qed.read_table(offset))
    else:
        err('unrecognized sub-command')


def cmd_duplicate(qed, table_level):
    '''duplicate l1|l2 - Duplicate a random table element'''
    if table_level == 'l1':
        offset = qed.header['l1_table_offset']
        table = qed.l1_table
    elif table_level == 'l2':
        _, offset = random_table_item(qed.l1_table)
        table = qed.read_table(offset)
    else:
        err('unrecognized sub-command')
    corrupt_table_duplicate(table)
    qed.write_table(offset, table)


def cmd_invalidate(qed, table_level):
    '''invalidate l1|l2 - Plant an invalid table element at random'''
    if table_level == 'l1':
        offset = qed.header['l1_table_offset']
        table = qed.l1_table
    elif table_level == 'l2':
        _, offset = random_table_item(qed.l1_table)
        table = qed.read_table(offset)
    else:
        err('unrecognized sub-command')
    corrupt_table_invalidate(qed, table)
    qed.write_table(offset, table)


def cmd_need_check(qed, *args):
    '''need-check [on|off] - Test, set, or clear the QED_F_NEED_CHECK header bit'''
    if not args:
        print(bool(qed.header['features'] & QED_F_NEED_CHECK))
        return

    if args[0] == 'on':
        qed.header['features'] |= QED_F_NEED_CHECK
    elif args[0] == 'off':
        qed.header['features'] &= ~QED_F_NEED_CHECK
    else:
        err('unrecognized sub-command')
    qed.store_header()


def cmd_zero_cluster(qed, pos, *args):
    '''zero-cluster <pos> [<n>] - Zero data clusters'''
    pos, n = int(pos), 1
    if args:
        if len(args) != 1:
            err('expected one argument')
        n = int(args[0])

    # Fix: xrange -> range (Python 3).
    for i in range(n):
        l1_index = pos // qed.header['cluster_size'] // len(qed.l1_table)
        if qed.l1_table[l1_index] == 0:
            err('no l2 table allocated')

        l2_offset = qed.l1_table[l1_index]
        l2_table = qed.read_table(l2_offset)

        l2_index = (pos // qed.header['cluster_size']) % len(qed.l1_table)
        l2_table[l2_index] = 1  # zero the data cluster
        qed.write_table(l2_offset, l2_table)
        pos += qed.header['cluster_size']


def cmd_copy_metadata(qed, outfile):
    '''copy-metadata <outfile> - Copy metadata only (for scrubbing corrupted images)'''
    with open(outfile, 'wb') as out:
        # Match file size
        out.seek(qed.filesize - 1)
        # Fix: the file is opened in binary mode, so write bytes, not str.
        out.write(b'\0')

        # Copy header clusters
        out.seek(0)
        header_size_bytes = qed.header['header_size'] * \
            qed.header['cluster_size']
        out.write(qed.raw_pread(0, header_size_bytes))

        # Copy L1 table
        out.seek(qed.header['l1_table_offset'])
        # Fix: join bytes with b'' (str ''.join raises TypeError on py3).
        s = b''.join(pack_table_elem(x) for x in qed.l1_table)
        out.write(s)

        # Copy L2 tables
        for l2_offset in qed.l1_table:
            if l2_offset == 0:
                continue
            l2_table = qed.read_table(l2_offset)
            out.seek(l2_offset)
            s = b''.join(pack_table_elem(x) for x in l2_table)
            out.write(s)


def usage():
    print('Usage: %s <file> <cmd> [<arg>, ...]' % sys.argv[0])
    print()
    print('Supported commands:')
    for cmd in sorted(x for x in globals() if x.startswith('cmd_')):
        print(globals()[cmd].__doc__)
    sys.exit(1)


def main():
    if len(sys.argv) < 3:
        usage()

    filename, cmd = sys.argv[1:3]

    cmd = 'cmd_' + cmd.replace('-', '_')
    if cmd not in globals():
        usage()

    qed = QED(open(filename, 'r+b'))
    try:
        globals()[cmd](qed, *sys.argv[3:])
    except TypeError:
        # Wrong argument count for the handler: its docstring is the
        # usage line for this sub-command.
        sys.stderr.write(globals()[cmd].__doc__ + '\n')
        sys.exit(1)


if __name__ == '__main__':
    main()
7,210
29.555085
103
py
qemu
qemu-master/tests/qemu-iotests/iotests.py
# Common utilities and Python wrappers for qemu-iotests # # Copyright (C) 2012 IBM Corp. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import argparse import atexit import bz2 from collections import OrderedDict import faulthandler import json import logging import os import re import shutil import signal import struct import subprocess import sys import time from typing import (Any, Callable, Dict, Iterable, Iterator, List, Optional, Sequence, TextIO, Tuple, Type, TypeVar) import unittest from contextlib import contextmanager from qemu.machine import qtest from qemu.qmp.legacy import QMPMessage, QEMUMonitorProtocol from qemu.utils import VerboseProcessError # Use this logger for logging messages directly from the iotests module logger = logging.getLogger('qemu.iotests') logger.addHandler(logging.NullHandler()) # Use this logger for messages that ought to be used for diff output. test_logger = logging.getLogger('qemu.iotests.diff_io') faulthandler.enable() # This will not work if arguments contain spaces but is necessary if we # want to support the override options that ./check supports. 
qemu_img_args = [os.environ.get('QEMU_IMG_PROG', 'qemu-img')] if os.environ.get('QEMU_IMG_OPTIONS'): qemu_img_args += os.environ['QEMU_IMG_OPTIONS'].strip().split(' ') qemu_io_args = [os.environ.get('QEMU_IO_PROG', 'qemu-io')] if os.environ.get('QEMU_IO_OPTIONS'): qemu_io_args += os.environ['QEMU_IO_OPTIONS'].strip().split(' ') qemu_io_args_no_fmt = [os.environ.get('QEMU_IO_PROG', 'qemu-io')] if os.environ.get('QEMU_IO_OPTIONS_NO_FMT'): qemu_io_args_no_fmt += \ os.environ['QEMU_IO_OPTIONS_NO_FMT'].strip().split(' ') qemu_nbd_prog = os.environ.get('QEMU_NBD_PROG', 'qemu-nbd') qemu_nbd_args = [qemu_nbd_prog] if os.environ.get('QEMU_NBD_OPTIONS'): qemu_nbd_args += os.environ['QEMU_NBD_OPTIONS'].strip().split(' ') qemu_prog = os.environ.get('QEMU_PROG', 'qemu') qemu_opts = os.environ.get('QEMU_OPTIONS', '').strip().split(' ') qsd_prog = os.environ.get('QSD_PROG', 'qemu-storage-daemon') gdb_qemu_env = os.environ.get('GDB_OPTIONS') qemu_gdb = [] if gdb_qemu_env: qemu_gdb = ['gdbserver'] + gdb_qemu_env.strip().split(' ') qemu_print = os.environ.get('PRINT_QEMU', False) imgfmt = os.environ.get('IMGFMT', 'raw') imgproto = os.environ.get('IMGPROTO', 'file') try: test_dir = os.environ['TEST_DIR'] sock_dir = os.environ['SOCK_DIR'] cachemode = os.environ['CACHEMODE'] aiomode = os.environ['AIOMODE'] qemu_default_machine = os.environ['QEMU_DEFAULT_MACHINE'] except KeyError: # We are using these variables as proxies to indicate that we're # not being run via "check". There may be other things set up by # "check" that individual test cases rely on. 
sys.stderr.write('Please run this test via the "check" script\n') sys.exit(os.EX_USAGE) qemu_valgrind = [] if os.environ.get('VALGRIND_QEMU') == "y" and \ os.environ.get('NO_VALGRIND') != "y": valgrind_logfile = "--log-file=" + test_dir # %p allows to put the valgrind process PID, since # we don't know it a priori (subprocess.Popen is # not yet invoked) valgrind_logfile += "/%p.valgrind" qemu_valgrind = ['valgrind', valgrind_logfile, '--error-exitcode=99'] luks_default_secret_object = 'secret,id=keysec0,data=' + \ os.environ.get('IMGKEYSECRET', '') luks_default_key_secret_opt = 'key-secret=keysec0' sample_img_dir = os.environ['SAMPLE_IMG_DIR'] @contextmanager def change_log_level( logger_name: str, level: int = logging.CRITICAL) -> Iterator[None]: """ Utility function for temporarily changing the log level of a logger. This can be used to silence errors that are expected or uninteresting. """ _logger = logging.getLogger(logger_name) current_level = _logger.level _logger.setLevel(level) try: yield finally: _logger.setLevel(current_level) def unarchive_sample_image(sample, fname): sample_fname = os.path.join(sample_img_dir, sample + '.bz2') with bz2.open(sample_fname) as f_in, open(fname, 'wb') as f_out: shutil.copyfileobj(f_in, f_out) def qemu_tool_popen(args: Sequence[str], connect_stderr: bool = True) -> 'subprocess.Popen[str]': stderr = subprocess.STDOUT if connect_stderr else None # pylint: disable=consider-using-with return subprocess.Popen(args, stdout=subprocess.PIPE, stderr=stderr, universal_newlines=True) def qemu_tool_pipe_and_status(tool: str, args: Sequence[str], connect_stderr: bool = True, drop_successful_output: bool = False) \ -> Tuple[str, int]: """ Run a tool and return both its output and its exit code """ with qemu_tool_popen(args, connect_stderr) as subp: output = subp.communicate()[0] if subp.returncode < 0: cmd = ' '.join(args) sys.stderr.write(f'{tool} received signal \ {-subp.returncode}: {cmd}\n') if drop_successful_output and 
subp.returncode == 0: output = '' return (output, subp.returncode) def qemu_img_create_prepare_args(args: List[str]) -> List[str]: if not args or args[0] != 'create': return list(args) args = args[1:] p = argparse.ArgumentParser(allow_abbrev=False) # -o option may be specified several times p.add_argument('-o', action='append', default=[]) p.add_argument('-f') parsed, remaining = p.parse_known_args(args) opts_list = parsed.o result = ['create'] if parsed.f is not None: result += ['-f', parsed.f] # IMGOPTS most probably contain options specific for the selected format, # like extended_l2 or compression_type for qcow2. Test may want to create # additional images in other formats that doesn't support these options. # So, use IMGOPTS only for images created in imgfmt format. imgopts = os.environ.get('IMGOPTS') if imgopts and parsed.f == imgfmt: opts_list.insert(0, imgopts) # default luks support if parsed.f == 'luks' and \ all('key-secret' not in opts for opts in opts_list): result += ['--object', luks_default_secret_object] opts_list.append(luks_default_key_secret_opt) for opts in opts_list: result += ['-o', opts] result += remaining return result def qemu_tool(*args: str, check: bool = True, combine_stdio: bool = True ) -> 'subprocess.CompletedProcess[str]': """ Run a qemu tool and return its status code and console output. :param args: full command line to run. :param check: Enforce a return code of zero. :param combine_stdio: set to False to keep stdout/stderr separated. :raise VerboseProcessError: When the return code is negative, or on any non-zero exit code when 'check=True' was provided (the default). This exception has 'stdout', 'stderr', and 'returncode' properties that may be inspected to show greater detail. If this exception is not handled, the command-line, return code, and all console output will be included at the bottom of the stack trace. :return: a CompletedProcess. This object has args, returncode, and stdout properties. 
If streams are not combined, it will also have a stderr property. """ subp = subprocess.run( args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT if combine_stdio else subprocess.PIPE, universal_newlines=True, check=False ) if check and subp.returncode or (subp.returncode < 0): raise VerboseProcessError( subp.returncode, args, output=subp.stdout, stderr=subp.stderr, ) return subp def qemu_img(*args: str, check: bool = True, combine_stdio: bool = True ) -> 'subprocess.CompletedProcess[str]': """ Run QEMU_IMG_PROG and return its status code and console output. This function always prepends QEMU_IMG_OPTIONS and may further alter the args for 'create' commands. See `qemu_tool()` for greater detail. """ full_args = qemu_img_args + qemu_img_create_prepare_args(list(args)) return qemu_tool(*full_args, check=check, combine_stdio=combine_stdio) def ordered_qmp(qmsg, conv_keys=True): # Dictionaries are not ordered prior to 3.6, therefore: if isinstance(qmsg, list): return [ordered_qmp(atom) for atom in qmsg] if isinstance(qmsg, dict): od = OrderedDict() for k, v in sorted(qmsg.items()): if conv_keys: k = k.replace('_', '-') od[k] = ordered_qmp(v, conv_keys=False) return od return qmsg def qemu_img_create(*args: str) -> 'subprocess.CompletedProcess[str]': return qemu_img('create', *args) def qemu_img_json(*args: str) -> Any: """ Run qemu-img and return its output as deserialized JSON. :raise CalledProcessError: When qemu-img crashes, or returns a non-zero exit code without producing a valid JSON document to stdout. :raise JSONDecoderError: When qemu-img returns 0, but failed to produce a valid JSON document. :return: A deserialized JSON object; probably a dict[str, Any]. """ try: res = qemu_img(*args, combine_stdio=False) except subprocess.CalledProcessError as exc: # Terminated due to signal. Don't bother. if exc.returncode < 0: raise # Commands like 'check' can return failure (exit codes 2 and 3) # to indicate command completion, but with errors found. 
For # multi-command flexibility, ignore the exact error codes and # *try* to load JSON. try: return json.loads(exc.stdout) except json.JSONDecodeError: # Nope. This thing is toast. Raise the /process/ error. pass raise return json.loads(res.stdout) def qemu_img_measure(*args: str) -> Any: return qemu_img_json("measure", "--output", "json", *args) def qemu_img_check(*args: str) -> Any: return qemu_img_json("check", "--output", "json", *args) def qemu_img_info(*args: str) -> Any: return qemu_img_json('info', "--output", "json", *args) def qemu_img_map(*args: str) -> Any: return qemu_img_json('map', "--output", "json", *args) def qemu_img_log(*args: str, check: bool = True ) -> 'subprocess.CompletedProcess[str]': result = qemu_img(*args, check=check) log(result.stdout, filters=[filter_testfiles]) return result def img_info_log(filename: str, filter_path: Optional[str] = None, use_image_opts: bool = False, extra_args: Sequence[str] = (), check: bool = True, drop_child_info: bool = True, ) -> None: args = ['info'] if use_image_opts: args.append('--image-opts') else: args += ['-f', imgfmt] args += extra_args args.append(filename) output = qemu_img(*args, check=check).stdout if not filter_path: filter_path = filename log(filter_img_info(output, filter_path, drop_child_info)) def qemu_io_wrap_args(args: Sequence[str]) -> List[str]: if '-f' in args or '--image-opts' in args: return qemu_io_args_no_fmt + list(args) else: return qemu_io_args + list(args) def qemu_io_popen(*args): return qemu_tool_popen(qemu_io_wrap_args(args)) def qemu_io(*args: str, check: bool = True, combine_stdio: bool = True ) -> 'subprocess.CompletedProcess[str]': """ Run QEMU_IO_PROG and return the status code and console output. This function always prepends either QEMU_IO_OPTIONS or QEMU_IO_OPTIONS_NO_FMT. 
""" return qemu_tool(*qemu_io_wrap_args(args), check=check, combine_stdio=combine_stdio) def qemu_io_log(*args: str, check: bool = True ) -> 'subprocess.CompletedProcess[str]': result = qemu_io(*args, check=check) log(result.stdout, filters=[filter_testfiles, filter_qemu_io]) return result class QemuIoInteractive: def __init__(self, *args): self.args = qemu_io_wrap_args(args) # We need to keep the Popen objext around, and not # close it immediately. Therefore, disable the pylint check: # pylint: disable=consider-using-with self._p = subprocess.Popen(self.args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) out = self._p.stdout.read(9) if out != 'qemu-io> ': # Most probably qemu-io just failed to start. # Let's collect the whole output and exit. out += self._p.stdout.read() self._p.wait(timeout=1) raise ValueError(out) def close(self): self._p.communicate('q\n') def _read_output(self): pattern = 'qemu-io> ' n = len(pattern) pos = 0 s = [] while pos != n: c = self._p.stdout.read(1) # check unexpected EOF assert c != '' s.append(c) if c == pattern[pos]: pos += 1 else: pos = 0 return ''.join(s[:-n]) def cmd(self, cmd): # quit command is in close(), '\n' is added automatically assert '\n' not in cmd cmd = cmd.strip() assert cmd not in ('q', 'quit') self._p.stdin.write(cmd + '\n') self._p.stdin.flush() return self._read_output() class QemuStorageDaemon: _qmp: Optional[QEMUMonitorProtocol] = None _qmpsock: Optional[str] = None # Python < 3.8 would complain if this type were not a string literal # (importing `annotations` from `__future__` would work; but not on <= 3.6) _p: 'Optional[subprocess.Popen[bytes]]' = None def __init__(self, *args: str, instance_id: str = 'a', qmp: bool = False): assert '--pidfile' not in args self.pidfile = os.path.join(test_dir, f'qsd-{instance_id}-pid') all_args = [qsd_prog] + list(args) + ['--pidfile', self.pidfile] if qmp: self._qmpsock = os.path.join(sock_dir, f'qsd-{instance_id}.sock') 
all_args += ['--chardev', f'socket,id=qmp-sock,path={self._qmpsock}', '--monitor', 'qmp-sock'] self._qmp = QEMUMonitorProtocol(self._qmpsock, server=True) # Cannot use with here, we want the subprocess to stay around # pylint: disable=consider-using-with self._p = subprocess.Popen(all_args) if self._qmp is not None: self._qmp.accept() while not os.path.exists(self.pidfile): if self._p.poll() is not None: cmd = ' '.join(all_args) raise RuntimeError( 'qemu-storage-daemon terminated with exit code ' + f'{self._p.returncode}: {cmd}') time.sleep(0.01) with open(self.pidfile, encoding='utf-8') as f: self._pid = int(f.read().strip()) assert self._pid == self._p.pid def qmp(self, cmd: str, args: Optional[Dict[str, object]] = None) \ -> QMPMessage: assert self._qmp is not None return self._qmp.cmd(cmd, args) def stop(self, kill_signal=15): self._p.send_signal(kill_signal) self._p.wait() self._p = None if self._qmp: self._qmp.close() if self._qmpsock is not None: try: os.remove(self._qmpsock) except OSError: pass try: os.remove(self.pidfile) except OSError: pass def __del__(self): if self._p is not None: self.stop(kill_signal=9) def qemu_nbd(*args): '''Run qemu-nbd in daemon mode and return the parent's exit code''' return subprocess.call(qemu_nbd_args + ['--fork'] + list(args)) def qemu_nbd_early_pipe(*args: str) -> Tuple[int, str]: '''Run qemu-nbd in daemon mode and return both the parent's exit code and its output in case of an error''' full_args = qemu_nbd_args + ['--fork'] + list(args) output, returncode = qemu_tool_pipe_and_status('qemu-nbd', full_args, connect_stderr=False) return returncode, output if returncode else '' def qemu_nbd_list_log(*args: str) -> str: '''Run qemu-nbd to list remote exports''' full_args = [qemu_nbd_prog, '-L'] + list(args) output, _ = qemu_tool_pipe_and_status('qemu-nbd', full_args) log(output, filters=[filter_testfiles, filter_nbd_exports]) return output @contextmanager def qemu_nbd_popen(*args): '''Context manager running qemu-nbd within 
the context''' pid_file = file_path("qemu_nbd_popen-nbd-pid-file") assert not os.path.exists(pid_file) cmd = list(qemu_nbd_args) cmd.extend(('--persistent', '--pid-file', pid_file)) cmd.extend(args) log('Start NBD server') with subprocess.Popen(cmd) as p: try: while not os.path.exists(pid_file): if p.poll() is not None: raise RuntimeError( "qemu-nbd terminated with exit code {}: {}" .format(p.returncode, ' '.join(cmd))) time.sleep(0.01) yield finally: if os.path.exists(pid_file): os.remove(pid_file) log('Kill NBD server') p.kill() p.wait() def compare_images(img1: str, img2: str, fmt1: str = imgfmt, fmt2: str = imgfmt) -> bool: """ Compare two images with QEMU_IMG; return True if they are identical. :raise CalledProcessError: when qemu-img crashes or returns a status code of anything other than 0 (identical) or 1 (different). """ try: qemu_img('compare', '-f', fmt1, '-F', fmt2, img1, img2) return True except subprocess.CalledProcessError as exc: if exc.returncode == 1: return False raise def create_image(name, size): '''Create a fully-allocated raw image with sector markers''' with open(name, 'wb') as file: i = 0 while i < size: sector = struct.pack('>l504xl', i // 512, i // 512) file.write(sector) i = i + 512 def image_size(img: str) -> int: """Return image's virtual size""" value = qemu_img_info('-f', imgfmt, img)['virtual-size'] if not isinstance(value, int): type_name = type(value).__name__ raise TypeError("Expected 'int' for 'virtual-size', " f"got '{value}' of type '{type_name}'") return value def is_str(val): return isinstance(val, str) test_dir_re = re.compile(r"%s" % test_dir) def filter_test_dir(msg): return test_dir_re.sub("TEST_DIR", msg) win32_re = re.compile(r"\r") def filter_win32(msg): return win32_re.sub("", msg) qemu_io_re = re.compile(r"[0-9]* ops; [0-9\/:. 
sec]* " r"\([0-9\/.inf]* [EPTGMKiBbytes]*\/sec " r"and [0-9\/.inf]* ops\/sec\)") def filter_qemu_io(msg): msg = filter_win32(msg) return qemu_io_re.sub("X ops; XX:XX:XX.X " "(XXX YYY/sec and XXX ops/sec)", msg) chown_re = re.compile(r"chown [0-9]+:[0-9]+") def filter_chown(msg): return chown_re.sub("chown UID:GID", msg) def filter_qmp_event(event): '''Filter a QMP event dict''' event = dict(event) if 'timestamp' in event: event['timestamp']['seconds'] = 'SECS' event['timestamp']['microseconds'] = 'USECS' return event def filter_qmp(qmsg, filter_fn): '''Given a string filter, filter a QMP object's values. filter_fn takes a (key, value) pair.''' # Iterate through either lists or dicts; if isinstance(qmsg, list): items = enumerate(qmsg) elif isinstance(qmsg, dict): items = qmsg.items() else: return filter_fn(None, qmsg) for k, v in items: if isinstance(v, (dict, list)): qmsg[k] = filter_qmp(v, filter_fn) else: qmsg[k] = filter_fn(k, v) return qmsg def filter_testfiles(msg): pref1 = os.path.join(test_dir, "%s-" % (os.getpid())) pref2 = os.path.join(sock_dir, "%s-" % (os.getpid())) return msg.replace(pref1, 'TEST_DIR/PID-').replace(pref2, 'SOCK_DIR/PID-') def filter_qmp_testfiles(qmsg): def _filter(_key, value): if is_str(value): return filter_testfiles(value) return value return filter_qmp(qmsg, _filter) def filter_virtio_scsi(output: str) -> str: return re.sub(r'(virtio-scsi)-(ccw|pci)', r'\1', output) def filter_qmp_virtio_scsi(qmsg): def _filter(_key, value): if is_str(value): return filter_virtio_scsi(value) return value return filter_qmp(qmsg, _filter) def filter_generated_node_ids(msg): return re.sub("#block[0-9]+", "NODE_NAME", msg) def filter_img_info(output: str, filename: str, drop_child_info: bool = True) -> str: lines = [] drop_indented = False for line in output.split('\n'): if 'disk size' in line or 'actual-size' in line: continue # Drop child node info if drop_indented: if line.startswith(' '): continue drop_indented = False if drop_child_info and "Child 
node '/" in line: drop_indented = True continue line = line.replace(filename, 'TEST_IMG') line = filter_testfiles(line) line = line.replace(imgfmt, 'IMGFMT') line = re.sub('iters: [0-9]+', 'iters: XXX', line) line = re.sub('uuid: [-a-f0-9]+', 'uuid: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX', line) line = re.sub('cid: [0-9]+', 'cid: XXXXXXXXXX', line) line = re.sub('(compression type: )(zlib|zstd)', r'\1COMPRESSION_TYPE', line) lines.append(line) return '\n'.join(lines) def filter_imgfmt(msg): return msg.replace(imgfmt, 'IMGFMT') def filter_qmp_imgfmt(qmsg): def _filter(_key, value): if is_str(value): return filter_imgfmt(value) return value return filter_qmp(qmsg, _filter) def filter_nbd_exports(output: str) -> str: return re.sub(r'((min|opt|max) block): [0-9]+', r'\1: XXX', output) Msg = TypeVar('Msg', Dict[str, Any], List[Any], str) def log(msg: Msg, filters: Iterable[Callable[[Msg], Msg]] = (), indent: Optional[int] = None) -> None: """ Logs either a string message or a JSON serializable message (like QMP). If indent is provided, JSON serializable messages are pretty-printed. """ for flt in filters: msg = flt(msg) if isinstance(msg, (dict, list)): # Don't sort if it's already sorted do_sort = not isinstance(msg, OrderedDict) test_logger.info(json.dumps(msg, sort_keys=do_sort, indent=indent)) else: test_logger.info(msg) class Timeout: def __init__(self, seconds, errmsg="Timeout"): self.seconds = seconds self.errmsg = errmsg def __enter__(self): if qemu_gdb or qemu_valgrind: return self signal.signal(signal.SIGALRM, self.timeout) signal.setitimer(signal.ITIMER_REAL, self.seconds) return self def __exit__(self, exc_type, value, traceback): if qemu_gdb or qemu_valgrind: return False signal.setitimer(signal.ITIMER_REAL, 0) return False def timeout(self, signum, frame): raise TimeoutError(self.errmsg) def file_pattern(name): return "{0}-{1}".format(os.getpid(), name) class FilePath: """ Context manager generating multiple file names. 
The generated files are removed when exiting the context. Example usage: with FilePath('a.img', 'b.img') as (img_a, img_b): # Use img_a and img_b here... # a.img and b.img are automatically removed here. By default images are created in iotests.test_dir. To create sockets use iotests.sock_dir: with FilePath('a.sock', base_dir=iotests.sock_dir) as sock: For convenience, calling with one argument yields a single file instead of a tuple with one item. """ def __init__(self, *names, base_dir=test_dir): self.paths = [os.path.join(base_dir, file_pattern(name)) for name in names] def __enter__(self): if len(self.paths) == 1: return self.paths[0] else: return self.paths def __exit__(self, exc_type, exc_val, exc_tb): for path in self.paths: try: os.remove(path) except OSError: pass return False def try_remove(img): try: os.remove(img) except OSError: pass def file_path_remover(): for path in reversed(file_path_remover.paths): try_remove(path) def file_path(*names, base_dir=test_dir): ''' Another way to get auto-generated filename that cleans itself up. 
Use is as simple as: img_a, img_b = file_path('a.img', 'b.img') sock = file_path('socket') ''' if not hasattr(file_path_remover, 'paths'): file_path_remover.paths = [] atexit.register(file_path_remover) paths = [] for name in names: filename = file_pattern(name) path = os.path.join(base_dir, filename) file_path_remover.paths.append(path) paths.append(path) return paths[0] if len(paths) == 1 else paths def remote_filename(path): if imgproto == 'file': return path elif imgproto == 'ssh': return "ssh://%[email protected]:22%s" % (os.environ.get('USER'), path) else: raise ValueError("Protocol %s not supported" % (imgproto)) class VM(qtest.QEMUQtestMachine): '''A QEMU VM''' def __init__(self, path_suffix=''): name = "qemu%s-%d" % (path_suffix, os.getpid()) timer = 15.0 if not (qemu_gdb or qemu_valgrind) else None if qemu_gdb and qemu_valgrind: sys.stderr.write('gdb and valgrind are mutually exclusive\n') sys.exit(1) wrapper = qemu_gdb if qemu_gdb else qemu_valgrind super().__init__(qemu_prog, qemu_opts, wrapper=wrapper, name=name, base_temp_dir=test_dir, sock_dir=sock_dir, qmp_timer=timer) self._num_drives = 0 def _post_shutdown(self) -> None: super()._post_shutdown() if not qemu_valgrind or not self._popen: return valgrind_filename = f"{test_dir}/{self._popen.pid}.valgrind" if self.exitcode() == 99: with open(valgrind_filename, encoding='utf-8') as f: print(f.read()) else: os.remove(valgrind_filename) def _pre_launch(self) -> None: super()._pre_launch() if qemu_print: # set QEMU binary output to stdout self._close_qemu_log_file() def add_object(self, opts): self._args.append('-object') self._args.append(opts) return self def add_device(self, opts): self._args.append('-device') self._args.append(opts) return self def add_drive_raw(self, opts): self._args.append('-drive') self._args.append(opts) return self def add_drive(self, path, opts='', interface='virtio', img_format=imgfmt): '''Add a virtio-blk drive to the VM''' options = ['if=%s' % interface, 'id=drive%d' % 
self._num_drives] if path is not None: options.append('file=%s' % path) options.append('format=%s' % img_format) options.append('cache=%s' % cachemode) options.append('aio=%s' % aiomode) if opts: options.append(opts) if img_format == 'luks' and 'key-secret' not in opts: # default luks support if luks_default_secret_object not in self._args: self.add_object(luks_default_secret_object) options.append(luks_default_key_secret_opt) self._args.append('-drive') self._args.append(','.join(options)) self._num_drives += 1 return self def add_blockdev(self, opts): self._args.append('-blockdev') if isinstance(opts, str): self._args.append(opts) else: self._args.append(','.join(opts)) return self def add_incoming(self, addr): self._args.append('-incoming') self._args.append(addr) return self def hmp(self, command_line: str, use_log: bool = False) -> QMPMessage: cmd = 'human-monitor-command' kwargs: Dict[str, Any] = {'command-line': command_line} if use_log: return self.qmp_log(cmd, **kwargs) else: return self.qmp(cmd, **kwargs) def pause_drive(self, drive: str, event: Optional[str] = None) -> None: """Pause drive r/w operations""" if not event: self.pause_drive(drive, "read_aio") self.pause_drive(drive, "write_aio") return self.hmp(f'qemu-io {drive} "break {event} bp_{drive}"') def resume_drive(self, drive: str) -> None: """Resume drive r/w operations""" self.hmp(f'qemu-io {drive} "remove_break bp_{drive}"') def hmp_qemu_io(self, drive: str, cmd: str, use_log: bool = False, qdev: bool = False) -> QMPMessage: """Write to a given drive using an HMP command""" d = '-d ' if qdev else '' return self.hmp(f'qemu-io {d}{drive} "{cmd}"', use_log=use_log) def flatten_qmp_object(self, obj, output=None, basestr=''): if output is None: output = {} if isinstance(obj, list): for i, item in enumerate(obj): self.flatten_qmp_object(item, output, basestr + str(i) + '.') elif isinstance(obj, dict): for key in obj: self.flatten_qmp_object(obj[key], output, basestr + key + '.') else: 
output[basestr[:-1]] = obj # Strip trailing '.' return output def qmp_to_opts(self, obj): obj = self.flatten_qmp_object(obj) output_list = [] for key in obj: output_list += [key + '=' + obj[key]] return ','.join(output_list) def get_qmp_events_filtered(self, wait=60.0): result = [] for ev in self.get_qmp_events(wait=wait): result.append(filter_qmp_event(ev)) return result def qmp_log(self, cmd, filters=(), indent=None, **kwargs): full_cmd = OrderedDict(( ("execute", cmd), ("arguments", ordered_qmp(kwargs)) )) log(full_cmd, filters, indent=indent) result = self.qmp(cmd, **kwargs) log(result, filters, indent=indent) return result # Returns None on success, and an error string on failure def run_job(self, job: str, auto_finalize: bool = True, auto_dismiss: bool = False, pre_finalize: Optional[Callable[[], None]] = None, cancel: bool = False, wait: float = 60.0, filters: Iterable[Callable[[Any], Any]] = (), ) -> Optional[str]: """ run_job moves a job from creation through to dismissal. :param job: String. ID of recently-launched job :param auto_finalize: Bool. True if the job was launched with auto_finalize. Defaults to True. :param auto_dismiss: Bool. True if the job was launched with auto_dismiss=True. Defaults to False. :param pre_finalize: Callback. A callable that takes no arguments to be invoked prior to issuing job-finalize, if any. :param cancel: Bool. When true, cancels the job after the pre_finalize callback. :param wait: Float. Timeout value specifying how long to wait for any event, in seconds. Defaults to 60.0. 
""" match_device = {'data': {'device': job}} match_id = {'data': {'id': job}} events = [ ('BLOCK_JOB_COMPLETED', match_device), ('BLOCK_JOB_CANCELLED', match_device), ('BLOCK_JOB_ERROR', match_device), ('BLOCK_JOB_READY', match_device), ('BLOCK_JOB_PENDING', match_id), ('JOB_STATUS_CHANGE', match_id) ] error = None while True: ev = filter_qmp_event(self.events_wait(events, timeout=wait)) if ev['event'] != 'JOB_STATUS_CHANGE': log(ev, filters=filters) continue status = ev['data']['status'] if status == 'aborting': result = self.qmp('query-jobs') for j in result['return']: if j['id'] == job: error = j['error'] log('Job failed: %s' % (j['error']), filters=filters) elif status == 'ready': self.qmp_log('job-complete', id=job, filters=filters) elif status == 'pending' and not auto_finalize: if pre_finalize: pre_finalize() if cancel: self.qmp_log('job-cancel', id=job, filters=filters) else: self.qmp_log('job-finalize', id=job, filters=filters) elif status == 'concluded' and not auto_dismiss: self.qmp_log('job-dismiss', id=job, filters=filters) elif status == 'null': return error # Returns None on success, and an error string on failure def blockdev_create(self, options, job_id='job0', filters=None): if filters is None: filters = [filter_qmp_testfiles] result = self.qmp_log('blockdev-create', filters=filters, job_id=job_id, options=options) if 'return' in result: assert result['return'] == {} job_result = self.run_job(job_id, filters=filters) else: job_result = result['error'] log("") return job_result def enable_migration_events(self, name): log('Enabling migration QMP events on %s...' 
% name) log(self.qmp('migrate-set-capabilities', capabilities=[ { 'capability': 'events', 'state': True } ])) def wait_migration(self, expect_runstate: Optional[str]) -> bool: while True: event = self.event_wait('MIGRATION') # We use the default timeout, and with a timeout, event_wait() # never returns None assert event log(event, filters=[filter_qmp_event]) if event['data']['status'] in ('completed', 'failed'): break if event['data']['status'] == 'completed': # The event may occur in finish-migrate, so wait for the expected # post-migration runstate runstate = None while runstate != expect_runstate: runstate = self.qmp('query-status')['return']['status'] return True else: return False def node_info(self, node_name): nodes = self.qmp('query-named-block-nodes') for x in nodes['return']: if x['node-name'] == node_name: return x return None def query_bitmaps(self): res = self.qmp("query-named-block-nodes") return {device['node-name']: device['dirty-bitmaps'] for device in res['return'] if 'dirty-bitmaps' in device} def get_bitmap(self, node_name, bitmap_name, recording=None, bitmaps=None): """ get a specific bitmap from the object returned by query_bitmaps. :param recording: If specified, filter results by the specified value. :param bitmaps: If specified, use it instead of call query_bitmaps() """ if bitmaps is None: bitmaps = self.query_bitmaps() for bitmap in bitmaps[node_name]: if bitmap.get('name', '') == bitmap_name: if recording is None or bitmap.get('recording') == recording: return bitmap return None def check_bitmap_status(self, node_name, bitmap_name, fields): ret = self.get_bitmap(node_name, bitmap_name) return fields.items() <= ret.items() def assert_block_path(self, root, path, expected_node, graph=None): """ Check whether the node under the given path in the block graph is @expected_node. @root is the node name of the node where the @path is rooted. @path is a string that consists of child names separated by slashes. It must begin with a slash. 
Examples for @root + @path: - root="qcow2-node", path="/backing/file" - root="quorum-node", path="/children.2/file" Hypothetically, @path could be empty, in which case it would point to @root. However, in practice this case is not useful and hence not allowed. @expected_node may be None. (All elements of the path but the leaf must still exist.) @graph may be None or the result of an x-debug-query-block-graph call that has already been performed. """ if graph is None: graph = self.qmp('x-debug-query-block-graph')['return'] iter_path = iter(path.split('/')) # Must start with a / assert next(iter_path) == '' node = next((node for node in graph['nodes'] if node['name'] == root), None) # An empty @path is not allowed, so the root node must be present assert node is not None, 'Root node %s not found' % root for child_name in iter_path: assert node is not None, 'Cannot follow path %s%s' % (root, path) try: node_id = next(edge['child'] for edge in graph['edges'] if (edge['parent'] == node['id'] and edge['name'] == child_name)) node = next(node for node in graph['nodes'] if node['id'] == node_id) except StopIteration: node = None if node is None: assert expected_node is None, \ 'No node found under %s (but expected %s)' % \ (path, expected_node) else: assert node['name'] == expected_node, \ 'Found node %s under %s (but expected %s)' % \ (node['name'], path, expected_node) index_re = re.compile(r'([^\[]+)\[([^\]]+)\]') class QMPTestCase(unittest.TestCase): '''Abstract base class for QMP test cases''' def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Many users of this class set a VM property we rely on heavily # in the methods below. 
self.vm = None def dictpath(self, d, path): '''Traverse a path in a nested dict''' for component in path.split('/'): m = index_re.match(component) if m: component, idx = m.groups() idx = int(idx) if not isinstance(d, dict) or component not in d: self.fail(f'failed path traversal for "{path}" in "{d}"') d = d[component] if m: if not isinstance(d, list): self.fail(f'path component "{component}" in "{path}" ' f'is not a list in "{d}"') try: d = d[idx] except IndexError: self.fail(f'invalid index "{idx}" in path "{path}" ' f'in "{d}"') return d def assert_qmp_absent(self, d, path): try: result = self.dictpath(d, path) except AssertionError: return self.fail('path "%s" has value "%s"' % (path, str(result))) def assert_qmp(self, d, path, value): '''Assert that the value for a specific path in a QMP dict matches. When given a list of values, assert that any of them matches.''' result = self.dictpath(d, path) # [] makes no sense as a list of valid values, so treat it as # an actual single value. 
if isinstance(value, list) and value != []: for v in value: if result == v: return self.fail('no match for "%s" in %s' % (str(result), str(value))) else: self.assertEqual(result, value, '"%s" is "%s", expected "%s"' % (path, str(result), str(value))) def assert_no_active_block_jobs(self): result = self.vm.qmp('query-block-jobs') self.assert_qmp(result, 'return', []) def assert_has_block_node(self, node_name=None, file_name=None): """Issue a query-named-block-nodes and assert node_name and/or file_name is present in the result""" def check_equal_or_none(a, b): return a is None or b is None or a == b assert node_name or file_name result = self.vm.qmp('query-named-block-nodes') for x in result["return"]: if check_equal_or_none(x.get("node-name"), node_name) and \ check_equal_or_none(x.get("file"), file_name): return self.fail("Cannot find %s %s in result:\n%s" % (node_name, file_name, result)) def assert_json_filename_equal(self, json_filename, reference): '''Asserts that the given filename is a json: filename and that its content is equal to the given reference object''' self.assertEqual(json_filename[:5], 'json:') self.assertEqual( self.vm.flatten_qmp_object(json.loads(json_filename[5:])), self.vm.flatten_qmp_object(reference) ) def cancel_and_wait(self, drive='drive0', force=False, resume=False, wait=60.0): '''Cancel a block job and wait for it to finish, returning the event''' result = self.vm.qmp('block-job-cancel', device=drive, force=force) self.assert_qmp(result, 'return', {}) if resume: self.vm.resume_drive(drive) cancelled = False result = None while not cancelled: for event in self.vm.get_qmp_events(wait=wait): if event['event'] == 'BLOCK_JOB_COMPLETED' or \ event['event'] == 'BLOCK_JOB_CANCELLED': self.assert_qmp(event, 'data/device', drive) result = event cancelled = True elif event['event'] == 'JOB_STATUS_CHANGE': self.assert_qmp(event, 'data/id', drive) self.assert_no_active_block_jobs() return result def wait_until_completed(self, drive='drive0', 
check_offset=True, wait=60.0, error=None): '''Wait for a block job to finish, returning the event''' while True: for event in self.vm.get_qmp_events(wait=wait): if event['event'] == 'BLOCK_JOB_COMPLETED': self.assert_qmp(event, 'data/device', drive) if error is None: self.assert_qmp_absent(event, 'data/error') if check_offset: self.assert_qmp(event, 'data/offset', event['data']['len']) else: self.assert_qmp(event, 'data/error', error) self.assert_no_active_block_jobs() return event if event['event'] == 'JOB_STATUS_CHANGE': self.assert_qmp(event, 'data/id', drive) def wait_ready(self, drive='drive0'): """Wait until a BLOCK_JOB_READY event, and return the event.""" return self.vm.events_wait([ ('BLOCK_JOB_READY', {'data': {'type': 'mirror', 'device': drive}}), ('BLOCK_JOB_READY', {'data': {'type': 'commit', 'device': drive}}) ]) def wait_ready_and_cancel(self, drive='drive0'): self.wait_ready(drive=drive) event = self.cancel_and_wait(drive=drive) self.assertEqual(event['event'], 'BLOCK_JOB_COMPLETED') self.assert_qmp(event, 'data/type', 'mirror') self.assert_qmp(event, 'data/offset', event['data']['len']) def complete_and_wait(self, drive='drive0', wait_ready=True, completion_error=None): '''Complete a block job and wait for it to finish''' if wait_ready: self.wait_ready(drive=drive) result = self.vm.qmp('block-job-complete', device=drive) self.assert_qmp(result, 'return', {}) event = self.wait_until_completed(drive=drive, error=completion_error) self.assertTrue(event['data']['type'] in ['mirror', 'commit']) def pause_wait(self, job_id='job0'): with Timeout(3, "Timeout waiting for job to pause"): while True: result = self.vm.qmp('query-block-jobs') found = False for job in result['return']: if job['device'] == job_id: found = True if job['paused'] and not job['busy']: return job break assert found def pause_job(self, job_id='job0', wait=True): result = self.vm.qmp('block-job-pause', device=job_id) self.assert_qmp(result, 'return', {}) if wait: return 
self.pause_wait(job_id) return result def case_skip(self, reason): '''Skip this test case''' case_notrun(reason) self.skipTest(reason) def notrun(reason): '''Skip this test suite''' # Each test in qemu-iotests has a number ("seq") seq = os.path.basename(sys.argv[0]) with open('%s/%s.notrun' % (test_dir, seq), 'w', encoding='utf-8') \ as outfile: outfile.write(reason + '\n') logger.warning("%s not run: %s", seq, reason) sys.exit(0) def case_notrun(reason): '''Mark this test case as not having been run (without actually skipping it, that is left to the caller). See QMPTestCase.case_skip() for a variant that actually skips the current test case.''' # Each test in qemu-iotests has a number ("seq") seq = os.path.basename(sys.argv[0]) with open('%s/%s.casenotrun' % (test_dir, seq), 'a', encoding='utf-8') \ as outfile: outfile.write(' [case not run] ' + reason + '\n') def _verify_image_format(supported_fmts: Sequence[str] = (), unsupported_fmts: Sequence[str] = ()) -> None: if 'generic' in supported_fmts and \ os.environ.get('IMGFMT_GENERIC', 'true') == 'true': # similar to # _supported_fmt generic # for bash tests supported_fmts = () not_sup = supported_fmts and (imgfmt not in supported_fmts) if not_sup or (imgfmt in unsupported_fmts): notrun('not suitable for this image format: %s' % imgfmt) if imgfmt == 'luks': verify_working_luks() def _verify_protocol(supported: Sequence[str] = (), unsupported: Sequence[str] = ()) -> None: assert not (supported and unsupported) if 'generic' in supported: return not_sup = supported and (imgproto not in supported) if not_sup or (imgproto in unsupported): notrun('not suitable for this protocol: %s' % imgproto) def _verify_platform(supported: Sequence[str] = (), unsupported: Sequence[str] = ()) -> None: if any((sys.platform.startswith(x) for x in unsupported)): notrun('not suitable for this OS: %s' % sys.platform) if supported: if not any((sys.platform.startswith(x) for x in supported)): notrun('not suitable for this OS: %s' % 
sys.platform) def _verify_cache_mode(supported_cache_modes: Sequence[str] = ()) -> None: if supported_cache_modes and (cachemode not in supported_cache_modes): notrun('not suitable for this cache mode: %s' % cachemode) def _verify_aio_mode(supported_aio_modes: Sequence[str] = ()) -> None: if supported_aio_modes and (aiomode not in supported_aio_modes): notrun('not suitable for this aio mode: %s' % aiomode) def _verify_formats(required_formats: Sequence[str] = ()) -> None: usf_list = list(set(required_formats) - set(supported_formats())) if usf_list: notrun(f'formats {usf_list} are not whitelisted') def _verify_virtio_blk() -> None: out = qemu_pipe('-M', 'none', '-device', 'help') if 'virtio-blk' not in out: notrun('Missing virtio-blk in QEMU binary') def _verify_virtio_scsi_pci_or_ccw() -> None: out = qemu_pipe('-M', 'none', '-device', 'help') if 'virtio-scsi-pci' not in out and 'virtio-scsi-ccw' not in out: notrun('Missing virtio-scsi-pci or virtio-scsi-ccw in QEMU binary') def _verify_imgopts(unsupported: Sequence[str] = ()) -> None: imgopts = os.environ.get('IMGOPTS') # One of usage examples for IMGOPTS is "data_file=$TEST_IMG.ext_data_file" # but it supported only for bash tests. We don't have a concept of global # TEST_IMG in iotests.py, not saying about somehow parsing $variables. # So, for simplicity let's just not support any IMGOPTS with '$' inside. unsup = list(unsupported) + ['$'] if imgopts and any(x in imgopts for x in unsup): notrun(f'not suitable for this imgopts: {imgopts}') def supports_quorum() -> bool: return 'quorum' in qemu_img('--help').stdout def verify_quorum(): '''Skip test suite if quorum support is not available''' if not supports_quorum(): notrun('quorum support missing') def has_working_luks() -> Tuple[bool, str]: """ Check whether our LUKS driver can actually create images (this extends to LUKS encryption for qcow2). If not, return the reason why. 
""" img_file = f'{test_dir}/luks-test.luks' res = qemu_img('create', '-f', 'luks', '--object', luks_default_secret_object, '-o', luks_default_key_secret_opt, '-o', 'iter-time=10', img_file, '1G', check=False) try: os.remove(img_file) except OSError: pass if res.returncode: reason = res.stdout for line in res.stdout.splitlines(): if img_file + ':' in line: reason = line.split(img_file + ':', 1)[1].strip() break return (False, reason) else: return (True, '') def verify_working_luks(): """ Skip test suite if LUKS does not work """ (working, reason) = has_working_luks() if not working: notrun(reason) def supports_qcow2_zstd_compression() -> bool: img_file = f'{test_dir}/qcow2-zstd-test.qcow2' res = qemu_img('create', '-f', 'qcow2', '-o', 'compression_type=zstd', img_file, '0', check=False) try: os.remove(img_file) except OSError: pass if res.returncode == 1 and \ "'compression-type' does not accept value 'zstd'" in res.stdout: return False else: return True def verify_qcow2_zstd_compression(): if not supports_qcow2_zstd_compression(): notrun('zstd compression not supported') def qemu_pipe(*args: str) -> str: """ Run qemu with an option to print something and exit (e.g. a help option). :return: QEMU's stdout output. 
""" full_args = [qemu_prog] + qemu_opts + list(args) output, _ = qemu_tool_pipe_and_status('qemu', full_args) return output def supported_formats(read_only=False): '''Set 'read_only' to True to check ro-whitelist Otherwise, rw-whitelist is checked''' if not hasattr(supported_formats, "formats"): supported_formats.formats = {} if read_only not in supported_formats.formats: format_message = qemu_pipe("-drive", "format=help") line = 1 if read_only else 0 supported_formats.formats[read_only] = \ format_message.splitlines()[line].split(":")[1].split() return supported_formats.formats[read_only] def skip_if_unsupported(required_formats=(), read_only=False): '''Skip Test Decorator Runs the test if all the required formats are whitelisted''' def skip_test_decorator(func): def func_wrapper(test_case: QMPTestCase, *args: List[Any], **kwargs: Dict[str, Any]) -> None: if callable(required_formats): fmts = required_formats(test_case) else: fmts = required_formats usf_list = list(set(fmts) - set(supported_formats(read_only))) if usf_list: msg = f'{test_case}: formats {usf_list} are not whitelisted' test_case.case_skip(msg) else: func(test_case, *args, **kwargs) return func_wrapper return skip_test_decorator def skip_for_formats(formats: Sequence[str] = ()) \ -> Callable[[Callable[[QMPTestCase, List[Any], Dict[str, Any]], None]], Callable[[QMPTestCase, List[Any], Dict[str, Any]], None]]: '''Skip Test Decorator Skips the test for the given formats''' def skip_test_decorator(func): def func_wrapper(test_case: QMPTestCase, *args: List[Any], **kwargs: Dict[str, Any]) -> None: if imgfmt in formats: msg = f'{test_case}: Skipped for format {imgfmt}' test_case.case_skip(msg) else: func(test_case, *args, **kwargs) return func_wrapper return skip_test_decorator def skip_if_user_is_root(func): '''Skip Test Decorator Runs the test only without root permissions''' def func_wrapper(*args, **kwargs): if os.getuid() == 0: case_notrun('{}: cannot be run as root'.format(args[0])) return None else: 
return func(*args, **kwargs) return func_wrapper # We need to filter out the time taken from the output so that # qemu-iotest can reliably diff the results against master output, # and hide skipped tests from the reference output. class ReproducibleTestResult(unittest.TextTestResult): def addSkip(self, test, reason): # Same as TextTestResult, but print dot instead of "s" unittest.TestResult.addSkip(self, test, reason) if self.showAll: self.stream.writeln("skipped {0!r}".format(reason)) elif self.dots: self.stream.write(".") self.stream.flush() class ReproducibleStreamWrapper: def __init__(self, stream: TextIO): self.stream = stream def __getattr__(self, attr): if attr in ('stream', '__getstate__'): raise AttributeError(attr) return getattr(self.stream, attr) def write(self, arg=None): arg = re.sub(r'Ran (\d+) tests? in [\d.]+s', r'Ran \1 tests', arg) arg = re.sub(r' \(skipped=\d+\)', r'', arg) self.stream.write(arg) class ReproducibleTestRunner(unittest.TextTestRunner): def __init__(self, stream: Optional[TextIO] = None, resultclass: Type[unittest.TestResult] = ReproducibleTestResult, **kwargs: Any) -> None: rstream = ReproducibleStreamWrapper(stream or sys.stdout) super().__init__(stream=rstream, # type: ignore descriptions=True, resultclass=resultclass, **kwargs) def execute_unittest(argv: List[str], debug: bool = False) -> None: """Executes unittests within the calling module.""" # Some tests have warnings, especially ResourceWarnings for unclosed # files and sockets. Ignore them for now to ensure reproducibility of # the test output. 
unittest.main(argv=argv, testRunner=ReproducibleTestRunner, verbosity=2 if debug else 1, warnings=None if sys.warnoptions else 'ignore') def execute_setup_common(supported_fmts: Sequence[str] = (), supported_platforms: Sequence[str] = (), supported_cache_modes: Sequence[str] = (), supported_aio_modes: Sequence[str] = (), unsupported_fmts: Sequence[str] = (), supported_protocols: Sequence[str] = (), unsupported_protocols: Sequence[str] = (), required_fmts: Sequence[str] = (), unsupported_imgopts: Sequence[str] = ()) -> bool: """ Perform necessary setup for either script-style or unittest-style tests. :return: Bool; Whether or not debug mode has been requested via the CLI. """ # Note: Python 3.6 and pylint do not like 'Collection' so use 'Sequence'. debug = '-d' in sys.argv if debug: sys.argv.remove('-d') logging.basicConfig(level=(logging.DEBUG if debug else logging.WARN)) _verify_image_format(supported_fmts, unsupported_fmts) _verify_protocol(supported_protocols, unsupported_protocols) _verify_platform(supported=supported_platforms) _verify_cache_mode(supported_cache_modes) _verify_aio_mode(supported_aio_modes) _verify_formats(required_fmts) _verify_virtio_blk() _verify_imgopts(unsupported_imgopts) return debug def execute_test(*args, test_function=None, **kwargs): """Run either unittest or script-style tests.""" debug = execute_setup_common(*args, **kwargs) if not test_function: execute_unittest(sys.argv, debug) else: test_function() def activate_logging(): """Activate iotests.log() output to stdout for script-style tests.""" handler = logging.StreamHandler(stream=sys.stdout) formatter = logging.Formatter('%(message)s') handler.setFormatter(formatter) test_logger.addHandler(handler) test_logger.setLevel(logging.INFO) test_logger.propagate = False # This is called from script-style iotests without a single point of entry def script_initialize(*args, **kwargs): """Initialize script-style tests without running any tests.""" activate_logging() 
execute_setup_common(*args, **kwargs) # This is called from script-style iotests with a single point of entry def script_main(test_function, *args, **kwargs): """Run script-style tests outside of the unittest framework""" activate_logging() execute_test(*args, test_function=test_function, **kwargs) # This is called from unittest style iotests def main(*args, **kwargs): """Run tests using the unittest framework""" execute_test(*args, **kwargs)
60,397
34.696217
79
py
qemu
qemu-master/tests/qemu-iotests/linters.py
# Copyright (C) 2020 Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os import re import subprocess import sys from typing import List, Mapping, Optional # TODO: Empty this list! SKIP_FILES = ( '030', '040', '041', '044', '045', '055', '056', '057', '065', '093', '096', '118', '124', '132', '136', '139', '147', '148', '149', '151', '152', '155', '163', '165', '194', '196', '202', '203', '205', '206', '207', '208', '210', '211', '212', '213', '216', '218', '219', '224', '228', '234', '235', '236', '237', '238', '240', '242', '245', '246', '248', '255', '256', '257', '258', '260', '262', '264', '266', '274', '277', '280', '281', '295', '296', '298', '299', '302', '303', '304', '307', 'nbd-fault-injector.py', 'qcow2.py', 'qcow2_format.py', 'qed.py' ) def is_python_file(filename): if not os.path.isfile(filename): return False if filename.endswith('.py'): return True with open(filename, encoding='utf-8') as f: try: first_line = f.readline() return re.match('^#!.*python', first_line) is not None except UnicodeDecodeError: # Ignore binary files return False def get_test_files() -> List[str]: named_tests = [f'tests/{entry}' for entry in os.listdir('tests')] check_tests = set(os.listdir('.') + named_tests) - set(SKIP_FILES) return list(filter(is_python_file, check_tests)) def run_linter( tool: str, args: List[str], env: Optional[Mapping[str, str]] = None, suppress_output: bool = 
False, ) -> None: """ Run a python-based linting tool. :param suppress_output: If True, suppress all stdout/stderr output. :raise CalledProcessError: If the linter process exits with failure. """ subprocess.run( ('python3', '-m', tool, *args), env=env, check=True, stdout=subprocess.PIPE if suppress_output else None, stderr=subprocess.STDOUT if suppress_output else None, universal_newlines=True, ) def main() -> None: """ Used by the Python CI system as an entry point to run these linters. """ def show_usage() -> None: print(f"Usage: {sys.argv[0]} < --mypy | --pylint >", file=sys.stderr) sys.exit(1) if len(sys.argv) != 2: show_usage() files = get_test_files() if sys.argv[1] == '--pylint': run_linter('pylint', files) elif sys.argv[1] == '--mypy': # mypy bug #9852; disable incremental checking as a workaround. args = ['--no-incremental'] + files run_linter('mypy', args) else: print(f"Unrecognized argument: '{sys.argv[1]}'", file=sys.stderr) show_usage() if __name__ == '__main__': main()
3,368
30.783019
77
py
qemu
qemu-master/tests/qapi-schema/test-qapi.py
#!/usr/bin/env python3 # # QAPI parser test harness # # Copyright (c) 2013 Red Hat Inc. # # Authors: # Markus Armbruster <[email protected]> # # This work is licensed under the terms of the GNU GPL, version 2 or later. # See the COPYING file in the top-level directory. # import argparse import difflib import os import sys from io import StringIO from qapi.error import QAPIError from qapi.schema import QAPISchema, QAPISchemaVisitor class QAPISchemaTestVisitor(QAPISchemaVisitor): def visit_module(self, name): print('module %s' % name) def visit_include(self, name, info): print('include %s' % name) def visit_enum_type(self, name, info, ifcond, features, members, prefix): print('enum %s' % name) if prefix: print(' prefix %s' % prefix) for m in members: print(' member %s' % m.name) self._print_if(m.ifcond, indent=8) self._print_features(m.features, indent=8) self._print_if(ifcond) self._print_features(features) def visit_array_type(self, name, info, ifcond, element_type): if not info: return # suppress built-in arrays print('array %s %s' % (name, element_type.name)) self._print_if(ifcond) def visit_object_type(self, name, info, ifcond, features, base, members, variants): print('object %s' % name) if base: print(' base %s' % base.name) for m in members: print(' member %s: %s optional=%s' % (m.name, m.type.name, m.optional)) self._print_if(m.ifcond, 8) self._print_features(m.features, indent=8) self._print_variants(variants) self._print_if(ifcond) self._print_features(features) def visit_alternate_type(self, name, info, ifcond, features, variants): print('alternate %s' % name) self._print_variants(variants) self._print_if(ifcond) self._print_features(features) def visit_command(self, name, info, ifcond, features, arg_type, ret_type, gen, success_response, boxed, allow_oob, allow_preconfig, coroutine): print('command %s %s -> %s' % (name, arg_type and arg_type.name, ret_type and ret_type.name)) print(' gen=%s success_response=%s boxed=%s oob=%s preconfig=%s%s' % (gen, 
success_response, boxed, allow_oob, allow_preconfig, " coroutine=True" if coroutine else "")) self._print_if(ifcond) self._print_features(features) def visit_event(self, name, info, ifcond, features, arg_type, boxed): print('event %s %s' % (name, arg_type and arg_type.name)) print(' boxed=%s' % boxed) self._print_if(ifcond) self._print_features(features) @staticmethod def _print_variants(variants): if variants: print(' tag %s' % variants.tag_member.name) for v in variants.variants: print(' case %s: %s' % (v.name, v.type.name)) QAPISchemaTestVisitor._print_if(v.ifcond, indent=8) @staticmethod def _print_if(ifcond, indent=4): # TODO Drop this hack after replacing OrderedDict by plain # dict (requires Python 3.7) def _massage(subcond): if isinstance(subcond, str): return subcond if isinstance(subcond, list): return [_massage(val) for val in subcond] return {key: _massage(val) for key, val in subcond.items()} if ifcond.is_present(): print('%sif %s' % (' ' * indent, _massage(ifcond.ifcond))) @classmethod def _print_features(cls, features, indent=4): if features: for f in features: print('%sfeature %s' % (' ' * indent, f.name)) cls._print_if(f.ifcond, indent + 4) def test_frontend(fname): schema = QAPISchema(fname) schema.visit(QAPISchemaTestVisitor()) for doc in schema.docs: if doc.symbol: print('doc symbol=%s' % doc.symbol) else: print('doc freeform') print(' body=\n%s' % doc.body.text) for arg, section in doc.args.items(): print(' arg=%s\n%s' % (arg, section.text)) for feat, section in doc.features.items(): print(' feature=%s\n%s' % (feat, section.text)) for section in doc.sections: print(' section=%s\n%s' % (section.name, section.text)) def open_test_result(dir_name, file_name, update): mode = 'r+' if update else 'r' try: fp = open(os.path.join(dir_name, file_name), mode) except FileNotFoundError: if not update: raise fp = open(os.path.join(dir_name, file_name), 'w+') return fp def test_and_diff(test_name, dir_name, update): sys.stdout = StringIO() try: 
test_frontend(os.path.join(dir_name, test_name + '.json')) except QAPIError as err: errstr = str(err) + '\n' if dir_name: errstr = errstr.replace(dir_name + '/', '') actual_err = errstr.splitlines(True) else: actual_err = [] finally: actual_out = sys.stdout.getvalue().splitlines(True) sys.stdout.close() sys.stdout = sys.__stdout__ try: outfp = open_test_result(dir_name, test_name + '.out', update) errfp = open_test_result(dir_name, test_name + '.err', update) expected_out = outfp.readlines() expected_err = errfp.readlines() except OSError as err: print("%s: can't open '%s': %s" % (sys.argv[0], err.filename, err.strerror), file=sys.stderr) return 2 if actual_out == expected_out and actual_err == expected_err: return 0 print("%s %s" % (test_name, 'UPDATE' if update else 'FAIL'), file=sys.stderr) out_diff = difflib.unified_diff(expected_out, actual_out, outfp.name) err_diff = difflib.unified_diff(expected_err, actual_err, errfp.name) sys.stdout.writelines(out_diff) sys.stdout.writelines(err_diff) if not update: return 1 try: outfp.truncate(0) outfp.seek(0) outfp.writelines(actual_out) errfp.truncate(0) errfp.seek(0) errfp.writelines(actual_err) except OSError as err: print("%s: can't write '%s': %s" % (sys.argv[0], err.filename, err.strerror), file=sys.stderr) return 2 return 0 def main(argv): parser = argparse.ArgumentParser( description='QAPI schema tester') parser.add_argument('-d', '--dir', action='store', default='', help="directory containing tests") parser.add_argument('-u', '--update', action='store_true', help="update expected test results") parser.add_argument('tests', nargs='*', metavar='TEST', action='store') args = parser.parse_args() status = 0 for t in args.tests: (dir_name, base_name) = os.path.split(t) dir_name = dir_name or args.dir test_name = os.path.splitext(base_name)[0] status |= test_and_diff(test_name, dir_name, args.update) exit(status) if __name__ == '__main__': main(sys.argv) exit(0)
7,413
31.80531
77
py
qemu
qemu-master/tests/guest-debug/run-test.py
#!/usr/bin/env python3 # # Run a gdbstub test case # # Copyright (c) 2019 Linaro # # Author: Alex Bennée <[email protected]> # # This work is licensed under the terms of the GNU GPL, version 2 or later. # See the COPYING file in the top-level directory. # # SPDX-License-Identifier: GPL-2.0-or-later import argparse import subprocess import shutil import shlex import os from time import sleep from tempfile import TemporaryDirectory def get_args(): parser = argparse.ArgumentParser(description="A gdbstub test runner") parser.add_argument("--qemu", help="Qemu binary for test", required=True) parser.add_argument("--qargs", help="Qemu arguments for test") parser.add_argument("--binary", help="Binary to debug", required=True) parser.add_argument("--test", help="GDB test script", required=True) parser.add_argument("--gdb", help="The gdb binary to use", default=None) parser.add_argument("--output", help="A file to redirect output to") return parser.parse_args() def log(output, msg): if output: output.write(msg + "\n") output.flush() else: print(msg) if __name__ == '__main__': args = get_args() # Search for a gdb we can use if not args.gdb: args.gdb = shutil.which("gdb-multiarch") if not args.gdb: args.gdb = shutil.which("gdb") if not args.gdb: print("We need gdb to run the test") exit(-1) if args.output: output = open(args.output, "w") else: output = None socket_dir = TemporaryDirectory("qemu-gdbstub") socket_name = os.path.join(socket_dir.name, "gdbstub.socket") # Launch QEMU with binary if "system" in args.qemu: cmd = "%s %s %s -gdb unix:path=%s,server=on" % (args.qemu, args.qargs, args.binary, socket_name) else: cmd = "%s %s -g %s %s" % (args.qemu, args.qargs, socket_name, args.binary) log(output, "QEMU CMD: %s" % (cmd)) inferior = subprocess.Popen(shlex.split(cmd)) # Now launch gdb with our test and collect the result gdb_cmd = "%s %s" % (args.gdb, args.binary) # run quietly and ignore .gdbinit gdb_cmd += " -q -n -batch" # disable prompts in case of crash gdb_cmd += " 
-ex 'set confirm off'" # connect to remote gdb_cmd += " -ex 'target remote %s'" % (socket_name) # finally the test script itself gdb_cmd += " -x %s" % (args.test) sleep(1) log(output, "GDB CMD: %s" % (gdb_cmd)) result = subprocess.call(gdb_cmd, shell=True, stdout=output) # A result of greater than 128 indicates a fatal signal (likely a # crash due to gdb internal failure). That's a problem for GDB and # not the test so we force a return of 0 so we don't fail the test on # account of broken external tools. if result > 128: log(output, "GDB crashed? (%d, %d) SKIPPING" % (result, result - 128)) exit(0) try: inferior.wait(2) except subprocess.TimeoutExpired: log(output, "GDB never connected? Killed guest") inferior.kill() exit(result)
3,367
29.618182
78
py
qemu
qemu-master/tests/guest-debug/test-gdbstub.py
# # This script needs to be run on startup # qemu -kernel ${KERNEL} -s -S # and then: # gdb ${KERNEL}.vmlinux -x ${QEMU_SRC}/tests/guest-debug/test-gdbstub.py import gdb failcount = 0 def report(cond, msg): "Report success/fail of test" if cond: print ("PASS: %s" % (msg)) else: print ("FAIL: %s" % (msg)) global failcount failcount += 1 def check_step(): "Step an instruction, check it moved." start_pc = gdb.parse_and_eval('$pc') gdb.execute("si") end_pc = gdb.parse_and_eval('$pc') return not (start_pc == end_pc) def check_break(sym_name): "Setup breakpoint, continue and check we stopped." sym, ok = gdb.lookup_symbol(sym_name) bp = gdb.Breakpoint(sym_name) gdb.execute("c") # hopefully we came back end_pc = gdb.parse_and_eval('$pc') print ("%s == %s %d" % (end_pc, sym.value(), bp.hit_count)) bp.delete() # can we test we hit bp? return end_pc == sym.value() # We need to do hbreak manually as the python interface doesn't export it def check_hbreak(sym_name): "Setup hardware breakpoint, continue and check we stopped." sym, ok = gdb.lookup_symbol(sym_name) gdb.execute("hbreak %s" % (sym_name)) gdb.execute("c") # hopefully we came back end_pc = gdb.parse_and_eval('$pc') print ("%s == %s" % (end_pc, sym.value())) if end_pc == sym.value(): gdb.execute("d 1") return True else: return False class WatchPoint(gdb.Breakpoint): def get_wpstr(self, sym_name): "Setup sym and wp_str for given symbol." 
self.sym, ok = gdb.lookup_symbol(sym_name) wp_addr = gdb.parse_and_eval(sym_name).address self.wp_str = '*(%(type)s)(&%(address)s)' % dict( type = wp_addr.type, address = sym_name) return(self.wp_str) def __init__(self, sym_name, type): wp_str = self.get_wpstr(sym_name) super(WatchPoint, self).__init__(wp_str, gdb.BP_WATCHPOINT, type) def stop(self): end_pc = gdb.parse_and_eval('$pc') print ("HIT WP @ %s" % (end_pc)) return True def do_one_watch(sym, wtype, text): wp = WatchPoint(sym, wtype) gdb.execute("c") report_str = "%s for %s (%s)" % (text, sym, wp.sym.value()) if wp.hit_count > 0: report(True, report_str) wp.delete() else: report(False, report_str) def check_watches(sym_name): "Watch a symbol for any access." # Should hit for any read do_one_watch(sym_name, gdb.WP_ACCESS, "awatch") # Again should hit for reads do_one_watch(sym_name, gdb.WP_READ, "rwatch") # Finally when it is written do_one_watch(sym_name, gdb.WP_WRITE, "watch") class CatchBreakpoint(gdb.Breakpoint): def __init__(self, sym_name): super(CatchBreakpoint, self).__init__(sym_name) self.sym, ok = gdb.lookup_symbol(sym_name) def stop(self): end_pc = gdb.parse_and_eval('$pc') print ("CB: %s == %s" % (end_pc, self.sym.value())) if end_pc == self.sym.value(): report(False, "Hit final catchpoint") def run_test(): "Run through the tests one by one" print ("Checking we can step the first few instructions") step_ok = 0 for i in range(3): if check_step(): step_ok += 1 report(step_ok == 3, "single step in boot code") print ("Checking HW breakpoint works") break_ok = check_hbreak("kernel_init") report(break_ok, "hbreak @ kernel_init") # Can't set this up until we are in the kernel proper # if we make it to run_init_process we've over-run and # one of the tests failed print ("Setup catch-all for run_init_process") cbp = CatchBreakpoint("run_init_process") cpb2 = CatchBreakpoint("try_to_run_init_process") print ("Checking Normal breakpoint works") break_ok = check_break("wait_for_completion") report(break_ok, 
"break @ wait_for_completion") print ("Checking watchpoint works") check_watches("system_state") # # This runs as the script it sourced (via -x) # try: print ("Connecting to remote") gdb.execute("target remote localhost:1234") # These are not very useful in scripts gdb.execute("set pagination off") gdb.execute("set confirm off") # Run the actual tests run_test() except: print ("GDB Exception: %s" % (sys.exc_info()[0])) failcount += 1 import code code.InteractiveConsole(locals=globals()).interact() raise # Finally kill the inferior and exit gdb with a count of failures gdb.execute("kill") exit(failcount)
4,609
24.898876
73
py
qemu
qemu-master/tests/migration/guestperf-plot.py
#!/usr/bin/env python3 # # Migration test graph plotting command # # Copyright (c) 2016 Red Hat, Inc. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, see <http://www.gnu.org/licenses/>. # import sys from guestperf.shell import PlotShell shell = PlotShell() sys.exit(shell.run(sys.argv[1:]))
868
31.185185
78
py
qemu
qemu-master/tests/migration/guestperf.py
#!/usr/bin/env python3 # # Migration test direct invokation command # # Copyright (c) 2016 Red Hat, Inc. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, see <http://www.gnu.org/licenses/>. # import sys from guestperf.shell import Shell shell = Shell() sys.exit(shell.run(sys.argv[1:]))
864
29.892857
78
py
qemu
qemu-master/tests/migration/guestperf-batch.py
#!/usr/bin/env python3 # # Migration test batch comparison invokation # # Copyright (c) 2016 Red Hat, Inc. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, see <http://www.gnu.org/licenses/>. # import sys from guestperf.shell import BatchShell shell = BatchShell() sys.exit(shell.run(sys.argv[1:]))
875
31.444444
78
py
qemu
qemu-master/tests/migration/guestperf/scenario.py
# # Migration test scenario parameter description # # Copyright (c) 2016 Red Hat, Inc. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, see <http://www.gnu.org/licenses/>. # class Scenario(object): def __init__(self, name, downtime=500, bandwidth=125000, # 1000 gig-e, effectively unlimited max_iters=30, max_time=300, pause=False, pause_iters=5, post_copy=False, post_copy_iters=5, auto_converge=False, auto_converge_step=10, compression_mt=False, compression_mt_threads=1, compression_xbzrle=False, compression_xbzrle_cache=10, multifd=False, multifd_channels=2): self._name = name # General migration tunables self._downtime = downtime # milliseconds self._bandwidth = bandwidth # MiB per second self._max_iters = max_iters self._max_time = max_time # seconds # Strategies for ensuring completion self._pause = pause self._pause_iters = pause_iters self._post_copy = post_copy self._post_copy_iters = post_copy_iters self._auto_converge = auto_converge self._auto_converge_step = auto_converge_step # percentage CPU time self._compression_mt = compression_mt self._compression_mt_threads = compression_mt_threads self._compression_xbzrle = compression_xbzrle self._compression_xbzrle_cache = compression_xbzrle_cache # percentage of guest RAM self._multifd = multifd self._multifd_channels = multifd_channels def serialize(self): return { "name": self._name, "downtime": self._downtime, "bandwidth": self._bandwidth, 
"max_iters": self._max_iters, "max_time": self._max_time, "pause": self._pause, "pause_iters": self._pause_iters, "post_copy": self._post_copy, "post_copy_iters": self._post_copy_iters, "auto_converge": self._auto_converge, "auto_converge_step": self._auto_converge_step, "compression_mt": self._compression_mt, "compression_mt_threads": self._compression_mt_threads, "compression_xbzrle": self._compression_xbzrle, "compression_xbzrle_cache": self._compression_xbzrle_cache, "multifd": self._multifd, "multifd_channels": self._multifd_channels, } @classmethod def deserialize(cls, data): return cls( data["name"], data["downtime"], data["bandwidth"], data["max_iters"], data["max_time"], data["pause"], data["pause_iters"], data["post_copy"], data["post_copy_iters"], data["auto_converge"], data["auto_converge_step"], data["compression_mt"], data["compression_mt_threads"], data["compression_xbzrle"], data["compression_xbzrle_cache"], data["multifd"], data["multifd_channels"])
3,759
35.153846
91
py
qemu
qemu-master/tests/migration/guestperf/comparison.py
# # Migration test scenario comparison mapping # # Copyright (c) 2016 Red Hat, Inc. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, see <http://www.gnu.org/licenses/>. # from guestperf.scenario import Scenario class Comparison(object): def __init__(self, name, scenarios): self._name = name self._scenarios = scenarios COMPARISONS = [ # Looking at effect of pausing guest during migration # at various stages of iteration over RAM Comparison("pause-iters", scenarios = [ Scenario("pause-iters-0", pause=True, pause_iters=0), Scenario("pause-iters-1", pause=True, pause_iters=1), Scenario("pause-iters-5", pause=True, pause_iters=5), Scenario("pause-iters-20", pause=True, pause_iters=20), ]), # Looking at use of post-copy in relation to bandwidth # available for migration Comparison("post-copy-bandwidth", scenarios = [ Scenario("post-copy-bw-100mbs", post_copy=True, bandwidth=12), Scenario("post-copy-bw-300mbs", post_copy=True, bandwidth=37), Scenario("post-copy-bw-1gbs", post_copy=True, bandwidth=125), Scenario("post-copy-bw-10gbs", post_copy=True, bandwidth=1250), Scenario("post-copy-bw-100gbs", post_copy=True, bandwidth=12500), ]), # Looking at effect of starting post-copy at different # stages of the migration Comparison("post-copy-iters", scenarios = [ Scenario("post-copy-iters-0", post_copy=True, post_copy_iters=0), Scenario("post-copy-iters-1", post_copy=True, post_copy_iters=1), 
Scenario("post-copy-iters-5", post_copy=True, post_copy_iters=5), Scenario("post-copy-iters-20", post_copy=True, post_copy_iters=20), ]), # Looking at effect of auto-converge with different # throttling percentage step rates Comparison("auto-converge-iters", scenarios = [ Scenario("auto-converge-step-5", auto_converge=True, auto_converge_step=5), Scenario("auto-converge-step-10", auto_converge=True, auto_converge_step=10), Scenario("auto-converge-step-20", auto_converge=True, auto_converge_step=20), ]), # Looking at use of auto-converge in relation to bandwidth # available for migration Comparison("auto-converge-bandwidth", scenarios = [ Scenario("auto-converge-bw-100mbs", auto_converge=True, bandwidth=12), Scenario("auto-converge-bw-300mbs", auto_converge=True, bandwidth=37), Scenario("auto-converge-bw-1gbs", auto_converge=True, bandwidth=125), Scenario("auto-converge-bw-10gbs", auto_converge=True, bandwidth=1250), Scenario("auto-converge-bw-100gbs", auto_converge=True, bandwidth=12500), ]), # Looking at effect of multi-thread compression with # varying numbers of threads Comparison("compr-mt", scenarios = [ Scenario("compr-mt-threads-1", compression_mt=True, compression_mt_threads=1), Scenario("compr-mt-threads-2", compression_mt=True, compression_mt_threads=2), Scenario("compr-mt-threads-4", compression_mt=True, compression_mt_threads=4), ]), # Looking at effect of xbzrle compression with varying # cache sizes Comparison("compr-xbzrle", scenarios = [ Scenario("compr-xbzrle-cache-5", compression_xbzrle=True, compression_xbzrle_cache=5), Scenario("compr-xbzrle-cache-10", compression_xbzrle=True, compression_xbzrle_cache=10), Scenario("compr-xbzrle-cache-20", compression_xbzrle=True, compression_xbzrle_cache=10), Scenario("compr-xbzrle-cache-50", compression_xbzrle=True, compression_xbzrle_cache=50), ]), # Looking at effect of multifd with # varying numbers of channels Comparison("compr-multifd", scenarios = [ Scenario("compr-multifd-channels-4", multifd=True, 
multifd_channels=2), Scenario("compr-multifd-channels-8", multifd=True, multifd_channels=8), Scenario("compr-multifd-channels-32", multifd=True, multifd_channels=32), Scenario("compr-multifd-channels-64", multifd=True, multifd_channels=64), ]), ]
5,182
36.28777
78
py
qemu
qemu-master/tests/migration/guestperf/engine.py
# # Migration test main engine # # Copyright (c) 2016 Red Hat, Inc. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, see <http://www.gnu.org/licenses/>. # import os import re import sys import time from guestperf.progress import Progress, ProgressStats from guestperf.report import Report from guestperf.timings import TimingRecord, Timings sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'python')) from qemu.machine import QEMUMachine class Engine(object): def __init__(self, binary, dst_host, kernel, initrd, transport="tcp", sleep=15, verbose=False, debug=False): self._binary = binary # Path to QEMU binary self._dst_host = dst_host # Hostname of target host self._kernel = kernel # Path to kernel image self._initrd = initrd # Path to stress initrd self._transport = transport # 'unix' or 'tcp' or 'rdma' self._sleep = sleep self._verbose = verbose self._debug = debug if debug: self._verbose = debug def _vcpu_timing(self, pid, tid_list): records = [] now = time.time() jiffies_per_sec = os.sysconf(os.sysconf_names['SC_CLK_TCK']) for tid in tid_list: statfile = "/proc/%d/task/%d/stat" % (pid, tid) with open(statfile, "r") as fh: stat = fh.readline() fields = stat.split(" ") stime = int(fields[13]) utime = int(fields[14]) records.append(TimingRecord(tid, now, 1000 * (stime + utime) / jiffies_per_sec)) return records def _cpu_timing(self, pid): now = time.time() jiffies_per_sec = 
os.sysconf(os.sysconf_names['SC_CLK_TCK']) statfile = "/proc/%d/stat" % pid with open(statfile, "r") as fh: stat = fh.readline() fields = stat.split(" ") stime = int(fields[13]) utime = int(fields[14]) return TimingRecord(pid, now, 1000 * (stime + utime) / jiffies_per_sec) def _migrate_progress(self, vm): info = vm.command("query-migrate") if "ram" not in info: info["ram"] = {} return Progress( info.get("status", "active"), ProgressStats( info["ram"].get("transferred", 0), info["ram"].get("remaining", 0), info["ram"].get("total", 0), info["ram"].get("duplicate", 0), info["ram"].get("skipped", 0), info["ram"].get("normal", 0), info["ram"].get("normal-bytes", 0), info["ram"].get("dirty-pages-rate", 0), info["ram"].get("mbps", 0), info["ram"].get("dirty-sync-count", 0) ), time.time(), info.get("total-time", 0), info.get("downtime", 0), info.get("expected-downtime", 0), info.get("setup-time", 0), info.get("cpu-throttle-percentage", 0), ) def _migrate(self, hardware, scenario, src, dst, connect_uri): src_qemu_time = [] src_vcpu_time = [] src_pid = src.get_pid() vcpus = src.command("query-cpus-fast") src_threads = [] for vcpu in vcpus: src_threads.append(vcpu["thread-id"]) # XXX how to get dst timings on remote host ? 
if self._verbose: print("Sleeping %d seconds for initial guest workload run" % self._sleep) sleep_secs = self._sleep while sleep_secs > 1: src_qemu_time.append(self._cpu_timing(src_pid)) src_vcpu_time.extend(self._vcpu_timing(src_pid, src_threads)) time.sleep(1) sleep_secs -= 1 if self._verbose: print("Starting migration") if scenario._auto_converge: resp = src.command("migrate-set-capabilities", capabilities = [ { "capability": "auto-converge", "state": True } ]) resp = src.command("migrate-set-parameters", cpu_throttle_increment=scenario._auto_converge_step) if scenario._post_copy: resp = src.command("migrate-set-capabilities", capabilities = [ { "capability": "postcopy-ram", "state": True } ]) resp = dst.command("migrate-set-capabilities", capabilities = [ { "capability": "postcopy-ram", "state": True } ]) resp = src.command("migrate-set-parameters", max_bandwidth=scenario._bandwidth * 1024 * 1024) resp = src.command("migrate-set-parameters", downtime_limit=scenario._downtime) if scenario._compression_mt: resp = src.command("migrate-set-capabilities", capabilities = [ { "capability": "compress", "state": True } ]) resp = src.command("migrate-set-parameters", compress_threads=scenario._compression_mt_threads) resp = dst.command("migrate-set-capabilities", capabilities = [ { "capability": "compress", "state": True } ]) resp = dst.command("migrate-set-parameters", decompress_threads=scenario._compression_mt_threads) if scenario._compression_xbzrle: resp = src.command("migrate-set-capabilities", capabilities = [ { "capability": "xbzrle", "state": True } ]) resp = dst.command("migrate-set-capabilities", capabilities = [ { "capability": "xbzrle", "state": True } ]) resp = src.command("migrate-set-parameters", xbzrle_cache_size=( hardware._mem * 1024 * 1024 * 1024 / 100 * scenario._compression_xbzrle_cache)) if scenario._multifd: resp = src.command("migrate-set-capabilities", capabilities = [ { "capability": "multifd", "state": True } ]) resp = 
src.command("migrate-set-parameters", multifd_channels=scenario._multifd_channels) resp = dst.command("migrate-set-capabilities", capabilities = [ { "capability": "multifd", "state": True } ]) resp = dst.command("migrate-set-parameters", multifd_channels=scenario._multifd_channels) resp = src.command("migrate", uri=connect_uri) post_copy = False paused = False progress_history = [] start = time.time() loop = 0 while True: loop = loop + 1 time.sleep(0.05) progress = self._migrate_progress(src) if (loop % 20) == 0: src_qemu_time.append(self._cpu_timing(src_pid)) src_vcpu_time.extend(self._vcpu_timing(src_pid, src_threads)) if (len(progress_history) == 0 or (progress_history[-1]._ram._iterations < progress._ram._iterations)): progress_history.append(progress) if progress._status in ("completed", "failed", "cancelled"): if progress._status == "completed" and paused: dst.command("cont") if progress_history[-1] != progress: progress_history.append(progress) if progress._status == "completed": if self._verbose: print("Sleeping %d seconds for final guest workload run" % self._sleep) sleep_secs = self._sleep while sleep_secs > 1: time.sleep(1) src_qemu_time.append(self._cpu_timing(src_pid)) src_vcpu_time.extend(self._vcpu_timing(src_pid, src_threads)) sleep_secs -= 1 return [progress_history, src_qemu_time, src_vcpu_time] if self._verbose and (loop % 20) == 0: print("Iter %d: remain %5dMB of %5dMB (total %5dMB @ %5dMb/sec)" % ( progress._ram._iterations, progress._ram._remaining_bytes / (1024 * 1024), progress._ram._total_bytes / (1024 * 1024), progress._ram._transferred_bytes / (1024 * 1024), progress._ram._transfer_rate_mbs, )) if progress._ram._iterations > scenario._max_iters: if self._verbose: print("No completion after %d iterations over RAM" % scenario._max_iters) src.command("migrate_cancel") continue if time.time() > (start + scenario._max_time): if self._verbose: print("No completion after %d seconds" % scenario._max_time) src.command("migrate_cancel") continue if 
(scenario._post_copy and progress._ram._iterations >= scenario._post_copy_iters and not post_copy): if self._verbose: print("Switching to post-copy after %d iterations" % scenario._post_copy_iters) resp = src.command("migrate-start-postcopy") post_copy = True if (scenario._pause and progress._ram._iterations >= scenario._pause_iters and not paused): if self._verbose: print("Pausing VM after %d iterations" % scenario._pause_iters) resp = src.command("stop") paused = True def _is_ppc64le(self): _, _, _, _, machine = os.uname() if machine == "ppc64le": return True return False def _get_guest_console_args(self): if self._is_ppc64le(): return "console=hvc0" else: return "console=ttyS0" def _get_qemu_serial_args(self): if self._is_ppc64le(): return ["-chardev", "stdio,id=cdev0", "-device", "spapr-vty,chardev=cdev0"] else: return ["-chardev", "stdio,id=cdev0", "-device", "isa-serial,chardev=cdev0"] def _get_common_args(self, hardware, tunnelled=False): args = [ "noapic", "edd=off", "printk.time=1", "noreplace-smp", "cgroup_disable=memory", "pci=noearly", ] args.append(self._get_guest_console_args()) if self._debug: args.append("debug") else: args.append("quiet") args.append("ramsize=%s" % hardware._mem) cmdline = " ".join(args) if tunnelled: cmdline = "'" + cmdline + "'" argv = [ "-accel", "kvm", "-cpu", "host", "-kernel", self._kernel, "-initrd", self._initrd, "-append", cmdline, "-m", str((hardware._mem * 1024) + 512), "-smp", str(hardware._cpus), ] argv.extend(self._get_qemu_serial_args()) if self._debug: argv.extend(["-machine", "graphics=off"]) if hardware._prealloc_pages: argv_source += ["-mem-path", "/dev/shm", "-mem-prealloc"] if hardware._locked_pages: argv_source += ["-overcommit", "mem-lock=on"] if hardware._huge_pages: pass return argv def _get_src_args(self, hardware): return self._get_common_args(hardware) def _get_dst_args(self, hardware, uri): tunnelled = False if self._dst_host != "localhost": tunnelled = True argv = self._get_common_args(hardware, 
tunnelled) return argv + ["-incoming", uri] @staticmethod def _get_common_wrapper(cpu_bind, mem_bind): wrapper = [] if len(cpu_bind) > 0 or len(mem_bind) > 0: wrapper.append("numactl") if cpu_bind: wrapper.append("--physcpubind=%s" % ",".join(cpu_bind)) if mem_bind: wrapper.append("--membind=%s" % ",".join(mem_bind)) return wrapper def _get_src_wrapper(self, hardware): return self._get_common_wrapper(hardware._src_cpu_bind, hardware._src_mem_bind) def _get_dst_wrapper(self, hardware): wrapper = self._get_common_wrapper(hardware._dst_cpu_bind, hardware._dst_mem_bind) if self._dst_host != "localhost": return ["ssh", "-R", "9001:localhost:9001", self._dst_host] + wrapper else: return wrapper def _get_timings(self, vm): log = vm.get_log() if not log: return [] if self._debug: print(log) regex = r"[^\s]+\s\((\d+)\):\sINFO:\s(\d+)ms\scopied\s\d+\sGB\sin\s(\d+)ms" matcher = re.compile(regex) records = [] for line in log.split("\n"): match = matcher.match(line) if match: records.append(TimingRecord(int(match.group(1)), int(match.group(2)) / 1000.0, int(match.group(3)))) return records def run(self, hardware, scenario, result_dir=os.getcwd()): abs_result_dir = os.path.join(result_dir, scenario._name) if self._transport == "tcp": uri = "tcp:%s:9000" % self._dst_host elif self._transport == "rdma": uri = "rdma:%s:9000" % self._dst_host elif self._transport == "unix": if self._dst_host != "localhost": raise Exception("Running use unix migration transport for non-local host") uri = "unix:/var/tmp/qemu-migrate-%d.migrate" % os.getpid() try: os.remove(uri[5:]) os.remove(monaddr) except: pass if self._dst_host != "localhost": dstmonaddr = ("localhost", 9001) else: dstmonaddr = "/var/tmp/qemu-dst-%d-monitor.sock" % os.getpid() srcmonaddr = "/var/tmp/qemu-src-%d-monitor.sock" % os.getpid() src = QEMUMachine(self._binary, args=self._get_src_args(hardware), wrapper=self._get_src_wrapper(hardware), name="qemu-src-%d" % os.getpid(), monitor_address=srcmonaddr) dst = 
QEMUMachine(self._binary, args=self._get_dst_args(hardware, uri), wrapper=self._get_dst_wrapper(hardware), name="qemu-dst-%d" % os.getpid(), monitor_address=dstmonaddr) try: src.launch() dst.launch() ret = self._migrate(hardware, scenario, src, dst, uri) progress_history = ret[0] qemu_timings = ret[1] vcpu_timings = ret[2] if uri[0:5] == "unix:" and os.path.exists(uri[5:]): os.remove(uri[5:]) if os.path.exists(srcmonaddr): os.remove(srcmonaddr) if self._dst_host == "localhost" and os.path.exists(dstmonaddr): os.remove(dstmonaddr) if self._verbose: print("Finished migration") src.shutdown() dst.shutdown() return Report(hardware, scenario, progress_history, Timings(self._get_timings(src) + self._get_timings(dst)), Timings(qemu_timings), Timings(vcpu_timings), self._binary, self._dst_host, self._kernel, self._initrd, self._transport, self._sleep) except Exception as e: if self._debug: print("Failed: %s" % str(e)) try: src.shutdown() except: pass try: dst.shutdown() except: pass if self._debug: print(src.get_log()) print(dst.get_log()) raise
18,037
36.191753
99
py
qemu
qemu-master/tests/migration/guestperf/hardware.py
#
# Migration test hardware configuration description
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.
#


class Hardware(object):
    """Host hardware configuration used when launching the test VMs."""

    # Serialization key order; each key maps to the attribute "_<key>".
    _SERIAL_ORDER = ("cpus", "mem", "src_mem_bind", "dst_mem_bind",
                     "src_cpu_bind", "dst_cpu_bind", "prealloc_pages",
                     "huge_pages", "locked_pages")

    def __init__(self, cpus=1, mem=1,
                 src_cpu_bind=None, src_mem_bind=None,
                 dst_cpu_bind=None, dst_mem_bind=None,
                 prealloc_pages = False,
                 huge_pages=False, locked_pages=False):
        self._cpus = cpus
        self._mem = mem                    # GiB
        self._src_mem_bind = src_mem_bind  # List of NUMA nodes
        self._src_cpu_bind = src_cpu_bind  # List of pCPUs
        self._dst_mem_bind = dst_mem_bind  # List of NUMA nodes
        self._dst_cpu_bind = dst_cpu_bind  # List of pCPUs
        self._prealloc_pages = prealloc_pages
        self._huge_pages = huge_pages
        self._locked_pages = locked_pages

    def serialize(self):
        """Return a plain dict representation of this configuration."""
        return {key: getattr(self, "_" + key) for key in self._SERIAL_ORDER}

    @classmethod
    def deserialize(cls, data):
        """Rebuild a Hardware instance from a serialize() dict."""
        return cls(cpus=data["cpus"],
                   mem=data["mem"],
                   src_cpu_bind=data["src_cpu_bind"],
                   src_mem_bind=data["src_mem_bind"],
                   dst_cpu_bind=data["dst_cpu_bind"],
                   dst_mem_bind=data["dst_mem_bind"],
                   prealloc_pages=data["prealloc_pages"],
                   huge_pages=data["huge_pages"],
                   locked_pages=data["locked_pages"])
2,257
34.84127
78
py
qemu
qemu-master/tests/migration/guestperf/progress.py
#
# Migration test migration operation progress
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.
#


class ProgressStats(object):
    """Snapshot of the RAM transfer counters for one migration pass."""

    # Field names in constructor order; each maps to attribute "_<name>"
    # and to the identically named key in the serialized dict.
    _FIELDS = ("transferred_bytes", "remaining_bytes", "total_bytes",
               "duplicate_pages", "skipped_pages", "normal_pages",
               "normal_bytes", "dirty_rate_pps", "transfer_rate_mbs",
               "iterations")

    def __init__(self,
                 transferred_bytes,
                 remaining_bytes,
                 total_bytes,
                 duplicate_pages,
                 skipped_pages,
                 normal_pages,
                 normal_bytes,
                 dirty_rate_pps,
                 transfer_rate_mbs,
                 iterations):
        self._transferred_bytes = transferred_bytes
        self._remaining_bytes = remaining_bytes
        self._total_bytes = total_bytes
        self._duplicate_pages = duplicate_pages
        self._skipped_pages = skipped_pages
        self._normal_pages = normal_pages
        self._normal_bytes = normal_bytes
        self._dirty_rate_pps = dirty_rate_pps
        self._transfer_rate_mbs = transfer_rate_mbs
        self._iterations = iterations

    def serialize(self):
        """Return a plain dict representation of these counters."""
        return {name: getattr(self, "_" + name) for name in self._FIELDS}

    @classmethod
    def deserialize(cls, data):
        """Rebuild a ProgressStats from a serialize() dict."""
        return cls(*(data[name] for name in cls._FIELDS))


class Progress(object):
    """One sampled point of overall migration progress."""

    # Scalar fields copied verbatim into/out of the serialized dict;
    # "status" and the nested "ram" stats are handled explicitly.
    _SCALARS = ("now", "duration", "downtime", "downtime_expected",
                "setup_time", "throttle_pcent")

    def __init__(self,
                 status,
                 ram,
                 now,
                 duration,
                 downtime,
                 downtime_expected,
                 setup_time,
                 throttle_pcent):
        self._status = status
        self._ram = ram  # a ProgressStats instance
        self._now = now
        self._duration = duration
        self._downtime = downtime
        self._downtime_expected = downtime_expected
        self._setup_time = setup_time
        self._throttle_pcent = throttle_pcent

    def serialize(self):
        """Return a plain dict representation, nesting the RAM stats."""
        out = {
            "status": self._status,
            "ram": self._ram.serialize(),
        }
        for name in self._SCALARS:
            out[name] = getattr(self, "_" + name)
        return out

    @classmethod
    def deserialize(cls, data):
        """Rebuild a Progress (and its nested ProgressStats) from a dict."""
        return cls(data["status"],
                   ProgressStats.deserialize(data["ram"]),
                   *(data[name] for name in cls._SCALARS))
3,862
31.737288
78
py
qemu
qemu-master/tests/migration/guestperf/timings.py
#
# Migration test timing records
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.
#


class TimingRecord(object):
    """A single (thread id, timestamp, value) measurement sample."""

    def __init__(self, tid, timestamp, value):
        self._tid = tid
        self._timestamp = timestamp
        self._value = value

    def serialize(self):
        """Return a plain dict representation of this sample."""
        return {
            "tid": self._tid,
            "timestamp": self._timestamp,
            "value": self._value
        }

    @classmethod
    def deserialize(cls, data):
        """Rebuild a TimingRecord from a serialize() dict."""
        return cls(data["tid"], data["timestamp"], data["value"])


class Timings(object):
    """An ordered collection of TimingRecord samples."""

    def __init__(self, records):
        self._records = records

    def serialize(self):
        """Serialize every record, preserving order."""
        return [rec.serialize() for rec in self._records]

    @classmethod
    def deserialize(cls, data):
        """Rebuild a Timings list from a list of serialized records."""
        return Timings(list(map(TimingRecord.deserialize, data)))
1,519
26.142857
78
py
qemu
qemu-master/tests/migration/guestperf/shell.py
# # Migration test command line shell integration # # Copyright (c) 2016 Red Hat, Inc. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, see <http://www.gnu.org/licenses/>. # import argparse import fnmatch import os import os.path import platform import sys import logging from guestperf.hardware import Hardware from guestperf.engine import Engine from guestperf.scenario import Scenario from guestperf.comparison import COMPARISONS from guestperf.plot import Plot from guestperf.report import Report class BaseShell(object): def __init__(self): parser = argparse.ArgumentParser(description="Migration Test Tool") # Test args parser.add_argument("--debug", dest="debug", default=False, action="store_true") parser.add_argument("--verbose", dest="verbose", default=False, action="store_true") parser.add_argument("--sleep", dest="sleep", default=15, type=int) parser.add_argument("--binary", dest="binary", default="/usr/bin/qemu-system-x86_64") parser.add_argument("--dst-host", dest="dst_host", default="localhost") parser.add_argument("--kernel", dest="kernel", default="/boot/vmlinuz-%s" % platform.release()) parser.add_argument("--initrd", dest="initrd", default="tests/migration/initrd-stress.img") parser.add_argument("--transport", dest="transport", default="unix") # Hardware args parser.add_argument("--cpus", dest="cpus", default=1, type=int) parser.add_argument("--mem", dest="mem", default=1, type=int) 
parser.add_argument("--src-cpu-bind", dest="src_cpu_bind", default="") parser.add_argument("--src-mem-bind", dest="src_mem_bind", default="") parser.add_argument("--dst-cpu-bind", dest="dst_cpu_bind", default="") parser.add_argument("--dst-mem-bind", dest="dst_mem_bind", default="") parser.add_argument("--prealloc-pages", dest="prealloc_pages", default=False) parser.add_argument("--huge-pages", dest="huge_pages", default=False) parser.add_argument("--locked-pages", dest="locked_pages", default=False) self._parser = parser def get_engine(self, args): return Engine(binary=args.binary, dst_host=args.dst_host, kernel=args.kernel, initrd=args.initrd, transport=args.transport, sleep=args.sleep, debug=args.debug, verbose=args.verbose) def get_hardware(self, args): def split_map(value): if value == "": return [] return value.split(",") return Hardware(cpus=args.cpus, mem=args.mem, src_cpu_bind=split_map(args.src_cpu_bind), src_mem_bind=split_map(args.src_mem_bind), dst_cpu_bind=split_map(args.dst_cpu_bind), dst_mem_bind=split_map(args.dst_mem_bind), locked_pages=args.locked_pages, huge_pages=args.huge_pages, prealloc_pages=args.prealloc_pages) class Shell(BaseShell): def __init__(self): super(Shell, self).__init__() parser = self._parser parser.add_argument("--output", dest="output", default=None) # Scenario args parser.add_argument("--max-iters", dest="max_iters", default=30, type=int) parser.add_argument("--max-time", dest="max_time", default=300, type=int) parser.add_argument("--bandwidth", dest="bandwidth", default=125000, type=int) parser.add_argument("--downtime", dest="downtime", default=500, type=int) parser.add_argument("--pause", dest="pause", default=False, action="store_true") parser.add_argument("--pause-iters", dest="pause_iters", default=5, type=int) parser.add_argument("--post-copy", dest="post_copy", default=False, action="store_true") parser.add_argument("--post-copy-iters", dest="post_copy_iters", default=5, type=int) 
parser.add_argument("--auto-converge", dest="auto_converge", default=False, action="store_true") parser.add_argument("--auto-converge-step", dest="auto_converge_step", default=10, type=int) parser.add_argument("--compression-mt", dest="compression_mt", default=False, action="store_true") parser.add_argument("--compression-mt-threads", dest="compression_mt_threads", default=1, type=int) parser.add_argument("--compression-xbzrle", dest="compression_xbzrle", default=False, action="store_true") parser.add_argument("--compression-xbzrle-cache", dest="compression_xbzrle_cache", default=10, type=int) parser.add_argument("--multifd", dest="multifd", default=False, action="store_true") parser.add_argument("--multifd-channels", dest="multifd_channels", default=2, type=int) def get_scenario(self, args): return Scenario(name="perfreport", downtime=args.downtime, bandwidth=args.bandwidth, max_iters=args.max_iters, max_time=args.max_time, pause=args.pause, pause_iters=args.pause_iters, post_copy=args.post_copy, post_copy_iters=args.post_copy_iters, auto_converge=args.auto_converge, auto_converge_step=args.auto_converge_step, compression_mt=args.compression_mt, compression_mt_threads=args.compression_mt_threads, compression_xbzrle=args.compression_xbzrle, compression_xbzrle_cache=args.compression_xbzrle_cache, multifd=args.multifd, multifd_channels=args.multifd_channels) def run(self, argv): args = self._parser.parse_args(argv) logging.basicConfig(level=(logging.DEBUG if args.debug else logging.INFO if args.verbose else logging.WARN)) engine = self.get_engine(args) hardware = self.get_hardware(args) scenario = self.get_scenario(args) try: report = engine.run(hardware, scenario) if args.output is None: print(report.to_json()) else: with open(args.output, "w") as fh: print(report.to_json(), file=fh) return 0 except Exception as e: print("Error: %s" % str(e), file=sys.stderr) if args.debug: raise return 1 class BatchShell(BaseShell): def __init__(self): super(BatchShell, 
self).__init__() parser = self._parser parser.add_argument("--filter", dest="filter", default="*") parser.add_argument("--output", dest="output", default=os.getcwd()) def run(self, argv): args = self._parser.parse_args(argv) logging.basicConfig(level=(logging.DEBUG if args.debug else logging.INFO if args.verbose else logging.WARN)) engine = self.get_engine(args) hardware = self.get_hardware(args) try: for comparison in COMPARISONS: compdir = os.path.join(args.output, comparison._name) for scenario in comparison._scenarios: name = os.path.join(comparison._name, scenario._name) if not fnmatch.fnmatch(name, args.filter): if args.verbose: print("Skipping %s" % name) continue if args.verbose: print("Running %s" % name) dirname = os.path.join(args.output, comparison._name) filename = os.path.join(dirname, scenario._name + ".json") if not os.path.exists(dirname): os.makedirs(dirname) report = engine.run(hardware, scenario) with open(filename, "w") as fh: print(report.to_json(), file=fh) except Exception as e: print("Error: %s" % str(e), file=sys.stderr) if args.debug: raise class PlotShell(object): def __init__(self): super(PlotShell, self).__init__() self._parser = argparse.ArgumentParser(description="Migration Test Tool") self._parser.add_argument("--output", dest="output", default=None) self._parser.add_argument("--debug", dest="debug", default=False, action="store_true") self._parser.add_argument("--verbose", dest="verbose", default=False, action="store_true") self._parser.add_argument("--migration-iters", dest="migration_iters", default=False, action="store_true") self._parser.add_argument("--total-guest-cpu", dest="total_guest_cpu", default=False, action="store_true") self._parser.add_argument("--split-guest-cpu", dest="split_guest_cpu", default=False, action="store_true") self._parser.add_argument("--qemu-cpu", dest="qemu_cpu", default=False, action="store_true") self._parser.add_argument("--vcpu-cpu", dest="vcpu_cpu", default=False, action="store_true") 
self._parser.add_argument("reports", nargs='*') def run(self, argv): args = self._parser.parse_args(argv) logging.basicConfig(level=(logging.DEBUG if args.debug else logging.INFO if args.verbose else logging.WARN)) if len(args.reports) == 0: print("At least one report required", file=sys.stderr) return 1 if not (args.qemu_cpu or args.vcpu_cpu or args.total_guest_cpu or args.split_guest_cpu): print("At least one chart type is required", file=sys.stderr) return 1 reports = [] for report in args.reports: reports.append(Report.from_json_file(report)) plot = Plot(reports, args.migration_iters, args.total_guest_cpu, args.split_guest_cpu, args.qemu_cpu, args.vcpu_cpu) plot.generate(args.output)
11,166
39.314079
114
py
qemu
qemu-master/tests/migration/guestperf/report.py
#
# Migration test output result reporting
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.
#


import json

from guestperf.hardware import Hardware
from guestperf.scenario import Scenario
from guestperf.progress import Progress
from guestperf.timings import Timings


class Report(object):
    """Complete results of one migration test run, JSON-serializable."""

    # Plain string/number fields; each maps to attribute "_<name>" and is
    # copied verbatim into/out of the serialized dict.
    _SCALARS = ("binary", "dst_host", "kernel", "initrd",
                "transport", "sleep")

    def __init__(self,
                 hardware,
                 scenario,
                 progress_history,
                 guest_timings,
                 qemu_timings,
                 vcpu_timings,
                 binary,
                 dst_host,
                 kernel,
                 initrd,
                 transport,
                 sleep):
        self._hardware = hardware                  # Hardware
        self._scenario = scenario                  # Scenario
        self._progress_history = progress_history  # list of Progress
        self._guest_timings = guest_timings        # Timings
        self._qemu_timings = qemu_timings          # Timings
        self._vcpu_timings = vcpu_timings          # Timings
        self._binary = binary
        self._dst_host = dst_host
        self._kernel = kernel
        self._initrd = initrd
        self._transport = transport
        self._sleep = sleep

    def serialize(self):
        """Return a nested plain-dict representation of the whole report."""
        out = {
            "hardware": self._hardware.serialize(),
            "scenario": self._scenario.serialize(),
            "progress_history": [prog.serialize()
                                 for prog in self._progress_history],
            "guest_timings": self._guest_timings.serialize(),
            "qemu_timings": self._qemu_timings.serialize(),
            "vcpu_timings": self._vcpu_timings.serialize(),
        }
        for name in self._SCALARS:
            out[name] = getattr(self, "_" + name)
        return out

    @classmethod
    def deserialize(cls, data):
        """Rebuild a Report (and all nested objects) from a dict."""
        return cls(Hardware.deserialize(data["hardware"]),
                   Scenario.deserialize(data["scenario"]),
                   [Progress.deserialize(rec)
                    for rec in data["progress_history"]],
                   Timings.deserialize(data["guest_timings"]),
                   Timings.deserialize(data["qemu_timings"]),
                   Timings.deserialize(data["vcpu_timings"]),
                   *(data[name] for name in cls._SCALARS))

    def to_json(self):
        """Serialize to a pretty-printed JSON string."""
        return json.dumps(self.serialize(), indent=4)

    @classmethod
    def from_json(cls, data):
        """Build a Report from a JSON string."""
        return cls.deserialize(json.loads(data))

    @classmethod
    def from_json_file(cls, filename):
        """Build a Report from a JSON file on disk."""
        with open(filename, "r") as handle:
            return cls.deserialize(json.load(handle))
3,307
32.414141
94
py
qemu
qemu-master/tests/migration/guestperf/plot.py
#
# Migration test graph plotting
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.
#

import sys


class Plot(object):
    """Render migration test reports as interactive plotly charts."""

    # Generated using
    # http://tools.medialab.sciences-po.fr/iwanthue/
    COLORS = ["#CD54D0", "#79D94C", "#7470CD", "#D2D251", "#863D79",
              "#76DDA6", "#D4467B", "#61923D", "#CB9CCA", "#D98F36",
              "#8CC8DA", "#CE4831", "#5E7693", "#9B803F", "#412F4C",
              "#CECBA6", "#6D3229", "#598B73", "#C8827C", "#394427"]

    def __init__(self,
                 reports,
                 migration_iters,
                 total_guest_cpu,
                 split_guest_cpu,
                 qemu_cpu,
                 vcpu_cpu):
        self._reports = reports            # list of Report
        self._migration_iters = migration_iters  # bools selecting charts
        self._total_guest_cpu = total_guest_cpu
        self._split_guest_cpu = split_guest_cpu
        self._qemu_cpu = qemu_cpu
        self._vcpu_cpu = vcpu_cpu
        self._color_idx = 0

    def _next_color(self):
        """Return the next color from the palette, cycling at the end."""
        color = self.COLORS[self._color_idx]
        self._color_idx += 1
        if self._color_idx >= len(self.COLORS):
            self._color_idx = 0
        return color

    def _get_progress_label(self, progress):
        """Hover label text describing the migration state at a sample."""
        if progress:
            return "\n\n" + "\n".join(
                ["Status: %s" % progress._status,
                 "Iteration: %d" % progress._ram._iterations,
                 "Throttle: %02d%%" % progress._throttle_pcent,
                 "Dirty rate: %dMB/s" % (progress._ram._dirty_rate_pps * 4 / 1024.0)])
        else:
            return "\n\n" + "\n".join(
                ["Status: %s" % "none",
                 "Iteration: %d" % 0])

    def _find_start_time(self, report):
        """Return the earliest timestamp in the QEMU or guest timings."""
        startqemu = report._qemu_timings._records[0]._timestamp
        startguest = report._guest_timings._records[0]._timestamp
        if startqemu < startguest:
            return startqemu
        else:
            # BUG FIX: this previously returned the misspelled name
            # "stasrtguest", raising NameError whenever the guest sample
            # was not strictly later than the QEMU sample.
            return startguest

    def _get_guest_max_value(self, report):
        """Largest raw value among the guest timing records."""
        maxvalue = 0
        for record in report._guest_timings._records:
            if record._value > maxvalue:
                maxvalue = record._value
        return maxvalue

    def _get_qemu_max_value(self, report):
        """Peak QEMU CPU utilization (%) derived from cumulative samples."""
        maxvalue = 0
        oldvalue = None
        oldtime = None
        for record in report._qemu_timings._records:
            if oldvalue is not None:
                cpudelta = (record._value - oldvalue) / 1000.0
                timedelta = record._timestamp - oldtime
                if timedelta == 0:
                    # Coincident samples carry no rate information.
                    continue
                util = cpudelta / timedelta * 100.0
            else:
                util = 0
            oldvalue = record._value
            oldtime = record._timestamp

            if util > maxvalue:
                maxvalue = util

        return maxvalue

    def _get_total_guest_cpu_graph(self, report, starttime):
        """Scatter trace of aggregate guest memory-update latency."""
        xaxis = []
        yaxis = []
        labels = []
        progress_idx = -1
        for record in report._guest_timings._records:
            # Advance to the most recent progress sample preceding this
            # timing record so the hover label reflects migration state.
            while ((progress_idx + 1) < len(report._progress_history) and
                   report._progress_history[progress_idx + 1]._now < record._timestamp):
                progress_idx = progress_idx + 1

            if progress_idx >= 0:
                progress = report._progress_history[progress_idx]
            else:
                progress = None

            xaxis.append(record._timestamp - starttime)
            yaxis.append(record._value)
            labels.append(self._get_progress_label(progress))

        from plotly import graph_objs as go
        return go.Scatter(x=xaxis,
                          y=yaxis,
                          name="Guest PIDs: %s" % report._scenario._name,
                          mode='lines',
                          line={
                              "dash": "solid",
                              "color": self._next_color(),
                              "shape": "linear",
                              "width": 1
                          },
                          text=labels)

    def _get_split_guest_cpu_graphs(self, report, starttime):
        """Per-thread scatter traces of guest memory-update latency."""
        threads = {}
        for record in report._guest_timings._records:
            if record._tid in threads:
                continue
            threads[record._tid] = {
                "xaxis": [],
                "yaxis": [],
                "labels": [],
            }

        progress_idx = -1
        for record in report._guest_timings._records:
            while ((progress_idx + 1) < len(report._progress_history) and
                   report._progress_history[progress_idx + 1]._now < record._timestamp):
                progress_idx = progress_idx + 1

            if progress_idx >= 0:
                progress = report._progress_history[progress_idx]
else: progress = None threads[record._tid]["xaxis"].append(record._timestamp - starttime) threads[record._tid]["yaxis"].append(record._value) threads[record._tid]["labels"].append(self._get_progress_label(progress)) graphs = [] from plotly import graph_objs as go for tid in threads.keys(): graphs.append( go.Scatter(x=threads[tid]["xaxis"], y=threads[tid]["yaxis"], name="PID %s: %s" % (tid, report._scenario._name), mode="lines", line={ "dash": "solid", "color": self._next_color(), "shape": "linear", "width": 1 }, text=threads[tid]["labels"])) return graphs def _get_migration_iters_graph(self, report, starttime): xaxis = [] yaxis = [] labels = [] for progress in report._progress_history: xaxis.append(progress._now - starttime) yaxis.append(0) labels.append(self._get_progress_label(progress)) from plotly import graph_objs as go return go.Scatter(x=xaxis, y=yaxis, text=labels, name="Migration iterations", mode="markers", marker={ "color": self._next_color(), "symbol": "star", "size": 5 }) def _get_qemu_cpu_graph(self, report, starttime): xaxis = [] yaxis = [] labels = [] progress_idx = -1 first = report._qemu_timings._records[0] abstimestamps = [first._timestamp] absvalues = [first._value] for record in report._qemu_timings._records[1:]: while ((progress_idx + 1) < len(report._progress_history) and report._progress_history[progress_idx + 1]._now < record._timestamp): progress_idx = progress_idx + 1 if progress_idx >= 0: progress = report._progress_history[progress_idx] else: progress = None oldvalue = absvalues[-1] oldtime = abstimestamps[-1] cpudelta = (record._value - oldvalue) / 1000.0 timedelta = record._timestamp - oldtime if timedelta == 0: continue util = cpudelta / timedelta * 100.0 abstimestamps.append(record._timestamp) absvalues.append(record._value) xaxis.append(record._timestamp - starttime) yaxis.append(util) labels.append(self._get_progress_label(progress)) from plotly import graph_objs as go return go.Scatter(x=xaxis, y=yaxis, yaxis="y2", name="QEMU: 
%s" % report._scenario._name, mode='lines', line={ "dash": "solid", "color": self._next_color(), "shape": "linear", "width": 1 }, text=labels) def _get_vcpu_cpu_graphs(self, report, starttime): threads = {} for record in report._vcpu_timings._records: if record._tid in threads: continue threads[record._tid] = { "xaxis": [], "yaxis": [], "labels": [], "absvalue": [record._value], "abstime": [record._timestamp], } progress_idx = -1 for record in report._vcpu_timings._records: while ((progress_idx + 1) < len(report._progress_history) and report._progress_history[progress_idx + 1]._now < record._timestamp): progress_idx = progress_idx + 1 if progress_idx >= 0: progress = report._progress_history[progress_idx] else: progress = None oldvalue = threads[record._tid]["absvalue"][-1] oldtime = threads[record._tid]["abstime"][-1] cpudelta = (record._value - oldvalue) / 1000.0 timedelta = record._timestamp - oldtime if timedelta == 0: continue util = cpudelta / timedelta * 100.0 if util > 100: util = 100 threads[record._tid]["absvalue"].append(record._value) threads[record._tid]["abstime"].append(record._timestamp) threads[record._tid]["xaxis"].append(record._timestamp - starttime) threads[record._tid]["yaxis"].append(util) threads[record._tid]["labels"].append(self._get_progress_label(progress)) graphs = [] from plotly import graph_objs as go for tid in threads.keys(): graphs.append( go.Scatter(x=threads[tid]["xaxis"], y=threads[tid]["yaxis"], yaxis="y2", name="VCPU %s: %s" % (tid, report._scenario._name), mode="lines", line={ "dash": "solid", "color": self._next_color(), "shape": "linear", "width": 1 }, text=threads[tid]["labels"])) return graphs def _generate_chart_report(self, report): graphs = [] starttime = self._find_start_time(report) if self._total_guest_cpu: graphs.append(self._get_total_guest_cpu_graph(report, starttime)) if self._split_guest_cpu: graphs.extend(self._get_split_guest_cpu_graphs(report, starttime)) if self._qemu_cpu: 
graphs.append(self._get_qemu_cpu_graph(report, starttime)) if self._vcpu_cpu: graphs.extend(self._get_vcpu_cpu_graphs(report, starttime)) if self._migration_iters: graphs.append(self._get_migration_iters_graph(report, starttime)) return graphs def _generate_annotation(self, starttime, progress): return { "text": progress._status, "x": progress._now - starttime, "y": 10, } def _generate_annotations(self, report): starttime = self._find_start_time(report) annotations = {} started = False for progress in report._progress_history: if progress._status == "setup": continue if progress._status not in annotations: annotations[progress._status] = self._generate_annotation(starttime, progress) return annotations.values() def _generate_chart(self): from plotly.offline import plot from plotly import graph_objs as go graphs = [] yaxismax = 0 yaxismax2 = 0 for report in self._reports: graphs.extend(self._generate_chart_report(report)) maxvalue = self._get_guest_max_value(report) if maxvalue > yaxismax: yaxismax = maxvalue maxvalue = self._get_qemu_max_value(report) if maxvalue > yaxismax2: yaxismax2 = maxvalue yaxismax += 100 if not self._qemu_cpu: yaxismax2 = 110 yaxismax2 += 10 annotations = [] if self._migration_iters: for report in self._reports: annotations.extend(self._generate_annotations(report)) layout = go.Layout(title="Migration comparison", xaxis={ "title": "Wallclock time (secs)", "showgrid": False, }, yaxis={ "title": "Memory update speed (ms/GB)", "showgrid": False, "range": [0, yaxismax], }, yaxis2={ "title": "Hostutilization (%)", "overlaying": "y", "side": "right", "range": [0, yaxismax2], "showgrid": False, }, annotations=annotations) figure = go.Figure(data=graphs, layout=layout) return plot(figure, show_link=False, include_plotlyjs=False, output_type="div") def _generate_report(self): pieces = [] for report in self._reports: pieces.append(""" <h3>Report %s</h3> <table> """ % report._scenario._name) pieces.append(""" <tr class="subhead"> <th colspan="2">Test 
config</th> </tr> <tr> <th>Emulator:</th> <td>%s</td> </tr> <tr> <th>Kernel:</th> <td>%s</td> </tr> <tr> <th>Ramdisk:</th> <td>%s</td> </tr> <tr> <th>Transport:</th> <td>%s</td> </tr> <tr> <th>Host:</th> <td>%s</td> </tr> """ % (report._binary, report._kernel, report._initrd, report._transport, report._dst_host)) hardware = report._hardware pieces.append(""" <tr class="subhead"> <th colspan="2">Hardware config</th> </tr> <tr> <th>CPUs:</th> <td>%d</td> </tr> <tr> <th>RAM:</th> <td>%d GB</td> </tr> <tr> <th>Source CPU bind:</th> <td>%s</td> </tr> <tr> <th>Source RAM bind:</th> <td>%s</td> </tr> <tr> <th>Dest CPU bind:</th> <td>%s</td> </tr> <tr> <th>Dest RAM bind:</th> <td>%s</td> </tr> <tr> <th>Preallocate RAM:</th> <td>%s</td> </tr> <tr> <th>Locked RAM:</th> <td>%s</td> </tr> <tr> <th>Huge pages:</th> <td>%s</td> </tr> """ % (hardware._cpus, hardware._mem, ",".join(hardware._src_cpu_bind), ",".join(hardware._src_mem_bind), ",".join(hardware._dst_cpu_bind), ",".join(hardware._dst_mem_bind), "yes" if hardware._prealloc_pages else "no", "yes" if hardware._locked_pages else "no", "yes" if hardware._huge_pages else "no")) scenario = report._scenario pieces.append(""" <tr class="subhead"> <th colspan="2">Scenario config</th> </tr> <tr> <th>Max downtime:</th> <td>%d milli-sec</td> </tr> <tr> <th>Max bandwidth:</th> <td>%d MB/sec</td> </tr> <tr> <th>Max iters:</th> <td>%d</td> </tr> <tr> <th>Max time:</th> <td>%d secs</td> </tr> <tr> <th>Pause:</th> <td>%s</td> </tr> <tr> <th>Pause iters:</th> <td>%d</td> </tr> <tr> <th>Post-copy:</th> <td>%s</td> </tr> <tr> <th>Post-copy iters:</th> <td>%d</td> </tr> <tr> <th>Auto-converge:</th> <td>%s</td> </tr> <tr> <th>Auto-converge iters:</th> <td>%d</td> </tr> <tr> <th>MT compression:</th> <td>%s</td> </tr> <tr> <th>MT compression threads:</th> <td>%d</td> </tr> <tr> <th>XBZRLE compression:</th> <td>%s</td> </tr> <tr> <th>XBZRLE compression cache:</th> <td>%d%% of RAM</td> </tr> """ % (scenario._downtime, scenario._bandwidth, 
scenario._max_iters, scenario._max_time, "yes" if scenario._pause else "no", scenario._pause_iters, "yes" if scenario._post_copy else "no", scenario._post_copy_iters, "yes" if scenario._auto_converge else "no", scenario._auto_converge_step, "yes" if scenario._compression_mt else "no", scenario._compression_mt_threads, "yes" if scenario._compression_xbzrle else "no", scenario._compression_xbzrle_cache)) pieces.append(""" </table> """) return "\n".join(pieces) def _generate_style(self): return """ #report table tr th { text-align: right; } #report table tr td { text-align: left; } #report table tr.subhead th { background: rgb(192, 192, 192); text-align: center; } """ def generate_html(self, fh): print("""<html> <head> <script type="text/javascript" src="plotly.min.js"> </script> <style type="text/css"> %s </style> <title>Migration report</title> </head> <body> <h1>Migration report</h1> <h2>Chart summary</h2> <div id="chart"> """ % self._generate_style(), file=fh) print(self._generate_chart(), file=fh) print(""" </div> <h2>Report details</h2> <div id="report"> """, file=fh) print(self._generate_report(), file=fh) print(""" </div> </body> </html> """, file=fh) def generate(self, filename): if filename is None: self.generate_html(sys.stdout) else: with open(filename, "w") as fh: self.generate_html(fh)
19,027
29.49359
94
py
qemu
qemu-master/tests/migration/guestperf/__init__.py
0
0
0
py
qemu
qemu-master/tests/tcg/i386/test-avx.py
#! /usr/bin/env python3 # Generate test-avx.h from x86.csv import csv import sys from fnmatch import fnmatch archs = [ "SSE", "SSE2", "SSE3", "SSSE3", "SSE4_1", "SSE4_2", "AES", "AVX", "AVX2", "AES+AVX", "VAES+AVX", "F16C", "FMA", ] ignore = set(["FISTTP", "LDMXCSR", "VLDMXCSR", "STMXCSR", "VSTMXCSR"]) imask = { 'vBLENDPD': 0xff, 'vBLENDPS': 0x0f, 'CMP[PS][SD]': 0x07, 'VCMP[PS][SD]': 0x1f, 'vCVTPS2PH': 0x7, 'vDPPD': 0x33, 'vDPPS': 0xff, 'vEXTRACTPS': 0x03, 'vINSERTPS': 0xff, 'MPSADBW': 0x7, 'VMPSADBW': 0x3f, 'vPALIGNR': 0x3f, 'vPBLENDW': 0xff, 'vPCMP[EI]STR*': 0x0f, 'vPEXTRB': 0x0f, 'vPEXTRW': 0x07, 'vPEXTRD': 0x03, 'vPEXTRQ': 0x01, 'vPINSRB': 0x0f, 'vPINSRW': 0x07, 'vPINSRD': 0x03, 'vPINSRQ': 0x01, 'vPSHUF[DW]': 0xff, 'vPSHUF[LH]W': 0xff, 'vPS[LR][AL][WDQ]': 0x3f, 'vPS[RL]LDQ': 0x1f, 'vROUND[PS][SD]': 0x7, 'vSHUFPD': 0x0f, 'vSHUFPS': 0xff, 'vAESKEYGENASSIST': 0xff, 'VEXTRACT[FI]128': 0x01, 'VINSERT[FI]128': 0x01, 'VPBLENDD': 0xff, 'VPERM2[FI]128': 0x33, 'VPERMPD': 0xff, 'VPERMQ': 0xff, 'VPERMILPS': 0xff, 'VPERMILPD': 0x0f, } def strip_comments(x): for l in x: if l != '' and l[0] != '#': yield l def reg_w(w): if w == 8: return 'al' elif w == 16: return 'ax' elif w == 32: return 'eax' elif w == 64: return 'rax' raise Exception("bad reg_w %d" % w) def mem_w(w): if w == 8: t = "BYTE" elif w == 16: t = "WORD" elif w == 32: t = "DWORD" elif w == 64: t = "QWORD" elif w == 128: t = "XMMWORD" elif w == 256: t = "YMMWORD" else: raise Exception() return t + " PTR 32[rdx]" class XMMArg(): isxmm = True def __init__(self, reg, mw): if mw not in [0, 8, 16, 32, 64, 128, 256]: raise Exception("Bad /m width: %s" % w) self.reg = reg self.mw = mw self.ismem = mw != 0 def regstr(self, n): if n < 0: return mem_w(self.mw) else: return "%smm%d" % (self.reg, n) class MMArg(): isxmm = True def __init__(self, mw): if mw not in [0, 32, 64]: raise Exception("Bad mem width: %s" % mw) self.mw = mw self.ismem = mw != 0 def regstr(self, n): return "mm%d" % (n & 7) def match(op, pattern): if 
pattern[0] == 'v': return fnmatch(op, pattern[1:]) or fnmatch(op, 'V'+pattern[1:]) return fnmatch(op, pattern) class ArgVSIB(): isxmm = True ismem = False def __init__(self, reg, w): if w not in [32, 64]: raise Exception("Bad vsib width: %s" % w) self.w = w self.reg = reg def regstr(self, n): reg = "%smm%d" % (self.reg, n >> 2) return "[rsi + %s * %d]" % (reg, 1 << (n & 3)) class ArgImm8u(): isxmm = False ismem = False def __init__(self, op): for k, v in imask.items(): if match(op, k): self.mask = imask[k]; return raise Exception("Unknown immediate") def vals(self): mask = self.mask yield 0 n = 0 while n != mask: n += 1 while (n & ~mask) != 0: n += (n & ~mask) yield n class ArgRM(): isxmm = False def __init__(self, rw, mw): if rw not in [8, 16, 32, 64]: raise Exception("Bad r/w width: %s" % w) if mw not in [0, 8, 16, 32, 64]: raise Exception("Bad r/w width: %s" % w) self.rw = rw self.mw = mw self.ismem = mw != 0 def regstr(self, n): if n < 0: return mem_w(self.mw) else: return reg_w(self.rw) class ArgMem(): isxmm = False ismem = True def __init__(self, w): if w not in [8, 16, 32, 64, 128, 256]: raise Exception("Bad mem width: %s" % w) self.w = w def regstr(self, n): return mem_w(self.w) class SkipInstruction(Exception): pass def ArgGenerator(arg, op): if arg[:3] == 'xmm' or arg[:3] == "ymm": if "/" in arg: r, m = arg.split('/') if (m[0] != 'm'): raise Exception("Expected /m: %s", arg) return XMMArg(arg[0], int(m[1:])); else: return XMMArg(arg[0], 0); elif arg[:2] == 'mm': if "/" in arg: r, m = arg.split('/') if (m[0] != 'm'): raise Exception("Expected /m: %s", arg) return MMArg(int(m[1:])); else: return MMArg(0); elif arg[:4] == 'imm8': return ArgImm8u(op); elif arg == '<XMM0>': return None elif arg[0] == 'r': if '/m' in arg: r, m = arg.split('/') if (m[0] != 'm'): raise Exception("Expected /m: %s", arg) mw = int(m[1:]) if r == 'r': rw = mw else: rw = int(r[1:]) return ArgRM(rw, mw) return ArgRM(int(arg[1:]), 0); elif arg[0] == 'm': return ArgMem(int(arg[1:])) elif 
arg[:2] == 'vm': return ArgVSIB(arg[-1], int(arg[2:-1])) else: raise Exception("Unrecognised arg: %s", arg) class InsnGenerator: def __init__(self, op, args): self.op = op if op[-2:] in ["PH", "PS", "PD", "SS", "SD"]: if op[-1] == 'H': self.optype = 'F16' elif op[-1] == 'S': self.optype = 'F32' else: self.optype = 'F64' else: self.optype = 'I' try: self.args = list(ArgGenerator(a, op) for a in args) if not any((x.isxmm for x in self.args)): raise SkipInstruction if len(self.args) > 0 and self.args[-1] is None: self.args = self.args[:-1] except SkipInstruction: raise except Exception as e: raise Exception("Bad arg %s: %s" % (op, e)) def gen(self): regs = (10, 11, 12) dest = 9 nreg = len(self.args) if nreg == 0: yield self.op return if isinstance(self.args[-1], ArgImm8u): nreg -= 1 immarg = self.args[-1] else: immarg = None memarg = -1 for n, arg in enumerate(self.args): if arg.ismem: memarg = n if (self.op.startswith("VGATHER") or self.op.startswith("VPGATHER")): if "GATHERD" in self.op: ireg = 13 << 2 else: ireg = 14 << 2 regset = [ (dest, ireg | 0, regs[0]), (dest, ireg | 1, regs[0]), (dest, ireg | 2, regs[0]), (dest, ireg | 3, regs[0]), ] if memarg >= 0: raise Exception("vsib with memory: %s" % self.op) elif nreg == 1: regset = [(regs[0],)] if memarg == 0: regset += [(-1,)] elif nreg == 2: regset = [ (regs[0], regs[1]), (regs[0], regs[0]), ] if memarg == 0: regset += [(-1, regs[0])] elif memarg == 1: regset += [(dest, -1)] elif nreg == 3: regset = [ (dest, regs[0], regs[1]), (dest, regs[0], regs[0]), (regs[0], regs[0], regs[1]), (regs[0], regs[1], regs[0]), (regs[0], regs[0], regs[0]), ] if memarg == 2: regset += [ (dest, regs[0], -1), (regs[0], regs[0], -1), ] elif memarg > 0: raise Exception("Memarg %d" % memarg) elif nreg == 4: regset = [ (dest, regs[0], regs[1], regs[2]), (dest, regs[0], regs[0], regs[1]), (dest, regs[0], regs[1], regs[0]), (dest, regs[1], regs[0], regs[0]), (dest, regs[0], regs[0], regs[0]), (regs[0], regs[0], regs[1], regs[2]), (regs[0], 
regs[1], regs[0], regs[2]), (regs[0], regs[1], regs[2], regs[0]), (regs[0], regs[0], regs[0], regs[1]), (regs[0], regs[0], regs[1], regs[0]), (regs[0], regs[1], regs[0], regs[0]), (regs[0], regs[0], regs[0], regs[0]), ] if memarg == 2: regset += [ (dest, regs[0], -1, regs[1]), (dest, regs[0], -1, regs[0]), (regs[0], regs[0], -1, regs[1]), (regs[0], regs[1], -1, regs[0]), (regs[0], regs[0], -1, regs[0]), ] elif memarg > 0: raise Exception("Memarg4 %d" % memarg) else: raise Exception("Too many regs: %s(%d)" % (self.op, nreg)) for regv in regset: argstr = [] for i in range(nreg): arg = self.args[i] argstr.append(arg.regstr(regv[i])) if immarg is None: yield self.op + ' ' + ','.join(argstr) else: for immval in immarg.vals(): yield self.op + ' ' + ','.join(argstr) + ',' + str(immval) def split0(s): if s == '': return [] return s.split(',') def main(): n = 0 if len(sys.argv) != 3: print("Usage: test-avx.py x86.csv test-avx.h") exit(1) csvfile = open(sys.argv[1], 'r', newline='') with open(sys.argv[2], "w") as outf: outf.write("// Generated by test-avx.py. Do not edit.\n") for row in csv.reader(strip_comments(csvfile)): insn = row[0].replace(',', '').split() if insn[0] in ignore: continue cpuid = row[6] if cpuid in archs: try: g = InsnGenerator(insn[0], insn[1:]) for insn in g.gen(): outf.write('TEST(%d, "%s", %s)\n' % (n, insn, g.optype)) n += 1 except SkipInstruction: pass outf.write("#undef TEST\n") csvfile.close() if __name__ == "__main__": main()
10,596
27.183511
80
py
qemu
qemu-master/tests/tcg/i386/test-mmx.py
#! /usr/bin/env python3 # Generate test-avx.h from x86.csv import csv import sys from fnmatch import fnmatch ignore = set(["EMMS", "FEMMS", "FISTTP", "LDMXCSR", "VLDMXCSR", "STMXCSR", "VSTMXCSR"]) imask = { 'PALIGNR': 0x3f, 'PEXTRB': 0x0f, 'PEXTRW': 0x07, 'PEXTRD': 0x03, 'PEXTRQ': 0x01, 'PINSRB': 0x0f, 'PINSRW': 0x07, 'PINSRD': 0x03, 'PINSRQ': 0x01, 'PSHUF[DW]': 0xff, 'PSHUF[LH]W': 0xff, 'PS[LR][AL][WDQ]': 0x3f, } def strip_comments(x): for l in x: if l != '' and l[0] != '#': yield l def reg_w(w): if w == 8: return 'al' elif w == 16: return 'ax' elif w == 32: return 'eax' elif w == 64: return 'rax' raise Exception("bad reg_w %d" % w) def mem_w(w): if w == 8: t = "BYTE" elif w == 16: t = "WORD" elif w == 32: t = "DWORD" elif w == 64: t = "QWORD" else: raise Exception() return t + " PTR 32[rdx]" class MMArg(): isxmm = True def __init__(self, mw): if mw not in [0, 32, 64]: raise Exception("Bad /m width: %s" % w) self.mw = mw self.ismem = mw != 0 def regstr(self, n): if n < 0: return mem_w(self.mw) else: return "mm%d" % (n, ) def match(op, pattern): return fnmatch(op, pattern) class ArgImm8u(): isxmm = False ismem = False def __init__(self, op): for k, v in imask.items(): if match(op, k): self.mask = imask[k]; return raise Exception("Unknown immediate") def vals(self): mask = self.mask yield 0 n = 0 while n != mask: n += 1 while (n & ~mask) != 0: n += (n & ~mask) yield n class ArgRM(): isxmm = False def __init__(self, rw, mw): if rw not in [8, 16, 32, 64]: raise Exception("Bad r/w width: %s" % w) if mw not in [0, 8, 16, 32, 64]: raise Exception("Bad r/w width: %s" % w) self.rw = rw self.mw = mw self.ismem = mw != 0 def regstr(self, n): if n < 0: return mem_w(self.mw) else: return reg_w(self.rw) class ArgMem(): isxmm = False ismem = True def __init__(self, w): if w not in [8, 16, 32, 64, 128, 256]: raise Exception("Bad mem width: %s" % w) self.w = w def regstr(self, n): return mem_w(self.w) class SkipInstruction(Exception): pass def ArgGenerator(arg, op): if arg[:2] == 
'mm': if "/" in arg: r, m = arg.split('/') if (m[0] != 'm'): raise Exception("Expected /m: %s", arg) return MMArg(int(m[1:])); else: return MMArg(0); elif arg[:4] == 'imm8': return ArgImm8u(op); elif arg[0] == 'r': if '/m' in arg: r, m = arg.split('/') if (m[0] != 'm'): raise Exception("Expected /m: %s", arg) mw = int(m[1:]) if r == 'r': rw = mw else: rw = int(r[1:]) return ArgRM(rw, mw) return ArgRM(int(arg[1:]), 0); elif arg[0] == 'm': return ArgMem(int(arg[1:])) else: raise SkipInstruction class InsnGenerator: def __init__(self, op, args): self.op = op if op[0:2] == "PF": self.optype = 'F32' else: self.optype = 'I' try: self.args = list(ArgGenerator(a, op) for a in args) if len(self.args) > 0 and self.args[-1] is None: self.args = self.args[:-1] except SkipInstruction: raise except Exception as e: raise Exception("Bad arg %s: %s" % (op, e)) def gen(self): regs = (5, 6, 7) dest = 4 nreg = len(self.args) if nreg == 0: yield self.op return if isinstance(self.args[-1], ArgImm8u): nreg -= 1 immarg = self.args[-1] else: immarg = None memarg = -1 for n, arg in enumerate(self.args): if arg.ismem: memarg = n if nreg == 1: regset = [(regs[0],)] if memarg == 0: regset += [(-1,)] elif nreg == 2: regset = [ (regs[0], regs[1]), (regs[0], regs[0]), ] if memarg == 0: regset += [(-1, regs[0])] elif memarg == 1: regset += [(dest, -1)] else: raise Exception("Too many regs: %s(%d)" % (self.op, nreg)) for regv in regset: argstr = [] for i in range(nreg): arg = self.args[i] argstr.append(arg.regstr(regv[i])) if immarg is None: yield self.op + ' ' + ','.join(argstr) else: for immval in immarg.vals(): yield self.op + ' ' + ','.join(argstr) + ',' + str(immval) def split0(s): if s == '': return [] return s.split(',') def main(): n = 0 if len(sys.argv) <= 3: print("Usage: test-mmx.py x86.csv test-mmx.h CPUID...") exit(1) csvfile = open(sys.argv[1], 'r', newline='') archs = sys.argv[3:] with open(sys.argv[2], "w") as outf: outf.write("// Generated by test-mmx.py. 
Do not edit.\n") for row in csv.reader(strip_comments(csvfile)): insn = row[0].replace(',', '').split() if insn[0] in ignore: continue cpuid = row[6] if cpuid in archs: try: g = InsnGenerator(insn[0], insn[1:]) for insn in g.gen(): outf.write('TEST(%d, "%s", %s)\n' % (n, insn, g.optype)) n += 1 except SkipInstruction: pass outf.write("#undef TEST\n") csvfile.close() if __name__ == "__main__": main()
6,197
24.297959
80
py
qemu
qemu-master/tests/tcg/multiarch/gdbstub/memory.py
from __future__ import print_function # # Test some of the softmmu debug features with the multiarch memory # test. It is a port of the original vmlinux focused test case but # using the "memory" test instead. # # This is launched via tests/guest-debug/run-test.py # import gdb import sys failcount = 0 def report(cond, msg): "Report success/fail of test" if cond: print("PASS: %s" % (msg)) else: print("FAIL: %s" % (msg)) global failcount failcount += 1 def check_step(): "Step an instruction, check it moved." start_pc = gdb.parse_and_eval('$pc') gdb.execute("si") end_pc = gdb.parse_and_eval('$pc') return not (start_pc == end_pc) # # Currently it's hard to create a hbreak with the pure python API and # manually matching PC to symbol address is a bit flaky thanks to # function prologues. However internally QEMU's gdbstub treats them # the same as normal breakpoints so it will do for now. # def check_break(sym_name): "Setup breakpoint, continue and check we stopped." sym, ok = gdb.lookup_symbol(sym_name) bp = gdb.Breakpoint(sym_name, gdb.BP_BREAKPOINT) gdb.execute("c") # hopefully we came back end_pc = gdb.parse_and_eval('$pc') report(bp.hit_count == 1, "break @ %s (%s %d hits)" % (end_pc, sym.value(), bp.hit_count)) bp.delete() def do_one_watch(sym, wtype, text): wp = gdb.Breakpoint(sym, gdb.BP_WATCHPOINT, wtype) gdb.execute("c") report_str = "%s for %s" % (text, sym) if wp.hit_count > 0: report(True, report_str) wp.delete() else: report(False, report_str) def check_watches(sym_name): "Watch a symbol for any access." 
# Should hit for any read do_one_watch(sym_name, gdb.WP_ACCESS, "awatch") # Again should hit for reads do_one_watch(sym_name, gdb.WP_READ, "rwatch") # Finally when it is written do_one_watch(sym_name, gdb.WP_WRITE, "watch") def run_test(): "Run through the tests one by one" print("Checking we can step the first few instructions") step_ok = 0 for i in range(3): if check_step(): step_ok += 1 report(step_ok == 3, "single step in boot code") # If we get here we have missed some of the other breakpoints. print("Setup catch-all for _exit") cbp = gdb.Breakpoint("_exit", gdb.BP_BREAKPOINT) check_break("main") check_watches("test_data[128]") report(cbp.hit_count == 0, "didn't reach backstop") # # This runs as the script it sourced (via -x, via run-test.py) # try: inferior = gdb.selected_inferior() arch = inferior.architecture() print("ATTACHED: %s" % arch.name()) except (gdb.error, AttributeError): print("SKIPPING (not connected)", file=sys.stderr) exit(0) if gdb.parse_and_eval('$pc') == 0: print("SKIP: PC not set") exit(0) try: # These are not very useful in scripts gdb.execute("set pagination off") # Run the actual tests run_test() except (gdb.error): print("GDB Exception: %s" % (sys.exc_info()[0])) failcount += 1 pass # Finally kill the inferior and exit gdb with a count of failures gdb.execute("kill") exit(failcount)
3,204
23.465649
75
py
qemu
qemu-master/tests/tcg/multiarch/gdbstub/test-qxfer-auxv-read.py
from __future__ import print_function # # Test auxiliary vector is loaded via gdbstub # # This is launched via tests/guest-debug/run-test.py # import gdb import sys failcount = 0 def report(cond, msg): "Report success/fail of test" if cond: print ("PASS: %s" % (msg)) else: print ("FAIL: %s" % (msg)) global failcount failcount += 1 def run_test(): "Run through the tests one by one" auxv = gdb.execute("info auxv", False, True) report(isinstance(auxv, str), "Fetched auxv from inferior") report(auxv.find("sha1"), "Found test binary name in auxv") # # This runs as the script it sourced (via -x, via run-test.py) # try: inferior = gdb.selected_inferior() arch = inferior.architecture() print("ATTACHED: %s" % arch.name()) except (gdb.error, AttributeError): print("SKIPPING (not connected)", file=sys.stderr) exit(0) if gdb.parse_and_eval('$pc') == 0: print("SKIP: PC not set") exit(0) try: # These are not very useful in scripts gdb.execute("set pagination off") gdb.execute("set confirm off") # Run the actual tests run_test() except (gdb.error): print ("GDB Exception: %s" % (sys.exc_info()[0])) failcount += 1 pass print("All tests complete: %d failures" % failcount) exit(failcount)
1,319
21.758621
63
py
qemu
qemu-master/tests/tcg/multiarch/gdbstub/sha1.py
from __future__ import print_function # # A very simple smoke test for debugging the SHA1 userspace test on # each target. # # This is launched via tests/guest-debug/run-test.py # import gdb import sys initial_vlen = 0 failcount = 0 def report(cond, msg): "Report success/fail of test" if cond: print("PASS: %s" % (msg)) else: print("FAIL: %s" % (msg)) global failcount failcount += 1 def check_break(sym_name): "Setup breakpoint, continue and check we stopped." sym, ok = gdb.lookup_symbol(sym_name) bp = gdb.Breakpoint(sym_name) gdb.execute("c") # hopefully we came back end_pc = gdb.parse_and_eval('$pc') report(bp.hit_count == 1, "break @ %s (%s %d hits)" % (end_pc, sym.value(), bp.hit_count)) bp.delete() def run_test(): "Run through the tests one by one" check_break("SHA1Init") # Check step and inspect values. We do a double next after the # breakpoint as depending on the version of gdb we may step the # preamble and not the first actual line of source. gdb.execute("next") gdb.execute("next") val_ctx = gdb.parse_and_eval("context->state[0]") exp_ctx = 0x67452301 report(int(val_ctx) == exp_ctx, "context->state[0] == %x" % exp_ctx); gdb.execute("next") val_ctx = gdb.parse_and_eval("context->state[1]") exp_ctx = 0xEFCDAB89 report(int(val_ctx) == exp_ctx, "context->state[1] == %x" % exp_ctx); # finally check we don't barf inspecting registers gdb.execute("info registers") # # This runs as the script it sourced (via -x, via run-test.py) # try: inferior = gdb.selected_inferior() arch = inferior.architecture() print("ATTACHED: %s" % arch.name()) except (gdb.error, AttributeError): print("SKIPPING (not connected)", file=sys.stderr) exit(0) if gdb.parse_and_eval('$pc') == 0: print("SKIP: PC not set") exit(0) try: # These are not very useful in scripts gdb.execute("set pagination off") gdb.execute("set confirm off") # Run the actual tests run_test() except (gdb.error): print ("GDB Exception: %s" % (sys.exc_info()[0])) failcount += 1 pass print("All tests complete: %d failures" 
% failcount) exit(failcount)
2,256
24.359551
75
py
qemu
qemu-master/tests/tcg/multiarch/gdbstub/test-thread-breakpoint.py
from __future__ import print_function # # Test auxiliary vector is loaded via gdbstub # # This is launched via tests/guest-debug/run-test.py # import gdb import sys failcount = 0 def report(cond, msg): "Report success/fail of test" if cond: print ("PASS: %s" % (msg)) else: print ("FAIL: %s" % (msg)) global failcount failcount += 1 def run_test(): "Run through the tests one by one" sym, ok = gdb.lookup_symbol("thread1_func") gdb.execute("b thread1_func") gdb.execute("c") frame = gdb.selected_frame() report(str(frame.function()) == "thread1_func", "break @ %s"%frame) # # This runs as the script it sourced (via -x, via run-test.py) # try: inferior = gdb.selected_inferior() arch = inferior.architecture() print("ATTACHED: %s" % arch.name()) except (gdb.error, AttributeError): print("SKIPPING (not connected)", file=sys.stderr) exit(0) if gdb.parse_and_eval('$pc') == 0: print("SKIP: PC not set") exit(0) try: # These are not very useful in scripts gdb.execute("set pagination off") gdb.execute("set confirm off") # Run the actual tests run_test() except (gdb.error): print ("GDB Exception: %s" % (sys.exc_info()[0])) failcount += 1 pass print("All tests complete: %d failures" % failcount) exit(failcount)
1,351
21.163934
71
py
qemu
qemu-master/tests/tcg/aarch64/gdbstub/test-sve-ioctl.py
from __future__ import print_function # # Test the SVE ZReg reports the right amount of data. It uses the # sve-ioctl test and examines the register data each time the # __sve_ld_done breakpoint is hit. # # This is launched via tests/guest-debug/run-test.py # import gdb import sys initial_vlen = 0 failcount = 0 def report(cond, msg): "Report success/fail of test" if cond: print ("PASS: %s" % (msg)) else: print ("FAIL: %s" % (msg)) global failcount failcount += 1 class TestBreakpoint(gdb.Breakpoint): def __init__(self, sym_name="__sve_ld_done"): super(TestBreakpoint, self).__init__(sym_name) # self.sym, ok = gdb.lookup_symbol(sym_name) def stop(self): val_i = gdb.parse_and_eval('i') global initial_vlen try: for i in range(0, int(val_i)): val_z = gdb.parse_and_eval("$z0.b.u[%d]" % i) report(int(val_z) == i, "z0.b.u[%d] == %d" % (i, i)) for i in range(i + 1, initial_vlen): val_z = gdb.parse_and_eval("$z0.b.u[%d]" % i) report(int(val_z) == 0, "z0.b.u[%d] == 0" % (i)) except gdb.error: report(False, "checking zregs (out of range)") # Check the aliased V registers are set and GDB has correctly # created them for us having recognised and handled SVE. 
try: for i in range(0, 16): val_z = gdb.parse_and_eval("$z0.b.u[%d]" % i) val_v = gdb.parse_and_eval("$v0.b.u[%d]" % i) report(int(val_z) == int(val_v), "v0.b.u[%d] == z0.b.u[%d]" % (i, i)) except gdb.error: report(False, "checking vregs (out of range)") def run_test(): "Run through the tests one by one" print ("Setup breakpoint") bp = TestBreakpoint() global initial_vlen vg = gdb.parse_and_eval("$vg") initial_vlen = int(vg) * 8 gdb.execute("c") # # This runs as the script it sourced (via -x, via run-test.py) # try: inferior = gdb.selected_inferior() arch = inferior.architecture() report(arch.name() == "aarch64", "connected to aarch64") except (gdb.error, AttributeError): print("SKIPPING (not connected)", file=sys.stderr) exit(0) try: # These are not very useful in scripts gdb.execute("set pagination off") # Run the actual tests run_test() except: print ("GDB Exception: %s" % (sys.exc_info()[0])) failcount += 1 import code code.InteractiveConsole(locals=globals()).interact() raise print("All tests complete: %d failures" % failcount) exit(failcount)
2,653
27.537634
69
py
qemu
qemu-master/tests/tcg/aarch64/gdbstub/test-sve.py
from __future__ import print_function # # Test the SVE registers are visable and changeable via gdbstub # # This is launched via tests/guest-debug/run-test.py # import gdb import sys MAGIC = 0xDEADBEEF failcount = 0 def report(cond, msg): "Report success/fail of test" if cond: print ("PASS: %s" % (msg)) else: print ("FAIL: %s" % (msg)) global failcount failcount += 1 def run_test(): "Run through the tests one by one" gdb.execute("info registers") report(True, "info registers") gdb.execute("info registers vector") report(True, "info registers vector") # Now all the zregs frame = gdb.selected_frame() for i in range(0, 32): rname = "z%d" % (i) zreg = frame.read_register(rname) report(True, "Reading %s" % rname) for j in range(0, 4): cmd = "set $%s.q.u[%d] = 0x%x" % (rname, j, MAGIC) gdb.execute(cmd) report(True, "%s" % cmd) for j in range(0, 4): reg = "$%s.q.u[%d]" % (rname, j) v = gdb.parse_and_eval(reg) report(str(v.type) == "uint128_t", "size of %s" % (reg)) for j in range(0, 8): cmd = "set $%s.d.u[%d] = 0x%x" % (rname, j, MAGIC) gdb.execute(cmd) report(True, "%s" % cmd) for j in range(0, 8): reg = "$%s.d.u[%d]" % (rname, j) v = gdb.parse_and_eval(reg) report(str(v.type) == "uint64_t", "size of %s" % (reg)) report(int(v) == MAGIC, "%s is 0x%x" % (reg, MAGIC)) # # This runs as the script it sourced (via -x, via run-test.py) # try: inferior = gdb.selected_inferior() arch = inferior.architecture() report(arch.name() == "aarch64", "connected to aarch64") except (gdb.error, AttributeError): print("SKIPPING (not connected)", file=sys.stderr) exit(0) try: # These are not very useful in scripts gdb.execute("set pagination off") # Run the actual tests run_test() except: print ("GDB Exception: %s" % (sys.exc_info()[0])) failcount += 1 print("All tests complete: %d failures" % failcount) exit(failcount)
2,169
25.790123
68
py
qemu
qemu-master/tests/tcg/s390x/gdbstub/test-signals-s390x.py
from __future__ import print_function # # Test that signals and debugging mix well together on s390x. # # This is launched via tests/guest-debug/run-test.py # import gdb import sys failcount = 0 def report(cond, msg): """Report success/fail of test""" if cond: print("PASS: %s" % (msg)) else: print("FAIL: %s" % (msg)) global failcount failcount += 1 def run_test(): """Run through the tests one by one""" illegal_op = gdb.Breakpoint("illegal_op") stg = gdb.Breakpoint("stg") mvc_8 = gdb.Breakpoint("mvc_8") # Expect the following events: # 1x illegal_op breakpoint # 2x stg breakpoint, segv, breakpoint # 2x mvc_8 breakpoint, segv, breakpoint for _ in range(14): gdb.execute("c") report(illegal_op.hit_count == 1, "illegal_op.hit_count == 1") report(stg.hit_count == 4, "stg.hit_count == 4") report(mvc_8.hit_count == 4, "mvc_8.hit_count == 4") # The test must succeed. gdb.Breakpoint("_exit") gdb.execute("c") status = int(gdb.parse_and_eval("$r2")) report(status == 0, "status == 0"); # # This runs as the script it sourced (via -x, via run-test.py) # try: inferior = gdb.selected_inferior() arch = inferior.architecture() print("ATTACHED: %s" % arch.name()) except (gdb.error, AttributeError): print("SKIPPING (not connected)", file=sys.stderr) exit(0) if gdb.parse_and_eval("$pc") == 0: print("SKIP: PC not set") exit(0) try: # These are not very useful in scripts gdb.execute("set pagination off") gdb.execute("set confirm off") # Run the actual tests run_test() except (gdb.error): print("GDB Exception: %s" % (sys.exc_info()[0])) failcount += 1 pass print("All tests complete: %d failures" % failcount) exit(failcount)
1,822
22.675325
66
py
qemu
qemu-master/tests/docker/docker.py
#!/usr/bin/env python3 # # Docker controlling module # # Copyright (c) 2016 Red Hat Inc. # # Authors: # Fam Zheng <[email protected]> # # This work is licensed under the terms of the GNU GPL, version 2 # or (at your option) any later version. See the COPYING file in # the top-level directory. import os import sys import subprocess import json import hashlib import atexit import uuid import argparse import enum import tempfile import re import signal import getpass from tarfile import TarFile, TarInfo from io import StringIO, BytesIO from shutil import copy, rmtree from datetime import datetime, timedelta FILTERED_ENV_NAMES = ['ftp_proxy', 'http_proxy', 'https_proxy'] DEVNULL = open(os.devnull, 'wb') class EngineEnum(enum.IntEnum): AUTO = 1 DOCKER = 2 PODMAN = 3 def __str__(self): return self.name.lower() def __repr__(self): return str(self) @staticmethod def argparse(s): try: return EngineEnum[s.upper()] except KeyError: return s USE_ENGINE = EngineEnum.AUTO def _bytes_checksum(bytes): """Calculate a digest string unique to the text content""" return hashlib.sha1(bytes).hexdigest() def _text_checksum(text): """Calculate a digest string unique to the text content""" return _bytes_checksum(text.encode('utf-8')) def _read_dockerfile(path): return open(path, 'rt', encoding='utf-8').read() def _file_checksum(filename): return _bytes_checksum(open(filename, 'rb').read()) def _guess_engine_command(): """ Guess a working engine command or raise exception if not found""" commands = [] if USE_ENGINE in [EngineEnum.AUTO, EngineEnum.PODMAN]: commands += [["podman"]] if USE_ENGINE in [EngineEnum.AUTO, EngineEnum.DOCKER]: commands += [["docker"], ["sudo", "-n", "docker"]] for cmd in commands: try: # docker version will return the client details in stdout # but still report a status of 1 if it can't contact the daemon if subprocess.call(cmd + ["version"], stdout=DEVNULL, stderr=DEVNULL) == 0: return cmd except OSError: pass commands_txt = "\n".join([" " + " ".join(x) for x in 
commands]) raise Exception("Cannot find working engine command. Tried:\n%s" % commands_txt) def _copy_with_mkdir(src, root_dir, sub_path='.', name=None): """Copy src into root_dir, creating sub_path as needed.""" dest_dir = os.path.normpath("%s/%s" % (root_dir, sub_path)) try: os.makedirs(dest_dir) except OSError: # we can safely ignore already created directories pass dest_file = "%s/%s" % (dest_dir, name if name else os.path.basename(src)) try: copy(src, dest_file) except FileNotFoundError: print("Couldn't copy %s to %s" % (src, dest_file)) pass def _get_so_libs(executable): """Return a list of libraries associated with an executable. The paths may be symbolic links which would need to be resolved to ensure the right data is copied.""" libs = [] ldd_re = re.compile(r"(?:\S+ => )?(\S*) \(:?0x[0-9a-f]+\)") try: ldd_output = subprocess.check_output(["ldd", executable]).decode('utf-8') for line in ldd_output.split("\n"): search = ldd_re.search(line) if search: try: libs.append(search.group(1)) except IndexError: pass except subprocess.CalledProcessError: print("%s had no associated libraries (static build?)" % (executable)) return libs def _copy_binary_with_libs(src, bin_dest, dest_dir): """Maybe copy a binary and all its dependent libraries. If bin_dest isn't set we only copy the support libraries because we don't need qemu in the docker path to run (due to persistent mapping). Indeed users may get confused if we aren't running what is in the image. This does rely on the host file-system being fairly multi-arch aware so the file don't clash with the guests layout. 
""" if bin_dest: _copy_with_mkdir(src, dest_dir, os.path.dirname(bin_dest)) else: print("only copying support libraries for %s" % (src)) libs = _get_so_libs(src) if libs: for l in libs: so_path = os.path.dirname(l) name = os.path.basename(l) real_l = os.path.realpath(l) _copy_with_mkdir(real_l, dest_dir, so_path, name) def _check_binfmt_misc(executable): """Check binfmt_misc has entry for executable in the right place. The details of setting up binfmt_misc are outside the scope of this script but we should at least fail early with a useful message if it won't work. Returns the configured binfmt path and a valid flag. For persistent configurations we will still want to copy and dependent libraries. """ binary = os.path.basename(executable) binfmt_entry = "/proc/sys/fs/binfmt_misc/%s" % (binary) if not os.path.exists(binfmt_entry): print ("No binfmt_misc entry for %s" % (binary)) return None, False with open(binfmt_entry) as x: entry = x.read() if re.search("flags:.*F.*\n", entry): print("binfmt_misc for %s uses persistent(F) mapping to host binary" % (binary)) return None, True m = re.search("interpreter (\S+)\n", entry) interp = m.group(1) if interp and interp != executable: print("binfmt_misc for %s does not point to %s, using %s" % (binary, executable, interp)) return interp, True def _read_qemu_dockerfile(img_name): # special case for Debian linux-user images if img_name.startswith("debian") and img_name.endswith("user"): img_name = "debian-bootstrap" df = os.path.join(os.path.dirname(__file__), "dockerfiles", img_name + ".docker") return _read_dockerfile(df) def _dockerfile_verify_flat(df): "Verify we do not include other qemu/ layers" for l in df.splitlines(): if len(l.strip()) == 0 or l.startswith("#"): continue from_pref = "FROM qemu/" if l.startswith(from_pref): print("We no longer support multiple QEMU layers.") print("Dockerfiles should be flat, ideally created by lcitool") return False return True class Docker(object): """ Running Docker commands """ def 
__init__(self): self._command = _guess_engine_command() if ("docker" in self._command and "TRAVIS" not in os.environ and "GITLAB_CI" not in os.environ): os.environ["DOCKER_BUILDKIT"] = "1" self._buildkit = True else: self._buildkit = False self._instance = None atexit.register(self._kill_instances) signal.signal(signal.SIGTERM, self._kill_instances) signal.signal(signal.SIGHUP, self._kill_instances) def _do(self, cmd, quiet=True, **kwargs): if quiet: kwargs["stdout"] = DEVNULL return subprocess.call(self._command + cmd, **kwargs) def _do_check(self, cmd, quiet=True, **kwargs): if quiet: kwargs["stdout"] = DEVNULL return subprocess.check_call(self._command + cmd, **kwargs) def _do_kill_instances(self, only_known, only_active=True): cmd = ["ps", "-q"] if not only_active: cmd.append("-a") filter = "--filter=label=com.qemu.instance.uuid" if only_known: if self._instance: filter += "=%s" % (self._instance) else: # no point trying to kill, we finished return print("filter=%s" % (filter)) cmd.append(filter) for i in self._output(cmd).split(): self._do(["rm", "-f", i]) def clean(self): self._do_kill_instances(False, False) return 0 def _kill_instances(self, *args, **kwargs): return self._do_kill_instances(True) def _output(self, cmd, **kwargs): try: return subprocess.check_output(self._command + cmd, stderr=subprocess.STDOUT, encoding='utf-8', **kwargs) except TypeError: # 'encoding' argument was added in 3.6+ return subprocess.check_output(self._command + cmd, stderr=subprocess.STDOUT, **kwargs).decode('utf-8') def inspect_tag(self, tag): try: return self._output(["inspect", tag]) except subprocess.CalledProcessError: return None def get_image_creation_time(self, info): return json.loads(info)[0]["Created"] def get_image_dockerfile_checksum(self, tag): resp = self.inspect_tag(tag) labels = json.loads(resp)[0]["Config"].get("Labels", {}) return labels.get("com.qemu.dockerfile-checksum", "") def build_image(self, tag, docker_dir, dockerfile, quiet=True, user=False, 
argv=None, registry=None, extra_files_cksum=[]): if argv is None: argv = [] if not _dockerfile_verify_flat(dockerfile): return -1 checksum = _text_checksum(dockerfile) tmp_df = tempfile.NamedTemporaryFile(mode="w+t", encoding='utf-8', dir=docker_dir, suffix=".docker") tmp_df.write(dockerfile) if user: uid = os.getuid() uname = getpass.getuser() tmp_df.write("\n") tmp_df.write("RUN id %s 2>/dev/null || useradd -u %d -U %s" % (uname, uid, uname)) tmp_df.write("\n") tmp_df.write("LABEL com.qemu.dockerfile-checksum=%s\n" % (checksum)) for f, c in extra_files_cksum: tmp_df.write("LABEL com.qemu.%s-checksum=%s\n" % (f, c)) tmp_df.flush() build_args = ["build", "-t", tag, "-f", tmp_df.name] if self._buildkit: build_args += ["--build-arg", "BUILDKIT_INLINE_CACHE=1"] if registry is not None: pull_args = ["pull", "%s/%s" % (registry, tag)] self._do(pull_args, quiet=quiet) cache = "%s/%s" % (registry, tag) build_args += ["--cache-from", cache] build_args += argv build_args += [docker_dir] self._do_check(build_args, quiet=quiet) def update_image(self, tag, tarball, quiet=True): "Update a tagged image using " self._do_check(["build", "-t", tag, "-"], quiet=quiet, stdin=tarball) def image_matches_dockerfile(self, tag, dockerfile): try: checksum = self.get_image_dockerfile_checksum(tag) except Exception: return False return checksum == _text_checksum(dockerfile) def run(self, cmd, keep, quiet, as_user=False): label = uuid.uuid4().hex if not keep: self._instance = label if as_user: uid = os.getuid() cmd = [ "-u", str(uid) ] + cmd # podman requires a bit more fiddling if self._command[0] == "podman": cmd.insert(0, '--userns=keep-id') ret = self._do_check(["run", "--rm", "--label", "com.qemu.instance.uuid=" + label] + cmd, quiet=quiet) if not keep: self._instance = None return ret def command(self, cmd, argv, quiet): return self._do([cmd] + argv, quiet=quiet) class SubCommand(object): """A SubCommand template base class""" name = None # Subcommand name def shared_args(self, parser): 
parser.add_argument("--quiet", action="store_true", help="Run quietly unless an error occurred") def args(self, parser): """Setup argument parser""" pass def run(self, args, argv): """Run command. args: parsed argument by argument parser. argv: remaining arguments from sys.argv. """ pass class RunCommand(SubCommand): """Invoke docker run and take care of cleaning up""" name = "run" def args(self, parser): parser.add_argument("--keep", action="store_true", help="Don't remove image when command completes") parser.add_argument("--run-as-current-user", action="store_true", help="Run container using the current user's uid") def run(self, args, argv): return Docker().run(argv, args.keep, quiet=args.quiet, as_user=args.run_as_current_user) class BuildCommand(SubCommand): """ Build docker image out of a dockerfile. Arg: <tag> <dockerfile>""" name = "build" def args(self, parser): parser.add_argument("--include-executable", "-e", help="""Specify a binary that will be copied to the container together with all its dependent libraries""") parser.add_argument("--skip-binfmt", action="store_true", help="""Skip binfmt entry check (used for testing)""") parser.add_argument("--extra-files", nargs='*', help="""Specify files that will be copied in the Docker image, fulfilling the ADD directive from the Dockerfile""") parser.add_argument("--add-current-user", "-u", dest="user", action="store_true", help="Add the current user to image's passwd") parser.add_argument("--registry", "-r", help="cache from docker registry") parser.add_argument("-t", dest="tag", help="Image Tag") parser.add_argument("-f", dest="dockerfile", help="Dockerfile name") def run(self, args, argv): dockerfile = _read_dockerfile(args.dockerfile) tag = args.tag dkr = Docker() if "--no-cache" not in argv and \ dkr.image_matches_dockerfile(tag, dockerfile): if not args.quiet: print("Image is up to date.") else: # Create a docker context directory for the build docker_dir = tempfile.mkdtemp(prefix="docker_build") # 
Validate binfmt_misc will work if args.skip_binfmt: qpath = args.include_executable elif args.include_executable: qpath, enabled = _check_binfmt_misc(args.include_executable) if not enabled: return 1 # Is there a .pre file to run in the build context? docker_pre = os.path.splitext(args.dockerfile)[0]+".pre" if os.path.exists(docker_pre): stdout = DEVNULL if args.quiet else None rc = subprocess.call(os.path.realpath(docker_pre), cwd=docker_dir, stdout=stdout) if rc == 3: print("Skip") return 0 elif rc != 0: print("%s exited with code %d" % (docker_pre, rc)) return 1 # Copy any extra files into the Docker context. These can be # included by the use of the ADD directive in the Dockerfile. cksum = [] if args.include_executable: # FIXME: there is no checksum of this executable and the linked # libraries, once the image built any change of this executable # or any library won't trigger another build. _copy_binary_with_libs(args.include_executable, qpath, docker_dir) for filename in args.extra_files or []: _copy_with_mkdir(filename, docker_dir) cksum += [(filename, _file_checksum(filename))] argv += ["--build-arg=" + k.lower() + "=" + v for k, v in os.environ.items() if k.lower() in FILTERED_ENV_NAMES] dkr.build_image(tag, docker_dir, dockerfile, quiet=args.quiet, user=args.user, argv=argv, registry=args.registry, extra_files_cksum=cksum) rmtree(docker_dir) return 0 class FetchCommand(SubCommand): """ Fetch a docker image from the registry. Args: <tag> <registry>""" name = "fetch" def args(self, parser): parser.add_argument("tag", help="Local tag for image") parser.add_argument("registry", help="Docker registry") def run(self, args, argv): dkr = Docker() dkr.command(cmd="pull", quiet=args.quiet, argv=["%s/%s" % (args.registry, args.tag)]) dkr.command(cmd="tag", quiet=args.quiet, argv=["%s/%s" % (args.registry, args.tag), args.tag]) class UpdateCommand(SubCommand): """ Update a docker image. 
Args: <tag> <actions>""" name = "update" def args(self, parser): parser.add_argument("tag", help="Image Tag") parser.add_argument("--executable", help="Executable to copy") parser.add_argument("--add-current-user", "-u", dest="user", action="store_true", help="Add the current user to image's passwd") def run(self, args, argv): # Create a temporary tarball with our whole build context and # dockerfile for the update tmp = tempfile.NamedTemporaryFile(suffix="dckr.tar.gz") tmp_tar = TarFile(fileobj=tmp, mode='w') # Create a Docker buildfile df = StringIO() df.write(u"FROM %s\n" % args.tag) if args.executable: # Add the executable to the tarball, using the current # configured binfmt_misc path. If we don't get a path then we # only need the support libraries copied ff, enabled = _check_binfmt_misc(args.executable) if not enabled: print("binfmt_misc not enabled, update disabled") return 1 if ff: tmp_tar.add(args.executable, arcname=ff) # Add any associated libraries libs = _get_so_libs(args.executable) if libs: for l in libs: so_path = os.path.dirname(l) name = os.path.basename(l) real_l = os.path.realpath(l) try: tmp_tar.add(real_l, arcname="%s/%s" % (so_path, name)) except FileNotFoundError: print("Couldn't add %s/%s to archive" % (so_path, name)) pass df.write(u"ADD . 
/\n") if args.user: uid = os.getuid() uname = getpass.getuser() df.write("\n") df.write("RUN id %s 2>/dev/null || useradd -u %d -U %s" % (uname, uid, uname)) df_bytes = BytesIO(bytes(df.getvalue(), "UTF-8")) df_tar = TarInfo(name="Dockerfile") df_tar.size = df_bytes.getbuffer().nbytes tmp_tar.addfile(df_tar, fileobj=df_bytes) tmp_tar.close() # reset the file pointers tmp.flush() tmp.seek(0) # Run the build with our tarball context dkr = Docker() dkr.update_image(args.tag, tmp, quiet=args.quiet) return 0 class CleanCommand(SubCommand): """Clean up docker instances""" name = "clean" def run(self, args, argv): Docker().clean() return 0 class ImagesCommand(SubCommand): """Run "docker images" command""" name = "images" def run(self, args, argv): return Docker().command("images", argv, args.quiet) class ProbeCommand(SubCommand): """Probe if we can run docker automatically""" name = "probe" def run(self, args, argv): try: docker = Docker() if docker._command[0] == "docker": print("docker") elif docker._command[0] == "sudo": print("sudo docker") elif docker._command[0] == "podman": print("podman") except Exception: print("no") return class CcCommand(SubCommand): """Compile sources with cc in images""" name = "cc" def args(self, parser): parser.add_argument("--image", "-i", required=True, help="The docker image in which to run cc") parser.add_argument("--cc", default="cc", help="The compiler executable to call") parser.add_argument("--source-path", "-s", nargs="*", dest="paths", help="""Extra paths to (ro) mount into container for reading sources""") def run(self, args, argv): if argv and argv[0] == "--": argv = argv[1:] cwd = os.getcwd() cmd = ["-w", cwd, "-v", "%s:%s:rw" % (cwd, cwd)] if args.paths: for p in args.paths: cmd += ["-v", "%s:%s:ro,z" % (p, p)] cmd += [args.image, args.cc] cmd += argv return Docker().run(cmd, False, quiet=args.quiet, as_user=True) def main(): global USE_ENGINE parser = argparse.ArgumentParser(description="A Docker helper", usage="%s 
<subcommand> ..." % os.path.basename(sys.argv[0])) parser.add_argument("--engine", type=EngineEnum.argparse, choices=list(EngineEnum), help="specify which container engine to use") subparsers = parser.add_subparsers(title="subcommands", help=None) for cls in SubCommand.__subclasses__(): cmd = cls() subp = subparsers.add_parser(cmd.name, help=cmd.__doc__) cmd.shared_args(subp) cmd.args(subp) subp.set_defaults(cmdobj=cmd) args, argv = parser.parse_known_args() if args.engine: USE_ENGINE = args.engine return args.cmdobj.run(args, argv) if __name__ == "__main__": sys.exit(main())
22,876
32.445906
87
py
qemu
qemu-master/tests/vm/basevm.py
# # VM testing base class # # Copyright 2017-2019 Red Hat Inc. # # Authors: # Fam Zheng <[email protected]> # Gerd Hoffmann <[email protected]> # # This code is licensed under the GPL version 2 or later. See # the COPYING file in the top-level directory. # import os import re import sys import socket import logging import time import datetime import subprocess import hashlib import argparse import atexit import tempfile import shutil import multiprocessing import traceback import shlex from qemu.machine import QEMUMachine from qemu.utils import get_info_usernet_hostfwd_port, kvm_available SSH_KEY_FILE = os.path.join(os.path.dirname(__file__), "..", "keys", "id_rsa") SSH_PUB_KEY_FILE = os.path.join(os.path.dirname(__file__), "..", "keys", "id_rsa.pub") # This is the standard configuration. # Any or all of these can be overridden by # passing in a config argument to the VM constructor. DEFAULT_CONFIG = { 'cpu' : "max", 'machine' : 'pc', 'guest_user' : "qemu", 'guest_pass' : "qemupass", 'root_user' : "root", 'root_pass' : "qemupass", 'ssh_key_file' : SSH_KEY_FILE, 'ssh_pub_key_file': SSH_PUB_KEY_FILE, 'memory' : "4G", 'extra_args' : [], 'qemu_args' : "", 'dns' : "", 'ssh_port' : 0, 'install_cmds' : "", 'boot_dev_type' : "block", 'ssh_timeout' : 1, } BOOT_DEVICE = { 'block' : "-drive file={},if=none,id=drive0,cache=writeback "\ "-device virtio-blk,drive=drive0,bootindex=0", 'scsi' : "-device virtio-scsi-device,id=scsi "\ "-drive file={},format=raw,if=none,id=hd0 "\ "-device scsi-hd,drive=hd0,bootindex=0", } class BaseVM(object): envvars = [ "https_proxy", "http_proxy", "ftp_proxy", "no_proxy", ] # The script to run in the guest that builds QEMU BUILD_SCRIPT = "" # The guest name, to be overridden by subclasses name = "#base" # The guest architecture, to be overridden by subclasses arch = "#arch" # command to halt the guest, can be overridden by subclasses poweroff = "poweroff" # Time to wait for shutdown to finish. 
shutdown_timeout_default = 30 # enable IPv6 networking ipv6 = True # This is the timeout on the wait for console bytes. socket_timeout = 120 # Scale up some timeouts under TCG. # 4 is arbitrary, but greater than 2, # since we found we need to wait more than twice as long. tcg_timeout_multiplier = 4 def __init__(self, args, config=None): self._guest = None self._genisoimage = args.genisoimage self._build_path = args.build_path self._efi_aarch64 = args.efi_aarch64 self._source_path = args.source_path # Allow input config to override defaults. self._config = DEFAULT_CONFIG.copy() # 1GB per core, minimum of 4. This is only a default. mem = max(4, args.jobs) self._config['memory'] = f"{mem}G" if config != None: self._config.update(config) self.validate_ssh_keys() self._tmpdir = os.path.realpath(tempfile.mkdtemp(prefix="vm-test-", suffix=".tmp", dir=".")) atexit.register(shutil.rmtree, self._tmpdir) # Copy the key files to a temporary directory. # Also chmod the key file to agree with ssh requirements. 
self._config['ssh_key'] = \ open(self._config['ssh_key_file']).read().rstrip() self._config['ssh_pub_key'] = \ open(self._config['ssh_pub_key_file']).read().rstrip() self._ssh_tmp_key_file = os.path.join(self._tmpdir, "id_rsa") open(self._ssh_tmp_key_file, "w").write(self._config['ssh_key']) subprocess.check_call(["chmod", "600", self._ssh_tmp_key_file]) self._ssh_tmp_pub_key_file = os.path.join(self._tmpdir, "id_rsa.pub") open(self._ssh_tmp_pub_key_file, "w").write(self._config['ssh_pub_key']) self.debug = args.debug self._console_log_path = None if args.log_console: self._console_log_path = \ os.path.join(os.path.expanduser("~/.cache/qemu-vm"), "{}.install.log".format(self.name)) self._stderr = sys.stderr self._devnull = open(os.devnull, "w") if self.debug: self._stdout = sys.stdout else: self._stdout = self._devnull netdev = "user,id=vnet,hostfwd=:127.0.0.1:{}-:22" self._args = [ \ "-nodefaults", "-m", self._config['memory'], "-cpu", self._config['cpu'], "-netdev", netdev.format(self._config['ssh_port']) + (",ipv6=no" if not self.ipv6 else "") + (",dns=" + self._config['dns'] if self._config['dns'] else ""), "-device", "virtio-net-pci,netdev=vnet", "-vnc", "127.0.0.1:0,to=20"] if args.jobs and args.jobs > 1: self._args += ["-smp", "%d" % args.jobs] if kvm_available(self.arch): self._shutdown_timeout = self.shutdown_timeout_default self._args += ["-enable-kvm"] else: logging.info("KVM not available, not using -enable-kvm") self._shutdown_timeout = \ self.shutdown_timeout_default * self.tcg_timeout_multiplier self._data_args = [] if self._config['qemu_args'] != None: qemu_args = self._config['qemu_args'] qemu_args = qemu_args.replace('\n',' ').replace('\r','') # shlex groups quoted arguments together # we need this to keep the quoted args together for when # the QEMU command is issued later. args = shlex.split(qemu_args) self._config['extra_args'] = [] for arg in args: if arg: # Preserve quotes around arguments. # shlex above takes them out, so add them in. 
if " " in arg: arg = '"{}"'.format(arg) self._config['extra_args'].append(arg) def validate_ssh_keys(self): """Check to see if the ssh key files exist.""" if 'ssh_key_file' not in self._config or\ not os.path.exists(self._config['ssh_key_file']): raise Exception("ssh key file not found.") if 'ssh_pub_key_file' not in self._config or\ not os.path.exists(self._config['ssh_pub_key_file']): raise Exception("ssh pub key file not found.") def wait_boot(self, wait_string=None): """Wait for the standard string we expect on completion of a normal boot. The user can also choose to override with an alternate string to wait for.""" if wait_string is None: if self.login_prompt is None: raise Exception("self.login_prompt not defined") wait_string = self.login_prompt # Intentionally bump up the default timeout under TCG, # since the console wait below takes longer. timeout = self.socket_timeout if not kvm_available(self.arch): timeout *= 8 self.console_init(timeout=timeout) self.console_wait(wait_string) def _download_with_cache(self, url, sha256sum=None, sha512sum=None): def check_sha256sum(fname): if not sha256sum: return True checksum = subprocess.check_output(["sha256sum", fname]).split()[0] return sha256sum == checksum.decode("utf-8") def check_sha512sum(fname): if not sha512sum: return True checksum = subprocess.check_output(["sha512sum", fname]).split()[0] return sha512sum == checksum.decode("utf-8") cache_dir = os.path.expanduser("~/.cache/qemu-vm/download") if not os.path.exists(cache_dir): os.makedirs(cache_dir) fname = os.path.join(cache_dir, hashlib.sha1(url.encode("utf-8")).hexdigest()) if os.path.exists(fname) and check_sha256sum(fname) and check_sha512sum(fname): return fname logging.debug("Downloading %s to %s...", url, fname) subprocess.check_call(["wget", "-c", url, "-O", fname + ".download"], stdout=self._stdout, stderr=self._stderr) os.rename(fname + ".download", fname) return fname def _ssh_do(self, user, cmd, check): ssh_cmd = ["ssh", "-t", "-o", 
"StrictHostKeyChecking=no", "-o", "UserKnownHostsFile=" + os.devnull, "-o", "ConnectTimeout={}".format(self._config["ssh_timeout"]), "-p", str(self.ssh_port), "-i", self._ssh_tmp_key_file, "-o", "IdentitiesOnly=yes"] # If not in debug mode, set ssh to quiet mode to # avoid printing the results of commands. if not self.debug: ssh_cmd.append("-q") for var in self.envvars: ssh_cmd += ['-o', "SendEnv=%s" % var ] assert not isinstance(cmd, str) ssh_cmd += ["%[email protected]" % user] + list(cmd) logging.debug("ssh_cmd: %s", " ".join(ssh_cmd)) r = subprocess.call(ssh_cmd) if check and r != 0: raise Exception("SSH command failed: %s" % cmd) return r def ssh(self, *cmd): return self._ssh_do(self._config["guest_user"], cmd, False) def ssh_root(self, *cmd): return self._ssh_do(self._config["root_user"], cmd, False) def ssh_check(self, *cmd): self._ssh_do(self._config["guest_user"], cmd, True) def ssh_root_check(self, *cmd): self._ssh_do(self._config["root_user"], cmd, True) def build_image(self, img): raise NotImplementedError def exec_qemu_img(self, *args): cmd = [os.environ.get("QEMU_IMG", "qemu-img")] cmd.extend(list(args)) subprocess.check_call(cmd) def add_source_dir(self, src_dir): name = "data-" + hashlib.sha1(src_dir.encode("utf-8")).hexdigest()[:5] tarfile = os.path.join(self._tmpdir, name + ".tar") logging.debug("Creating archive %s for src_dir dir: %s", tarfile, src_dir) subprocess.check_call(["./scripts/archive-source.sh", tarfile], cwd=src_dir, stdin=self._devnull, stdout=self._stdout, stderr=self._stderr) self._data_args += ["-drive", "file=%s,if=none,id=%s,cache=writeback,format=raw" % \ (tarfile, name), "-device", "virtio-blk,drive=%s,serial=%s,bootindex=1" % (name, name)] def boot(self, img, extra_args=[]): boot_dev = BOOT_DEVICE[self._config['boot_dev_type']] boot_params = boot_dev.format(img) args = self._args + boot_params.split(' ') args += self._data_args + extra_args + self._config['extra_args'] logging.debug("QEMU args: %s", " ".join(args)) qemu_path 
= get_qemu_path(self.arch, self._build_path) # Since console_log_path is only set when the user provides the # log_console option, we will set drain_console=True so the # console is always drained. guest = QEMUMachine(binary=qemu_path, args=args, console_log=self._console_log_path, drain_console=True) guest.set_machine(self._config['machine']) guest.set_console() try: guest.launch() except: logging.error("Failed to launch QEMU, command line:") logging.error(" ".join([qemu_path] + args)) logging.error("Log:") logging.error(guest.get_log()) logging.error("QEMU version >= 2.10 is required") raise atexit.register(self.shutdown) self._guest = guest # Init console so we can start consuming the chars. self.console_init() usernet_info = guest.qmp("human-monitor-command", command_line="info usernet").get("return") self.ssh_port = get_info_usernet_hostfwd_port(usernet_info) if not self.ssh_port: raise Exception("Cannot find ssh port from 'info usernet':\n%s" % \ usernet_info) def console_init(self, timeout = None): if timeout == None: timeout = self.socket_timeout vm = self._guest vm.console_socket.settimeout(timeout) self.console_raw_path = os.path.join(vm._temp_dir, vm._name + "-console.raw") self.console_raw_file = open(self.console_raw_path, 'wb') def console_log(self, text): for line in re.split("[\r\n]", text): # filter out terminal escape sequences line = re.sub("\x1b\[[0-9;?]*[a-zA-Z]", "", line) line = re.sub("\x1b\([0-9;?]*[a-zA-Z]", "", line) # replace unprintable chars line = re.sub("\x1b", "<esc>", line) line = re.sub("[\x00-\x1f]", ".", line) line = re.sub("[\x80-\xff]", ".", line) if line == "": continue # log console line sys.stderr.write("con recv: %s\n" % line) def console_wait(self, expect, expectalt = None): vm = self._guest output = "" while True: try: chars = vm.console_socket.recv(1) if self.console_raw_file: self.console_raw_file.write(chars) self.console_raw_file.flush() except socket.timeout: sys.stderr.write("console: *** read timeout ***\n") 
sys.stderr.write("console: waiting for: '%s'\n" % expect) if not expectalt is None: sys.stderr.write("console: waiting for: '%s' (alt)\n" % expectalt) sys.stderr.write("console: line buffer:\n") sys.stderr.write("\n") self.console_log(output.rstrip()) sys.stderr.write("\n") raise output += chars.decode("latin1") if expect in output: break if not expectalt is None and expectalt in output: break if "\r" in output or "\n" in output: lines = re.split("[\r\n]", output) output = lines.pop() if self.debug: self.console_log("\n".join(lines)) if self.debug: self.console_log(output) if not expectalt is None and expectalt in output: return False return True def console_consume(self): vm = self._guest output = "" vm.console_socket.setblocking(0) while True: try: chars = vm.console_socket.recv(1) except: break output += chars.decode("latin1") if "\r" in output or "\n" in output: lines = re.split("[\r\n]", output) output = lines.pop() if self.debug: self.console_log("\n".join(lines)) if self.debug: self.console_log(output) vm.console_socket.setblocking(1) def console_send(self, command): vm = self._guest if self.debug: logline = re.sub("\n", "<enter>", command) logline = re.sub("[\x00-\x1f]", ".", logline) sys.stderr.write("con send: %s\n" % logline) for char in list(command): vm.console_socket.send(char.encode("utf-8")) time.sleep(0.01) def console_wait_send(self, wait, command): self.console_wait(wait) self.console_send(command) def console_ssh_init(self, prompt, user, pw): sshkey_cmd = "echo '%s' > .ssh/authorized_keys\n" \ % self._config['ssh_pub_key'].rstrip() self.console_wait_send("login:", "%s\n" % user) self.console_wait_send("Password:", "%s\n" % pw) self.console_wait_send(prompt, "mkdir .ssh\n") self.console_wait_send(prompt, sshkey_cmd) self.console_wait_send(prompt, "chmod 755 .ssh\n") self.console_wait_send(prompt, "chmod 644 .ssh/authorized_keys\n") def console_sshd_config(self, prompt): self.console_wait(prompt) self.console_send("echo 'PermitRootLogin yes' >> 
/etc/ssh/sshd_config\n") for var in self.envvars: self.console_wait(prompt) self.console_send("echo 'AcceptEnv %s' >> /etc/ssh/sshd_config\n" % var) def print_step(self, text): sys.stderr.write("### %s ...\n" % text) def wait_ssh(self, wait_root=False, seconds=300, cmd="exit 0"): # Allow more time for VM to boot under TCG. if not kvm_available(self.arch): seconds *= self.tcg_timeout_multiplier starttime = datetime.datetime.now() endtime = starttime + datetime.timedelta(seconds=seconds) cmd_success = False while datetime.datetime.now() < endtime: if wait_root and self.ssh_root(cmd) == 0: cmd_success = True break elif self.ssh(cmd) == 0: cmd_success = True break seconds = (endtime - datetime.datetime.now()).total_seconds() logging.debug("%ds before timeout", seconds) time.sleep(1) if not cmd_success: raise Exception("Timeout while waiting for guest ssh") def shutdown(self): self._guest.shutdown(timeout=self._shutdown_timeout) def wait(self): self._guest.wait(timeout=self._shutdown_timeout) def graceful_shutdown(self): self.ssh_root(self.poweroff) self._guest.wait(timeout=self._shutdown_timeout) def qmp(self, *args, **kwargs): return self._guest.qmp(*args, **kwargs) def gen_cloud_init_iso(self): cidir = self._tmpdir mdata = open(os.path.join(cidir, "meta-data"), "w") name = self.name.replace(".","-") mdata.writelines(["instance-id: {}-vm-0\n".format(name), "local-hostname: {}-guest\n".format(name)]) mdata.close() udata = open(os.path.join(cidir, "user-data"), "w") print("guest user:pw {}:{}".format(self._config['guest_user'], self._config['guest_pass'])) udata.writelines(["#cloud-config\n", "chpasswd:\n", " list: |\n", " root:%s\n" % self._config['root_pass'], " %s:%s\n" % (self._config['guest_user'], self._config['guest_pass']), " expire: False\n", "users:\n", " - name: %s\n" % self._config['guest_user'], " sudo: ALL=(ALL) NOPASSWD:ALL\n", " ssh-authorized-keys:\n", " - %s\n" % self._config['ssh_pub_key'], " - name: root\n", " ssh-authorized-keys:\n", " - %s\n" % 
self._config['ssh_pub_key'], "locale: en_US.UTF-8\n"]) proxy = os.environ.get("http_proxy") if not proxy is None: udata.writelines(["apt:\n", " proxy: %s" % proxy]) udata.close() subprocess.check_call([self._genisoimage, "-output", "cloud-init.iso", "-volid", "cidata", "-joliet", "-rock", "user-data", "meta-data"], cwd=cidir, stdin=self._devnull, stdout=self._stdout, stderr=self._stdout) return os.path.join(cidir, "cloud-init.iso") def get_qemu_path(arch, build_path=None): """Fetch the path to the qemu binary.""" # If QEMU environment variable set, it takes precedence if "QEMU" in os.environ: qemu_path = os.environ["QEMU"] elif build_path: qemu_path = os.path.join(build_path, arch + "-softmmu") qemu_path = os.path.join(qemu_path, "qemu-system-" + arch) else: # Default is to use system path for qemu. qemu_path = "qemu-system-" + arch return qemu_path def get_qemu_version(qemu_path): """Get the version number from the current QEMU, and return the major number.""" output = subprocess.check_output([qemu_path, '--version']) version_line = output.decode("utf-8") version_num = re.split(' |\(', version_line)[3].split('.')[0] return int(version_num) def parse_config(config, args): """ Parse yaml config and populate our config structure. The yaml config allows the user to override the defaults for VM parameters. In many cases these defaults can be overridden without rebuilding the VM.""" if args.config: config_file = args.config elif 'QEMU_CONFIG' in os.environ: config_file = os.environ['QEMU_CONFIG'] else: return config if not os.path.exists(config_file): raise Exception("config file {} does not exist".format(config_file)) # We gracefully handle importing the yaml module # since it might not be installed. # If we are here it means the user supplied a .yml file, # so if the yaml module is not installed we will exit with error. 
try: import yaml except ImportError: print("The python3-yaml package is needed "\ "to support config.yaml files") # Instead of raising an exception we exit to avoid # a raft of messy (expected) errors to stdout. exit(1) with open(config_file) as f: yaml_dict = yaml.safe_load(f) if 'qemu-conf' in yaml_dict: config.update(yaml_dict['qemu-conf']) else: raise Exception("config file {} is not valid"\ " missing qemu-conf".format(config_file)) return config def parse_args(vmcls): def get_default_jobs(): if multiprocessing.cpu_count() > 1: if kvm_available(vmcls.arch): return multiprocessing.cpu_count() // 2 elif os.uname().machine == "x86_64" and \ vmcls.arch in ["aarch64", "x86_64", "i386"]: # MTTCG is available on these arches and we can allow # more cores. but only up to a reasonable limit. User # can always override these limits with --jobs. return min(multiprocessing.cpu_count() // 2, 8) return 1 parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Utility for provisioning VMs and running builds", epilog="""Remaining arguments are passed to the command. 
Exit codes: 0 = success, 1 = command line error, 2 = environment initialization failed, 3 = test command failed""") parser.add_argument("--debug", "-D", action="store_true", help="enable debug output") parser.add_argument("--image", "-i", default="%s.img" % vmcls.name, help="image file name") parser.add_argument("--force", "-f", action="store_true", help="force build image even if image exists") parser.add_argument("--jobs", type=int, default=get_default_jobs(), help="number of virtual CPUs") parser.add_argument("--verbose", "-V", action="store_true", help="Pass V=1 to builds within the guest") parser.add_argument("--build-image", "-b", action="store_true", help="build image") parser.add_argument("--build-qemu", help="build QEMU from source in guest") parser.add_argument("--build-target", help="QEMU build target", default="check") parser.add_argument("--build-path", default=None, help="Path of build directory, "\ "for using build tree QEMU binary. ") parser.add_argument("--source-path", default=None, help="Path of source directory, "\ "for finding additional files. ") parser.add_argument("--interactive", "-I", action="store_true", help="Interactively run command") parser.add_argument("--snapshot", "-s", action="store_true", help="run tests with a snapshot") parser.add_argument("--genisoimage", default="genisoimage", help="iso imaging tool") parser.add_argument("--config", "-c", default=None, help="Provide config yaml for configuration. 
"\ "See config_example.yaml for example.") parser.add_argument("--efi-aarch64", default="/usr/share/qemu-efi-aarch64/QEMU_EFI.fd", help="Path to efi image for aarch64 VMs.") parser.add_argument("--log-console", action="store_true", help="Log console to file.") parser.add_argument("commands", nargs="*", help="""Remaining commands after -- are passed to command inside the VM""") return parser.parse_args() def main(vmcls, config=None): try: if config == None: config = DEFAULT_CONFIG args = parse_args(vmcls) if not args.commands and not args.build_qemu and not args.build_image: print("Nothing to do?") return 1 config = parse_config(config, args) logging.basicConfig(level=(logging.DEBUG if args.debug else logging.WARN)) vm = vmcls(args, config=config) if args.build_image: if os.path.exists(args.image) and not args.force: sys.stderr.writelines(["Image file exists: %s\n" % args.image, "Use --force option to overwrite\n"]) return 1 return vm.build_image(args.image) if args.build_qemu: vm.add_source_dir(args.build_qemu) cmd = [vm.BUILD_SCRIPT.format( configure_opts = " ".join(args.commands), jobs=int(args.jobs), target=args.build_target, verbose = "V=1" if args.verbose else "")] else: cmd = args.commands img = args.image if args.snapshot: img += ",snapshot=on" vm.boot(img) vm.wait_ssh() except Exception as e: if isinstance(e, SystemExit) and e.code == 0: return 0 sys.stderr.write("Failed to prepare guest environment\n") traceback.print_exc() return 2 exitcode = 0 if vm.ssh(*cmd) != 0: exitcode = 3 if args.interactive: vm.ssh() if not args.snapshot: vm.graceful_shutdown() return exitcode
27,602
40.137109
87
py
qemu
qemu-master/tests/vm/ubuntuvm.py
#!/usr/bin/env python3 # # Ubuntu VM testing library # # Copyright 2017 Red Hat Inc. # Copyright 2020 Linaro # # Authors: # Robert Foley <[email protected]> # Originally based on ubuntu.i386 Fam Zheng <[email protected]> # # This code is licensed under the GPL version 2 or later. See # the COPYING file in the top-level directory. import os import subprocess import basevm class UbuntuVM(basevm.BaseVM): def __init__(self, args, config=None): self.login_prompt = "ubuntu-{}-guest login:".format(self.arch) basevm.BaseVM.__init__(self, args, config) def build_image(self, img): """Build an Ubuntu VM image. The child class will define the install_cmds to init the VM.""" os_img = self._download_with_cache(self.image_link, sha256sum=self.image_sha256) img_tmp = img + ".tmp" subprocess.check_call(["cp", "-f", os_img, img_tmp]) self.exec_qemu_img("resize", img_tmp, "+50G") ci_img = self.gen_cloud_init_iso() self.boot(img_tmp, extra_args = [ "-device", "VGA", "-cdrom", ci_img, ]) # First command we issue is fix for slow ssh login. self.wait_ssh(wait_root=True, cmd="chmod -x /etc/update-motd.d/*") # Wait for cloud init to finish self.wait_ssh(wait_root=True, cmd="ls /var/lib/cloud/instance/boot-finished") self.ssh_root("touch /etc/cloud/cloud-init.disabled") # Disable auto upgrades. # We want to keep the VM system state stable. self.ssh_root('sed -ie \'s/"1"/"0"/g\' '\ '/etc/apt/apt.conf.d/20auto-upgrades') self.ssh_root("sed -ie s/^#\ deb-src/deb-src/g /etc/apt/sources.list") # If the user chooses not to do the install phase, # then we will jump right to the graceful shutdown if self._config['install_cmds'] != "": # Issue the install commands. # This can be overriden by the user in the config .yml. install_cmds = self._config['install_cmds'].split(',') for cmd in install_cmds: self.ssh_root(cmd) self.graceful_shutdown() os.rename(img_tmp, img) return 0
2,271
36.245902
80
py
qemu
qemu-master/tests/vm/aarch64vm.py
#!/usr/bin/env python3 # # VM testing aarch64 library # # Copyright 2020 Linaro # # Authors: # Robert Foley <[email protected]> # # This code is licensed under the GPL version 2 or later. See # the COPYING file in the top-level directory. # import os import sys import subprocess import basevm from qemu.utils import kvm_available # This is the config needed for current version of QEMU. # This works for both kvm and tcg. CURRENT_CONFIG = { 'cpu' : "max", 'machine' : "virt,gic-version=max", } # The minimum minor version of QEMU we will support with aarch64 VMs is 3. # QEMU versions less than 3 have various issues running these VMs. QEMU_AARCH64_MIN_VERSION = 3 # The DEFAULT_CONFIG will default to a version of # parameters that works for backwards compatibility. DEFAULT_CONFIG = {'kvm' : {'cpu' : "host", 'machine' : "virt,gic-version=host"}, 'tcg' : {'cpu' : "cortex-a57", 'machine' : "virt"}, } def get_config_defaults(vmcls, default_config): """Fetch the configuration defaults for this VM, taking into consideration the defaults for aarch64 first, followed by the defaults for this VM.""" config = default_config config.update(aarch_get_config_defaults(vmcls)) return config def aarch_get_config_defaults(vmcls): """Set the defaults for current version of QEMU.""" config = CURRENT_CONFIG args = basevm.parse_args(vmcls) qemu_path = basevm.get_qemu_path(vmcls.arch, args.build_path) qemu_version = basevm.get_qemu_version(qemu_path) if qemu_version < QEMU_AARCH64_MIN_VERSION: error = "\nThis major version of QEMU {} is to old for aarch64 VMs.\n"\ "The major version must be at least {}.\n"\ "To continue with the current build of QEMU, "\ "please restart with QEMU_LOCAL=1 .\n" print(error.format(qemu_version, QEMU_AARCH64_MIN_VERSION)) exit(1) if qemu_version == QEMU_AARCH64_MIN_VERSION: # We have an older version of QEMU, # set the config values for backwards compatibility. 
if kvm_available('aarch64'): config.update(DEFAULT_CONFIG['kvm']) else: config.update(DEFAULT_CONFIG['tcg']) return config def create_flash_images(flash_dir="./", efi_img=""): """Creates the appropriate pflash files for an aarch64 VM.""" flash0_path = get_flash_path(flash_dir, "flash0") flash1_path = get_flash_path(flash_dir, "flash1") fd_null = open(os.devnull, 'w') subprocess.check_call(["dd", "if=/dev/zero", "of={}".format(flash0_path), "bs=1M", "count=64"], stdout=fd_null, stderr=subprocess.STDOUT) # A reliable way to get the QEMU EFI image is via an installed package or # via the bios included with qemu. if not os.path.exists(efi_img): sys.stderr.write("*** efi argument is invalid ({})\n".format(efi_img)) sys.stderr.write("*** please check --efi-aarch64 argument or "\ "install qemu-efi-aarch64 package\n") exit(3) subprocess.check_call(["dd", "if={}".format(efi_img), "of={}".format(flash0_path), "conv=notrunc"], stdout=fd_null, stderr=subprocess.STDOUT) subprocess.check_call(["dd", "if=/dev/zero", "of={}".format(flash1_path), "bs=1M", "count=64"], stdout=fd_null, stderr=subprocess.STDOUT) fd_null.close() def get_pflash_args(flash_dir="./"): """Returns a string that can be used to boot qemu using the appropriate pflash files for aarch64.""" flash0_path = get_flash_path(flash_dir, "flash0") flash1_path = get_flash_path(flash_dir, "flash1") pflash_args_str = "-drive file={},format=raw,if=pflash "\ "-drive file={},format=raw,if=pflash" pflash_args = pflash_args_str.format(flash0_path, flash1_path) return pflash_args.split(" ") def get_flash_path(flash_dir, name): return os.path.join(flash_dir, "{}.img".format(name))
4,245
38.682243
79
py
qemu
qemu-master/tests/image-fuzzer/runner.py
#!/usr/bin/env python3 # Tool for running fuzz tests # # Copyright (C) 2014 Maria Kustova <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import sys import os import signal import subprocess import random import shutil from itertools import count import time import getopt import io import resource try: import json except ImportError: try: import simplejson as json except ImportError: print("Warning: Module for JSON processing is not found.\n" \ "'--config' and '--command' options are not supported.", file=sys.stderr) # Backing file sizes in MB MAX_BACKING_FILE_SIZE = 10 MIN_BACKING_FILE_SIZE = 1 def multilog(msg, *output): """ Write an object to all of specified file descriptors.""" for fd in output: fd.write(msg) fd.flush() def str_signal(sig): """ Convert a numeric value of a system signal to the string one defined by the current operational system. """ for k, v in signal.__dict__.items(): if v == sig: return k def run_app(fd, q_args): """Start an application with specified arguments and return its exit code or kill signal depending on the result of execution. 
""" class Alarm(Exception): """Exception for signal.alarm events.""" pass def handler(*args): """Notify that an alarm event occurred.""" raise Alarm signal.signal(signal.SIGALRM, handler) signal.alarm(600) term_signal = signal.SIGKILL devnull = open('/dev/null', 'r+') process = subprocess.Popen(q_args, stdin=devnull, stdout=subprocess.PIPE, stderr=subprocess.PIPE, errors='replace') try: out, err = process.communicate() signal.alarm(0) fd.write(out) fd.write(err) fd.flush() return process.returncode except Alarm: os.kill(process.pid, term_signal) fd.write('The command was terminated by timeout.\n') fd.flush() return -term_signal class TestException(Exception): """Exception for errors risen by TestEnv objects.""" pass class TestEnv(object): """Test object. The class sets up test environment, generates backing and test images and executes application under tests with specified arguments and a test image provided. All logs are collected. The summary log will contain short descriptions and statuses of tests in a run. The test log will include application (e.g. 'qemu-img') logs besides info sent to the summary log. """ def __init__(self, test_id, seed, work_dir, run_log, cleanup=True, log_all=False): """Set test environment in a specified work directory. Path to qemu-img and qemu-io will be retrieved from 'QEMU_IMG' and 'QEMU_IO' environment variables. 
""" if seed is not None: self.seed = seed else: self.seed = str(random.randint(0, sys.maxsize)) random.seed(self.seed) self.init_path = os.getcwd() self.work_dir = work_dir self.current_dir = os.path.join(work_dir, 'test-' + test_id) self.qemu_img = \ os.environ.get('QEMU_IMG', 'qemu-img').strip().split(' ') self.qemu_io = os.environ.get('QEMU_IO', 'qemu-io').strip().split(' ') self.commands = [['qemu-img', 'check', '-f', 'qcow2', '$test_img'], ['qemu-img', 'info', '-f', 'qcow2', '$test_img'], ['qemu-io', '$test_img', '-c', 'read $off $len'], ['qemu-io', '$test_img', '-c', 'write $off $len'], ['qemu-io', '$test_img', '-c', 'aio_read $off $len'], ['qemu-io', '$test_img', '-c', 'aio_write $off $len'], ['qemu-io', '$test_img', '-c', 'flush'], ['qemu-io', '$test_img', '-c', 'discard $off $len'], ['qemu-io', '$test_img', '-c', 'truncate $off']] for fmt in ['raw', 'vmdk', 'vdi', 'qcow2', 'file', 'qed', 'vpc']: self.commands.append( ['qemu-img', 'convert', '-f', 'qcow2', '-O', fmt, '$test_img', 'converted_image.' + fmt]) try: os.makedirs(self.current_dir) except OSError as e: print("Error: The working directory '%s' cannot be used. Reason: %s"\ % (self.work_dir, e.strerror), file=sys.stderr) raise TestException self.log = open(os.path.join(self.current_dir, "test.log"), "w") self.parent_log = open(run_log, "a") self.failed = False self.cleanup = cleanup self.log_all = log_all def _create_backing_file(self): """Create a backing file in the current directory. Return a tuple of a backing file name and format. Format of a backing file is randomly chosen from all formats supported by 'qemu-img create'. """ # All formats supported by the 'qemu-img create' command. backing_file_fmt = random.choice(['raw', 'vmdk', 'vdi', 'qcow2', 'file', 'qed', 'vpc']) backing_file_name = 'backing_img.' 
+ backing_file_fmt backing_file_size = random.randint(MIN_BACKING_FILE_SIZE, MAX_BACKING_FILE_SIZE) * (1 << 20) cmd = self.qemu_img + ['create', '-f', backing_file_fmt, backing_file_name, str(backing_file_size)] temp_log = io.StringIO() retcode = run_app(temp_log, cmd) if retcode == 0: temp_log.close() return (backing_file_name, backing_file_fmt) else: multilog("Warning: The %s backing file was not created.\n\n" % backing_file_fmt, sys.stderr, self.log, self.parent_log) self.log.write("Log for the failure:\n" + temp_log.getvalue() + '\n\n') temp_log.close() return (None, None) def execute(self, input_commands=None, fuzz_config=None): """ Execute a test. The method creates backing and test images, runs test app and analyzes its exit status. If the application was killed by a signal, the test is marked as failed. """ if input_commands is None: commands = self.commands else: commands = input_commands os.chdir(self.current_dir) backing_file_name, backing_file_fmt = self._create_backing_file() img_size = image_generator.create_image( 'test.img', backing_file_name, backing_file_fmt, fuzz_config) for item in commands: shutil.copy('test.img', 'copy.img') # 'off' and 'len' are multiple of the sector size sector_size = 512 start = random.randrange(0, img_size + 1, sector_size) end = random.randrange(start, img_size + 1, sector_size) if item[0] == 'qemu-img': current_cmd = list(self.qemu_img) elif item[0] == 'qemu-io': current_cmd = list(self.qemu_io) else: multilog("Warning: test command '%s' is not defined.\n" % item[0], sys.stderr, self.log, self.parent_log) continue # Replace all placeholders with their real values for v in item[1:]: c = (v .replace('$test_img', 'copy.img') .replace('$off', str(start)) .replace('$len', str(end - start))) current_cmd.append(c) # Log string with the test header test_summary = "Seed: %s\nCommand: %s\nTest directory: %s\n" \ "Backing file: %s\n" \ % (self.seed, " ".join(current_cmd), self.current_dir, backing_file_name) temp_log = 
io.StringIO() try: retcode = run_app(temp_log, current_cmd) except OSError as e: multilog("%sError: Start of '%s' failed. Reason: %s\n\n" % (test_summary, os.path.basename(current_cmd[0]), e.strerror), sys.stderr, self.log, self.parent_log) raise TestException if retcode < 0: self.log.write(temp_log.getvalue()) multilog("%sFAIL: Test terminated by signal %s\n\n" % (test_summary, str_signal(-retcode)), sys.stderr, self.log, self.parent_log) self.failed = True else: if self.log_all: self.log.write(temp_log.getvalue()) multilog("%sPASS: Application exited with the code " \ "'%d'\n\n" % (test_summary, retcode), sys.stdout, self.log, self.parent_log) temp_log.close() os.remove('copy.img') def finish(self): """Restore the test environment after a test execution.""" self.log.close() self.parent_log.close() os.chdir(self.init_path) if self.cleanup and not self.failed: shutil.rmtree(self.current_dir) if __name__ == '__main__': def usage(): print(""" Usage: runner.py [OPTION...] TEST_DIR IMG_GENERATOR Set up test environment in TEST_DIR and run a test in it. A module for test image generation should be specified via IMG_GENERATOR. Example: runner.py -c '[["qemu-img", "info", "$test_img"]]' /tmp/test qcow2 Optional arguments: -h, --help display this help and exit -d, --duration=NUMBER finish tests after NUMBER of seconds -c, --command=JSON run tests for all commands specified in the JSON array -s, --seed=STRING seed for a test image generation, by default will be generated randomly --config=JSON take fuzzer configuration from the JSON array -k, --keep_passed don't remove folders of passed tests -v, --verbose log information about passed tests JSON: '--command' accepts a JSON array of commands. Each command presents an application under test with all its parameters as a list of strings, e.g. ["qemu-io", "$test_img", "-c", "write $off $len"]. Supported application aliases: 'qemu-img' and 'qemu-io'. 
Supported argument aliases: $test_img for the fuzzed image, $off for an offset, $len for length. Values for $off and $len will be generated based on the virtual disk size of the fuzzed image. Paths to 'qemu-img' and 'qemu-io' are retrevied from 'QEMU_IMG' and 'QEMU_IO' environment variables. '--config' accepts a JSON array of fields to be fuzzed, e.g. '[["header"], ["header", "version"]]'. Each of the list elements can consist of a complex image element only as ["header"] or ["feature_name_table"] or an exact field as ["header", "version"]. In the first case random portion of the element fields will be fuzzed, in the second one the specified field will be fuzzed always. If '--config' argument is specified, fields not listed in the configuration array will not be fuzzed. """) def run_test(test_id, seed, work_dir, run_log, cleanup, log_all, command, fuzz_config): """Setup environment for one test and execute this test.""" try: test = TestEnv(test_id, seed, work_dir, run_log, cleanup, log_all) except TestException: sys.exit(1) # Python 2.4 doesn't support 'finally' and 'except' in the same 'try' # block try: try: test.execute(command, fuzz_config) except TestException: sys.exit(1) finally: test.finish() def should_continue(duration, start_time): """Return True if a new test can be started and False otherwise.""" current_time = int(time.time()) return (duration is None) or (current_time - start_time < duration) try: opts, args = getopt.gnu_getopt(sys.argv[1:], 'c:hs:kvd:', ['command=', 'help', 'seed=', 'config=', 'keep_passed', 'verbose', 'duration=']) except getopt.error as e: print("Error: %s\n\nTry 'runner.py --help' for more information" % e, file=sys.stderr) sys.exit(1) command = None cleanup = True log_all = False seed = None config = None duration = None for opt, arg in opts: if opt in ('-h', '--help'): usage() sys.exit() elif opt in ('-c', '--command'): try: command = json.loads(arg) except (TypeError, ValueError, NameError) as e: print("Error: JSON array of 
test commands cannot be loaded.\n" \ "Reason: %s" % e, file=sys.stderr) sys.exit(1) elif opt in ('-k', '--keep_passed'): cleanup = False elif opt in ('-v', '--verbose'): log_all = True elif opt in ('-s', '--seed'): seed = arg elif opt in ('-d', '--duration'): duration = int(arg) elif opt == '--config': try: config = json.loads(arg) except (TypeError, ValueError, NameError) as e: print("Error: JSON array with the fuzzer configuration cannot" \ " be loaded\nReason: %s" % e, file=sys.stderr) sys.exit(1) if not len(args) == 2: print("Expected two parameters\nTry 'runner.py --help'" \ " for more information.", file=sys.stderr) sys.exit(1) work_dir = os.path.realpath(args[0]) # run_log is created in 'main', because multiple tests are expected to # log in it run_log = os.path.join(work_dir, 'run.log') # Add the path to the image generator module to sys.path sys.path.append(os.path.realpath(os.path.dirname(args[1]))) # Remove a script extension from image generator module if any generator_name = os.path.splitext(os.path.basename(args[1]))[0] try: image_generator = __import__(generator_name) except ImportError as e: print("Error: The image generator '%s' cannot be imported.\n" \ "Reason: %s" % (generator_name, e), file=sys.stderr) sys.exit(1) # Enable core dumps resource.setrlimit(resource.RLIMIT_CORE, (-1, -1)) # If a seed is specified, only one test will be executed. # Otherwise runner will terminate after a keyboard interruption start_time = int(time.time()) test_id = count(1) while should_continue(duration, start_time): try: run_test(str(next(test_id)), seed, work_dir, run_log, cleanup, log_all, command, config) except (KeyboardInterrupt, SystemExit): sys.exit(1) if seed is not None: break
16,343
36.833333
94
py
qemu
qemu-master/tests/image-fuzzer/qcow2/fuzz.py
# Fuzzing functions for qcow2 fields # # Copyright (C) 2014 Maria Kustova <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import random from functools import reduce UINT8 = 0xff UINT16 = 0xffff UINT32 = 0xffffffff UINT64 = 0xffffffffffffffff # Most significant bit orders UINT32_M = 31 UINT64_M = 63 # Fuzz vectors UINT8_V = [0, 0x10, UINT8//4, UINT8//2 - 1, UINT8//2, UINT8//2 + 1, UINT8 - 1, UINT8] UINT16_V = [0, 0x100, 0x1000, UINT16//4, UINT16//2 - 1, UINT16//2, UINT16//2 + 1, UINT16 - 1, UINT16] UINT32_V = [0, 0x100, 0x1000, 0x10000, 0x100000, UINT32//4, UINT32//2 - 1, UINT32//2, UINT32//2 + 1, UINT32 - 1, UINT32] UINT64_V = UINT32_V + [0x1000000, 0x10000000, 0x100000000, UINT64//4, UINT64//2 - 1, UINT64//2, UINT64//2 + 1, UINT64 - 1, UINT64] BYTES_V = [b'%s%p%x%d', b'.1024d', b'%.2049d', b'%p%p%p%p', b'%x%x%x%x', b'%d%d%d%d', b'%s%s%s%s', b'%99999999999s', b'%08x', b'%%20d', b'%%20n', b'%%20x', b'%%20s', b'%s%s%s%s%s%s%s%s%s%s', b'%p%p%p%p%p%p%p%p%p%p', b'%#0123456x%08x%x%s%p%d%n%o%u%c%h%l%q%j%z%Z%t%i%e%g%f%a%C%S%08x%%', b'%s x 129', b'%x x 257'] def random_from_intervals(intervals): """Select a random integer number from the list of specified intervals. Each interval is a tuple of lower and upper limits of the interval. The limits are included. Intervals in a list should not overlap. 
""" total = reduce(lambda x, y: x + y[1] - y[0] + 1, intervals, 0) r = random.randint(0, total - 1) + intervals[0][0] for x in zip(intervals, intervals[1:]): r = r + (r > x[0][1]) * (x[1][0] - x[0][1] - 1) return r def random_bits(bit_ranges): """Generate random binary mask with ones in the specified bit ranges. Each bit_ranges is a list of tuples of lower and upper limits of bit positions will be fuzzed. The limits are included. Random amount of bits in range limits will be set to ones. The mask is returned in decimal integer format. """ bit_numbers = [] # Select random amount of random positions in bit_ranges for rng in bit_ranges: bit_numbers += random.sample(range(rng[0], rng[1] + 1), random.randint(0, rng[1] - rng[0] + 1)) val = 0 # Set bits on selected positions to ones for bit in bit_numbers: val |= 1 << bit return val def truncate_bytes(sequences, length): """Return sequences truncated to specified length.""" if type(sequences) == list: return [s[:length] for s in sequences] else: return sequences[:length] def validator(current, pick, choices): """Return a value not equal to the current selected by the pick function from choices. """ while True: val = pick(choices) if not val == current: return val def int_validator(current, intervals): """Return a random value from intervals not equal to the current. This function is useful for selection from valid values except current one. """ return validator(current, random_from_intervals, intervals) def bit_validator(current, bit_ranges): """Return a random bit mask not equal to the current. This function is useful for selection from valid values except current one. """ return validator(current, random_bits, bit_ranges) def bytes_validator(current, sequences): """Return a random bytes value from the list not equal to the current. This function is useful for selection from valid values except current one. 
""" return validator(current, random.choice, sequences) def selector(current, constraints, validate=int_validator): """Select one value from all defined by constraints. Each constraint produces one random value satisfying to it. The function randomly selects one value satisfying at least one constraint (depending on constraints overlaps). """ def iter_validate(c): """Apply validate() only to constraints represented as lists. This auxiliary function replaces short circuit conditions not supported in Python 2.4 """ if type(c) == list: return validate(current, c) else: return c fuzz_values = [iter_validate(c) for c in constraints] # Remove current for cases it's implicitly specified in constraints # Duplicate validator functionality to prevent decreasing of probability # to get one of allowable values # TODO: remove validators after implementation of intelligent selection # of fields will be fuzzed try: fuzz_values.remove(current) except ValueError: pass return random.choice(fuzz_values) def magic(current): """Fuzz magic header field. The function just returns the current magic value and provides uniformity of calls for all fuzzing functions. 
""" return current def version(current): """Fuzz version header field.""" constraints = UINT32_V + [ [(2, 3)], # correct values [(0, 1), (4, UINT32)] ] return selector(current, constraints) def backing_file_offset(current): """Fuzz backing file offset header field.""" constraints = UINT64_V return selector(current, constraints) def backing_file_size(current): """Fuzz backing file size header field.""" constraints = UINT32_V return selector(current, constraints) def cluster_bits(current): """Fuzz cluster bits header field.""" constraints = UINT32_V + [ [(9, 20)], # correct values [(0, 9), (20, UINT32)] ] return selector(current, constraints) def size(current): """Fuzz image size header field.""" constraints = UINT64_V return selector(current, constraints) def crypt_method(current): """Fuzz crypt method header field.""" constraints = UINT32_V + [ 1, [(2, UINT32)] ] return selector(current, constraints) def l1_size(current): """Fuzz L1 table size header field.""" constraints = UINT32_V return selector(current, constraints) def l1_table_offset(current): """Fuzz L1 table offset header field.""" constraints = UINT64_V return selector(current, constraints) def refcount_table_offset(current): """Fuzz refcount table offset header field.""" constraints = UINT64_V return selector(current, constraints) def refcount_table_clusters(current): """Fuzz refcount table clusters header field.""" constraints = UINT32_V return selector(current, constraints) def nb_snapshots(current): """Fuzz number of snapshots header field.""" constraints = UINT32_V return selector(current, constraints) def snapshots_offset(current): """Fuzz snapshots offset header field.""" constraints = UINT64_V return selector(current, constraints) def incompatible_features(current): """Fuzz incompatible features header field.""" constraints = [ [(0, 1)], # allowable values [(0, UINT64_M)] ] return selector(current, constraints, bit_validator) def compatible_features(current): """Fuzz compatible features header 
field.""" constraints = [ [(0, UINT64_M)] ] return selector(current, constraints, bit_validator) def autoclear_features(current): """Fuzz autoclear features header field.""" constraints = [ [(0, UINT64_M)] ] return selector(current, constraints, bit_validator) def refcount_order(current): """Fuzz number of refcount order header field.""" constraints = UINT32_V return selector(current, constraints) def header_length(current): """Fuzz number of refcount order header field.""" constraints = UINT32_V + [ 72, 104, [(0, UINT32)] ] return selector(current, constraints) def bf_name(current): """Fuzz the backing file name.""" constraints = [ truncate_bytes(BYTES_V, len(current)) ] return selector(current, constraints, bytes_validator) def ext_magic(current): """Fuzz magic field of a header extension.""" constraints = UINT32_V return selector(current, constraints) def ext_length(current): """Fuzz length field of a header extension.""" constraints = UINT32_V return selector(current, constraints) def bf_format(current): """Fuzz backing file format in the corresponding header extension.""" constraints = [ truncate_bytes(BYTES_V, len(current)), truncate_bytes(BYTES_V, (len(current) + 7) & ~7) # Fuzz padding ] return selector(current, constraints, bytes_validator) def feature_type(current): """Fuzz feature type field of a feature name table header extension.""" constraints = UINT8_V return selector(current, constraints) def feature_bit_number(current): """Fuzz bit number field of a feature name table header extension.""" constraints = UINT8_V return selector(current, constraints) def feature_name(current): """Fuzz feature name field of a feature name table header extension.""" constraints = [ truncate_bytes(BYTES_V, len(current)), truncate_bytes(BYTES_V, 46) # Fuzz padding (field length = 46) ] return selector(current, constraints, bytes_validator) def l1_entry(current): """Fuzz an entry of the L1 table.""" constraints = UINT64_V # Reserved bits are ignored # Added a possibility 
when only flags are fuzzed offset = 0x7fffffffffffffff & \ random.choice([selector(current, constraints), current]) is_cow = random.randint(0, 1) return offset + (is_cow << UINT64_M) def l2_entry(current): """Fuzz an entry of an L2 table.""" constraints = UINT64_V # Reserved bits are ignored # Add a possibility when only flags are fuzzed offset = 0x3ffffffffffffffe & \ random.choice([selector(current, constraints), current]) is_compressed = random.randint(0, 1) is_cow = random.randint(0, 1) is_zero = random.randint(0, 1) value = offset + (is_cow << UINT64_M) + \ (is_compressed << UINT64_M - 1) + is_zero return value def refcount_table_entry(current): """Fuzz an entry of the refcount table.""" constraints = UINT64_V return selector(current, constraints) def refcount_block_entry(current): """Fuzz an entry of a refcount block.""" constraints = UINT16_V return selector(current, constraints)
11,104
29.094851
83
py
qemu
qemu-master/tests/image-fuzzer/qcow2/layout.py
# Generator of fuzzed qcow2 images # # Copyright (C) 2014 Maria Kustova <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import random import struct from . import fuzz from math import ceil from os import urandom from itertools import chain MAX_IMAGE_SIZE = 10 * (1 << 20) # Standard sizes UINT32_S = 4 UINT64_S = 8 class Field(object): """Atomic image element (field). The class represents an image field as quadruple of a data format of value necessary for its packing to binary form, an offset from the beginning of the image, a value and a name. The field can be iterated as a list [format, offset, value, name]. """ __slots__ = ('fmt', 'offset', 'value', 'name') def __init__(self, fmt, offset, val, name): self.fmt = fmt self.offset = offset self.value = val self.name = name def __iter__(self): return iter([self.fmt, self.offset, self.value, self.name]) def __repr__(self): return "Field(fmt=%r, offset=%r, value=%r, name=%r)" % \ (self.fmt, self.offset, self.value, self.name) class FieldsList(object): """List of fields. The class allows access to a field in the list by its name. 
""" def __init__(self, meta_data=None): if meta_data is None: self.data = [] else: self.data = [Field(*f) for f in meta_data] def __getitem__(self, name): return [x for x in self.data if x.name == name] def __iter__(self): return iter(self.data) def __len__(self): return len(self.data) class Image(object): """ Qcow2 image object. This class allows to create qcow2 images with random valid structures and values, fuzz them via external qcow2.fuzz module and write the result to a file. """ def __init__(self, backing_file_name=None): """Create a random valid qcow2 image with the correct header and stored backing file name. """ cluster_bits, self.image_size = self._size_params() self.cluster_size = 1 << cluster_bits self.header = FieldsList() self.backing_file_name = FieldsList() self.backing_file_format = FieldsList() self.feature_name_table = FieldsList() self.end_of_extension_area = FieldsList() self.l2_tables = FieldsList() self.l1_table = FieldsList() self.refcount_table = FieldsList() self.refcount_blocks = FieldsList() self.ext_offset = 0 self.create_header(cluster_bits, backing_file_name) self.set_backing_file_name(backing_file_name) self.data_clusters = self._alloc_data(self.image_size, self.cluster_size) # Percentage of fields will be fuzzed self.bias = random.uniform(0.2, 0.5) def __iter__(self): return chain(self.header, self.backing_file_format, self.feature_name_table, self.end_of_extension_area, self.backing_file_name, self.l1_table, self.l2_tables, self.refcount_table, self.refcount_blocks) def create_header(self, cluster_bits, backing_file_name=None): """Generate a random valid header.""" meta_header = [ ['>4s', 0, b"QFI\xfb", 'magic'], ['>I', 4, random.randint(2, 3), 'version'], ['>Q', 8, 0, 'backing_file_offset'], ['>I', 16, 0, 'backing_file_size'], ['>I', 20, cluster_bits, 'cluster_bits'], ['>Q', 24, self.image_size, 'size'], ['>I', 32, 0, 'crypt_method'], ['>I', 36, 0, 'l1_size'], ['>Q', 40, 0, 'l1_table_offset'], ['>Q', 48, 0, 
'refcount_table_offset'], ['>I', 56, 0, 'refcount_table_clusters'], ['>I', 60, 0, 'nb_snapshots'], ['>Q', 64, 0, 'snapshots_offset'], ['>Q', 72, 0, 'incompatible_features'], ['>Q', 80, 0, 'compatible_features'], ['>Q', 88, 0, 'autoclear_features'], # Only refcount_order = 4 is supported by current (07.2014) # implementation of QEMU ['>I', 96, 4, 'refcount_order'], ['>I', 100, 0, 'header_length'] ] self.header = FieldsList(meta_header) if self.header['version'][0].value == 2: self.header['header_length'][0].value = 72 else: self.header['incompatible_features'][0].value = \ random.getrandbits(2) self.header['compatible_features'][0].value = random.getrandbits(1) self.header['header_length'][0].value = 104 # Extensions start at the header last field offset and the field size self.ext_offset = struct.calcsize( self.header['header_length'][0].fmt) + \ self.header['header_length'][0].offset end_of_extension_area_len = 2 * UINT32_S free_space = self.cluster_size - self.ext_offset - \ end_of_extension_area_len # If the backing file name specified and there is enough space for it # in the first cluster, then it's placed in the very end of the first # cluster. if (backing_file_name is not None) and \ (free_space >= len(backing_file_name)): self.header['backing_file_size'][0].value = len(backing_file_name) self.header['backing_file_offset'][0].value = \ self.cluster_size - len(backing_file_name) def set_backing_file_name(self, backing_file_name=None): """Add the name of the backing file at the offset specified in the header. 
""" if (backing_file_name is not None) and \ (not self.header['backing_file_offset'][0].value == 0): data_len = len(backing_file_name) data_fmt = '>' + str(data_len) + 's' self.backing_file_name = FieldsList([ [data_fmt, self.header['backing_file_offset'][0].value, backing_file_name, 'bf_name'] ]) def set_backing_file_format(self, backing_file_fmt=None): """Generate the header extension for the backing file format.""" if backing_file_fmt is not None: # Calculation of the free space available in the first cluster end_of_extension_area_len = 2 * UINT32_S high_border = (self.header['backing_file_offset'][0].value or (self.cluster_size - 1)) - \ end_of_extension_area_len free_space = high_border - self.ext_offset ext_size = 2 * UINT32_S + ((len(backing_file_fmt) + 7) & ~7) if free_space >= ext_size: ext_data_len = len(backing_file_fmt) ext_data_fmt = '>' + str(ext_data_len) + 's' ext_padding_len = 7 - (ext_data_len - 1) % 8 self.backing_file_format = FieldsList([ ['>I', self.ext_offset, 0xE2792ACA, 'ext_magic'], ['>I', self.ext_offset + UINT32_S, ext_data_len, 'ext_length'], [ext_data_fmt, self.ext_offset + UINT32_S * 2, backing_file_fmt, 'bf_format'] ]) self.ext_offset = \ struct.calcsize( self.backing_file_format['bf_format'][0].fmt) + \ ext_padding_len + \ self.backing_file_format['bf_format'][0].offset def create_feature_name_table(self): """Generate a random header extension for names of features used in the image. 
""" def gen_feat_ids(): """Return random feature type and feature bit.""" return (random.randint(0, 2), random.randint(0, 63)) end_of_extension_area_len = 2 * UINT32_S high_border = (self.header['backing_file_offset'][0].value or (self.cluster_size - 1)) - \ end_of_extension_area_len free_space = high_border - self.ext_offset # Sum of sizes of 'magic' and 'length' header extension fields ext_header_len = 2 * UINT32_S fnt_entry_size = 6 * UINT64_S num_fnt_entries = min(10, (free_space - ext_header_len) / fnt_entry_size) if not num_fnt_entries == 0: feature_tables = [] feature_ids = [] inner_offset = self.ext_offset + ext_header_len feat_name = b'some cool feature' while len(feature_tables) < num_fnt_entries * 3: feat_type, feat_bit = gen_feat_ids() # Remove duplicates while (feat_type, feat_bit) in feature_ids: feat_type, feat_bit = gen_feat_ids() feature_ids.append((feat_type, feat_bit)) feat_fmt = '>' + str(len(feat_name)) + 's' feature_tables += [['B', inner_offset, feat_type, 'feature_type'], ['B', inner_offset + 1, feat_bit, 'feature_bit_number'], [feat_fmt, inner_offset + 2, feat_name, 'feature_name'] ] inner_offset += fnt_entry_size # No padding for the extension is necessary, because # the extension length is multiple of 8 self.feature_name_table = FieldsList([ ['>I', self.ext_offset, 0x6803f857, 'ext_magic'], # One feature table contains 3 fields and takes 48 bytes ['>I', self.ext_offset + UINT32_S, len(feature_tables) // 3 * 48, 'ext_length'] ] + feature_tables) self.ext_offset = inner_offset def set_end_of_extension_area(self): """Generate a mandatory header extension marking end of header extensions. 
""" self.end_of_extension_area = FieldsList([ ['>I', self.ext_offset, 0, 'ext_magic'], ['>I', self.ext_offset + UINT32_S, 0, 'ext_length'] ]) def create_l_structures(self): """Generate random valid L1 and L2 tables.""" def create_l2_entry(host, guest, l2_cluster): """Generate one L2 entry.""" offset = l2_cluster * self.cluster_size l2_size = self.cluster_size // UINT64_S entry_offset = offset + UINT64_S * (guest % l2_size) cluster_descriptor = host * self.cluster_size if not self.header['version'][0].value == 2: cluster_descriptor += random.randint(0, 1) # While snapshots are not supported, bit #63 = 1 # Compressed clusters are not supported => bit #62 = 0 entry_val = (1 << 63) + cluster_descriptor return ['>Q', entry_offset, entry_val, 'l2_entry'] def create_l1_entry(l2_cluster, l1_offset, guest): """Generate one L1 entry.""" l2_size = self.cluster_size // UINT64_S entry_offset = l1_offset + UINT64_S * (guest // l2_size) # While snapshots are not supported bit #63 = 1 entry_val = (1 << 63) + l2_cluster * self.cluster_size return ['>Q', entry_offset, entry_val, 'l1_entry'] if len(self.data_clusters) == 0: # All metadata for an empty guest image needs 4 clusters: # header, rfc table, rfc block, L1 table. 
# Header takes cluster #0, other clusters ##1-3 can be used l1_offset = random.randint(1, 3) * self.cluster_size l1 = [['>Q', l1_offset, 0, 'l1_entry']] l2 = [] else: meta_data = self._get_metadata() guest_clusters = random.sample(range(self.image_size // self.cluster_size), len(self.data_clusters)) # Number of entries in a L1/L2 table l_size = self.cluster_size // UINT64_S # Number of clusters necessary for L1 table l1_size = int(ceil((max(guest_clusters) + 1) / float(l_size**2))) l1_start = self._get_adjacent_clusters(self.data_clusters | meta_data, l1_size) meta_data |= set(range(l1_start, l1_start + l1_size)) l1_offset = l1_start * self.cluster_size # Indices of L2 tables l2_ids = [] # Host clusters allocated for L2 tables l2_clusters = [] # L1 entries l1 = [] # L2 entries l2 = [] for host, guest in zip(self.data_clusters, guest_clusters): l2_id = guest // l_size if l2_id not in l2_ids: l2_ids.append(l2_id) l2_clusters.append(self._get_adjacent_clusters( self.data_clusters | meta_data | set(l2_clusters), 1)) l1.append(create_l1_entry(l2_clusters[-1], l1_offset, guest)) l2.append(create_l2_entry(host, guest, l2_clusters[l2_ids.index(l2_id)])) self.l2_tables = FieldsList(l2) self.l1_table = FieldsList(l1) self.header['l1_size'][0].value = int(ceil(UINT64_S * self.image_size / float(self.cluster_size**2))) self.header['l1_table_offset'][0].value = l1_offset def create_refcount_structures(self): """Generate random refcount blocks and refcount table.""" def allocate_rfc_blocks(data, size): """Return indices of clusters allocated for refcount blocks.""" cluster_ids = set() diff = block_ids = set([x // size for x in data]) while len(diff) != 0: # Allocate all yet not allocated clusters new = self._get_available_clusters(data | cluster_ids, len(diff)) # Indices of new refcount blocks necessary to cover clusters # in 'new' diff = set([x // size for x in new]) - block_ids cluster_ids |= new block_ids |= diff return cluster_ids, block_ids def allocate_rfc_table(data, 
init_blocks, block_size): """Return indices of clusters allocated for the refcount table and updated indices of clusters allocated for blocks and indices of blocks. """ blocks = set(init_blocks) clusters = set() # Number of entries in one cluster of the refcount table size = self.cluster_size // UINT64_S # Number of clusters necessary for the refcount table based on # the current number of refcount blocks table_size = int(ceil((max(blocks) + 1) / float(size))) # Index of the first cluster of the refcount table table_start = self._get_adjacent_clusters(data, table_size + 1) # Clusters allocated for the current length of the refcount table table_clusters = set(range(table_start, table_start + table_size)) # Clusters allocated for the refcount table including # last optional one for potential l1 growth table_clusters_allocated = set(range(table_start, table_start + table_size + 1)) # New refcount blocks necessary for clusters occupied by the # refcount table diff = set([c // block_size for c in table_clusters]) - blocks blocks |= diff while len(diff) != 0: # Allocate clusters for new refcount blocks new = self._get_available_clusters((data | clusters) | table_clusters_allocated, len(diff)) # Indices of new refcount blocks necessary to cover # clusters in 'new' diff = set([x // block_size for x in new]) - blocks clusters |= new blocks |= diff # Check if the refcount table needs one more cluster if int(ceil((max(blocks) + 1) / float(size))) > table_size: new_block_id = (table_start + table_size) // block_size # Check if the additional table cluster needs # one more refcount block if new_block_id not in blocks: diff.add(new_block_id) table_clusters.add(table_start + table_size) table_size += 1 return table_clusters, blocks, clusters def create_table_entry(table_offset, block_cluster, block_size, cluster): """Generate a refcount table entry.""" offset = table_offset + UINT64_S * (cluster // block_size) return ['>Q', offset, block_cluster * self.cluster_size, 
'refcount_table_entry'] def create_block_entry(block_cluster, block_size, cluster): """Generate a list of entries for the current block.""" entry_size = self.cluster_size // block_size offset = block_cluster * self.cluster_size entry_offset = offset + entry_size * (cluster % block_size) # While snapshots are not supported all refcounts are set to 1 return ['>H', entry_offset, 1, 'refcount_block_entry'] # Size of a block entry in bits refcount_bits = 1 << self.header['refcount_order'][0].value # Number of refcount entries per refcount block # Convert self.cluster_size from bytes to bits to have the same # base for the numerator and denominator block_size = self.cluster_size * 8 // refcount_bits meta_data = self._get_metadata() if len(self.data_clusters) == 0: # All metadata for an empty guest image needs 4 clusters: # header, rfc table, rfc block, L1 table. # Header takes cluster #0, other clusters ##1-3 can be used block_clusters = set([random.choice(list(set(range(1, 4)) - meta_data))]) block_ids = set([0]) table_clusters = set([random.choice(list(set(range(1, 4)) - meta_data - block_clusters))]) else: block_clusters, block_ids = \ allocate_rfc_blocks(self.data_clusters | meta_data, block_size) table_clusters, block_ids, new_clusters = \ allocate_rfc_table(self.data_clusters | meta_data | block_clusters, block_ids, block_size) block_clusters |= new_clusters meta_data |= block_clusters | table_clusters table_offset = min(table_clusters) * self.cluster_size block_id = None # Clusters allocated for refcount blocks block_clusters = list(block_clusters) # Indices of refcount blocks block_ids = list(block_ids) # Refcount table entries rfc_table = [] # Refcount entries rfc_blocks = [] for cluster in sorted(self.data_clusters | meta_data): if cluster // block_size != block_id: block_id = cluster // block_size block_cluster = block_clusters[block_ids.index(block_id)] rfc_table.append(create_table_entry(table_offset, block_cluster, block_size, cluster)) 
rfc_blocks.append(create_block_entry(block_cluster, block_size, cluster)) self.refcount_table = FieldsList(rfc_table) self.refcount_blocks = FieldsList(rfc_blocks) self.header['refcount_table_offset'][0].value = table_offset self.header['refcount_table_clusters'][0].value = len(table_clusters) def fuzz(self, fields_to_fuzz=None): """Fuzz an image by corrupting values of a random subset of its fields. Without parameters the method fuzzes an entire image. If 'fields_to_fuzz' is specified then only fields in this list will be fuzzed. 'fields_to_fuzz' can contain both individual fields and more general image elements as a header or tables. In the first case the field will be fuzzed always. In the second a random subset of fields will be selected and fuzzed. """ def coin(): """Return boolean value proportional to a portion of fields to be fuzzed. """ return random.random() < self.bias if fields_to_fuzz is None: for field in self: if coin(): field.value = getattr(fuzz, field.name)(field.value) else: for item in fields_to_fuzz: if len(item) == 1: for field in getattr(self, item[0]): if coin(): field.value = getattr(fuzz, field.name)(field.value) else: # If fields with the requested name were not generated # getattr(self, item[0])[item[1]] returns an empty list for field in getattr(self, item[0])[item[1]]: field.value = getattr(fuzz, field.name)(field.value) def write(self, filename): """Write an entire image to the file.""" image_file = open(filename, 'wb') for field in self: image_file.seek(field.offset) image_file.write(struct.pack(field.fmt, field.value)) for cluster in sorted(self.data_clusters): image_file.seek(cluster * self.cluster_size) image_file.write(urandom(self.cluster_size)) # Align the real image size to the cluster size image_file.seek(0, 2) size = image_file.tell() rounded = (size + self.cluster_size - 1) & ~(self.cluster_size - 1) if rounded > size: image_file.seek(rounded - 1) image_file.write(b'\x00') image_file.close() @staticmethod def 
_size_params(): """Generate a random image size aligned to a random correct cluster size. """ cluster_bits = random.randrange(9, 21) cluster_size = 1 << cluster_bits img_size = random.randrange(0, MAX_IMAGE_SIZE + 1, cluster_size) return (cluster_bits, img_size) @staticmethod def _get_available_clusters(used, number): """Return a set of indices of not allocated clusters. 'used' contains indices of currently allocated clusters. All clusters that cannot be allocated between 'used' clusters will have indices appended to the end of 'used'. """ append_id = max(used) + 1 free = set(range(1, append_id)) - used if len(free) >= number: return set(random.sample(free, number)) else: return free | set(range(append_id, append_id + number - len(free))) @staticmethod def _get_adjacent_clusters(used, size): """Return an index of the first cluster in the sequence of free ones. 'used' contains indices of currently allocated clusters. 'size' is the length of the sequence of free clusters. If the sequence of 'size' is not available between 'used' clusters, its first index will be append to the end of 'used'. """ def get_cluster_id(lst, length): """Return the first index of the sequence of the specified length or None if the sequence cannot be inserted in the list. """ if len(lst) != 0: pairs = [] pair = (lst[0], 1) for i in range(1, len(lst)): if lst[i] == lst[i-1] + 1: pair = (lst[i], pair[1] + 1) else: pairs.append(pair) pair = (lst[i], 1) pairs.append(pair) random.shuffle(pairs) for x, s in pairs: if s >= length: return x - length + 1 return None append_id = max(used) + 1 free = list(set(range(1, append_id)) - used) idx = get_cluster_id(free, size) if idx is None: return append_id else: return idx @staticmethod def _alloc_data(img_size, cluster_size): """Return a set of random indices of clusters allocated for guest data. 
""" num_of_cls = img_size // cluster_size return set(random.sample(range(1, num_of_cls + 1), random.randint(0, num_of_cls))) def _get_metadata(self): """Return indices of clusters allocated for image metadata.""" ids = set() for x in self: ids.add(x.offset // self.cluster_size) return ids def create_image(test_img_path, backing_file_name=None, backing_file_fmt=None, fields_to_fuzz=None): """Create a fuzzed image and write it to the specified file.""" image = Image(backing_file_name.encode()) image.set_backing_file_format(backing_file_fmt.encode()) image.create_feature_name_table() image.set_end_of_extension_area() image.create_l_structures() image.create_refcount_structures() image.fuzz(fields_to_fuzz) image.write(test_img_path) return image.image_size
27,123
43.247961
79
py
qemu
qemu-master/tests/image-fuzzer/qcow2/__init__.py
# Package interface of the qcow2 image fuzzer: only the image-generation
# entry point is re-exported; the layout internals stay private.
from .layout import create_image
33
16
32
py
qemu
qemu-master/tests/avocado/ppc_pseries.py
# Test that Linux kernel boots on ppc machines and check the console
#
# Copyright (c) 2018, 2020 Red Hat, Inc.
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.

from avocado.utils import archive
from avocado_qemu import QemuSystemTest
from avocado_qemu import wait_for_console_pattern


class pseriesMachine(QemuSystemTest):
    """Boot a Fedora ppc64 kernel on the pseries machine and watch hvc0."""

    timeout = 90
    KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
    panic_message = 'Kernel panic - not syncing'

    def test_ppc64_pseries(self):
        """
        :avocado: tags=arch:ppc64
        :avocado: tags=machine:pseries
        """
        url = ('https://archives.fedoraproject.org/pub/archive'
               '/fedora-secondary/releases/29/Everything/ppc64le/os'
               '/ppc/ppc64/vmlinuz')
        checksum = '3fe04abfc852b66653b8c3c897a59a689270bc77'
        kernel = self.fetch_asset(url, asset_hash=checksum)

        cmdline = self.KERNEL_COMMON_COMMAND_LINE + 'console=hvc0'
        self.vm.set_console()
        self.vm.add_args('-kernel', kernel,
                         '-append', cmdline)
        self.vm.launch()
        # The kernel echoes its command line early in the boot log;
        # seeing it proves -append made it through to the guest.
        wait_for_console_pattern(self,
                                 'Kernel command line: %s' % cmdline,
                                 self.panic_message)
1,377
37.277778
78
py
qemu
qemu-master/tests/avocado/x86_cpu_model_versions.py
#
# Basic validation of x86 versioned CPU models and CPU model aliases
#
# Copyright (c) 2019 Red Hat Inc
#
# Author:
#  Eduardo Habkost <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.
#

import avocado_qemu
import re


class X86CPUModelAliases(avocado_qemu.QemuSystemTest):
    """
    Validation of PC CPU model versions and CPU model aliases

    :avocado: tags=arch:x86_64
    """
    # NOTE: all assertEquals calls below were migrated to assertEqual;
    # the assertEquals alias is deprecated and was removed in Python 3.12.

    def validate_aliases(self, cpus):
        """Check every alias in 'cpus' points to a real, non-alias model."""
        for c in cpus.values():
            if 'alias-of' in c:
                # all aliases must point to a valid CPU model name:
                self.assertIn(c['alias-of'], cpus,
                              '%s.alias-of (%s) is not a valid CPU model name'
                              % (c['name'], c['alias-of']))
                # aliases must not point to aliases
                self.assertNotIn('alias-of', cpus[c['alias-of']],
                                 '%s.alias-of (%s) points to another alias'
                                 % (c['name'], c['alias-of']))

                # aliases must not be static
                self.assertFalse(c['static'])

    def validate_variant_aliases(self, cpus):
        """Check that no -noTSX/-IBRS/-IBPB variant has its own versions."""
        # -noTSX, -IBRS and -IBPB variants of CPU models are special:
        # they shouldn't have their own versions:
        self.assertNotIn("Haswell-noTSX-v1", cpus,
                         "Haswell-noTSX shouldn't be versioned")
        self.assertNotIn("Broadwell-noTSX-v1", cpus,
                         "Broadwell-noTSX shouldn't be versioned")
        self.assertNotIn("Nehalem-IBRS-v1", cpus,
                         "Nehalem-IBRS shouldn't be versioned")
        self.assertNotIn("Westmere-IBRS-v1", cpus,
                         "Westmere-IBRS shouldn't be versioned")
        self.assertNotIn("SandyBridge-IBRS-v1", cpus,
                         "SandyBridge-IBRS shouldn't be versioned")
        self.assertNotIn("IvyBridge-IBRS-v1", cpus,
                         "IvyBridge-IBRS shouldn't be versioned")
        self.assertNotIn("Haswell-noTSX-IBRS-v1", cpus,
                         "Haswell-noTSX-IBRS shouldn't be versioned")
        self.assertNotIn("Haswell-IBRS-v1", cpus,
                         "Haswell-IBRS shouldn't be versioned")
        self.assertNotIn("Broadwell-noTSX-IBRS-v1", cpus,
                         "Broadwell-noTSX-IBRS shouldn't be versioned")
        self.assertNotIn("Broadwell-IBRS-v1", cpus,
                         "Broadwell-IBRS shouldn't be versioned")
        self.assertNotIn("Skylake-Client-IBRS-v1", cpus,
                         "Skylake-Client-IBRS shouldn't be versioned")
        self.assertNotIn("Skylake-Server-IBRS-v1", cpus,
                         "Skylake-Server-IBRS shouldn't be versioned")
        self.assertNotIn("EPYC-IBPB-v1", cpus,
                         "EPYC-IBPB shouldn't be versioned")

    def test_4_0_alias_compatibility(self):
        """
        Check if pc-*-4.0 unversioned CPU model won't be reported as aliases

        :avocado: tags=machine:pc-i440fx-4.0
        """
        # pc-*-4.0 won't expose non-versioned CPU models as aliases
        # We do this to help management software to keep compatibility
        # with older QEMU versions that didn't have the versioned CPU model
        self.vm.add_args('-S')
        self.vm.launch()
        cpus = dict((m['name'], m)
                    for m in self.vm.command('query-cpu-definitions'))

        self.assertFalse(cpus['Cascadelake-Server']['static'],
                         'unversioned Cascadelake-Server CPU model must not be static')
        self.assertNotIn('alias-of', cpus['Cascadelake-Server'],
                         'Cascadelake-Server must not be an alias')
        self.assertNotIn('alias-of', cpus['Cascadelake-Server-v1'],
                         'Cascadelake-Server-v1 must not be an alias')

        self.assertFalse(cpus['qemu64']['static'],
                         'unversioned qemu64 CPU model must not be static')
        self.assertNotIn('alias-of', cpus['qemu64'],
                         'qemu64 must not be an alias')
        self.assertNotIn('alias-of', cpus['qemu64-v1'],
                         'qemu64-v1 must not be an alias')

        self.validate_variant_aliases(cpus)

        # On pc-*-4.0, no CPU model should be reported as an alias:
        for name, c in cpus.items():
            self.assertNotIn('alias-of', c, "%s shouldn't be an alias" % (name))

    def test_4_1_alias(self):
        """
        Check if unversioned CPU model is an alias pointing to right version

        :avocado: tags=machine:pc-i440fx-4.1
        """
        self.vm.add_args('-S')
        self.vm.launch()
        cpus = dict((m['name'], m)
                    for m in self.vm.command('query-cpu-definitions'))

        self.assertFalse(cpus['Cascadelake-Server']['static'],
                         'unversioned Cascadelake-Server CPU model must not be static')
        self.assertEqual(cpus['Cascadelake-Server'].get('alias-of'),
                         'Cascadelake-Server-v1',
                         'Cascadelake-Server must be an alias of Cascadelake-Server-v1')
        self.assertNotIn('alias-of', cpus['Cascadelake-Server-v1'],
                         'Cascadelake-Server-v1 must not be an alias')

        self.assertFalse(cpus['qemu64']['static'],
                         'unversioned qemu64 CPU model must not be static')
        self.assertEqual(cpus['qemu64'].get('alias-of'), 'qemu64-v1',
                         'qemu64 must be an alias of qemu64-v1')
        self.assertNotIn('alias-of', cpus['qemu64-v1'],
                         'qemu64-v1 must not be an alias')

        self.validate_variant_aliases(cpus)

        # On pc-*-4.1, -noTSX and -IBRS models should be aliases:
        self.assertEqual(cpus["Haswell"].get('alias-of'),
                         "Haswell-v1",
                         "Haswell must be an alias")
        self.assertEqual(cpus["Haswell-noTSX"].get('alias-of'),
                         "Haswell-v2",
                         "Haswell-noTSX must be an alias")
        self.assertEqual(cpus["Haswell-IBRS"].get('alias-of'),
                         "Haswell-v3",
                         "Haswell-IBRS must be an alias")
        self.assertEqual(cpus["Haswell-noTSX-IBRS"].get('alias-of'),
                         "Haswell-v4",
                         "Haswell-noTSX-IBRS must be an alias")

        self.assertEqual(cpus["Broadwell"].get('alias-of'),
                         "Broadwell-v1",
                         "Broadwell must be an alias")
        self.assertEqual(cpus["Broadwell-noTSX"].get('alias-of'),
                         "Broadwell-v2",
                         "Broadwell-noTSX must be an alias")
        self.assertEqual(cpus["Broadwell-IBRS"].get('alias-of'),
                         "Broadwell-v3",
                         "Broadwell-IBRS must be an alias")
        self.assertEqual(cpus["Broadwell-noTSX-IBRS"].get('alias-of'),
                         "Broadwell-v4",
                         "Broadwell-noTSX-IBRS must be an alias")

        self.assertEqual(cpus["Nehalem"].get('alias-of'),
                         "Nehalem-v1",
                         "Nehalem must be an alias")
        self.assertEqual(cpus["Nehalem-IBRS"].get('alias-of'),
                         "Nehalem-v2",
                         "Nehalem-IBRS must be an alias")

        self.assertEqual(cpus["Westmere"].get('alias-of'),
                         "Westmere-v1",
                         "Westmere must be an alias")
        self.assertEqual(cpus["Westmere-IBRS"].get('alias-of'),
                         "Westmere-v2",
                         "Westmere-IBRS must be an alias")

        self.assertEqual(cpus["SandyBridge"].get('alias-of'),
                         "SandyBridge-v1",
                         "SandyBridge must be an alias")
        self.assertEqual(cpus["SandyBridge-IBRS"].get('alias-of'),
                         "SandyBridge-v2",
                         "SandyBridge-IBRS must be an alias")

        self.assertEqual(cpus["IvyBridge"].get('alias-of'),
                         "IvyBridge-v1",
                         "IvyBridge must be an alias")
        self.assertEqual(cpus["IvyBridge-IBRS"].get('alias-of'),
                         "IvyBridge-v2",
                         "IvyBridge-IBRS must be an alias")

        self.assertEqual(cpus["Skylake-Client"].get('alias-of'),
                         "Skylake-Client-v1",
                         "Skylake-Client must be an alias")
        self.assertEqual(cpus["Skylake-Client-IBRS"].get('alias-of'),
                         "Skylake-Client-v2",
                         "Skylake-Client-IBRS must be an alias")

        self.assertEqual(cpus["Skylake-Server"].get('alias-of'),
                         "Skylake-Server-v1",
                         "Skylake-Server must be an alias")
        self.assertEqual(cpus["Skylake-Server-IBRS"].get('alias-of'),
                         "Skylake-Server-v2",
                         "Skylake-Server-IBRS must be an alias")

        self.assertEqual(cpus["EPYC"].get('alias-of'),
                         "EPYC-v1",
                         "EPYC must be an alias")
        self.assertEqual(cpus["EPYC-IBPB"].get('alias-of'),
                         "EPYC-v2",
                         "EPYC-IBPB must be an alias")

        self.validate_aliases(cpus)

    def test_none_alias(self):
        """
        Check if unversioned CPU model is an alias pointing to some version

        :avocado: tags=machine:none
        """
        self.vm.add_args('-S')
        self.vm.launch()
        cpus = dict((m['name'], m)
                    for m in self.vm.command('query-cpu-definitions'))

        self.assertFalse(cpus['Cascadelake-Server']['static'],
                         'unversioned Cascadelake-Server CPU model must not be static')
        self.assertTrue(re.match('Cascadelake-Server-v[0-9]+',
                                 cpus['Cascadelake-Server']['alias-of']),
                        'Cascadelake-Server must be an alias of versioned CPU model')
        self.assertNotIn('alias-of', cpus['Cascadelake-Server-v1'],
                         'Cascadelake-Server-v1 must not be an alias')

        self.assertFalse(cpus['qemu64']['static'],
                         'unversioned qemu64 CPU model must not be static')
        self.assertTrue(re.match('qemu64-v[0-9]+', cpus['qemu64']['alias-of']),
                        'qemu64 must be an alias of versioned CPU model')
        self.assertNotIn('alias-of', cpus['qemu64-v1'],
                         'qemu64-v1 must not be an alias')

        self.validate_aliases(cpus)


class CascadelakeArchCapabilities(avocado_qemu.QemuSystemTest):
    """
    Validation of Cascadelake arch-capabilities

    :avocado: tags=arch:x86_64
    """
    def get_cpu_prop(self, prop):
        """Return QOM property 'prop' of the first vCPU of the running VM."""
        cpu_path = self.vm.command('query-cpus-fast')[0].get('qom-path')
        return self.vm.command('qom-get', path=cpu_path, property=prop)

    def test_4_1(self):
        """
        :avocado: tags=machine:pc-i440fx-4.1
        :avocado: tags=cpu:Cascadelake-Server
        """
        # machine-type only:
        self.vm.add_args('-S')
        self.set_vm_arg('-cpu',
                        'Cascadelake-Server,x-force-features=on,check=off,'
                        'enforce=off')
        self.vm.launch()
        self.assertFalse(self.get_cpu_prop('arch-capabilities'),
                         'pc-i440fx-4.1 + Cascadelake-Server should not have arch-capabilities')

    def test_4_0(self):
        """
        :avocado: tags=machine:pc-i440fx-4.0
        :avocado: tags=cpu:Cascadelake-Server
        """
        self.vm.add_args('-S')
        self.set_vm_arg('-cpu',
                        'Cascadelake-Server,x-force-features=on,check=off,'
                        'enforce=off')
        self.vm.launch()
        self.assertFalse(self.get_cpu_prop('arch-capabilities'),
                         'pc-i440fx-4.0 + Cascadelake-Server should not have arch-capabilities')

    def test_set_4_0(self):
        """
        :avocado: tags=machine:pc-i440fx-4.0
        :avocado: tags=cpu:Cascadelake-Server
        """
        # command line must override machine-type if CPU model is not versioned:
        self.vm.add_args('-S')
        self.set_vm_arg('-cpu',
                        'Cascadelake-Server,x-force-features=on,check=off,'
                        'enforce=off,+arch-capabilities')
        self.vm.launch()
        self.assertTrue(self.get_cpu_prop('arch-capabilities'),
                        'pc-i440fx-4.0 + Cascadelake-Server,+arch-capabilities should have arch-capabilities')

    def test_unset_4_1(self):
        """
        :avocado: tags=machine:pc-i440fx-4.1
        :avocado: tags=cpu:Cascadelake-Server
        """
        self.vm.add_args('-S')
        self.set_vm_arg('-cpu',
                        'Cascadelake-Server,x-force-features=on,check=off,'
                        'enforce=off,-arch-capabilities')
        self.vm.launch()
        self.assertFalse(self.get_cpu_prop('arch-capabilities'),
                         'pc-i440fx-4.1 + Cascadelake-Server,-arch-capabilities should not have arch-capabilities')

    def test_v1_4_0(self):
        """
        :avocado: tags=machine:pc-i440fx-4.0
        :avocado: tags=cpu:Cascadelake-Server
        """
        # versioned CPU model overrides machine-type:
        self.vm.add_args('-S')
        self.set_vm_arg('-cpu',
                        'Cascadelake-Server-v1,x-force-features=on,check=off,'
                        'enforce=off')
        self.vm.launch()
        self.assertFalse(self.get_cpu_prop('arch-capabilities'),
                         'pc-i440fx-4.0 + Cascadelake-Server-v1 should not have arch-capabilities')

    def test_v2_4_0(self):
        """
        :avocado: tags=machine:pc-i440fx-4.0
        :avocado: tags=cpu:Cascadelake-Server
        """
        self.vm.add_args('-S')
        self.set_vm_arg('-cpu',
                        'Cascadelake-Server-v2,x-force-features=on,check=off,'
                        'enforce=off')
        self.vm.launch()
        self.assertTrue(self.get_cpu_prop('arch-capabilities'),
                        'pc-i440fx-4.0 + Cascadelake-Server-v2 should have arch-capabilities')

    def test_v1_set_4_0(self):
        """
        :avocado: tags=machine:pc-i440fx-4.0
        :avocado: tags=cpu:Cascadelake-Server
        """
        # command line must override machine-type and versioned CPU model:
        self.vm.add_args('-S')
        self.set_vm_arg('-cpu',
                        'Cascadelake-Server-v1,x-force-features=on,check=off,'
                        'enforce=off,+arch-capabilities')
        self.vm.launch()
        self.assertTrue(self.get_cpu_prop('arch-capabilities'),
                        'pc-i440fx-4.0 + Cascadelake-Server-v1,+arch-capabilities should have arch-capabilities')

    def test_v2_unset_4_1(self):
        """
        :avocado: tags=machine:pc-i440fx-4.1
        :avocado: tags=cpu:Cascadelake-Server
        """
        self.vm.add_args('-S')
        self.set_vm_arg('-cpu',
                        'Cascadelake-Server-v2,x-force-features=on,check=off,'
                        'enforce=off,-arch-capabilities')
        self.vm.launch()
        self.assertFalse(self.get_cpu_prop('arch-capabilities'),
                         'pc-i440fx-4.1 + Cascadelake-Server-v2,-arch-capabilities should not have arch-capabilities')
15,950
43.431755
118
py
qemu
qemu-master/tests/avocado/boot_linux_console.py
# Functional test that boots a Linux kernel and checks the console # # Copyright (c) 2018 Red Hat, Inc. # # Author: # Cleber Rosa <[email protected]> # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. import os import lzma import gzip import shutil from avocado import skip from avocado import skipUnless from avocado import skipIf from avocado_qemu import QemuSystemTest from avocado_qemu import exec_command from avocado_qemu import exec_command_and_wait_for_pattern from avocado_qemu import interrupt_interactive_console_until_pattern from avocado_qemu import wait_for_console_pattern from avocado.utils import process from avocado.utils import archive """ Round up to next power of 2 """ def pow2ceil(x): return 1 if x == 0 else 2**(x - 1).bit_length() def file_truncate(path, size): if size != os.path.getsize(path): with open(path, 'ab+') as fd: fd.truncate(size) """ Expand file size to next power of 2 """ def image_pow2ceil_expand(path): size = os.path.getsize(path) size_aligned = pow2ceil(size) if size != size_aligned: with open(path, 'ab+') as fd: fd.truncate(size_aligned) class LinuxKernelTest(QemuSystemTest): KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' def wait_for_console_pattern(self, success_message, vm=None): wait_for_console_pattern(self, success_message, failure_message='Kernel panic - not syncing', vm=vm) def extract_from_deb(self, deb, path): """ Extracts a file from a deb package into the test workdir :param deb: path to the deb archive :param path: path within the deb archive of the file to be extracted :returns: path of the extracted file """ cwd = os.getcwd() os.chdir(self.workdir) file_path = process.run("ar t %s" % deb).stdout_text.split()[2] process.run("ar x %s %s" % (deb, file_path)) archive.extract(file_path, self.workdir) os.chdir(cwd) # Return complete path to extracted file. 
Because callers to # extract_from_deb() specify 'path' with a leading slash, it is # necessary to use os.path.relpath() as otherwise os.path.join() # interprets it as an absolute path and drops the self.workdir part. return os.path.normpath(os.path.join(self.workdir, os.path.relpath(path, '/'))) def extract_from_rpm(self, rpm, path): """ Extracts a file from an RPM package into the test workdir. :param rpm: path to the rpm archive :param path: path within the rpm archive of the file to be extracted needs to be a relative path (starting with './') because cpio(1), which is used to extract the file, expects that. :returns: path of the extracted file """ cwd = os.getcwd() os.chdir(self.workdir) process.run("rpm2cpio %s | cpio -id %s" % (rpm, path), shell=True) os.chdir(cwd) return os.path.normpath(os.path.join(self.workdir, path)) class BootLinuxConsole(LinuxKernelTest): """ Boots a Linux kernel and checks that the console is operational and the kernel command line is properly passed from QEMU to the kernel """ timeout = 90 def test_x86_64_pc(self): """ :avocado: tags=arch:x86_64 :avocado: tags=machine:pc """ kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora' '/linux/releases/29/Everything/x86_64/os/images/pxeboot' '/vmlinuz') kernel_hash = '23bebd2680757891cf7adedb033532163a792495' kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) self.vm.set_console() kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' self.vm.add_args('-kernel', kernel_path, '-append', kernel_command_line) self.vm.launch() console_pattern = 'Kernel command line: %s' % kernel_command_line self.wait_for_console_pattern(console_pattern) def test_mips_malta(self): """ :avocado: tags=arch:mips :avocado: tags=machine:malta :avocado: tags=endian:big """ deb_url = ('http://snapshot.debian.org/archive/debian/' '20130217T032700Z/pool/main/l/linux-2.6/' 'linux-image-2.6.32-5-4kc-malta_2.6.32-48_mips.deb') deb_hash = 
'a8cfc28ad8f45f54811fc6cf74fc43ffcfe0ba04' deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) kernel_path = self.extract_from_deb(deb_path, '/boot/vmlinux-2.6.32-5-4kc-malta') self.vm.set_console() kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' self.vm.add_args('-kernel', kernel_path, '-append', kernel_command_line) self.vm.launch() console_pattern = 'Kernel command line: %s' % kernel_command_line self.wait_for_console_pattern(console_pattern) def test_mips64el_malta(self): """ This test requires the ar tool to extract "data.tar.gz" from the Debian package. The kernel can be rebuilt using this Debian kernel source [1] and following the instructions on [2]. [1] http://snapshot.debian.org/package/linux-2.6/2.6.32-48/ #linux-source-2.6.32_2.6.32-48 [2] https://kernel-team.pages.debian.net/kernel-handbook/ ch-common-tasks.html#s-common-official :avocado: tags=arch:mips64el :avocado: tags=machine:malta """ deb_url = ('http://snapshot.debian.org/archive/debian/' '20130217T032700Z/pool/main/l/linux-2.6/' 'linux-image-2.6.32-5-5kc-malta_2.6.32-48_mipsel.deb') deb_hash = '1aaec92083bf22fda31e0d27fa8d9a388e5fc3d5' deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) kernel_path = self.extract_from_deb(deb_path, '/boot/vmlinux-2.6.32-5-5kc-malta') self.vm.set_console() kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' self.vm.add_args('-kernel', kernel_path, '-append', kernel_command_line) self.vm.launch() console_pattern = 'Kernel command line: %s' % kernel_command_line self.wait_for_console_pattern(console_pattern) def test_mips64el_fuloong2e(self): """ :avocado: tags=arch:mips64el :avocado: tags=machine:fuloong2e :avocado: tags=endian:little """ deb_url = ('http://archive.debian.org/debian/pool/main/l/linux/' 'linux-image-3.16.0-6-loongson-2e_3.16.56-1+deb8u1_mipsel.deb') deb_hash = 'd04d446045deecf7b755ef576551de0c4184dd44' deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) kernel_path = 
self.extract_from_deb(deb_path, '/boot/vmlinux-3.16.0-6-loongson-2e') self.vm.set_console() kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' self.vm.add_args('-kernel', kernel_path, '-append', kernel_command_line) self.vm.launch() console_pattern = 'Kernel command line: %s' % kernel_command_line self.wait_for_console_pattern(console_pattern) def test_mips_malta_cpio(self): """ :avocado: tags=arch:mips :avocado: tags=machine:malta :avocado: tags=endian:big """ deb_url = ('http://snapshot.debian.org/archive/debian/' '20160601T041800Z/pool/main/l/linux/' 'linux-image-4.5.0-2-4kc-malta_4.5.5-1_mips.deb') deb_hash = 'a3c84f3e88b54e06107d65a410d1d1e8e0f340f8' deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) kernel_path = self.extract_from_deb(deb_path, '/boot/vmlinux-4.5.0-2-4kc-malta') initrd_url = ('https://github.com/groeck/linux-build-test/raw/' '8584a59ed9e5eb5ee7ca91f6d74bbb06619205b8/rootfs/' 'mips/rootfs.cpio.gz') initrd_hash = 'bf806e17009360a866bf537f6de66590de349a99' initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash) initrd_path = self.workdir + "rootfs.cpio" archive.gzip_uncompress(initrd_path_gz, initrd_path) self.vm.set_console() kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0 console=tty ' + 'rdinit=/sbin/init noreboot') self.vm.add_args('-kernel', kernel_path, '-initrd', initrd_path, '-append', kernel_command_line, '-no-reboot') self.vm.launch() self.wait_for_console_pattern('Boot successful.') exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', 'BogoMIPS') exec_command_and_wait_for_pattern(self, 'uname -a', 'Debian') exec_command_and_wait_for_pattern(self, 'reboot', 'reboot: Restarting system') # Wait for VM to shut down gracefully self.vm.wait() @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code') def test_mips64el_malta_5KEc_cpio(self): """ :avocado: tags=arch:mips64el :avocado: tags=machine:malta :avocado: tags=endian:little :avocado: tags=cpu:5KEc 
""" kernel_url = ('https://github.com/philmd/qemu-testing-blob/' 'raw/9ad2df38/mips/malta/mips64el/' 'vmlinux-3.19.3.mtoman.20150408') kernel_hash = '00d1d268fb9f7d8beda1de6bebcc46e884d71754' kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) initrd_url = ('https://github.com/groeck/linux-build-test/' 'raw/8584a59e/rootfs/' 'mipsel64/rootfs.mipsel64r1.cpio.gz') initrd_hash = '1dbb8a396e916847325284dbe2151167' initrd_path_gz = self.fetch_asset(initrd_url, algorithm='md5', asset_hash=initrd_hash) initrd_path = self.workdir + "rootfs.cpio" archive.gzip_uncompress(initrd_path_gz, initrd_path) self.vm.set_console() kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0 console=tty ' + 'rdinit=/sbin/init noreboot') self.vm.add_args('-kernel', kernel_path, '-initrd', initrd_path, '-append', kernel_command_line, '-no-reboot') self.vm.launch() wait_for_console_pattern(self, 'Boot successful.') exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', 'MIPS 5KE') exec_command_and_wait_for_pattern(self, 'uname -a', '3.19.3.mtoman.20150408') exec_command_and_wait_for_pattern(self, 'reboot', 'reboot: Restarting system') # Wait for VM to shut down gracefully self.vm.wait() def do_test_mips_malta32el_nanomips(self, kernel_url, kernel_hash): kernel_path_xz = self.fetch_asset(kernel_url, asset_hash=kernel_hash) kernel_path = self.workdir + "kernel" with lzma.open(kernel_path_xz, 'rb') as f_in: with open(kernel_path, 'wb') as f_out: shutil.copyfileobj(f_in, f_out) self.vm.set_console() kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + 'mem=256m@@0x0 ' + 'console=ttyS0') self.vm.add_args('-no-reboot', '-kernel', kernel_path, '-append', kernel_command_line) self.vm.launch() console_pattern = 'Kernel command line: %s' % kernel_command_line self.wait_for_console_pattern(console_pattern) def test_mips_malta32el_nanomips_4k(self): """ :avocado: tags=arch:mipsel :avocado: tags=machine:malta :avocado: tags=endian:little :avocado: tags=cpu:I7200 """ 
kernel_url = ('https://mipsdistros.mips.com/LinuxDistro/nanomips/' 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/' 'generic_nano32r6el_page4k.xz') kernel_hash = '477456aafd2a0f1ddc9482727f20fe9575565dd6' self.do_test_mips_malta32el_nanomips(kernel_url, kernel_hash) def test_mips_malta32el_nanomips_16k_up(self): """ :avocado: tags=arch:mipsel :avocado: tags=machine:malta :avocado: tags=endian:little :avocado: tags=cpu:I7200 """ kernel_url = ('https://mipsdistros.mips.com/LinuxDistro/nanomips/' 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/' 'generic_nano32r6el_page16k_up.xz') kernel_hash = 'e882868f944c71c816e832e2303b7874d044a7bc' self.do_test_mips_malta32el_nanomips(kernel_url, kernel_hash) def test_mips_malta32el_nanomips_64k_dbg(self): """ :avocado: tags=arch:mipsel :avocado: tags=machine:malta :avocado: tags=endian:little :avocado: tags=cpu:I7200 """ kernel_url = ('https://mipsdistros.mips.com/LinuxDistro/nanomips/' 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/' 'generic_nano32r6el_page64k_dbg.xz') kernel_hash = '18d1c68f2e23429e266ca39ba5349ccd0aeb7180' self.do_test_mips_malta32el_nanomips(kernel_url, kernel_hash) def test_aarch64_xlnx_versal_virt(self): """ :avocado: tags=arch:aarch64 :avocado: tags=machine:xlnx-versal-virt :avocado: tags=device:pl011 :avocado: tags=device:arm_gicv3 :avocado: tags=accel:tcg """ images_url = ('http://ports.ubuntu.com/ubuntu-ports/dists/' 'bionic-updates/main/installer-arm64/' '20101020ubuntu543.19/images/') kernel_url = images_url + 'netboot/ubuntu-installer/arm64/linux' kernel_hash = 'e167757620640eb26de0972f578741924abb3a82' kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) initrd_url = images_url + 'netboot/ubuntu-installer/arm64/initrd.gz' initrd_hash = 'cab5cb3fcefca8408aa5aae57f24574bfce8bdb9' initrd_path = self.fetch_asset(initrd_url, asset_hash=initrd_hash) self.vm.set_console() self.vm.add_args('-m', '2G', '-accel', 'tcg', '-kernel', kernel_path, '-initrd', initrd_path) 
self.vm.launch() self.wait_for_console_pattern('Checked W+X mappings: passed') def test_arm_virt(self): """ :avocado: tags=arch:arm :avocado: tags=machine:virt :avocado: tags=accel:tcg """ kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora' '/linux/releases/29/Everything/armhfp/os/images/pxeboot' '/vmlinuz') kernel_hash = 'e9826d741b4fb04cadba8d4824d1ed3b7fb8b4d4' kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) self.vm.set_console() kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyAMA0') self.vm.add_args('-kernel', kernel_path, '-append', kernel_command_line) self.vm.launch() console_pattern = 'Kernel command line: %s' % kernel_command_line self.wait_for_console_pattern(console_pattern) def test_arm_emcraft_sf2(self): """ :avocado: tags=arch:arm :avocado: tags=machine:emcraft-sf2 :avocado: tags=endian:little :avocado: tags=u-boot :avocado: tags=accel:tcg """ self.require_netdev('user') uboot_url = ('https://raw.githubusercontent.com/' 'Subbaraya-Sundeep/qemu-test-binaries/' 'fe371d32e50ca682391e1e70ab98c2942aeffb01/u-boot') uboot_hash = 'cbb8cbab970f594bf6523b9855be209c08374ae2' uboot_path = self.fetch_asset(uboot_url, asset_hash=uboot_hash) spi_url = ('https://raw.githubusercontent.com/' 'Subbaraya-Sundeep/qemu-test-binaries/' 'fe371d32e50ca682391e1e70ab98c2942aeffb01/spi.bin') spi_hash = '65523a1835949b6f4553be96dec1b6a38fb05501' spi_path = self.fetch_asset(spi_url, asset_hash=spi_hash) file_truncate(spi_path, 16 << 20) # Spansion S25FL128SDPBHICO is 16 MiB self.vm.set_console() kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE self.vm.add_args('-kernel', uboot_path, '-append', kernel_command_line, '-drive', 'file=' + spi_path + ',if=mtd,format=raw', '-no-reboot') self.vm.launch() self.wait_for_console_pattern('Enter \'help\' for a list') exec_command_and_wait_for_pattern(self, 'ifconfig eth0 10.0.2.15', 'eth0: link becomes ready') exec_command_and_wait_for_pattern(self, 'ping -c 3 10.0.2.2', '3 packets 
transmitted, 3 packets received, 0% packet loss') def do_test_arm_raspi2(self, uart_id): """ :avocado: tags=accel:tcg The kernel can be rebuilt using the kernel source referenced and following the instructions on the on: https://www.raspberrypi.org/documentation/linux/kernel/building.md """ serial_kernel_cmdline = { 0: 'earlycon=pl011,0x3f201000 console=ttyAMA0', } deb_url = ('http://archive.raspberrypi.org/debian/' 'pool/main/r/raspberrypi-firmware/' 'raspberrypi-kernel_1.20190215-1_armhf.deb') deb_hash = 'cd284220b32128c5084037553db3c482426f3972' deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) kernel_path = self.extract_from_deb(deb_path, '/boot/kernel7.img') dtb_path = self.extract_from_deb(deb_path, '/boot/bcm2709-rpi-2-b.dtb') self.vm.set_console() kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + serial_kernel_cmdline[uart_id] + ' root=/dev/mmcblk0p2 rootwait ' + 'dwc_otg.fiq_fsm_enable=0') self.vm.add_args('-kernel', kernel_path, '-dtb', dtb_path, '-append', kernel_command_line, '-device', 'usb-kbd') self.vm.launch() console_pattern = 'Kernel command line: %s' % kernel_command_line self.wait_for_console_pattern(console_pattern) console_pattern = 'Product: QEMU USB Keyboard' self.wait_for_console_pattern(console_pattern) def test_arm_raspi2_uart0(self): """ :avocado: tags=arch:arm :avocado: tags=machine:raspi2b :avocado: tags=device:pl011 :avocado: tags=accel:tcg """ self.do_test_arm_raspi2(0) def test_arm_raspi2_initrd(self): """ :avocado: tags=arch:arm :avocado: tags=machine:raspi2b """ deb_url = ('http://archive.raspberrypi.org/debian/' 'pool/main/r/raspberrypi-firmware/' 'raspberrypi-kernel_1.20190215-1_armhf.deb') deb_hash = 'cd284220b32128c5084037553db3c482426f3972' deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) kernel_path = self.extract_from_deb(deb_path, '/boot/kernel7.img') dtb_path = self.extract_from_deb(deb_path, '/boot/bcm2709-rpi-2-b.dtb') initrd_url = ('https://github.com/groeck/linux-build-test/raw/' 
'2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/' 'arm/rootfs-armv7a.cpio.gz') initrd_hash = '604b2e45cdf35045846b8bbfbf2129b1891bdc9c' initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash) initrd_path = os.path.join(self.workdir, 'rootfs.cpio') archive.gzip_uncompress(initrd_path_gz, initrd_path) self.vm.set_console() kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + 'earlycon=pl011,0x3f201000 console=ttyAMA0 ' 'panic=-1 noreboot ' + 'dwc_otg.fiq_fsm_enable=0') self.vm.add_args('-kernel', kernel_path, '-dtb', dtb_path, '-initrd', initrd_path, '-append', kernel_command_line, '-no-reboot') self.vm.launch() self.wait_for_console_pattern('Boot successful.') exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', 'BCM2835') exec_command_and_wait_for_pattern(self, 'cat /proc/iomem', '/soc/cprman@7e101000') exec_command_and_wait_for_pattern(self, 'halt', 'reboot: System halted') # Wait for VM to shut down gracefully self.vm.wait() def test_arm_exynos4210_initrd(self): """ :avocado: tags=arch:arm :avocado: tags=machine:smdkc210 :avocado: tags=accel:tcg """ deb_url = ('https://snapshot.debian.org/archive/debian/' '20190928T224601Z/pool/main/l/linux/' 'linux-image-4.19.0-6-armmp_4.19.67-2+deb10u1_armhf.deb') deb_hash = 'fa9df4a0d38936cb50084838f2cb933f570d7d82' deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) kernel_path = self.extract_from_deb(deb_path, '/boot/vmlinuz-4.19.0-6-armmp') dtb_path = '/usr/lib/linux-image-4.19.0-6-armmp/exynos4210-smdkv310.dtb' dtb_path = self.extract_from_deb(deb_path, dtb_path) initrd_url = ('https://github.com/groeck/linux-build-test/raw/' '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/' 'arm/rootfs-armv5.cpio.gz') initrd_hash = '2b50f1873e113523967806f4da2afe385462ff9b' initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash) initrd_path = os.path.join(self.workdir, 'rootfs.cpio') archive.gzip_uncompress(initrd_path_gz, initrd_path) self.vm.set_console() kernel_command_line = 
(self.KERNEL_COMMON_COMMAND_LINE + 'earlycon=exynos4210,0x13800000 earlyprintk ' + 'console=ttySAC0,115200n8 ' + 'random.trust_cpu=off cryptomgr.notests ' + 'cpuidle.off=1 panic=-1 noreboot') self.vm.add_args('-kernel', kernel_path, '-dtb', dtb_path, '-initrd', initrd_path, '-append', kernel_command_line, '-no-reboot') self.vm.launch() self.wait_for_console_pattern('Boot successful.') # TODO user command, for now the uart is stuck def test_arm_cubieboard_initrd(self): """ :avocado: tags=arch:arm :avocado: tags=machine:cubieboard :avocado: tags=accel:tcg """ deb_url = ('https://apt.armbian.com/pool/main/l/' 'linux-5.10.16-sunxi/linux-image-current-sunxi_21.02.2_armhf.deb') deb_hash = '9fa84beda245cabf0b4fa84cf6eaa7738ead1da0' deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) kernel_path = self.extract_from_deb(deb_path, '/boot/vmlinuz-5.10.16-sunxi') dtb_path = '/usr/lib/linux-image-current-sunxi/sun4i-a10-cubieboard.dtb' dtb_path = self.extract_from_deb(deb_path, dtb_path) initrd_url = ('https://github.com/groeck/linux-build-test/raw/' '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/' 'arm/rootfs-armv5.cpio.gz') initrd_hash = '2b50f1873e113523967806f4da2afe385462ff9b' initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash) initrd_path = os.path.join(self.workdir, 'rootfs.cpio') archive.gzip_uncompress(initrd_path_gz, initrd_path) self.vm.set_console() kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0,115200 ' 'usbcore.nousb ' 'panic=-1 noreboot') self.vm.add_args('-kernel', kernel_path, '-dtb', dtb_path, '-initrd', initrd_path, '-append', kernel_command_line, '-no-reboot') self.vm.launch() self.wait_for_console_pattern('Boot successful.') exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', 'Allwinner sun4i/sun5i') exec_command_and_wait_for_pattern(self, 'cat /proc/iomem', 'system-control@1c00000') # cubieboard's reboot is not functioning; omit reboot test. 
def test_arm_cubieboard_sata(self): """ :avocado: tags=arch:arm :avocado: tags=machine:cubieboard :avocado: tags=accel:tcg """ deb_url = ('https://apt.armbian.com/pool/main/l/' 'linux-5.10.16-sunxi/linux-image-current-sunxi_21.02.2_armhf.deb') deb_hash = '9fa84beda245cabf0b4fa84cf6eaa7738ead1da0' deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) kernel_path = self.extract_from_deb(deb_path, '/boot/vmlinuz-5.10.16-sunxi') dtb_path = '/usr/lib/linux-image-current-sunxi/sun4i-a10-cubieboard.dtb' dtb_path = self.extract_from_deb(deb_path, dtb_path) rootfs_url = ('https://github.com/groeck/linux-build-test/raw/' '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/' 'arm/rootfs-armv5.ext2.gz') rootfs_hash = '093e89d2b4d982234bf528bc9fb2f2f17a9d1f93' rootfs_path_gz = self.fetch_asset(rootfs_url, asset_hash=rootfs_hash) rootfs_path = os.path.join(self.workdir, 'rootfs.cpio') archive.gzip_uncompress(rootfs_path_gz, rootfs_path) self.vm.set_console() kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0,115200 ' 'usbcore.nousb ' 'root=/dev/sda ro ' 'panic=-1 noreboot') self.vm.add_args('-kernel', kernel_path, '-dtb', dtb_path, '-drive', 'if=none,format=raw,id=disk0,file=' + rootfs_path, '-device', 'ide-hd,bus=ide.0,drive=disk0', '-append', kernel_command_line, '-no-reboot') self.vm.launch() self.wait_for_console_pattern('Boot successful.') exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', 'Allwinner sun4i/sun5i') exec_command_and_wait_for_pattern(self, 'cat /proc/partitions', 'sda') # cubieboard's reboot is not functioning; omit reboot test. @skipUnless(os.getenv('AVOCADO_ALLOW_LARGE_STORAGE'), 'storage limited') def test_arm_cubieboard_openwrt_22_03_2(self): """ :avocado: tags=arch:arm :avocado: tags=machine:cubieboard :avocado: tags=device:sd """ # This test download a 7.5 MiB compressed image and expand it # to 126 MiB. 
image_url = ('https://downloads.openwrt.org/releases/22.03.2/targets/' 'sunxi/cortexa8/openwrt-22.03.2-sunxi-cortexa8-' 'cubietech_a10-cubieboard-ext4-sdcard.img.gz') image_hash = ('94b5ecbfbc0b3b56276e5146b899eafa' '2ac5dc2d08733d6705af9f144f39f554') image_path_gz = self.fetch_asset(image_url, asset_hash=image_hash, algorithm='sha256') image_path = archive.extract(image_path_gz, self.workdir) image_pow2ceil_expand(image_path) self.vm.set_console() self.vm.add_args('-drive', 'file=' + image_path + ',if=sd,format=raw', '-nic', 'user', '-no-reboot') self.vm.launch() kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + 'usbcore.nousb ' 'noreboot') self.wait_for_console_pattern('U-Boot SPL') interrupt_interactive_console_until_pattern( self, 'Hit any key to stop autoboot:', '=>') exec_command_and_wait_for_pattern(self, "setenv extraargs '" + kernel_command_line + "'", '=>') exec_command_and_wait_for_pattern(self, 'boot', 'Starting kernel ...'); self.wait_for_console_pattern( 'Please press Enter to activate this console.') exec_command_and_wait_for_pattern(self, ' ', 'root@') exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', 'Allwinner sun4i/sun5i') # cubieboard's reboot is not functioning; omit reboot test. @skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout') def test_arm_quanta_gsj(self): """ :avocado: tags=arch:arm :avocado: tags=machine:quanta-gsj :avocado: tags=accel:tcg """ # 25 MiB compressed, 32 MiB uncompressed. 
image_url = ( 'https://github.com/hskinnemoen/openbmc/releases/download/' '20200711-gsj-qemu-0/obmc-phosphor-image-gsj.static.mtd.gz') image_hash = '14895e634923345cb5c8776037ff7876df96f6b1' image_path_gz = self.fetch_asset(image_url, asset_hash=image_hash) image_name = 'obmc.mtd' image_path = os.path.join(self.workdir, image_name) archive.gzip_uncompress(image_path_gz, image_path) self.vm.set_console() drive_args = 'file=' + image_path + ',if=mtd,bus=0,unit=0' self.vm.add_args('-drive', drive_args) self.vm.launch() # Disable drivers and services that stall for a long time during boot, # to avoid running past the 90-second timeout. These may be removed # as the corresponding device support is added. kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + ( 'console=${console} ' 'mem=${mem} ' 'initcall_blacklist=npcm_i2c_bus_driver_init ' 'systemd.mask=systemd-random-seed.service ' 'systemd.mask=dropbearkey.service ' ) self.wait_for_console_pattern('> BootBlock by Nuvoton') self.wait_for_console_pattern('>Device: Poleg BMC NPCM730') self.wait_for_console_pattern('>Skip DDR init.') self.wait_for_console_pattern('U-Boot ') interrupt_interactive_console_until_pattern( self, 'Hit any key to stop autoboot:', 'U-Boot>') exec_command_and_wait_for_pattern( self, "setenv bootargs ${bootargs} " + kernel_command_line, 'U-Boot>') exec_command_and_wait_for_pattern( self, 'run romboot', 'Booting Kernel from flash') self.wait_for_console_pattern('Booting Linux on physical CPU 0x0') self.wait_for_console_pattern('CPU1: thread -1, cpu 1, socket 0') self.wait_for_console_pattern('OpenBMC Project Reference Distro') self.wait_for_console_pattern('gsj login:') def test_arm_quanta_gsj_initrd(self): """ :avocado: tags=arch:arm :avocado: tags=machine:quanta-gsj :avocado: tags=accel:tcg """ initrd_url = ( 'https://github.com/hskinnemoen/openbmc/releases/download/' '20200711-gsj-qemu-0/obmc-phosphor-initramfs-gsj.cpio.xz') initrd_hash = '98fefe5d7e56727b1eb17d5c00311b1b5c945300' initrd_path 
= self.fetch_asset(initrd_url, asset_hash=initrd_hash) kernel_url = ( 'https://github.com/hskinnemoen/openbmc/releases/download/' '20200711-gsj-qemu-0/uImage-gsj.bin') kernel_hash = 'fa67b2f141d56d39b3c54305c0e8a899c99eb2c7' kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) dtb_url = ( 'https://github.com/hskinnemoen/openbmc/releases/download/' '20200711-gsj-qemu-0/nuvoton-npcm730-gsj.dtb') dtb_hash = '18315f7006d7b688d8312d5c727eecd819aa36a4' dtb_path = self.fetch_asset(dtb_url, asset_hash=dtb_hash) self.vm.set_console() kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0,115200n8 ' 'earlycon=uart8250,mmio32,0xf0001000') self.vm.add_args('-kernel', kernel_path, '-initrd', initrd_path, '-dtb', dtb_path, '-append', kernel_command_line) self.vm.launch() self.wait_for_console_pattern('Booting Linux on physical CPU 0x0') self.wait_for_console_pattern('CPU1: thread -1, cpu 1, socket 0') self.wait_for_console_pattern( 'Give root password for system maintenance') def test_arm_orangepi(self): """ :avocado: tags=arch:arm :avocado: tags=machine:orangepi-pc :avocado: tags=accel:tcg """ deb_url = ('https://apt.armbian.com/pool/main/l/' 'linux-5.10.16-sunxi/linux-image-current-sunxi_21.02.2_armhf.deb') deb_hash = '9fa84beda245cabf0b4fa84cf6eaa7738ead1da0' deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) kernel_path = self.extract_from_deb(deb_path, '/boot/vmlinuz-5.10.16-sunxi') dtb_path = '/usr/lib/linux-image-current-sunxi/sun8i-h3-orangepi-pc.dtb' dtb_path = self.extract_from_deb(deb_path, dtb_path) self.vm.set_console() kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0,115200n8 ' 'earlycon=uart,mmio32,0x1c28000') self.vm.add_args('-kernel', kernel_path, '-dtb', dtb_path, '-append', kernel_command_line) self.vm.launch() console_pattern = 'Kernel command line: %s' % kernel_command_line self.wait_for_console_pattern(console_pattern) def test_arm_orangepi_initrd(self): """ :avocado: tags=arch:arm :avocado: 
tags=accel:tcg :avocado: tags=machine:orangepi-pc """ deb_url = ('https://apt.armbian.com/pool/main/l/' 'linux-5.10.16-sunxi/linux-image-current-sunxi_21.02.2_armhf.deb') deb_hash = '9fa84beda245cabf0b4fa84cf6eaa7738ead1da0' deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) kernel_path = self.extract_from_deb(deb_path, '/boot/vmlinuz-5.10.16-sunxi') dtb_path = '/usr/lib/linux-image-current-sunxi/sun8i-h3-orangepi-pc.dtb' dtb_path = self.extract_from_deb(deb_path, dtb_path) initrd_url = ('https://github.com/groeck/linux-build-test/raw/' '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/' 'arm/rootfs-armv7a.cpio.gz') initrd_hash = '604b2e45cdf35045846b8bbfbf2129b1891bdc9c' initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash) initrd_path = os.path.join(self.workdir, 'rootfs.cpio') archive.gzip_uncompress(initrd_path_gz, initrd_path) self.vm.set_console() kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0,115200 ' 'panic=-1 noreboot') self.vm.add_args('-kernel', kernel_path, '-dtb', dtb_path, '-initrd', initrd_path, '-append', kernel_command_line, '-no-reboot') self.vm.launch() self.wait_for_console_pattern('Boot successful.') exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', 'Allwinner sun8i Family') exec_command_and_wait_for_pattern(self, 'cat /proc/iomem', 'system-control@1c00000') exec_command_and_wait_for_pattern(self, 'reboot', 'reboot: Restarting system') # Wait for VM to shut down gracefully self.vm.wait() def test_arm_orangepi_sd(self): """ :avocado: tags=arch:arm :avocado: tags=accel:tcg :avocado: tags=machine:orangepi-pc :avocado: tags=device:sd """ self.require_netdev('user') deb_url = ('https://apt.armbian.com/pool/main/l/' 'linux-5.10.16-sunxi/linux-image-current-sunxi_21.02.2_armhf.deb') deb_hash = '9fa84beda245cabf0b4fa84cf6eaa7738ead1da0' deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) kernel_path = self.extract_from_deb(deb_path, '/boot/vmlinuz-5.10.16-sunxi') dtb_path = 
'/usr/lib/linux-image-current-sunxi/sun8i-h3-orangepi-pc.dtb' dtb_path = self.extract_from_deb(deb_path, dtb_path) rootfs_url = ('http://storage.kernelci.org/images/rootfs/buildroot/' 'buildroot-baseline/20221116.0/armel/rootfs.ext2.xz') rootfs_hash = 'fae32f337c7b87547b10f42599acf109da8b6d9a' rootfs_path_xz = self.fetch_asset(rootfs_url, asset_hash=rootfs_hash) rootfs_path = os.path.join(self.workdir, 'rootfs.cpio') archive.lzma_uncompress(rootfs_path_xz, rootfs_path) image_pow2ceil_expand(rootfs_path) self.vm.set_console() kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0,115200 ' 'root=/dev/mmcblk0 rootwait rw ' 'panic=-1 noreboot') self.vm.add_args('-kernel', kernel_path, '-dtb', dtb_path, '-drive', 'file=' + rootfs_path + ',if=sd,format=raw', '-append', kernel_command_line, '-no-reboot') self.vm.launch() shell_ready = "/bin/sh: can't access tty; job control turned off" self.wait_for_console_pattern(shell_ready) exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo', 'Allwinner sun8i Family') exec_command_and_wait_for_pattern(self, 'cat /proc/partitions', 'mmcblk0') exec_command_and_wait_for_pattern(self, 'ifconfig eth0 up', 'eth0: Link is Up') exec_command_and_wait_for_pattern(self, 'udhcpc eth0', 'udhcpc: lease of 10.0.2.15 obtained') exec_command_and_wait_for_pattern(self, 'ping -c 3 10.0.2.2', '3 packets transmitted, 3 packets received, 0% packet loss') exec_command_and_wait_for_pattern(self, 'reboot', 'reboot: Restarting system') # Wait for VM to shut down gracefully self.vm.wait() @skipUnless(os.getenv('AVOCADO_ALLOW_LARGE_STORAGE'), 'storage limited') def test_arm_orangepi_bionic_20_08(self): """ :avocado: tags=arch:arm :avocado: tags=machine:orangepi-pc :avocado: tags=device:sd """ # This test download a 275 MiB compressed image and expand it # to 1036 MiB, but the underlying filesystem is 1552 MiB... # As we expand it to 2 GiB we are safe. 
image_url = ('https://archive.armbian.com/orangepipc/archive/' 'Armbian_20.08.1_Orangepipc_bionic_current_5.8.5.img.xz') image_hash = ('b4d6775f5673486329e45a0586bf06b6' 'dbe792199fd182ac6b9c7bb6c7d3e6dd') image_path_xz = self.fetch_asset(image_url, asset_hash=image_hash, algorithm='sha256') image_path = archive.extract(image_path_xz, self.workdir) image_pow2ceil_expand(image_path) self.vm.set_console() self.vm.add_args('-drive', 'file=' + image_path + ',if=sd,format=raw', '-nic', 'user', '-no-reboot') self.vm.launch() kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0,115200 ' 'loglevel=7 ' 'nosmp ' 'systemd.default_timeout_start_sec=9000 ' 'systemd.mask=armbian-zram-config.service ' 'systemd.mask=armbian-ramlog.service') self.wait_for_console_pattern('U-Boot SPL') self.wait_for_console_pattern('Autoboot in ') exec_command_and_wait_for_pattern(self, ' ', '=>') exec_command_and_wait_for_pattern(self, "setenv extraargs '" + kernel_command_line + "'", '=>') exec_command_and_wait_for_pattern(self, 'boot', 'Starting kernel ...'); self.wait_for_console_pattern('systemd[1]: Set hostname ' + 'to <orangepipc>') self.wait_for_console_pattern('Starting Load Kernel Modules...') @skipUnless(os.getenv('AVOCADO_ALLOW_LARGE_STORAGE'), 'storage limited') def test_arm_orangepi_uboot_netbsd9(self): """ :avocado: tags=arch:arm :avocado: tags=machine:orangepi-pc :avocado: tags=device:sd :avocado: tags=os:netbsd """ # This test download a 304MB compressed image and expand it to 2GB deb_url = ('http://snapshot.debian.org/archive/debian/' '20200108T145233Z/pool/main/u/u-boot/' 'u-boot-sunxi_2020.01%2Bdfsg-1_armhf.deb') deb_hash = 'f67f404a80753ca3d1258f13e38f2b060e13db99' deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) # We use the common OrangePi PC 'plus' build of U-Boot for our secondary # program loader (SPL). We will then set the path to the more specific # OrangePi "PC" device tree blob with 'setenv fdtfile' in U-Boot prompt, # before to boot NetBSD. 
uboot_path = '/usr/lib/u-boot/orangepi_plus/u-boot-sunxi-with-spl.bin' uboot_path = self.extract_from_deb(deb_path, uboot_path) image_url = ('https://cdn.netbsd.org/pub/NetBSD/NetBSD-9.0/' 'evbarm-earmv7hf/binary/gzimg/armv7.img.gz') image_hash = '2babb29d36d8360adcb39c09e31060945259917a' image_path_gz = self.fetch_asset(image_url, asset_hash=image_hash) image_path = os.path.join(self.workdir, 'armv7.img') archive.gzip_uncompress(image_path_gz, image_path) image_pow2ceil_expand(image_path) image_drive_args = 'if=sd,format=raw,snapshot=on,file=' + image_path # dd if=u-boot-sunxi-with-spl.bin of=armv7.img bs=1K seek=8 conv=notrunc with open(uboot_path, 'rb') as f_in: with open(image_path, 'r+b') as f_out: f_out.seek(8 * 1024) shutil.copyfileobj(f_in, f_out) self.vm.set_console() self.vm.add_args('-nic', 'user', '-drive', image_drive_args, '-global', 'allwinner-rtc.base-year=2000', '-no-reboot') self.vm.launch() wait_for_console_pattern(self, 'U-Boot 2020.01+dfsg-1') interrupt_interactive_console_until_pattern(self, 'Hit any key to stop autoboot:', 'switch to partitions #0, OK') exec_command_and_wait_for_pattern(self, '', '=>') cmd = 'setenv bootargs root=ld0a' exec_command_and_wait_for_pattern(self, cmd, '=>') cmd = 'setenv kernel netbsd-GENERIC.ub' exec_command_and_wait_for_pattern(self, cmd, '=>') cmd = 'setenv fdtfile dtb/sun8i-h3-orangepi-pc.dtb' exec_command_and_wait_for_pattern(self, cmd, '=>') cmd = ("setenv bootcmd 'fatload mmc 0:1 ${kernel_addr_r} ${kernel}; " "fatload mmc 0:1 ${fdt_addr_r} ${fdtfile}; " "fdt addr ${fdt_addr_r}; " "bootm ${kernel_addr_r} - ${fdt_addr_r}'") exec_command_and_wait_for_pattern(self, cmd, '=>') exec_command_and_wait_for_pattern(self, 'boot', 'Booting kernel from Legacy Image') wait_for_console_pattern(self, 'Starting kernel ...') wait_for_console_pattern(self, 'NetBSD 9.0 (GENERIC)') # Wait for user-space wait_for_console_pattern(self, 'Starting root file system check') def test_aarch64_raspi3_atf(self): """ :avocado: 
tags=accel:tcg :avocado: tags=arch:aarch64 :avocado: tags=machine:raspi3b :avocado: tags=cpu:cortex-a53 :avocado: tags=device:pl011 :avocado: tags=atf """ zip_url = ('https://github.com/pbatard/RPi3/releases/download/' 'v1.15/RPi3_UEFI_Firmware_v1.15.zip') zip_hash = '74b3bd0de92683cadb14e008a7575e1d0c3cafb9' zip_path = self.fetch_asset(zip_url, asset_hash=zip_hash) archive.extract(zip_path, self.workdir) efi_fd = os.path.join(self.workdir, 'RPI_EFI.fd') self.vm.set_console(console_index=1) self.vm.add_args('-nodefaults', '-device', 'loader,file=%s,force-raw=true' % efi_fd) self.vm.launch() self.wait_for_console_pattern('version UEFI Firmware v1.15') def test_s390x_s390_ccw_virtio(self): """ :avocado: tags=arch:s390x :avocado: tags=machine:s390-ccw-virtio """ kernel_url = ('https://archives.fedoraproject.org/pub/archive' '/fedora-secondary/releases/29/Everything/s390x/os/images' '/kernel.img') kernel_hash = 'e8e8439103ef8053418ef062644ffd46a7919313' kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) self.vm.set_console() kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=sclp0' self.vm.add_args('-nodefaults', '-kernel', kernel_path, '-append', kernel_command_line) self.vm.launch() console_pattern = 'Kernel command line: %s' % kernel_command_line self.wait_for_console_pattern(console_pattern) def test_alpha_clipper(self): """ :avocado: tags=arch:alpha :avocado: tags=machine:clipper """ kernel_url = ('http://archive.debian.org/debian/dists/lenny/main/' 'installer-alpha/20090123lenny10/images/cdrom/vmlinuz') kernel_hash = '3a943149335529e2ed3e74d0d787b85fb5671ba3' kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) uncompressed_kernel = archive.uncompress(kernel_path, self.workdir) self.vm.set_console() kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' self.vm.add_args('-nodefaults', '-kernel', uncompressed_kernel, '-append', kernel_command_line) self.vm.launch() console_pattern = 'Kernel command line: 
%s' % kernel_command_line self.wait_for_console_pattern(console_pattern) def test_m68k_q800(self): """ :avocado: tags=arch:m68k :avocado: tags=machine:q800 """ deb_url = ('https://snapshot.debian.org/archive/debian-ports' '/20191021T083923Z/pool-m68k/main' '/l/linux/kernel-image-5.3.0-1-m68k-di_5.3.7-1_m68k.udeb') deb_hash = '044954bb9be4160a3ce81f8bc1b5e856b75cccd1' deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) kernel_path = self.extract_from_deb(deb_path, '/boot/vmlinux-5.3.0-1-m68k') self.vm.set_console() kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0 vga=off') self.vm.add_args('-kernel', kernel_path, '-append', kernel_command_line) self.vm.launch() console_pattern = 'Kernel command line: %s' % kernel_command_line self.wait_for_console_pattern(console_pattern) console_pattern = 'No filesystem could mount root' self.wait_for_console_pattern(console_pattern) def do_test_advcal_2018(self, day, tar_hash, kernel_name, console=0): tar_url = ('https://qemu-advcal.gitlab.io' '/qac-best-of-multiarch/download/day' + day + '.tar.xz') file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) archive.extract(file_path, self.workdir) self.vm.set_console(console_index=console) self.vm.add_args('-kernel', self.workdir + '/day' + day + '/' + kernel_name) self.vm.launch() self.wait_for_console_pattern('QEMU advent calendar') def test_arm_vexpressa9(self): """ :avocado: tags=arch:arm :avocado: tags=machine:vexpress-a9 """ tar_hash = '32b7677ce8b6f1471fb0059865f451169934245b' self.vm.add_args('-dtb', self.workdir + '/day16/vexpress-v2p-ca9.dtb') self.do_test_advcal_2018('16', tar_hash, 'winter.zImage') def test_arm_ast2600_debian(self): """ :avocado: tags=arch:arm :avocado: tags=machine:rainier-bmc """ deb_url = ('http://snapshot.debian.org/archive/debian/' '20220606T211338Z/' 'pool/main/l/linux/' 'linux-image-5.17.0-2-armmp_5.17.6-1%2Bb1_armhf.deb') deb_hash = '8acb2b4439faedc2f3ed4bdb2847ad4f6e0491f73debaeb7f660c8abe4dcdc0e' deb_path = 
self.fetch_asset(deb_url, asset_hash=deb_hash, algorithm='sha256') kernel_path = self.extract_from_deb(deb_path, '/boot/vmlinuz-5.17.0-2-armmp') dtb_path = self.extract_from_deb(deb_path, '/usr/lib/linux-image-5.17.0-2-armmp/aspeed-bmc-ibm-rainier.dtb') self.vm.set_console() self.vm.add_args('-kernel', kernel_path, '-dtb', dtb_path, '-net', 'nic') self.vm.launch() self.wait_for_console_pattern("Booting Linux on physical CPU 0xf00") self.wait_for_console_pattern("SMP: Total of 2 processors activated") self.wait_for_console_pattern("No filesystem could mount root") def test_m68k_mcf5208evb(self): """ :avocado: tags=arch:m68k :avocado: tags=machine:mcf5208evb """ tar_hash = 'ac688fd00561a2b6ce1359f9ff6aa2b98c9a570c' self.do_test_advcal_2018('07', tar_hash, 'sanity-clause.elf') def test_or1k_sim(self): """ :avocado: tags=arch:or1k :avocado: tags=machine:or1k-sim """ tar_hash = '20334cdaf386108c530ff0badaecc955693027dd' self.do_test_advcal_2018('20', tar_hash, 'vmlinux') def test_nios2_10m50(self): """ :avocado: tags=arch:nios2 :avocado: tags=machine:10m50-ghrd """ tar_hash = 'e4251141726c412ac0407c5a6bceefbbff018918' self.do_test_advcal_2018('14', tar_hash, 'vmlinux.elf') def test_ppc64_e500(self): """ :avocado: tags=arch:ppc64 :avocado: tags=machine:ppce500 :avocado: tags=cpu:e5500 :avocado: tags=accel:tcg """ self.require_accelerator("tcg") tar_hash = '6951d86d644b302898da2fd701739c9406527fe1' self.do_test_advcal_2018('19', tar_hash, 'uImage') def do_test_ppc64_powernv(self, proc): self.require_accelerator("tcg") images_url = ('https://github.com/open-power/op-build/releases/download/v2.7/') kernel_url = images_url + 'zImage.epapr' kernel_hash = '0ab237df661727e5392cee97460e8674057a883c5f74381a128fa772588d45cd' kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash, algorithm='sha256') self.vm.set_console() self.vm.add_args('-kernel', kernel_path, '-append', 'console=tty0 console=hvc0', '-device', 'pcie-pci-bridge,id=bridge1,bus=pcie.1,addr=0x0', 
'-device', 'nvme,bus=pcie.2,addr=0x0,serial=1234', '-device', 'e1000e,bus=bridge1,addr=0x3', '-device', 'nec-usb-xhci,bus=bridge1,addr=0x2') self.vm.launch() self.wait_for_console_pattern("CPU: " + proc + " generation processor") self.wait_for_console_pattern("zImage starting: loaded") self.wait_for_console_pattern("Run /init as init process") self.wait_for_console_pattern("Creating 1 MTD partitions") def test_ppc_powernv8(self): """ :avocado: tags=arch:ppc64 :avocado: tags=machine:powernv8 :avocado: tags=accel:tcg """ self.do_test_ppc64_powernv('P8') def test_ppc_powernv9(self): """ :avocado: tags=arch:ppc64 :avocado: tags=machine:powernv9 :avocado: tags=accel:tcg """ self.do_test_ppc64_powernv('P9') def test_ppc_g3beige(self): """ :avocado: tags=arch:ppc :avocado: tags=machine:g3beige :avocado: tags=accel:tcg """ # TODO: g3beige works with kvm_pr but we don't have a # reliable way ATM (e.g. looking at /proc/modules) to detect # whether we're running kvm_hv or kvm_pr. For now let's # disable this test if we don't have TCG support. self.require_accelerator("tcg") tar_hash = 'e0b872a5eb8fdc5bed19bd43ffe863900ebcedfc' self.vm.add_args('-M', 'graphics=off') self.do_test_advcal_2018('15', tar_hash, 'invaders.elf') def test_ppc_mac99(self): """ :avocado: tags=arch:ppc :avocado: tags=machine:mac99 :avocado: tags=accel:tcg """ # TODO: mac99 works with kvm_pr but we don't have a # reliable way ATM (e.g. looking at /proc/modules) to detect # whether we're running kvm_hv or kvm_pr. For now let's # disable this test if we don't have TCG support. self.require_accelerator("tcg") tar_hash = 'e0b872a5eb8fdc5bed19bd43ffe863900ebcedfc' self.vm.add_args('-M', 'graphics=off') self.do_test_advcal_2018('15', tar_hash, 'invaders.elf') # This test has a 6-10% failure rate on various hosts that look # like issues with a buggy kernel. As a result we don't want it # gating releases on Gitlab. 
@skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab') def test_sh4_r2d(self): """ :avocado: tags=arch:sh4 :avocado: tags=machine:r2d """ tar_hash = 'fe06a4fd8ccbf2e27928d64472939d47829d4c7e' self.vm.add_args('-append', 'console=ttySC1') self.do_test_advcal_2018('09', tar_hash, 'zImage', console=1) def test_sparc_ss20(self): """ :avocado: tags=arch:sparc :avocado: tags=machine:SS-20 """ tar_hash = 'b18550d5d61c7615d989a06edace051017726a9f' self.do_test_advcal_2018('11', tar_hash, 'zImage.elf') def test_xtensa_lx60(self): """ :avocado: tags=arch:xtensa :avocado: tags=machine:lx60 :avocado: tags=cpu:dc233c """ tar_hash = '49e88d9933742f0164b60839886c9739cb7a0d34' self.do_test_advcal_2018('02', tar_hash, 'santas-sleigh-ride.elf')
57,340
44.400633
88
py
qemu
qemu-master/tests/avocado/machine_arm_integratorcp.py
# Functional test that boots a Linux kernel and checks the console # # Copyright (c) 2020 Red Hat, Inc. # # Author: # Thomas Huth <[email protected]> # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. import os import logging from avocado import skipUnless from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern NUMPY_AVAILABLE = True try: import numpy as np except ImportError: NUMPY_AVAILABLE = False CV2_AVAILABLE = True try: import cv2 except ImportError: CV2_AVAILABLE = False class IntegratorMachine(QemuSystemTest): timeout = 90 def boot_integratorcp(self): kernel_url = ('https://github.com/zayac/qemu-arm/raw/master/' 'arm-test/kernel/zImage.integrator') kernel_hash = '0d7adba893c503267c946a3cbdc63b4b54f25468' kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) initrd_url = ('https://github.com/zayac/qemu-arm/raw/master/' 'arm-test/kernel/arm_root.img') initrd_hash = 'b51e4154285bf784e017a37586428332d8c7bd8b' initrd_path = self.fetch_asset(initrd_url, asset_hash=initrd_hash) self.vm.set_console() self.vm.add_args('-kernel', kernel_path, '-initrd', initrd_path, '-append', 'printk.time=0 console=ttyAMA0') self.vm.launch() @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code') def test_integratorcp_console(self): """ Boots the Linux kernel and checks that the console is operational :avocado: tags=arch:arm :avocado: tags=machine:integratorcp :avocado: tags=device:pl011 """ self.boot_integratorcp() wait_for_console_pattern(self, 'Log in as root') @skipUnless(NUMPY_AVAILABLE, 'Python NumPy not installed') @skipUnless(CV2_AVAILABLE, 'Python OpenCV not installed') @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code') def test_framebuffer_tux_logo(self): """ Boot Linux and verify the Tux logo is displayed on the framebuffer. 
:avocado: tags=arch:arm :avocado: tags=machine:integratorcp :avocado: tags=device:pl110 :avocado: tags=device:framebuffer """ screendump_path = os.path.join(self.workdir, "screendump.pbm") tuxlogo_url = ('https://github.com/torvalds/linux/raw/v2.6.12/' 'drivers/video/logo/logo_linux_vga16.ppm') tuxlogo_hash = '3991c2ddbd1ddaecda7601f8aafbcf5b02dc86af' tuxlogo_path = self.fetch_asset(tuxlogo_url, asset_hash=tuxlogo_hash) self.boot_integratorcp() framebuffer_ready = 'Console: switching to colour frame buffer device' wait_for_console_pattern(self, framebuffer_ready) self.vm.command('human-monitor-command', command_line='stop') self.vm.command('human-monitor-command', command_line='screendump %s' % screendump_path) logger = logging.getLogger('framebuffer') cpu_count = 1 match_threshold = 0.92 screendump_bgr = cv2.imread(screendump_path) screendump_gray = cv2.cvtColor(screendump_bgr, cv2.COLOR_BGR2GRAY) result = cv2.matchTemplate(screendump_gray, cv2.imread(tuxlogo_path, 0), cv2.TM_CCOEFF_NORMED) loc = np.where(result >= match_threshold) tux_count = 0 for tux_count, pt in enumerate(zip(*loc[::-1]), start=1): logger.debug('found Tux at position [x, y] = %s', pt) self.assertGreaterEqual(tux_count, cpu_count)
3,729
36.3
80
py
qemu
qemu-master/tests/avocado/hotplug_cpu.py
# Functional test that hotplugs a CPU and checks it on a Linux guest # # Copyright (c) 2021 Red Hat, Inc. # # Author: # Cleber Rosa <[email protected]> # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. from avocado_qemu import LinuxTest class HotPlugCPU(LinuxTest): def test(self): """ :avocado: tags=arch:x86_64 :avocado: tags=machine:q35 :avocado: tags=accel:kvm """ self.require_accelerator('kvm') self.vm.add_args('-accel', 'kvm') self.vm.add_args('-cpu', 'Haswell') self.vm.add_args('-smp', '1,sockets=1,cores=2,threads=1,maxcpus=2') self.launch_and_wait() self.ssh_command('test -e /sys/devices/system/cpu/cpu0') with self.assertRaises(AssertionError): self.ssh_command('test -e /sys/devices/system/cpu/cpu1') self.vm.command('device_add', driver='Haswell-x86_64-cpu', socket_id=0, core_id=1, thread_id=0) self.ssh_command('test -e /sys/devices/system/cpu/cpu1')
1,179
30.052632
75
py
qemu
qemu-master/tests/avocado/replay_linux.py
# Record/replay test that boots a complete Linux system via a cloud image # # Copyright (c) 2020 ISP RAS # # Author: # Pavel Dovgalyuk <[email protected]> # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. import os import logging import time from avocado import skipUnless from avocado_qemu import BUILD_DIR from avocado.utils import cloudinit from avocado.utils import network from avocado.utils import vmimage from avocado.utils import datadrainer from avocado.utils.path import find_command from avocado_qemu import LinuxTest class ReplayLinux(LinuxTest): """ Boots a Linux system, checking for a successful initialization """ timeout = 1800 chksum = None hdd = 'ide-hd' cd = 'ide-cd' bus = 'ide' def setUp(self): # LinuxTest does many replay-incompatible things, but includes # useful methods. Do not setup LinuxTest here and just # call some functions. super(LinuxTest, self).setUp() self._set_distro() self.boot_path = self.download_boot() self.phone_server = cloudinit.PhoneHomeServer(('0.0.0.0', 0), self.name) ssh_pubkey, self.ssh_key = self.set_up_existing_ssh_keys() self.cloudinit_path = self.prepare_cloudinit(ssh_pubkey) def vm_add_disk(self, vm, path, id, device): bus_string = '' if self.bus: bus_string = ',bus=%s.%d' % (self.bus, id,) vm.add_args('-drive', 'file=%s,snapshot,id=disk%s,if=none' % (path, id)) vm.add_args('-drive', 'driver=blkreplay,id=disk%s-rr,if=none,image=disk%s' % (id, id)) vm.add_args('-device', '%s,drive=disk%s-rr%s' % (device, id, bus_string)) def launch_and_wait(self, record, args, shift): self.require_netdev('user') vm = self.get_vm() vm.add_args('-smp', '1') vm.add_args('-m', '1024') vm.add_args('-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22', '-device', 'virtio-net,netdev=vnet') vm.add_args('-object', 'filter-replay,id=replay,netdev=vnet') if args: vm.add_args(*args) self.vm_add_disk(vm, self.boot_path, 0, self.hdd) self.vm_add_disk(vm, self.cloudinit_path, 1, 
self.cd) logger = logging.getLogger('replay') if record: logger.info('recording the execution...') mode = 'record' else: logger.info('replaying the execution...') mode = 'replay' replay_path = os.path.join(self.workdir, 'replay.bin') vm.add_args('-icount', 'shift=%s,rr=%s,rrfile=%s' % (shift, mode, replay_path)) start_time = time.time() vm.set_console() vm.launch() console_drainer = datadrainer.LineLogger(vm.console_socket.fileno(), logger=self.log.getChild('console'), stop_check=(lambda : not vm.is_running())) console_drainer.start() if record: while not self.phone_server.instance_phoned_back: self.phone_server.handle_request() vm.shutdown() logger.info('finished the recording with log size %s bytes' % os.path.getsize(replay_path)) else: vm.event_wait('SHUTDOWN', self.timeout) vm.shutdown(True) logger.info('successfully fihished the replay') elapsed = time.time() - start_time logger.info('elapsed time %.2f sec' % elapsed) return elapsed def run_rr(self, args=None, shift=7): t1 = self.launch_and_wait(True, args, shift) t2 = self.launch_and_wait(False, args, shift) logger = logging.getLogger('replay') logger.info('replay overhead {:.2%}'.format(t2 / t1 - 1)) @skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout') class ReplayLinuxX8664(ReplayLinux): """ :avocado: tags=arch:x86_64 :avocado: tags=accel:tcg """ chksum = 'e3c1b309d9203604922d6e255c2c5d098a309c2d46215d8fc026954f3c5c27a0' def test_pc_i440fx(self): """ :avocado: tags=machine:pc """ self.run_rr(shift=1) def test_pc_q35(self): """ :avocado: tags=machine:q35 """ self.run_rr(shift=3) @skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout') class ReplayLinuxX8664Virtio(ReplayLinux): """ :avocado: tags=arch:x86_64 :avocado: tags=virtio :avocado: tags=accel:tcg """ hdd = 'virtio-blk-pci' cd = 'virtio-blk-pci' bus = None chksum = 'e3c1b309d9203604922d6e255c2c5d098a309c2d46215d8fc026954f3c5c27a0' def test_pc_i440fx(self): """ :avocado: tags=machine:pc """ self.run_rr(shift=1) def 
test_pc_q35(self): """ :avocado: tags=machine:q35 """ self.run_rr(shift=3) @skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout') class ReplayLinuxAarch64(ReplayLinux): """ :avocado: tags=accel:tcg :avocado: tags=arch:aarch64 :avocado: tags=machine:virt :avocado: tags=cpu:max """ chksum = '1e18d9c0cf734940c4b5d5ec592facaed2af0ad0329383d5639c997fdf16fe49' hdd = 'virtio-blk-device' cd = 'virtio-blk-device' bus = None def get_common_args(self): return ('-bios', os.path.join(BUILD_DIR, 'pc-bios', 'edk2-aarch64-code.fd'), "-cpu", "max,lpa2=off", '-device', 'virtio-rng-pci,rng=rng0', '-object', 'rng-builtin,id=rng0') def test_virt_gicv2(self): """ :avocado: tags=machine:gic-version=2 """ self.run_rr(shift=3, args=(*self.get_common_args(), "-machine", "virt,gic-version=2")) def test_virt_gicv3(self): """ :avocado: tags=machine:gic-version=3 """ self.run_rr(shift=3, args=(*self.get_common_args(), "-machine", "virt,gic-version=3"))
6,216
31.046392
80
py
qemu
qemu-master/tests/avocado/ppc_virtex_ml507.py
# Test that Linux kernel boots on ppc machines and check the console # # Copyright (c) 2018, 2020 Red Hat, Inc. # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. from avocado.utils import archive from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern class VirtexMl507Machine(QemuSystemTest): timeout = 90 KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' panic_message = 'Kernel panic - not syncing' def test_ppc_virtex_ml507(self): """ :avocado: tags=arch:ppc :avocado: tags=machine:virtex-ml507 :avocado: tags=accel:tcg """ self.require_accelerator("tcg") tar_url = ('https://qemu-advcal.gitlab.io' '/qac-best-of-multiarch/download/day08.tar.xz') tar_hash = '74c68f5af7a7b8f21c03097b298f3bb77ff52c1f' file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) archive.extract(file_path, self.workdir) self.vm.set_console() self.vm.add_args('-kernel', self.workdir + '/hippo/hippo.linux', '-dtb', self.workdir + '/hippo/virtex440-ml507.dtb', '-m', '512') self.vm.launch() wait_for_console_pattern(self, 'QEMU advent calendar 2020', self.panic_message)
1,399
36.837838
77
py
qemu
qemu-master/tests/avocado/boot_linux.py
# Functional test that boots a complete Linux system via a cloud image # # Copyright (c) 2018-2020 Red Hat, Inc. # # Author: # Cleber Rosa <[email protected]> # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. import os from avocado_qemu import LinuxTest, BUILD_DIR from avocado import skipIf class BootLinuxX8664(LinuxTest): """ :avocado: tags=arch:x86_64 """ timeout = 480 def test_pc_i440fx_tcg(self): """ :avocado: tags=machine:pc :avocado: tags=accel:tcg """ self.require_accelerator("tcg") self.vm.add_args("-accel", "tcg") self.launch_and_wait(set_up_ssh_connection=False) def test_pc_i440fx_kvm(self): """ :avocado: tags=machine:pc :avocado: tags=accel:kvm """ self.require_accelerator("kvm") self.vm.add_args("-accel", "kvm") self.launch_and_wait(set_up_ssh_connection=False) def test_pc_q35_tcg(self): """ :avocado: tags=machine:q35 :avocado: tags=accel:tcg """ self.require_accelerator("tcg") self.vm.add_args("-accel", "tcg") self.launch_and_wait(set_up_ssh_connection=False) def test_pc_q35_kvm(self): """ :avocado: tags=machine:q35 :avocado: tags=accel:kvm """ self.require_accelerator("kvm") self.vm.add_args("-accel", "kvm") self.launch_and_wait(set_up_ssh_connection=False) # For Aarch64 we only boot KVM tests in CI as booting the current # Fedora OS in TCG tests is very heavyweight. There are lighter weight # distros which we use in the machine_aarch64_virt.py tests. 
class BootLinuxAarch64(LinuxTest): """ :avocado: tags=arch:aarch64 :avocado: tags=machine:virt """ timeout = 720 def test_virt_kvm(self): """ :avocado: tags=accel:kvm :avocado: tags=cpu:host """ self.require_accelerator("kvm") self.vm.add_args("-accel", "kvm") self.vm.add_args("-machine", "virt,gic-version=host") self.vm.add_args('-bios', os.path.join(BUILD_DIR, 'pc-bios', 'edk2-aarch64-code.fd')) self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0') self.vm.add_args('-object', 'rng-random,id=rng0,filename=/dev/urandom') self.launch_and_wait(set_up_ssh_connection=False) # See the tux_baseline.py tests for almost the same coverage in a lot # less time. class BootLinuxPPC64(LinuxTest): """ :avocado: tags=arch:ppc64 """ timeout = 360 @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab') def test_pseries_tcg(self): """ :avocado: tags=machine:pseries :avocado: tags=accel:tcg """ self.require_accelerator("tcg") self.vm.add_args("-accel", "tcg") self.launch_and_wait(set_up_ssh_connection=False) class BootLinuxS390X(LinuxTest): """ :avocado: tags=arch:s390x """ timeout = 240 @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab') def test_s390_ccw_virtio_tcg(self): """ :avocado: tags=machine:s390-ccw-virtio :avocado: tags=accel:tcg """ self.require_accelerator("tcg") self.vm.add_args("-accel", "tcg") self.launch_and_wait(set_up_ssh_connection=False)
3,440
26.97561
79
py
qemu
qemu-master/tests/avocado/vnc.py
# Simple functional tests for VNC functionality # # Copyright (c) 2018 Red Hat, Inc. # # Author: # Cleber Rosa <[email protected]> # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. import socket from typing import List from avocado_qemu import QemuSystemTest VNC_ADDR = '127.0.0.1' VNC_PORT_START = 32768 VNC_PORT_END = VNC_PORT_START + 1024 def check_bind(port: int) -> bool: with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: try: sock.bind((VNC_ADDR, port)) except OSError: return False return True def check_connect(port: int) -> bool: with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: try: sock.connect((VNC_ADDR, port)) except ConnectionRefusedError: return False return True def find_free_ports(count: int) -> List[int]: result = [] for port in range(VNC_PORT_START, VNC_PORT_END): if check_bind(port): result.append(port) if len(result) >= count: break assert len(result) == count return result class Vnc(QemuSystemTest): """ :avocado: tags=vnc,quick :avocado: tags=machine:none """ def test_no_vnc(self): self.vm.add_args('-nodefaults', '-S') self.vm.launch() self.assertFalse(self.vm.qmp('query-vnc')['return']['enabled']) def test_no_vnc_change_password(self): self.vm.add_args('-nodefaults', '-S') self.vm.launch() self.assertFalse(self.vm.qmp('query-vnc')['return']['enabled']) set_password_response = self.vm.qmp('change-vnc-password', password='new_password') self.assertIn('error', set_password_response) self.assertEqual(set_password_response['error']['class'], 'GenericError') self.assertEqual(set_password_response['error']['desc'], 'Could not set password') def test_change_password_requires_a_password(self): self.vm.add_args('-nodefaults', '-S', '-vnc', ':0') self.vm.launch() self.assertTrue(self.vm.qmp('query-vnc')['return']['enabled']) set_password_response = self.vm.qmp('change-vnc-password', password='new_password') self.assertIn('error', set_password_response) 
self.assertEqual(set_password_response['error']['class'], 'GenericError') self.assertEqual(set_password_response['error']['desc'], 'Could not set password') def test_change_password(self): self.vm.add_args('-nodefaults', '-S', '-vnc', ':0,password=on') self.vm.launch() self.assertTrue(self.vm.qmp('query-vnc')['return']['enabled']) set_password_response = self.vm.qmp('change-vnc-password', password='new_password') self.assertEqual(set_password_response['return'], {}) def test_change_listen(self): a, b, c = find_free_ports(3) self.assertFalse(check_connect(a)) self.assertFalse(check_connect(b)) self.assertFalse(check_connect(c)) self.vm.add_args('-nodefaults', '-S', '-vnc', f'{VNC_ADDR}:{a - 5900}') self.vm.launch() self.assertEqual(self.vm.qmp('query-vnc')['return']['service'], str(a)) self.assertTrue(check_connect(a)) self.assertFalse(check_connect(b)) self.assertFalse(check_connect(c)) res = self.vm.qmp('display-update', type='vnc', addresses=[{'type': 'inet', 'host': VNC_ADDR, 'port': str(b)}, {'type': 'inet', 'host': VNC_ADDR, 'port': str(c)}]) self.assertEqual(res['return'], {}) self.assertEqual(self.vm.qmp('query-vnc')['return']['service'], str(b)) self.assertFalse(check_connect(a)) self.assertTrue(check_connect(b)) self.assertTrue(check_connect(c))
4,165
34.305085
79
py
qemu
qemu-master/tests/avocado/pc_cpu_hotplug_props.py
# # Ensure CPU die-id can be omitted on -device # # Copyright (c) 2019 Red Hat Inc # # Author: # Eduardo Habkost <[email protected]> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, see <http://www.gnu.org/licenses/>. # from avocado_qemu import QemuSystemTest class OmittedCPUProps(QemuSystemTest): """ :avocado: tags=arch:x86_64 :avocado: tags=cpu:qemu64 """ def test_no_die_id(self): self.vm.add_args('-nodefaults', '-S') self.vm.add_args('-smp', '1,sockets=2,cores=2,threads=2,maxcpus=8') self.vm.add_args('-device', 'qemu64-x86_64-cpu,socket-id=1,core-id=0,thread-id=0') self.vm.launch() self.assertEquals(len(self.vm.command('query-cpus-fast')), 2)
1,293
34.944444
90
py
qemu
qemu-master/tests/avocado/smmu.py
# SMMUv3 Functional tests # # Copyright (c) 2021 Red Hat, Inc. # # Author: # Eric Auger <[email protected]> # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. import os from avocado import skipIf from avocado_qemu import LinuxTest, BUILD_DIR @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab') class SMMU(LinuxTest): """ :avocado: tags=accel:kvm :avocado: tags=cpu:host :avocado: tags=arch:aarch64 :avocado: tags=machine:virt :avocado: tags=distro:fedora :avocado: tags=smmu """ IOMMU_ADDON = ',iommu_platform=on,disable-modern=off,disable-legacy=on' kernel_path = None initrd_path = None kernel_params = None def set_up_boot(self): path = self.download_boot() self.vm.add_args('-device', 'virtio-blk-pci,bus=pcie.0,scsi=off,' + 'drive=drv0,id=virtio-disk0,bootindex=1,' 'werror=stop,rerror=stop' + self.IOMMU_ADDON) self.vm.add_args('-drive', 'file=%s,if=none,cache=writethrough,id=drv0' % path) def setUp(self): super(SMMU, self).setUp(None, 'virtio-net-pci' + self.IOMMU_ADDON) def common_vm_setup(self, custom_kernel=False): self.require_accelerator("kvm") self.vm.add_args("-accel", "kvm") self.vm.add_args("-cpu", "host") self.vm.add_args("-machine", "iommu=smmuv3") self.vm.add_args("-d", "guest_errors") self.vm.add_args('-bios', os.path.join(BUILD_DIR, 'pc-bios', 'edk2-aarch64-code.fd')) self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0') self.vm.add_args('-object', 'rng-random,id=rng0,filename=/dev/urandom') if custom_kernel is False: return kernel_url = self.distro.pxeboot_url + 'vmlinuz' initrd_url = self.distro.pxeboot_url + 'initrd.img' self.kernel_path = self.fetch_asset(kernel_url) self.initrd_path = self.fetch_asset(initrd_url) def run_and_check(self): if self.kernel_path: self.vm.add_args('-kernel', self.kernel_path, '-append', self.kernel_params, '-initrd', self.initrd_path) self.launch_and_wait() self.ssh_command('cat /proc/cmdline') self.ssh_command('dnf -y install numactl-devel') # 
5.3 kernel without RIL # def test_smmu_noril(self): """ :avocado: tags=smmu_noril :avocado: tags=smmu_noril_tests :avocado: tags=distro_version:31 """ self.common_vm_setup() self.run_and_check() def test_smmu_noril_passthrough(self): """ :avocado: tags=smmu_noril_passthrough :avocado: tags=smmu_noril_tests :avocado: tags=distro_version:31 """ self.common_vm_setup(True) self.kernel_params = (self.distro.default_kernel_params + ' iommu.passthrough=on') self.run_and_check() def test_smmu_noril_nostrict(self): """ :avocado: tags=smmu_noril_nostrict :avocado: tags=smmu_noril_tests :avocado: tags=distro_version:31 """ self.common_vm_setup(True) self.kernel_params = (self.distro.default_kernel_params + ' iommu.strict=0') self.run_and_check() # 5.8 kernel featuring range invalidation # >= v5.7 kernel def test_smmu_ril(self): """ :avocado: tags=smmu_ril :avocado: tags=smmu_ril_tests :avocado: tags=distro_version:33 """ self.common_vm_setup() self.run_and_check() def test_smmu_ril_passthrough(self): """ :avocado: tags=smmu_ril_passthrough :avocado: tags=smmu_ril_tests :avocado: tags=distro_version:33 """ self.common_vm_setup(True) self.kernel_params = (self.distro.default_kernel_params + ' iommu.passthrough=on') self.run_and_check() def test_smmu_ril_nostrict(self): """ :avocado: tags=smmu_ril_nostrict :avocado: tags=smmu_ril_tests :avocado: tags=distro_version:33 """ self.common_vm_setup(True) self.kernel_params = (self.distro.default_kernel_params + ' iommu.strict=0') self.run_and_check()
4,492
31.557971
77
py
qemu
qemu-master/tests/avocado/ppc_prep_40p.py
# Functional test that boots a PReP/40p machine and checks its serial console. # # Copyright (c) Philippe Mathieu-Daudé <[email protected]> # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. import os from avocado import skipUnless from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern class IbmPrep40pMachine(QemuSystemTest): timeout = 60 # 12H0455 PPS Firmware Licensed Materials # Property of IBM (C) Copyright IBM Corp. 1994. # All rights reserved. # U.S. Government Users Restricted Rights - Use, duplication or disclosure # restricted by GSA ADP Schedule Contract with IBM Corp. @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code') def test_factory_firmware_and_netbsd(self): """ :avocado: tags=arch:ppc :avocado: tags=machine:40p :avocado: tags=os:netbsd :avocado: tags=slowness:high :avocado: tags=accel:tcg """ self.require_accelerator("tcg") bios_url = ('http://ftpmirror.your.org/pub/misc/' 'ftp.software.ibm.com/rs6000/firmware/' '7020-40p/P12H0456.IMG') bios_hash = '1775face4e6dc27f3a6ed955ef6eb331bf817f03' bios_path = self.fetch_asset(bios_url, asset_hash=bios_hash) drive_url = ('https://archive.netbsd.org/pub/NetBSD-archive/' 'NetBSD-4.0/prep/installation/floppy/generic_com0.fs') drive_hash = 'dbcfc09912e71bd5f0d82c7c1ee43082fb596ceb' drive_path = self.fetch_asset(drive_url, asset_hash=drive_hash) self.vm.set_console() self.vm.add_args('-bios', bios_path, '-fda', drive_path) self.vm.launch() os_banner = 'NetBSD 4.0 (GENERIC) #0: Sun Dec 16 00:49:40 PST 2007' wait_for_console_pattern(self, os_banner) wait_for_console_pattern(self, 'Model: IBM PPS Model 6015') def test_openbios_192m(self): """ :avocado: tags=arch:ppc :avocado: tags=machine:40p :avocado: tags=accel:tcg """ self.require_accelerator("tcg") self.vm.set_console() self.vm.add_args('-m', '192') # test fw_cfg self.vm.launch() wait_for_console_pattern(self, '>> OpenBIOS') 
wait_for_console_pattern(self, '>> Memory: 192M') wait_for_console_pattern(self, '>> CPU type PowerPC,604') def test_openbios_and_netbsd(self): """ :avocado: tags=arch:ppc :avocado: tags=machine:40p :avocado: tags=os:netbsd :avocado: tags=accel:tcg """ self.require_accelerator("tcg") drive_url = ('https://archive.netbsd.org/pub/NetBSD-archive/' 'NetBSD-7.1.2/iso/NetBSD-7.1.2-prep.iso') drive_hash = 'ac6fa2707d888b36d6fa64de6e7fe48e' drive_path = self.fetch_asset(drive_url, asset_hash=drive_hash, algorithm='md5') self.vm.set_console() self.vm.add_args('-cdrom', drive_path, '-boot', 'd') self.vm.launch() wait_for_console_pattern(self, 'NetBSD/prep BOOT, Revision 1.9')
3,257
36.883721
78
py
qemu
qemu-master/tests/avocado/netdev-ethtool.py
# ethtool tests for emulated network devices # # This test leverages ethtool's --test sequence to validate network # device behaviour. # # SPDX-License-Identifier: GPL-2.0-or-late from avocado import skip from avocado_qemu import QemuSystemTest from avocado_qemu import exec_command, exec_command_and_wait_for_pattern from avocado_qemu import wait_for_console_pattern class NetDevEthtool(QemuSystemTest): """ :avocado: tags=arch:x86_64 :avocado: tags=machine:q35 """ # Runs in about 17s under KVM, 19s under TCG, 25s under GCOV timeout = 45 # Fetch assets from the netdev-ethtool subdir of my shared test # images directory on fileserver.linaro.org. def get_asset(self, name, sha1): base_url = ('https://fileserver.linaro.org/s/' 'kE4nCFLdQcoBF9t/download?' 'path=%2Fnetdev-ethtool&files=' ) url = base_url + name # use explicit name rather than failing to neatly parse the # URL into a unique one return self.fetch_asset(name=name, locations=(url), asset_hash=sha1) def common_test_code(self, netdev, extra_args=None, kvm=False): # This custom kernel has drivers for all the supported network # devices we can emulate in QEMU kernel = self.get_asset("bzImage", "33469d7802732d5815226166581442395cb289e2") rootfs = self.get_asset("rootfs.squashfs", "9793cea7021414ae844bda51f558bd6565b50cdc") append = 'printk.time=0 console=ttyS0 ' append += 'root=/dev/sr0 rootfstype=squashfs ' # any additional kernel tweaks for the test if extra_args: append += extra_args # finally invoke ethtool directly append += ' init=/usr/sbin/ethtool -- -t eth1 offline' # add the rootfs via a readonly cdrom image drive = f"file={rootfs},if=ide,index=0,media=cdrom" self.vm.add_args('-kernel', kernel, '-append', append, '-drive', drive, '-device', netdev) if kvm: self.vm.add_args('-accel', 'kvm') self.vm.set_console(console_index=0) self.vm.launch() wait_for_console_pattern(self, "The test result is PASS", "The test result is FAIL", vm=None) # no need to gracefully shutdown, just finish self.vm.kill() # Skip 
testing for MSI for now. Allegedly it was fixed by: # 28e96556ba (igb: Allocate MSI-X vector when testing) # but I'm seeing oops in the kernel @skip("Kernel bug with MSI enabled") def test_igb(self): """ :avocado: tags=device:igb """ self.common_test_code("igb") def test_igb_nomsi(self): """ :avocado: tags=device:igb """ self.common_test_code("igb", "pci=nomsi") def test_igb_nomsi_kvm(self): """ :avocado: tags=device:igb """ self.require_accelerator('kvm') self.common_test_code("igb", "pci=nomsi", True) # It seems the other popular cards we model in QEMU currently fail # the pattern test with: # # pattern test failed (reg 0x00178): got 0x00000000 expected 0x00005A5A # # So for now we skip them. @skip("Incomplete reg 0x00178 support") def test_e1000(self): """ :avocado: tags=device:e1000 """ self.common_test_code("e1000") @skip("Incomplete reg 0x00178 support") def test_i82550(self): """ :avocado: tags=device:i82550 """ self.common_test_code("i82550")
3,754
31.094017
77
py
qemu
qemu-master/tests/avocado/machine_mips_fuloong2e.py
# Functional tests for the Lemote Fuloong-2E machine.
#
# Copyright (c) 2019 Philippe Mathieu-Daudé <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
#
# SPDX-License-Identifier: GPL-2.0-or-later

import os

from avocado import skipUnless
from avocado_qemu import QemuSystemTest
from avocado_qemu import wait_for_console_pattern


class MipsFuloong2e(QemuSystemTest):

    timeout = 60

    @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code')
    @skipUnless(os.getenv('RESCUE_YL_PATH'), 'RESCUE_YL_PATH not available')
    def test_linux_kernel_isa_serial(self):
        """
        :avocado: tags=arch:mips64el
        :avocado: tags=machine:fuloong2e
        :avocado: tags=endian:little
        :avocado: tags=device:bonito64
        :avocado: tags=device:via686b
        """
        # Recovery system for the Yeeloong laptop
        # (enough to test the fuloong2e southbridge, accessing its ISA bus)
        # http://dev.lemote.com/files/resource/download/rescue/rescue-yl
        rescue_sha1 = 'ec4d1bd89a8439c41033ca63db60160cc6d6f09a'
        rescue_kernel = self.fetch_asset('file://' + os.getenv('RESCUE_YL_PATH'),
                                         asset_hash=rescue_sha1)

        self.vm.set_console()
        self.vm.add_args('-kernel', rescue_kernel)
        self.vm.launch()
        # Boot banner first, then verify the CPU was probed correctly
        wait_for_console_pattern(self, 'Linux version 2.6.27.7lemote')
        wait_for_console_pattern(self,
                                 'CPU revision is: 00006302 (ICT Loongson-2)')
1,601
36.255814
79
py
qemu
qemu-master/tests/avocado/info_usernet.py
# Test for the hmp command "info usernet"
#
# Copyright (c) 2021 Red Hat, Inc.
#
# Author:
#  Cleber Rosa <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later.  See the COPYING file in the top-level directory.

from avocado_qemu import QemuSystemTest

from qemu.utils import get_info_usernet_hostfwd_port


class InfoUsernet(QemuSystemTest):
    """
    :avocado: tags=machine:none
    """

    def test_hostfwd(self):
        self.require_netdev('user')
        # Port 0 asks slirp to pick a free host port for the forward
        self.vm.add_args('-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22')
        self.vm.launch()
        reply = self.vm.command('human-monitor-command',
                                command_line='info usernet')
        port = get_info_usernet_hostfwd_port(reply)
        self.assertIsNotNone(port,
                             ('"info usernet" output content does not seem to '
                              'contain the redirected port'))
        self.assertGreater(port, 0,
                           ('Found a redirected port that is not greater than'
                            ' zero'))
1,096
31.264706
79
py
qemu
qemu-master/tests/avocado/linux_ssh_mips_malta.py
# Functional test that boots a VM and run commands via a SSH session
#
# Copyright (c) Philippe Mathieu-Daudé <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later.  See the COPYING file in the top-level directory.

import os
import re
import base64
import logging
import time

from avocado import skipUnless
from avocado_qemu import LinuxSSHMixIn
from avocado_qemu import QemuSystemTest
from avocado_qemu import wait_for_console_pattern
from avocado.utils import process
from avocado.utils import archive
from avocado.utils import ssh


@skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout')
@skipUnless(ssh.SSH_CLIENT_BINARY, 'No SSH client available')
class LinuxSSH(QemuSystemTest, LinuxSSHMixIn):
    """
    :avocado: tags=accel:tcg
    """

    timeout = 150 # Not for 'configure --enable-debug --enable-debug-tcg'

    KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
    VM_IP = '127.0.0.1'

    BASE_URL = 'https://people.debian.org/~aurel32/qemu/'

    # Per-endianness image/kernel assets (Debian Wheezy MIPS Malta)
    IMAGE_INFO = {
        'be': {'base_url': 'mips',
               'image_name': 'debian_wheezy_mips_standard.qcow2',
               'image_hash': '8987a63270df67345b2135a6b7a4885a35e392d5',
               'kernel_hash': {
                   32: '592e384a4edc16dade52a6cd5c785c637bcbc9ad',
                   64: 'db6eea7de35d36c77d8c165b6bcb222e16eb91db'}
              },
        'le': {'base_url': 'mipsel',
               'image_name': 'debian_wheezy_mipsel_standard.qcow2',
               'image_hash': '7866764d9de3ef536ffca24c9fb9f04ffdb45802',
               'kernel_hash': {
                   32: 'a66bea5a8adaa2cb3d36a1d4e0ccdb01be8f6c2a',
                   64: '6a7f77245acf231415a0e8b725d91ed2f3487794'}
              }
    }
    CPU_INFO = {
        32: {'cpu': 'MIPS 24Kc', 'kernel_release': '3.2.0-4-4kc-malta'},
        64: {'cpu': 'MIPS 20Kc', 'kernel_release': '3.2.0-4-5kc-malta'}
    }

    def get_url(self, endianess, path=''):
        # 'le' images live under mipsel/, 'be' under mips/
        suffix = {'le': 'el', 'be': ''}
        return '%s/mips%s/%s' % (self.BASE_URL, suffix[endianess], path)

    def get_image_info(self, endianess):
        info = self.IMAGE_INFO[endianess]
        return (self.get_url(endianess, info['image_name']),
                info['image_hash'])

    def get_kernel_info(self, endianess, wordsize):
        release = self.CPU_INFO[wordsize]['kernel_release']
        kernel_url = self.get_url(endianess, 'vmlinux-%s' % release)
        kernel_hash = self.IMAGE_INFO[endianess]['kernel_hash'][wordsize]
        return kernel_url, kernel_hash

    def ssh_disconnect_vm(self):
        self.ssh_session.quit()

    def boot_debian_wheezy_image_and_ssh_login(self, endianess, kernel_path):
        # Boot the Wheezy disk image and log in as root over SSH
        image_url, image_hash = self.get_image_info(endianess)
        image_path = self.fetch_asset(image_url, asset_hash=image_hash)

        self.vm.set_console()
        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE
                               + 'console=ttyS0 root=/dev/sda1')
        self.vm.add_args('-no-reboot',
                         '-kernel', kernel_path,
                         '-append', kernel_command_line,
                         '-drive', 'file=%s,snapshot=on' % image_path,
                         '-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22',
                         '-device', 'pcnet,netdev=vnet')
        self.vm.launch()

        self.log.info('VM launched, waiting for sshd')
        wait_for_console_pattern(self,
                                 'Starting OpenBSD Secure Shell server: sshd',
                                 'Oops')
        self.log.info('sshd ready')

        self.ssh_connect('root', 'root', False)

    def shutdown_via_ssh(self):
        self.ssh_command('poweroff')
        self.ssh_disconnect_vm()
        wait_for_console_pattern(self, 'Power down', 'Oops')

    def ssh_command_output_contains(self, cmd, exp):
        # Fail unless some line of the command's stdout contains exp
        stdout, _ = self.ssh_command(cmd)
        for line in stdout:
            if exp in line:
                break
        else:
            self.fail('"%s" output does not contain "%s"' % (cmd, exp))

    def run_common_commands(self, wordsize):
        self.ssh_command_output_contains(
            'cat /proc/cpuinfo',
            self.CPU_INFO[wordsize]['cpu'])
        self.ssh_command_output_contains(
            'uname -m',
            'mips')
        self.ssh_command_output_contains(
            'uname -r',
            self.CPU_INFO[wordsize]['kernel_release'])
        self.ssh_command_output_contains(
            'cat /proc/interrupts',
            'XT-PIC  timer')
        self.ssh_command_output_contains(
            'cat /proc/interrupts',
            'XT-PIC  i8042')
        self.ssh_command_output_contains(
            'cat /proc/interrupts',
            'XT-PIC  serial')
        self.ssh_command_output_contains(
            'cat /proc/interrupts',
            'XT-PIC  ata_piix')
        self.ssh_command_output_contains(
            'cat /proc/interrupts',
            'XT-PIC  eth0')
        self.ssh_command_output_contains(
            'cat /proc/devices',
            'input')
        self.ssh_command_output_contains(
            'cat /proc/devices',
            'usb')
        self.ssh_command_output_contains(
            'cat /proc/devices',
            'fb')
        self.ssh_command_output_contains(
            'cat /proc/ioports',
            ' : serial')
        self.ssh_command_output_contains(
            'cat /proc/ioports',
            ' : ata_piix')
        self.ssh_command_output_contains(
            'cat /proc/ioports',
            ' : piix4_smbus')
        self.ssh_command_output_contains(
            'lspci -d 11ab:4620',
            'GT-64120')
        self.ssh_command_output_contains(
            'cat /sys/bus/i2c/devices/i2c-0/name',
            'SMBus PIIX4 adapter')
        self.ssh_command_output_contains(
            'cat /proc/mtd',
            'YAMON')
        # Empty 'Board Config' (64KB)
        self.ssh_command_output_contains(
            'md5sum /dev/mtd2ro',
            '0dfbe8aa4c20b52e1b8bf3cb6cbdf193')

    def check_mips_malta(self, uname_m, endianess):
        wordsize = 64 if '64' in uname_m else 32
        kernel_url, kernel_hash = self.get_kernel_info(endianess, wordsize)
        kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
        self.boot_debian_wheezy_image_and_ssh_login(endianess, kernel_path)

        stdout, _ = self.ssh_command('uname -a')
        self.assertIn(True, [uname_m + " GNU/Linux" in line
                             for line in stdout])

        self.run_common_commands(wordsize)
        self.shutdown_via_ssh()
        # Wait for VM to shut down gracefully
        self.vm.wait()

    def test_mips_malta32eb_kernel3_2_0(self):
        """
        :avocado: tags=arch:mips
        :avocado: tags=endian:big
        :avocado: tags=device:pcnet32
        """
        self.check_mips_malta('mips', 'be')

    def test_mips_malta32el_kernel3_2_0(self):
        """
        :avocado: tags=arch:mipsel
        :avocado: tags=endian:little
        :avocado: tags=device:pcnet32
        """
        self.check_mips_malta('mips', 'le')

    def test_mips_malta64eb_kernel3_2_0(self):
        """
        :avocado: tags=arch:mips64
        :avocado: tags=endian:big
        :avocado: tags=device:pcnet32
        """
        self.check_mips_malta('mips64', 'be')

    def test_mips_malta64el_kernel3_2_0(self):
        """
        :avocado: tags=arch:mips64el
        :avocado: tags=endian:little
        :avocado: tags=device:pcnet32
        """
        self.check_mips_malta('mips64', 'le')
7,646
34.733645
80
py
qemu
qemu-master/tests/avocado/migration.py
# Migration test
#
# Copyright (c) 2019 Red Hat, Inc.
#
# Authors:
#  Cleber Rosa <[email protected]>
#  Caio Carrara <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later.  See the COPYING file in the top-level directory.

import tempfile

from avocado_qemu import QemuSystemTest
from avocado import skipUnless

from avocado.utils.network import ports
from avocado.utils import wait
from avocado.utils.path import find_command


class Migration(QemuSystemTest):
    """
    :avocado: tags=migration
    """

    timeout = 10

    @staticmethod
    def migration_finished(vm):
        # Terminal states of the migration state machine
        return vm.command('query-migrate')['status'] in ('completed', 'failed')

    def assert_migration(self, src_vm, dst_vm):
        # Poll both sides until their migration state settles
        wait.wait_for(self.migration_finished,
                      timeout=self.timeout,
                      step=0.1,
                      args=(src_vm,))
        wait.wait_for(self.migration_finished,
                      timeout=self.timeout,
                      step=0.1,
                      args=(dst_vm,))
        self.assertEqual(src_vm.command('query-migrate')['status'], 'completed')
        self.assertEqual(dst_vm.command('query-migrate')['status'], 'completed')
        self.assertEqual(dst_vm.command('query-status')['status'], 'running')
        self.assertEqual(src_vm.command('query-status')['status'],'postmigrate')

    def do_migrate(self, dest_uri, src_uri=None):
        # Launch the destination first so it is listening for the stream
        target_vm = self.get_vm('-incoming', dest_uri)
        target_vm.add_args('-nodefaults')
        target_vm.launch()
        if src_uri is None:
            src_uri = dest_uri
        origin_vm = self.get_vm()
        origin_vm.add_args('-nodefaults')
        origin_vm.launch()
        origin_vm.qmp('migrate', uri=src_uri)
        self.assert_migration(origin_vm, target_vm)

    def _get_free_port(self):
        candidate = ports.find_free_port()
        if candidate is None:
            self.cancel('Failed to find a free port')
        return candidate

    def test_migration_with_tcp_localhost(self):
        dest_uri = 'tcp:localhost:%u' % self._get_free_port()
        self.do_migrate(dest_uri)

    def test_migration_with_unix(self):
        with tempfile.TemporaryDirectory(prefix='socket_') as socket_path:
            dest_uri = 'unix:%s/qemu-test.sock' % socket_path
            self.do_migrate(dest_uri)

    @skipUnless(find_command('nc', default=False), "'nc' command not found")
    def test_migration_with_exec(self):
        """The test works for both netcat-traditional and netcat-openbsd packages."""
        free_port = self._get_free_port()
        dest_uri = 'exec:nc -l localhost %u' % free_port
        src_uri = 'exec:nc localhost %u' % free_port
        self.do_migrate(dest_uri, src_uri)
2,752
32.573171
85
py
qemu
qemu-master/tests/avocado/tuxrun_baselines.py
# Functional test that boots known good tuxboot images the same way
# that tuxrun (www.tuxrun.org) does. This tool is used by things like
# the LKFT project to run regression tests on kernels.
#
# Copyright (c) 2023 Linaro Ltd.
#
# Author:
#  Alex Bennée <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later

import os
import time

from avocado import skip, skipIf
from avocado_qemu import QemuSystemTest
from avocado_qemu import exec_command, exec_command_and_wait_for_pattern
from avocado_qemu import wait_for_console_pattern
from avocado.utils import process
from avocado.utils.path import find_command


class TuxRunBaselineTest(QemuSystemTest):
    """
    :avocado: tags=accel:tcg
    """

    KERNEL_COMMON_COMMAND_LINE = 'printk.time=0'
    # Tests are ~10-40s, allow for --debug/--enable-gcov overhead
    timeout = 100

    def get_tag(self, tagname, default=None):
        """
        Get the metadata tag or return the default.
        """
        utag = self._get_unique_tag_val(tagname)
        print(f"{tagname}/{default} -> {utag}")
        if utag:
            return utag

        return default

    def setUp(self):
        super().setUp()

        # We need zstd for all the tuxrun tests
        # See https://github.com/avocado-framework/avocado/issues/5609
        zstd = find_command('zstd', False)
        if zstd is False:
            self.cancel('Could not find "zstd", which is required to '
                        'decompress rootfs')
        self.zstd = zstd

        # Process the TuxRun specific tags, most machines work with
        # reasonable defaults but we sometimes need to tweak the
        # config. To avoid open coding everything we store all these
        # details in the metadata for each test.

        # The tuxboot tag matches the root directory
        self.tuxboot = self.get_tag('tuxboot')

        # Most Linux's use ttyS0 for their serial port
        self.console = self.get_tag('console', "ttyS0")

        # Does the machine shutdown QEMU nicely on "halt"
        self.shutdown = self.get_tag('shutdown')

        # The name of the kernel Image file
        self.image = self.get_tag('image', "Image")

        self.root = self.get_tag('root', "vda")

        # Occasionally we need extra devices to hook things up
        self.extradev = self.get_tag('extradev')

    def wait_for_console_pattern(self, success_message, vm=None):
        wait_for_console_pattern(self, success_message,
                                 failure_message='Kernel panic - not syncing',
                                 vm=vm)

    def fetch_tuxrun_assets(self, dt=None):
        """
        Fetch the TuxBoot assets. They are stored in a standard way so we
        use the per-test tags to fetch details.
        """
        base_url = f"https://storage.tuxboot.com/{self.tuxboot}/"
        kernel_image = self.fetch_asset(base_url + self.image)
        disk_image_zst = self.fetch_asset(base_url + "rootfs.ext4.zst")

        # The rootfs ships zstd-compressed; unpack it into the workdir
        decompress = f"{self.zstd} -d {disk_image_zst} -o {self.workdir}/rootfs.ext4"
        process.run(decompress)

        if dt:
            dtb = self.fetch_asset(base_url + dt)
        else:
            dtb = None

        return (kernel_image, self.workdir + "/rootfs.ext4", dtb)

    def prepare_run(self, kernel, disk, drive, dtb=None, console_index=0):
        """
        Setup to run and add the common parameters to the system
        """
        self.vm.set_console(console_index=console_index)

        # all block devices are raw ext4's
        blockdev = "driver=raw,file.driver=file," \
            + f"file.filename={disk},node-name=hd0"

        kcmd_line = self.KERNEL_COMMON_COMMAND_LINE
        kcmd_line += f" root=/dev/{self.root}"
        kcmd_line += f" console={self.console}"

        self.vm.add_args('-kernel', kernel,
                         '-append', kcmd_line,
                         '-blockdev', blockdev)

        # Sometimes we need extra devices attached
        if self.extradev:
            self.vm.add_args('-device', self.extradev)

        self.vm.add_args('-device',
                         f"{drive},drive=hd0")

        # Some machines need an explicit DTB
        if dtb:
            self.vm.add_args('-dtb', dtb)

    def run_tuxtest_tests(self, haltmsg):
        """
        Wait for the system to boot up, wait for the login prompt and
        then do a few things on the console. Trigger a shutdown and
        wait to exit cleanly.
        """
        self.wait_for_console_pattern("Welcome to TuxTest")
        time.sleep(0.2)
        exec_command(self, 'root')
        time.sleep(0.2)
        exec_command(self, 'cat /proc/interrupts')
        time.sleep(0.1)
        exec_command(self, 'cat /proc/self/maps')
        time.sleep(0.1)
        exec_command(self, 'uname -a')
        time.sleep(0.1)
        exec_command_and_wait_for_pattern(self, 'halt', haltmsg)

        # Wait for VM to shut down gracefully if it can
        if self.shutdown == "nowait":
            self.vm.shutdown()
        else:
            self.vm.wait()

    def common_tuxrun(self, dt=None,
                      drive="virtio-blk-device",
                      haltmsg="reboot: System halted",
                      console_index=0):
        """
        Common path for LKFT tests. Unless we need to do something
        special with the command line we can process most things using
        the tag metadata.
        """
        (kernel, disk, dtb) = self.fetch_tuxrun_assets(dt)

        self.prepare_run(kernel, disk, drive, dtb, console_index)
        self.vm.launch()
        self.run_tuxtest_tests(haltmsg)

    #
    # The tests themselves. The configuration is derived from how
    # tuxrun invokes qemu (with minor tweaks like using -blockdev
    # consistently). The tuxrun equivalent is something like:
    #
    # tuxrun --device qemu-{ARCH} \
    #        --kernel https://storage.tuxboot.com/{TUXBOOT}/{IMAGE}
    #

    def test_arm64(self):
        """
        :avocado: tags=arch:aarch64
        :avocado: tags=cpu:cortex-a57
        :avocado: tags=machine:virt
        :avocado: tags=tuxboot:arm64
        :avocado: tags=console:ttyAMA0
        :avocado: tags=shutdown:nowait
        """
        self.common_tuxrun()

    def test_arm64be(self):
        """
        :avocado: tags=arch:aarch64
        :avocado: tags=cpu:cortex-a57
        :avocado: tags=endian:big
        :avocado: tags=machine:virt
        :avocado: tags=tuxboot:arm64be
        :avocado: tags=console:ttyAMA0
        :avocado: tags=shutdown:nowait
        """
        self.common_tuxrun()

    def test_armv5(self):
        """
        :avocado: tags=arch:arm
        :avocado: tags=cpu:arm926
        :avocado: tags=machine:versatilepb
        :avocado: tags=tuxboot:armv5
        :avocado: tags=image:zImage
        :avocado: tags=console:ttyAMA0
        :avocado: tags=shutdown:nowait
        """
        self.common_tuxrun(drive="virtio-blk-pci",
                           dt="versatile-pb.dtb")

    def test_armv7(self):
        """
        :avocado: tags=arch:arm
        :avocado: tags=cpu:cortex-a15
        :avocado: tags=machine:virt
        :avocado: tags=tuxboot:armv7
        :avocado: tags=image:zImage
        :avocado: tags=console:ttyAMA0
        :avocado: tags=shutdown:nowait
        """
        self.common_tuxrun()

    def test_armv7be(self):
        """
        :avocado: tags=arch:arm
        :avocado: tags=cpu:cortex-a15
        :avocado: tags=endian:big
        :avocado: tags=machine:virt
        :avocado: tags=tuxboot:armv7be
        :avocado: tags=image:zImage
        :avocado: tags=console:ttyAMA0
        :avocado: tags=shutdown:nowait
        """
        self.common_tuxrun()

    def test_i386(self):
        """
        :avocado: tags=arch:i386
        :avocado: tags=cpu:coreduo
        :avocado: tags=machine:q35
        :avocado: tags=tuxboot:i386
        :avocado: tags=image:bzImage
        :avocado: tags=shutdown:nowait
        """
        self.common_tuxrun(drive="virtio-blk-pci")

    def test_mips32(self):
        """
        :avocado: tags=arch:mips
        :avocado: tags=machine:malta
        :avocado: tags=cpu:mips32r6-generic
        :avocado: tags=endian:big
        :avocado: tags=tuxboot:mips32
        :avocado: tags=image:vmlinux
        :avocado: tags=root:sda
        :avocado: tags=shutdown:nowait
        """
        self.common_tuxrun(drive="driver=ide-hd,bus=ide.0,unit=0")

    def test_mips32el(self):
        """
        :avocado: tags=arch:mipsel
        :avocado: tags=machine:malta
        :avocado: tags=cpu:mips32r6-generic
        :avocado: tags=tuxboot:mips32el
        :avocado: tags=image:vmlinux
        :avocado: tags=root:sda
        :avocado: tags=shutdown:nowait
        """
        self.common_tuxrun(drive="driver=ide-hd,bus=ide.0,unit=0")

    def test_mips64(self):
        """
        :avocado: tags=arch:mips64
        :avocado: tags=machine:malta
        :avocado: tags=tuxboot:mips64
        :avocado: tags=endian:big
        :avocado: tags=image:vmlinux
        :avocado: tags=root:sda
        :avocado: tags=shutdown:nowait
        """
        self.common_tuxrun(drive="driver=ide-hd,bus=ide.0,unit=0")

    def test_mips64el(self):
        """
        :avocado: tags=arch:mips64el
        :avocado: tags=machine:malta
        :avocado: tags=tuxboot:mips64el
        :avocado: tags=image:vmlinux
        :avocado: tags=root:sda
        :avocado: tags=shutdown:nowait
        """
        self.common_tuxrun(drive="driver=ide-hd,bus=ide.0,unit=0")

    def test_ppc32(self):
        """
        :avocado: tags=arch:ppc
        :avocado: tags=machine:ppce500
        :avocado: tags=cpu:e500mc
        :avocado: tags=tuxboot:ppc32
        :avocado: tags=image:uImage
        :avocado: tags=shutdown:nowait
        """
        self.common_tuxrun(drive="virtio-blk-pci")

    def test_ppc64(self):
        """
        :avocado: tags=arch:ppc64
        :avocado: tags=machine:pseries
        :avocado: tags=cpu:POWER8
        :avocado: tags=endian:big
        :avocado: tags=console:hvc0
        :avocado: tags=tuxboot:ppc64
        :avocado: tags=image:vmlinux
        :avocado: tags=extradev:driver=spapr-vscsi
        :avocado: tags=root:sda
        """
        self.common_tuxrun(drive="scsi-hd")

    def test_ppc64le(self):
        """
        :avocado: tags=arch:ppc64
        :avocado: tags=machine:pseries
        :avocado: tags=cpu:POWER8
        :avocado: tags=console:hvc0
        :avocado: tags=tuxboot:ppc64le
        :avocado: tags=image:vmlinux
        :avocado: tags=extradev:driver=spapr-vscsi
        :avocado: tags=root:sda
        """
        self.common_tuxrun(drive="scsi-hd")

    def test_riscv32(self):
        """
        :avocado: tags=arch:riscv32
        :avocado: tags=machine:virt
        :avocado: tags=tuxboot:riscv32
        """
        self.common_tuxrun()

    def test_riscv64(self):
        """
        :avocado: tags=arch:riscv64
        :avocado: tags=machine:virt
        :avocado: tags=tuxboot:riscv64
        """
        self.common_tuxrun()

    def test_s390(self):
        """
        :avocado: tags=arch:s390x
        :avocado: tags=endian:big
        :avocado: tags=tuxboot:s390
        :avocado: tags=image:bzImage
        :avocado: tags=shutdown:nowait
        """
        self.common_tuxrun(drive="virtio-blk-ccw",
                           haltmsg="Requesting system halt")

    # Note: some segfaults caused by unaligned userspace access
    @skipIf(os.getenv('GITLAB_CI'), 'Skipping unstable test on GitLab')
    def test_sh4(self):
        """
        :avocado: tags=arch:sh4
        :avocado: tags=machine:r2d
        :avocado: tags=cpu:sh7785
        :avocado: tags=tuxboot:sh4
        :avocado: tags=image:zImage
        :avocado: tags=root:sda
        :avocado: tags=console:ttySC1
        """
        # The test is currently too unstable to do much in userspace
        # so we skip common_tuxrun and do a minimal boot and shutdown.
        (kernel, disk, dtb) = self.fetch_tuxrun_assets()

        # the console comes on the second serial port
        self.prepare_run(kernel, disk,
                         "driver=ide-hd,bus=ide.0,unit=0",
                         console_index=1)
        self.vm.launch()

        self.wait_for_console_pattern("Welcome to TuxTest")
        time.sleep(0.1)
        exec_command(self, 'root')
        time.sleep(0.1)
        exec_command_and_wait_for_pattern(self, 'halt',
                                          "reboot: System halted")

    def test_sparc64(self):
        """
        :avocado: tags=arch:sparc64
        :avocado: tags=tuxboot:sparc64
        :avocado: tags=image:vmlinux
        :avocado: tags=root:sda
        :avocado: tags=shutdown:nowait
        """
        self.common_tuxrun(drive="driver=ide-hd,bus=ide.0,unit=0")

    def test_x86_64(self):
        """
        :avocado: tags=arch:x86_64
        :avocado: tags=machine:q35
        :avocado: tags=cpu:Nehalem
        :avocado: tags=tuxboot:x86_64
        :avocado: tags=image:bzImage
        :avocado: tags=root:sda
        :avocado: tags=shutdown:nowait
        """
        self.common_tuxrun(drive="driver=ide-hd,bus=ide.0,unit=0")
13,187
31.087591
78
py
qemu
qemu-master/tests/avocado/boot_xen.py
# Functional test that boots a Xen hypervisor with a domU kernel and
# checks the console output is vaguely sane .
#
# Copyright (c) 2020 Linaro
#
# Author:
#  Alex Bennée <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.

import os

from avocado_qemu import wait_for_console_pattern
from boot_linux_console import LinuxKernelTest


class BootXenBase(LinuxKernelTest):
    """
    Boots a Xen hypervisor with a Linux DomU kernel.
    """

    timeout = 90
    XEN_COMMON_COMMAND_LINE = 'dom0_mem=128M loglvl=all guest_loglvl=all'

    def fetch_guest_kernel(self):
        # Using my own built kernel - which works
        kernel_url = ('https://fileserver.linaro.org/'
                      's/JSsewXGZ6mqxPr5/download?path=%2F&files='
                      'linux-5.9.9-arm64-ajb')
        kernel_sha1 = '4f92bc4b9f88d5ab792fa7a43a68555d344e1b83'
        return self.fetch_asset(kernel_url, asset_hash=kernel_sha1)

    def launch_xen(self, xen_path):
        """
        Launch Xen with a dom0 guest kernel
        """
        self.log.info("launch with xen_path: %s", xen_path)
        guest_kernel = self.fetch_guest_kernel()

        self.vm.set_console()

        # The dom0 kernel is passed via the guest-loader device
        self.vm.add_args('-machine', 'virtualization=on',
                         '-m', '768',
                         '-kernel', xen_path,
                         '-append', self.XEN_COMMON_COMMAND_LINE,
                         '-device',
                         'guest-loader,addr=0x47000000,kernel=%s,bootargs=console=hvc0'
                         % (guest_kernel))

        self.vm.launch()

        # No root device is provided, so a boot that gets this far is a pass
        wait_for_console_pattern(self, 'VFS: Cannot open root device',
                                 "Panic on CPU 0:")


class BootXen(BootXenBase):

    def test_arm64_xen_411_and_dom0(self):
        """
        :avocado: tags=arch:aarch64
        :avocado: tags=accel:tcg
        :avocado: tags=cpu:cortex-a57
        :avocado: tags=machine:virt
        """
        # archive of file from https://deb.debian.org/debian/pool/main/x/xen/
        xen_url = ('https://fileserver.linaro.org/s/JSsewXGZ6mqxPr5/'
                   'download?path=%2F&files='
                   'xen-hypervisor-4.11-arm64_4.11.4%2B37-g3263f257ca-1_arm64.deb')
        xen_sha1 = '034e634d4416adbad1212d59b62bccdcda63e62a'
        xen_deb = self.fetch_asset(xen_url, asset_hash=xen_sha1)
        xen_path = self.extract_from_deb(xen_deb, "/boot/xen-4.11-arm64")

        self.launch_xen(xen_path)

    def test_arm64_xen_414_and_dom0(self):
        """
        :avocado: tags=arch:aarch64
        :avocado: tags=accel:tcg
        :avocado: tags=cpu:cortex-a57
        :avocado: tags=machine:virt
        """
        # archive of file from https://deb.debian.org/debian/pool/main/x/xen/
        xen_url = ('https://fileserver.linaro.org/s/JSsewXGZ6mqxPr5/'
                   'download?path=%2F&files='
                   'xen-hypervisor-4.14-arm64_4.14.0%2B80-gd101b417b7-1_arm64.deb')
        xen_sha1 = 'b9d209dd689ed2b393e625303a225badefec1160'
        xen_deb = self.fetch_asset(xen_url, asset_hash=xen_sha1)
        xen_path = self.extract_from_deb(xen_deb, "/boot/xen-4.14-arm64")

        self.launch_xen(xen_path)

    def test_arm64_xen_415_and_dom0(self):
        """
        :avocado: tags=arch:aarch64
        :avocado: tags=accel:tcg
        :avocado: tags=cpu:cortex-a57
        :avocado: tags=machine:virt
        """
        xen_url = ('https://fileserver.linaro.org/'
                   's/JSsewXGZ6mqxPr5/download'
                   '?path=%2F&files=xen-upstream-4.15-unstable.deb')
        xen_sha1 = 'fc191172b85cf355abb95d275a24cc0f6d6579d8'
        xen_deb = self.fetch_asset(xen_url, asset_hash=xen_sha1)
        xen_path = self.extract_from_deb(xen_deb, "/boot/xen-4.15-unstable")

        self.launch_xen(xen_path)
4,066
33.760684
87
py
qemu
qemu-master/tests/avocado/intel_iommu.py
# INTEL_IOMMU Functional tests
#
# Copyright (c) 2021 Red Hat, Inc.
#
# Author:
#  Eric Auger <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later.  See the COPYING file in the top-level directory.

import os

from avocado import skipIf
from avocado_qemu import LinuxTest


@skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab')
class IntelIOMMU(LinuxTest):
    """
    :avocado: tags=arch:x86_64
    :avocado: tags=distro:fedora
    :avocado: tags=distro_version:31
    :avocado: tags=machine:q35
    :avocado: tags=accel:kvm
    :avocado: tags=intel_iommu
    """

    # Suffix that places a virtio device behind the vIOMMU
    IOMMU_ADDON = ',iommu_platform=on,disable-modern=off,disable-legacy=on'
    kernel_path = None
    initrd_path = None
    kernel_params = None

    def set_up_boot(self):
        path = self.download_boot()
        self.vm.add_args('-device', 'virtio-blk-pci,bus=pcie.0,scsi=off,' +
                         'drive=drv0,id=virtio-disk0,bootindex=1,'
                         'werror=stop,rerror=stop' + self.IOMMU_ADDON)
        self.vm.add_args('-device', 'virtio-gpu-pci' + self.IOMMU_ADDON)
        self.vm.add_args('-drive',
                         'file=%s,if=none,cache=writethrough,id=drv0' % path)

    def setUp(self):
        super(IntelIOMMU, self).setUp(None, 'virtio-net-pci' +
                                      self.IOMMU_ADDON)

    def add_common_args(self):
        self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0')
        self.vm.add_args('-object',
                         'rng-random,id=rng0,filename=/dev/urandom')

    def common_vm_setup(self, custom_kernel=None):
        self.require_accelerator("kvm")
        self.add_common_args()
        self.vm.add_args("-accel", "kvm")

        if custom_kernel is None:
            return

        # Boot the distro PXE kernel/initrd instead of the disk's kernel
        kernel_url = self.distro.pxeboot_url + 'vmlinuz'
        initrd_url = self.distro.pxeboot_url + 'initrd.img'
        self.kernel_path = self.fetch_asset(kernel_url)
        self.initrd_path = self.fetch_asset(initrd_url)

    def run_and_check(self):
        if self.kernel_path:
            self.vm.add_args('-kernel', self.kernel_path,
                             '-append', self.kernel_params,
                             '-initrd', self.initrd_path)
        self.launch_and_wait()
        # Sanity-check the guest actually came up with the IOMMU active
        self.ssh_command('cat /proc/cmdline')
        self.ssh_command('dmesg | grep -e DMAR -e IOMMU')
        self.ssh_command('find /sys/kernel/iommu_groups/ -type l')
        self.ssh_command('dnf -y install numactl-devel')

    def test_intel_iommu(self):
        """
        :avocado: tags=intel_iommu_intremap
        """
        self.common_vm_setup(True)
        self.vm.add_args('-device', 'intel-iommu,intremap=on')
        self.vm.add_args('-machine', 'kernel_irqchip=split')
        self.kernel_params = (self.distro.default_kernel_params +
                              ' quiet intel_iommu=on')
        self.run_and_check()

    def test_intel_iommu_strict(self):
        """
        :avocado: tags=intel_iommu_strict
        """
        self.common_vm_setup(True)
        self.vm.add_args('-device', 'intel-iommu,intremap=on')
        self.vm.add_args('-machine', 'kernel_irqchip=split')
        self.kernel_params = (self.distro.default_kernel_params +
                              ' quiet intel_iommu=on,strict')
        self.run_and_check()

    def test_intel_iommu_strict_cm(self):
        """
        :avocado: tags=intel_iommu_strict_cm
        """
        self.common_vm_setup(True)
        self.vm.add_args('-device', 'intel-iommu,intremap=on,caching-mode=on')
        self.vm.add_args('-machine', 'kernel_irqchip=split')
        self.kernel_params = (self.distro.default_kernel_params +
                              ' quiet intel_iommu=on,strict')
        self.run_and_check()

    def test_intel_iommu_pt(self):
        """
        :avocado: tags=intel_iommu_pt
        """
        self.common_vm_setup(True)
        self.vm.add_args('-device', 'intel-iommu,intremap=on')
        self.vm.add_args('-machine', 'kernel_irqchip=split')
        self.kernel_params = (self.distro.default_kernel_params +
                              ' quiet intel_iommu=on iommu=pt')
        self.run_and_check()
4,167
33.733333
80
py
qemu
qemu-master/tests/avocado/virtio-gpu.py
# virtio-gpu tests
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later.  See the COPYING file in the top-level directory.


from avocado_qemu import BUILD_DIR
from avocado_qemu import QemuSystemTest
from avocado_qemu import wait_for_console_pattern
from avocado_qemu import exec_command_and_wait_for_pattern
from avocado_qemu import is_readable_executable_file

from qemu.utils import kvm_available

import os
import socket
import subprocess


def pick_default_vug_bin():
    """Locate the vhost-user-gpu helper in the source or build tree.

    Returns its path, or None if neither location has a readable
    executable.
    """
    relative_path = "./contrib/vhost-user-gpu/vhost-user-gpu"
    if is_readable_executable_file(relative_path):
        return relative_path

    bld_dir_path = os.path.join(BUILD_DIR, relative_path)
    if is_readable_executable_file(bld_dir_path):
        return bld_dir_path


class VirtioGPUx86(QemuSystemTest):
    """
    :avocado: tags=virtio-gpu
    :avocado: tags=arch:x86_64
    :avocado: tags=cpu:host
    """

    KERNEL_COMMAND_LINE = "printk.time=0 console=ttyS0 rdinit=/bin/bash"
    KERNEL_URL = (
        "https://archives.fedoraproject.org/pub/fedora"
        "/linux/releases/33/Everything/x86_64/os/images"
        "/pxeboot/vmlinuz"
    )
    KERNEL_HASH = '1433cfe3f2ffaa44de4ecfb57ec25dc2399cdecf'
    INITRD_URL = (
        "https://archives.fedoraproject.org/pub/fedora"
        "/linux/releases/33/Everything/x86_64/os/images"
        "/pxeboot/initrd.img"
    )
    INITRD_HASH = 'c828d68a027b53e5220536585efe03412332c2d9'

    def wait_for_console_pattern(self, success_message, vm=None):
        wait_for_console_pattern(
            self,
            success_message,
            failure_message="Kernel panic - not syncing",
            vm=vm,
        )

    def test_virtio_vga_virgl(self):
        """
        :avocado: tags=device:virtio-vga-gl
        """
        # FIXME: should check presence of virtio, virgl etc
        self.require_accelerator('kvm')

        kernel_path = self.fetch_asset(self.KERNEL_URL, self.KERNEL_HASH)
        initrd_path = self.fetch_asset(self.INITRD_URL, self.INITRD_HASH)

        self.vm.set_console()
        self.vm.add_args("-m", "2G")
        self.vm.add_args("-machine", "pc,accel=kvm")
        self.vm.add_args("-device", "virtio-vga-gl")
        self.vm.add_args("-display", "egl-headless")
        self.vm.add_args(
            "-kernel",
            kernel_path,
            "-initrd",
            initrd_path,
            "-append",
            self.KERNEL_COMMAND_LINE,
        )
        # Was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; narrow it to Exception.
        try:
            self.vm.launch()
        except Exception:
            # TODO: probably fails because we are missing the VirGL features
            self.cancel("VirGL not enabled?")

        self.wait_for_console_pattern("as init process")
        exec_command_and_wait_for_pattern(
            self, "/usr/sbin/modprobe virtio_gpu", ""
        )
        self.wait_for_console_pattern("features: +virgl +edid")

    def test_vhost_user_vga_virgl(self):
        """
        :avocado: tags=device:vhost-user-vga
        """
        # FIXME: should check presence of vhost-user-gpu, virgl, memfd etc
        self.require_accelerator('kvm')

        vug = pick_default_vug_bin()
        if not vug:
            self.cancel("Could not find vhost-user-gpu")

        kernel_path = self.fetch_asset(self.KERNEL_URL, self.KERNEL_HASH)
        initrd_path = self.fetch_asset(self.INITRD_URL, self.INITRD_HASH)

        # Create socketpair to connect proxy and remote processes
        qemu_sock, vug_sock = socket.socketpair(
            socket.AF_UNIX, socket.SOCK_STREAM
        )
        os.set_inheritable(qemu_sock.fileno(), True)
        os.set_inheritable(vug_sock.fileno(), True)

        # Log file stays open for the lifetime of the helper process
        self._vug_log_path = os.path.join(
            self.logdir, "vhost-user-gpu.log"
        )
        self._vug_log_file = open(self._vug_log_path, "wb")
        self.log.info('Complete vhost-user-gpu.log file can be '
                      'found at %s', self._vug_log_path)

        vugp = subprocess.Popen(
            [vug, "--virgl", "--fd=%d" % vug_sock.fileno()],
            stdin=subprocess.DEVNULL,
            stdout=self._vug_log_file,
            stderr=subprocess.STDOUT,
            shell=False,
            close_fds=False,
        )

        self.vm.set_console()
        self.vm.add_args("-m", "2G")
        self.vm.add_args("-object", "memory-backend-memfd,id=mem,size=2G")
        self.vm.add_args("-machine", "pc,memory-backend=mem,accel=kvm")
        self.vm.add_args("-chardev", "socket,id=vug,fd=%d" % qemu_sock.fileno())
        self.vm.add_args("-device", "vhost-user-vga,chardev=vug")
        self.vm.add_args("-display", "egl-headless")
        self.vm.add_args(
            "-kernel",
            kernel_path,
            "-initrd",
            initrd_path,
            "-append",
            self.KERNEL_COMMAND_LINE,
        )
        self.vm.launch()
        self.wait_for_console_pattern("as init process")
        exec_command_and_wait_for_pattern(
            self, "/usr/sbin/modprobe virtio_gpu", ""
        )
        self.wait_for_console_pattern("features: +virgl -edid")
        self.vm.shutdown()
        qemu_sock.close()
        vugp.terminate()
        vugp.wait()
5,153
32.038462
80
py
qemu
qemu-master/tests/avocado/machine_m68k_nextcube.py
# Functional test that boots a VM and run OCR on the framebuffer
#
# Copyright (c) 2019 Philippe Mathieu-Daudé <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.

import os
import time

from avocado_qemu import QemuSystemTest
from avocado import skipUnless
from tesseract_utils import tesseract_available, tesseract_ocr

# Pillow is optional; the framebuffer-size test is skipped without it.
PIL_AVAILABLE = True
try:
    from PIL import Image
except ImportError:
    PIL_AVAILABLE = False


class NextCubeMachine(QemuSystemTest):
    """
    :avocado: tags=arch:m68k
    :avocado: tags=machine:next-cube
    :avocado: tags=device:framebuffer
    """

    timeout = 15

    def check_bootrom_framebuffer(self, screenshot_path):
        """Boot the NeXT ROM and dump the framebuffer to screenshot_path."""
        bios_url = ('http://www.nextcomputers.org/NeXTfiles/Software/ROM_Files/'
                    '68040_Non-Turbo_Chipset/Rev_2.5_v66.BIN')
        bios_hash = 'b3534796abae238a0111299fc406a9349f7fee24'
        bios_path = self.fetch_asset(bios_url, asset_hash=bios_hash)

        self.vm.add_args('-bios', bios_path)
        self.vm.launch()

        self.log.info('VM launched, waiting for display')
        # TODO: Use avocado.utils.wait.wait_for to catch the
        #       'displaysurface_create 1120x832' trace-event.
        time.sleep(2)
        self.vm.command('human-monitor-command',
                        command_line='screendump %s' % screenshot_path)

    def _ocr_bootrom_framebuffer(self, tesseract_version):
        """Screendump the ROM framebuffer and return its OCR'd text."""
        screenshot_path = os.path.join(self.workdir, "dump.ppm")
        self.check_bootrom_framebuffer(screenshot_path)
        lines = tesseract_ocr(screenshot_path,
                              tesseract_version=tesseract_version)
        return '\n'.join(lines)

    @skipUnless(PIL_AVAILABLE, 'Python PIL not installed')
    def test_bootrom_framebuffer_size(self):
        screenshot_path = os.path.join(self.workdir, "dump.ppm")
        self.check_bootrom_framebuffer(screenshot_path)

        image = Image.open(screenshot_path)
        self.assertEqual(image.size[0], 1120)
        self.assertEqual(image.size[1], 832)

    @skipUnless(tesseract_available(3), 'tesseract v3 OCR tool not available')
    def test_bootrom_framebuffer_ocr_with_tesseract_v3(self):
        text = self._ocr_bootrom_framebuffer(3)
        self.assertIn('Backplane', text)
        self.assertIn('Ethernet address', text)

    # Tesseract 4 adds a new OCR engine based on LSTM neural networks. The
    # new version is faster and more accurate than version 3. The drawback is
    # that it is still alpha-level software.
    @skipUnless(tesseract_available(4), 'tesseract v4 OCR tool not available')
    def test_bootrom_framebuffer_ocr_with_tesseract_v4(self):
        text = self._ocr_bootrom_framebuffer(4)
        self.assertIn('Testing the FPU, SCC', text)
        self.assertIn('System test failed. Error code', text)
        self.assertIn('Boot command', text)
        self.assertIn('Next>', text)
2,989
36.375
79
py
qemu
qemu-master/tests/avocado/empty_cpu_model.py
# Check for crash when using empty -cpu option
#
# Copyright (c) 2019 Red Hat, Inc.
#
# Author:
#  Eduardo Habkost <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later.  See the COPYING file in the top-level directory.

from avocado_qemu import QemuSystemTest


class EmptyCPUModel(QemuSystemTest):
    """Regression test: an empty '-cpu' argument must not crash QEMU.

    QEMU is expected to exit with status 1 and print a clear error
    message instead of crashing.
    """

    def test(self):
        self.vm.add_args('-S', '-display', 'none',
                         '-machine', 'none', '-cpu', '')
        # QEMU should exit immediately with an error, so there will be
        # no monitor to connect to.
        self.vm.set_qmp_monitor(enabled=False)
        self.vm.launch()
        self.vm.wait()
        # assertEquals() is a deprecated alias (removed in Python 3.12);
        # use the canonical assertEqual() spelling.
        self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
        self.assertRegex(self.vm.get_log(), r'-cpu option cannot be empty')
698
33.95
82
py
qemu
qemu-master/tests/avocado/machine_avr6.py
#
# QEMU AVR integration tests
#
# Copyright (c) 2019-2020 Michael Rolnik <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import time

from avocado_qemu import QemuSystemTest


class AVR6Machine(QemuSystemTest):
    """Boot a FreeRTOS demo ELF on the emulated Arduino Mega 2560."""

    timeout = 5

    def test_freertos(self):
        """
        :avocado: tags=arch:avr
        :avocado: tags=machine:arduino-mega-2560-v3

        https://github.com/seharris/qemu-avr-tests/raw/master/free-rtos/Demo/AVR_ATMega2560_GCC/demo.elf
        constantly prints out 'ABCDEFGHIJKLMNOPQRSTUVWXABCDEFGHIJKLMNOPQRSTUVWX'
        """
        # Note: the explanatory text above used to be a second, separate
        # triple-quoted string — a dead bare-string expression statement,
        # not part of the docstring. It is now folded into the docstring.
        rom_url = ('https://github.com/seharris/qemu-avr-tests'
                   '/raw/36c3e67b8755dcf/free-rtos/Demo'
                   '/AVR_ATMega2560_GCC/demo.elf')
        rom_hash = '7eb521f511ca8f2622e0a3c5e8dd686efbb911d4'
        rom_path = self.fetch_asset(rom_url, asset_hash=rom_hash)

        self.vm.add_args('-bios', rom_path)
        self.vm.add_args('-nographic')
        self.vm.launch()

        # Give the guest time to emit at least one full line of the
        # repeating pattern on the serial console before checking.
        time.sleep(2)
        self.vm.shutdown()

        self.assertIn('ABCDEFGHIJKLMNOPQRSTUVWXABCDEFGHIJKLMNOPQRSTUVWX',
                      self.vm.get_log())
1,755
33.431373
104
py
qemu
qemu-master/tests/avocado/machine_sparc64_sun4u.py
# Functional test that boots a Linux kernel and checks the console
#
# Copyright (c) 2020 Red Hat, Inc.
#
# Author:
#  Thomas Huth <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.

import os

from avocado_qemu import wait_for_console_pattern
from avocado.utils import archive
from boot_linux_console import LinuxKernelTest


class Sun4uMachine(LinuxKernelTest):
    """Boots the Linux kernel and checks that the console is operational"""

    timeout = 90

    def test_sparc64_sun4u(self):
        """
        :avocado: tags=arch:sparc64
        :avocado: tags=machine:sun4u
        """
        tar_url = ('https://qemu-advcal.gitlab.io'
                   '/qac-best-of-multiarch/download/day23.tar.xz')
        tar_hash = '142db83cd974ffadc4f75c8a5cad5bcc5722c240'
        file_path = self.fetch_asset(tar_url, asset_hash=tar_hash)
        archive.extract(file_path, self.workdir)
        self.vm.set_console()
        # Build the kernel path with os.path.join rather than string
        # concatenation, consistent with the other avocado tests and
        # robust against a workdir with a trailing separator.
        self.vm.add_args('-kernel',
                         os.path.join(self.workdir, 'day23', 'vmlinux'),
                         '-append', self.KERNEL_COMMON_COMMAND_LINE)
        self.vm.launch()
        wait_for_console_pattern(self, 'Starting logging: OK')
1,238
32.486486
75
py
qemu
qemu-master/tests/avocado/replay_kernel.py
# Record/replay test that boots a Linux kernel # # Copyright (c) 2020 ISP RAS # # Author: # Pavel Dovgalyuk <[email protected]> # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. import os import lzma import shutil import logging import time from avocado import skip from avocado import skipIf from avocado import skipUnless from avocado_qemu import wait_for_console_pattern from avocado.utils import archive from avocado.utils import process from boot_linux_console import LinuxKernelTest class ReplayKernelBase(LinuxKernelTest): """ Boots a Linux kernel in record mode and checks that the console is operational and the kernel command line is properly passed from QEMU to the kernel. Then replays the same scenario and verifies, that QEMU correctly terminates. """ timeout = 120 KERNEL_COMMON_COMMAND_LINE = 'printk.time=1 panic=-1 ' def run_vm(self, kernel_path, kernel_command_line, console_pattern, record, shift, args, replay_path): # icount requires TCG to be available self.require_accelerator('tcg') logger = logging.getLogger('replay') start_time = time.time() vm = self.get_vm() vm.set_console() if record: logger.info('recording the execution...') mode = 'record' else: logger.info('replaying the execution...') mode = 'replay' vm.add_args('-icount', 'shift=%s,rr=%s,rrfile=%s' % (shift, mode, replay_path), '-kernel', kernel_path, '-append', kernel_command_line, '-net', 'none', '-no-reboot') if args: vm.add_args(*args) vm.launch() self.wait_for_console_pattern(console_pattern, vm) if record: vm.shutdown() logger.info('finished the recording with log size %s bytes' % os.path.getsize(replay_path)) else: vm.wait() logger.info('successfully finished the replay') elapsed = time.time() - start_time logger.info('elapsed time %.2f sec' % elapsed) return elapsed def run_rr(self, kernel_path, kernel_command_line, console_pattern, shift=7, args=None): replay_path = os.path.join(self.workdir, 'replay.bin') t1 
= self.run_vm(kernel_path, kernel_command_line, console_pattern, True, shift, args, replay_path) t2 = self.run_vm(kernel_path, kernel_command_line, console_pattern, False, shift, args, replay_path) logger = logging.getLogger('replay') logger.info('replay overhead {:.2%}'.format(t2 / t1 - 1)) class ReplayKernelNormal(ReplayKernelBase): @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab') def test_x86_64_pc(self): """ :avocado: tags=arch:x86_64 :avocado: tags=machine:pc """ kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora' '/linux/releases/29/Everything/x86_64/os/images/pxeboot' '/vmlinuz') kernel_hash = '23bebd2680757891cf7adedb033532163a792495' kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' console_pattern = 'VFS: Cannot open root device' self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5) def test_mips_malta(self): """ :avocado: tags=arch:mips :avocado: tags=machine:malta :avocado: tags=endian:big """ deb_url = ('http://snapshot.debian.org/archive/debian/' '20130217T032700Z/pool/main/l/linux-2.6/' 'linux-image-2.6.32-5-4kc-malta_2.6.32-48_mips.deb') deb_hash = 'a8cfc28ad8f45f54811fc6cf74fc43ffcfe0ba04' deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) kernel_path = self.extract_from_deb(deb_path, '/boot/vmlinux-2.6.32-5-4kc-malta') kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' console_pattern = 'Kernel command line: %s' % kernel_command_line self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5) def test_mips64el_malta(self): """ This test requires the ar tool to extract "data.tar.gz" from the Debian package. The kernel can be rebuilt using this Debian kernel source [1] and following the instructions on [2]. 
[1] http://snapshot.debian.org/package/linux-2.6/2.6.32-48/ #linux-source-2.6.32_2.6.32-48 [2] https://kernel-team.pages.debian.net/kernel-handbook/ ch-common-tasks.html#s-common-official :avocado: tags=arch:mips64el :avocado: tags=machine:malta """ deb_url = ('http://snapshot.debian.org/archive/debian/' '20130217T032700Z/pool/main/l/linux-2.6/' 'linux-image-2.6.32-5-5kc-malta_2.6.32-48_mipsel.deb') deb_hash = '1aaec92083bf22fda31e0d27fa8d9a388e5fc3d5' deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) kernel_path = self.extract_from_deb(deb_path, '/boot/vmlinux-2.6.32-5-5kc-malta') kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' console_pattern = 'Kernel command line: %s' % kernel_command_line self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5) def test_aarch64_virt(self): """ :avocado: tags=arch:aarch64 :avocado: tags=machine:virt :avocado: tags=cpu:cortex-a53 """ kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora' '/linux/releases/29/Everything/aarch64/os/images/pxeboot' '/vmlinuz') kernel_hash = '8c73e469fc6ea06a58dc83a628fc695b693b8493' kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyAMA0') console_pattern = 'VFS: Cannot open root device' self.run_rr(kernel_path, kernel_command_line, console_pattern) def test_arm_virt(self): """ :avocado: tags=arch:arm :avocado: tags=machine:virt """ kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora' '/linux/releases/29/Everything/armhfp/os/images/pxeboot' '/vmlinuz') kernel_hash = 'e9826d741b4fb04cadba8d4824d1ed3b7fb8b4d4' kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyAMA0') console_pattern = 'VFS: Cannot open root device' self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=1) @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab') def 
test_arm_cubieboard_initrd(self): """ :avocado: tags=arch:arm :avocado: tags=machine:cubieboard """ deb_url = ('https://apt.armbian.com/pool/main/l/' 'linux-5.10.16-sunxi/linux-image-current-sunxi_21.02.2_armhf.deb') deb_hash = '9fa84beda245cabf0b4fa84cf6eaa7738ead1da0' deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) kernel_path = self.extract_from_deb(deb_path, '/boot/vmlinuz-5.10.16-sunxi') dtb_path = '/usr/lib/linux-image-current-sunxi/sun4i-a10-cubieboard.dtb' dtb_path = self.extract_from_deb(deb_path, dtb_path) initrd_url = ('https://github.com/groeck/linux-build-test/raw/' '2eb0a73b5d5a28df3170c546ddaaa9757e1e0848/rootfs/' 'arm/rootfs-armv5.cpio.gz') initrd_hash = '2b50f1873e113523967806f4da2afe385462ff9b' initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash) initrd_path = os.path.join(self.workdir, 'rootfs.cpio') archive.gzip_uncompress(initrd_path_gz, initrd_path) kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0,115200 ' 'usbcore.nousb ' 'panic=-1 noreboot') console_pattern = 'Boot successful.' 
self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=1, args=('-dtb', dtb_path, '-initrd', initrd_path, '-no-reboot')) def test_s390x_s390_ccw_virtio(self): """ :avocado: tags=arch:s390x :avocado: tags=machine:s390-ccw-virtio """ kernel_url = ('https://archives.fedoraproject.org/pub/archive' '/fedora-secondary/releases/29/Everything/s390x/os/images' '/kernel.img') kernel_hash = 'e8e8439103ef8053418ef062644ffd46a7919313' kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=sclp0' console_pattern = 'Kernel command line: %s' % kernel_command_line self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=9) def test_alpha_clipper(self): """ :avocado: tags=arch:alpha :avocado: tags=machine:clipper """ kernel_url = ('http://archive.debian.org/debian/dists/lenny/main/' 'installer-alpha/20090123lenny10/images/cdrom/vmlinuz') kernel_hash = '3a943149335529e2ed3e74d0d787b85fb5671ba3' kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) uncompressed_kernel = archive.uncompress(kernel_path, self.workdir) kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' console_pattern = 'Kernel command line: %s' % kernel_command_line self.run_rr(uncompressed_kernel, kernel_command_line, console_pattern, shift=9, args=('-nodefaults', )) def test_ppc64_pseries(self): """ :avocado: tags=arch:ppc64 :avocado: tags=machine:pseries :avocado: tags=accel:tcg """ kernel_url = ('https://archives.fedoraproject.org/pub/archive' '/fedora-secondary/releases/29/Everything/ppc64le/os' '/ppc/ppc64/vmlinuz') kernel_hash = '3fe04abfc852b66653b8c3c897a59a689270bc77' kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=hvc0' # icount is not good enough for PPC64 for complete boot yet console_pattern = 'Kernel command line: %s' % kernel_command_line self.run_rr(kernel_path, kernel_command_line, 
console_pattern) def test_m68k_q800(self): """ :avocado: tags=arch:m68k :avocado: tags=machine:q800 """ deb_url = ('https://snapshot.debian.org/archive/debian-ports' '/20191021T083923Z/pool-m68k/main' '/l/linux/kernel-image-5.3.0-1-m68k-di_5.3.7-1_m68k.udeb') deb_hash = '044954bb9be4160a3ce81f8bc1b5e856b75cccd1' deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) kernel_path = self.extract_from_deb(deb_path, '/boot/vmlinux-5.3.0-1-m68k') kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0 vga=off') console_pattern = 'No filesystem could mount root' self.run_rr(kernel_path, kernel_command_line, console_pattern) def do_test_advcal_2018(self, file_path, kernel_name, args=None): archive.extract(file_path, self.workdir) for entry in os.scandir(self.workdir): if entry.name.startswith('day') and entry.is_dir(): kernel_path = os.path.join(entry.path, kernel_name) break kernel_command_line = '' console_pattern = 'QEMU advent calendar' self.run_rr(kernel_path, kernel_command_line, console_pattern, args=args) def test_arm_vexpressa9(self): """ :avocado: tags=arch:arm :avocado: tags=machine:vexpress-a9 """ tar_hash = '32b7677ce8b6f1471fb0059865f451169934245b' tar_url = ('https://qemu-advcal.gitlab.io' '/qac-best-of-multiarch/download/day16.tar.xz') file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) dtb_path = self.workdir + '/day16/vexpress-v2p-ca9.dtb' self.do_test_advcal_2018(file_path, 'winter.zImage', args=('-dtb', dtb_path)) def test_m68k_mcf5208evb(self): """ :avocado: tags=arch:m68k :avocado: tags=machine:mcf5208evb """ tar_hash = 'ac688fd00561a2b6ce1359f9ff6aa2b98c9a570c' tar_url = ('https://qemu-advcal.gitlab.io' '/qac-best-of-multiarch/download/day07.tar.xz') file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) self.do_test_advcal_2018(file_path, 'sanity-clause.elf') @skip("Test currently broken") # Console stuck as of 5.2-rc1 def test_microblaze_s3adsp1800(self): """ :avocado: tags=arch:microblaze :avocado: 
tags=machine:petalogix-s3adsp1800 """ tar_hash = '08bf3e3bfb6b6c7ce1e54ab65d54e189f2caf13f' tar_url = ('https://qemu-advcal.gitlab.io' '/qac-best-of-multiarch/download/day17.tar.xz') file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) self.do_test_advcal_2018(file_path, 'ballerina.bin') def test_ppc64_e500(self): """ :avocado: tags=arch:ppc64 :avocado: tags=machine:ppce500 :avocado: tags=cpu:e5500 """ tar_hash = '6951d86d644b302898da2fd701739c9406527fe1' tar_url = ('https://qemu-advcal.gitlab.io' '/qac-best-of-multiarch/download/day19.tar.xz') file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) self.do_test_advcal_2018(file_path, 'uImage') def test_or1k_sim(self): """ :avocado: tags=arch:or1k :avocado: tags=machine:or1k-sim """ tar_hash = '20334cdaf386108c530ff0badaecc955693027dd' tar_url = ('https://qemu-advcal.gitlab.io' '/qac-best-of-multiarch/download/day20.tar.xz') file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) self.do_test_advcal_2018(file_path, 'vmlinux') @skip("nios2 emulation is buggy under record/replay") def test_nios2_10m50(self): """ :avocado: tags=arch:nios2 :avocado: tags=machine:10m50-ghrd """ tar_hash = 'e4251141726c412ac0407c5a6bceefbbff018918' tar_url = ('https://qemu-advcal.gitlab.io' '/qac-best-of-multiarch/download/day14.tar.xz') file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) self.do_test_advcal_2018(file_path, 'vmlinux.elf') def test_ppc_g3beige(self): """ :avocado: tags=arch:ppc :avocado: tags=machine:g3beige """ tar_hash = 'e0b872a5eb8fdc5bed19bd43ffe863900ebcedfc' tar_url = ('https://qemu-advcal.gitlab.io' '/qac-best-of-multiarch/download/day15.tar.xz') file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) self.do_test_advcal_2018(file_path, 'invaders.elf', args=('-M', 'graphics=off')) def test_ppc_mac99(self): """ :avocado: tags=arch:ppc :avocado: tags=machine:mac99 """ tar_hash = 'e0b872a5eb8fdc5bed19bd43ffe863900ebcedfc' tar_url = ('https://qemu-advcal.gitlab.io' 
'/qac-best-of-multiarch/download/day15.tar.xz') file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) self.do_test_advcal_2018(file_path, 'invaders.elf', args=('-M', 'graphics=off')) def test_sparc_ss20(self): """ :avocado: tags=arch:sparc :avocado: tags=machine:SS-20 """ tar_hash = 'b18550d5d61c7615d989a06edace051017726a9f' tar_url = ('https://qemu-advcal.gitlab.io' '/qac-best-of-multiarch/download/day11.tar.xz') file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) self.do_test_advcal_2018(file_path, 'zImage.elf') def test_xtensa_lx60(self): """ :avocado: tags=arch:xtensa :avocado: tags=machine:lx60 :avocado: tags=cpu:dc233c """ tar_hash = '49e88d9933742f0164b60839886c9739cb7a0d34' tar_url = ('https://qemu-advcal.gitlab.io' '/qac-best-of-multiarch/download/day02.tar.xz') file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) self.do_test_advcal_2018(file_path, 'santas-sleigh-ride.elf') @skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout') class ReplayKernelSlow(ReplayKernelBase): # Override the timeout, because this kernel includes an inner # loop which is executed with TB recompilings during replay, # making it very slow. 
timeout = 180 def test_mips_malta_cpio(self): """ :avocado: tags=arch:mips :avocado: tags=machine:malta :avocado: tags=endian:big :avocado: tags=slowness:high """ deb_url = ('http://snapshot.debian.org/archive/debian/' '20160601T041800Z/pool/main/l/linux/' 'linux-image-4.5.0-2-4kc-malta_4.5.5-1_mips.deb') deb_hash = 'a3c84f3e88b54e06107d65a410d1d1e8e0f340f8' deb_path = self.fetch_asset(deb_url, asset_hash=deb_hash) kernel_path = self.extract_from_deb(deb_path, '/boot/vmlinux-4.5.0-2-4kc-malta') initrd_url = ('https://github.com/groeck/linux-build-test/raw/' '8584a59ed9e5eb5ee7ca91f6d74bbb06619205b8/rootfs/' 'mips/rootfs.cpio.gz') initrd_hash = 'bf806e17009360a866bf537f6de66590de349a99' initrd_path_gz = self.fetch_asset(initrd_url, asset_hash=initrd_hash) initrd_path = self.workdir + "rootfs.cpio" archive.gzip_uncompress(initrd_path_gz, initrd_path) kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0 console=tty ' 'rdinit=/sbin/init noreboot') console_pattern = 'Boot successful.' 
self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5, args=('-initrd', initrd_path)) @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code') def test_mips64el_malta_5KEc_cpio(self): """ :avocado: tags=arch:mips64el :avocado: tags=machine:malta :avocado: tags=endian:little :avocado: tags=slowness:high :avocado: tags=cpu:5KEc """ kernel_url = ('https://github.com/philmd/qemu-testing-blob/' 'raw/9ad2df38/mips/malta/mips64el/' 'vmlinux-3.19.3.mtoman.20150408') kernel_hash = '00d1d268fb9f7d8beda1de6bebcc46e884d71754' kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) initrd_url = ('https://github.com/groeck/linux-build-test/' 'raw/8584a59e/rootfs/' 'mipsel64/rootfs.mipsel64r1.cpio.gz') initrd_hash = '1dbb8a396e916847325284dbe2151167' initrd_path_gz = self.fetch_asset(initrd_url, algorithm='md5', asset_hash=initrd_hash) initrd_path = self.workdir + "rootfs.cpio" archive.gzip_uncompress(initrd_path_gz, initrd_path) kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0 console=tty ' 'rdinit=/sbin/init noreboot') console_pattern = 'Boot successful.' 
self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5, args=('-initrd', initrd_path)) def do_test_mips_malta32el_nanomips(self, kernel_path_xz): kernel_path = self.workdir + "kernel" with lzma.open(kernel_path_xz, 'rb') as f_in: with open(kernel_path, 'wb') as f_out: shutil.copyfileobj(f_in, f_out) kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + 'mem=256m@@0x0 ' 'console=ttyS0') console_pattern = 'Kernel command line: %s' % kernel_command_line self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5) def test_mips_malta32el_nanomips_4k(self): """ :avocado: tags=arch:mipsel :avocado: tags=machine:malta :avocado: tags=endian:little :avocado: tags=cpu:I7200 """ kernel_url = ('https://mipsdistros.mips.com/LinuxDistro/nanomips/' 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/' 'generic_nano32r6el_page4k.xz') kernel_hash = '477456aafd2a0f1ddc9482727f20fe9575565dd6' kernel_path_xz = self.fetch_asset(kernel_url, asset_hash=kernel_hash) self.do_test_mips_malta32el_nanomips(kernel_path_xz) def test_mips_malta32el_nanomips_16k_up(self): """ :avocado: tags=arch:mipsel :avocado: tags=machine:malta :avocado: tags=endian:little :avocado: tags=cpu:I7200 """ kernel_url = ('https://mipsdistros.mips.com/LinuxDistro/nanomips/' 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/' 'generic_nano32r6el_page16k_up.xz') kernel_hash = 'e882868f944c71c816e832e2303b7874d044a7bc' kernel_path_xz = self.fetch_asset(kernel_url, asset_hash=kernel_hash) self.do_test_mips_malta32el_nanomips(kernel_path_xz) def test_mips_malta32el_nanomips_64k_dbg(self): """ :avocado: tags=arch:mipsel :avocado: tags=machine:malta :avocado: tags=endian:little :avocado: tags=cpu:I7200 """ kernel_url = ('https://mipsdistros.mips.com/LinuxDistro/nanomips/' 'kernels/v4.15.18-432-gb2eb9a8b07a1-20180627102142/' 'generic_nano32r6el_page64k_dbg.xz') kernel_hash = '18d1c68f2e23429e266ca39ba5349ccd0aeb7180' kernel_path_xz = self.fetch_asset(kernel_url, asset_hash=kernel_hash) 
self.do_test_mips_malta32el_nanomips(kernel_path_xz)
23,046
42.484906
87
py
qemu
qemu-master/tests/avocado/riscv_opensbi.py
# OpenSBI boot test for RISC-V machines
#
# Copyright (c) 2022, Ventana Micro
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.

from avocado import skip
from avocado_qemu import QemuSystemTest
from avocado_qemu import wait_for_console_pattern


class RiscvOpenSBI(QemuSystemTest):
    """
    :avocado: tags=accel:tcg
    """

    timeout = 5

    def boot_opensbi(self):
        """Launch the VM and wait for the OpenSBI banner on the console."""
        self.vm.set_console()
        self.vm.launch()
        # Two banner lines confirm OpenSBI started and set up the boot HART.
        for message in ('Platform Name', 'Boot HART MEDELEG'):
            wait_for_console_pattern(self, message)

    @skip("requires OpenSBI fix to work")
    def test_riscv32_spike(self):
        """
        :avocado: tags=arch:riscv32
        :avocado: tags=machine:spike
        """
        self.boot_opensbi()

    def test_riscv64_spike(self):
        """
        :avocado: tags=arch:riscv64
        :avocado: tags=machine:spike
        """
        self.boot_opensbi()

    def test_riscv32_sifive_u(self):
        """
        :avocado: tags=arch:riscv32
        :avocado: tags=machine:sifive_u
        """
        self.boot_opensbi()

    def test_riscv64_sifive_u(self):
        """
        :avocado: tags=arch:riscv64
        :avocado: tags=machine:sifive_u
        """
        self.boot_opensbi()

    def test_riscv32_virt(self):
        """
        :avocado: tags=arch:riscv32
        :avocado: tags=machine:virt
        """
        self.boot_opensbi()

    def test_riscv64_virt(self):
        """
        :avocado: tags=arch:riscv64
        :avocado: tags=machine:virt
        """
        self.boot_opensbi()
1,632
23.742424
68
py
qemu
qemu-master/tests/avocado/machine_aarch64_virt.py
# Functional test that boots a various Linux systems and checks the
# console output.
#
# Copyright (c) 2022 Linaro Ltd.
#
# Author:
#  Alex Bennée <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later

import time
import os
import logging

from avocado_qemu import QemuSystemTest
from avocado_qemu import wait_for_console_pattern
from avocado_qemu import exec_command
from avocado_qemu import BUILD_DIR
from avocado.utils import process
from avocado.utils.path import find_command


class Aarch64VirtMachine(QemuSystemTest):
    KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
    timeout = 360

    def wait_for_console_pattern(self, success_message, vm=None):
        # Fail fast on a guest kernel panic instead of waiting for timeout.
        wait_for_console_pattern(self, success_message,
                                 failure_message='Kernel panic - not syncing',
                                 vm=vm)

    # This tests the whole boot chain from EFI to Userspace
    # We only boot a whole OS for the current top level CPU and GIC
    # Other test profiles should use more minimal boots
    def test_alpine_virt_tcg_gic_max(self):
        """
        :avocado: tags=arch:aarch64
        :avocado: tags=machine:virt
        :avocado: tags=accel:tcg
        """
        iso_url = ('https://dl-cdn.alpinelinux.org/'
                   'alpine/v3.17/releases/aarch64/'
                   'alpine-standard-3.17.2-aarch64.iso')

        # Alpine use sha256 so I recalculated this myself
        iso_sha1 = '76284fcd7b41fe899b0c2375ceb8470803eea839'
        iso_path = self.fetch_asset(iso_url, asset_hash=iso_sha1)

        self.vm.set_console()
        # The ISO boots through EFI, so no -kernel/-append is passed here;
        # the previously assigned (and unused) kernel command line local
        # has been removed.
        self.require_accelerator("tcg")
        self.vm.add_args("-accel", "tcg")
        self.vm.add_args("-cpu", "max,pauth-impdef=on")
        self.vm.add_args("-machine",
                         "virt,acpi=on,"
                         "virtualization=on,"
                         "mte=on,"
                         "gic-version=max,iommu=smmuv3")
        self.vm.add_args("-smp", "2", "-m", "1024")
        self.vm.add_args('-bios', os.path.join(BUILD_DIR, 'pc-bios',
                                               'edk2-aarch64-code.fd'))
        self.vm.add_args("-drive", f"file={iso_path},format=raw")
        self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0')
        self.vm.add_args('-object', 'rng-random,id=rng0,filename=/dev/urandom')

        self.vm.launch()
        self.wait_for_console_pattern('Welcome to Alpine Linux 3.17')

    def common_aarch64_virt(self, machine):
        """
        Common code to launch basic virt machine with kernel+initrd
        and a scratch disk.
        """
        logger = logging.getLogger('aarch64_virt')

        kernel_url = ('https://fileserver.linaro.org/s/'
                      'z6B2ARM7DQT3HWN/download')
        kernel_hash = 'ed11daab50c151dde0e1e9c9cb8b2d9bd3215347'
        kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)

        self.vm.set_console()
        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
                               'console=ttyAMA0')
        self.require_accelerator("tcg")
        self.vm.add_args('-cpu', 'max,pauth-impdef=on',
                         '-machine', machine,
                         '-accel', 'tcg',
                         '-kernel', kernel_path,
                         '-append', kernel_command_line)

        # A RNG offers an easy way to generate a few IRQs
        self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0')
        self.vm.add_args('-object',
                         'rng-random,id=rng0,filename=/dev/urandom')

        # Also add a scratch block device
        logger.info('creating scratch qcow2 image')
        image_path = os.path.join(self.workdir, 'scratch.qcow2')
        qemu_img = os.path.join(BUILD_DIR, 'qemu-img')
        if not os.path.exists(qemu_img):
            qemu_img = find_command('qemu-img', False)
        if qemu_img is False:
            self.cancel('Could not find "qemu-img", which is required to '
                        'create the temporary qcow2 image')
        cmd = '%s create -f qcow2 %s 8M' % (qemu_img, image_path)
        process.run(cmd)

        # Add the device
        self.vm.add_args('-blockdev',
                         f"driver=qcow2,file.driver=file,file.filename={image_path},node-name=scratch")
        self.vm.add_args('-device',
                         'virtio-blk-device,drive=scratch')

        self.vm.launch()
        self.wait_for_console_pattern('Welcome to Buildroot')
        time.sleep(0.1)
        exec_command(self, 'root')
        time.sleep(0.1)
        exec_command(self, 'dd if=/dev/hwrng of=/dev/vda bs=512 count=4')
        time.sleep(0.1)
        exec_command(self, 'md5sum /dev/vda')
        time.sleep(0.1)
        exec_command(self, 'cat /proc/interrupts')
        time.sleep(0.1)
        exec_command(self, 'cat /proc/self/maps')
        time.sleep(0.1)

    def test_aarch64_virt_gicv3(self):
        """
        :avocado: tags=arch:aarch64
        :avocado: tags=machine:virt
        :avocado: tags=accel:tcg
        :avocado: tags=cpu:max
        """
        self.common_aarch64_virt("virt,gic_version=3")

    def test_aarch64_virt_gicv2(self):
        """
        :avocado: tags=arch:aarch64
        :avocado: tags=machine:virt
        :avocado: tags=accel:tcg
        :avocado: tags=cpu:max
        """
        self.common_aarch64_virt("virt,gic-version=2")
5,515
36.52381
103
py
qemu
qemu-master/tests/avocado/reverse_debugging.py
# Reverse debugging test # # Copyright (c) 2020 ISP RAS # # Author: # Pavel Dovgalyuk <[email protected]> # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. import os import logging from avocado import skipIf from avocado_qemu import BUILD_DIR from avocado.utils import gdb from avocado.utils import process from avocado.utils.network.ports import find_free_port from avocado.utils.path import find_command from boot_linux_console import LinuxKernelTest class ReverseDebugging(LinuxKernelTest): """ Test GDB reverse debugging commands: reverse step and reverse continue. Recording saves the execution of some instructions and makes an initial VM snapshot to allow reverse execution. Replay saves the order of the first instructions and then checks that they are executed backwards in the correct order. After that the execution is replayed to the end, and reverse continue command is checked by setting several breakpoints, and asserting that the execution is stopped at the last of them. 
""" timeout = 10 STEPS = 10 endian_is_le = True def run_vm(self, record, shift, args, replay_path, image_path, port): logger = logging.getLogger('replay') vm = self.get_vm() vm.set_console() if record: logger.info('recording the execution...') mode = 'record' else: logger.info('replaying the execution...') mode = 'replay' vm.add_args('-gdb', 'tcp::%d' % port, '-S') vm.add_args('-icount', 'shift=%s,rr=%s,rrfile=%s,rrsnapshot=init' % (shift, mode, replay_path), '-net', 'none') vm.add_args('-drive', 'file=%s,if=none' % image_path) if args: vm.add_args(*args) vm.launch() return vm @staticmethod def get_reg_le(g, reg): res = g.cmd(b'p%x' % reg) num = 0 for i in range(len(res))[-2::-2]: num = 0x100 * num + int(res[i:i + 2], 16) return num @staticmethod def get_reg_be(g, reg): res = g.cmd(b'p%x' % reg) return int(res, 16) def get_reg(self, g, reg): # value may be encoded in BE or LE order if self.endian_is_le: return self.get_reg_le(g, reg) else: return self.get_reg_be(g, reg) def get_pc(self, g): return self.get_reg(g, self.REG_PC) def check_pc(self, g, addr): pc = self.get_pc(g) if pc != addr: self.fail('Invalid PC (read %x instead of %x)' % (pc, addr)) @staticmethod def gdb_step(g): g.cmd(b's', b'T05thread:01;') @staticmethod def gdb_bstep(g): g.cmd(b'bs', b'T05thread:01;') @staticmethod def vm_get_icount(vm): return vm.qmp('query-replay')['return']['icount'] def reverse_debugging(self, shift=7, args=None): logger = logging.getLogger('replay') # create qcow2 for snapshots logger.info('creating qcow2 image for VM snapshots') image_path = os.path.join(self.workdir, 'disk.qcow2') qemu_img = os.path.join(BUILD_DIR, 'qemu-img') if not os.path.exists(qemu_img): qemu_img = find_command('qemu-img', False) if qemu_img is False: self.cancel('Could not find "qemu-img", which is required to ' 'create the temporary qcow2 image') cmd = '%s create -f qcow2 %s 128M' % (qemu_img, image_path) process.run(cmd) replay_path = os.path.join(self.workdir, 'replay.bin') port = find_free_port() 
# record the log vm = self.run_vm(True, shift, args, replay_path, image_path, port) while self.vm_get_icount(vm) <= self.STEPS: pass last_icount = self.vm_get_icount(vm) vm.shutdown() logger.info("recorded log with %s+ steps" % last_icount) # replay and run debug commands vm = self.run_vm(False, shift, args, replay_path, image_path, port) logger.info('connecting to gdbstub') g = gdb.GDBRemote('127.0.0.1', port, False, False) g.connect() r = g.cmd(b'qSupported') if b'qXfer:features:read+' in r: g.cmd(b'qXfer:features:read:target.xml:0,ffb') if b'ReverseStep+' not in r: self.fail('Reverse step is not supported by QEMU') if b'ReverseContinue+' not in r: self.fail('Reverse continue is not supported by QEMU') logger.info('stepping forward') steps = [] # record first instruction addresses for _ in range(self.STEPS): pc = self.get_pc(g) logger.info('saving position %x' % pc) steps.append(pc) self.gdb_step(g) # visit the recorded instruction in reverse order logger.info('stepping backward') for addr in steps[::-1]: self.gdb_bstep(g) self.check_pc(g, addr) logger.info('found position %x' % addr) logger.info('seeking to the end (icount %s)' % (last_icount - 1)) vm.qmp('replay-break', icount=last_icount - 1) # continue - will return after pausing g.cmd(b'c', b'T02thread:01;') logger.info('setting breakpoints') for addr in steps: # hardware breakpoint at addr with len=1 g.cmd(b'Z1,%x,1' % addr, b'OK') logger.info('running reverse continue to reach %x' % steps[-1]) # reverse continue - will return after stopping at the breakpoint g.cmd(b'bc', b'T05thread:01;') # assume that none of the first instructions is executed again # breaking the order of the breakpoints self.check_pc(g, steps[-1]) logger.info('successfully reached %x' % steps[-1]) logger.info('exitting gdb and qemu') vm.shutdown() class ReverseDebugging_X86_64(ReverseDebugging): """ :avocado: tags=accel:tcg """ REG_PC = 0x10 REG_CS = 0x12 def get_pc(self, g): return self.get_reg_le(g, self.REG_PC) \ + self.get_reg_le(g, 
self.REG_CS) * 0x10 # unidentified gitlab timeout problem @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab') def test_x86_64_pc(self): """ :avocado: tags=arch:x86_64 :avocado: tags=machine:pc """ # start with BIOS only self.reverse_debugging() class ReverseDebugging_AArch64(ReverseDebugging): """ :avocado: tags=accel:tcg """ REG_PC = 32 # unidentified gitlab timeout problem @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab') def test_aarch64_virt(self): """ :avocado: tags=arch:aarch64 :avocado: tags=machine:virt :avocado: tags=cpu:cortex-a53 """ kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora' '/linux/releases/29/Everything/aarch64/os/images/pxeboot' '/vmlinuz') kernel_hash = '8c73e469fc6ea06a58dc83a628fc695b693b8493' kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) self.reverse_debugging( args=('-kernel', kernel_path))
7,303
32.351598
79
py
qemu
qemu-master/tests/avocado/virtio_check_params.py
#
# Test virtio-scsi and virtio-blk queue settings for all machine types
#
# Copyright (c) 2019 Virtuozzo International GmbH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

import sys
import os
import re
import logging

from qemu.machine import QEMUMachine
from avocado_qemu import QemuSystemTest
from avocado import skip

#list of machine types and virtqueue properties to test
VIRTIO_SCSI_PROPS = {'seg_max_adjust': 'seg_max_adjust'}
VIRTIO_BLK_PROPS = {'seg_max_adjust': 'seg-max-adjust'}

DEV_TYPES = {'virtio-scsi-pci': VIRTIO_SCSI_PROPS,
             'virtio-blk-pci': VIRTIO_BLK_PROPS}

VM_DEV_PARAMS = {'virtio-scsi-pci': ['-device', 'virtio-scsi-pci,id=scsi0'],
                 'virtio-blk-pci': ['-device',
                                    'virtio-blk-pci,id=scsi0,drive=drive0',
                                    '-drive',
                                    'driver=null-co,id=drive0,if=none']}


class VirtioMaxSegSettingsCheck(QemuSystemTest):
    @staticmethod
    def make_pattern(props):
        """Build a regex matching any '<prop> = <value>' qtree line."""
        # raw string: '\w' is not a valid Python string escape and raises
        # a SyntaxWarning on Python >= 3.12 without the r-prefix
        pattern_items = [r'{0} = \w+'.format(prop) for prop in props]
        return '|'.join(pattern_items)

    def query_virtqueue(self, vm, dev_type_name):
        """Read the virtqueue properties of dev_type_name via 'info qtree'.

        Returns a (query_ok, props, error) triple; when query_ok is True,
        props maps property names to their stringified values.
        """
        query_ok = False
        error = None
        props = None

        output = vm.command('human-monitor-command',
                            command_line = 'info qtree')
        props_list = DEV_TYPES[dev_type_name].values()
        pattern = self.make_pattern(props_list)
        res = re.findall(pattern, output)

        if len(res) != len(props_list):
            props_list = set(props_list)
            res = set(res)
            not_found = props_list.difference(res)
            not_found = ', '.join(not_found)
            error = '({0}): The following properties not found: {1}'\
                     .format(dev_type_name, not_found)
        else:
            query_ok = True
            props = dict()
            for prop in res:
                p = prop.split(' = ')
                props[p[0]] = p[1]
        return query_ok, props, error

    def check_mt(self, mt, dev_type_name):
        """Boot machine type mt with dev_type_name attached and verify
        that each expected virtqueue property has the expected value."""
        mt['device'] = dev_type_name # Only for the debug() call.
        logger = logging.getLogger('machine')
        logger.debug(mt)
        with QEMUMachine(self.qemu_bin) as vm:
            vm.set_machine(mt["name"])
            vm.add_args('-nodefaults')
            for s in VM_DEV_PARAMS[dev_type_name]:
                vm.add_args(s)
            try:
                vm.launch()
                query_ok, props, error = self.query_virtqueue(vm,
                                                              dev_type_name)
            except Exception as exc:
                # was a bare 'except:', which would also swallow
                # KeyboardInterrupt/SystemExit; also report the exception
                # instance instead of just its class (sys.exc_info()[0]),
                # so the failure message carries the actual error text
                query_ok = False
                error = exc

        if not query_ok:
            self.fail('machine type {0}: {1}'.format(mt['name'], error))

        for prop_name, prop_val in props.items():
            expected_val = mt[prop_name]
            self.assertEqual(expected_val, prop_val)

    @staticmethod
    def seg_max_adjust_enabled(mt):
        """Return True iff the versioned machine type name mt is expected
        to have seg_max_adjust enabled (versions >= 5.0)."""
        # machine types >= 5.0 should have seg_max_adjust = true
        # others seg_max_adjust = false
        mt = mt.split("-")

        # machine types with one line name and name like pc-x.x
        if len(mt) <= 2:
            return False

        # machine types like pc-<chip_name>-x.x[.x]
        ver = mt[2]
        ver = ver.split(".")

        # versions >= 5.0 goes with seg_max_adjust enabled
        major = int(ver[0])

        if major >= 5:
            return True
        return False

    @skip("break multi-arch CI")
    def test_machine_types(self):
        # collect all machine types except 'none', 'isapc', 'microvm'
        with QEMUMachine(self.qemu_bin) as vm:
            vm.launch()
            machines = [m['name'] for m in vm.command('query-machines')]
            vm.shutdown()
        machines.remove('none')
        machines.remove('isapc')
        machines.remove('microvm')

        for dev_type in DEV_TYPES:
            # create the list of machine types and their parameters.
            mtypes = list()
            for m in machines:
                if self.seg_max_adjust_enabled(m):
                    enabled = 'true'
                else:
                    enabled = 'false'
                mtypes.append({'name': m,
                               DEV_TYPES[dev_type]['seg_max_adjust']: enabled})

            # test each machine type for a device type
            for mt in mtypes:
                self.check_mt(mt, dev_type)
5,011
33.805556
80
py
qemu
qemu-master/tests/avocado/version.py
# Version check example test
#
# Copyright (c) 2018 Red Hat, Inc.
#
# Author:
#  Cleber Rosa <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.

from avocado_qemu import QemuSystemTest


class Version(QemuSystemTest):
    """
    :avocado: tags=quick
    :avocado: tags=machine:none
    """
    def test_qmp_human_info_version(self):
        """Check that 'info version' reports an x.y.z version string."""
        self.vm.add_args('-nodefaults')
        self.vm.launch()
        res = self.vm.command('human-monitor-command',
                              command_line='info version')
        # assertRegexpMatches was a deprecated alias of assertRegex and
        # was removed in Python 3.12
        self.assertRegex(res, r'^(\d+\.\d+\.\d)')
670
24.807692
68
py
qemu
qemu-master/tests/avocado/ppc_mpc8544ds.py
# Test that Linux kernel boots on ppc machines and check the console
#
# Copyright (c) 2018, 2020 Red Hat, Inc.
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.

from avocado.utils import archive
from avocado_qemu import QemuSystemTest
from avocado_qemu import wait_for_console_pattern


class Mpc8544dsMachine(QemuSystemTest):
    """Boot a guest binary on the mpc8544ds machine and watch the console."""

    timeout = 90

    KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
    panic_message = 'Kernel panic - not syncing'

    def test_ppc_mpc8544ds(self):
        """
        :avocado: tags=arch:ppc
        :avocado: tags=machine:mpc8544ds
        :avocado: tags=accel:tcg
        """
        self.require_accelerator("tcg")
        # The guest image ships inside the QEMU advent-calendar tarball.
        asset_url = ('https://qemu-advcal.gitlab.io'
                     '/qac-best-of-multiarch/download/day04.tar.xz')
        asset_sha1 = 'f46724d281a9f30fa892d458be7beb7d34dc25f9'
        tarball = self.fetch_asset(asset_url, asset_hash=asset_sha1)
        archive.extract(tarball, self.workdir)
        self.vm.set_console()
        self.vm.add_args('-kernel', self.workdir + '/creek/creek.bin')
        self.vm.launch()
        # The image must print its banner; a kernel panic is a failure.
        wait_for_console_pattern(self, 'QEMU advent calendar 2020',
                                 self.panic_message)
1,273
35.4
70
py
qemu
qemu-master/tests/avocado/machine_s390_ccw_virtio.py
# Functional test that boots an s390x Linux guest with ccw and PCI devices
# attached and checks whether the devices are recognized by Linux
#
# Copyright (c) 2020 Red Hat, Inc.
#
# Author:
#  Cornelia Huck <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.

import os
import tempfile

from avocado import skipIf
from avocado_qemu import QemuSystemTest
from avocado_qemu import exec_command_and_wait_for_pattern
from avocado_qemu import wait_for_console_pattern
from avocado.utils import archive


class S390CCWVirtioMachine(QemuSystemTest):
    KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '

    timeout = 120

    def wait_for_console_pattern(self, success_message, vm=None):
        """Wait for success_message on the console; fail on a kernel panic."""
        wait_for_console_pattern(self, success_message,
                                 failure_message='Kernel panic - not syncing',
                                 vm=vm)

    def wait_for_crw_reports(self):
        """Poll the guest dmesg until a CRW (channel report word) shows up."""
        exec_command_and_wait_for_pattern(self,
                        'while ! (dmesg -c | grep CRW) ; do sleep 1 ; done',
                        'CRW reports')

    # counter making each dm_clear marker unique across calls
    dmesg_clear_count = 1
    def clear_guest_dmesg(self):
        """Clear the guest dmesg buffer and sync on a unique echo marker."""
        # raw string: '\ ' is an invalid Python escape (SyntaxWarning on
        # Python >= 3.12); the backslash must reach the guest shell verbatim
        exec_command_and_wait_for_pattern(self, 'dmesg -c > /dev/null; '
                    r'echo dm_clear\ ' + str(self.dmesg_clear_count),
                    'dm_clear ' + str(self.dmesg_clear_count))
        self.dmesg_clear_count += 1

    def test_s390x_devices(self):
        """
        :avocado: tags=arch:s390x
        :avocado: tags=machine:s390-ccw-virtio
        """

        kernel_url = ('https://snapshot.debian.org/archive/debian/'
                      '20201126T092837Z/dists/buster/main/installer-s390x/'
                      '20190702+deb10u6/images/generic/kernel.debian')
        kernel_hash = '5821fbee57d6220a067a8b967d24595621aa1eb6'
        kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)

        initrd_url = ('https://snapshot.debian.org/archive/debian/'
                      '20201126T092837Z/dists/buster/main/installer-s390x/'
                      '20190702+deb10u6/images/generic/initrd.debian')
        initrd_hash = '81ba09c97bef46e8f4660ac25b4ac0a5be3a94d6'
        initrd_path = self.fetch_asset(initrd_url, asset_hash=initrd_hash)

        self.vm.set_console()
        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
                               'console=sclp0 root=/dev/ram0 BOOT_DEBUG=3')
        self.vm.add_args('-nographic',
                         '-kernel', kernel_path,
                         '-initrd', initrd_path,
                         '-append', kernel_command_line,
                         '-cpu', 'max,prno-trng=off',
                         '-device', 'virtio-net-ccw,devno=fe.1.1111',
                         '-device',
                         'virtio-rng-ccw,devno=fe.2.0000,max_revision=0,id=rn1',
                         '-device',
                         'virtio-rng-ccw,devno=fe.3.1234,max_revision=2,id=rn2',
                         '-device', 'zpci,uid=5,target=zzz',
                         '-device', 'virtio-net-pci,id=zzz',
                         '-device', 'zpci,uid=0xa,fid=12,target=serial',
                         '-device', 'virtio-serial-pci,id=serial',
                         '-device', 'virtio-balloon-ccw')
        self.vm.launch()

        shell_ready = "sh: can't access tty; job control turned off"
        self.wait_for_console_pattern(shell_ready)
        # first debug shell is too early, we need to wait for device detection
        exec_command_and_wait_for_pattern(self, 'exit', shell_ready)

        ccw_bus_ids = "0.1.1111 0.2.0000 0.3.1234"
        pci_bus_ids = "0005:00:00.0 000a:00:00.0"
        exec_command_and_wait_for_pattern(self, 'ls /sys/bus/ccw/devices/',
                                          ccw_bus_ids)
        exec_command_and_wait_for_pattern(self, 'ls /sys/bus/pci/devices/',
                                          pci_bus_ids)
        # check that the device at 0.2.0000 is in legacy mode, while the
        # device at 0.3.1234 has the virtio-1 feature bit set
        virtio_rng_features = "00000000000000000000000000001100" + \
                              "10000000000000000000000000000000"
        virtio_rng_features_legacy = "00000000000000000000000000001100" + \
                                     "00000000000000000000000000000000"
        exec_command_and_wait_for_pattern(self,
                        'cat /sys/bus/ccw/devices/0.2.0000/virtio?/features',
                        virtio_rng_features_legacy)
        exec_command_and_wait_for_pattern(self,
                        'cat /sys/bus/ccw/devices/0.3.1234/virtio?/features',
                        virtio_rng_features)
        # check that /dev/hwrng works - and that it's gone after ejecting
        exec_command_and_wait_for_pattern(self,
                        'dd if=/dev/hwrng of=/dev/null bs=1k count=10',
                        '10+0 records out')
        self.clear_guest_dmesg()
        self.vm.command('device_del', id='rn1')
        self.wait_for_crw_reports()
        self.clear_guest_dmesg()
        self.vm.command('device_del', id='rn2')
        self.wait_for_crw_reports()
        exec_command_and_wait_for_pattern(self,
                        'dd if=/dev/hwrng of=/dev/null bs=1k count=10',
                        'dd: /dev/hwrng: No such device')
        # verify that we indeed have virtio-net devices (without having the
        # virtio-net driver handy)
        exec_command_and_wait_for_pattern(self,
                                    'cat /sys/bus/ccw/devices/0.1.1111/cutype',
                                    '3832/01')
        # raw strings below: '\:' is an invalid Python escape (SyntaxWarning
        # on Python >= 3.12); the backslashes must reach the guest shell
        # verbatim, so r-prefix keeps the runtime bytes identical
        exec_command_and_wait_for_pattern(self,
                    r'cat /sys/bus/pci/devices/0005\:00\:00.0/subsystem_vendor',
                    '0x1af4')
        exec_command_and_wait_for_pattern(self,
                    r'cat /sys/bus/pci/devices/0005\:00\:00.0/subsystem_device',
                    '0x0001')
        # check fid propagation
        exec_command_and_wait_for_pattern(self,
                        r'cat /sys/bus/pci/devices/000a\:00\:00.0/function_id',
                        '0x0000000c')
        # add another device
        self.clear_guest_dmesg()
        self.vm.command('device_add', driver='virtio-net-ccw',
                        devno='fe.0.4711', id='net_4711')
        self.wait_for_crw_reports()
        exec_command_and_wait_for_pattern(self, 'for i in 1 2 3 4 5 6 7 ; do '
                    'if [ -e /sys/bus/ccw/devices/*4711 ]; then break; fi ;'
                    'sleep 1 ; done ; ls /sys/bus/ccw/devices/',
                    '0.0.4711')
        # and detach it again
        self.clear_guest_dmesg()
        self.vm.command('device_del', id='net_4711')
        self.vm.event_wait(name='DEVICE_DELETED',
                           match={'data': {'device': 'net_4711'}})
        self.wait_for_crw_reports()
        exec_command_and_wait_for_pattern(self,
                                          'ls /sys/bus/ccw/devices/0.0.4711',
                                          'No such file or directory')
        # test the virtio-balloon device
        exec_command_and_wait_for_pattern(self, 'head -n 1 /proc/meminfo',
                                          'MemTotal: 115640 kB')
        self.vm.command('human-monitor-command', command_line='balloon 96')
        exec_command_and_wait_for_pattern(self, 'head -n 1 /proc/meminfo',
                                          'MemTotal: 82872 kB')
        self.vm.command('human-monitor-command', command_line='balloon 128')
        exec_command_and_wait_for_pattern(self, 'head -n 1 /proc/meminfo',
                                          'MemTotal: 115640 kB')

    @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab')
    def test_s390x_fedora(self):
        """
        :avocado: tags=arch:s390x
        :avocado: tags=machine:s390-ccw-virtio
        :avocado: tags=device:virtio-gpu
        :avocado: tags=device:virtio-crypto
        :avocado: tags=device:virtio-net
        """

        kernel_url = ('https://archives.fedoraproject.org/pub/archive'
                      '/fedora-secondary/releases/31/Server/s390x/os'
                      '/images/kernel.img')
        kernel_hash = 'b93d1efcafcf29c1673a4ce371a1f8b43941cfeb'
        kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)

        initrd_url = ('https://archives.fedoraproject.org/pub/archive'
                      '/fedora-secondary/releases/31/Server/s390x/os'
                      '/images/initrd.img')
        initrd_hash = '3de45d411df5624b8d8ef21cd0b44419ab59b12f'
        initrd_path_xz = self.fetch_asset(initrd_url, asset_hash=initrd_hash)
        initrd_path = os.path.join(self.workdir, 'initrd-raw.img')
        archive.lzma_uncompress(initrd_path_xz, initrd_path)

        self.vm.set_console()
        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + ' audit=0 '
                               'rd.plymouth=0 plymouth.enable=0 rd.rescue')
        self.vm.add_args('-nographic',
                         '-smp', '4',
                         '-m', '512',
                         '-name', 'Some Guest Name',
                         '-uuid', '30de4fd9-b4d5-409e-86a5-09b387f70bfa',
                         '-kernel', kernel_path,
                         '-initrd', initrd_path,
                         '-append', kernel_command_line,
                         '-device', 'zpci,uid=7,target=n',
                         '-device', 'virtio-net-pci,id=n,mac=02:ca:fe:fa:ce:12',
                         '-device', 'virtio-rng-ccw,devno=fe.1.9876',
                         '-device', 'virtio-gpu-ccw,devno=fe.2.5432')
        self.vm.launch()
        self.wait_for_console_pattern('Entering emergency mode')

        # Some tests to see whether the CLI options have been considered:
        self.log.info("Test whether QEMU CLI options have been considered")
        exec_command_and_wait_for_pattern(self,
                        'while ! (dmesg | grep enP7p0s0) ; do sleep 1 ; done',
                        'virtio_net virtio0 enP7p0s0: renamed')
        exec_command_and_wait_for_pattern(self, 'lspci',
                        '0007:00:00.0 Class 0200: Device 1af4:1000')
        exec_command_and_wait_for_pattern(self,
                        'cat /sys/class/net/enP7p0s0/address',
                        '02:ca:fe:fa:ce:12')
        exec_command_and_wait_for_pattern(self, 'lscss', '0.1.9876')
        exec_command_and_wait_for_pattern(self, 'lscss', '0.2.5432')
        exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
                                          'processors : 4')
        exec_command_and_wait_for_pattern(self, 'grep MemTotal /proc/meminfo',
                                          'MemTotal: 499848 kB')
        exec_command_and_wait_for_pattern(self, 'grep Name /proc/sysinfo',
                                          'Extended Name: Some Guest Name')
        exec_command_and_wait_for_pattern(self, 'grep UUID /proc/sysinfo',
                                          '30de4fd9-b4d5-409e-86a5-09b387f70bfa')

        # Disable blinking cursor, then write some stuff into the framebuffer.
        # QEMU's PPM screendumps contain uncompressed 24-bit values, while the
        # framebuffer uses 32-bit, so we pad our text with some spaces when
        # writing to the framebuffer. Since the PPM is uncompressed, we then
        # can simply read the written "magic bytes" back from the PPM file to
        # check whether the framebuffer is working as expected.
        self.log.info("Test screendump of virtio-gpu device")
        exec_command_and_wait_for_pattern(self,
                        'while ! (dmesg | grep gpudrmfb) ; do sleep 1 ; done',
                        'virtio_gpudrmfb frame buffer device')
        # raw string: '\e' is an invalid Python escape; the ANSI escape
        # sequence must reach the guest's echo -e verbatim
        exec_command_and_wait_for_pattern(self,
            r'echo -e "\e[?25l" > /dev/tty0', ':/#')
        exec_command_and_wait_for_pattern(self, 'for ((i=0;i<250;i++)); do '
            'echo " The qu ick fo x j ump s o ver a laz y d og" >> fox.txt;'
            'done',
            ':/#')
        exec_command_and_wait_for_pattern(self,
            'dd if=fox.txt of=/dev/fb0 bs=1000 oflag=sync,nocache ; rm fox.txt',
            '12+0 records out')
        with tempfile.NamedTemporaryFile(suffix='.ppm',
                                         prefix='qemu-scrdump-') as ppmfile:
            self.vm.command('screendump', filename=ppmfile.name)
            ppmfile.seek(0)
            line = ppmfile.readline()
            self.assertEqual(line, b"P6\n")
            line = ppmfile.readline()
            self.assertEqual(line, b"1280 800\n")
            line = ppmfile.readline()
            self.assertEqual(line, b"255\n")
            line = ppmfile.readline(256)
            self.assertEqual(line, b"The quick fox jumps over a lazy dog\n")

        # Hot-plug a virtio-crypto device and see whether it gets accepted
        self.log.info("Test hot-plug virtio-crypto device")
        self.clear_guest_dmesg()
        self.vm.command('object-add', qom_type='cryptodev-backend-builtin',
                        id='cbe0')
        self.vm.command('device_add', driver='virtio-crypto-ccw',
                        id='crypdev0', cryptodev='cbe0', devno='fe.0.2342')
        exec_command_and_wait_for_pattern(self,
                        'while ! (dmesg -c | grep Accelerator.device) ; do'
                        ' sleep 1 ; done', 'Accelerator device is ready')
        exec_command_and_wait_for_pattern(self, 'lscss', '0.0.2342')
        self.vm.command('device_del', id='crypdev0')
        self.vm.command('object-del', id='cbe0')
        exec_command_and_wait_for_pattern(self,
                        'while ! (dmesg -c | grep Start.virtcrypto_remove) ; do'
                        ' sleep 1 ; done', 'Start virtcrypto_remove.')
13,917
49.79562
80
py
qemu
qemu-master/tests/avocado/ppc_405.py
# Test that the U-Boot firmware boots on ppc 405 machines and check the console
#
# Copyright (c) 2021 Red Hat, Inc.
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.

from avocado.utils import archive
from avocado_qemu import QemuSystemTest
from avocado_qemu import wait_for_console_pattern
from avocado_qemu import exec_command_and_wait_for_pattern


class Ppc405Machine(QemuSystemTest):
    """Boot the U-Boot firmware on a ppc405 board and drive its console."""

    timeout = 90

    def do_test_ppc405(self):
        # Fetch the taihu U-Boot image and boot it as the machine firmware.
        firmware_url = ('https://gitlab.com/huth/u-boot/-/raw/'
                        'taihu-2021-10-09/u-boot-taihu.bin')
        firmware_sha1 = '3208940e908a5edc7c03eab072c60f0dcfadc2ab'
        firmware_path = self.fetch_asset(firmware_url,
                                         asset_hash=firmware_sha1)
        self.vm.set_console(console_index=1)
        self.vm.add_args('-bios', firmware_path)
        self.vm.launch()
        wait_for_console_pattern(self, 'AMCC PPC405EP Evaluation Board')
        # A 'reset' at the U-Boot prompt must bring the firmware back up.
        exec_command_and_wait_for_pattern(self, 'reset', 'AMCC PowerPC 405EP')

    def test_ppc_ref405ep(self):
        """
        :avocado: tags=arch:ppc
        :avocado: tags=machine:ref405ep
        :avocado: tags=cpu:405ep
        :avocado: tags=accel:tcg
        """
        self.require_accelerator("tcg")
        self.do_test_ppc405()
1,308
34.378378
79
py
qemu
qemu-master/tests/avocado/tcg_plugins.py
# TCG Plugins tests
#
# These are a little more involved than the basic tests run by check-tcg.
#
# Copyright (c) 2021 Linaro
#
# Author:
#  Alex Bennée <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later

import tempfile
import mmap
import re

from boot_linux_console import LinuxKernelTest


class PluginKernelBase(LinuxKernelTest):
    """
    Boots a Linux kernel with a TCG plugin enabled.
    """

    timeout = 120
    KERNEL_COMMON_COMMAND_LINE = 'printk.time=1 panic=-1 '

    def run_vm(self, kernel_path, kernel_command_line,
               plugin, plugin_log, console_pattern, args=None):
        """Boot kernel_path with the given TCG plugin loaded.

        Plugin output goes to plugin_log; the VM is shut down before
        returning so the log is fully flushed to disk.
        """
        vm = self.get_vm()
        vm.set_console()
        vm.add_args('-kernel', kernel_path,
                    '-append', kernel_command_line,
                    '-plugin', plugin,
                    '-d', 'plugin',
                    '-D', plugin_log,
                    '-net', 'none',
                    '-no-reboot')
        if args:
            vm.add_args(*args)

        try:
            vm.launch()
        except Exception:
            # Was a bare 'except:', which would also swallow
            # KeyboardInterrupt/SystemExit.
            # TODO: probably fails because plugins not enabled but we
            # can't currently probe for the feature.
            self.cancel("TCG Plugins not enabled?")

        self.wait_for_console_pattern(console_pattern, vm)
        # ensure logs are flushed
        vm.shutdown()


class PluginKernelNormal(PluginKernelBase):

    def _grab_aarch64_kernel(self):
        """Fetch a known aarch64 Debian kernel and return its path."""
        kernel_url = ('http://security.debian.org/'
                      'debian-security/pool/updates/main/l/linux-signed-arm64/'
                      'linux-image-4.19.0-12-arm64_4.19.152-1_arm64.deb')
        kernel_sha1 = '2036c2792f80ac9c4ccaae742b2e0a28385b6010'
        kernel_deb = self.fetch_asset(kernel_url, asset_hash=kernel_sha1)
        kernel_path = self.extract_from_deb(kernel_deb,
                                            "/boot/vmlinuz-4.19.0-12-arm64")
        return kernel_path

    def test_aarch64_virt_insn(self):
        """
        :avocado: tags=accel:tcg
        :avocado: tags=arch:aarch64
        :avocado: tags=machine:virt
        :avocado: tags=cpu:cortex-a53
        """
        kernel_path = self._grab_aarch64_kernel()
        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
                               'console=ttyAMA0')
        console_pattern = 'Kernel panic - not syncing: VFS:'

        plugin_log = tempfile.NamedTemporaryFile(mode="r+t", prefix="plugin",
                                                 suffix=".log")

        self.run_vm(kernel_path, kernel_command_line,
                    "tests/plugin/libinsn.so", plugin_log.name,
                    console_pattern)

        with plugin_log as lf, \
             mmap.mmap(lf.fileno(), 0, access=mmap.ACCESS_READ) as s:

            m = re.search(br"insns: (?P<count>\d+)", s)
            # m is None when the plugin produced no output at all; the
            # original called m.groupdict() unconditionally, which would
            # raise AttributeError instead of failing cleanly
            if m is None or "count" not in m.groupdict():
                self.fail("Failed to find instruction count")

    def test_aarch64_virt_insn_icount(self):
        """
        :avocado: tags=accel:tcg
        :avocado: tags=arch:aarch64
        :avocado: tags=machine:virt
        :avocado: tags=cpu:cortex-a53
        """
        kernel_path = self._grab_aarch64_kernel()
        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
                               'console=ttyAMA0')
        console_pattern = 'Kernel panic - not syncing: VFS:'

        plugin_log = tempfile.NamedTemporaryFile(mode="r+t", prefix="plugin",
                                                 suffix=".log")

        self.run_vm(kernel_path, kernel_command_line,
                    "tests/plugin/libinsn.so", plugin_log.name,
                    console_pattern,
                    args=('-icount', 'shift=1'))

        with plugin_log as lf, \
             mmap.mmap(lf.fileno(), 0, access=mmap.ACCESS_READ) as s:
            m = re.search(br"detected repeat execution @ (?P<addr>0x[0-9A-Fa-f]+)", s)
            if m is not None and "addr" in m.groupdict():
                self.fail("detected repeated instructions")

    def test_aarch64_virt_mem_icount(self):
        """
        :avocado: tags=accel:tcg
        :avocado: tags=arch:aarch64
        :avocado: tags=machine:virt
        :avocado: tags=cpu:cortex-a53
        """
        kernel_path = self._grab_aarch64_kernel()
        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
                               'console=ttyAMA0')
        console_pattern = 'Kernel panic - not syncing: VFS:'

        plugin_log = tempfile.NamedTemporaryFile(mode="r+t", prefix="plugin",
                                                 suffix=".log")

        self.run_vm(kernel_path, kernel_command_line,
                    "tests/plugin/libmem.so,inline=true,callback=true",
                    plugin_log.name, console_pattern,
                    args=('-icount', 'shift=1'))

        with plugin_log as lf, \
             mmap.mmap(lf.fileno(), 0, access=mmap.ACCESS_READ) as s:
            # findall never returns None, so only the length needs checking
            # (the original also compared against None)
            m = re.findall(br"mem accesses: (?P<count>\d+)", s)
            if len(m) != 2:
                self.fail("no memory access counts found")
            else:
                inline = int(m[0])
                callback = int(m[1])
                if inline != callback:
                    self.fail("mismatched access counts")
5,294
34.777027
88
py
qemu
qemu-master/tests/avocado/machine_arm_n8x0.py
# Functional test that boots a Linux kernel and checks the console
#
# Copyright (c) 2020 Red Hat, Inc.
#
# Author:
#  Thomas Huth <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.

import os

from avocado import skipUnless
from avocado_qemu import QemuSystemTest
from avocado_qemu import wait_for_console_pattern


class N8x0Machine(QemuSystemTest):
    """Boots the Linux kernel and checks that the console is operational"""

    timeout = 90

    def __do_test_n8x0(self):
        # The same MeeGo kernel image works on both the n800 and the n810.
        image_url = ('http://stskeeps.subnetmask.net/meego-n8x0/'
                     'meego-arm-n8x0-1.0.80.20100712.1431-'
                     'vmlinuz-2.6.35~rc4-129.1-n8x0')
        image_sha1 = 'e9d5ab8d7548923a0061b6fbf601465e479ed269'
        kernel = self.fetch_asset(image_url, asset_hash=image_sha1)

        self.vm.set_console(console_index=1)
        self.vm.add_args('-kernel', kernel,
                         '-append', 'printk.time=0 console=ttyS1')
        self.vm.launch()
        wait_for_console_pattern(self, 'TSC2005 driver initializing')

    @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code')
    def test_n800(self):
        """
        :avocado: tags=arch:arm
        :avocado: tags=machine:n800
        """
        self.__do_test_n8x0()

    @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code')
    def test_n810(self):
        """
        :avocado: tags=arch:arm
        :avocado: tags=machine:n810
        """
        self.__do_test_n8x0()
1,604
31.1
76
py
qemu
qemu-master/tests/avocado/kvm_xen_guest.py
# KVM Xen guest functional tests
#
# Copyright © 2021 Red Hat, Inc.
# Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Author:
#  David Woodhouse <[email protected]>
#  Alex Bennée <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later

import os

from qemu.machine import machine

from avocado_qemu import LinuxSSHMixIn
from avocado_qemu import QemuSystemTest
from avocado_qemu import wait_for_console_pattern


class KVMXenGuest(QemuSystemTest, LinuxSSHMixIn):
    """
    :avocado: tags=arch:x86_64
    :avocado: tags=machine:q35
    :avocado: tags=accel:kvm
    :avocado: tags=kvm_xen_guest
    """

    KERNEL_DEFAULT = 'printk.time=0 root=/dev/xvda console=ttyS0'

    kernel_path = None
    kernel_params = None

    # Fetch assets from the kvm-xen-guest subdir of my shared test
    # images directory on fileserver.linaro.org where you can find
    # build instructions for how they where assembled.
    def get_asset(self, name, sha1):
        """Fetch asset 'name' (verified by sha1) from the Linaro fileserver."""
        base_url = ('https://fileserver.linaro.org/s/'
                    'kE4nCFLdQcoBF9t/download?'
                    'path=%2Fkvm-xen-guest&files=')
        url = base_url + name
        # use explicit name rather than failing to neatly parse the
        # URL into a unique one
        # FIX: '(url)' is just a parenthesized string, not a tuple —
        # 'locations' expects an iterable of URLs, so pass a real 1-tuple
        return self.fetch_asset(name=name, locations=(url,), asset_hash=sha1)

    def common_vm_setup(self):
        """Configure the VM for Xen emulation and fetch kernel + rootfs."""
        # We also catch lack of KVM_XEN support if we fail to launch
        self.require_accelerator("kvm")

        self.vm.set_console()

        self.vm.add_args("-accel",
                         "kvm,xen-version=0x4000a,kernel-irqchip=split")
        self.vm.add_args("-smp", "2")
        self.kernel_path = self.get_asset(
            "bzImage", "367962983d0d32109998a70b45dcee4672d0b045")
        self.rootfs = self.get_asset(
            "rootfs.ext4", "f1478401ea4b3fa2ea196396be44315bab2bb5e4")

    def run_and_check(self):
        """Launch the guest, wait for sshd and run basic sanity commands.

        Cancels (rather than fails) when the host kernel or the QEMU
        binary lacks Xen guest support.
        """
        self.vm.add_args('-kernel', self.kernel_path,
                         '-append', self.kernel_params,
                         '-drive',
                         f"file={self.rootfs},if=none,format=raw,id=drv0",
                         '-device', 'xen-disk,drive=drv0,vdev=xvda',
                         '-device', 'virtio-net-pci,netdev=unet',
                         '-netdev', 'user,id=unet,hostfwd=:127.0.0.1:0-:22')
        try:
            self.vm.launch()
        except machine.VMLaunchFailure as e:
            if "Xen HVM guest support not present" in e.output:
                self.cancel("KVM Xen support is not present "
                            "(need v5.12+ kernel with CONFIG_KVM_XEN)")
            elif "Property 'kvm-accel.xen-version' not found" in e.output:
                self.cancel("QEMU not built with CONFIG_XEN_EMU support")
            else:
                raise e

        self.log.info('VM launched, waiting for sshd')
        console_pattern = 'Starting dropbear sshd: OK'
        wait_for_console_pattern(self, console_pattern, 'Oops')
        self.log.info('sshd ready')

        self.ssh_connect('root', '', False)
        self.ssh_command('cat /proc/cmdline')
        self.ssh_command('dmesg | grep -e "Grant table initialized"')

    def test_kvm_xen_guest(self):
        """
        :avocado: tags=kvm_xen_guest
        """
        self.common_vm_setup()
        self.kernel_params = (self.KERNEL_DEFAULT +
                              ' xen_emul_unplug=ide-disks')
        self.run_and_check()
        self.ssh_command('grep xen-pirq.*msi /proc/interrupts')

    def test_kvm_xen_guest_nomsi(self):
        """
        :avocado: tags=kvm_xen_guest_nomsi
        """
        self.common_vm_setup()
        self.kernel_params = (self.KERNEL_DEFAULT +
                              ' xen_emul_unplug=ide-disks pci=nomsi')
        self.run_and_check()
        self.ssh_command('grep xen-pirq.* /proc/interrupts')

    def test_kvm_xen_guest_noapic_nomsi(self):
        """
        :avocado: tags=kvm_xen_guest_noapic_nomsi
        """
        self.common_vm_setup()
        self.kernel_params = (self.KERNEL_DEFAULT +
                              ' xen_emul_unplug=ide-disks noapic pci=nomsi')
        self.run_and_check()
        self.ssh_command('grep xen-pirq /proc/interrupts')

    def test_kvm_xen_guest_vapic(self):
        """
        :avocado: tags=kvm_xen_guest_vapic
        """
        self.common_vm_setup()
        self.vm.add_args('-cpu', 'host,+xen-vapic')
        self.kernel_params = (self.KERNEL_DEFAULT +
                              ' xen_emul_unplug=ide-disks')
        self.run_and_check()
        self.ssh_command('grep xen-pirq /proc/interrupts')
        self.ssh_command('grep PCI-MSI /proc/interrupts')

    def test_kvm_xen_guest_novector(self):
        """
        :avocado: tags=kvm_xen_guest_novector
        """
        self.common_vm_setup()
        self.kernel_params = (self.KERNEL_DEFAULT +
                              ' xen_emul_unplug=ide-disks' +
                              ' xen_no_vector_callback')
        self.run_and_check()
        self.ssh_command('grep xen-platform-pci /proc/interrupts')

    def test_kvm_xen_guest_novector_nomsi(self):
        """
        :avocado: tags=kvm_xen_guest_novector_nomsi
        """
        self.common_vm_setup()
        self.kernel_params = (self.KERNEL_DEFAULT +
                              ' xen_emul_unplug=ide-disks pci=nomsi' +
                              ' xen_no_vector_callback')
        self.run_and_check()
        self.ssh_command('grep xen-platform-pci /proc/interrupts')

    def test_kvm_xen_guest_novector_noapic(self):
        """
        :avocado: tags=kvm_xen_guest_novector_noapic
        """
        self.common_vm_setup()
        self.kernel_params = (self.KERNEL_DEFAULT +
                              ' xen_emul_unplug=ide-disks' +
                              ' xen_no_vector_callback noapic')
        self.run_and_check()
        self.ssh_command('grep xen-platform-pci /proc/interrupts')
5,998
33.877907
85
py
qemu
qemu-master/tests/avocado/machine_arm_canona1100.py
# Functional test that boots the canon-a1100 machine with firmware
#
# Copyright (c) 2020 Red Hat, Inc.
#
# Author:
#  Thomas Huth <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.

from avocado_qemu import QemuSystemTest
from avocado_qemu import wait_for_console_pattern
from avocado.utils import archive


class CanonA1100Machine(QemuSystemTest):
    """Boots the barebox firmware and checks that the console is operational"""

    timeout = 90

    def test_arm_canona1100(self):
        """
        :avocado: tags=arch:arm
        :avocado: tags=machine:canon-a1100
        :avocado: tags=device:pflash_cfi02
        """
        # The barebox binary ships inside the advent-calendar day-18 tarball.
        day18_url = ('https://qemu-advcal.gitlab.io'
                     '/qac-best-of-multiarch/download/day18.tar.xz')
        day18_sha1 = '068b5fc4242b29381acee94713509f8a876e9db6'
        tarball = self.fetch_asset(day18_url, asset_hash=day18_sha1)
        archive.extract(tarball, self.workdir)
        self.vm.set_console()
        self.vm.add_args('-bios',
                         self.workdir + '/day18/barebox.canon-a1100.bin')
        self.vm.launch()
        wait_for_console_pattern(self, 'running /env/bin/init')
1,246
33.638889
79
py
qemu
qemu-master/tests/avocado/acpi-bits.py
#!/usr/bin/env python3
# group: rw quick
# Exercise QEMU generated ACPI/SMBIOS tables using biosbits,
# https://biosbits.org/
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
#
# Author:
#  Ani Sinha <[email protected]>

# pylint: disable=invalid-name
# pylint: disable=consider-using-f-string

"""
This is QEMU ACPI/SMBIOS avocado tests using biosbits.
Biosbits is available originally at https://biosbits.org/.
This test uses a fork of the upstream bits and has numerous fixes
including an upgraded acpica. The fork is located here:
https://gitlab.com/qemu-project/biosbits-bits .
"""

import logging
import os
import platform
import re
import shutil
import subprocess
import tarfile
import tempfile
import time
import zipfile
from typing import (
    List,
    Optional,
    Sequence,
)
from qemu.machine import QEMUMachine
from avocado import skipIf
from avocado_qemu import QemuBaseTest

deps = ["xorriso", "mformat"] # dependent tools needed in the test setup/box.
supported_platforms = ['x86_64'] # supported test platforms.


def which(tool):
    """ looks up the full path for @tool, returns None if not found
        or if @tool does not have executable permissions.
    """
    # PATH may be unset in a stripped-down environment; default to an
    # empty string so split() below cannot raise AttributeError on None.
    paths = os.getenv('PATH', '')
    for p in paths.split(os.path.pathsep):
        p = os.path.join(p, tool)
        if os.path.exists(p) and os.access(p, os.X_OK):
            return p
    return None

def missing_deps():
    """ returns True if any of the test dependent tools are absent.
    """
    for dep in deps:
        if which(dep) is None:
            return True
    return False

def supported_platform():
    """ checks if the test is running on a supported platform.
    """
    return platform.machine() in supported_platforms

class QEMUBitsMachine(QEMUMachine):
    # pylint: disable=too-few-public-methods
    """
    A QEMU VM, with isa-debugcon enabled and bits iso passed
    using -cdrom to QEMU commandline.
    """
    def __init__(self,
                 binary: str,
                 args: Sequence[str] = (),
                 wrapper: Sequence[str] = (),
                 name: Optional[str] = None,
                 base_temp_dir: str = "/var/tmp",
                 debugcon_log: str = "debugcon-log.txt",
                 debugcon_addr: str = "0x403",
                 sock_dir: Optional[str] = None,
                 qmp_timer: Optional[float] = None):
        # pylint: disable=too-many-arguments
        if name is None:
            name = "qemu-bits-%d" % os.getpid()
        if sock_dir is None:
            sock_dir = base_temp_dir
        super().__init__(binary, args, wrapper=wrapper, name=name,
                         base_temp_dir=base_temp_dir,
                         sock_dir=sock_dir, qmp_timer=qmp_timer)
        self.debugcon_log = debugcon_log
        self.debugcon_addr = debugcon_addr
        self.base_temp_dir = base_temp_dir

    @property
    def _base_args(self) -> List[str]:
        # Extend the parent's argument list with an isa-debugcon device
        # whose output is captured in a log file under the temp directory.
        args = super()._base_args
        args.extend(['-chardev',
                     'file,path=%s,id=debugcon' %
                     os.path.join(self.base_temp_dir, self.debugcon_log),
                     '-device',
                     'isa-debugcon,iobase=%s,chardev=debugcon' %
                     self.debugcon_addr,
                     ])
        return args

    def base_args(self):
        """return the base argument to QEMU binary"""
        return self._base_args

@skipIf(not supported_platform() or missing_deps() or os.getenv('GITLAB_CI'),
        'incorrect platform or dependencies (%s) not installed ' \
        'or running on GitLab' % ','.join(deps))
class AcpiBitsTest(QemuBaseTest): #pylint: disable=too-many-instance-attributes
    """
    ACPI and SMBIOS tests using biosbits.

    :avocado: tags=arch:x86_64
    :avocado: tags=acpi
    """
    # in slower systems the test can take as long as 3 minutes to complete.
    timeout = 200

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._vm = None
        self._workDir = None
        self._baseDir = None

        # following are some standard configuration constants
        self._bitsInternalVer = 2020
        self._bitsCommitHash = 'b48b88ff' # commit hash must match
                                          # the artifact tag below
        self._bitsTag = "qemu-bits-10182022" # this is the latest bits
                                             # release as of today.
        self._bitsArtSHA1Hash = 'b04790ac9b99b5662d0416392c73b97580641fe5'
        self._bitsArtURL = ("https://gitlab.com/qemu-project/"
                            "biosbits-bits/-/jobs/artifacts/%s/"
                            "download?job=qemu-bits-build" % self._bitsTag)
        self._debugcon_addr = '0x403'
        self._debugcon_log = 'debugcon-log.txt'
        logging.basicConfig(level=logging.INFO)
        self.logger = logging.getLogger('acpi-bits')

    def _print_log(self, log):
        """Dump the biosbits debugcon log through the test logger."""
        self.logger.info('\nlogs from biosbits follows:')
        self.logger.info('==========================================\n')
        self.logger.info(log)
        self.logger.info('==========================================\n')

    def copy_bits_config(self):
        """ copies the bios bits config file into bits.
        """
        config_file = 'bits-cfg.txt'
        bits_config_dir = os.path.join(self._baseDir, 'acpi-bits',
                                       'bits-config')
        target_config_dir = os.path.join(self._workDir,
                                         'bits-%d' % self._bitsInternalVer,
                                         'boot')
        self.assertTrue(os.path.exists(bits_config_dir))
        self.assertTrue(os.path.exists(target_config_dir))
        self.assertTrue(os.access(os.path.join(bits_config_dir,
                                               config_file), os.R_OK))
        shutil.copy2(os.path.join(bits_config_dir, config_file),
                     target_config_dir)
        self.logger.info('copied config file %s to %s',
                         config_file, target_config_dir)

    def copy_test_scripts(self):
        """copies the python test scripts into bits. """

        bits_test_dir = os.path.join(self._baseDir, 'acpi-bits',
                                     'bits-tests')
        target_test_dir = os.path.join(self._workDir,
                                       'bits-%d' % self._bitsInternalVer,
                                       'boot', 'python')

        self.assertTrue(os.path.exists(bits_test_dir))
        self.assertTrue(os.path.exists(target_test_dir))

        for filename in os.listdir(bits_test_dir):
            if os.path.isfile(os.path.join(bits_test_dir, filename)) and \
               filename.endswith('.py2'):
                # all test scripts are named with extension .py2 so that
                # avocado does not try to load them. These scripts are
                # written for python 2.7 not python 3 and hence if avocado
                # loaded them, it would complain about python 3 specific
                # syntaxes.
                newfilename = os.path.splitext(filename)[0] + '.py'
                shutil.copy2(os.path.join(bits_test_dir, filename),
                             os.path.join(target_test_dir, newfilename))
                self.logger.info('copied test file %s to %s',
                                 filename, target_test_dir)

                # now remove the pyc test file if it exists, otherwise the
                # changes in the python test script won't be executed.
                testfile_pyc = os.path.splitext(filename)[0] + '.pyc'
                if os.access(os.path.join(target_test_dir, testfile_pyc),
                             os.F_OK):
                    os.remove(os.path.join(target_test_dir, testfile_pyc))
                    self.logger.info('removed compiled file %s',
                                     os.path.join(target_test_dir,
                                                  testfile_pyc))

    def fix_mkrescue(self, mkrescue):
        """ grub-mkrescue is a bash script with two variables, 'prefix' and
            'libdir'. They must be pointed to the right location so that the
            iso can be generated appropriately. We point the two variables to
            the directory where we have extracted our pre-built bits grub
            tarball.
        """
        grub_x86_64_mods = os.path.join(self._workDir, 'grub-inst-x86_64-efi')
        grub_i386_mods = os.path.join(self._workDir, 'grub-inst')

        self.assertTrue(os.path.exists(grub_x86_64_mods))
        self.assertTrue(os.path.exists(grub_i386_mods))

        new_script = ""
        with open(mkrescue, 'r', encoding='utf-8') as filehandle:
            orig_script = filehandle.read()
            new_script = re.sub('(^prefix=)(.*)',
                                r'\1"%s"' % grub_x86_64_mods,
                                orig_script, flags=re.M)
            new_script = re.sub('(^libdir=)(.*)',
                                r'\1"%s/lib"' % grub_i386_mods,
                                new_script, flags=re.M)

        with open(mkrescue, 'w', encoding='utf-8') as filehandle:
            filehandle.write(new_script)

    def generate_bits_iso(self):
        """ Uses grub-mkrescue to generate a fresh bits iso with the python
            test scripts
        """
        bits_dir = os.path.join(self._workDir,
                                'bits-%d' % self._bitsInternalVer)
        iso_file = os.path.join(self._workDir,
                                'bits-%d.iso' % self._bitsInternalVer)
        mkrescue_script = os.path.join(self._workDir,
                                       'grub-inst-x86_64-efi', 'bin',
                                       'grub-mkrescue')

        self.assertTrue(os.access(mkrescue_script,
                                  os.R_OK | os.W_OK | os.X_OK))

        self.fix_mkrescue(mkrescue_script)

        self.logger.info('using grub-mkrescue for generating biosbits iso ...')

        try:
            if os.getenv('V') or os.getenv('BITS_DEBUG'):
                subprocess.check_call([mkrescue_script, '-o', iso_file,
                                       bits_dir], stderr=subprocess.STDOUT)
            else:
                subprocess.check_call([mkrescue_script, '-o',
                                       iso_file, bits_dir],
                                      stderr=subprocess.DEVNULL,
                                      stdout=subprocess.DEVNULL)
        except Exception as e: # pylint: disable=broad-except
            # best-effort: any mkrescue failure skips the test instead of
            # failing it, since the tooling environment is at fault.
            self.skipTest("Error while generating the bits iso. "
                          "Pass V=1 in the environment to get more details. "
                          + str(e))

        self.assertTrue(os.access(iso_file, os.R_OK))

        self.logger.info('iso file %s successfully generated.', iso_file)

    def setUp(self): # pylint: disable=arguments-differ
        super().setUp('qemu-system-')

        self._baseDir = os.getenv('AVOCADO_TEST_BASEDIR')

        # workdir could also be avocado's own workdir in self.workdir.
        # At present, I prefer to maintain my own temporary working
        # directory. It gives us more control over the generated bits
        # log files and also for debugging, we may choose not to remove
        # this working directory so that the logs and iso can be
        # inspected manually and archived if needed.
        self._workDir = tempfile.mkdtemp(prefix='acpi-bits-',
                                         suffix='.tmp')
        self.logger.info('working dir: %s', self._workDir)

        prebuiltDir = os.path.join(self._workDir, 'prebuilt')
        if not os.path.isdir(prebuiltDir):
            os.mkdir(prebuiltDir, mode=0o775)

        bits_zip_file = os.path.join(prebuiltDir, 'bits-%d-%s.zip'
                                     % (self._bitsInternalVer,
                                        self._bitsCommitHash))
        grub_tar_file = os.path.join(prebuiltDir,
                                     'bits-%d-%s-grub.tar.gz'
                                     % (self._bitsInternalVer,
                                        self._bitsCommitHash))

        bitsLocalArtLoc = self.fetch_asset(self._bitsArtURL,
                                           asset_hash=self._bitsArtSHA1Hash)
        self.logger.info("downloaded bits artifacts to %s", bitsLocalArtLoc)

        # extract the bits artifact in the temp working directory
        with zipfile.ZipFile(bitsLocalArtLoc, 'r') as zref:
            zref.extractall(prebuiltDir)

        # extract the bits software in the temp working directory
        with zipfile.ZipFile(bits_zip_file, 'r') as zref:
            zref.extractall(self._workDir)

        with tarfile.open(grub_tar_file, 'r', encoding='utf-8') as tarball:
            tarball.extractall(self._workDir)

        self.copy_test_scripts()
        self.copy_bits_config()
        self.generate_bits_iso()

    def parse_log(self):
        """parse the log generated by running bits tests and
           check for failures.
        """
        debugconf = os.path.join(self._workDir, self._debugcon_log)
        log = ""
        with open(debugconf, 'r', encoding='utf-8') as filehandle:
            log = filehandle.read()

        matchiter = re.finditer(r'(.*Summary: )(\d+ passed), (\d+ failed).*',
                                log)
        for match in matchiter:
            # verify that no test cases failed.
            try:
                self.assertEqual(match.group(3).split()[0], '0',
                                 'Some bits tests seems to have failed. ' \
                                 'Please check the test logs for more info.')
            except AssertionError as e:
                self._print_log(log)
                raise e
            else:
                if os.getenv('V') or os.getenv('BITS_DEBUG'):
                    self._print_log(log)

    def tearDown(self):
        """ Lets do some cleanups.
        """
        if self._vm:
            # NOTE(review): QEMUMachine.is_running is referenced without
            # calling it here; if it is a method (not a property), the
            # expression is always truthy and this assertion is a no-op.
            # Confirm against qemu.machine and call it if that is a bug.
            self.assertFalse(not self._vm.is_running)
        if not os.getenv('BITS_DEBUG'):
            self.logger.info('removing the work directory %s', self._workDir)
            shutil.rmtree(self._workDir)
        else:
            self.logger.info('not removing the work directory %s ' \
                             'as BITS_DEBUG is ' \
                             'passed in the environment', self._workDir)
        super().tearDown()

    def test_acpi_smbios_bits(self):
        """The main test case implementation."""

        iso_file = os.path.join(self._workDir,
                                'bits-%d.iso' % self._bitsInternalVer)

        self.assertTrue(os.access(iso_file, os.R_OK))

        self._vm = QEMUBitsMachine(binary=self.qemu_bin,
                                   base_temp_dir=self._workDir,
                                   debugcon_log=self._debugcon_log,
                                   debugcon_addr=self._debugcon_addr)

        self._vm.add_args('-cdrom', '%s' % iso_file)
        # the vm needs to be run under icount so that TCG emulation is
        # consistent in terms of timing. smilatency tests have consistent
        # timing requirements.
        self._vm.add_args('-icount', 'auto')

        args = " ".join(str(arg) for arg in self._vm.base_args()) + \
            " " + " ".join(str(arg) for arg in self._vm.args)

        self.logger.info("launching QEMU vm with the following arguments: %s",
                         args)

        self._vm.launch()
        # biosbits has been configured to run all the specified test suites
        # in batch mode and then automatically initiate a vm shutdown.
        # Rely on avocado's unit test timeout.
        self._vm.wait(timeout=None)
        self.parse_log()
16,312
39.884712
80
py
qemu
qemu-master/tests/avocado/machine_microblaze.py
# Functional test that boots a microblaze Linux kernel and checks the console # # Copyright (c) 2018, 2021 Red Hat, Inc. # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern from avocado.utils import archive class MicroblazeMachine(QemuSystemTest): timeout = 90 def test_microblaze_s3adsp1800(self): """ :avocado: tags=arch:microblaze :avocado: tags=machine:petalogix-s3adsp1800 """ tar_url = ('https://qemu-advcal.gitlab.io' '/qac-best-of-multiarch/download/day17.tar.xz') tar_hash = '08bf3e3bfb6b6c7ce1e54ab65d54e189f2caf13f' file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) archive.extract(file_path, self.workdir) self.vm.set_console() self.vm.add_args('-kernel', self.workdir + '/day17/ballerina.bin') self.vm.launch() wait_for_console_pattern(self, 'This architecture does not have ' 'kernel memory protection') # Note: # The kernel sometimes gets stuck after the "This architecture ..." # message, that's why we don't test for a later string here. This # needs some investigation by a microblaze wizard one day...
1,396
37.805556
77
py
qemu
qemu-master/tests/avocado/ppc_bamboo.py
# Test that Linux kernel boots on the ppc bamboo board and check the console # # Copyright (c) 2021 Red Hat # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. from avocado.utils import archive from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern from avocado_qemu import exec_command_and_wait_for_pattern class BambooMachine(QemuSystemTest): timeout = 90 def test_ppc_bamboo(self): """ :avocado: tags=arch:ppc :avocado: tags=machine:bamboo :avocado: tags=cpu:440epb :avocado: tags=device:rtl8139 :avocado: tags=accel:tcg """ self.require_accelerator("tcg") self.require_netdev('user') tar_url = ('http://landley.net/aboriginal/downloads/binaries/' 'system-image-powerpc-440fp.tar.gz') tar_hash = '53e5f16414b195b82d2c70272f81c2eedb39bad9' file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) archive.extract(file_path, self.workdir) self.vm.set_console() self.vm.add_args('-kernel', self.workdir + '/system-image-powerpc-440fp/linux', '-initrd', self.workdir + '/system-image-powerpc-440fp/rootfs.cpio.gz', '-nic', 'user,model=rtl8139,restrict=on') self.vm.launch() wait_for_console_pattern(self, 'Type exit when done') exec_command_and_wait_for_pattern(self, 'ping 10.0.2.2', '10.0.2.2 is alive!') exec_command_and_wait_for_pattern(self, 'halt', 'System Halted')
1,731
39.27907
80
py
qemu
qemu-master/tests/avocado/cpu_queries.py
# Sanity check of query-cpu-* results # # Copyright (c) 2019 Red Hat, Inc. # # Author: # Eduardo Habkost <[email protected]> # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. from avocado_qemu import QemuSystemTest class QueryCPUModelExpansion(QemuSystemTest): """ Run query-cpu-model-expansion for each CPU model, and validate results """ def test(self): """ :avocado: tags=arch:x86_64 :avocado: tags=machine:none """ self.vm.add_args('-S') self.vm.launch() cpus = self.vm.command('query-cpu-definitions') for c in cpus: self.log.info("Checking CPU: %s", c) self.assertNotIn('', c['unavailable-features'], c['name']) for c in cpus: model = {'name': c['name']} e = self.vm.command('query-cpu-model-expansion', model=model, type='full') self.assertEquals(e['model']['name'], c['name'])
1,021
28.2
86
py
qemu
qemu-master/tests/avocado/machine_sparc_leon3.py
# Functional test that boots a Leon3 machine and checks its serial console. # # Copyright (c) Philippe Mathieu-Daudé <[email protected]> # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern from avocado import skip class Leon3Machine(QemuSystemTest): timeout = 60 @skip("Test currently broken") # A Window Underflow exception occurs before booting the kernel, # and QEMU exit calling cpu_abort(), which makes this test to fail. def test_leon3_helenos_uimage(self): """ :avocado: tags=arch:sparc :avocado: tags=machine:leon3_generic :avocado: tags=binfmt:uimage """ kernel_url = ('http://www.helenos.org/releases/' 'HelenOS-0.6.0-sparc32-leon3.bin') kernel_hash = 'a88c9cfdb8430c66650e5290a08765f9bf049a30' kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) self.vm.set_console() self.vm.add_args('-kernel', kernel_path) self.vm.launch() wait_for_console_pattern(self, 'Copyright (c) 2001-2014 HelenOS project') wait_for_console_pattern(self, 'Booting the kernel ...')
1,302
33.289474
81
py
qemu
qemu-master/tests/avocado/multiprocess.py
# Test for multiprocess qemu
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.


import os
import socket

from avocado_qemu import QemuSystemTest
from avocado_qemu import wait_for_console_pattern
from avocado_qemu import exec_command
from avocado_qemu import exec_command_and_wait_for_pattern

class Multiprocess(QemuSystemTest):
    """
    :avocado: tags=multiprocess
    """
    KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '

    def do_test(self, kernel_url, initrd_url, kernel_command_line,
                machine_type):
        """Main test method"""
        self.require_accelerator('kvm')
        self.require_multiprocess()

        # Fetch the boot artifacts up front.
        kernel_path = self.fetch_asset(kernel_url)
        initrd_path = self.fetch_asset(initrd_url)

        # A socketpair links the proxy VM with the remote device process.
        # Both ends must stay open across exec of the QEMU children.
        sock_proxy, sock_remote = socket.socketpair(socket.AF_UNIX,
                                                    socket.SOCK_STREAM)
        os.set_inheritable(sock_proxy.fileno(), True)
        os.set_inheritable(sock_remote.fileno(), True)

        # The remote process hosts the SCSI controller out-of-process.
        remote_vm = self.get_vm()
        remote_vm.add_args('-machine', 'x-remote')
        remote_vm.add_args('-nodefaults')
        remote_vm.add_args('-device', 'lsi53c895a,id=lsi1')
        remote_vm.add_args('-object', 'x-remote-object,id=robj1,'
                           'devid=lsi1,fd='+str(sock_remote.fileno()))
        remote_vm.launch()

        # The proxy process boots the guest and forwards device accesses
        # over the socketpair via x-pci-proxy-dev.
        self.vm.set_console()
        self.vm.add_args('-machine', machine_type)
        self.vm.add_args('-accel', 'kvm')
        self.vm.add_args('-cpu', 'host')
        self.vm.add_args('-object',
                         'memory-backend-memfd,id=sysmem-file,size=2G')
        self.vm.add_args('--numa', 'node,memdev=sysmem-file')
        self.vm.add_args('-m', '2048')
        self.vm.add_args('-kernel', kernel_path,
                         '-initrd', initrd_path,
                         '-append', kernel_command_line)
        self.vm.add_args('-device',
                         'x-pci-proxy-dev,'
                         'id=lsi1,fd='+str(sock_proxy.fileno()))
        self.vm.launch()

        wait_for_console_pattern(self, 'as init process',
                                 'Kernel panic - not syncing')
        exec_command(self, 'mount -t sysfs sysfs /sys')
        exec_command_and_wait_for_pattern(self,
                                          'cat /sys/bus/pci/devices/*/uevent',
                                          'PCI_ID=1000:0012')

    def test_multiprocess_x86_64(self):
        """
        :avocado: tags=arch:x86_64
        """
        kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora'
                      '/linux/releases/31/Everything/x86_64/os/images'
                      '/pxeboot/vmlinuz')
        initrd_url = ('https://archives.fedoraproject.org/pub/archive/fedora'
                      '/linux/releases/31/Everything/x86_64/os/images'
                      '/pxeboot/initrd.img')
        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
                               'console=ttyS0 rdinit=/bin/bash')
        self.do_test(kernel_url, initrd_url, kernel_command_line, 'pc')

    def test_multiprocess_aarch64(self):
        """
        :avocado: tags=arch:aarch64
        """
        kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora'
                      '/linux/releases/31/Everything/aarch64/os/images'
                      '/pxeboot/vmlinuz')
        initrd_url = ('https://archives.fedoraproject.org/pub/archive/fedora'
                      '/linux/releases/31/Everything/aarch64/os/images'
                      '/pxeboot/initrd.img')
        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
                               'rdinit=/bin/bash console=ttyAMA0')
        self.do_test(kernel_url, initrd_url, kernel_command_line,
                     'virt,gic-version=3')
4,081
41.082474
79
py
qemu
qemu-master/tests/avocado/tesseract_utils.py
# ... # # Copyright (c) 2019 Philippe Mathieu-Daudé <[email protected]> # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. import re import logging from avocado.utils import process from avocado.utils.path import find_command, CmdNotFoundError def tesseract_available(expected_version): try: find_command('tesseract') except CmdNotFoundError: return False res = process.run('tesseract --version') try: version = res.stdout_text.split()[1] except IndexError: version = res.stderr_text.split()[1] return int(version.split('.')[0]) == expected_version match = re.match(r'tesseract\s(\d)', res) if match is None: return False # now this is guaranteed to be a digit return int(match.groups()[0]) == expected_version def tesseract_ocr(image_path, tesseract_args='', tesseract_version=3): console_logger = logging.getLogger('tesseract') console_logger.debug(image_path) if tesseract_version == 4: tesseract_args += ' --oem 1' proc = process.run("tesseract {} {} stdout".format(tesseract_args, image_path)) lines = [] for line in proc.stdout_text.split('\n'): sline = line.strip() if len(sline): console_logger.debug(sline) lines += [sline] return lines
1,439
29.638298
70
py
qemu
qemu-master/tests/avocado/linux_initrd.py
# Linux initrd integration test.
#
# Copyright (c) 2018 Red Hat, Inc.
#
# Author:
#  Wainer dos Santos Moschetta <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or
# later. See the COPYING file in the top-level directory.

import os
import logging
import tempfile

from avocado_qemu import QemuSystemTest
from avocado import skipIf


class LinuxInitrd(QemuSystemTest):
    """
    Checks QEMU evaluates correctly the initrd file passed as -initrd option.

    :avocado: tags=arch:x86_64
    :avocado: tags=machine:pc
    """

    timeout = 300

    def test_with_2gib_file_should_exit_error_msg_with_linux_v3_6(self):
        """
        Pretends to boot QEMU with an initrd file with size of 2GiB
        and expect it exits with error message.
        Fedora-18 shipped with linux-3.6 which have not supported xloadflags
        cannot support more than 2GiB initrd.
        """
        kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora/li'
                      'nux/releases/18/Fedora/x86_64/os/images/pxeboot/vmlinuz')
        kernel_hash = '41464f68efe42b9991250bed86c7081d2ccdbb21'
        kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)

        # One byte short of 2 GiB: the first size the old kernel rejects.
        max_size = 2 * (1024 ** 3) - 1
        with tempfile.NamedTemporaryFile() as sparse_initrd:
            # Create a sparse file of the target size.
            sparse_initrd.seek(max_size)
            sparse_initrd.write(b'\0')
            sparse_initrd.flush()

            self.vm.add_args('-kernel', kernel_path, '-initrd',
                             sparse_initrd.name, '-m', '4096')
            self.vm.set_qmp_monitor(enabled=False)
            self.vm.launch()
            self.vm.wait()
            self.assertEqual(self.vm.exitcode(), 1)
            expected_msg = r'.*initrd is too large.*max: \d+, need %s.*' % (
                max_size + 1)
            self.assertRegex(self.vm.get_log(), expected_msg)

    @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab')
    def test_with_2gib_file_should_work_with_linux_v4_16(self):
        """
        QEMU has supported up to 4 GiB initrd for recent kernel
        Expect guest can reach 'Unpacking initramfs...'
        """
        kernel_url = ('https://archives.fedoraproject.org/pub/archive/fedora'
                      '/linux/releases/28/Everything/x86_64/os/images/pxeboot/'
                      'vmlinuz')
        kernel_hash = '238e083e114c48200f80d889f7e32eeb2793e02a'
        kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash)

        # Just over 2 GiB: the newer kernel must accept it.
        max_size = 2 * (1024 ** 3) + 1
        with tempfile.NamedTemporaryFile() as sparse_initrd:
            sparse_initrd.seek(max_size)
            sparse_initrd.write(b'\0')
            sparse_initrd.flush()

            self.vm.set_console()
            self.vm.add_args('-kernel', kernel_path,
                             '-append', 'console=ttyS0',
                             '-initrd', sparse_initrd.name,
                             '-m', '5120')
            self.vm.launch()

            console = self.vm.console_socket.makefile()
            console_logger = logging.getLogger('console')
            # Read console output until the initramfs starts unpacking,
            # failing fast on a kernel panic.
            while True:
                line = console.readline()
                console_logger.debug(line.strip())
                if 'Unpacking initramfs...' in line:
                    break
                if 'Kernel panic - not syncing' in line:
                    self.fail("Kernel panic reached")
3,386
36.633333
80
py
qemu
qemu-master/tests/avocado/virtio_version.py
""" Check compatibility of virtio device types """ # Copyright (c) 2018 Red Hat, Inc. # # Author: # Eduardo Habkost <[email protected]> # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. import sys import os from qemu.machine import QEMUMachine from avocado_qemu import QemuSystemTest # Virtio Device IDs: VIRTIO_NET = 1 VIRTIO_BLOCK = 2 VIRTIO_CONSOLE = 3 VIRTIO_RNG = 4 VIRTIO_BALLOON = 5 VIRTIO_RPMSG = 7 VIRTIO_SCSI = 8 VIRTIO_9P = 9 VIRTIO_RPROC_SERIAL = 11 VIRTIO_CAIF = 12 VIRTIO_GPU = 16 VIRTIO_INPUT = 18 VIRTIO_VSOCK = 19 VIRTIO_CRYPTO = 20 PCI_VENDOR_ID_REDHAT_QUMRANET = 0x1af4 # Device IDs for legacy/transitional devices: PCI_LEGACY_DEVICE_IDS = { VIRTIO_NET: 0x1000, VIRTIO_BLOCK: 0x1001, VIRTIO_BALLOON: 0x1002, VIRTIO_CONSOLE: 0x1003, VIRTIO_SCSI: 0x1004, VIRTIO_RNG: 0x1005, VIRTIO_9P: 0x1009, VIRTIO_VSOCK: 0x1012, } def pci_modern_device_id(virtio_devid): return virtio_devid + 0x1040 def devtype_implements(vm, devtype, implements): return devtype in [d['name'] for d in vm.command('qom-list-types', implements=implements)] def get_pci_interfaces(vm, devtype): interfaces = ('pci-express-device', 'conventional-pci-device') return [i for i in interfaces if devtype_implements(vm, devtype, i)] class VirtioVersionCheck(QemuSystemTest): """ Check if virtio-version-specific device types result in the same device tree created by `disable-modern` and `disable-legacy`. 
:avocado: tags=arch:x86_64 """ # just in case there are failures, show larger diff: maxDiff = 4096 def run_device(self, devtype, opts=None, machine='pc'): """ Run QEMU with `-device DEVTYPE`, return device info from `query-pci` """ with QEMUMachine(self.qemu_bin) as vm: vm.set_machine(machine) if opts: devtype += ',' + opts vm.add_args('-device', '%s,id=devfortest' % (devtype)) vm.add_args('-S') vm.launch() pcibuses = vm.command('query-pci') alldevs = [dev for bus in pcibuses for dev in bus['devices']] devfortest = [dev for dev in alldevs if dev['qdev_id'] == 'devfortest'] return devfortest[0], get_pci_interfaces(vm, devtype) def assert_devids(self, dev, devid, non_transitional=False): self.assertEqual(dev['id']['vendor'], PCI_VENDOR_ID_REDHAT_QUMRANET) self.assertEqual(dev['id']['device'], devid) if non_transitional: self.assertTrue(0x1040 <= dev['id']['device'] <= 0x107f) self.assertGreaterEqual(dev['id']['subsystem'], 0x40) def check_all_variants(self, qemu_devtype, virtio_devid): """Check if a virtio device type and its variants behave as expected""" # Force modern mode: dev_modern, _ = self.run_device(qemu_devtype, 'disable-modern=off,disable-legacy=on') self.assert_devids(dev_modern, pci_modern_device_id(virtio_devid), non_transitional=True) # <prefix>-non-transitional device types should be 100% equivalent to # <prefix>,disable-modern=off,disable-legacy=on dev_1_0, nt_ifaces = self.run_device('%s-non-transitional' % (qemu_devtype)) self.assertEqual(dev_modern, dev_1_0) # Force transitional mode: dev_trans, _ = self.run_device(qemu_devtype, 'disable-modern=off,disable-legacy=off') self.assert_devids(dev_trans, PCI_LEGACY_DEVICE_IDS[virtio_devid]) # Force legacy mode: dev_legacy, _ = self.run_device(qemu_devtype, 'disable-modern=on,disable-legacy=off') self.assert_devids(dev_legacy, PCI_LEGACY_DEVICE_IDS[virtio_devid]) # No options: default to transitional on PC machine-type: no_opts_pc, generic_ifaces = self.run_device(qemu_devtype) 
self.assertEqual(dev_trans, no_opts_pc) #TODO: check if plugging on a PCI Express bus will make the # device non-transitional #no_opts_q35 = self.run_device(qemu_devtype, machine='q35') #self.assertEqual(dev_modern, no_opts_q35) # <prefix>-transitional device types should be 100% equivalent to # <prefix>,disable-modern=off,disable-legacy=off dev_trans, trans_ifaces = self.run_device('%s-transitional' % (qemu_devtype)) self.assertEqual(dev_trans, dev_trans) # ensure the interface information is correct: self.assertIn('conventional-pci-device', generic_ifaces) self.assertIn('pci-express-device', generic_ifaces) self.assertIn('conventional-pci-device', nt_ifaces) self.assertIn('pci-express-device', nt_ifaces) self.assertIn('conventional-pci-device', trans_ifaces) self.assertNotIn('pci-express-device', trans_ifaces) def test_conventional_devs(self): self.check_all_variants('virtio-net-pci', VIRTIO_NET) # virtio-blk requires 'driver' parameter #self.check_all_variants('virtio-blk-pci', VIRTIO_BLOCK) self.check_all_variants('virtio-serial-pci', VIRTIO_CONSOLE) self.check_all_variants('virtio-rng-pci', VIRTIO_RNG) self.check_all_variants('virtio-balloon-pci', VIRTIO_BALLOON) self.check_all_variants('virtio-scsi-pci', VIRTIO_SCSI) # virtio-9p requires 'fsdev' parameter #self.check_all_variants('virtio-9p-pci', VIRTIO_9P) def check_modern_only(self, qemu_devtype, virtio_devid): """Check if a modern-only virtio device type behaves as expected""" # Force modern mode: dev_modern, _ = self.run_device(qemu_devtype, 'disable-modern=off,disable-legacy=on') self.assert_devids(dev_modern, pci_modern_device_id(virtio_devid), non_transitional=True) # No options: should be modern anyway dev_no_opts, ifaces = self.run_device(qemu_devtype) self.assertEqual(dev_modern, dev_no_opts) self.assertIn('conventional-pci-device', ifaces) self.assertIn('pci-express-device', ifaces) def test_modern_only_devs(self): self.check_modern_only('virtio-vga', VIRTIO_GPU) 
self.check_modern_only('virtio-gpu-pci', VIRTIO_GPU) self.check_modern_only('virtio-mouse-pci', VIRTIO_INPUT) self.check_modern_only('virtio-tablet-pci', VIRTIO_INPUT) self.check_modern_only('virtio-keyboard-pci', VIRTIO_INPUT)
6,672
37.131429
94
py
qemu
qemu-master/tests/avocado/machine_mips_malta.py
# Functional tests for the MIPS Malta board
#
# Copyright (c) Philippe Mathieu-Daudé <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
#
# SPDX-License-Identifier: GPL-2.0-or-later

import os
import gzip
import logging

from avocado import skipIf
from avocado import skipUnless
from avocado.utils import archive
from avocado_qemu import QemuSystemTest
from avocado_qemu import exec_command_and_wait_for_pattern
from avocado_qemu import interrupt_interactive_console_until_pattern
from avocado_qemu import wait_for_console_pattern

NUMPY_AVAILABLE = True
try:
    import numpy as np
except ImportError:
    NUMPY_AVAILABLE = False

CV2_AVAILABLE = True
try:
    import cv2
except ImportError:
    CV2_AVAILABLE = False


@skipUnless(NUMPY_AVAILABLE, 'Python NumPy not installed')
@skipUnless(CV2_AVAILABLE, 'Python OpenCV not installed')
class MaltaMachineFramebuffer(QemuSystemTest):

    timeout = 30

    KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '

    def do_test_i6400_framebuffer_logo(self, cpu_cores_count):
        """
        Boot Linux kernel and check Tux logo is displayed on the framebuffer.
        """
        screendump_path = os.path.join(self.workdir, 'screendump.pbm')

        kernel_url = ('https://github.com/philmd/qemu-testing-blob/raw/'
                      'a5966ca4b5/mips/malta/mips64el/'
                      'vmlinux-4.7.0-rc1.I6400.gz')
        kernel_hash = '096f50c377ec5072e6a366943324622c312045f6'
        kernel_path_gz = self.fetch_asset(kernel_url, asset_hash=kernel_hash)
        # BUG FIX: the previous concatenation lacked a path separator,
        # writing the decompressed kernel next to (not inside) workdir
        # with a mangled name; use os.path.join instead.
        kernel_path = os.path.join(self.workdir, 'vmlinux')
        archive.gzip_uncompress(kernel_path_gz, kernel_path)

        tuxlogo_url = ('https://github.com/torvalds/linux/raw/v2.6.12/'
                       'drivers/video/logo/logo_linux_vga16.ppm')
        tuxlogo_hash = '3991c2ddbd1ddaecda7601f8aafbcf5b02dc86af'
        tuxlogo_path = self.fetch_asset(tuxlogo_url, asset_hash=tuxlogo_hash)

        self.vm.set_console()
        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
                               'clocksource=GIC console=tty0 console=ttyS0')
        self.vm.add_args('-kernel', kernel_path,
                         '-smp', '%u' % cpu_cores_count,
                         '-vga', 'std',
                         '-append', kernel_command_line)
        self.vm.launch()
        framebuffer_ready = 'Console: switching to colour frame buffer device'
        wait_for_console_pattern(self, framebuffer_ready,
                                 failure_message='Kernel panic - not syncing')
        self.vm.command('human-monitor-command', command_line='stop')
        self.vm.command('human-monitor-command',
                        command_line='screendump %s' % screendump_path)
        logger = logging.getLogger('framebuffer')

        # Use OpenCV template matching to count Tux logos on the dump;
        # one logo is drawn per CPU core.
        match_threshold = 0.95
        screendump_bgr = cv2.imread(screendump_path, cv2.IMREAD_COLOR)
        tuxlogo_bgr = cv2.imread(tuxlogo_path, cv2.IMREAD_COLOR)
        result = cv2.matchTemplate(screendump_bgr, tuxlogo_bgr,
                                   cv2.TM_CCOEFF_NORMED)
        loc = np.where(result >= match_threshold)
        tuxlogo_count = 0
        h, w = tuxlogo_bgr.shape[:2]
        debug_png = os.getenv('AVOCADO_CV2_SCREENDUMP_PNG_PATH')
        for tuxlogo_count, pt in enumerate(zip(*loc[::-1]), start=1):
            logger.debug('found Tux at position (x, y) = %s', pt)
            cv2.rectangle(screendump_bgr, pt,
                          (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
        if debug_png:
            cv2.imwrite(debug_png, screendump_bgr)
        self.assertGreaterEqual(tuxlogo_count, cpu_cores_count)

    def test_mips_malta_i6400_framebuffer_logo_1core(self):
        """
        :avocado: tags=arch:mips64el
        :avocado: tags=machine:malta
        :avocado: tags=cpu:I6400
        """
        self.do_test_i6400_framebuffer_logo(1)

    @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab')
    def test_mips_malta_i6400_framebuffer_logo_7cores(self):
        """
        :avocado: tags=arch:mips64el
        :avocado: tags=machine:malta
        :avocado: tags=cpu:I6400
        :avocado: tags=mips:smp
        """
        self.do_test_i6400_framebuffer_logo(7)

    @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab')
    def test_mips_malta_i6400_framebuffer_logo_8cores(self):
        """
        :avocado: tags=arch:mips64el
        :avocado: tags=machine:malta
        :avocado: tags=cpu:I6400
        :avocado: tags=mips:smp
        """
        self.do_test_i6400_framebuffer_logo(8)

class MaltaMachine(QemuSystemTest):

    def do_test_yamon(self):
        """Boot the YAMON ROM monitor and wait for its prompt."""
        rom_url = ('http://www.imgtec.com/tools/mips-tools/downloads/'
                   'yamon/yamon-bin-02.22.zip')
        rom_hash = '8da7ecddbc5312704b8b324341ee238189bde480'
        zip_path = self.fetch_asset(rom_url, asset_hash=rom_hash)

        archive.extract(zip_path, self.workdir)
        yamon_path = os.path.join(self.workdir, 'yamon-02.22.bin')

        self.vm.set_console()
        self.vm.add_args('-bios', yamon_path)
        self.vm.launch()

        prompt = 'YAMON>'
        pattern = 'YAMON ROM Monitor'
        interrupt_interactive_console_until_pattern(self, pattern, prompt)
        wait_for_console_pattern(self, prompt)
        self.vm.shutdown()

    def test_mipsel_malta_yamon(self):
        """
        :avocado: tags=arch:mipsel
        :avocado: tags=machine:malta
        :avocado: tags=endian:little
        """
        self.do_test_yamon()

    def test_mips64el_malta_yamon(self):
        """
        :avocado: tags=arch:mips64el
        :avocado: tags=machine:malta
        :avocado: tags=endian:little
        """
        self.do_test_yamon()
35.04375
78
py
qemu
qemu-master/tests/avocado/load_bflt.py
# Test the bFLT loader format # # Copyright (C) 2019 Philippe Mathieu-Daudé <[email protected]> # # SPDX-License-Identifier: GPL-2.0-or-later import os import bz2 import subprocess from avocado import skipUnless from avocado_qemu import QemuUserTest from avocado_qemu import has_cmd class LoadBFLT(QemuUserTest): def extract_cpio(self, cpio_path): """ Extracts a cpio archive into the test workdir :param cpio_path: path to the cpio archive """ cwd = os.getcwd() os.chdir(self.workdir) with bz2.open(cpio_path, 'rb') as archive_cpio: subprocess.run(['cpio', '-i'], input=archive_cpio.read(), stderr=subprocess.DEVNULL) os.chdir(cwd) @skipUnless(*has_cmd('cpio')) @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code') def test_stm32(self): """ :avocado: tags=arch:arm :avocado: tags=linux_user :avocado: tags=quick """ # See https://elinux.org/STM32#User_Space rootfs_url = ('https://elinux.org/images/5/51/' 'Stm32_mini_rootfs.cpio.bz2') rootfs_hash = '9f065e6ba40cce7411ba757f924f30fcc57951e6' rootfs_path_bz2 = self.fetch_asset(rootfs_url, asset_hash=rootfs_hash) busybox_path = os.path.join(self.workdir, "/bin/busybox") self.extract_cpio(rootfs_path_bz2) res = self.run(busybox_path) ver = 'BusyBox v1.24.0.git (2015-02-03 22:17:13 CET) multi-call binary.' self.assertIn(ver, res.stdout_text) res = self.run(busybox_path, ['uname', '-a']) unm = 'armv7l GNU/Linux' self.assertIn(unm, res.stdout_text)
1,708
30.072727
80
py
qemu
qemu-master/tests/avocado/machine_rx_gdbsim.py
# Functional test that boots a Linux kernel and checks the console # # Copyright (c) 2018 Red Hat, Inc. # # Author: # Cleber Rosa <[email protected]> # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. import os from avocado import skipIf from avocado_qemu import QemuSystemTest from avocado_qemu import exec_command_and_wait_for_pattern from avocado_qemu import wait_for_console_pattern from avocado.utils import archive class RxGdbSimMachine(QemuSystemTest): timeout = 30 KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab') def test_uboot(self): """ U-Boot and checks that the console is operational. :avocado: tags=arch:rx :avocado: tags=machine:gdbsim-r5f562n8 :avocado: tags=endian:little """ uboot_url = ('https://acc.dl.osdn.jp/users/23/23888/u-boot.bin.gz') uboot_hash = '9b78dbd43b40b2526848c0b1ce9de02c24f4dcdb' uboot_path = self.fetch_asset(uboot_url, asset_hash=uboot_hash) uboot_path = archive.uncompress(uboot_path, self.workdir) self.vm.set_console() self.vm.add_args('-bios', uboot_path, '-no-reboot') self.vm.launch() uboot_version = 'U-Boot 2016.05-rc3-23705-ga1ef3c71cb-dirty' wait_for_console_pattern(self, uboot_version) gcc_version = 'rx-unknown-linux-gcc (GCC) 9.0.0 20181105 (experimental)' # FIXME limit baudrate on chardev, else we type too fast #exec_command_and_wait_for_pattern(self, 'version', gcc_version) @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab') def test_linux_sash(self): """ Boots a Linux kernel and checks that the console is operational. 
:avocado: tags=arch:rx :avocado: tags=machine:gdbsim-r5f562n7 :avocado: tags=endian:little """ dtb_url = ('https://acc.dl.osdn.jp/users/23/23887/rx-virt.dtb') dtb_hash = '7b4e4e2c71905da44e86ce47adee2210b026ac18' dtb_path = self.fetch_asset(dtb_url, asset_hash=dtb_hash) kernel_url = ('http://acc.dl.osdn.jp/users/23/23845/zImage') kernel_hash = '39a81067f8d72faad90866ddfefa19165d68fc99' kernel_path = self.fetch_asset(kernel_url, asset_hash=kernel_hash) self.vm.set_console() kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'earlycon' self.vm.add_args('-kernel', kernel_path, '-dtb', dtb_path, '-no-reboot') self.vm.launch() wait_for_console_pattern(self, 'Sash command shell (version 1.1.1)', failure_message='Kernel panic - not syncing') exec_command_and_wait_for_pattern(self, 'printenv', 'TERM=linux')
2,858
37.635135
80
py
qemu
qemu-master/tests/avocado/machine_mips_loongson3v.py
# Functional tests for the Generic Loongson-3 Platform. # # Copyright (c) 2021 Jiaxun Yang <[email protected]> # # This work is licensed under the terms of the GNU GPL, version 2 or later. # See the COPYING file in the top-level directory. # # SPDX-License-Identifier: GPL-2.0-or-later import os import time from avocado import skipUnless from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern class MipsLoongson3v(QemuSystemTest): timeout = 60 @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code') def test_pmon_serial_console(self): """ :avocado: tags=arch:mips64el :avocado: tags=endian:little :avocado: tags=machine:loongson3-virt :avocado: tags=cpu:Loongson-3A1000 :avocado: tags=device:liointc :avocado: tags=device:goldfish_rtc """ pmon_hash = '7c8b45dd81ccfc55ff28f5aa267a41c3' pmon_path = self.fetch_asset('https://github.com/loongson-community/pmon/' 'releases/download/20210112/pmon-3avirt.bin', asset_hash=pmon_hash, algorithm='md5') self.vm.set_console() self.vm.add_args('-bios', pmon_path) self.vm.launch() wait_for_console_pattern(self, 'CPU GODSON3 BogoMIPS:')
1,341
32.55
82
py
qemu
qemu-master/tests/avocado/ppc_74xx.py
# Smoke tests for 74xx cpus (aka G4). # # Copyright (c) 2021, IBM Corp. # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern class ppc74xxCpu(QemuSystemTest): """ :avocado: tags=arch:ppc :avocado: tags=accel:tcg """ timeout = 5 def test_ppc_7400(self): """ :avocado: tags=cpu:7400 """ self.require_accelerator("tcg") self.vm.set_console() self.vm.launch() wait_for_console_pattern(self, '>> OpenBIOS') wait_for_console_pattern(self, '>> CPU type PowerPC,G4') def test_ppc_7410(self): """ :avocado: tags=cpu:7410 """ self.require_accelerator("tcg") self.vm.set_console() self.vm.launch() wait_for_console_pattern(self, '>> OpenBIOS') wait_for_console_pattern(self, '>> CPU type PowerPC,74xx') def test_ppc_7441(self): """ :avocado: tags=cpu:7441 """ self.require_accelerator("tcg") self.vm.set_console() self.vm.launch() wait_for_console_pattern(self, '>> OpenBIOS') wait_for_console_pattern(self, '>> CPU type PowerPC,G4') def test_ppc_7445(self): """ :avocado: tags=cpu:7445 """ self.require_accelerator("tcg") self.vm.set_console() self.vm.launch() wait_for_console_pattern(self, '>> OpenBIOS') wait_for_console_pattern(self, '>> CPU type PowerPC,G4') def test_ppc_7447(self): """ :avocado: tags=cpu:7447 """ self.require_accelerator("tcg") self.vm.set_console() self.vm.launch() wait_for_console_pattern(self, '>> OpenBIOS') wait_for_console_pattern(self, '>> CPU type PowerPC,G4') def test_ppc_7447a(self): """ :avocado: tags=cpu:7447a """ self.require_accelerator("tcg") self.vm.set_console() self.vm.launch() wait_for_console_pattern(self, '>> OpenBIOS') wait_for_console_pattern(self, '>> CPU type PowerPC,G4') def test_ppc_7448(self): """ :avocado: tags=cpu:7448 """ self.require_accelerator("tcg") self.vm.set_console() self.vm.launch() wait_for_console_pattern(self, '>> OpenBIOS') wait_for_console_pattern(self, '>> CPU 
type PowerPC,MPC86xx') def test_ppc_7450(self): """ :avocado: tags=cpu:7450 """ self.require_accelerator("tcg") self.vm.set_console() self.vm.launch() wait_for_console_pattern(self, '>> OpenBIOS') wait_for_console_pattern(self, '>> CPU type PowerPC,G4') def test_ppc_7451(self): """ :avocado: tags=cpu:7451 """ self.require_accelerator("tcg") self.vm.set_console() self.vm.launch() wait_for_console_pattern(self, '>> OpenBIOS') wait_for_console_pattern(self, '>> CPU type PowerPC,G4') def test_ppc_7455(self): """ :avocado: tags=cpu:7455 """ self.require_accelerator("tcg") self.vm.set_console() self.vm.launch() wait_for_console_pattern(self, '>> OpenBIOS') wait_for_console_pattern(self, '>> CPU type PowerPC,G4') def test_ppc_7457(self): """ :avocado: tags=cpu:7457 """ self.require_accelerator("tcg") self.vm.set_console() self.vm.launch() wait_for_console_pattern(self, '>> OpenBIOS') wait_for_console_pattern(self, '>> CPU type PowerPC,G4') def test_ppc_7457a(self): """ :avocado: tags=cpu:7457a """ self.require_accelerator("tcg") self.vm.set_console() self.vm.launch() wait_for_console_pattern(self, '>> OpenBIOS') wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
4,028
28.408759
69
py
qemu
qemu-master/tests/avocado/machine_aspeed.py
# Functional test that boots the ASPEED SoCs with firmware # # Copyright (C) 2022 ASPEED Technology Inc # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. import time import os from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern from avocado_qemu import exec_command from avocado_qemu import exec_command_and_wait_for_pattern from avocado_qemu import interrupt_interactive_console_until_pattern from avocado.utils import archive from avocado import skipIf class AST1030Machine(QemuSystemTest): """Boots the zephyr os and checks that the console is operational""" timeout = 10 def test_ast1030_zephyros_1_04(self): """ :avocado: tags=arch:arm :avocado: tags=machine:ast1030-evb :avocado: tags=os:zephyr """ tar_url = ('https://github.com/AspeedTech-BMC' '/zephyr/releases/download/v00.01.04/ast1030-evb-demo.zip') tar_hash = '4c6a8ce3a8ba76ef1a65dae419ae3409343c4b20' tar_path = self.fetch_asset(tar_url, asset_hash=tar_hash) archive.extract(tar_path, self.workdir) kernel_file = self.workdir + "/ast1030-evb-demo/zephyr.elf" self.vm.set_console() self.vm.add_args('-kernel', kernel_file, '-nographic') self.vm.launch() wait_for_console_pattern(self, "Booting Zephyr OS") exec_command_and_wait_for_pattern(self, "help", "Available commands") def test_ast1030_zephyros_1_07(self): """ :avocado: tags=arch:arm :avocado: tags=machine:ast1030-evb :avocado: tags=os:zephyr """ tar_url = ('https://github.com/AspeedTech-BMC' '/zephyr/releases/download/v00.01.07/ast1030-evb-demo.zip') tar_hash = '40ac87eabdcd3b3454ce5aad11fedc72a33ecda2' tar_path = self.fetch_asset(tar_url, asset_hash=tar_hash) archive.extract(tar_path, self.workdir) kernel_file = self.workdir + "/ast1030-evb-demo/zephyr.bin" self.vm.set_console() self.vm.add_args('-kernel', kernel_file, '-nographic') self.vm.launch() wait_for_console_pattern(self, "Booting Zephyr OS") for shell_cmd in [ 'kernel stacks', 'otp info 
conf', 'otp info scu', 'hwinfo devid', 'crypto aes256_cbc_vault', 'random get', 'jtag JTAG1 sw_xfer high TMS', 'adc ADC0 resolution 12', 'adc ADC0 read 42', 'adc ADC1 read 69', 'i2c scan I2C_0', 'i3c attach I3C_0', 'hash test', 'kernel uptime', 'kernel reboot warm', 'kernel uptime', 'kernel reboot cold', 'kernel uptime', ]: exec_command_and_wait_for_pattern(self, shell_cmd, "uart:~$") class AST2x00Machine(QemuSystemTest): timeout = 90 def wait_for_console_pattern(self, success_message, vm=None): wait_for_console_pattern(self, success_message, failure_message='Kernel panic - not syncing', vm=vm) def do_test_arm_aspeed(self, image): self.vm.set_console() self.vm.add_args('-drive', 'file=' + image + ',if=mtd,format=raw', '-net', 'nic') self.vm.launch() self.wait_for_console_pattern("U-Boot 2016.07") self.wait_for_console_pattern("## Loading kernel from FIT Image at 20080000") self.wait_for_console_pattern("Starting kernel ...") self.wait_for_console_pattern("Booting Linux on physical CPU 0x0") wait_for_console_pattern(self, "aspeed-smc 1e620000.spi: read control register: 203b0641") self.wait_for_console_pattern("ftgmac100 1e660000.ethernet eth0: irq ") self.wait_for_console_pattern("systemd[1]: Set hostname to") def test_arm_ast2400_palmetto_openbmc_v2_9_0(self): """ :avocado: tags=arch:arm :avocado: tags=machine:palmetto-bmc """ image_url = ('https://github.com/openbmc/openbmc/releases/download/2.9.0/' 'obmc-phosphor-image-palmetto.static.mtd') image_hash = ('3e13bbbc28e424865dc42f35ad672b10f2e82cdb11846bb28fa625b48beafd0d') image_path = self.fetch_asset(image_url, asset_hash=image_hash, algorithm='sha256') self.do_test_arm_aspeed(image_path) def test_arm_ast2500_romulus_openbmc_v2_9_0(self): """ :avocado: tags=arch:arm :avocado: tags=machine:romulus-bmc """ image_url = ('https://github.com/openbmc/openbmc/releases/download/2.9.0/' 'obmc-phosphor-image-romulus.static.mtd') image_hash = ('820341076803f1955bc31e647a512c79f9add4f5233d0697678bab4604c7bb25') image_path = 
self.fetch_asset(image_url, asset_hash=image_hash, algorithm='sha256') self.do_test_arm_aspeed(image_path) def do_test_arm_aspeed_buildroot_start(self, image, cpu_id): self.require_netdev('user') self.vm.set_console() self.vm.add_args('-drive', 'file=' + image + ',if=mtd,format=raw', '-net', 'nic', '-net', 'user') self.vm.launch() self.wait_for_console_pattern('U-Boot 2019.04') self.wait_for_console_pattern('## Loading kernel from FIT Image') self.wait_for_console_pattern('Starting kernel ...') self.wait_for_console_pattern('Booting Linux on physical CPU ' + cpu_id) self.wait_for_console_pattern('lease of 10.0.2.15') # the line before login: self.wait_for_console_pattern('Aspeed EVB') time.sleep(0.1) exec_command(self, 'root') time.sleep(0.1) def do_test_arm_aspeed_buildroot_poweroff(self): exec_command_and_wait_for_pattern(self, 'poweroff', 'reboot: System halted'); def test_arm_ast2500_evb_buildroot(self): """ :avocado: tags=arch:arm :avocado: tags=machine:ast2500-evb """ image_url = ('https://github.com/legoater/qemu-aspeed-boot/raw/master/' 'images/ast2500-evb/buildroot-2022.11-2-g15d3648df9/flash.img') image_hash = ('f96d11db521fe7a2787745e9e391225deeeec3318ee0fc07c8b799b8833dd474') image_path = self.fetch_asset(image_url, asset_hash=image_hash, algorithm='sha256') self.vm.add_args('-device', 'tmp105,bus=aspeed.i2c.bus.3,address=0x4d,id=tmp-test'); self.do_test_arm_aspeed_buildroot_start(image_path, '0x0') exec_command_and_wait_for_pattern(self, 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-3/device/new_device', 'i2c i2c-3: new_device: Instantiated device lm75 at 0x4d'); exec_command_and_wait_for_pattern(self, 'cat /sys/class/hwmon/hwmon1/temp1_input', '0') self.vm.command('qom-set', path='/machine/peripheral/tmp-test', property='temperature', value=18000); exec_command_and_wait_for_pattern(self, 'cat /sys/class/hwmon/hwmon1/temp1_input', '18000') self.do_test_arm_aspeed_buildroot_poweroff() def test_arm_ast2600_evb_buildroot(self): """ :avocado: tags=arch:arm 
:avocado: tags=machine:ast2600-evb """ image_url = ('https://github.com/legoater/qemu-aspeed-boot/raw/master/' 'images/ast2600-evb/buildroot-2022.11-2-g15d3648df9/flash.img') image_hash = ('e598d86e5ea79671ca8b59212a326c911bc8bea728dec1a1f5390d717a28bb8b') image_path = self.fetch_asset(image_url, asset_hash=image_hash, algorithm='sha256') self.vm.add_args('-device', 'tmp105,bus=aspeed.i2c.bus.3,address=0x4d,id=tmp-test'); self.vm.add_args('-device', 'ds1338,bus=aspeed.i2c.bus.3,address=0x32'); self.vm.add_args('-device', 'i2c-echo,bus=aspeed.i2c.bus.3,address=0x42'); self.do_test_arm_aspeed_buildroot_start(image_path, '0xf00') exec_command_and_wait_for_pattern(self, 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-3/device/new_device', 'i2c i2c-3: new_device: Instantiated device lm75 at 0x4d'); exec_command_and_wait_for_pattern(self, 'cat /sys/class/hwmon/hwmon0/temp1_input', '0') self.vm.command('qom-set', path='/machine/peripheral/tmp-test', property='temperature', value=18000); exec_command_and_wait_for_pattern(self, 'cat /sys/class/hwmon/hwmon0/temp1_input', '18000') exec_command_and_wait_for_pattern(self, 'echo ds1307 0x32 > /sys/class/i2c-dev/i2c-3/device/new_device', 'i2c i2c-3: new_device: Instantiated device ds1307 at 0x32'); year = time.strftime("%Y") exec_command_and_wait_for_pattern(self, 'hwclock -f /dev/rtc1', year); exec_command_and_wait_for_pattern(self, 'echo slave-24c02 0x1064 > /sys/bus/i2c/devices/i2c-3/new_device', 'i2c i2c-3: new_device: Instantiated device slave-24c02 at 0x64'); exec_command(self, 'i2cset -y 3 0x42 0x64 0x00 0xaa i'); time.sleep(0.1) exec_command_and_wait_for_pattern(self, 'hexdump /sys/bus/i2c/devices/3-1064/slave-eeprom', '0000000 ffaa ffff ffff ffff ffff ffff ffff ffff'); self.do_test_arm_aspeed_buildroot_poweroff() class AST2x00MachineSDK(QemuSystemTest): EXTRA_BOOTARGS = ( 'quiet ' 'systemd.mask=org.openbmc.HostIpmi.service ' '[email protected] ' '[email protected] ' 'systemd.mask=rngd.service ' '[email protected] ' ) # FIXME: 
Although these tests boot a whole distro they are still # slower than comparable machine models. There may be some # optimisations which bring down the runtime. In the meantime they # have generous timeouts and are disable for CI which aims for all # tests to run in less than 60 seconds. timeout = 240 def wait_for_console_pattern(self, success_message, vm=None): wait_for_console_pattern(self, success_message, failure_message='Kernel panic - not syncing', vm=vm) def do_test_arm_aspeed_sdk_start(self, image): self.require_netdev('user') self.vm.set_console() self.vm.add_args('-drive', 'file=' + image + ',if=mtd,format=raw', '-net', 'nic', '-net', 'user') self.vm.launch() self.wait_for_console_pattern('U-Boot 2019.04') interrupt_interactive_console_until_pattern( self, 'Hit any key to stop autoboot:', 'ast#') exec_command_and_wait_for_pattern( self, 'setenv bootargs ${bootargs} ' + self.EXTRA_BOOTARGS, 'ast#') exec_command_and_wait_for_pattern( self, 'boot', '## Loading kernel from FIT Image') self.wait_for_console_pattern('Starting kernel ...') @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab') def test_arm_ast2500_evb_sdk(self): """ :avocado: tags=arch:arm :avocado: tags=machine:ast2500-evb """ image_url = ('https://github.com/AspeedTech-BMC/openbmc/releases/' 'download/v08.01/ast2500-default-obmc.tar.gz') image_hash = ('5375f82b4c43a79427909342a1e18b4e48bd663e38466862145d27bb358796fd') image_path = self.fetch_asset(image_url, asset_hash=image_hash, algorithm='sha256') archive.extract(image_path, self.workdir) self.do_test_arm_aspeed_sdk_start( self.workdir + '/ast2500-default/image-bmc') self.wait_for_console_pattern('ast2500-default login:') @skipIf(os.getenv('GITLAB_CI'), 'Running on GitLab') def test_arm_ast2600_evb_sdk(self): """ :avocado: tags=arch:arm :avocado: tags=machine:ast2600-evb """ image_url = ('https://github.com/AspeedTech-BMC/openbmc/releases/' 'download/v08.01/ast2600-default-obmc.tar.gz') image_hash = 
('f12ef15e8c1f03a214df3b91c814515c5e2b2f56119021398c1dbdd626817d15') image_path = self.fetch_asset(image_url, asset_hash=image_hash, algorithm='sha256') archive.extract(image_path, self.workdir) self.vm.add_args('-device', 'tmp105,bus=aspeed.i2c.bus.5,address=0x4d,id=tmp-test'); self.vm.add_args('-device', 'ds1338,bus=aspeed.i2c.bus.5,address=0x32'); self.do_test_arm_aspeed_sdk_start( self.workdir + '/ast2600-default/image-bmc') self.wait_for_console_pattern('ast2600-default login:') exec_command_and_wait_for_pattern(self, 'root', 'Password:') exec_command_and_wait_for_pattern(self, '0penBmc', 'root@ast2600-default:~#') exec_command_and_wait_for_pattern(self, 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-5/device/new_device', 'i2c i2c-5: new_device: Instantiated device lm75 at 0x4d'); exec_command_and_wait_for_pattern(self, 'cat /sys/class/hwmon/hwmon19/temp1_input', '0') self.vm.command('qom-set', path='/machine/peripheral/tmp-test', property='temperature', value=18000); exec_command_and_wait_for_pattern(self, 'cat /sys/class/hwmon/hwmon19/temp1_input', '18000') exec_command_and_wait_for_pattern(self, 'echo ds1307 0x32 > /sys/class/i2c-dev/i2c-5/device/new_device', 'i2c i2c-5: new_device: Instantiated device ds1307 at 0x32'); year = time.strftime("%Y") exec_command_and_wait_for_pattern(self, 'hwclock -f /dev/rtc1', year);
14,461
42.957447
89
py